From 95f80e54b1f42a7f246c758697237c1dc7d737d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Mon, 29 Jan 2024 10:21:39 +0100 Subject: [PATCH 01/24] Restore ExampleStack and TensorExampleStack constructors --- .../org/bytedeco/pytorch/ExampleStack.java | 27 ------------------- .../bytedeco/pytorch/TensorExampleStack.java | 27 ------------------- .../org/bytedeco/pytorch/global/torch.java | 27 ------------------- .../org/bytedeco/pytorch/ExampleStack.java | 21 +++++++++++++++ .../bytedeco/pytorch/TensorExampleStack.java | 17 ++++++++++++ .../org/bytedeco/pytorch/presets/torch.java | 6 ++--- .../bytedeco/pytorch/presets/torch_include.h | 4 +-- 7 files changed, 42 insertions(+), 87 deletions(-) delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleStack.java create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/ExampleStack.java create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/TensorExampleStack.java diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java deleted file mode 100644 index 5ad4f1a8f2b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java +++ /dev/null @@ -1,27 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Name("torch::data::transforms::Stack >") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ExampleStack extends ExampleCollation { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public ExampleStack() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ExampleStack(Pointer p) { super(p); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleStack.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleStack.java deleted file mode 100644 index c220bc2d9b9..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleStack.java +++ /dev/null @@ -1,27 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Name("torch::data::transforms::Stack >") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TensorExampleStack extends TensorExampleCollation { - /** Empty constructor. 
Calls {@code super((Pointer)null)}. */ - public TensorExampleStack() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TensorExampleStack(Pointer p) { super(p); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index f6f9203770e..9debcd199c1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -65087,33 +65087,6 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/csrc/api/include/torch/data/transforms/stack.h - -// #pragma once - -// #include -// #include -// #include - -// #include -// #include -// Targeting ../ExampleStack.java - - -// Targeting ../TensorExampleStack.java - - - -/** A {@code Collation} for {@code Example} types that stacks all data - * tensors into one tensor, and all target (label) tensors into one tensor. */ - -/** A {@code Collation} for {@code Example} types that stacks all data - * tensors into one tensor. */ - // namespace transforms - // namespace data - // namespace torch - - // Parsed from torch/csrc/api/include/torch/data/transforms/tensor.h // #pragma once diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/ExampleStack.java b/pytorch/src/main/java/org/bytedeco/pytorch/ExampleStack.java new file mode 100644 index 00000000000..be2c90738c6 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/ExampleStack.java @@ -0,0 +1,21 @@ +package org.bytedeco.pytorch; + +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +/* This is an instantiation of a specialized template defined in torch/data/transforms/stack.h. + * Template specializations have been ignored since JavaCPP 1.5.10. + * Because the primary template is only a declaration without a body, the Parser would create + * an @Opaque class without mapping any native constructor. + * So we give this explicit definition with a native constructor and exclude stack.h from parsing. */ + +/** A {@code Collation} for {@code Example} types that stacks all data + * tensors into one tensor, and all target (label) tensors into one tensor. */ +@Name("torch::data::transforms::Stack >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ExampleStack extends ExampleCollation { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public ExampleStack() { super((Pointer)null); allocate(); } + private native void allocate(); + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ExampleStack(Pointer p) { super(p); } +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/TensorExampleStack.java b/pytorch/src/main/java/org/bytedeco/pytorch/TensorExampleStack.java new file mode 100644 index 00000000000..e3298c1710c --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/TensorExampleStack.java @@ -0,0 +1,17 @@ +package org.bytedeco.pytorch; + +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +/* See the explanation in ExampleStack. */ + +/** A {@code Collation} for {@code Example} types that stacks all data + * tensors into one tensor. */ +@Name("torch::data::transforms::Stack >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TensorExampleStack extends TensorExampleCollation { + /** Empty constructor. Calls {@code super((Pointer)null)}. 
*/ + public TensorExampleStack() { super((Pointer)null); allocate(); } + private native void allocate(); + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TensorExampleStack(Pointer p) { super(p); } +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 55b552881fc..9fa8f4a267a 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -1307,10 +1307,8 @@ public void map(InfoMap infoMap) { template("torch::data::transforms::BatchTransform", template("std::vector", example), example), template("torch::data::transforms::Collation", example) ).pointerTypes(p + "ExampleCollation")) - // The Stack primary template is empty. Constructors are defined in template specializations. - // So the generated Java classes are @Opaque and have no constructors. - // We might need to force the generation of constructors somehow. - .put(new Info(template("torch::data::transforms::Stack", example)).pointerTypes(p + "ExampleStack").base(p + "ExampleCollation")) + // See explicit definition of ExampleStack and TensorExampleStack. + .put(new Info(template("torch::data::transforms::Stack", example)).pointerTypes(p + "ExampleStack")) .put(new Info(chunkDataReader).pointerTypes("Chunk" + p + "DataReader").virtualize()) .put(new Info( template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler") diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h index a759d1a0c1d..84914ddfa90 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h @@ -5,7 +5,7 @@ // Excluding: // - the ones that fill at::meta at::native and at::_ops namespaces // (ATen/ops/*_native.h ATen/ops/*_meta.h ATen/ops/*_ops.h) -// - ATen/ops/_* +// - ATen/ops/_* (internal, API can change) // - and some exceptions commented below #include "torch/csrc/utils/python_stub.h" #include "c10/macros/cmake_macros.h" @@ -1273,7 +1273,7 @@ #include "torch/csrc/api/include/torch/data/transforms/base.h" #include "torch/csrc/api/include/torch/data/transforms/lambda.h" #include "torch/csrc/api/include/torch/data/transforms/collate.h" -#include "torch/csrc/api/include/torch/data/transforms/stack.h" +// #include "torch/csrc/api/include/torch/data/transforms/stack.h" // See ExampleStack explicit definition #include "torch/csrc/api/include/torch/data/transforms/tensor.h" #include "torch/csrc/api/include/torch/data/transforms.h" #include "torch/csrc/api/include/torch/data.h" From f4a464f547f60bc92c7b841b19e2e391e0e4bc86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Wed, 31 Jan 2024 09:57:03 +0100 Subject: [PATCH 02/24] Generate more overloads of methods taking an arrayref --- .../java/org/bytedeco/pytorch/JitNode.java | 6 + .../org/bytedeco/pytorch/NamedTensorMeta.java | 10 +- .../org/bytedeco/pytorch/global/torch.java | 264 ++++++++++++++++++ .../org/bytedeco/pytorch/presets/torch.java | 8 +- 4 files changed, 283 insertions(+), 5 deletions(-) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNode.java index d7aba37414d..a3eb66d1d9e 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNode.java @@ -315,6 +315,12 @@ public class JitNode extends Pointer { @ByVal(nullValue = "at::ArrayRef{}") SymbolVector const_inputs); public native @Cast("bool") boolean matches( String signature_literal); + public native @Cast("bool") boolean matches( + @Cast("const char*") BytePointer signature_literal, + @ByVal(nullValue = "at::ArrayRef{}") SymbolVector const_inputs); + public native @Cast("bool") boolean matches( + String signature_literal, + @ByVal(nullValue = "at::ArrayRef{}") SymbolArrayRef const_inputs); public native @Cast("bool") boolean isMemberOf(@Const @ByRef OperatorSet os); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java index 78a54f658fa..bcdca1faf95 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java @@ -53,8 +53,10 @@ public enum HAS_NON_WILDCARD { private native void allocate(HAS_NON_WILDCARD arg0, @ByVal DimnameArrayRef names); public NamedTensorMeta(@Cast("at::NamedTensorMeta::HAS_NON_WILDCARD") int arg0, @ByVal DimnameVector names) { super((Pointer)null); allocate(arg0, names); } private native void allocate(@Cast("at::NamedTensorMeta::HAS_NON_WILDCARD") int arg0, @ByVal DimnameVector names); - public NamedTensorMeta(HAS_NON_WILDCARD arg0, @StdMove DimnameVector names) { super((Pointer)null); allocate(arg0, names); } - private native void allocate(HAS_NON_WILDCARD arg0, @StdMove DimnameVector names); + public NamedTensorMeta(HAS_NON_WILDCARD arg0, @ByVal DimnameVector names) { super((Pointer)null); allocate(arg0, names); } + private native void allocate(HAS_NON_WILDCARD arg0, @ByVal DimnameVector names); + public NamedTensorMeta(@Cast("at::NamedTensorMeta::HAS_NON_WILDCARD") int arg0, @ByVal DimnameArrayRef names) { super((Pointer)null); allocate(arg0, names); } + private native void allocate(@Cast("at::NamedTensorMeta::HAS_NON_WILDCARD") int arg0, @ByVal DimnameArrayRef names); public native @UniquePtr NamedTensorMetaInterface clone(); @@ -67,8 +69,8 @@ public enum HAS_NON_WILDCARD { public native void set_names(HAS_NON_WILDCARD arg0, @ByVal DimnameArrayRef new_names); public native void set_names(@Cast("at::NamedTensorMeta::HAS_NON_WILDCARD") int arg0, @ByVal DimnameVector new_names); - - public native void set_names(HAS_NON_WILDCARD arg0, @StdMove DimnameVector new_names); + public native void set_names(HAS_NON_WILDCARD arg0, @ByVal DimnameVector new_names); + public native void set_names(@Cast("at::NamedTensorMeta::HAS_NON_WILDCARD") int arg0, @ByVal DimnameArrayRef new_names); // INVARIANT: at least one Dimname is non-WILDCARD public native @StdMove DimnameVector names_(); public native NamedTensorMeta names_(DimnameVector setter); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 9debcd199c1..5df2e40fbd0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -14697,6 +14697,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @StdMove DimnameVector unify_from_right( @ByVal DimnameVector names, @ByVal DimnameVector other); +@Namespace("at") public static native @StdMove DimnameVector unify_from_right( + @ByVal DimnameVector names, + @ByVal 
DimnameVector other, + @Cast("const char*") BytePointer action/*="broadcast"*/); +@Namespace("at") public static native @StdMove DimnameVector unify_from_right( + @ByVal DimnameArrayRef names, + @ByVal DimnameArrayRef other, + String action/*="broadcast"*/); @Namespace("at") public static native void reportNYIDimnameOverload(@Cast("const char*") BytePointer op_name); @Namespace("at") public static native void reportNYIDimnameOverload(String op_name); @@ -18633,6 +18641,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { int pos, @Cast("c10::DeviceType") byte device_type, ScalarType scalar_type); +@Namespace("at") public static native @ByVal TensorImplVector checked_dense_tensor_list_unwrap( + @ByVal TensorVector tensors, + @Cast("const char*") BytePointer name, + int pos, + DeviceType device_type, + ScalarType scalar_type); +@Namespace("at") public static native @ByVal TensorImplVector checked_dense_tensor_list_unwrap( + @ByVal TensorArrayRef tensors, + String name, + int pos, + @Cast("c10::DeviceType") byte device_type, + ScalarType scalar_type); // namespace detail // namespace at @@ -18721,6 +18741,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Cast("at::CheckedFrom") String c, @Const @ByRef TensorGeometryArg t, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); +@Namespace("at") public static native void checkSize( + @Cast("at::CheckedFrom") BytePointer c, + @Const @ByRef TensorGeometryArg t, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); +@Namespace("at") public static native void checkSize( + @Cast("at::CheckedFrom") String c, + @Const @ByRef TensorGeometryArg t, + @ByVal LongArrayRef sizes); @Namespace("at") public static native void checkSize_symint( @Cast("at::CheckedFrom") BytePointer c, @Const @ByRef TensorGeometryArg t, @@ -18770,6 +18798,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Cast("at::CheckedFrom") String c, @Const @ByRef TensorArg t, @ByVal ScalarTypeVector l); +@Namespace("at") public static native void checkScalarTypes( + @Cast("at::CheckedFrom") BytePointer c, + @Const @ByRef TensorArg t, + @ByVal ScalarTypeVector l); +@Namespace("at") public static native void checkScalarTypes( + @Cast("at::CheckedFrom") String c, + @Const @ByRef TensorArg t, + @ByVal ScalarTypeArrayRef l); @Namespace("at") public static native void checkSameGPU( @Cast("at::CheckedFrom") BytePointer c, @Const @ByRef TensorArg t1, @@ -18814,6 +18850,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Cast("at::CheckedFrom") String c, @ByVal TensorVector t, @ByVal Backend backend); +@Namespace("at") public static native void checkBackend( + @Cast("at::CheckedFrom") BytePointer c, + @ByVal TensorVector t, + @ByVal Backend backend); +@Namespace("at") public static native void checkBackend( + @Cast("at::CheckedFrom") String c, + @ByVal TensorArrayRef t, + @ByVal Backend backend); @Namespace("at") public static native void checkDeviceType( @Cast("at::CheckedFrom") BytePointer c, @@ -18823,6 +18867,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Cast("at::CheckedFrom") String c, @ByVal TensorVector tensors, @ByVal DeviceType device_type); +@Namespace("at") public static native void checkDeviceType( + @Cast("at::CheckedFrom") BytePointer c, + @ByVal TensorVector tensors, + @ByVal DeviceType device_type); +@Namespace("at") public static native void checkDeviceType( + @Cast("at::CheckedFrom") String c, 
+ @ByVal TensorArrayRef tensors, + @ByVal DeviceType device_type); @Namespace("at") public static native void checkLayout(@Cast("at::CheckedFrom") BytePointer c, @Const @ByRef Tensor t, Layout layout); @Namespace("at") public static native void checkLayout(@Cast("at::CheckedFrom") String c, @Const @ByRef Tensor t, @Cast("c10::Layout") byte layout); @@ -18835,6 +18887,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Cast("at::CheckedFrom") String c, @ByVal TensorVector tensors, @ByVal Layout layout); +@Namespace("at") public static native void checkLayout( + @Cast("at::CheckedFrom") BytePointer c, + @ByVal TensorVector tensors, + @ByVal Layout layout); +@Namespace("at") public static native void checkLayout( + @Cast("at::CheckedFrom") String c, + @ByVal TensorArrayRef tensors, + @ByVal Layout layout); // Methods for getting data_ptr if tensor is defined @Namespace("at") public static native Pointer maybe_data_ptr(@Const @ByRef Tensor tensor); @@ -24096,6 +24156,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding); @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView String padding); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView BytePointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView BytePointer padding); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView String padding); @@ -24141,6 +24205,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer 
padding); @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView String padding); +@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView BytePointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView BytePointer padding); +@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView String padding); @@ -24186,6 +24254,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding); @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView String padding); +@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView BytePointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, 
@Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView BytePointer padding); +@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView String padding); @@ -24515,6 +24587,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); // 
aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) @@ -24525,11 +24599,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); // aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? 
bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); // aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @@ -26722,6 +26800,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor einsum(@StringView BytePointer equation, @ByVal TensorArrayRef tensors); @Namespace("at") public static native @ByVal Tensor einsum(@StringView String equation, @ByVal TensorVector tensors, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... path); @Namespace("at") public static native @ByVal Tensor einsum(@StringView String equation, @ByVal TensorVector tensors); +@Namespace("at") public static native @ByVal Tensor einsum(@StringView BytePointer equation, @ByVal TensorVector tensors, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional path); +@Namespace("at") public static native @ByVal Tensor einsum(@StringView BytePointer equation, @ByVal TensorVector tensors); +@Namespace("at") public static native @ByVal Tensor einsum(@StringView String equation, @ByVal TensorArrayRef tensors, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... path); +@Namespace("at") public static native @ByVal Tensor einsum(@StringView String equation, @ByVal TensorArrayRef tensors); @@ -28454,6 +28536,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor @@ -28466,11 +28550,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); // aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
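To see what the generated overload matrix buys callers: before this patch, every arrayref-like argument in a call had to use the same representation (all ArrayRef-mapped objects, or all plain long[], as in the unchanged context lines above); the added signatures let the two styles mix within a single call. A minimal sketch, not part of this patch, assuming the torch.randn(long...) factory overload from these presets:

    import org.bytedeco.pytorch.*;
    import org.bytedeco.pytorch.global.torch;

    public class FftOverloadSketch {
        public static void main(String[] args) {
            Tensor x = torch.randn(8, 8);  // assumed factory overload
            // `s` passed as a null LongArrayRefOptional (falls back to the
            // c10::nullopt default), `dim` passed as a plain long[]:
            // one of the mixed overloads generated above.
            Tensor y = torch.fft_fft2(x,
                    (LongArrayRefOptional) null,  // s
                    new long[]{-2, -1},           // dim
                    (StringViewOptional) null);   // norm
            System.out.println(y.size(0) + " x " + y.size(1));
        }
    }

The explicit casts are needed only on null arguments, where several of the generated signatures would otherwise be applicable.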
@@ -28702,6 +28790,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor @@ -28714,11 +28804,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef 
dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); @Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); // aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @@ -28879,6 +28973,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor @@ -28891,11 +28987,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); // aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
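The _out variants follow the same pattern: the caller supplies the result tensor (first parameter for _out, last for _outf), and each arrayref-like parameter can again arrive independently as an ArrayRef object or a long[]. Continuing the sketch above (same imports), with torch.empty_like assumed to allocate a matching complex-typed holder:

    Tensor real = torch.randn(8, 8);      // assumed factory overload
    Tensor freq = torch.fft_fft2(real);   // forward transform, complex result
    Tensor out = torch.empty_like(freq);  // caller-allocated result tensor
    // Mixed representations again: null Optional for s, plain long[] for dim.
    torch.fft_ifft2_out(out, freq,
            (LongArrayRefOptional) null,
            new long[]{-2, -1},
            (StringViewOptional) null);
    // out now holds fft_ifft2(fft_fft2(real)), i.e. real up to rounding.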
@@ -29088,6 +29188,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor @@ -29100,11 +29202,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") 
LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); @Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); // aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @@ -29265,6 +29371,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor @@ -29277,11 +29385,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); // aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@@ -29442,6 +29554,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor @@ -29454,11 +29568,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional 
norm); // aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); // aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @@ -32240,6 +32358,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal LongArrayRef bins); @Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double[] range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... bins); +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal LongArrayRef bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double[] range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); // aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? 
weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) @Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @Cast("int64_t") long bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); @@ -32251,6 +32371,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal TensorArrayRef bins); @Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal TensorVector bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double[] range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal TensorVector bins); +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal TensorVector bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal TensorArrayRef bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double[] range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); @@ -35548,14 +35670,20 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @StringView BytePointer ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); @Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @StringView String ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @StringView BytePointer ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @StringView String ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); // aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? 
dtype=None, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView BytePointer ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); @Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView String ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView BytePointer ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView String ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); // aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @StringView BytePointer ord, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @StringView String ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @StringView BytePointer ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @StringView String ord, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); @@ -38552,6 +38680,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[] @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal TensorArrayRef tensors, @StringView BytePointer indexing); @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal TensorVector tensors, @StringView String indexing); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal TensorVector tensors, @StringView BytePointer indexing); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal TensorArrayRef tensors, @StringView String indexing); @@ -39572,14 +39702,20 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional input_size); @Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
input_size); +@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional input_size); +@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); // aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional input_size); @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
input_size); +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional input_size); +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); // aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal LongArrayRefOptional input_size, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal LongArrayRefOptional input_size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByRef Tensor out); @@ -42737,6 +42873,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal LongArrayRef pad); @Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] pad, @StringView String mode/*="constant"*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); @Namespace("at") public static native 
@ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... pad); +@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] pad, @StringView BytePointer mode/*="constant"*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); +@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal LongArrayRef pad, @StringView String mode/*="constant"*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); // aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor @@ -58552,6 +58690,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Const @ByRef Tensor to_expand, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, String api_name); +@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( + @Const @ByRef Tensor to_expand, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @Cast("const char*") BytePointer api_name); +@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( + @Const @ByRef Tensor to_expand, + @ByVal LongArrayRef sizes, + String api_name); @@ -65427,6 +65573,16 @@ The list of (type, depth) pairs controls the type of specializations and the num @ByVal(nullValue = "c10::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor fft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "c10::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor fft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "c10::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the inverse of torch.fft.fft2 * See https://pytorch.org/docs/master/fft.html#torch.fft.ifft2. 
@@ -65450,6 +65606,16 @@ The list of (type, depth) pairs controls the type of specializations and the num @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor ifft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor ifft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the N dimensional fast Fourier transform over given dimensions. * See https://pytorch.org/docs/master/fft.html#torch.fft.fftn. @@ -65560,6 +65726,16 @@ The list of (type, depth) pairs controls the type of specializations and the num @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor rfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor rfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the inverse of torch.fft.rfft2. * See https://pytorch.org/docs/master/fft.html#torch.fft.irfft2. 
@@ -65583,6 +65759,16 @@ The list of (type, depth) pairs controls the type of specializations and the num @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor irfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor irfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the N dimensional FFT of real input with onesided Hermitian output. * See https://pytorch.org/docs/master/fft.html#torch.fft.rfftn @@ -65702,6 +65888,16 @@ The list of (type, depth) pairs controls the type of specializations and the num @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor hfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor hfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the 2-dimensional IFFT of a real input signal. 
* @@ -65730,6 +65926,16 @@ The list of (type, depth) pairs controls the type of specializations and the num @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor ihfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor ihfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the N-dimensional FFT of a Hermitian symmetric input signal. * @@ -65757,6 +65963,16 @@ The list of (type, depth) pairs controls the type of specializations and the num @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor hfftn( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor hfftn( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the N-dimensional IFFT of a real input signal. 
* @@ -65785,6 +66001,16 @@ The list of (type, depth) pairs controls the type of specializations and the num @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor ihfftn( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("torch::fft") public static native @ByVal Tensor ihfftn( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the discrete Fourier Transform sample frequencies for a signal of * size n. @@ -66111,6 +66337,18 @@ The list of (type, depth) pairs controls the type of specializations and the num @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype); +@Namespace("torch::linalg::detail") public static native @ByVal Tensor matrix_norm( + @Const @ByRef Tensor self, + @StdString BytePointer ord, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional dtype); +@Namespace("torch::linalg::detail") public static native @ByVal Tensor matrix_norm( + @Const @ByRef Tensor self, + @StdString String ord, + @ByVal LongArrayRef dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional dtype); @Namespace("torch::linalg::detail") public static native @ByRef Tensor matrix_norm_out( @Const @ByRef Tensor self, @@ -66126,6 +66364,20 @@ The list of (type, depth) pairs controls the type of specializations and the num @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor result); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor matrix_norm_out( + @Const @ByRef Tensor self, + @StdString BytePointer ord, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional dtype, + @ByRef Tensor result); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor matrix_norm_out( + @Const @ByRef Tensor self, + @StdString String ord, + @ByVal LongArrayRef dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional dtype, + @ByRef Tensor result); @Namespace("torch::linalg::detail") public static native @ByRef Tensor matrix_power_out(@Const @ByRef Tensor self, @Cast("int64_t") long n, @ByRef Tensor result); @@ -78525,6 +78777,18 @@ scalar_t sf(scalar_t x, scalar_t y) @Namespace("torch::jit") public static native @ByVal IValue unpickle( String data, @Cast("size_t") long size); +@Namespace("torch::jit") public static native @ByVal IValue unpickle( + @Cast("const char*") BytePointer data, + @Cast("size_t") long size, + @ByVal(nullValue = 
"torch::jit::TypeResolver(nullptr)") TypeResolver type_resolver, + @ByVal(nullValue = "c10::ArrayRef{}") TensorVector tensor_table, + TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/); +@Namespace("torch::jit") public static native @ByVal IValue unpickle( + String data, + @Cast("size_t") long size, + @ByVal(nullValue = "torch::jit::TypeResolver(nullptr)") TypeResolver type_resolver, + @ByVal(nullValue = "c10::ArrayRef{}") TensorArrayRef tensor_table, + TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/); // namespace jit // namespace torch diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 9fa8f4a267a..2edce027a3d 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -2606,11 +2606,17 @@ void mapArrayRef(InfoMap infoMap) { elementValueType.equals("int") || elementValueType.equals("long") || elementValueType.equals("float") || elementValueType.equals("double"); - String[] pt = new String[otherPointerTypes.length + (variadicPointerType ? 2 : 1)]; + int numPt = otherPointerTypes.length + (variadicPointerType ? 2 : 1); + String[] pt = new String[numPt * numPt]; // List numPt times to help generating all possible combinations + // when a method takes other arguments having multiple pointerTypes pt[0] = baseJavaName + "ArrayRef"; System.arraycopy(otherPointerTypes, 0, pt, 1, otherPointerTypes.length); if (variadicPointerType) pt[otherPointerTypes.length + 1] = "@Cast({\"" + elementTypes[0] + "*\", \"" + cppNames[0] + "\", \"std::vector<" + elementTypes[0] + ">&\"}) @StdVector(\"" + elementTypes[0] + "\") " + elementValueType + "..."; + for (int i = 1; i < numPt; i++) { + pt[i * numPt] = pt[i * numPt - 1]; + System.arraycopy(pt, (i - 1) * numPt, pt, i * numPt + 1, numPt - 1); + } Info info = new Info(cppNames).pointerTypes(pt); if (baseJavaName.contains("@Cast")) info.cast(); infoMap.put(info); From 1c4eec0ddb33c788cac60aaf568f1b94a43ec631 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Thu, 1 Feb 2024 14:02:55 +0100 Subject: [PATCH 03/24] Update to PyTorch 2.2.0 --- pytorch/README.md | 8 +- pytorch/cppbuild.sh | 2 +- pytorch/platform/gpu/pom.xml | 2 +- pytorch/platform/pom.xml | 2 +- pytorch/pom.xml | 2 +- pytorch/samples/pom.xml | 4 +- .../java/org/bytedeco/pytorch/ASMoutput.java | 2 +- .../org/bytedeco/pytorch/ActivityTypeSet.java | 2 +- .../java/org/bytedeco/pytorch/Adagrad.java | 2 +- .../org/bytedeco/pytorch/AdagradOptions.java | 2 +- .../bytedeco/pytorch/AdagradParamState.java | 10 +- .../gen/java/org/bytedeco/pytorch/Adam.java | 2 +- .../org/bytedeco/pytorch/AdamOptions.java | 2 +- .../org/bytedeco/pytorch/AdamParamState.java | 2 +- .../gen/java/org/bytedeco/pytorch/AdamW.java | 2 +- .../org/bytedeco/pytorch/AdamWOptions.java | 2 +- .../org/bytedeco/pytorch/AdamWParamState.java | 2 +- .../pytorch/AdaptiveAvgPool1dImpl.java | 4 +- .../pytorch/AdaptiveAvgPool1dImplBase.java | 4 +- .../AdaptiveAvgPool1dImplCloneable.java | 4 +- .../pytorch/AdaptiveAvgPool1dOptions.java | 2 +- .../pytorch/AdaptiveAvgPool2dImpl.java | 4 +- .../pytorch/AdaptiveAvgPool2dImplBase.java | 4 +- .../AdaptiveAvgPool2dImplCloneable.java | 4 +- .../pytorch/AdaptiveAvgPool2dOptions.java | 2 +- .../pytorch/AdaptiveAvgPool3dImpl.java | 4 +- .../pytorch/AdaptiveAvgPool3dImplBase.java | 4 +- .../AdaptiveAvgPool3dImplCloneable.java | 4 +- .../pytorch/AdaptiveAvgPool3dOptions.java | 2 
From 1c4eec0ddb33c788cac60aaf568f1b94a43ec631 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Herv=C3=A9=20Guillemet?=
Date: Thu, 1 Feb 2024 14:02:55 +0100
Subject: [PATCH 03/24] Update to PyTorch 2.2.0

---
 pytorch/README.md | 8 +- pytorch/cppbuild.sh | 2 +- pytorch/platform/gpu/pom.xml | 2 +- pytorch/platform/pom.xml | 2 +- pytorch/pom.xml | 2 +- pytorch/samples/pom.xml | 4 +- .../java/org/bytedeco/pytorch/ASMoutput.java | 2 +- .../org/bytedeco/pytorch/ActivityTypeSet.java | 2 +- .../java/org/bytedeco/pytorch/Adagrad.java | 2 +- .../org/bytedeco/pytorch/AdagradOptions.java | 2 +- .../bytedeco/pytorch/AdagradParamState.java | 10 +- .../gen/java/org/bytedeco/pytorch/Adam.java | 2 +- .../org/bytedeco/pytorch/AdamOptions.java | 2 +- .../org/bytedeco/pytorch/AdamParamState.java | 2 +- .../gen/java/org/bytedeco/pytorch/AdamW.java | 2 +- .../org/bytedeco/pytorch/AdamWOptions.java | 2 +- .../org/bytedeco/pytorch/AdamWParamState.java | 2 +- .../pytorch/AdaptiveAvgPool1dImpl.java | 4 +- .../pytorch/AdaptiveAvgPool1dImplBase.java | 4 +- .../AdaptiveAvgPool1dImplCloneable.java | 4 +- .../pytorch/AdaptiveAvgPool1dOptions.java | 2 +- .../pytorch/AdaptiveAvgPool2dImpl.java | 4 +- .../pytorch/AdaptiveAvgPool2dImplBase.java | 4 +- .../AdaptiveAvgPool2dImplCloneable.java | 4 +- .../pytorch/AdaptiveAvgPool2dOptions.java | 2 +- .../pytorch/AdaptiveAvgPool3dImpl.java | 4 +- .../pytorch/AdaptiveAvgPool3dImplBase.java | 4 +- .../AdaptiveAvgPool3dImplCloneable.java | 4 +- .../pytorch/AdaptiveAvgPool3dOptions.java | 2 +- .../AdaptiveLogSoftmaxWithLossImpl.java | 4 +- ...aptiveLogSoftmaxWithLossImplCloneable.java | 4 +- .../AdaptiveLogSoftmaxWithLossOptions.java | 2 +- .../pytorch/AdaptiveMaxPool1dImpl.java | 4 +- .../pytorch/AdaptiveMaxPool1dImplBase.java | 4 +- .../AdaptiveMaxPool1dImplCloneable.java | 4 +- .../pytorch/AdaptiveMaxPool1dOptions.java | 2 +- .../pytorch/AdaptiveMaxPool2dImpl.java | 4 +- .../pytorch/AdaptiveMaxPool2dImplBase.java | 4 +- .../AdaptiveMaxPool2dImplCloneable.java | 4 +- .../pytorch/AdaptiveMaxPool2dOptions.java | 2 +- .../pytorch/AdaptiveMaxPool3dImpl.java | 4 +- .../pytorch/AdaptiveMaxPool3dImplBase.java | 4 +- .../AdaptiveMaxPool3dImplCloneable.java | 4 +- .../pytorch/AdaptiveMaxPool3dOptions.java | 2 +- .../java/org/bytedeco/pytorch/AliasDb.java | 2 +- .../java/org/bytedeco/pytorch/AliasInfo.java | 2 +- .../bytedeco/pytorch/AliasInfoOptional.java | 2 +- .../pytorch/AliasTypeSetOptional.java | 2 +- .../java/org/bytedeco/pytorch/Allocator.java | 2 +- .../pytorch/AlphaDropoutFuncOptions.java | 2 +- .../bytedeco/pytorch/AlphaDropoutImpl.java | 4 +- .../pytorch/AlphaDropoutImplBase.java | 4 +- .../pytorch/AlphaDropoutImplCloneable.java | 4 +- .../org/bytedeco/pytorch/AnomalyMetadata.java | 2 +- .../org/bytedeco/pytorch/AnomalyMode.java | 2 +- .../org/bytedeco/pytorch/AnyClassType.java | 2 +- .../org/bytedeco/pytorch/AnyClassTypePtr.java | 2 +- .../org/bytedeco/pytorch/AnyEnumType.java | 2 +- .../org/bytedeco/pytorch/AnyEnumTypePtr.java | 2 +- .../org/bytedeco/pytorch/AnyListType.java | 2 +- .../org/bytedeco/pytorch/AnyListTypePtr.java | 2 +- .../java/org/bytedeco/pytorch/AnyModule.java | 2 +- .../org/bytedeco/pytorch/AnyModuleVector.java | 2 +- .../org/bytedeco/pytorch/AnyTupleType.java | 2 +- .../org/bytedeco/pytorch/AnyTupleTypePtr.java | 2 +- .../java/org/bytedeco/pytorch/AnyType.java | 2 +- .../java/org/bytedeco/pytorch/AnyTypePtr.java | 2 +- .../java/org/bytedeco/pytorch/AnyValue.java | 2 +- .../gen/java/org/bytedeco/pytorch/Apply.java | 2 +- .../java/org/bytedeco/pytorch/Argument.java | 10 +- .../bytedeco/pytorch/ArgumentArrayRef.java | 2 +- .../org/bytedeco/pytorch/ArgumentDef.java | 2 +- .../bytedeco/pytorch/ArgumentDefArrayRef.java | 2 +- .../org/bytedeco/pytorch/ArgumentInfo.java | 2 +- .../org/bytedeco/pytorch/ArgumentSpec.java | 3 +- .../bytedeco/pytorch/ArgumentSpecCreator.java | 2 +- .../pytorch/ArgumentSpecExecutionPlanMap.java | 2 +- .../gen/java/org/bytedeco/pytorch/Assert.java | 2 +- .../gen/java/org/bytedeco/pytorch/Assign.java | 2 +- .../java/org/bytedeco/pytorch/AssignList.java | 2 +- .../bytedeco/pytorch/AssignListIterator.java | 2 +- .../org/bytedeco/pytorch/AssignListMaybe.java | 2 +- .../java/org/bytedeco/pytorch/Attribute.java | 2 +- .../org/bytedeco/pytorch/AttributeList.java | 2 +- .../pytorch/AttributeListIterator.java | 2 +- .../org/bytedeco/pytorch/AttributePolicy.java | 2 +- .../org/bytedeco/pytorch/AttributeValue.java | 2 +- .../java/org/bytedeco/pytorch/AugAssign.java | 2 +- .../org/bytedeco/pytorch/AugAssignKind.java | 2 +- .../AutoDispatchBelowADInplaceOrView.java | 2 +- .../pytorch/AutoDispatchBelowAutograd.java | 2 +- .../AutoDispatchSkipFunctionalize.java | 2 +- .../org/bytedeco/pytorch/AutoFwGradMode.java | 2 +- .../org/bytedeco/pytorch/AutoGradMode.java | 2 +- .../pytorch/AutoNonVariableTypeMode.java | 2 +- .../org/bytedeco/pytorch/AutogradContext.java | 2 +- .../org/bytedeco/pytorch/AutogradMeta.java | 2 +- .../bytedeco/pytorch/AutogradMetaFactory.java | 2 +- .../AutogradMetaFactoryRegisterer.java | 2 +-
.../pytorch/AutogradMetaInterface.java | 2 +- .../org/bytedeco/pytorch/AutogradState.java | 2 +- .../org/bytedeco/pytorch/AvgPool1dImpl.java | 4 +- .../bytedeco/pytorch/AvgPool1dImplBase.java | 4 +- .../pytorch/AvgPool1dImplCloneable.java | 4 +- .../bytedeco/pytorch/AvgPool1dOptions.java | 2 +- .../org/bytedeco/pytorch/AvgPool2dImpl.java | 4 +- .../bytedeco/pytorch/AvgPool2dImplBase.java | 4 +- .../pytorch/AvgPool2dImplCloneable.java | 4 +- .../bytedeco/pytorch/AvgPool2dOptions.java | 2 +- .../org/bytedeco/pytorch/AvgPool3dImpl.java | 4 +- .../bytedeco/pytorch/AvgPool3dImplBase.java | 4 +- .../pytorch/AvgPool3dImplCloneable.java | 4 +- .../bytedeco/pytorch/AvgPool3dOptions.java | 2 +- .../gen/java/org/bytedeco/pytorch/Await.java | 6 +- .../java/org/bytedeco/pytorch/AwaitPtr.java | 2 +- .../pytorch/AwaitSingleElementType.java | 2 +- .../java/org/bytedeco/pytorch/AwaitType.java | 2 +- .../org/bytedeco/pytorch/BCELossImpl.java | 4 +- .../pytorch/BCELossImplCloneable.java | 4 +- .../org/bytedeco/pytorch/BCELossOptions.java | 2 +- .../pytorch/BCEWithLogitsLossImpl.java | 4 +- .../BCEWithLogitsLossImplCloneable.java | 4 +- .../pytorch/BCEWithLogitsLossOptions.java | 2 +- .../java/org/bytedeco/pytorch/BFloat16.java | 2 +- .../bytedeco/pytorch/BFloat16ArrayRef.java | 2 +- .../org/bytedeco/pytorch/BackendMeta.java | 2 +- .../org/bytedeco/pytorch/BackendMetaRef.java | 2 +- .../org/bytedeco/pytorch/BatchNorm1dImpl.java | 4 +- .../bytedeco/pytorch/BatchNorm1dImplBase.java | 4 +- .../pytorch/BatchNorm1dImplBaseBase.java | 4 +- .../pytorch/BatchNorm1dImplCloneable.java | 4 +- .../org/bytedeco/pytorch/BatchNorm2dImpl.java | 4 +- .../bytedeco/pytorch/BatchNorm2dImplBase.java | 4 +- .../pytorch/BatchNorm2dImplBaseBase.java | 4 +- .../pytorch/BatchNorm2dImplCloneable.java | 4 +- .../org/bytedeco/pytorch/BatchNorm3dImpl.java | 4 +- .../bytedeco/pytorch/BatchNorm3dImplBase.java | 4 +- .../pytorch/BatchNorm3dImplBaseBase.java | 4 +- .../pytorch/BatchNorm3dImplCloneable.java | 4 +- .../pytorch/BatchNormFuncOptions.java | 2 +- .../bytedeco/pytorch/BatchNormOptions.java | 2 +- .../java/org/bytedeco/pytorch/BatchSize.java | 2 +- .../bytedeco/pytorch/BatchSizeOptional.java | 6 +- .../bytedeco/pytorch/BatchSizeSampler.java | 2 +- .../org/bytedeco/pytorch/BilinearImpl.java | 4 +- .../pytorch/BilinearImplCloneable.java | 4 +- .../org/bytedeco/pytorch/BilinearOptions.java | 2 +- .../gen/java/org/bytedeco/pytorch/BinOp.java | 2 +- .../gen/java/org/bytedeco/pytorch/Blob.java | 2 +- .../gen/java/org/bytedeco/pytorch/Block.java | 2 +- .../org/bytedeco/pytorch/BlockArrayRef.java | 2 +- .../java/org/bytedeco/pytorch/BlockWrap.java | 2 +- .../org/bytedeco/pytorch/BoolArrayRef.java | 2 +- .../org/bytedeco/pytorch/BoolOptional.java | 2 +- .../java/org/bytedeco/pytorch/BoolType.java | 2 +- .../org/bytedeco/pytorch/BoolTypePtr.java | 2 +- .../java/org/bytedeco/pytorch/BoolVector.java | 2 +- .../bytedeco/pytorch/BoolVectorOptional.java | 2 +- .../pytorch/BooleanElementReference.java | 2 +- .../org/bytedeco/pytorch/BooleanList.java | 2 +- .../bytedeco/pytorch/BooleanListIterator.java | 2 +- .../gen/java/org/bytedeco/pytorch/Break.java | 2 +- .../org/bytedeco/pytorch/BufferPolicy.java | 2 +- .../org/bytedeco/pytorch/BuiltinFunction.java | 2 +- .../org/bytedeco/pytorch/BuiltinModule.java | 2 +- .../org/bytedeco/pytorch/ByteArrayRef.java | 2 +- .../org/bytedeco/pytorch/ByteOptional.java | 2 +- .../org/bytedeco/pytorch/BytePointerPair.java | 49 + .../pytorch/BytePointerPairOptional.java | 35 + 
.../bytedeco/pytorch/BytePointerVector.java | 2 +- .../org/bytedeco/pytorch/C10FlagParser.java | 2 +- .../java/org/bytedeco/pytorch/CELUImpl.java | 4 +- .../bytedeco/pytorch/CELUImplCloneable.java | 4 +- .../org/bytedeco/pytorch/CELUOptions.java | 2 +- .../bytedeco/pytorch/CPUGeneratorImpl.java | 2 +- .../org/bytedeco/pytorch/CTCLossImpl.java | 4 +- .../pytorch/CTCLossImplCloneable.java | 4 +- .../org/bytedeco/pytorch/CTCLossOptions.java | 2 +- .../org/bytedeco/pytorch/CUDAHooksArgs.java | 2 +- .../bytedeco/pytorch/CUDAHooksInterface.java | 2 +- .../gen/java/org/bytedeco/pytorch/Call.java | 2 +- .../org/bytedeco/pytorch/CapsuleType.java | 2 +- .../org/bytedeco/pytorch/CapsuleTypePtr.java | 2 +- .../java/org/bytedeco/pytorch/CastValue.java | 2 +- .../bytedeco/pytorch/ChunkBatchDataset.java | 2 +- .../pytorch/ChunkBatchSharedBatchDataset.java | 2 +- .../ChunkBatchSharedTensorBatchDataset.java | 2 +- .../org/bytedeco/pytorch/ChunkDataReader.java | 2 +- .../org/bytedeco/pytorch/ChunkDataset.java | 2 +- .../bytedeco/pytorch/ChunkDatasetOptions.java | 2 +- .../pytorch/ChunkMapBatchDataset.java | 2 +- .../org/bytedeco/pytorch/ChunkMapDataset.java | 2 +- .../pytorch/ChunkMapTensorBatchDataset.java | 2 +- .../pytorch/ChunkMapTensorDataset.java | 2 +- .../pytorch/ChunkRandomDataLoader.java | 2 +- .../pytorch/ChunkRandomDataLoaderBase.java | 2 +- .../pytorch/ChunkRandomTensorDataLoader.java | 2 +- .../ChunkRandomTensorDataLoaderBase.java | 2 +- .../bytedeco/pytorch/ChunkRecordIterator.java | 30 + .../pytorch/ChunkSharedBatchDataset.java | 2 +- .../ChunkSharedTensorBatchDataset.java | 2 +- .../pytorch/ChunkStatefulDataset.java | 2 +- .../pytorch/ChunkStatefulTensorDataset.java | 2 +- .../pytorch/ChunkTensorBatchDataset.java | 2 +- .../pytorch/ChunkTensorDataReader.java | 2 +- .../bytedeco/pytorch/ChunkTensorDataset.java | 2 +- .../org/bytedeco/pytorch/ClassAttribute.java | 2 +- .../java/org/bytedeco/pytorch/ClassDef.java | 2 +- .../java/org/bytedeco/pytorch/ClassType.java | 6 +- .../pytorch/ClassTypePropertyOptional.java | 2 +- .../java/org/bytedeco/pytorch/ClassValue.java | 2 +- .../org/bytedeco/pytorch/ClosureValue.java | 2 +- .../gen/java/org/bytedeco/pytorch/Code.java | 2 +- .../java/org/bytedeco/pytorch/CodeImpl.java | 2 +- .../org/bytedeco/pytorch/CompilationUnit.java | 2 +- .../pytorch/CompileTimeEmptyString.java | 2 +- .../bytedeco/pytorch/CompiledNodeArgs.java | 2 +- .../org/bytedeco/pytorch/ComplexType.java | 2 +- .../org/bytedeco/pytorch/ComplexTypePtr.java | 2 +- .../java/org/bytedeco/pytorch/Compound.java | 2 +- .../java/org/bytedeco/pytorch/ConstExpr.java | 2 +- .../bytedeco/pytorch/ConstantPad1dImpl.java | 4 +- .../pytorch/ConstantPad1dImplBase.java | 4 +- .../pytorch/ConstantPad1dImplCloneable.java | 4 +- .../pytorch/ConstantPad1dOptions.java | 2 +- .../bytedeco/pytorch/ConstantPad2dImpl.java | 4 +- .../pytorch/ConstantPad2dImplBase.java | 4 +- .../pytorch/ConstantPad2dImplCloneable.java | 4 +- .../pytorch/ConstantPad2dOptions.java | 2 +- .../bytedeco/pytorch/ConstantPad3dImpl.java | 4 +- .../pytorch/ConstantPad3dImplBase.java | 4 +- .../pytorch/ConstantPad3dImplCloneable.java | 4 +- .../pytorch/ConstantPad3dOptions.java | 2 +- .../org/bytedeco/pytorch/ConstantString.java | 2 +- .../bytedeco/pytorch/ConstantStringPtr.java | 2 +- .../java/org/bytedeco/pytorch/Context.java | 4 +- .../java/org/bytedeco/pytorch/Continue.java | 2 +- .../bytedeco/pytorch/Conv1dFuncOptions.java | 2 +- .../java/org/bytedeco/pytorch/Conv1dImpl.java | 4 +- .../org/bytedeco/pytorch/Conv1dImplBase.java | 4 +- 
.../bytedeco/pytorch/Conv1dImplCloneable.java | 4 +- .../org/bytedeco/pytorch/Conv1dOptions.java | 2 +- .../org/bytedeco/pytorch/Conv1dPadding.java | 10 +- .../bytedeco/pytorch/Conv2dFuncOptions.java | 2 +- .../java/org/bytedeco/pytorch/Conv2dImpl.java | 4 +- .../org/bytedeco/pytorch/Conv2dImplBase.java | 4 +- .../bytedeco/pytorch/Conv2dImplCloneable.java | 4 +- .../org/bytedeco/pytorch/Conv2dOptions.java | 2 +- .../org/bytedeco/pytorch/Conv2dPadding.java | 10 +- .../bytedeco/pytorch/Conv3dFuncOptions.java | 2 +- .../java/org/bytedeco/pytorch/Conv3dImpl.java | 4 +- .../org/bytedeco/pytorch/Conv3dImplBase.java | 4 +- .../bytedeco/pytorch/Conv3dImplCloneable.java | 4 +- .../org/bytedeco/pytorch/Conv3dOptions.java | 2 +- .../org/bytedeco/pytorch/Conv3dPadding.java | 10 +- .../org/bytedeco/pytorch/ConvPaddingMode.java | 12 +- .../pytorch/ConvTranspose1dFuncOptions.java | 2 +- .../bytedeco/pytorch/ConvTranspose1dImpl.java | 4 +- .../pytorch/ConvTranspose1dImplBase.java | 4 +- .../pytorch/ConvTranspose1dImplBaseBase.java | 4 +- .../pytorch/ConvTranspose1dImplCloneable.java | 4 +- .../pytorch/ConvTranspose1dOptions.java | 2 +- .../pytorch/ConvTranspose2dFuncOptions.java | 2 +- .../bytedeco/pytorch/ConvTranspose2dImpl.java | 4 +- .../pytorch/ConvTranspose2dImplBase.java | 4 +- .../pytorch/ConvTranspose2dImplBaseBase.java | 4 +- .../pytorch/ConvTranspose2dImplCloneable.java | 4 +- .../pytorch/ConvTranspose2dOptions.java | 2 +- .../pytorch/ConvTranspose3dFuncOptions.java | 2 +- .../bytedeco/pytorch/ConvTranspose3dImpl.java | 4 +- .../pytorch/ConvTranspose3dImplBase.java | 4 +- .../pytorch/ConvTranspose3dImplBaseBase.java | 4 +- .../pytorch/ConvTranspose3dImplCloneable.java | 4 +- .../pytorch/ConvTranspose3dOptions.java | 2 +- .../pytorch/CosineEmbeddingLossImpl.java | 4 +- .../CosineEmbeddingLossImplCloneable.java | 4 +- .../pytorch/CosineEmbeddingLossOptions.java | 2 +- .../pytorch/CosineSimilarityImpl.java | 4 +- .../CosineSimilarityImplCloneable.java | 4 +- .../pytorch/CosineSimilarityOptions.java | 2 +- .../org/bytedeco/pytorch/CppFunction.java | 2 +- .../org/bytedeco/pytorch/CppSignature.java | 2 +- .../pytorch/CppSignatureOptional.java | 2 +- .../pytorch/CrossEntropyLossImpl.java | 4 +- .../CrossEntropyLossImplCloneable.java | 4 +- .../pytorch/CrossEntropyLossOptions.java | 2 +- .../bytedeco/pytorch/CrossMapLRN2dImpl.java | 4 +- .../pytorch/CrossMapLRN2dImplCloneable.java | 4 +- .../pytorch/CrossMapLRN2dOptions.java | 2 +- .../bytedeco/pytorch/CustomBatchRequest.java | 2 +- .../bytedeco/pytorch/CustomClassHolder.java | 2 +- .../org/bytedeco/pytorch/DDPLoggingData.java | 2 +- .../java/org/bytedeco/pytorch/DLDevice_.java | 2 +- .../bytedeco/pytorch/DataLoaderOptions.java | 2 +- .../java/org/bytedeco/pytorch/DataPtr.java | 2 +- .../org/bytedeco/pytorch/DataPtrVector.java | 2 +- .../org/bytedeco/pytorch/DebugInfoBase.java | 2 +- .../org/bytedeco/pytorch/DebugInfoGuard.java | 2 +- .../gen/java/org/bytedeco/pytorch/Decl.java | 2 +- .../gen/java/org/bytedeco/pytorch/Def.java | 2 +- .../java/org/bytedeco/pytorch/DefMaybe.java | 2 +- .../java/org/bytedeco/pytorch/DefVector.java | 2 +- .../gen/java/org/bytedeco/pytorch/Delete.java | 2 +- .../DeserializationStorageContext.java | 2 +- .../bytedeco/pytorch/DetailConv1dOptions.java | 2 +- .../bytedeco/pytorch/DetailConv2dOptions.java | 2 +- .../bytedeco/pytorch/DetailConv3dOptions.java | 2 +- .../bytedeco/pytorch/DetectAnomalyGuard.java | 2 +- .../gen/java/org/bytedeco/pytorch/Device.java | 2 +- .../pytorch/DeviceGuardImplInterface.java | 4 +- 
.../pytorch/DeviceGuardImplRegistrar.java | 2 +- .../org/bytedeco/pytorch/DeviceObjType.java | 2 +- .../bytedeco/pytorch/DeviceObjTypePtr.java | 2 +- .../org/bytedeco/pytorch/DeviceOptional.java | 2 +- .../org/bytedeco/pytorch/DeviceTypeSet.java | 2 +- .../java/org/bytedeco/pytorch/DictComp.java | 2 +- .../org/bytedeco/pytorch/DictLiteral.java | 2 +- .../java/org/bytedeco/pytorch/DictType.java | 6 +- .../pytorch/DifferentiableViewMeta.java | 2 +- .../java/org/bytedeco/pytorch/DimVector.java | 2 +- .../DimVectorInferExpandGeometryResult.java | 2 +- .../bytedeco/pytorch/DimVectorOptional.java | 2 +- .../java/org/bytedeco/pytorch/Dimname.java | 2 +- .../org/bytedeco/pytorch/DimnameArrayRef.java | 2 +- .../bytedeco/pytorch/DimnameListOptional.java | 2 +- .../org/bytedeco/pytorch/DimnameOptional.java | 2 +- .../org/bytedeco/pytorch/DimnameVector.java | 2 +- .../pytorch/DisablePythonDispatcher.java | 2 +- .../pytorch/DisableRecordFunctionGuard.java | 2 +- .../org/bytedeco/pytorch/DisabledStr.java | 2 +- .../pytorch/DispatchKeyExtractor.java | 2 +- .../bytedeco/pytorch/DispatchKeyOptional.java | 2 +- .../org/bytedeco/pytorch/DispatchKeySet.java | 12 +- .../java/org/bytedeco/pytorch/Dispatcher.java | 24 +- .../bytedeco/pytorch/DistBackendError.java | 4 +- .../java/org/bytedeco/pytorch/DistError.java | 29 + .../bytedeco/pytorch/DistNetworkError.java | 29 + .../org/bytedeco/pytorch/DistStoreError.java | 29 + .../pytorch/DistributedRandomSampler.java | 2 +- .../bytedeco/pytorch/DistributedSampler.java | 2 +- .../pytorch/DistributedSequentialSampler.java | 2 +- .../pytorch/DontIncreaseRefcount.java | 2 +- .../gen/java/org/bytedeco/pytorch/Dots.java | 2 +- .../org/bytedeco/pytorch/DoubleArrayRef.java | 2 +- .../pytorch/DoubleArrayRefOptional.java | 2 +- .../org/bytedeco/pytorch/DoubleComplex.java | 2 +- .../pytorch/DoubleComplexArrayRef.java | 2 +- .../DoubleComplexElementReference.java | 2 +- .../bytedeco/pytorch/DoubleComplexList.java | 2 +- .../pytorch/DoubleComplexListIterator.java | 2 +- .../pytorch/DoubleElementReference.java | 2 +- .../pytorch/DoubleExpandingArrayOptional.java | 2 +- .../java/org/bytedeco/pytorch/DoubleList.java | 2 +- .../bytedeco/pytorch/DoubleListIterator.java | 2 +- .../org/bytedeco/pytorch/DoubleOptional.java | 2 +- .../org/bytedeco/pytorch/DoubleVector.java | 2 +- .../pytorch/DoubleVectorOptional.java | 2 +- .../org/bytedeco/pytorch/Dropout2dImpl.java | 4 +- .../bytedeco/pytorch/Dropout2dImplBase.java | 4 +- .../pytorch/Dropout2dImplCloneable.java | 4 +- .../org/bytedeco/pytorch/Dropout3dImpl.java | 4 +- .../bytedeco/pytorch/Dropout3dImplBase.java | 4 +- .../pytorch/Dropout3dImplCloneable.java | 4 +- .../bytedeco/pytorch/DropoutFuncOptions.java | 2 +- .../org/bytedeco/pytorch/DropoutImpl.java | 4 +- .../org/bytedeco/pytorch/DropoutImplBase.java | 4 +- .../pytorch/DropoutImplCloneable.java | 4 +- .../org/bytedeco/pytorch/DropoutOptions.java | 2 +- .../java/org/bytedeco/pytorch/ELUImpl.java | 4 +- .../bytedeco/pytorch/ELUImplCloneable.java | 4 +- .../java/org/bytedeco/pytorch/ELUOptions.java | 2 +- .../gen/java/org/bytedeco/pytorch/Edge.java | 2 +- .../java/org/bytedeco/pytorch/EdgeVector.java | 2 +- .../bytedeco/pytorch/EllipsisIndexType.java | 2 +- .../EmbeddingBagFromPretrainedOptions.java | 2 +- .../pytorch/EmbeddingBagFuncOptions.java | 2 +- .../bytedeco/pytorch/EmbeddingBagImpl.java | 4 +- .../pytorch/EmbeddingBagImplCloneable.java | 4 +- .../bytedeco/pytorch/EmbeddingBagMode.java | 10 +- .../bytedeco/pytorch/EmbeddingBagOptions.java | 2 +- 
.../EmbeddingFromPretrainedOptions.java | 2 +- .../pytorch/EmbeddingFuncOptions.java | 2 +- .../org/bytedeco/pytorch/EmbeddingImpl.java | 4 +- .../pytorch/EmbeddingImplCloneable.java | 4 +- .../bytedeco/pytorch/EmbeddingOptions.java | 2 +- .../pytorch/EnableProfilingGuard.java | 2 +- .../java/org/bytedeco/pytorch/EnabledStr.java | 2 +- .../bytedeco/pytorch/EnforceFiniteError.java | 2 +- .../java/org/bytedeco/pytorch/EnumHolder.java | 2 +- .../org/bytedeco/pytorch/EnumHolderPtr.java | 2 +- .../org/bytedeco/pytorch/EnumNameValue.java | 2 +- .../pytorch/EnumNameValueArrayRef.java | 2 +- .../java/org/bytedeco/pytorch/EnumType.java | 4 +- .../gen/java/org/bytedeco/pytorch/Error.java | 2 +- .../pytorch/ErrorAlwaysShowCppStacktrace.java | 29 + .../org/bytedeco/pytorch/ErrorReport.java | 2 +- .../java/org/bytedeco/pytorch/Example.java | 2 +- .../bytedeco/pytorch/ExampleCollation.java | 2 +- .../org/bytedeco/pytorch/ExampleIterator.java | 2 +- .../org/bytedeco/pytorch/ExampleOptional.java | 2 +- .../org/bytedeco/pytorch/ExampleVector.java | 2 +- .../pytorch/ExampleVectorIterator.java | 2 +- .../pytorch/ExampleVectorOptional.java | 2 +- .../pytorch/ExceptionMessageValue.java | 2 +- .../org/bytedeco/pytorch/ExceptionValue.java | 2 +- .../org/bytedeco/pytorch/ExecutionPlan.java | 2 +- .../ExecutorExecutionModeOptional.java | 2 +- .../bytedeco/pytorch/ExperimentalConfig.java | 4 +- .../gen/java/org/bytedeco/pytorch/Expr.java | 2 +- .../java/org/bytedeco/pytorch/ExprList.java | 2 +- .../bytedeco/pytorch/ExprListIterator.java | 2 +- .../java/org/bytedeco/pytorch/ExprMaybe.java | 2 +- .../java/org/bytedeco/pytorch/ExprStmt.java | 2 +- .../org/bytedeco/pytorch/ExtraFilesMap.java | 2 +- .../org/bytedeco/pytorch/FanModeType.java | 8 +- .../FeatureAlphaDropoutFuncOptions.java | 2 +- .../pytorch/FeatureAlphaDropoutImpl.java | 4 +- .../pytorch/FeatureAlphaDropoutImplBase.java | 4 +- .../FeatureAlphaDropoutImplCloneable.java | 4 +- .../org/bytedeco/pytorch/FileLineFunc.java | 2 +- .../org/bytedeco/pytorch/FlattenImpl.java | 4 +- .../pytorch/FlattenImplCloneable.java | 4 +- .../org/bytedeco/pytorch/FlattenOptions.java | 2 +- .../org/bytedeco/pytorch/Float8_e4m3fn.java | 2 +- .../org/bytedeco/pytorch/Float8_e4m3fnuz.java | 46 + .../org/bytedeco/pytorch/Float8_e5m2.java | 2 +- .../org/bytedeco/pytorch/Float8_e5m2fnuz.java | 46 + .../org/bytedeco/pytorch/FloatArrayRef.java | 2 +- .../org/bytedeco/pytorch/FloatComplex.java | 2 +- .../pytorch/FloatComplexArrayRef.java | 2 +- .../org/bytedeco/pytorch/FloatOptional.java | 2 +- .../java/org/bytedeco/pytorch/FloatType.java | 2 +- .../org/bytedeco/pytorch/FloatTypePtr.java | 2 +- .../java/org/bytedeco/pytorch/FoldImpl.java | 4 +- .../bytedeco/pytorch/FoldImplCloneable.java | 4 +- .../org/bytedeco/pytorch/FoldOptions.java | 2 +- .../gen/java/org/bytedeco/pytorch/For.java | 2 +- .../pytorch/ForceDispatchKeyGuard.java | 8 +- .../org/bytedeco/pytorch/ForwardADLevel.java | 2 +- .../org/bytedeco/pytorch/ForwardGrad.java | 2 +- .../pytorch/FractionalMaxPool1dOptions.java | 2 +- .../pytorch/FractionalMaxPool2dImpl.java | 4 +- .../FractionalMaxPool2dImplCloneable.java | 4 +- .../pytorch/FractionalMaxPool2dOptions.java | 2 +- .../pytorch/FractionalMaxPool3dImpl.java | 4 +- .../FractionalMaxPool3dImplCloneable.java | 4 +- .../pytorch/FractionalMaxPool3dOptions.java | 2 +- .../pytorch/FullDataLoaderOptions.java | 2 +- .../bytedeco/pytorch/FuncTorchTLSBase.java | 2 +- .../java/org/bytedeco/pytorch/Function.java | 3 +- .../pytorch/FunctionCrossMapLRN2d.java | 2 +- 
.../bytedeco/pytorch/FunctionPostHook.java | 2 +- .../pytorch/FunctionPostHookVector.java | 2 +- .../org/bytedeco/pytorch/FunctionPreHook.java | 2 +- .../pytorch/FunctionPreHookVector.java | 2 +- .../org/bytedeco/pytorch/FunctionSchema.java | 2 +- .../pytorch/FunctionSchemaOptional.java | 2 +- .../pytorch/FunctionSchemaVector.java | 2 +- .../org/bytedeco/pytorch/FunctionType.java | 2 +- .../org/bytedeco/pytorch/FunctionValue.java | 2 +- .../org/bytedeco/pytorch/FunctionVector.java | 2 +- .../pytorch/FunctionalityOffsetAndMask.java | 2 +- .../org/bytedeco/pytorch/FusionStrategy.java | 2 +- .../gen/java/org/bytedeco/pytorch/Future.java | 4 +- .../java/org/bytedeco/pytorch/FuturePtr.java | 2 +- .../bytedeco/pytorch/FuturePtrArrayRef.java | 2 +- .../pytorch/FuturePtrElementReference.java | 2 +- .../org/bytedeco/pytorch/FuturePtrList.java | 2 +- .../pytorch/FuturePtrListIterator.java | 2 +- .../pytorch/FutureSingleElementType.java | 2 +- .../java/org/bytedeco/pytorch/FutureType.java | 2 +- .../java/org/bytedeco/pytorch/GELUImpl.java | 4 +- .../bytedeco/pytorch/GELUImplCloneable.java | 4 +- .../org/bytedeco/pytorch/GELUOptions.java | 2 +- .../java/org/bytedeco/pytorch/GLUImpl.java | 4 +- .../bytedeco/pytorch/GLUImplCloneable.java | 4 +- .../java/org/bytedeco/pytorch/GLUOptions.java | 2 +- .../org/bytedeco/pytorch/GRUCellImpl.java | 4 +- .../org/bytedeco/pytorch/GRUCellImplBase.java | 4 +- .../pytorch/GRUCellImplCloneable.java | 4 +- .../org/bytedeco/pytorch/GRUCellOptions.java | 2 +- .../java/org/bytedeco/pytorch/GRUImpl.java | 4 +- .../org/bytedeco/pytorch/GRUImplBase.java | 4 +- .../bytedeco/pytorch/GRUImplCloneable.java | 4 +- .../java/org/bytedeco/pytorch/GRUOptions.java | 2 +- .../org/bytedeco/pytorch/GatheredContext.java | 2 +- .../java/org/bytedeco/pytorch/Generator.java | 2 +- .../org/bytedeco/pytorch/GeneratorImpl.java | 2 +- .../bytedeco/pytorch/GeneratorImplPtr.java | 2 +- .../bytedeco/pytorch/GeneratorOptional.java | 2 +- .../org/bytedeco/pytorch/GeneratorType.java | 2 +- .../bytedeco/pytorch/GeneratorTypePtr.java | 2 +- .../org/bytedeco/pytorch/GenericDict.java | 2 +- .../bytedeco/pytorch/GenericDictEntryRef.java | 2 +- .../bytedeco/pytorch/GenericDictIterator.java | 2 +- .../pytorch/GenericElementReference.java | 2 +- .../org/bytedeco/pytorch/GenericList.java | 2 +- .../bytedeco/pytorch/GenericListIterator.java | 2 +- .../gen/java/org/bytedeco/pytorch/Global.java | 2 +- .../java/org/bytedeco/pytorch/GradMode.java | 2 +- .../gen/java/org/bytedeco/pytorch/Graph.java | 2 +- .../java/org/bytedeco/pytorch/GraphAttr.java | 2 +- .../org/bytedeco/pytorch/GraphExecutor.java | 2 +- .../pytorch/GraphExecutorImplBase.java | 2 +- .../bytedeco/pytorch/GraphExecutorState.java | 2 +- .../org/bytedeco/pytorch/GraphFunction.java | 2 +- .../pytorch/GraphOptimizerEnabledGuard.java | 2 +- .../org/bytedeco/pytorch/GraphVector.java | 2 +- .../java/org/bytedeco/pytorch/GraphsAttr.java | 2 +- .../pytorch/GridSampleFuncOptions.java | 2 +- .../org/bytedeco/pytorch/GridSampleMode.java | 8 +- .../pytorch/GridSamplePaddingMode.java | 10 +- .../pytorch/GroupNormFuncOptions.java | 2 +- .../org/bytedeco/pytorch/GroupNormImpl.java | 4 +- .../pytorch/GroupNormImplCloneable.java | 4 +- .../bytedeco/pytorch/GroupNormOptions.java | 2 +- .../pytorch/GumbelSoftmaxFuncOptions.java | 2 +- .../org/bytedeco/pytorch/HIPHooksArgs.java | 2 +- .../bytedeco/pytorch/HIPHooksInterface.java | 2 +- .../gen/java/org/bytedeco/pytorch/Half.java | 2 +- .../org/bytedeco/pytorch/HalfArrayRef.java | 2 +- 
.../org/bytedeco/pytorch/HalfComplex.java | 2 +- .../org/bytedeco/pytorch/HardshrinkImpl.java | 4 +- .../pytorch/HardshrinkImplCloneable.java | 4 +- .../bytedeco/pytorch/HardshrinkOptions.java | 2 +- .../org/bytedeco/pytorch/HardtanhImpl.java | 4 +- .../pytorch/HardtanhImplCloneable.java | 4 +- .../org/bytedeco/pytorch/HardtanhOptions.java | 2 +- .../pytorch/HashAliasedIValueMap.java | 2 +- .../bytedeco/pytorch/HashAliasedIValues.java | 2 +- .../bytedeco/pytorch/HermeticPyObjectTLS.java | 2 +- .../pytorch/HingeEmbeddingLossImpl.java | 4 +- .../HingeEmbeddingLossImplCloneable.java | 4 +- .../pytorch/HingeEmbeddingLossOptions.java | 2 +- .../org/bytedeco/pytorch/HuberLossImpl.java | 4 +- .../pytorch/HuberLossImplCloneable.java | 4 +- .../bytedeco/pytorch/HuberLossOptions.java | 2 +- .../java/org/bytedeco/pytorch/IMethod.java | 5 +- .../org/bytedeco/pytorch/IPUHooksArgs.java | 27 + .../bytedeco/pytorch/IPUHooksInterface.java | 46 + .../org/bytedeco/pytorch/IStreamAdapter.java | 2 +- .../gen/java/org/bytedeco/pytorch/IValue.java | 35 +- .../org/bytedeco/pytorch/IValueArrayRef.java | 2 +- .../org/bytedeco/pytorch/IValueOptional.java | 2 +- .../pytorch/IValueOptionalVector.java | 2 +- .../org/bytedeco/pytorch/IValueVector.java | 2 +- .../gen/java/org/bytedeco/pytorch/Ident.java | 2 +- .../java/org/bytedeco/pytorch/IdentList.java | 2 +- .../bytedeco/pytorch/IdentListIterator.java | 2 +- .../org/bytedeco/pytorch/IdentityImpl.java | 4 +- .../pytorch/IdentityImplCloneable.java | 4 +- .../src/gen/java/org/bytedeco/pytorch/If.java | 2 +- .../pytorch/IncludeDispatchKeyGuard.java | 2 +- .../java/org/bytedeco/pytorch/IndexError.java | 4 +- .../org/bytedeco/pytorch/InferenceMode.java | 2 +- .../org/bytedeco/pytorch/InferredType.java | 2 +- .../bytedeco/pytorch/InlinedCallStack.java | 30 +- .../pytorch/InlinedCallStackOptional.java | 6 +- .../org/bytedeco/pytorch/InputArchive.java | 2 +- .../bytedeco/pytorch/InstanceNorm1dImpl.java | 4 +- .../pytorch/InstanceNorm1dImplBase.java | 4 +- .../pytorch/InstanceNorm1dImplBaseBase.java | 4 +- .../pytorch/InstanceNorm1dImplCloneable.java | 4 +- .../bytedeco/pytorch/InstanceNorm2dImpl.java | 4 +- .../pytorch/InstanceNorm2dImplBase.java | 4 +- .../pytorch/InstanceNorm2dImplBaseBase.java | 4 +- .../pytorch/InstanceNorm2dImplCloneable.java | 4 +- .../bytedeco/pytorch/InstanceNorm3dImpl.java | 4 +- .../pytorch/InstanceNorm3dImplBase.java | 4 +- .../pytorch/InstanceNorm3dImplBaseBase.java | 4 +- .../pytorch/InstanceNorm3dImplCloneable.java | 4 +- .../pytorch/InstanceNormFuncOptions.java | 2 +- .../bytedeco/pytorch/InstanceNormOptions.java | 2 +- .../org/bytedeco/pytorch/Instruction.java | 2 +- .../org/bytedeco/pytorch/IntArrayRef.java | 2 +- .../org/bytedeco/pytorch/IntOptional.java | 2 +- .../pytorch/IntSizedSmallVectorBase.java | 2 +- .../java/org/bytedeco/pytorch/IntType.java | 2 +- .../java/org/bytedeco/pytorch/IntTypePtr.java | 2 +- .../org/bytedeco/pytorch/InterfaceType.java | 2 +- .../pytorch/InterpolateFuncOptions.java | 2 +- .../org/bytedeco/pytorch/InterpolateMode.java | 18 +- .../bytedeco/pytorch/JavaBatchDataset.java | 2 +- .../org/bytedeco/pytorch/JavaDataset.java | 2 +- .../org/bytedeco/pytorch/JavaDatasetBase.java | 2 +- .../JavaDistributedRandomDataLoader.java | 2 +- .../JavaDistributedRandomDataLoaderBase.java | 2 +- ...JavaDistributedRandomTensorDataLoader.java | 2 +- ...DistributedRandomTensorDataLoaderBase.java | 2 +- .../JavaDistributedSequentialDataLoader.java | 2 +- ...vaDistributedSequentialDataLoaderBase.java | 2 +- 
...DistributedSequentialTensorDataLoader.java | 2 +- ...ributedSequentialTensorDataLoaderBase.java | 2 +- .../pytorch/JavaRandomDataLoader.java | 2 +- .../pytorch/JavaRandomDataLoaderBase.java | 2 +- .../pytorch/JavaRandomTensorDataLoader.java | 2 +- .../JavaRandomTensorDataLoaderBase.java | 2 +- .../pytorch/JavaSequentialDataLoader.java | 2 +- .../pytorch/JavaSequentialDataLoaderBase.java | 2 +- .../JavaSequentialTensorDataLoader.java | 2 +- .../JavaSequentialTensorDataLoaderBase.java | 2 +- .../pytorch/JavaStatefulBatchDataset.java | 2 +- .../pytorch/JavaStatefulDataLoader.java | 2 +- .../pytorch/JavaStatefulDataLoaderBase.java | 2 +- .../bytedeco/pytorch/JavaStatefulDataset.java | 2 +- .../pytorch/JavaStatefulDatasetBase.java | 2 +- .../JavaStatefulTensorBatchDataset.java | 2 +- .../pytorch/JavaStatefulTensorDataLoader.java | 2 +- .../JavaStatefulTensorDataLoaderBase.java | 2 +- .../pytorch/JavaStatefulTensorDataset.java | 2 +- .../JavaStatefulTensorDatasetBase.java | 2 +- .../pytorch/JavaStreamBatchDataset.java | 2 +- .../pytorch/JavaStreamDataLoader.java | 2 +- .../pytorch/JavaStreamDataLoaderBase.java | 2 +- .../bytedeco/pytorch/JavaStreamDataset.java | 2 +- .../pytorch/JavaStreamTensorBatchDataset.java | 2 +- .../pytorch/JavaStreamTensorDataLoader.java | 2 +- .../JavaStreamTensorDataLoaderBase.java | 2 +- .../pytorch/JavaStreamTensorDataset.java | 2 +- .../pytorch/JavaTensorBatchDataset.java | 2 +- .../bytedeco/pytorch/JavaTensorDataset.java | 2 +- .../pytorch/JavaTensorDatasetBase.java | 2 +- .../java/org/bytedeco/pytorch/JitModule.java | 8 +- .../java/org/bytedeco/pytorch/JitNode.java | 2 +- .../org/bytedeco/pytorch/JitNodeVector.java | 2 +- .../org/bytedeco/pytorch/JitNodeWrap.java | 2 +- .../java/org/bytedeco/pytorch/JitObject.java | 10 +- .../java/org/bytedeco/pytorch/JitString.java | 2 +- .../org/bytedeco/pytorch/KLDivLossImpl.java | 4 +- .../pytorch/KLDivLossImplCloneable.java | 4 +- .../bytedeco/pytorch/KLDivLossOptions.java | 2 +- .../bytedeco/pytorch/KLDivLossReduction.java | 12 +- .../org/bytedeco/pytorch/KernelFunction.java | 2 +- .../java/org/bytedeco/pytorch/L1LossImpl.java | 4 +- .../bytedeco/pytorch/L1LossImplCloneable.java | 4 +- .../org/bytedeco/pytorch/L1LossOptions.java | 2 +- .../gen/java/org/bytedeco/pytorch/LBFGS.java | 2 +- .../org/bytedeco/pytorch/LBFGSOptions.java | 2 +- .../org/bytedeco/pytorch/LBFGSParamState.java | 2 +- .../org/bytedeco/pytorch/LPPool1dImpl.java | 4 +- .../bytedeco/pytorch/LPPool1dImplBase.java | 4 +- .../pytorch/LPPool1dImplCloneable.java | 4 +- .../org/bytedeco/pytorch/LPPool1dOptions.java | 2 +- .../org/bytedeco/pytorch/LPPool2dImpl.java | 4 +- .../bytedeco/pytorch/LPPool2dImplBase.java | 4 +- .../pytorch/LPPool2dImplCloneable.java | 4 +- .../org/bytedeco/pytorch/LPPool2dOptions.java | 2 +- .../org/bytedeco/pytorch/LPPool3dOptions.java | 2 +- .../org/bytedeco/pytorch/LRScheduler.java | 2 +- .../org/bytedeco/pytorch/LSTMCellImpl.java | 4 +- .../bytedeco/pytorch/LSTMCellImplBase.java | 4 +- .../pytorch/LSTMCellImplCloneable.java | 4 +- .../org/bytedeco/pytorch/LSTMCellOptions.java | 2 +- .../java/org/bytedeco/pytorch/LSTMImpl.java | 4 +- .../org/bytedeco/pytorch/LSTMImplBase.java | 4 +- .../bytedeco/pytorch/LSTMImplCloneable.java | 4 +- .../org/bytedeco/pytorch/LSTMOptions.java | 2 +- .../pytorch/LayerNormFuncOptions.java | 2 +- .../org/bytedeco/pytorch/LayerNormImpl.java | 4 +- .../pytorch/LayerNormImplCloneable.java | 4 +- .../bytedeco/pytorch/LayerNormOptions.java | 2 +- .../pytorch/LayoutEnumerationType.java | 2 +- 
.../org/bytedeco/pytorch/LayoutOptional.java | 2 +- .../java/org/bytedeco/pytorch/LayoutType.java | 2 +- .../org/bytedeco/pytorch/LayoutTypePtr.java | 2 +- .../org/bytedeco/pytorch/LeakyReLUImpl.java | 4 +- .../pytorch/LeakyReLUImplCloneable.java | 4 +- .../bytedeco/pytorch/LeakyReLUOptions.java | 2 +- .../pytorch/LegacyTensorConstructor.java | 2 +- .../java/org/bytedeco/pytorch/Library.java | 28 +- .../org/bytedeco/pytorch/LinAlgError.java | 2 +- .../java/org/bytedeco/pytorch/LinearImpl.java | 4 +- .../bytedeco/pytorch/LinearImplCloneable.java | 4 +- .../org/bytedeco/pytorch/LinearOptions.java | 2 +- .../java/org/bytedeco/pytorch/ListComp.java | 2 +- .../org/bytedeco/pytorch/ListLiteral.java | 2 +- .../pytorch/ListSingleElementType.java | 2 +- .../java/org/bytedeco/pytorch/ListType.java | 3 +- .../bytedeco/pytorch/LocalDispatchKeySet.java | 2 +- .../pytorch/LocalResponseNormImpl.java | 4 +- .../LocalResponseNormImplCloneable.java | 4 +- .../pytorch/LocalResponseNormOptions.java | 2 +- .../org/bytedeco/pytorch/LogSigmoidImpl.java | 4 +- .../pytorch/LogSigmoidImplCloneable.java | 4 +- .../pytorch/LogSoftmaxFuncOptions.java | 2 +- .../org/bytedeco/pytorch/LogSoftmaxImpl.java | 4 +- .../pytorch/LogSoftmaxImplCloneable.java | 4 +- .../bytedeco/pytorch/LogSoftmaxOptions.java | 2 +- .../org/bytedeco/pytorch/LongArrayRef.java | 2 +- .../pytorch/LongArrayRefOptional.java | 2 +- .../pytorch/LongElementReference.java | 2 +- .../pytorch/LongExpandingArrayOptional.java | 2 +- .../java/org/bytedeco/pytorch/LongList.java | 2 +- .../bytedeco/pytorch/LongListIterator.java | 2 +- .../org/bytedeco/pytorch/LongOptional.java | 2 +- .../pytorch/LongOptionalArrayRef.java | 2 +- .../bytedeco/pytorch/LongOptionalVector.java | 2 +- .../bytedeco/pytorch/LongSmallVectorBase.java | 2 +- .../pytorch/LongSmallVectorCommon.java | 2 +- .../bytedeco/pytorch/LongSmallVectorImpl.java | 2 +- .../bytedeco/pytorch/LongVaryingShape.java | 2 +- .../java/org/bytedeco/pytorch/LongVector.java | 2 +- .../bytedeco/pytorch/LongVectorOptional.java | 2 +- .../org/bytedeco/pytorch/LossReduction.java | 10 +- .../gen/java/org/bytedeco/pytorch/MNIST.java | 2 +- .../bytedeco/pytorch/MNISTBatchDataset.java | 2 +- .../org/bytedeco/pytorch/MNISTDataset.java | 2 +- .../pytorch/MNISTMapBatchDataset.java | 2 +- .../org/bytedeco/pytorch/MNISTMapDataset.java | 2 +- .../pytorch/MNISTRandomDataLoader.java | 2 +- .../pytorch/MNISTRandomDataLoaderBase.java | 2 +- .../org/bytedeco/pytorch/MPSHooksArgs.java | 2 +- .../bytedeco/pytorch/MPSHooksInterface.java | 2 +- .../org/bytedeco/pytorch/MSELossImpl.java | 4 +- .../pytorch/MSELossImplCloneable.java | 4 +- .../org/bytedeco/pytorch/MSELossOptions.java | 2 +- .../bytedeco/pytorch/MTIAHooksInterface.java | 2 +- .../org/bytedeco/pytorch/MagicMethod.java | 2 +- .../pytorch/MarginRankingLossImpl.java | 4 +- .../MarginRankingLossImplCloneable.java | 4 +- .../pytorch/MarginRankingLossOptions.java | 2 +- .../org/bytedeco/pytorch/MatchTypeReturn.java | 2 +- .../org/bytedeco/pytorch/MatchedSchema.java | 2 +- .../org/bytedeco/pytorch/MaxPool1dImpl.java | 4 +- .../bytedeco/pytorch/MaxPool1dImplBase.java | 4 +- .../pytorch/MaxPool1dImplCloneable.java | 4 +- .../bytedeco/pytorch/MaxPool1dOptions.java | 2 +- .../org/bytedeco/pytorch/MaxPool2dImpl.java | 4 +- .../bytedeco/pytorch/MaxPool2dImplBase.java | 4 +- .../pytorch/MaxPool2dImplCloneable.java | 4 +- .../bytedeco/pytorch/MaxPool2dOptions.java | 2 +- .../org/bytedeco/pytorch/MaxPool3dImpl.java | 4 +- .../bytedeco/pytorch/MaxPool3dImplBase.java | 4 +- 
.../pytorch/MaxPool3dImplCloneable.java | 4 +- .../bytedeco/pytorch/MaxPool3dOptions.java | 2 +- .../pytorch/MaxUnpool1dFuncOptions.java | 2 +- .../org/bytedeco/pytorch/MaxUnpool1dImpl.java | 4 +- .../bytedeco/pytorch/MaxUnpool1dImplBase.java | 4 +- .../pytorch/MaxUnpool1dImplCloneable.java | 4 +- .../bytedeco/pytorch/MaxUnpool1dOptions.java | 2 +- .../pytorch/MaxUnpool2dFuncOptions.java | 2 +- .../org/bytedeco/pytorch/MaxUnpool2dImpl.java | 4 +- .../bytedeco/pytorch/MaxUnpool2dImplBase.java | 4 +- .../pytorch/MaxUnpool2dImplCloneable.java | 4 +- .../bytedeco/pytorch/MaxUnpool2dOptions.java | 2 +- .../pytorch/MaxUnpool3dFuncOptions.java | 2 +- .../org/bytedeco/pytorch/MaxUnpool3dImpl.java | 4 +- .../bytedeco/pytorch/MaxUnpool3dImplBase.java | 4 +- .../pytorch/MaxUnpool3dImplCloneable.java | 4 +- .../bytedeco/pytorch/MaxUnpool3dOptions.java | 2 +- .../pytorch/MemoryFormatOptional.java | 2 +- .../bytedeco/pytorch/MemoryFormatType.java | 2 +- .../pytorch/MemoryFormattEnumerationType.java | 2 +- .../pytorch/MemoryReportingInfoBase.java | 2 +- .../java/org/bytedeco/pytorch/MetaBase.java | 3 +- .../gen/java/org/bytedeco/pytorch/Method.java | 2 +- .../org/bytedeco/pytorch/MethodOptional.java | 2 +- .../org/bytedeco/pytorch/MethodValue.java | 2 +- .../java/org/bytedeco/pytorch/MishImpl.java | 4 +- .../bytedeco/pytorch/MishImplCloneable.java | 4 +- .../java/org/bytedeco/pytorch/MobileCode.java | 2 +- .../gen/java/org/bytedeco/pytorch/Module.java | 6 +- .../org/bytedeco/pytorch/ModuleDictImpl.java | 4 +- .../pytorch/ModuleDictImplCloneable.java | 4 +- .../bytedeco/pytorch/ModuleInstanceInfo.java | 2 +- .../pytorch/ModuleInstanceInfoOptional.java | 2 +- .../org/bytedeco/pytorch/ModuleListImpl.java | 4 +- .../pytorch/ModuleListImplCloneable.java | 4 +- .../org/bytedeco/pytorch/ModulePolicy.java | 2 +- .../pytorch/MultiLabelMarginLossImpl.java | 4 +- .../MultiLabelMarginLossImplCloneable.java | 4 +- .../pytorch/MultiLabelMarginLossOptions.java | 2 +- .../pytorch/MultiLabelSoftMarginLossImpl.java | 4 +- ...MultiLabelSoftMarginLossImplCloneable.java | 4 +- .../MultiLabelSoftMarginLossOptions.java | 2 +- .../bytedeco/pytorch/MultiMarginLossImpl.java | 4 +- .../pytorch/MultiMarginLossImplCloneable.java | 4 +- .../pytorch/MultiMarginLossOptions.java | 2 +- .../MultiheadAttentionForwardFuncOptions.java | 2 +- .../pytorch/MultiheadAttentionImpl.java | 4 +- .../MultiheadAttentionImplCloneable.java | 4 +- .../pytorch/MultiheadAttentionOptions.java | 2 +- .../pytorch/MzZipReaderIterWrapper.java | 27 + .../org/bytedeco/pytorch/NLLLossImpl.java | 4 +- .../pytorch/NLLLossImplCloneable.java | 4 +- .../org/bytedeco/pytorch/NLLLossOptions.java | 2 +- .../org/bytedeco/pytorch/NameMangler.java | 2 +- .../org/bytedeco/pytorch/NamedIValue.java | 2 +- .../org/bytedeco/pytorch/NamedJitModule.java | 2 +- .../org/bytedeco/pytorch/NamedTensor.java | 2 +- .../org/bytedeco/pytorch/NamedTensorMeta.java | 2 +- .../pytorch/NamedTensorMetaInterface.java | 2 +- .../pytorch/NamedTupleConstructor.java | 2 +- .../java/org/bytedeco/pytorch/NamedType.java | 2 +- .../java/org/bytedeco/pytorch/NamedValue.java | 2 +- .../bytedeco/pytorch/NamedValueArrayRef.java | 2 +- .../bytedeco/pytorch/NamedValueOptional.java | 2 +- .../java/org/bytedeco/pytorch/NamesMode.java | 2 +- .../org/bytedeco/pytorch/NativeResolver.java | 2 +- .../bytedeco/pytorch/NestedTensorImpl.java | 4 +- .../org/bytedeco/pytorch/NoGradGuard.java | 2 +- .../org/bytedeco/pytorch/NoNamesGuard.java | 2 +- .../org/bytedeco/pytorch/NoTF32Guard.java | 2 +- 
.../java/org/bytedeco/pytorch/NoTarget.java | 2 +- .../gen/java/org/bytedeco/pytorch/Node.java | 11 +- .../java/org/bytedeco/pytorch/NodeSet.java | 2 +- .../java/org/bytedeco/pytorch/NoneType.java | 2 +- .../org/bytedeco/pytorch/NoneTypePtr.java | 2 +- .../org/bytedeco/pytorch/Nonlinearity.java | 26 +- .../pytorch/NormalizeFuncOptions.java | 2 +- .../bytedeco/pytorch/NotImplementedError.java | 2 +- .../java/org/bytedeco/pytorch/NumberType.java | 2 +- .../org/bytedeco/pytorch/NumberTypePtr.java | 2 +- .../org/bytedeco/pytorch/ORTHooksArgs.java | 2 +- .../bytedeco/pytorch/ORTHooksInterface.java | 2 +- .../gen/java/org/bytedeco/pytorch/ObjPtr.java | 150 + .../gen/java/org/bytedeco/pytorch/Object.java | 115 + .../pytorch/OnnxfiBackendSystemError.java | 2 +- .../pytorch/OpRegistrationListener.java | 2 +- .../org/bytedeco/pytorch/OperandInfo.java | 6 +- .../java/org/bytedeco/pytorch/Operation.java | 2 +- .../java/org/bytedeco/pytorch/Operator.java | 4 +- .../org/bytedeco/pytorch/OperatorHandle.java | 7 +- .../pytorch/OperatorHandleOptional.java | 2 +- .../org/bytedeco/pytorch/OperatorKernel.java | 2 +- .../org/bytedeco/pytorch/OperatorName.java | 2 +- .../pytorch/OperatorNameOptional.java | 2 +- .../org/bytedeco/pytorch/OperatorSet.java | 2 +- .../org/bytedeco/pytorch/OperatorVector.java | 2 +- .../java/org/bytedeco/pytorch/Optimizer.java | 2 +- .../OptimizerCloneableAdagradOptions.java | 2 +- .../OptimizerCloneableAdagradParamState.java | 2 +- .../OptimizerCloneableAdamOptions.java | 2 +- .../OptimizerCloneableAdamParamState.java | 2 +- .../OptimizerCloneableAdamWOptions.java | 2 +- .../OptimizerCloneableAdamWParamState.java | 2 +- .../OptimizerCloneableLBFGSOptions.java | 2 +- .../OptimizerCloneableLBFGSParamState.java | 2 +- .../OptimizerCloneableRMSpropOptions.java | 2 +- .../OptimizerCloneableRMSpropParamState.java | 2 +- .../pytorch/OptimizerCloneableSGDOptions.java | 2 +- .../OptimizerCloneableSGDParamState.java | 2 +- .../bytedeco/pytorch/OptimizerOptions.java | 10 +- .../bytedeco/pytorch/OptimizerParamGroup.java | 2 +- .../pytorch/OptimizerParamGroupVector.java | 2 +- .../bytedeco/pytorch/OptimizerParamState.java | 10 +- .../bytedeco/pytorch/OptionalDeviceGuard.java | 2 +- .../org/bytedeco/pytorch/OptionalType.java | 4 +- .../bytedeco/pytorch/OutOfMemoryError.java | 2 +- .../org/bytedeco/pytorch/OutputArchive.java | 2 +- .../pytorch/PODLocalDispatchKeySet.java | 2 +- .../java/org/bytedeco/pytorch/PReLUImpl.java | 4 +- .../bytedeco/pytorch/PReLUImplCloneable.java | 4 +- .../org/bytedeco/pytorch/PReLUOptions.java | 2 +- .../org/bytedeco/pytorch/PackedSequence.java | 2 +- .../org/bytedeco/pytorch/PadFuncOptions.java | 2 +- .../org/bytedeco/pytorch/PaddingMode.java | 12 +- .../pytorch/PairwiseDistanceImpl.java | 4 +- .../PairwiseDistanceImplCloneable.java | 4 +- .../pytorch/PairwiseDistanceOptions.java | 2 +- .../gen/java/org/bytedeco/pytorch/Param.java | 2 +- .../java/org/bytedeco/pytorch/ParamList.java | 2 +- .../bytedeco/pytorch/ParamListIterator.java | 2 +- .../bytedeco/pytorch/ParameterDictImpl.java | 4 +- .../pytorch/ParameterDictImplCloneable.java | 4 +- .../bytedeco/pytorch/ParameterListImpl.java | 4 +- .../pytorch/ParameterListImplCloneable.java | 4 +- .../org/bytedeco/pytorch/ParameterPolicy.java | 2 +- .../gen/java/org/bytedeco/pytorch/Pass.java | 2 +- .../java/org/bytedeco/pytorch/Pickler.java | 2 +- .../bytedeco/pytorch/PixelShuffleImpl.java | 4 +- .../pytorch/PixelShuffleImplCloneable.java | 4 +- .../bytedeco/pytorch/PixelShuffleOptions.java | 2 +- 
.../bytedeco/pytorch/PixelUnshuffleImpl.java | 4 +- .../pytorch/PixelUnshuffleImplCloneable.java | 4 +- .../pytorch/PixelUnshuffleOptions.java | 2 +- .../pytorch/PlacementDeleteContext.java | 2 +- .../org/bytedeco/pytorch/PointerPair.java | 2 +- .../bytedeco/pytorch/PointerPairOptional.java | 2 +- .../bytedeco/pytorch/PoissonNLLLossImpl.java | 4 +- .../pytorch/PoissonNLLLossImplCloneable.java | 4 +- .../pytorch/PoissonNLLLossOptions.java | 2 +- .../pytorch/PostAccumulateGradHook.java | 6 +- .../java/org/bytedeco/pytorch/PrintValue.java | 2 +- .../pytorch/PrivateUse1HooksArgs.java | 2 +- .../pytorch/PrivateUse1HooksInterface.java | 2 +- .../org/bytedeco/pytorch/ProfileIValueOp.java | 2 +- .../org/bytedeco/pytorch/ProfilerConfig.java | 2 +- .../java/org/bytedeco/pytorch/Property.java | 2 +- .../org/bytedeco/pytorch/PropertyList.java | 2 +- .../pytorch/PropertyListIterator.java | 2 +- .../bytedeco/pytorch/PropertyListMaybe.java | 2 +- .../org/bytedeco/pytorch/PropertyVector.java | 2 +- .../org/bytedeco/pytorch/PyInterpreter.java | 2 +- .../bytedeco/pytorch/PyInterpreterVTable.java | 16 +- .../org/bytedeco/pytorch/PyObjectHolder.java | 2 +- .../bytedeco/pytorch/PyObjectHolderPtr.java | 2 +- .../org/bytedeco/pytorch/PyObjectType.java | 2 +- .../org/bytedeco/pytorch/PyObjectTypePtr.java | 2 +- .../bytedeco/pytorch/PyTorchStreamReader.java | 63 +- .../bytedeco/pytorch/PythonDispatcherTLS.java | 2 +- .../java/org/bytedeco/pytorch/PythonOp.java | 2 +- .../pytorch/PythonTorchFunctionTLS.java | 8 +- .../org/bytedeco/pytorch/QEngineVector.java | 2 +- .../org/bytedeco/pytorch/QSchemeType.java | 2 +- .../org/bytedeco/pytorch/QSchemeTypePtr.java | 2 +- .../org/bytedeco/pytorch/QTensorImpl.java | 2 +- .../org/bytedeco/pytorch/QualifiedName.java | 2 +- .../pytorch/QualifiedNameOptional.java | 2 +- .../java/org/bytedeco/pytorch/Quantizer.java | 2 +- .../org/bytedeco/pytorch/QuantizerPtr.java | 4 +- .../org/bytedeco/pytorch/QuantizerType.java | 2 +- .../bytedeco/pytorch/QuantizerTypePtr.java | 2 +- .../java/org/bytedeco/pytorch/RMSprop.java | 2 +- .../org/bytedeco/pytorch/RMSpropOptions.java | 2 +- .../bytedeco/pytorch/RMSpropParamState.java | 2 +- .../org/bytedeco/pytorch/RNNBaseMode.java | 12 +- .../org/bytedeco/pytorch/RNNCellImpl.java | 4 +- .../org/bytedeco/pytorch/RNNCellImplBase.java | 4 +- .../pytorch/RNNCellImplCloneable.java | 4 +- .../org/bytedeco/pytorch/RNNCellOptions.java | 2 +- .../bytedeco/pytorch/RNNCellOptionsBase.java | 2 +- .../java/org/bytedeco/pytorch/RNNImpl.java | 4 +- .../org/bytedeco/pytorch/RNNImplBase.java | 4 +- .../bytedeco/pytorch/RNNImplCloneable.java | 4 +- .../org/bytedeco/pytorch/RNNNonlinearity.java | 8 +- .../java/org/bytedeco/pytorch/RNNOptions.java | 2 +- .../org/bytedeco/pytorch/RNNOptionsBase.java | 2 +- .../bytedeco/pytorch/RReLUFuncOptions.java | 2 +- .../java/org/bytedeco/pytorch/RReLUImpl.java | 4 +- .../bytedeco/pytorch/RReLUImplCloneable.java | 4 +- .../org/bytedeco/pytorch/RReLUOptions.java | 2 +- .../org/bytedeco/pytorch/RRefInterface.java | 2 +- .../bytedeco/pytorch/RRefInterfacePtr.java | 2 +- .../pytorch/RRefSingleElementType.java | 2 +- .../java/org/bytedeco/pytorch/RRefType.java | 2 +- .../gen/java/org/bytedeco/pytorch/Raise.java | 2 +- .../org/bytedeco/pytorch/RandomSampler.java | 2 +- .../java/org/bytedeco/pytorch/RangeValue.java | 2 +- .../java/org/bytedeco/pytorch/ReLU6Impl.java | 4 +- .../bytedeco/pytorch/ReLU6ImplCloneable.java | 4 +- .../org/bytedeco/pytorch/ReLU6Options.java | 2 +- .../java/org/bytedeco/pytorch/ReLUImpl.java | 4 +- 
.../bytedeco/pytorch/ReLUImplCloneable.java | 4 +- .../org/bytedeco/pytorch/ReLUOptions.java | 2 +- .../pytorch/ReadAdapterInterface.java | 2 +- .../pytorch/ReadAdapterInterfaceVector.java | 90 + .../org/bytedeco/pytorch/RecordFunction.java | 8 +- .../pytorch/RecordFunctionCallbacksEntry.java | 2 +- .../bytedeco/pytorch/RecordFunctionGuard.java | 2 +- .../pytorch/RecordFunctionHandleIntList.java | 2 +- .../pytorch/RecordFunctionHandleIntPair.java | 2 +- .../bytedeco/pytorch/RecordFunctionTLS.java | 2 +- .../bytedeco/pytorch/ReflectionPad1dImpl.java | 4 +- .../pytorch/ReflectionPad1dImplBase.java | 4 +- .../pytorch/ReflectionPad1dImplCloneable.java | 4 +- .../pytorch/ReflectionPad1dOptions.java | 2 +- .../bytedeco/pytorch/ReflectionPad2dImpl.java | 4 +- .../pytorch/ReflectionPad2dImplBase.java | 4 +- .../pytorch/ReflectionPad2dImplCloneable.java | 4 +- .../pytorch/ReflectionPad2dOptions.java | 2 +- .../bytedeco/pytorch/ReflectionPad3dImpl.java | 4 +- .../pytorch/ReflectionPad3dImplBase.java | 4 +- .../pytorch/ReflectionPad3dImplCloneable.java | 4 +- .../pytorch/ReflectionPad3dOptions.java | 2 +- .../bytedeco/pytorch/RegisterOperators.java | 2 +- .../pytorch/RegistrationHandleRAII.java | 2 +- .../pytorch/ReplicationPad1dImpl.java | 4 +- .../pytorch/ReplicationPad1dImplBase.java | 4 +- .../ReplicationPad1dImplCloneable.java | 4 +- .../pytorch/ReplicationPad1dOptions.java | 2 +- .../pytorch/ReplicationPad2dImpl.java | 4 +- .../pytorch/ReplicationPad2dImplBase.java | 4 +- .../ReplicationPad2dImplCloneable.java | 4 +- .../pytorch/ReplicationPad2dOptions.java | 2 +- .../pytorch/ReplicationPad3dImpl.java | 4 +- .../pytorch/ReplicationPad3dImplBase.java | 4 +- .../ReplicationPad3dImplCloneable.java | 4 +- .../pytorch/ReplicationPad3dOptions.java | 2 +- .../java/org/bytedeco/pytorch/Resolver.java | 2 +- .../org/bytedeco/pytorch/ResolverVector.java | 2 +- .../gen/java/org/bytedeco/pytorch/Result.java | 2 +- .../gen/java/org/bytedeco/pytorch/Return.java | 2 +- .../java/org/bytedeco/pytorch/SELUImpl.java | 4 +- .../bytedeco/pytorch/SELUImplCloneable.java | 4 +- .../org/bytedeco/pytorch/SELUOptions.java | 2 +- .../gen/java/org/bytedeco/pytorch/SGD.java | 2 +- .../java/org/bytedeco/pytorch/SGDOptions.java | 2 +- .../org/bytedeco/pytorch/SGDParamState.java | 2 +- .../org/bytedeco/pytorch/SafePyHandle.java | 2 +- .../org/bytedeco/pytorch/SafePyObject.java | 6 +- .../pytorch/SafePyObjectOptional.java | 35 + .../java/org/bytedeco/pytorch/Sampler.java | 2 +- .../pytorch/SavedTensorDefaultHooks.java | 2 +- .../pytorch/SavedTensorDefaultHooksTLS.java | 2 +- .../bytedeco/pytorch/SavedVariableHooks.java | 2 +- .../gen/java/org/bytedeco/pytorch/Scalar.java | 19 +- .../org/bytedeco/pytorch/ScalarArrayRef.java | 2 +- .../org/bytedeco/pytorch/ScalarOptional.java | 2 +- .../bytedeco/pytorch/ScalarTypeArrayRef.java | 2 +- .../pytorch/ScalarTypeEnumerationType.java | 2 +- .../bytedeco/pytorch/ScalarTypeOptional.java | 2 +- .../org/bytedeco/pytorch/ScalarTypeType.java | 2 +- .../bytedeco/pytorch/ScalarTypeTypePtr.java | 2 +- .../bytedeco/pytorch/ScalarTypeVector.java | 2 +- .../org/bytedeco/pytorch/SchemaArgument.java | 2 +- .../java/org/bytedeco/pytorch/SchemaInfo.java | 2 +- .../gen/java/org/bytedeco/pytorch/Scope.java | 2 +- .../org/bytedeco/pytorch/ScopeOptional.java | 2 +- .../bytedeco/pytorch/ScriptTypeParser.java | 2 +- .../gen/java/org/bytedeco/pytorch/Select.java | 2 +- .../gen/java/org/bytedeco/pytorch/Self.java | 2 +- .../org/bytedeco/pytorch/SequentialImpl.java | 4 +- .../pytorch/SequentialImplCloneable.java 
| 4 +- .../bytedeco/pytorch/SequentialSampler.java | 2 +- .../org/bytedeco/pytorch/ShapeSymbol.java | 2 +- .../bytedeco/pytorch/ShapeSymbolVector.java | 2 +- .../pytorch/ShapeSymbolVectorOptional.java | 2 +- .../pytorch/SharedClassTypeVector.java | 2 +- .../bytedeco/pytorch/SharedModuleVector.java | 2 +- .../bytedeco/pytorch/SharedParserData.java | 2 +- .../pytorch/SharedSugaredValueVector.java | 2 +- .../java/org/bytedeco/pytorch/SharedType.java | 2 +- .../org/bytedeco/pytorch/ShortArrayRef.java | 2 +- .../java/org/bytedeco/pytorch/SiLUImpl.java | 4 +- .../bytedeco/pytorch/SiLUImplCloneable.java | 4 +- .../org/bytedeco/pytorch/SigmoidImpl.java | 4 +- .../pytorch/SigmoidImplCloneable.java | 4 +- .../java/org/bytedeco/pytorch/SimpleSelf.java | 2 +- .../org/bytedeco/pytorch/SimpleValue.java | 2 +- .../bytedeco/pytorch/SingletonTypePtr.java | 2 +- .../org/bytedeco/pytorch/SizeTArrayRef.java | 2 +- .../pytorch/SizeTMatchedSchemaPair.java | 2 +- .../org/bytedeco/pytorch/SizeTOptional.java | 2 +- .../org/bytedeco/pytorch/SizeTVector.java | 2 +- .../bytedeco/pytorch/SizeTVectorOptional.java | 2 +- .../org/bytedeco/pytorch/SizesAndStrides.java | 2 +- .../gen/java/org/bytedeco/pytorch/Slice.java | 2 +- .../java/org/bytedeco/pytorch/SliceExpr.java | 2 +- .../java/org/bytedeco/pytorch/SliceValue.java | 2 +- .../java/org/bytedeco/pytorch/SlotCursor.java | 2 +- .../bytedeco/pytorch/SmoothL1LossImpl.java | 4 +- .../pytorch/SmoothL1LossImplCloneable.java | 4 +- .../bytedeco/pytorch/SmoothL1LossOptions.java | 4 +- .../bytedeco/pytorch/SoftMarginLossImpl.java | 4 +- .../pytorch/SoftMarginLossImplCloneable.java | 4 +- .../pytorch/SoftMarginLossOptions.java | 2 +- .../org/bytedeco/pytorch/Softmax2dImpl.java | 4 +- .../pytorch/Softmax2dImplCloneable.java | 4 +- .../bytedeco/pytorch/SoftmaxFuncOptions.java | 2 +- .../org/bytedeco/pytorch/SoftmaxImpl.java | 4 +- .../pytorch/SoftmaxImplCloneable.java | 4 +- .../org/bytedeco/pytorch/SoftmaxOptions.java | 2 +- .../bytedeco/pytorch/SoftminFuncOptions.java | 2 +- .../org/bytedeco/pytorch/SoftminImpl.java | 4 +- .../pytorch/SoftminImplCloneable.java | 4 +- .../org/bytedeco/pytorch/SoftminOptions.java | 2 +- .../org/bytedeco/pytorch/SoftplusImpl.java | 4 +- .../pytorch/SoftplusImplCloneable.java | 4 +- .../org/bytedeco/pytorch/SoftplusOptions.java | 2 +- .../org/bytedeco/pytorch/SoftshrinkImpl.java | 4 +- .../pytorch/SoftshrinkImplCloneable.java | 4 +- .../bytedeco/pytorch/SoftshrinkOptions.java | 2 +- .../org/bytedeco/pytorch/SoftsignImpl.java | 4 +- .../pytorch/SoftsignImplCloneable.java | 4 +- .../gen/java/org/bytedeco/pytorch/Source.java | 2 +- .../org/bytedeco/pytorch/SourceLocation.java | 2 +- .../org/bytedeco/pytorch/SourceRange.java | 2 +- .../bytedeco/pytorch/SourceRangeHasher.java | 2 +- .../bytedeco/pytorch/SourceRangeOptional.java | 2 +- .../pytorch/SourceRangeUnpickler.java | 2 +- .../bytedeco/pytorch/SpecialFormValue.java | 2 +- .../org/bytedeco/pytorch/SplitUntil32Bit.java | 2 +- .../java/org/bytedeco/pytorch/StackEntry.java | 2 +- .../java/org/bytedeco/pytorch/Starred.java | 2 +- .../gen/java/org/bytedeco/pytorch/StepLR.java | 2 +- .../gen/java/org/bytedeco/pytorch/Stmt.java | 2 +- .../java/org/bytedeco/pytorch/StmtList.java | 2 +- .../bytedeco/pytorch/StmtListIterator.java | 2 +- .../java/org/bytedeco/pytorch/Storage.java | 20 +- .../org/bytedeco/pytorch/StorageImpl.java | 4 +- .../org/bytedeco/pytorch/StorageImplPtr.java | 2 +- .../org/bytedeco/pytorch/StorageType.java | 2 +- .../org/bytedeco/pytorch/StorageTypePtr.java | 2 +- 
.../gen/java/org/bytedeco/pytorch/Stream.java | 2 +- .../org/bytedeco/pytorch/StreamData3.java | 2 +- .../org/bytedeco/pytorch/StreamObjType.java | 2 +- .../bytedeco/pytorch/StreamObjTypePtr.java | 2 +- .../org/bytedeco/pytorch/StreamOptional.java | 2 +- .../org/bytedeco/pytorch/StreamSampler.java | 2 +- .../gen/java/org/bytedeco/pytorch/Stride.java | 2 +- .../org/bytedeco/pytorch/StrideArrayRef.java | 2 +- .../org/bytedeco/pytorch/StrideOptional.java | 2 +- .../bytedeco/pytorch/StrideVaryingShape.java | 2 +- .../org/bytedeco/pytorch/StrideVector.java | 2 +- .../pytorch/StrideVectorOptional.java | 2 +- .../bytedeco/pytorch/StringAnyModuleDict.java | 2 +- .../pytorch/StringAnyModuleDictItem.java | 2 +- .../StringAnyModuleDictItemVector.java | 2 +- .../bytedeco/pytorch/StringAnyModulePair.java | 2 +- .../pytorch/StringAnyModuleVector.java | 2 +- .../org/bytedeco/pytorch/StringArrayRef.java | 2 +- .../org/bytedeco/pytorch/StringBoolMap.java | 2 +- .../org/bytedeco/pytorch/StringCordView.java | 4 +- .../pytorch/StringGenericListDict.java | 2 +- .../org/bytedeco/pytorch/StringIValueMap.java | 2 +- .../org/bytedeco/pytorch/StringLiteral.java | 2 +- .../org/bytedeco/pytorch/StringLongMap.java | 2 +- .../bytedeco/pytorch/StringLongVector.java | 2 +- .../org/bytedeco/pytorch/StringOptional.java | 2 +- .../java/org/bytedeco/pytorch/StringSet.java | 2 +- .../pytorch/StringSharedModuleDict.java | 2 +- .../pytorch/StringSharedModuleDictItem.java | 2 +- .../StringSharedModuleDictItemVector.java | 2 +- .../pytorch/StringSharedModulePair.java | 2 +- .../pytorch/StringSharedModuleVector.java | 2 +- .../org/bytedeco/pytorch/StringSizeTMap.java | 2 +- .../org/bytedeco/pytorch/StringStringMap.java | 2 +- .../bytedeco/pytorch/StringTensorDict.java | 2 +- .../pytorch/StringTensorDictItem.java | 2 +- .../pytorch/StringTensorDictItemVector.java | 2 +- .../bytedeco/pytorch/StringTensorPair.java | 2 +- .../bytedeco/pytorch/StringTensorVector.java | 2 +- .../java/org/bytedeco/pytorch/StringType.java | 2 +- .../org/bytedeco/pytorch/StringTypePtr.java | 2 +- .../org/bytedeco/pytorch/StringValueMap.java | 2 +- .../org/bytedeco/pytorch/StringVector.java | 2 +- .../pytorch/StringVectorOptional.java | 2 +- .../bytedeco/pytorch/StringViewOptional.java | 2 +- .../bytedeco/pytorch/StringViewVector.java | 2 +- .../pytorch/StringViewVectorOptional.java | 2 +- .../org/bytedeco/pytorch/StrongTypePtr.java | 2 +- .../java/org/bytedeco/pytorch/Subscript.java | 2 +- .../bytedeco/pytorch/SugaredEnumClass.java | 2 +- .../bytedeco/pytorch/SugaredTupleValue.java | 2 +- .../org/bytedeco/pytorch/SugaredValue.java | 2 +- .../bytedeco/pytorch/SwapSavedVariables.java | 2 +- .../java/org/bytedeco/pytorch/SymBool.java | 2 +- .../org/bytedeco/pytorch/SymBoolType.java | 2 +- .../org/bytedeco/pytorch/SymDimVector.java | 2 +- .../pytorch/SymDimVectorOptional.java | 2 +- .../java/org/bytedeco/pytorch/SymFloat.java | 2 +- .../org/bytedeco/pytorch/SymFloatType.java | 2 +- .../gen/java/org/bytedeco/pytorch/SymInt.java | 16 +- .../org/bytedeco/pytorch/SymIntArrayRef.java | 2 +- .../pytorch/SymIntArrayRefOptional.java | 2 +- .../org/bytedeco/pytorch/SymIntOptional.java | 2 +- .../pytorch/SymIntSmallVectorBase.java | 2 +- .../pytorch/SymIntSmallVectorCommon.java | 2 +- .../pytorch/SymIntSmallVectorImpl.java | 2 +- .../java/org/bytedeco/pytorch/SymIntType.java | 2 +- .../org/bytedeco/pytorch/SymIntVector.java | 2 +- .../java/org/bytedeco/pytorch/SymNode.java | 2 +- .../org/bytedeco/pytorch/SymNodeArrayRef.java | 2 +- 
.../org/bytedeco/pytorch/SymNodeImpl.java | 8 +- .../gen/java/org/bytedeco/pytorch/Symbol.java | 2 +- .../org/bytedeco/pytorch/SymbolArrayRef.java | 2 +- .../java/org/bytedeco/pytorch/SymbolSet.java | 2 +- .../org/bytedeco/pytorch/SymbolVector.java | 2 +- .../org/bytedeco/pytorch/SymbolicShape.java | 2 +- .../bytedeco/pytorch/SymbolicShapeMeta.java | 73 +- .../bytedeco/pytorch/T_DataPtrSizeT_T.java | 2 +- .../java/org/bytedeco/pytorch/T_IntInt_T.java | 2 +- .../org/bytedeco/pytorch/T_LongLong_T.java | 2 +- .../T_PackedSequenceT_TensorTensor_T_T.java | 2 +- .../pytorch/T_PackedSequenceTensor_T.java | 2 +- .../T_SafePyObjectTorchDispatchModeKey_T.java | 36 + .../pytorch/T_StringSizeTSizeT_T.java | 2 +- .../pytorch/T_StringSizeTSizeT_TOptional.java | 2 +- ...nedTensorMaybeOwnedTensorMaybeOwned_T.java | 2 +- .../T_TensorMaybeOwnedTensorMaybeOwned_T.java | 2 +- .../pytorch/T_TensorT_TensorTensor_T_T.java | 2 +- .../pytorch/T_TensorTensorDoubleLong_T.java | 2 +- ...ensorTensorTensorTensorTensorTensor_T.java | 2 +- .../T_TensorTensorTensorTensorTensor_T.java | 2 +- .../T_TensorTensorTensorTensorVector_T.java | 2 +- .../pytorch/T_TensorTensorTensorTensor_T.java | 2 +- .../pytorch/T_TensorTensorTensor_T.java | 2 +- .../T_TensorTensorVectorTensorVector_T.java | 2 +- .../pytorch/T_TensorTensorVector_T.java | 2 +- .../bytedeco/pytorch/T_TensorTensor_T.java | 2 +- .../pytorch/T_TensorTensor_TOptional.java | 2 +- .../org/bytedeco/pytorch/T_TypePtrLong_T.java | 2 +- .../pytorch/T_TypePtrLong_TOptional.java | 2 +- .../org/bytedeco/pytorch/TagArrayRef.java | 4 +- .../java/org/bytedeco/pytorch/TagVector.java | 90 + .../java/org/bytedeco/pytorch/TanhImpl.java | 4 +- .../bytedeco/pytorch/TanhImplCloneable.java | 4 +- .../org/bytedeco/pytorch/TanhshrinkImpl.java | 4 +- .../pytorch/TanhshrinkImplCloneable.java | 4 +- .../gen/java/org/bytedeco/pytorch/Tensor.java | 14 +- .../java/org/bytedeco/pytorch/TensorArg.java | 2 +- .../bytedeco/pytorch/TensorArgArrayRef.java | 2 +- .../org/bytedeco/pytorch/TensorArrayRef.java | 2 +- .../pytorch/TensorArrayRefOptional.java | 2 +- .../java/org/bytedeco/pytorch/TensorBase.java | 8 +- .../pytorch/TensorBaseMaybeOwned.java | 2 +- .../bytedeco/pytorch/TensorBatchDataset.java | 2 +- .../org/bytedeco/pytorch/TensorCastValue.java | 2 +- .../org/bytedeco/pytorch/TensorDataset.java | 2 +- .../bytedeco/pytorch/TensorDatasetBase.java | 2 +- .../org/bytedeco/pytorch/TensorDeque.java | 2 +- .../pytorch/TensorElementReference.java | 2 +- .../org/bytedeco/pytorch/TensorExample.java | 2 +- .../pytorch/TensorExampleCollation.java | 2 +- .../pytorch/TensorExampleIterator.java | 2 +- .../pytorch/TensorExampleOptional.java | 2 +- .../bytedeco/pytorch/TensorExampleVector.java | 2 +- .../pytorch/TensorExampleVectorIterator.java | 2 +- .../pytorch/TensorExampleVectorOptional.java | 2 +- .../org/bytedeco/pytorch/TensorGeometry.java | 2 +- .../bytedeco/pytorch/TensorGeometryArg.java | 2 +- .../java/org/bytedeco/pytorch/TensorImpl.java | 4 +- .../org/bytedeco/pytorch/TensorImplPtr.java | 2 +- .../org/bytedeco/pytorch/TensorImplSet.java | 2 +- .../bytedeco/pytorch/TensorImplVector.java | 2 +- .../org/bytedeco/pytorch/TensorIndex.java | 2 +- .../bytedeco/pytorch/TensorIndexArrayRef.java | 2 +- .../bytedeco/pytorch/TensorIndexVector.java | 2 +- .../org/bytedeco/pytorch/TensorIterator.java | 2 +- .../bytedeco/pytorch/TensorIteratorBase.java | 2 +- .../pytorch/TensorIteratorConfig.java | 2 +- .../java/org/bytedeco/pytorch/TensorList.java | 2 +- .../bytedeco/pytorch/TensorListIterator.java | 2 +- 
.../org/bytedeco/pytorch/TensorMaker.java | 2 +- .../bytedeco/pytorch/TensorMaybeOwned.java | 2 +- .../java/org/bytedeco/pytorch/TensorName.java | 2 +- .../org/bytedeco/pytorch/TensorNames.java | 2 +- .../org/bytedeco/pytorch/TensorOptional.java | 2 +- .../pytorch/TensorOptionalArrayRef.java | 2 +- .../TensorOptionalElementReference.java | 2 +- .../bytedeco/pytorch/TensorOptionalList.java | 2 +- .../pytorch/TensorOptionalListIterator.java | 2 +- .../pytorch/TensorOptionalVector.java | 2 +- .../org/bytedeco/pytorch/TensorOptions.java | 2 +- .../java/org/bytedeco/pytorch/TensorType.java | 2 +- .../org/bytedeco/pytorch/TensorVector.java | 2 +- .../pytorch/TensorVectorOptional.java | 2 +- .../java/org/bytedeco/pytorch/TernaryIf.java | 2 +- .../org/bytedeco/pytorch/ThreadIdGuard.java | 2 +- .../pytorch/ThreadLocalDebugInfo.java | 2 +- .../pytorch/ThreadLocalPythonObjects.java | 10 +- .../bytedeco/pytorch/ThreadLocalState.java | 2 +- .../pytorch/ThreadLocalStateGuard.java | 2 +- .../org/bytedeco/pytorch/ThresholdImpl.java | 4 +- .../pytorch/ThresholdImplCloneable.java | 4 +- .../bytedeco/pytorch/ThresholdOptions.java | 2 +- .../gen/java/org/bytedeco/pytorch/Token.java | 2 +- .../pytorch/TorchDispatchModeTLS.java | 38 +- .../bytedeco/pytorch/TraceableFunction.java | 2 +- .../pytorch/TransformerDecoderImpl.java | 4 +- .../TransformerDecoderImplCloneable.java | 4 +- .../pytorch/TransformerDecoderLayerImpl.java | 4 +- .../TransformerDecoderLayerImplCloneable.java | 4 +- .../TransformerDecoderLayerOptions.java | 2 +- .../pytorch/TransformerDecoderOptions.java | 2 +- .../pytorch/TransformerEncoderImpl.java | 4 +- .../TransformerEncoderImplCloneable.java | 4 +- .../pytorch/TransformerEncoderLayerImpl.java | 4 +- .../TransformerEncoderLayerImplCloneable.java | 4 +- .../TransformerEncoderLayerOptions.java | 2 +- .../pytorch/TransformerEncoderOptions.java | 2 +- .../org/bytedeco/pytorch/TransformerImpl.java | 4 +- .../pytorch/TransformerImplCloneable.java | 4 +- .../bytedeco/pytorch/TransformerOptions.java | 2 +- .../gen/java/org/bytedeco/pytorch/Tree.java | 2 +- .../java/org/bytedeco/pytorch/TreeRef.java | 2 +- .../bytedeco/pytorch/TreeRefStringMap.java | 2 +- .../java/org/bytedeco/pytorch/TreeView.java | 2 +- .../pytorch/TripletMarginLossImpl.java | 4 +- .../TripletMarginLossImplCloneable.java | 4 +- .../pytorch/TripletMarginLossOptions.java | 2 +- .../TripletMarginWithDistanceLossImpl.java | 4 +- ...etMarginWithDistanceLossImplCloneable.java | 4 +- .../TripletMarginWithDistanceLossOptions.java | 2 +- .../gen/java/org/bytedeco/pytorch/Tuple.java | 2 +- .../org/bytedeco/pytorch/TupleElements.java | 2 +- .../org/bytedeco/pytorch/TupleLiteral.java | 2 +- .../java/org/bytedeco/pytorch/TuplePtr.java | 4 +- .../java/org/bytedeco/pytorch/TupleType.java | 2 +- .../gen/java/org/bytedeco/pytorch/Type.java | 4 +- .../org/bytedeco/pytorch/TypeArrayRef.java | 2 +- .../java/org/bytedeco/pytorch/TypeEnv.java | 2 +- .../java/org/bytedeco/pytorch/TypeError.java | 2 +- .../org/bytedeco/pytorch/TypeIdentifier.java | 2 +- .../java/org/bytedeco/pytorch/TypeMeta.java | 2 +- .../bytedeco/pytorch/TypeMetaOptional.java | 2 +- .../org/bytedeco/pytorch/TypePtrOptional.java | 2 +- .../java/org/bytedeco/pytorch/TypeVector.java | 2 +- .../java/org/bytedeco/pytorch/UnaryOp.java | 2 +- .../bytedeco/pytorch/UndefinedTensorImpl.java | 2 +- .../org/bytedeco/pytorch/UnflattenImpl.java | 4 +- .../pytorch/UnflattenImplCloneable.java | 4 +- .../bytedeco/pytorch/UnflattenOptions.java | 2 +- .../java/org/bytedeco/pytorch/UnfoldImpl.java | 4 +- 
.../bytedeco/pytorch/UnfoldImplCloneable.java | 4 +- .../org/bytedeco/pytorch/UnfoldOptions.java | 2 +- .../java/org/bytedeco/pytorch/UnionType.java | 2 +- .../org/bytedeco/pytorch/UniqueVoidPtr.java | 2 +- .../java/org/bytedeco/pytorch/Unpickler.java | 2 +- .../org/bytedeco/pytorch/UpsampleImpl.java | 4 +- .../pytorch/UpsampleImplCloneable.java | 4 +- .../org/bytedeco/pytorch/UpsampleMode.java | 14 +- .../org/bytedeco/pytorch/UpsampleOptions.java | 2 +- .../gen/java/org/bytedeco/pytorch/Use.java | 2 +- .../gen/java/org/bytedeco/pytorch/Value.java | 4 +- .../org/bytedeco/pytorch/ValueArrayRef.java | 2 +- .../java/org/bytedeco/pytorch/ValueError.java | 2 +- .../org/bytedeco/pytorch/ValueOptional.java | 2 +- .../org/bytedeco/pytorch/ValueValueMap.java | 2 +- .../org/bytedeco/pytorch/ValueVector.java | 2 +- .../java/org/bytedeco/pytorch/ValueWrap.java | 2 +- .../gen/java/org/bytedeco/pytorch/Var.java | 2 +- .../java/org/bytedeco/pytorch/VarMaybe.java | 2 +- .../pytorch/VariableHooksInterface.java | 2 +- .../org/bytedeco/pytorch/VariableInfo.java | 2 +- .../org/bytedeco/pytorch/VariableVersion.java | 2 +- .../java/org/bytedeco/pytorch/WarnAlways.java | 2 +- .../java/org/bytedeco/pytorch/Warning.java | 16 +- .../org/bytedeco/pytorch/WarningHandler.java | 2 +- .../bytedeco/pytorch/WarningHandlerGuard.java | 2 +- .../org/bytedeco/pytorch/WarningVariant.java | 38 + .../java/org/bytedeco/pytorch/WeakIValue.java | 2 +- .../pytorch/WeakOrStrongCompilationUnit.java | 2 +- .../bytedeco/pytorch/WeakOrStrongTypePtr.java | 2 +- .../org/bytedeco/pytorch/WeakStorage.java | 2 +- .../bytedeco/pytorch/WeakStorageVector.java | 2 +- .../pytorch/WeakStorageVectorOptional.java | 2 +- .../org/bytedeco/pytorch/WeakTypePtr.java | 2 +- .../gen/java/org/bytedeco/pytorch/While.java | 2 +- .../gen/java/org/bytedeco/pytorch/With.java | 2 +- .../java/org/bytedeco/pytorch/WithItem.java | 2 +- .../org/bytedeco/pytorch/WithItemList.java | 2 +- .../pytorch/WithItemListIterator.java | 2 +- .../bytedeco/pytorch/WriteableTensorData.java | 2 +- .../org/bytedeco/pytorch/XPUHooksArgs.java | 2 +- .../bytedeco/pytorch/XPUHooksInterface.java | 10 +- .../org/bytedeco/pytorch/ZeroPad1dImpl.java | 4 +- .../bytedeco/pytorch/ZeroPad1dImplBase.java | 4 +- .../pytorch/ZeroPad1dImplCloneable.java | 4 +- .../bytedeco/pytorch/ZeroPad1dOptions.java | 2 +- .../org/bytedeco/pytorch/ZeroPad2dImpl.java | 4 +- .../bytedeco/pytorch/ZeroPad2dImplBase.java | 4 +- .../pytorch/ZeroPad2dImplCloneable.java | 4 +- .../bytedeco/pytorch/ZeroPad2dOptions.java | 2 +- .../org/bytedeco/pytorch/ZeroPad3dImpl.java | 4 +- .../bytedeco/pytorch/ZeroPad3dImplBase.java | 4 +- .../pytorch/ZeroPad3dImplCloneable.java | 4 +- .../bytedeco/pytorch/ZeroPad3dOptions.java | 2 +- .../bytedeco/pytorch/attribute_iterator.java | 2 +- .../org/bytedeco/pytorch/attribute_list.java | 2 +- .../gen/java/org/bytedeco/pytorch/bits16.java | 2 +- .../java/org/bytedeco/pytorch/bits1x8.java | 2 +- .../java/org/bytedeco/pytorch/bits2x4.java | 2 +- .../java/org/bytedeco/pytorch/bits4x2.java | 2 +- .../gen/java/org/bytedeco/pytorch/bits8.java | 2 +- .../gen/java/org/bytedeco/pytorch/bitset.java | 2 +- .../org/bytedeco/pytorch/buffer_iterator.java | 2 +- .../org/bytedeco/pytorch/buffer_list.java | 2 +- .../java/org/bytedeco/pytorch/crc64_t.java | 2 +- .../pytorch/cuda/ActivationDescriptor.java | 4 +- .../bytedeco/pytorch/cuda/AllocatorState.java | 4 +- .../ApproximateClockToUnixTimeConverter.java | 66 + .../org/bytedeco/pytorch/cuda/BlockInfo.java | 4 +- .../pytorch/cuda/CTCLossDescriptor.java | 4 
+- .../bytedeco/pytorch/cuda/CUDAAllocator.java | 16 +- .../org/bytedeco/pytorch/cuda/CUDAGuard.java | 4 +- .../pytorch/cuda/CUDAKernelLaunchInfo.java | 4 +- .../cuda/CUDAKernelLaunchInfoVector.java | 4 +- .../cuda/CUDAKernelLaunchRegistry.java | 4 +- .../pytorch/cuda/CUDAMultiStreamGuard.java | 4 +- .../org/bytedeco/pytorch/cuda/CUDAStream.java | 4 +- .../pytorch/cuda/CUDAStreamArrayRef.java | 4 +- .../cuda/CUDAStreamCaptureModeGuard.java | 4 +- .../pytorch/cuda/CUDAStreamGuard.java | 4 +- .../pytorch/cuda/CheckpointDelta.java | 4 +- .../org/bytedeco/pytorch/cuda/Constant.java | 4 +- .../pytorch/cuda/ConvolutionDescriptor.java | 4 +- .../org/bytedeco/pytorch/cuda/CuDNNError.java | 4 +- .../pytorch/cuda/DeviceAssertionData.java | 4 +- .../pytorch/cuda/DeviceAssertionsData.java | 4 +- .../cuda/DeviceAssertionsDataVector.java | 4 +- ...aVectorCUDAKernelLaunchInfoVectorPair.java | 4 +- .../bytedeco/pytorch/cuda/DeviceStats.java | 4 +- .../pytorch/cuda/DropoutDescriptor.java | 4 +- .../pytorch/cuda/FilterDescriptor.java | 4 +- .../org/bytedeco/pytorch/cuda/PointerSet.java | 4 +- .../bytedeco/pytorch/cuda/RNNDescriptor.java | 4 +- .../bytedeco/pytorch/cuda/SegmentInfo.java | 4 +- .../bytedeco/pytorch/cuda/SnapshotInfo.java | 4 +- .../cuda/SpatialTransformerDescriptor.java | 4 +- .../java/org/bytedeco/pytorch/cuda/Stat.java | 17 +- .../pytorch/cuda/TensorDescriptor.java | 4 +- .../org/bytedeco/pytorch/cuda/TraceEntry.java | 34 +- .../pytorch/cuda/TraceEntryVector.java | 4 +- .../bytedeco/pytorch/cuda/trace_time_.java | 45 + .../org/bytedeco/pytorch/global/torch.java | 3872 ++++++++++------- .../bytedeco/pytorch/global/torch_cuda.java | 243 +- .../org/bytedeco/pytorch/graph_node_list.java | 2 +- .../pytorch/graph_node_list_iterator.java | 2 +- .../gen/java/org/bytedeco/pytorch/kArea.java | 2 +- .../java/org/bytedeco/pytorch/kBatchMean.java | 2 +- .../java/org/bytedeco/pytorch/kBicubic.java | 2 +- .../java/org/bytedeco/pytorch/kBilinear.java | 2 +- .../java/org/bytedeco/pytorch/kBorder.java | 2 +- .../java/org/bytedeco/pytorch/kCircular.java | 2 +- .../java/org/bytedeco/pytorch/kConstant.java | 2 +- .../java/org/bytedeco/pytorch/kConv1D.java | 2 +- .../java/org/bytedeco/pytorch/kConv2D.java | 2 +- .../java/org/bytedeco/pytorch/kConv3D.java | 2 +- .../bytedeco/pytorch/kConvTranspose1D.java | 2 +- .../bytedeco/pytorch/kConvTranspose2D.java | 2 +- .../bytedeco/pytorch/kConvTranspose3D.java | 2 +- .../gen/java/org/bytedeco/pytorch/kFanIn.java | 2 +- .../java/org/bytedeco/pytorch/kFanOut.java | 2 +- .../gen/java/org/bytedeco/pytorch/kGELU.java | 2 +- .../gen/java/org/bytedeco/pytorch/kGRU.java | 2 +- .../gen/java/org/bytedeco/pytorch/kLSTM.java | 2 +- .../java/org/bytedeco/pytorch/kLeakyReLU.java | 2 +- .../java/org/bytedeco/pytorch/kLinear.java | 2 +- .../gen/java/org/bytedeco/pytorch/kMax.java | 2 +- .../gen/java/org/bytedeco/pytorch/kMean.java | 2 +- .../gen/java/org/bytedeco/pytorch/kMish.java | 2 +- .../java/org/bytedeco/pytorch/kNearest.java | 2 +- .../org/bytedeco/pytorch/kNearestExact.java | 2 +- .../gen/java/org/bytedeco/pytorch/kNone.java | 2 +- .../java/org/bytedeco/pytorch/kRNN_RELU.java | 2 +- .../java/org/bytedeco/pytorch/kRNN_TANH.java | 2 +- .../gen/java/org/bytedeco/pytorch/kReLU.java | 2 +- .../java/org/bytedeco/pytorch/kReflect.java | 2 +- .../org/bytedeco/pytorch/kReflection.java | 2 +- .../java/org/bytedeco/pytorch/kReplicate.java | 2 +- .../gen/java/org/bytedeco/pytorch/kSame.java | 2 +- .../gen/java/org/bytedeco/pytorch/kSiLU.java | 2 +- 
.../java/org/bytedeco/pytorch/kSigmoid.java | 2 +- .../gen/java/org/bytedeco/pytorch/kSum.java | 2 +- .../gen/java/org/bytedeco/pytorch/kTanh.java | 2 +- .../java/org/bytedeco/pytorch/kTrilinear.java | 2 +- .../gen/java/org/bytedeco/pytorch/kValid.java | 2 +- .../gen/java/org/bytedeco/pytorch/kZeros.java | 2 +- .../org/bytedeco/pytorch/module_iterator.java | 2 +- .../org/bytedeco/pytorch/module_list.java | 2 +- .../bytedeco/pytorch/mt19937_data_pod.java | 2 +- .../org/bytedeco/pytorch/mt19937_engine.java | 3 +- .../pytorch/named_attribute_iterator.java | 2 +- .../pytorch/named_attribute_list.java | 2 +- .../pytorch/named_buffer_iterator.java | 2 +- .../bytedeco/pytorch/named_buffer_list.java | 2 +- .../pytorch/named_module_iterator.java | 2 +- .../bytedeco/pytorch/named_module_list.java | 2 +- .../pytorch/named_parameter_iterator.java | 2 +- .../pytorch/named_parameter_list.java | 2 +- .../bytedeco/pytorch/parameter_iterator.java | 2 +- .../org/bytedeco/pytorch/parameter_list.java | 2 +- .../org/bytedeco/pytorch/pretty_tree.java | 2 +- .../gen/java/org/bytedeco/pytorch/qint32.java | 2 +- .../gen/java/org/bytedeco/pytorch/qint8.java | 2 +- .../java/org/bytedeco/pytorch/quint2x4.java | 2 +- .../java/org/bytedeco/pytorch/quint4x2.java | 2 +- .../gen/java/org/bytedeco/pytorch/quint8.java | 2 +- .../java/org/bytedeco/pytorch/type_index.java | 2 +- .../pytorch/TransformerActivation.java | 6 +- .../cuda/functions/AllocatorTraceTracker.java | 32 + .../cuda/functions/OutOfMemoryObserver.java | 29 + .../bytedeco/pytorch/functions/ObjLoader.java | 33 + .../org/bytedeco/pytorch/presets/torch.java | 114 +- .../bytedeco/pytorch/presets/torch_cuda.java | 7 +- .../pytorch/presets/torch_cuda_include.h | 2 + .../bytedeco/pytorch/presets/torch_include.h | 59 +- 1469 files changed, 6200 insertions(+), 3655 deletions(-) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPair.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPairOptional.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRecordIterator.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DistError.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DistNetworkError.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DistStoreError.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ErrorAlwaysShowCppStacktrace.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e4m3fnuz.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e5m2fnuz.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/IPUHooksArgs.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/IPUHooksInterface.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MzZipReaderIterWrapper.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ObjPtr.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Object.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReadAdapterInterfaceVector.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObjectOptional.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_SafePyObjectTorchDispatchModeKey_T.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TagVector.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/WarningVariant.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ApproximateClockToUnixTimeConverter.java create mode 100644 
pytorch/src/gen/java/org/bytedeco/pytorch/cuda/trace_time_.java
 create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/cuda/functions/AllocatorTraceTracker.java
 create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/cuda/functions/OutOfMemoryObserver.java
 create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/ObjLoader.java

diff --git a/pytorch/README.md b/pytorch/README.md
index cbdb732e2e2..0a90dd2002c 100644
--- a/pytorch/README.md
+++ b/pytorch/README.md
@@ -9,7 +9,7 @@ Introduction
 ------------
 This directory contains the JavaCPP Presets module for:

- * PyTorch 2.1.2 https://pytorch.org/
+ * PyTorch 2.2.0 https://pytorch.org/

 Please refer to the parent README.md file for more detailed information
 about the JavaCPP Presets.
@@ -40,7 +40,7 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic
     4.0.0
     org.bytedeco.pytorch
     simplemnist
-    1.5.10
+    1.5.11-SNAPSHOT
     SimpleMNIST
@@ -48,14 +48,14 @@
       org.bytedeco
       pytorch-platform
-      2.1.2-1.5.10
+      2.2.0-1.5.11-SNAPSHOT

       org.bytedeco
       pytorch-platform-gpu
-      2.1.2-1.5.10
+      2.2.0-1.5.11-SNAPSHOT
diff --git a/pytorch/cppbuild.sh b/pytorch/cppbuild.sh
index 8a3803f1fe7..ceecc5924a7 100755
--- a/pytorch/cppbuild.sh
+++ b/pytorch/cppbuild.sh
@@ -35,7 +35,7 @@ if [[ $PLATFORM == windows* ]]; then
     export PYTHON_BIN_PATH=$(which python.exe)
 fi

-PYTORCH_VERSION=2.1.2
+PYTORCH_VERSION=2.2.0
 export PYTORCH_BUILD_VERSION="$PYTORCH_VERSION"
 export PYTORCH_BUILD_NUMBER=1
diff --git a/pytorch/platform/gpu/pom.xml b/pytorch/platform/gpu/pom.xml
index a2535ca1d46..a6d719c9d43 100644
--- a/pytorch/platform/gpu/pom.xml
+++ b/pytorch/platform/gpu/pom.xml
@@ -12,7 +12,7 @@
     org.bytedeco
     pytorch-platform-gpu
-    2.1.2-${project.parent.version}
+    2.2.0-${project.parent.version}
     JavaCPP Presets Platform GPU for PyTorch
diff --git a/pytorch/platform/pom.xml b/pytorch/platform/pom.xml
index d57f42b7b32..7be6c3a7ca7 100644
--- a/pytorch/platform/pom.xml
+++ b/pytorch/platform/pom.xml
@@ -12,7 +12,7 @@
     org.bytedeco
     pytorch-platform
-    2.1.2-${project.parent.version}
+    2.2.0-${project.parent.version}
     JavaCPP Presets Platform for PyTorch
diff --git a/pytorch/pom.xml b/pytorch/pom.xml
index 6501ca42c37..676d8a70fb1 100644
--- a/pytorch/pom.xml
+++ b/pytorch/pom.xml
@@ -11,7 +11,7 @@
     org.bytedeco
     pytorch
-    2.1.2-${project.parent.version}
+    2.2.0-${project.parent.version}
     JavaCPP Presets for PyTorch
diff --git a/pytorch/samples/pom.xml b/pytorch/samples/pom.xml
index 78b9db77337..6ed2a5735d4 100644
--- a/pytorch/samples/pom.xml
+++ b/pytorch/samples/pom.xml
@@ -12,14 +12,14 @@
       org.bytedeco
       pytorch-platform
-      2.1.2-1.5.11-SNAPSHOT
+      2.2.0-1.5.11-SNAPSHOT

       org.bytedeco
       pytorch-platform-gpu
-      2.1.2-1.5.11-SNAPSHOT
+      2.2.0-1.5.11-SNAPSHOT
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ASMoutput.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ASMoutput.java
index 84d6a228a2c..16a89281a22 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ASMoutput.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ASMoutput.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE

 package org.bytedeco.pytorch;

diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTypeSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTypeSet.java
index 6e529401db3..a439f4848b5 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTypeSet.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTypeSet.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE

 package org.bytedeco.pytorch;

diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java
index 46007a84e6e..17d93d7c548 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE

 package org.bytedeco.pytorch;

diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java
index 029d51cd26f..04ff7eeb15d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE

 package org.bytedeco.pytorch;

diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java
index 9d3b5676e73..359ee78b6c3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE

 package org.bytedeco.pytorch;

@@ -21,14 +21,16 @@
 @Namespace("torch::optim") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
 public class AdagradParamState extends OptimizerCloneableAdagradParamState {
   static { Loader.load(); }
-  /** Default native constructor. */
-  public AdagradParamState() { super((Pointer)null); allocate(); }
   /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
   public AdagradParamState(Pointer p) { super(p); }
-  @UniquePtr @Name("std::make_unique") private native void allocate();

   public native @ByRef @NoException(true) Tensor sum();
   public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer step();
+  public AdagradParamState() { super((Pointer)null); allocate(); }
+  @UniquePtr @Name("std::make_unique") private native void allocate();
+  public AdagradParamState(@Const @ByRef AdagradParamState arg0) { super((Pointer)null); allocate(arg0); }
+  @UniquePtr @Name("std::make_unique") private native void allocate(@Const @ByRef AdagradParamState arg0);
+  public native @ByRef @Name("operator =") AdagradParamState put(@Const @ByRef AdagradParamState arg0);

   private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java
index ecdf5711627..90a85d786dd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE

 package org.bytedeco.pytorch;

diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java
index cf89f8ff15f..a466026a740 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE

 package org.bytedeco.pytorch;

diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java
index 8e566b180af..d944ca047fe 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE

 package org.bytedeco.pytorch;

diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java
index 4dc34d7b275..89fde2d5d67 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE

 package org.bytedeco.pytorch;

diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java
index e270d83e7a7..e80fb9128bd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE

 package org.bytedeco.pytorch;

diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java
index 39b37dad975..16efed25070 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java index 171508fb216..226e1b7a262 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -46,7 +46,7 @@ private native void allocate( public AdaptiveAvgPool1dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public AdaptiveAvgPool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java index 123b07224a1..a66fcdd7661 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -28,7 +28,7 @@ public class AdaptiveAvgPool1dImplBase extends AdaptiveAvgPool1dImplCloneable { public AdaptiveAvgPool1dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public AdaptiveAvgPool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast,torch::nn::AdaptiveAvgPool1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast,torch::nn::AdaptiveAvgPool1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AdaptiveAvgPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size) { super((Pointer)null); allocate(output_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java index ce04804f4ca..01cdc15049f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AdaptiveAvgPool1dImplCloneable extends Module { public AdaptiveAvgPool1dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public AdaptiveAvgPool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveAvgPool1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dOptions.java index 57172086f26..d5c58a579ad 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java index 2b009f7768e..1801896a8f0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -46,7 +46,7 @@ private native void allocate( public AdaptiveAvgPool2dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public AdaptiveAvgPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java index a3570edd8a4..35587ecaa9c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AdaptiveAvgPool2dImplBase extends AdaptiveAvgPool2dImplCloneable { public AdaptiveAvgPool2dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public AdaptiveAvgPool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast,torch::nn::AdaptiveAvgPool2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast,torch::nn::AdaptiveAvgPool2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AdaptiveAvgPool2dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java index 747751d4800..bfb4afc7800 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AdaptiveAvgPool2dImplCloneable extends Module { public AdaptiveAvgPool2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public AdaptiveAvgPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveAvgPool2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dOptions.java index 45355287d28..e51d5d8c1fc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java index d0d29ed2285..34562039864 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -46,7 +46,7 @@ private native void allocate( public AdaptiveAvgPool3dImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public AdaptiveAvgPool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java index b16d3fbf583..4c3fa104e86 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AdaptiveAvgPool3dImplBase extends AdaptiveAvgPool3dImplCloneable { public AdaptiveAvgPool3dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public AdaptiveAvgPool3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast,torch::nn::AdaptiveAvgPool3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast,torch::nn::AdaptiveAvgPool3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AdaptiveAvgPool3dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java index 330171a4ba9..4f04de525fd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AdaptiveAvgPool3dImplCloneable extends Module { public AdaptiveAvgPool3dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public AdaptiveAvgPool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveAvgPool3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dOptions.java index 56437ae5e40..71020055604 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java index b12e2ae4485..7bb4db72f32 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -43,7 +43,7 @@ public class AdaptiveLogSoftmaxWithLossImpl extends AdaptiveLogSoftmaxWithLossIm public AdaptiveLogSoftmaxWithLossImpl(Pointer p) { super(p); } /** Downcast constructor. */ public AdaptiveLogSoftmaxWithLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public AdaptiveLogSoftmaxWithLossImpl( @Cast("int64_t") long in_features, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java index cfaaefc7e8f..457b439a882 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AdaptiveLogSoftmaxWithLossImplCloneable extends Module { public AdaptiveLogSoftmaxWithLossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public AdaptiveLogSoftmaxWithLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveLogSoftmaxWithLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossOptions.java index a843ebe4099..ca1256eafa8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java index b8dd432312c..7e67b0b29db 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -46,7 +46,7 @@ private native void allocate( public AdaptiveMaxPool1dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public AdaptiveMaxPool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java index d2f0afcfd76..5ead4bdba58 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -28,7 +28,7 @@ public class AdaptiveMaxPool1dImplBase extends AdaptiveMaxPool1dImplCloneable { public AdaptiveMaxPool1dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public AdaptiveMaxPool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast,torch::nn::AdaptiveMaxPool1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast,torch::nn::AdaptiveMaxPool1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AdaptiveMaxPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size) { super((Pointer)null); allocate(output_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java index dccc7d084e7..9671ef0ec88 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AdaptiveMaxPool1dImplCloneable extends Module { public AdaptiveMaxPool1dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public AdaptiveMaxPool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveMaxPool1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dOptions.java index 4480c46ff09..0b41b14b6ee 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java index 8bfd16b6ff7..ae1ecc328e7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -46,7 +46,7 @@ private native void allocate( public AdaptiveMaxPool2dImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public AdaptiveMaxPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java index fd1f6d6ffea..af355086439 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AdaptiveMaxPool2dImplBase extends AdaptiveMaxPool2dImplCloneable { public AdaptiveMaxPool2dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public AdaptiveMaxPool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast,torch::nn::AdaptiveMaxPool2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast,torch::nn::AdaptiveMaxPool2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AdaptiveMaxPool2dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java index b51e5be3831..b9e057d7585 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AdaptiveMaxPool2dImplCloneable extends Module { public AdaptiveMaxPool2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public AdaptiveMaxPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveMaxPool2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dOptions.java index e32c7c6fffa..2b9b8f5b633 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java index 333827b96b3..891b7d7323e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -46,7 +46,7 @@ private native void allocate( public AdaptiveMaxPool3dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public AdaptiveMaxPool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java index 71d0824d229..d9176dff49d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AdaptiveMaxPool3dImplBase extends AdaptiveMaxPool3dImplCloneable { public AdaptiveMaxPool3dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public AdaptiveMaxPool3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast,torch::nn::AdaptiveMaxPool3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast,torch::nn::AdaptiveMaxPool3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AdaptiveMaxPool3dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java index 106fe1c0276..f709df5b1d1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AdaptiveMaxPool3dImplCloneable extends Module { public AdaptiveMaxPool3dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public AdaptiveMaxPool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveMaxPool3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dOptions.java index e665177151d..d52a037fca0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasDb.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasDb.java index 7ee28a83b82..8595ece7434 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasDb.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasDb.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfo.java index 7212812afc5..f8c3f056ccc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfo.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package 
org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfoOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfoOptional.java index 8b8f93a4fd8..c91646faf20 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfoOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfoOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasTypeSetOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasTypeSetOptional.java index 8b3053d5eeb..0e80109be7f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasTypeSetOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasTypeSetOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java index 4512c77c683..db9d39b70e3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutFuncOptions.java index 59ec56c3f36..13a6855649f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java index ca3a3358743..7e3ac0d1aca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -47,7 +47,7 @@ public class AlphaDropoutImpl extends AlphaDropoutImplBase { public AlphaDropoutImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public AlphaDropoutImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java index 92a39bdc090..f02821cceea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -25,7 +25,7 @@ public class AlphaDropoutImplBase extends AlphaDropoutImplCloneable { public AlphaDropoutImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public AlphaDropoutImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AlphaDropoutImplBase(double p) { super((Pointer)null); allocate(p); } private native void allocate(double p); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java index e8c522963fc..4f037cea950 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AlphaDropoutImplCloneable extends Module { public AlphaDropoutImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public AlphaDropoutImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AlphaDropoutImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMetadata.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMetadata.java index db0054fcc7b..78e2a22497e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMetadata.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMetadata.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMode.java index c7d5697b961..c5574cb4df9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassType.java index 7a6bff244b6..31dc3152958 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassTypePtr.java index 49001c520c6..16a3faca35f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumType.java index 0370a159063..d3312cd32db 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumTypePtr.java index f62a3932311..7ff7901fdf4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff 
--git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListType.java index 0edf4663fb7..a039e827418 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListTypePtr.java index 469b7284409..f25a6d33ee6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java index 925538bb388..f5d68f0fad4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModuleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModuleVector.java index edf0c3bb58a..a386153cdd3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModuleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModuleVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleType.java index 8e386c44efd..65d89929367 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleTypePtr.java index ef2b8d737ca..66ff484d421 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyType.java index 08104d0aa10..19a4af193fa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTypePtr.java index 9149717852b..da7ffc8590e 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyValue.java index 9b3fd073dd0..7af1d36c38f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Apply.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Apply.java index 9e8a04351a1..d24b10f427d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Apply.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Apply.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java index 93a000e0fb9..063e9d0b520 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -35,14 +35,14 @@ public class Argument extends Pointer { public Argument( @StdString BytePointer name/*=""*/, - @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type, + @Const @ByRef(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type, @ByVal(nullValue = "c10::optional(c10::nullopt)") IntOptional N, @ByVal(nullValue = "c10::optional(c10::nullopt)") IValueOptional default_value, @Cast("bool") boolean kwarg_only/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, type, N, default_value, kwarg_only, alias_info); } private native void allocate( @StdString BytePointer name/*=""*/, - @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type, + @Const @ByRef(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type, @ByVal(nullValue = "c10::optional(c10::nullopt)") IntOptional N, @ByVal(nullValue = "c10::optional(c10::nullopt)") IValueOptional default_value, @Cast("bool") boolean kwarg_only/*=false*/, @@ -51,14 +51,14 @@ private native void allocate( private native void allocate(); public Argument( @StdString String name/*=""*/, - @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type, + @Const @ByRef(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type, @ByVal(nullValue = "c10::optional(c10::nullopt)") IntOptional N, @ByVal(nullValue = "c10::optional(c10::nullopt)") IValueOptional default_value, @Cast("bool") boolean kwarg_only/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, type, N, default_value, kwarg_only, alias_info); } private native void allocate( @StdString String name/*=""*/, - @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type, + @Const @ByRef(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type, @ByVal(nullValue = 
"c10::optional(c10::nullopt)") IntOptional N, @ByVal(nullValue = "c10::optional(c10::nullopt)") IValueOptional default_value, @Cast("bool") boolean kwarg_only/*=false*/, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentArrayRef.java index eb293a0e46f..ace579e04af 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java index 72a301dd7bc..91235fdfa60 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDefArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDefArrayRef.java index 842dc133162..7d08eb14fa8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDefArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDefArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentInfo.java index 528fb8ab28e..7596de57fbe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentInfo.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpec.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpec.java index be0dfea710a..51f1009575c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpec.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpec.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,6 @@ public class ArgumentSpec extends Pointer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public ArgumentSpec(Pointer p) { super(p); } - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) public ArgumentSpec(@Cast("size_t") long num_flat_tensor_inputs, @Cast("size_t") long num_flat_optional_inputs) { super((Pointer)null); allocate(num_flat_tensor_inputs, num_flat_optional_inputs); } private native void allocate(@Cast("size_t") long num_flat_tensor_inputs, @Cast("size_t") long num_flat_optional_inputs); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecCreator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecCreator.java index 79792f9ace9..04e02c3e52b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecCreator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecCreator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecExecutionPlanMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecExecutionPlanMap.java index 2de1499ad71..c08603e1f7c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecExecutionPlanMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecExecutionPlanMap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Assert.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Assert.java index f9874d40006..abd978952b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Assert.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Assert.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Assign.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Assign.java index fbe86fa4191..b708332cc3c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Assign.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Assign.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AssignList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignList.java index bb7cb9e28c7..68282742cdf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AssignList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListIterator.java index 74e4a1729d8..ad6633bba4b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListMaybe.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListMaybe.java index 
9dd9bf7b94e..1387fb99e9e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListMaybe.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListMaybe.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Attribute.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Attribute.java index 1d494c3321f..27ce7cc23e9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Attribute.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Attribute.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeList.java index 4d92ff89e56..7e44a5c75ba 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeListIterator.java index c9bc0cdc345..86f3a2a863f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeListIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeListIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributePolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributePolicy.java index ca45796e4cf..318e6081a01 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributePolicy.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributePolicy.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeValue.java index 2dc24beb629..353b3fbda30 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssign.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssign.java index a4bebe9a45c..ce606b315ce 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssign.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssign.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssignKind.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssignKind.java index 5566d45616f..31fb6cdf990 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssignKind.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssignKind.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowADInplaceOrView.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowADInplaceOrView.java index f69107ef272..5aeb3816687 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowADInplaceOrView.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowADInplaceOrView.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowAutograd.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowAutograd.java index 97dea63b69a..b7e076813aa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowAutograd.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowAutograd.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchSkipFunctionalize.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchSkipFunctionalize.java index 789d37b0b06..4b1c2d77330 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchSkipFunctionalize.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchSkipFunctionalize.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoFwGradMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoFwGradMode.java index 5ee2386ad7c..a18572b6266 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoFwGradMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoFwGradMode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoGradMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoGradMode.java index b692bada3ce..ceb13776019 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoGradMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoGradMode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoNonVariableTypeMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoNonVariableTypeMode.java index cf6dd3813bc..114e70da000 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoNonVariableTypeMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoNonVariableTypeMode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java index 
bb620bc279c..20d33e18457 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java index 50111da690a..788de523b3f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactory.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactory.java index bd49aea08b2..3581dfb6461 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactory.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactory.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactoryRegisterer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactoryRegisterer.java index 90dda80c8ac..74e14258664 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactoryRegisterer.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactoryRegisterer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaInterface.java index 6c1246f2256..368bb234eab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaInterface.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java index 2277dc45739..de586a29db0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java index 592df1cf1ca..f231dfe7123 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -44,7 +44,7 @@ public class AvgPool1dImpl extends AvgPool1dImplBase { public AvgPool1dImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public AvgPool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java index c90409abc1e..1c3d2b8adf9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -26,7 +26,7 @@ public class AvgPool1dImplBase extends AvgPool1dImplCloneable { public AvgPool1dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public AvgPool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AvgPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java index 3dd5a203305..8b0e77dbeb2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AvgPool1dImplCloneable extends Module { public AvgPool1dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
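(Note on the recurring edit in these hunks: every generated downcast constructor now binds SHARED_PTR_NAMESPACE::static_pointer_cast instead of dynamic_pointer_cast. static_pointer_cast performs no runtime type check, so the downcast becomes cheaper, but a Module argument that does not actually wrap the target type is no longer caught: where dynamic_pointer_cast yields a null shared_ptr on mismatch, static_pointer_cast is undefined behavior. A minimal caller-side sketch, assuming `pool` is an AvgPool1dImpl constructed elsewhere and using only methods visible in this patch:

    Module erased = pool.asModule();                  // upcast to the generic Module handle
    AvgPool1dImpl typed = new AvgPool1dImpl(erased);  // downcast constructor
    // As of this patch the native side uses static_pointer_cast, so no runtime
    // check happens: `erased` must genuinely wrap an AvgPool1dImpl. Under the
    // previous dynamic_pointer_cast, a mismatch would have produced a null pointer.
)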
*/ public AvgPool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AvgPool1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dOptions.java index 55fef6eb6b0..e816c99a187 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java index f5b1781da04..4a88680e572 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -44,7 +44,7 @@ public class AvgPool2dImpl extends AvgPool2dImplBase { public AvgPool2dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public AvgPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java index ed7de5fe0d3..6d720bf3018 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AvgPool2dImplBase extends AvgPool2dImplCloneable { public AvgPool2dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public AvgPool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AvgPool2dImplBase(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java index 555819565e7..d13fbf236c3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AvgPool2dImplCloneable extends Module { public AvgPool2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public AvgPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AvgPool2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dOptions.java index b72f0d12429..88055b7067e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java index 99aaa8fb476..599ced93035 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -44,7 +44,7 @@ public class AvgPool3dImpl extends AvgPool3dImplBase { public AvgPool3dImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public AvgPool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java index 2551ed45a34..b4417c31280 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AvgPool3dImplBase extends AvgPool3dImplCloneable { public AvgPool3dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public AvgPool3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AvgPool3dImplBase(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java index dafc95ecc73..2f0f79146b5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class AvgPool3dImplCloneable extends Module { public AvgPool3dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public AvgPool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AvgPool3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dOptions.java index 29db5e0bd4d..42d125c2084 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Await.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Await.java index 4b820db366e..9b8f7335da5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Await.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Await.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -41,9 +41,9 @@ public class Await extends Pointer { - public native @ByVal Type.TypePtr elementType(); + public native @Const @ByRef Type.TypePtr elementType(); - public native @ByVal Type.TypePtr type(); + public native @Const @ByRef Type.TypePtr type(); public native void setArgs(@ByVal IValueVector args); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitPtr.java index 0ba3aad00aa..2aabc38bebb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitPtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitPtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitSingleElementType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitSingleElementType.java index 1101c354673..f39c07a2a75 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitSingleElementType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitSingleElementType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitType.java index 7c8d56de06b..702535a3092 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java index 3223dc8a13a..bdd0545b6e2 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -39,7 +39,7 @@ public class BCELossImpl extends BCELossImplCloneable { public BCELossImpl(Pointer p) { super(p); } /** Downcast constructor. */ public BCELossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public BCELossImpl(@ByVal(nullValue = "torch::nn::BCELossOptions{}") BCELossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::BCELossOptions{}") BCELossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java index 063f64149db..e2443f065e9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class BCELossImplCloneable extends Module { public BCELossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public BCELossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BCELossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossOptions.java index f681fbc29ec..5a32a0cb0a3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java index 6d91eed2a07..4d1c829b806 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -43,7 +43,7 @@ public class BCEWithLogitsLossImpl extends BCEWithLogitsLossImplCloneable { public BCEWithLogitsLossImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public BCEWithLogitsLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public BCEWithLogitsLossImpl(@ByVal(nullValue = "torch::nn::BCEWithLogitsLossOptions{}") BCEWithLogitsLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::BCEWithLogitsLossOptions{}") BCEWithLogitsLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java index f7221be4683..92efee160fb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class BCEWithLogitsLossImplCloneable extends Module { public BCEWithLogitsLossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public BCEWithLogitsLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BCEWithLogitsLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossOptions.java index 7a92effefd9..27b96ac34f0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java index 32738efa3c8..f7e51e10ccc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16ArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16ArrayRef.java index c7a062a6f2c..a97a23d3891 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16ArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16ArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff 
--git a/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMeta.java index 20140131fcf..f7a8ea37cf4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMeta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMeta.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMetaRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMetaRef.java index 4e188155a93..3846c176118 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMetaRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMetaRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java index 181677e4519..2c72020726c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -45,6 +45,6 @@ public class BatchNorm1dImpl extends BatchNorm1dImplBase { public BatchNorm1dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public BatchNorm1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java index d0ed378620f..b068097725a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -26,7 +26,7 @@ public class BatchNorm1dImplBase extends BatchNorm1dImplBaseBase { public BatchNorm1dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public BatchNorm1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java index ef7145767ef..12a05a38e11 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -27,7 +27,7 @@ public class BatchNorm1dImplBaseBase extends BatchNorm1dImplCloneable { public BatchNorm1dImplBaseBase(Pointer p) { super(p); } /** Downcast constructor. */ public BatchNorm1dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java index 756595fd2a4..e79f157841f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class BatchNorm1dImplCloneable extends Module { public BatchNorm1dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public BatchNorm1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BatchNorm1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java index 0abe72a063f..ad69547e5f1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -45,6 +45,6 @@ public class BatchNorm2dImpl extends BatchNorm2dImplBase { public BatchNorm2dImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public BatchNorm2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java index e1f19727684..975b4b6ac52 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class BatchNorm2dImplBase extends BatchNorm2dImplBaseBase { public BatchNorm2dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public BatchNorm2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java index 5be55e6ee8b..1f679f5d67b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class BatchNorm2dImplBaseBase extends BatchNorm2dImplCloneable { public BatchNorm2dImplBaseBase(Pointer p) { super(p); } /** Downcast constructor. */ public BatchNorm2dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java index fb370b26601..fae34c5a3a3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class BatchNorm2dImplCloneable extends Module { public BatchNorm2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public BatchNorm2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BatchNorm2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java index 4d0d760077e..9c4bc5ce1ab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -45,6 +45,6 @@ public class BatchNorm3dImpl extends BatchNorm3dImplBase { public BatchNorm3dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public BatchNorm3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java index 5ab44850df8..ef80d4b6a86 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class BatchNorm3dImplBase extends BatchNorm3dImplBaseBase { public BatchNorm3dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public BatchNorm3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java index bcd29083a49..e78392d0ea3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class BatchNorm3dImplBaseBase extends BatchNorm3dImplCloneable { public BatchNorm3dImplBaseBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public BatchNorm3dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java index ffc83e68a80..4226bbbeb76 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class BatchNorm3dImplCloneable extends Module { public BatchNorm3dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public BatchNorm3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BatchNorm3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormFuncOptions.java index 3fc5835d2a2..82a33aee581 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormOptions.java index cdf2c8b59e6..d2ebc2c0b58 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSize.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSize.java index 0eb92ed468a..b0b0957520b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSize.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSize.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeOptional.java index e9aa51bb0bd..526bfd696df 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeOptional.java @@ 
-1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -22,14 +22,12 @@ public class BatchSizeOptional extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchSizeOptional(Pointer p) { super(p); } - public BatchSizeOptional(BatchSize value) { this(); put(value); } public BatchSizeOptional() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef BatchSizeOptional put(@ByRef BatchSizeOptional x); + public native boolean has_value(); public native void reset(); public native @Name("value") @ByRef BatchSize get(); - @ValueSetter public native BatchSizeOptional put(@ByRef BatchSize value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeSampler.java index 7562f59135c..a87bb33cff1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeSampler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java index de37e8ec00d..b0ee161545a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class BilinearImpl extends BilinearImplCloneable { public BilinearImpl(Pointer p) { super(p); } /** Downcast constructor. */ public BilinearImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public BilinearImpl(@Cast("int64_t") long in1_features, @Cast("int64_t") long in2_features, @Cast("int64_t") long out_features) { super((Pointer)null); allocate(in1_features, in2_features, out_features); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long in1_features, @Cast("int64_t") long in2_features, @Cast("int64_t") long out_features); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java index 59515659c54..fcecb842634 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class BilinearImplCloneable extends Module { public BilinearImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
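(The BatchSizeOptional hunk above removes the value constructor and the put(...) setters and exposes has_value() instead, leaving a read-only optional wrapper: from Java it can only be created empty or received from native code, and must be tested before get(). A sketch restricted to the methods that remain after this change:

    BatchSizeOptional maybe = new BatchSizeOptional();  // constructed empty
    if (maybe.has_value()) {                            // test before calling get()
        BatchSize size = maybe.get();                   // only valid while a value is present
    }
    maybe.reset();                                      // drops any held value
)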
*/ public BilinearImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BilinearImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearOptions.java index 962810e59fc..d438dfc0b6d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BinOp.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BinOp.java index dc8d4a631bd..9dd22d383c1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BinOp.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BinOp.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java index 0fa752de78c..7e38b95d478 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Block.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Block.java index 83f186bb22e..6528e789332 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Block.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Block.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockArrayRef.java index 6787e14c37c..08f07737a0f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java index 17c5092be4e..42eabf3ad02 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolArrayRef.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolArrayRef.java index 85d85ccb9a5..9830abb5b28 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolOptional.java index 6f12475e5d2..59675b1de16 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolType.java index d41517ad66d..1f3e6816cbd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolTypePtr.java index 123677de131..6cdbdc83f8f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVector.java index 82fb0b65280..1b9db184bef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVectorOptional.java index e01948077fa..5c0ae0ddcc1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVectorOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanElementReference.java index 276f7cb605d..024c78a2095 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanElementReference.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanElementReference.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanList.java index 42ead5f69c4..64488385c2c 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanListIterator.java index 6382600e122..8aac364c51c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanListIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanListIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Break.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Break.java index 15d39839ecb..311a86ff665 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Break.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Break.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BufferPolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BufferPolicy.java index 1ea6aa52c4a..d7eacdbc360 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BufferPolicy.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BufferPolicy.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinFunction.java index 15d1fd3c08d..5f9bf340946 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinFunction.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinFunction.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java index f1381861845..097921b400e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java index 3e63907dc88..49d4a670633 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteOptional.java index 48d5d34f756..829928771c5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteOptional.java @@ -1,4 +1,4 @@ -// 
Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPair.java
new file mode 100644
index 00000000000..ec56895cfd0
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPair.java
@@ -0,0 +1,49 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@NoOffset @Name("std::pair<const char*,const char*>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class BytePointerPair extends Pointer {
+    static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public BytePointerPair(Pointer p) { super(p); }
+    public BytePointerPair(@Cast("const char*") BytePointer firstValue, @Cast("const char*") BytePointer secondValue) { this(); put(firstValue, secondValue); }
+    public BytePointerPair(String firstValue, String secondValue) { this(); put(firstValue, secondValue); }
+    public BytePointerPair() { allocate(); }
+    private native void allocate();
+    public native @Name("operator =") @ByRef BytePointerPair put(@ByRef BytePointerPair x);
+
+
+    @MemberGetter public native @Const @Cast("const char*") BytePointer first(); public native BytePointerPair first(BytePointer first);
+    @MemberGetter public native @Const @Cast("const char*") BytePointer second(); public native BytePointerPair second(BytePointer second);
+    @MemberSetter @Index public native BytePointerPair first(@Const String first);
+    @MemberSetter @Index public native BytePointerPair second(@Const String second);
+
+    public BytePointerPair put(BytePointer firstValue, BytePointer secondValue) {
+        first(firstValue);
+        second(secondValue);
+        return this;
+    }
+
+    public BytePointerPair put(String firstValue, String secondValue) {
+        first(firstValue);
+        second(secondValue);
+        return this;
+    }
+}
+
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPairOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPairOptional.java
new file mode 100644
index 00000000000..6d24b3b1f00
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPairOptional.java
@@ -0,0 +1,35 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@NoOffset @Name("c10::optional<std::pair<const char*,const char*> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class BytePointerPairOptional extends Pointer {
+    static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public BytePointerPairOptional(Pointer p) { super(p); }
+    public BytePointerPairOptional(BytePointerPair value) { this(); put(value); }
+    public BytePointerPairOptional() { allocate(); }
+    private native void allocate();
+    public native @Name("operator =") @ByRef BytePointerPairOptional put(@ByRef BytePointerPairOptional x);
+
+    public native boolean has_value();
+    public native void reset();
+    public native @Name("value") @ByRef BytePointerPair get();
+    @ValueSetter public native BytePointerPairOptional put(@ByRef BytePointerPair value);
+}
+
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerVector.java
index 06b385f5ddf..5127b9f713c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerVector.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerVector.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/C10FlagParser.java b/pytorch/src/gen/java/org/bytedeco/pytorch/C10FlagParser.java
index d3b19a40991..0175cf91c61 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/C10FlagParser.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/C10FlagParser.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java
index 290717ba9bf..7fa9a51abd4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -38,7 +38,7 @@ public class CELUImpl extends CELUImplCloneable {
     public CELUImpl(Pointer p) { super(p); }
     /** Downcast constructor.
*/ public CELUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public CELUImpl(@Const @ByRef(nullValue = "torch::nn::CELUOptions{}") CELUOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::CELUOptions{}") CELUOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java index 07b52334c1d..f5d0401bd36 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class CELUImplCloneable extends Module { public CELUImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public CELUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CELUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUOptions.java index 710dc97ce4d..433be6070af 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java index 1158093d2cc..be3c5e63022 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java index 390fd91cd16..2ca70f1a029 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -39,7 +39,7 @@ public class CTCLossImpl extends CTCLossImplCloneable { public CTCLossImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public CTCLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public CTCLossImpl(@ByVal(nullValue = "torch::nn::CTCLossOptions{}") CTCLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::CTCLossOptions{}") CTCLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java index 08a20833f2b..c543c778517 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class CTCLossImplCloneable extends Module { public CTCLossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public CTCLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CTCLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossOptions.java index 74cec973383..910b3ff8d97 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksArgs.java index 9ca9bad7d8e..bda89287dc0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksArgs.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksArgs.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java index 295097a31ab..a446e505655 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Call.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Call.java index 1d5be1c2781..4b0f13710ad 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/Call.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Call.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleType.java index 539e9559678..6cdc616619f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleTypePtr.java index 608ec1bbb0d..02c266c4cce 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CastValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CastValue.java index 647739f1a5e..485082ee80a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CastValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CastValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java index fcdc318e836..4bd639f1d2a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java index eff51251111..372fe258744 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java index fcfbdf2da89..ceed73e0d0f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java index 9ea3d185a40..a95b3f7fbf2 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java index ad63d2d3d0a..25de5e1c011 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDatasetOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDatasetOptions.java index 44758ffa266..c7c7ea60241 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDatasetOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDatasetOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java index b2b012a886f..d90c8378d8b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java index d044a513c2b..76e2b21ce91 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java index d9e25dddce7..191ed20c628 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorDataset.java index 078d4d22d5e..11471071c9c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java index 
8486c70c0c6..eff35875117 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java
index 7a9d4e4fbc6..d2b3dd7d98e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoader.java
index 195f356edc6..316fa2a369e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoader.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoader.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoaderBase.java
index 8ce879a93a2..771c1076a83 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoaderBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoaderBase.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRecordIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRecordIterator.java
new file mode 100644
index 00000000000..f144159f573
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRecordIterator.java
@@ -0,0 +1,30 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Namespace("caffe2::serialize") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ChunkRecordIterator extends Pointer {
+    static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public ChunkRecordIterator(Pointer p) { super(p); }
+
+
+    // Read at most `chunkSize` into `buf`. Return the number of actual bytes read.
+    public native @Cast("size_t") long next(Pointer buf);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java
index 7c844d8a0c0..098e8c2c591 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedTensorBatchDataset.java
index 5b478247c28..1daed38af62 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedTensorBatchDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedTensorBatchDataset.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java
index 4a99533335f..48649299a16 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulTensorDataset.java
index 3836af1162a..d40d1dc2c25 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulTensorDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulTensorDataset.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java
index 00c7f6e775a..c9c16904e1b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataReader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataReader.java
index 99eff8c5f74..3f165de5493 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataReader.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataReader.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataset.java
index 18e16f1dc9f..bb02ac9f39a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataset.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassAttribute.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassAttribute.java
index b98adebe32b..245e1e947ff 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassAttribute.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassAttribute.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassDef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassDef.java
index 51aef3a4793..4f6e30671a4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassDef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassDef.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java
index 02b16129d9d..8a19ca8e0ee 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -121,8 +121,8 @@ public static class Property extends Pointer {
   // it is unsafe to maintain uses of the old type of the attribute,
   // and any code that works on the attribute is now invalid.
   // Only newly created code is valid again.
-  public native void unsafeChangeAttributeType(@StdString BytePointer name, @ByVal Type.TypePtr new_ty);
-  public native void unsafeChangeAttributeType(@StdString String name, @ByVal Type.TypePtr new_ty);
+  public native void unsafeChangeAttributeType(@StdString BytePointer name, @Const @ByRef Type.TypePtr new_ty);
+  public native void unsafeChangeAttributeType(@StdString String name, @Const @ByRef Type.TypePtr new_ty);
 
   // Add attribute \p NAME if it doesn't exist or verify that it has a
   // compatible type otherwise.
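The `ClassType` hunk above narrows `unsafeChangeAttributeType` to take its `Type.TypePtr` argument as `@Const @ByRef` rather than `@ByVal`, so the underlying shared pointer is passed by const reference instead of being copied on each call; Java call sites keep the same shape. A minimal usage sketch, assuming a `ClassType` and a replacement type obtained elsewhere (the helper methods and the "weight" attribute name below are hypothetical, not part of this patch):

    // Hedged sketch: swap the declared type of an attribute on a ClassType.
    ClassType cls = obtainClassType();        // hypothetical helper, e.g. from a loaded TorchScript module
    Type.TypePtr newTy = obtainReplacement(); // hypothetical helper returning a Type.TypePtr
    // The Java call is identical before and after this hunk; only the native
    // passing convention changed (@ByVal -> @Const @ByRef):
    cls.unsafeChangeAttributeType("weight", newTy);

As the surrounding comment notes, this call is unsafe: uses of the attribute's old type become invalid, and only code created after the change is valid again.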
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassTypePropertyOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassTypePropertyOptional.java index 3c82c6d0b94..0f6ee68e172 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassTypePropertyOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassTypePropertyOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassValue.java index fba8d9c6c95..b740cf551de 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClosureValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClosureValue.java index 170459a01d6..8a2c916eebd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClosureValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClosureValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java index c3f1c66ff72..9c5c559676b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CodeImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CodeImpl.java index 9b2d3926419..56fe902a48c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CodeImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CodeImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java index 758ed102ff0..80bfa8cea8e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompileTimeEmptyString.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompileTimeEmptyString.java index 58d1b1993f2..3b493e6d8cb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompileTimeEmptyString.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CompileTimeEmptyString.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java index eb05330a0b3..778fdee0f15 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexType.java index a169d0a4556..2b4228b06da 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexTypePtr.java index 6132a3fd4a1..40343406921 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Compound.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Compound.java index d6466a8c60e..097c118f750 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Compound.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Compound.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstExpr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstExpr.java index 46511f72af6..9f15c3265af 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstExpr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstExpr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java index 8eb010bd4f8..14b7bb75920 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -44,6 +44,6 @@ public class ConstantPad1dImpl extends ConstantPad1dImplBase { public ConstantPad1dImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ConstantPad1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java index d83d22b646e..3b95292ab83 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -28,7 +28,7 @@ public class ConstantPad1dImplBase extends ConstantPad1dImplCloneable { public ConstantPad1dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public ConstantPad1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConstantPad1dImplBase(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding, double value) { super((Pointer)null); allocate(padding, value); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding, double value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java index 96044b1aaf5..b2393789cff 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ConstantPad1dImplCloneable extends Module { public ConstantPad1dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ConstantPad1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConstantPad1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dOptions.java index c9a7db4b3af..df804685b72 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java index 1c32f9c2986..c7a3370d1ae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -44,6 +44,6 @@ public class ConstantPad2dImpl extends ConstantPad2dImplBase { public ConstantPad2dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public ConstantPad2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java index 2bdeb13265f..16e6e5f4a1b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ConstantPad2dImplBase extends ConstantPad2dImplCloneable { public ConstantPad2dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ConstantPad2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConstantPad2dImplBase(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding, double value) { super((Pointer)null); allocate(padding, value); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding, double value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java index c638b6de067..b6a275de58a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ConstantPad2dImplCloneable extends Module { public ConstantPad2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public ConstantPad2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConstantPad2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dOptions.java index 0a5b8416242..e6c3d2da67e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java index 18eda2cb41e..120b0322198 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -44,6 +44,6 @@ public class ConstantPad3dImpl extends ConstantPad3dImplBase { public ConstantPad3dImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ConstantPad3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java index 151b29fde19..f9802662cb1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ConstantPad3dImplBase extends ConstantPad3dImplCloneable { public ConstantPad3dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public ConstantPad3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConstantPad3dImplBase(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding, double value) { super((Pointer)null); allocate(padding, value); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding, double value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java index 396b5aa4ec1..72fc32ddbc9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ConstantPad3dImplCloneable extends Module { public ConstantPad3dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/
     public ConstantPad3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::Cloneable<torch::nn::ConstantPad3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::ConstantPad3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     @Override public Module asModule() { return asModule(this); }
     @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module,torch::nn::Cloneable<torch::nn::ConstantPad3dImpl>>") Module asModule(@SharedPtr ConstantPad3dImplCloneable pointer);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dOptions.java
index 255633d760b..cadeea7485d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dOptions.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java
index 74e9b651205..74e14d15f19 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantStringPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantStringPtr.java
index e7090d69ab9..96fa79756dc 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantStringPtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantStringPtr.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java
index d3e1dccaaa2..a55f2aa984d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -138,6 +138,8 @@ public class Context extends Pointer {
   public native @Cast("bool") boolean deterministicAlgorithms();
   public native @Cast("bool") boolean deterministicAlgorithmsWarnOnly();
   public native void setDeterministicAlgorithms(@Cast("bool") boolean arg0, @Cast("bool") boolean arg1);
+  public native @Cast("bool") boolean deterministicFillUninitializedMemory();
+  public native void setDeterministicFillUninitializedMemory(@Cast("bool") boolean arg0);
 
   // Note [Writing Nondeterministic Operations]
   // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Continue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Continue.java
index addc051c24c..0a47ef3e3db 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Continue.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Continue.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version
1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dFuncOptions.java index 1a5cd3a9290..f0ab894ff77 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java index 5510f8c45dc..458a686abcf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class Conv1dImpl extends Conv1dImplBase { public Conv1dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public Conv1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public Conv1dImpl( @Cast("int64_t") long input_channels, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java index 75593f6b4e9..7fb92fdd393 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -26,7 +26,7 @@ public class Conv1dImplBase extends Conv1dImplCloneable { public Conv1dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public Conv1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public Conv1dImplBase(@ByVal DetailConv1dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv1dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java index 547f9d2f279..f505c1c1385 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class Conv1dImplCloneable extends Module { public Conv1dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public Conv1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Conv1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dOptions.java index 21ffa043df8..c33496b60be 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dPadding.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dPadding.java index 9ed98f583de..4b7fec2f254 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dPadding.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dPadding.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -17,19 +17,19 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::variant,torch::enumtype::kValid,torch::enumtype::kSame>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant,torch::enumtype::kValid,torch::enumtype::kSame>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Conv1dPadding extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
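 *  This class wraps the padding variant used by {@code Conv1dOptions}; its accessors below
 *  bind {@code std::get} (previously {@code c10::get}), which only succeeds when the
 *  requested alternative is the active one.  A sketch, assuming {@code pad} was obtained
 *  from an options object (that accessor is not part of this hunk):
 *  <pre>{@code
 *  LongPointer width = pad.get0();   // alternative 0: ExpandingArray<1>, one int64_t value
 *  kValid tag = pad.get1();          // fails unless kValid is currently active
 *  }</pre>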
*/ public Conv1dPadding(Pointer p) { super(p); } public @Cast("torch::ExpandingArray<1>*") @ByRef LongPointer get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @Cast("torch::ExpandingArray<1>*") @ByRef LongPointer get0(@ByRef Conv1dPadding container); + @Namespace @Name("std::get<0>") public static native @Cast("torch::ExpandingArray<1>*") @ByRef LongPointer get0(@ByRef Conv1dPadding container); @ValueSetter public native Conv1dPadding put(@Cast("torch::ExpandingArray<1>*") @ByRef LongPointer value); public @ByRef kValid get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kValid get1(@ByRef Conv1dPadding container); + @Namespace @Name("std::get<1>") public static native @ByRef kValid get1(@ByRef Conv1dPadding container); @ValueSetter public native Conv1dPadding put(@ByRef kValid value); public @ByRef kSame get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kSame get2(@ByRef Conv1dPadding container); + @Namespace @Name("std::get<2>") public static native @ByRef kSame get2(@ByRef Conv1dPadding container); @ValueSetter public native Conv1dPadding put(@ByRef kSame value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dFuncOptions.java index 65d8ebfe980..e584973c787 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java index dfd2f099365..6c09f570c22 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class Conv2dImpl extends Conv2dImplBase { public Conv2dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public Conv2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public Conv2dImpl( @Cast("int64_t") long input_channels, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java index 9b3423396ef..d0ac3c79e8d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class Conv2dImplBase extends Conv2dImplCloneable { public Conv2dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public Conv2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public Conv2dImplBase(@ByVal DetailConv2dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv2dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java index a7e6d1de26d..22fb1655ad2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class Conv2dImplCloneable extends Module { public Conv2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public Conv2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Conv2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dOptions.java index c37d0f8f58c..ba32723ac78 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dPadding.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dPadding.java index ea8893052c6..61c4261f4b2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dPadding.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dPadding.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -17,19 +17,19 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::variant,torch::enumtype::kValid,torch::enumtype::kSame>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant,torch::enumtype::kValid,torch::enumtype::kSame>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Conv2dPadding extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Conv2dPadding(Pointer p) { super(p); } public @Cast("torch::ExpandingArray<2>*") @ByRef LongPointer get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @Cast("torch::ExpandingArray<2>*") @ByRef LongPointer get0(@ByRef Conv2dPadding container); + @Namespace @Name("std::get<0>") public static native @Cast("torch::ExpandingArray<2>*") @ByRef LongPointer get0(@ByRef Conv2dPadding container); @ValueSetter public native Conv2dPadding put(@Cast("torch::ExpandingArray<2>*") @ByRef LongPointer value); public @ByRef kValid get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kValid get1(@ByRef Conv2dPadding container); + @Namespace @Name("std::get<1>") public static native @ByRef kValid get1(@ByRef Conv2dPadding container); @ValueSetter public native Conv2dPadding put(@ByRef kValid value); public @ByRef kSame get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kSame get2(@ByRef Conv2dPadding container); + @Namespace @Name("std::get<2>") public static native @ByRef kSame get2(@ByRef Conv2dPadding container); @ValueSetter public native Conv2dPadding put(@ByRef kSame value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dFuncOptions.java index 2393b28fde8..a2d7a206d87 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java index 19994e2d76a..841d6e8f9e9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class Conv3dImpl extends Conv3dImplBase { public Conv3dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public Conv3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public Conv3dImpl( @Cast("int64_t") long input_channels, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java index e4061c69a01..1bd9bf61d22 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class Conv3dImplBase extends Conv3dImplCloneable { public Conv3dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public Conv3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public Conv3dImplBase(@ByVal DetailConv3dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv3dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java index ab3cba20ab5..819cb65d1d1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class Conv3dImplCloneable extends Module { public Conv3dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public Conv3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Conv3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dOptions.java index 74b9ff14b89..e7dd8351238 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dPadding.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dPadding.java index b09cbea15dd..86affc897c2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dPadding.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dPadding.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -17,19 +17,19 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::variant,torch::enumtype::kValid,torch::enumtype::kSame>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant,torch::enumtype::kValid,torch::enumtype::kSame>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Conv3dPadding extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Conv3dPadding(Pointer p) { super(p); } public @Cast("torch::ExpandingArray<3>*") @ByRef LongPointer get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @Cast("torch::ExpandingArray<3>*") @ByRef LongPointer get0(@ByRef Conv3dPadding container); + @Namespace @Name("std::get<0>") public static native @Cast("torch::ExpandingArray<3>*") @ByRef LongPointer get0(@ByRef Conv3dPadding container); @ValueSetter public native Conv3dPadding put(@Cast("torch::ExpandingArray<3>*") @ByRef LongPointer value); public @ByRef kValid get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kValid get1(@ByRef Conv3dPadding container); + @Namespace @Name("std::get<1>") public static native @ByRef kValid get1(@ByRef Conv3dPadding container); @ValueSetter public native Conv3dPadding put(@ByRef kValid value); public @ByRef kSame get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kSame get2(@ByRef Conv3dPadding container); + @Namespace @Name("std::get<2>") public static native @ByRef kSame get2(@ByRef Conv3dPadding container); @ValueSetter public native Conv3dPadding put(@ByRef kSame value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvPaddingMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvPaddingMode.java index a8ab94abe70..6454ff02e98 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvPaddingMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvPaddingMode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ConvPaddingMode extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
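 *  The switch from {@code c10::variant}/{@code c10::get} to {@code std::variant}/{@code std::get}
 *  in these {@code @Name} strings follows upstream libtorch, which dropped its bundled variant
 *  type once C++17 became the baseline.  Given a {@code ConvPaddingMode mode} taken from some
 *  options object, selecting and reading a mode might look like this (assuming the enumtype
 *  tag classes are default-constructible, as empty structs usually are in these bindings):
 *  <pre>{@code
 *  mode.put(new kCircular());    // make alternative 3 the active one
 *  kCircular c = mode.get3();    // binds std::get<3>; fails if another mode is active
 *  }</pre>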
*/ @@ -31,16 +31,16 @@ public class ConvPaddingMode extends Pointer { public native @Name("operator =") @ByRef ConvPaddingMode put(@ByRef ConvPaddingMode x); public @ByRef kZeros get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kZeros get0(@ByRef ConvPaddingMode container); + @Namespace @Name("std::get<0>") public static native @ByRef kZeros get0(@ByRef ConvPaddingMode container); @ValueSetter public native ConvPaddingMode put(@ByRef kZeros value); public @ByRef kReflect get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kReflect get1(@ByRef ConvPaddingMode container); + @Namespace @Name("std::get<1>") public static native @ByRef kReflect get1(@ByRef ConvPaddingMode container); @ValueSetter public native ConvPaddingMode put(@ByRef kReflect value); public @ByRef kReplicate get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kReplicate get2(@ByRef ConvPaddingMode container); + @Namespace @Name("std::get<2>") public static native @ByRef kReplicate get2(@ByRef ConvPaddingMode container); @ValueSetter public native ConvPaddingMode put(@ByRef kReplicate value); public @ByRef kCircular get3() { return get3(this); } - @Namespace @Name("c10::get<3>") public static native @ByRef kCircular get3(@ByRef ConvPaddingMode container); + @Namespace @Name("std::get<3>") public static native @ByRef kCircular get3(@ByRef ConvPaddingMode container); @ValueSetter public native ConvPaddingMode put(@ByRef kCircular value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dFuncOptions.java index a4b829c2991..1dee0a27b19 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java index 9265bfa0ad8..6d2d69d4705 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -40,7 +40,7 @@ public class ConvTranspose1dImpl extends ConvTranspose1dImplBase { public ConvTranspose1dImpl(Pointer p) { super(p); } /** Downcast constructor. 
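 *  One behavioural edge of the cast change is worth noting: {@code dynamic_pointer_cast}
 *  yielded an empty pointer on a type mismatch, while {@code static_pointer_cast} performs
 *  no check at all, so handing this constructor a {@code Module} of the wrong concrete type
 *  is no longer caught here.  A sketch ({@code LinearImpl} is just a stand-in for any
 *  unrelated module type):
 *  <pre>{@code
 *  Module base = new LinearImpl(4, 4).asModule();
 *  ConvTranspose1dImpl bad = new ConvTranspose1dImpl(base);  // compiles and runs; misuse
 *  }</pre>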
*/ public ConvTranspose1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ConvTranspose1dImpl( @Cast("int64_t") long input_channels, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java index b9af89ada31..05e994ba372 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -32,7 +32,7 @@ public class ConvTranspose1dImplBase extends ConvTranspose1dImplBaseBase { public ConvTranspose1dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public ConvTranspose1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); /** Pretty prints the {@code ConvTranspose{1,2,3}d} module into the given {@code stream}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java index cd489c43846..519f8c5149c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ConvTranspose1dImplBaseBase extends ConvTranspose1dImplCloneable { public ConvTranspose1dImplBaseBase(Pointer p) { super(p); } /** Downcast constructor. 
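 *  The {@code ...ImplBase}/{@code ...ImplBaseBase} pairs in this region mirror the C++
 *  inheritance chain from {@code ConvTransposeNdImpl} through {@code ConvNdImpl} and
 *  {@code Cloneable} down to {@code Module}: the parser emits one Java class per
 *  instantiated level, and each level repeats this downcast constructor with the cast
 *  target set to its own template instantiation, which is why the {@code @Name} strings
 *  above and below differ only in that template argument.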
*/ public ConvTranspose1dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConvTranspose1dImplBaseBase(@ByVal DetailConv1dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv1dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java index 5ba913e41b0..db7be0f6ce8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ConvTranspose1dImplCloneable extends Module { public ConvTranspose1dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public ConvTranspose1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConvTranspose1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dOptions.java index 0b1a00f4551..f8cf246f305 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dFuncOptions.java index 01c4af6c1a0..336ac8f6073 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java index fa35920cf5a..6c99250f640 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -40,7 +40,7 @@ public class 
ConvTranspose2dImpl extends ConvTranspose2dImplBase { public ConvTranspose2dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public ConvTranspose2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ConvTranspose2dImpl( @Cast("int64_t") long input_channels, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java index fe3bd3bebbe..d33e2a70df2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -28,7 +28,7 @@ public class ConvTranspose2dImplBase extends ConvTranspose2dImplBaseBase { public ConvTranspose2dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public ConvTranspose2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); /** Pretty prints the {@code ConvTranspose{1,2,3}d} module into the given {@code stream}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java index 87f6961ee22..4b1053103f6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ConvTranspose2dImplBaseBase extends ConvTranspose2dImplCloneable { public ConvTranspose2dImplBaseBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ConvTranspose2dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConvTranspose2dImplBaseBase(@ByVal DetailConv2dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv2dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java index 51f1e6c0e3f..5196602780b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ConvTranspose2dImplCloneable extends Module { public ConvTranspose2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public ConvTranspose2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConvTranspose2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dOptions.java index 95afa0be76a..ca3021ac5bd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dFuncOptions.java index 46ed91af223..86b32722e3f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java index 4684f19d800..281eeaf3b46 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -40,7 +40,7 @@ public class 
ConvTranspose3dImpl extends ConvTranspose3dImplBase { public ConvTranspose3dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public ConvTranspose3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ConvTranspose3dImpl( @Cast("int64_t") long input_channels, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java index e1a1ee7cdb0..7c013dd973c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -28,7 +28,7 @@ public class ConvTranspose3dImplBase extends ConvTranspose3dImplBaseBase { public ConvTranspose3dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public ConvTranspose3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); /** Pretty prints the {@code ConvTranspose{1,2,3}d} module into the given {@code stream}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java index 76597346543..ba49bf1b854 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ConvTranspose3dImplBaseBase extends ConvTranspose3dImplCloneable { public ConvTranspose3dImplBaseBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ConvTranspose3dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConvTranspose3dImplBaseBase(@ByVal DetailConv3dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv3dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java index 5a38d0b8424..fd4404b9aa9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ConvTranspose3dImplCloneable extends Module { public ConvTranspose3dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public ConvTranspose3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConvTranspose3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dOptions.java index 334bfc32a8e..29a0fe486a2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java index 77c3f25cc58..6a8968f6d15 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -43,7 +43,7 @@ public class CosineEmbeddingLossImpl extends CosineEmbeddingLossImplCloneable { public CosineEmbeddingLossImpl(Pointer p) { super(p); } /** Downcast constructor. 
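 *  Besides this downcast constructor, the loss keeps its options-based constructor just
 *  below, whose {@code nullValue} default corresponds to the C++ expression
 *  {@code torch::nn::CosineEmbeddingLossOptions{}}.  A sketch, assuming the fluent
 *  {@code margin(double)} setter declared by the C++ {@code TORCH_ARG} macro is mapped
 *  on the options class:
 *  <pre>{@code
 *  CosineEmbeddingLossImpl loss =
 *      new CosineEmbeddingLossImpl(new CosineEmbeddingLossOptions().margin(0.5));
 *  }</pre>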
*/ public CosineEmbeddingLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public CosineEmbeddingLossImpl(@ByVal(nullValue = "torch::nn::CosineEmbeddingLossOptions{}") CosineEmbeddingLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::CosineEmbeddingLossOptions{}") CosineEmbeddingLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java index 9fc22f3a41d..0910a547d1b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class CosineEmbeddingLossImplCloneable extends Module { public CosineEmbeddingLossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public CosineEmbeddingLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CosineEmbeddingLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossOptions.java index dce0dc9e1cf..8275ab12f30 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java index d47fdd3bbc2..937d9a6787b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -37,7 +37,7 @@ public class CosineSimilarityImpl extends CosineSimilarityImplCloneable { public CosineSimilarityImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public CosineSimilarityImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public CosineSimilarityImpl(@Const @ByRef(nullValue = "torch::nn::CosineSimilarityOptions{}") CosineSimilarityOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::CosineSimilarityOptions{}") CosineSimilarityOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java index dd451c84575..d2c107901ff 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class CosineSimilarityImplCloneable extends Module { public CosineSimilarityImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public CosineSimilarityImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CosineSimilarityImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityOptions.java index 045d2c03fa0..b208d407929 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CppFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CppFunction.java index 3af0e8b9747..b89554d9d1e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CppFunction.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CppFunction.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignature.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignature.java index 5d09135e162..a2385feeac3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignature.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignature.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignatureOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignatureOptional.java index 4feacf874b8..cae6a80365f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignatureOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignatureOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java index ffcf1fa8937..eecd92b4b80 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -41,7 +41,7 @@ public class CrossEntropyLossImpl extends CrossEntropyLossImplCloneable { public CrossEntropyLossImpl(Pointer p) { super(p); } /** Downcast constructor. */ public CrossEntropyLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public CrossEntropyLossImpl(@ByVal(nullValue = "torch::nn::CrossEntropyLossOptions{}") CrossEntropyLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::CrossEntropyLossOptions{}") CrossEntropyLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java index 7eaccaec8fe..b30424c29a2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class CrossEntropyLossImplCloneable extends Module { public CrossEntropyLossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
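 *  The {@code @Name("std::make_shared")} allocator on the options-based constructor above
 *  means each Java wrapper owns one {@code shared_ptr} reference to the native module, and
 *  closing the wrapper releases that reference.  A sketch using try-with-resources, which
 *  {@code Pointer} supports through {@code AutoCloseable}:
 *  <pre>{@code
 *  try (CrossEntropyLossImpl ce = new CrossEntropyLossImpl(new CrossEntropyLossOptions())) {
 *      Module m = ce.asModule();   // e.g. to register it on a parent module
 *  }                               // close() drops the shared_ptr reference
 *  }</pre>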
*/ public CrossEntropyLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CrossEntropyLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossOptions.java index 9cbccf57b72..db83588ab31 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java index 2ca05834654..b0354584da6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -34,7 +34,7 @@ public class CrossMapLRN2dImpl extends CrossMapLRN2dImplCloneable { public CrossMapLRN2dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public CrossMapLRN2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public CrossMapLRN2dImpl(@Cast("int64_t") long size) { super((Pointer)null); allocate(size); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java index c8f1331f38e..95491174c97 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class CrossMapLRN2dImplCloneable extends Module { public CrossMapLRN2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
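 *  {@code CrossMapLRN2dImpl} above is the simplest case in this batch: its extra
 *  constructor takes the local size directly, so exercising the new static downcast
 *  is a one-liner (a sketch, using the upcast defined just below):
 *  <pre>{@code
 *  CrossMapLRN2dImpl lrn = new CrossMapLRN2dImpl(5);   // size, per the hunk above
 *  Module base = lrn.asModule();
 *  CrossMapLRN2dImpl typed = new CrossMapLRN2dImpl(base);
 *  }</pre>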
*/ public CrossMapLRN2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CrossMapLRN2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dOptions.java index 5a65cd9553e..5f133a4d68b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CustomBatchRequest.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CustomBatchRequest.java index e6e850db0f3..479428dbcc1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CustomBatchRequest.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CustomBatchRequest.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CustomClassHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CustomClassHolder.java index b9b2c5eae02..31e913b9cc5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CustomClassHolder.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CustomClassHolder.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DDPLoggingData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DDPLoggingData.java index 68bdff02ecf..4eab385c552 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DDPLoggingData.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DDPLoggingData.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DLDevice_.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DLDevice_.java index 19719ed5c12..2d9bd766d75 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DLDevice_.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DLDevice_.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DataLoaderOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DataLoaderOptions.java index e5fce0cd415..58c29b4d1a4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DataLoaderOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DataLoaderOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 
1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java index afbe83c8d3a..01a4199309d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtrVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtrVector.java index d0ba80af803..e88111330e8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtrVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtrVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoBase.java index e01e9e11d31..9aeb5ca081e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoGuard.java index 33ae5472eae..f0e50ba3b9c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoGuard.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Decl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Decl.java index 0abeb5d2de9..5282d287598 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Decl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Decl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Def.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Def.java index 212b2110475..e81b9338315 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Def.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Def.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DefMaybe.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DefMaybe.java index 974bc34c180..e1d98af8c93 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DefMaybe.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DefMaybe.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DefVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DefVector.java index 95f86efb719..a50f96b8a82 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/DefVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DefVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Delete.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Delete.java index 8a5537e96e3..39b34b7f541 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Delete.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Delete.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java index 2d4cd5d47cb..92500bc5cc3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv1dOptions.java index 1d80828eb84..5266008478c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv1dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv2dOptions.java index 84ce75512f0..129ea188245 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv2dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv3dOptions.java index d0f504ae329..0c81c19afe2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv3dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DetectAnomalyGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DetectAnomalyGuard.java index 2c0866b732a..e7450aad17d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DetectAnomalyGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DetectAnomalyGuard.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java index a371ce0ea13..2bb6ffd6c06 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplInterface.java index b595aa6d6fa..dbb16d8d7b3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplInterface.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -41,6 +41,8 @@ public class DeviceGuardImplInterface extends Pointer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DeviceGuardImplInterface(Pointer p) { super(p); } + public native @ByRef @Name("operator =") DeviceGuardImplInterface put(@Const @ByRef DeviceGuardImplInterface arg0); + /** * Return the type of device managed by this guard implementation. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplRegistrar.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplRegistrar.java index 9593b57be5b..8b6612ce82d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplRegistrar.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplRegistrar.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjType.java index f81d60d134f..a34c7694173 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjTypePtr.java index 2cf74cca4fd..850ca373b48 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceOptional.java index 3555dc1e1fb..2c0e86d6f8c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceTypeSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceTypeSet.java index f09f9657d41..c01b8ab4756 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceTypeSet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceTypeSet.java @@ -1,4 
+1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DictComp.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DictComp.java index 2798706db63..a80f1616258 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DictComp.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DictComp.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DictLiteral.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DictLiteral.java index d1034f65c4b..ff431466229 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DictLiteral.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DictLiteral.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DictType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DictType.java index 12350de9934..0f0e920b918 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DictType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DictType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -45,9 +45,9 @@ public class DictType extends SharedType { // global singleton // Given an inner type T and an identifier, - // this function wil return the global singleton type pointer + // this function will return the global singleton type pointer // the type List. 
- // The extra "identifier" argument is needed beccause we have multiple container types + // The extra "identifier" argument is needed because we have multiple container types // that all re-use this function (Dict and unordered_map) public static native @ByVal Type.TypePtr get(@StdString BytePointer identifier, @ByVal Type.TypePtr key, @ByVal Type.TypePtr val); public static native @ByVal Type.TypePtr get(@StdString String identifier, @ByVal Type.TypePtr key, @ByVal Type.TypePtr val); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DifferentiableViewMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DifferentiableViewMeta.java index d3c27bbb521..148bb86e524 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DifferentiableViewMeta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DifferentiableViewMeta.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVector.java index 2cd86548ad3..8b7a4992184 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java index 230831af267..6ae1dc877f1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorOptional.java index 54644f41f53..ba737dc1e7e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dimname.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dimname.java index 833b568a065..d1aeb489911 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dimname.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dimname.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameArrayRef.java index be2a393a6fd..66a373aac38 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
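A quick illustration of the get() overloads documented in the hunk above: the identifier keys the global singleton cache, which is how the same helper can back both Dict and unordered_map. A minimal sketch, assuming keyType and valType are Type.TypePtr values already obtained from the bindings (both names are placeholders, not part of this patch):

    // Hypothetical usage of the overload shown above; "Dict" is the identifier,
    // keyType/valType stand in for real TypePtrs produced elsewhere.
    Type.TypePtr dictType = DictType.get("Dict", keyType, valType);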
a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameListOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameListOptional.java index fb3386cf768..88cb97f7484 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameListOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameListOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameOptional.java index 09a9f97dab9..979cb729d39 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameVector.java index 262982095db..2cc858d7c97 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DisablePythonDispatcher.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DisablePythonDispatcher.java index f83cccf3ad7..c700ecc78fc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DisablePythonDispatcher.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DisablePythonDispatcher.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DisableRecordFunctionGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DisableRecordFunctionGuard.java index 42f5ed3fff2..d17ca88d144 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DisableRecordFunctionGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DisableRecordFunctionGuard.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DisabledStr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DisabledStr.java index b946f573828..a8d294cb134 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DisabledStr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DisabledStr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyExtractor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyExtractor.java index 87e04eac47e..4829954eb86 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyExtractor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyExtractor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff 
--git a/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyOptional.java index 662330001e4..6aa7c050bd0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeySet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeySet.java index 62e5d5e8e21..adaeeae62cd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeySet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeySet.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -63,8 +63,8 @@ // 5 categories. // // (1) "Building block" keys -// (a) backends: jEverything in the BackendComponent enum (e.g. CPUBit, -// CUDABIt) (b) functionalities: (per-backend) functionality-bit DispatchKeys +// (a) backends: Everything in the BackendComponent enum (e.g. CPUBit, +// CUDABit) (b) functionalities: (per-backend) functionality-bit DispatchKeys // (e.g. AutogradFunctionality, Sparse, Dense) // (2) "Runtime" keys // (a) "non-customizable backends" (e.g. FPGA) @@ -94,7 +94,7 @@ // Sparse, Quantized, AutogradFunctionality, ...). These keys together allow // every dispatcher operator to be customized in up to 12*4 different ways. Each // of those requires a slot in the operator table of every dispatcher operator. -// Not every piece of functionality necessarily needs to be customizeable +// Not every piece of functionality necessarily needs to be customizable // per-backend, and not every backend necessarily needs to be able to customize // every type of functionality. // @@ -110,10 +110,10 @@ // (2a) and (2b) are represented identically in the DispatchKeySet logic: // - backend-agnostic functionalities (e.g. FuncTorchBatched) are NOT -// customizeable per backend. +// customizable per backend. // In order to do so, we'd need to promote it to a per-backend functionality // "building block" key. -// - non-customizeable backends (e.g. FPGA) can NOT customize existing +// - non-customizable backends (e.g. FPGA) can NOT customize existing // functionality like Sparse, Autograd, etc. // In order to do so, we'd need to promote it to a backend "building block" // key. diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dispatcher.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dispatcher.java index 5acc26a4c46..02be66397b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dispatcher.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dispatcher.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -111,14 +111,10 @@ public class Dispatcher extends Pointer { * If a schema with the same operator name and overload name already exists, * this function will check that both schemas are exactly identical. 
*/ - public native @ByVal RegistrationHandleRAII registerDef(@ByVal FunctionSchema schema, @StdString BytePointer debug, @StdVector @Cast("at::Tag*") IntPointer tags/*={}*/); + public native @ByVal RegistrationHandleRAII registerDef(@ByVal FunctionSchema schema, @StdString BytePointer debug, @ByVal(nullValue = "std::vector{}") TagVector tags); public native @ByVal RegistrationHandleRAII registerDef(@ByVal FunctionSchema schema, @StdString BytePointer debug); - public native @ByVal RegistrationHandleRAII registerDef(@ByVal FunctionSchema schema, @StdString String debug, @StdVector @Cast("at::Tag*") IntBuffer tags/*={}*/); + public native @ByVal RegistrationHandleRAII registerDef(@ByVal FunctionSchema schema, @StdString String debug, @ByVal(nullValue = "std::vector{}") TagVector tags); public native @ByVal RegistrationHandleRAII registerDef(@ByVal FunctionSchema schema, @StdString String debug); - public native @ByVal RegistrationHandleRAII registerDef(@ByVal FunctionSchema schema, @StdString BytePointer debug, @StdVector @Cast("at::Tag*") int[] tags/*={}*/); - public native @ByVal RegistrationHandleRAII registerDef(@ByVal FunctionSchema schema, @StdString String debug, @StdVector @Cast("at::Tag*") IntPointer tags/*={}*/); - public native @ByVal RegistrationHandleRAII registerDef(@ByVal FunctionSchema schema, @StdString BytePointer debug, @StdVector @Cast("at::Tag*") IntBuffer tags/*={}*/); - public native @ByVal RegistrationHandleRAII registerDef(@ByVal FunctionSchema schema, @StdString String debug, @StdVector @Cast("at::Tag*") int[] tags/*={}*/); /** * Register a kernel to the dispatch table for an operator. @@ -132,6 +128,20 @@ public class Dispatcher extends Pointer { public native @ByVal RegistrationHandleRAII registerImpl(@ByVal OperatorName op_name, @ByVal DispatchKeyOptional dispatch_key, @ByVal KernelFunction kernel, @ByVal CppSignatureOptional cpp_signature, @UniquePtr @ByVal FunctionSchema inferred_function_schema, @StdString BytePointer debug); public native @ByVal RegistrationHandleRAII registerImpl(@ByVal OperatorName op_name, @ByVal DispatchKeyOptional dispatch_key, @ByVal KernelFunction kernel, @ByVal CppSignatureOptional cpp_signature, @UniquePtr @ByVal FunctionSchema inferred_function_schema, @StdString String debug); + /** + * Given an operator, tells the Dispatcher that we have implemented an abstract impl + * for this op in the given Python module. Call this a "pystub". + */ + public native @ByVal RegistrationHandleRAII registerAbstractImplPyStub(@Const @ByRef OperatorName op_name, @Cast("const char*") BytePointer pymodule, @Cast("const char*") BytePointer context); + public native @ByVal RegistrationHandleRAII registerAbstractImplPyStub(@Const @ByRef OperatorName op_name, String pymodule, String context); + + /** + * Given an operator, throws if we have an abstract impl pystub. + */ + public native void throwIfHasAbstractImplPyStub(@ByVal OperatorName op_name); + + public native @ByVal BytePointerPairOptional getAbstractImplPyStub(@ByVal OperatorName op_name); + /** * Register a new operator by name. 
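The registerDef consolidation above replaces the six @Cast("at::Tag*") array overloads with two TagVector ones, and the new pystub entry points mirror Dispatcher::registerAbstractImplPyStub on the C++ side. A hedged usage sketch, assuming Dispatcher.singleton(), parseSchema(...) and FunctionSchema.operator_name() are exposed by these presets as on the C++ API; the operator name and strings are made up:

    // Sketch only: register a schema without tags via the two-argument overload
    // kept by this patch, then point its abstract impl at a Python module.
    Dispatcher d = Dispatcher.singleton();                        // assumed accessor
    FunctionSchema schema = parseSchema("myops::twice(Tensor self) -> Tensor");
    RegistrationHandleRAII def = d.registerDef(schema, "registered from Java");
    RegistrationHandleRAII stub = d.registerAbstractImplPyStub(
        schema.operator_name(),                                   // assumed getter
        "myops_python", "myops Java extension");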
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DistBackendError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DistBackendError.java index 4aacd6d8122..99321bfa0d8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DistBackendError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DistBackendError.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -21,7 +21,7 @@ // Used for collective communication library errors from the distributed module. // These turn into DistBackendError when they cross into Python. @Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class DistBackendError extends Error { +public class DistBackendError extends DistError { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DistBackendError(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DistError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DistError.java new file mode 100644 index 00000000000..109c90321b8 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DistError.java @@ -0,0 +1,29 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// Base error type for all distributed errors. +// These turn into DistError when they cross into Python. +@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DistError extends Error { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public DistError(Pointer p) { super(p); } + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DistNetworkError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DistNetworkError.java new file mode 100644 index 00000000000..0e71b470845 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DistNetworkError.java @@ -0,0 +1,29 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// Used for errors originating from the TCP/IP stack and not from collective +// libraries. These turn into DistNetworkError when they cross into Python. 
+@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DistNetworkError extends DistError { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public DistNetworkError(Pointer p) { super(p); } + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DistStoreError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DistStoreError.java new file mode 100644 index 00000000000..9966e069871 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DistStoreError.java @@ -0,0 +1,29 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// Used for errors originating from the store. +// These turn into DistStoreError when they cross into Python. +@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DistStoreError extends DistError { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public DistStoreError(Pointer p) { super(p); } + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedRandomSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedRandomSampler.java index de2c4ec05fc..28a34a9d70e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedRandomSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedRandomSampler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSampler.java index cd65d05c09c..ecdd52edd98 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSampler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSequentialSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSequentialSampler.java index 611313c4675..07ebbc7f716 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSequentialSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSequentialSampler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DontIncreaseRefcount.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DontIncreaseRefcount.java index 0a0949c2b90..ea5a7e645bd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DontIncreaseRefcount.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DontIncreaseRefcount.java 
@@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dots.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dots.java index f0579689d76..8e3cadb236e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dots.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dots.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRef.java index e7a6fc866a2..7ca9b4d0570 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRefOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRefOptional.java index f1554603ab8..e057fbcf6b1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRefOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRefOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplex.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplex.java index 548417fe9dc..626e27952f3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplex.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplex.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexArrayRef.java index dac1eced34e..ba8803704de 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexElementReference.java index ed0678fdfeb..e310dff0489 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexElementReference.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexElementReference.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexList.java index 8f67ec69bc4..976c6baa857 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexList.java @@ -1,4 +1,4 @@ -// 
Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexListIterator.java index c690353cb58..097ff3b1fb8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexListIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexListIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleElementReference.java index 27e89835946..b50ba7021be 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleElementReference.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleElementReference.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleExpandingArrayOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleExpandingArrayOptional.java index aadc8343966..d60fb062e8e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleExpandingArrayOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleExpandingArrayOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleList.java index 8dd24b2fac9..b7435241b6e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleListIterator.java index b97902782f6..a0aa8912f85 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleListIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleListIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleOptional.java index 6431560a96a..3401c413f0a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVector.java index e9bd7c61f6a..9493bc6359b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVector.java @@ 
-1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVectorOptional.java index dc51967825f..65afa08f024 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVectorOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java index 19c26318204..df1a60721ea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -47,7 +47,7 @@ public class Dropout2dImpl extends Dropout2dImplBase { public Dropout2dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public Dropout2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java index 2f7e8494d5a..4d0c67a3c6c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -25,7 +25,7 @@ public class Dropout2dImplBase extends Dropout2dImplCloneable { public Dropout2dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public Dropout2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public Dropout2dImplBase(double p) { super((Pointer)null); allocate(p); } private native void allocate(double p); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java index 2365821e467..665449c71ed 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class Dropout2dImplCloneable extends Module { public Dropout2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public Dropout2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Dropout2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java index 06bf0c47195..891ea028af7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -47,7 +47,7 @@ public class Dropout3dImpl extends Dropout3dImplBase { public Dropout3dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public Dropout3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java index 1ab3ca45ccc..f6e94308368 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -25,7 +25,7 @@ public class Dropout3dImplBase extends Dropout3dImplCloneable { public Dropout3dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public Dropout3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public Dropout3dImplBase(double p) { super((Pointer)null); allocate(p); } private native void allocate(double p); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java index 8fec81fb9d8..5feb8ff3e3a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class Dropout3dImplCloneable extends Module { public Dropout3dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public Dropout3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Dropout3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutFuncOptions.java index 9abba5ff310..c365357e174 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java index 095389f5224..5313f5de715 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -47,7 +47,7 @@ public class DropoutImpl extends DropoutImplBase { public DropoutImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public DropoutImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java index 1f2a129b45f..4bd921a2c6d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -25,7 +25,7 @@ public class DropoutImplBase extends DropoutImplCloneable { public DropoutImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public DropoutImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public DropoutImplBase(double p) { super((Pointer)null); allocate(p); } private native void allocate(double p); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java index 844e9d3a0a1..6539714e20f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class DropoutImplCloneable extends Module { public DropoutImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public DropoutImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr DropoutImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutOptions.java index edde6efb8b1..1301324ff98 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java index 7fb1321079b..928af9d2393 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class ELUImpl extends ELUImplCloneable { public ELUImpl(Pointer p) { super(p); } /** Downcast constructor. */ public ELUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ELUImpl(@Const @ByRef(nullValue = "torch::nn::ELUOptions{}") ELUOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::ELUOptions{}") ELUOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java index 1607667d87a..f88d7c05d83 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ELUImplCloneable extends Module { public ELUImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ELUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ELUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUOptions.java index 001c9eaafae..ed8f9223921 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Edge.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Edge.java index f4c4e5f646a..ca90e7ac0d9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Edge.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Edge.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EdgeVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EdgeVector.java index 2b719f8e377..1625f734810 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EdgeVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EdgeVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EllipsisIndexType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EllipsisIndexType.java index 29810fe0416..21c2f74df2a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EllipsisIndexType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EllipsisIndexType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFromPretrainedOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFromPretrainedOptions.java index 725290ae8b9..269d14e79ca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFromPretrainedOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFromPretrainedOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFuncOptions.java index 44f7bf011f5..bad4e4597c9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 
1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java index 5e466e29033..a8889bb6884 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -41,7 +41,7 @@ public class EmbeddingBagImpl extends EmbeddingBagImplCloneable { public EmbeddingBagImpl(Pointer p) { super(p); } /** Downcast constructor. */ public EmbeddingBagImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public EmbeddingBagImpl(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim) { super((Pointer)null); allocate(num_embeddings, embedding_dim); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java index 316a79f8eed..57b8ed85924 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class EmbeddingBagImplCloneable extends Module { public EmbeddingBagImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public EmbeddingBagImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr EmbeddingBagImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagMode.java index ec886c06ca1..7e90f5aa280 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagMode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class EmbeddingBagMode extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -30,13 +30,13 @@ public class EmbeddingBagMode extends Pointer { public native @Name("operator =") @ByRef EmbeddingBagMode put(@ByRef EmbeddingBagMode x); public @ByRef kSum get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kSum get0(@ByRef EmbeddingBagMode container); + @Namespace @Name("std::get<0>") public static native @ByRef kSum get0(@ByRef EmbeddingBagMode container); @ValueSetter public native EmbeddingBagMode put(@ByRef kSum value); public @ByRef kMean get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kMean get1(@ByRef EmbeddingBagMode container); + @Namespace @Name("std::get<1>") public static native @ByRef kMean get1(@ByRef EmbeddingBagMode container); @ValueSetter public native EmbeddingBagMode put(@ByRef kMean value); public @ByRef kMax get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kMax get2(@ByRef EmbeddingBagMode container); + @Namespace @Name("std::get<2>") public static native @ByRef kMax get2(@ByRef EmbeddingBagMode container); @ValueSetter public native EmbeddingBagMode put(@ByRef kMax value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagOptions.java index 020f0e5d734..c2bcb58fada 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFromPretrainedOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFromPretrainedOptions.java index 78d14695ec1..e8dee41ce6c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFromPretrainedOptions.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFromPretrainedOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFuncOptions.java index ea2a67e5050..7ae841e4fb3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java index b689cc4cb74..48cd74d50d2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -40,7 +40,7 @@ public class EmbeddingImpl extends EmbeddingImplCloneable { public EmbeddingImpl(Pointer p) { super(p); } /** Downcast constructor. */ public EmbeddingImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public EmbeddingImpl(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim) { super((Pointer)null); allocate(num_embeddings, embedding_dim); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java index e327c17d5d1..c1d2ac98db4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class EmbeddingImplCloneable extends Module { public EmbeddingImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public EmbeddingImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr EmbeddingImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingOptions.java index f15d45b9d03..c26f344cf50 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnableProfilingGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnableProfilingGuard.java index e89351f42a7..e7b2adf0f78 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EnableProfilingGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnableProfilingGuard.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnabledStr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnabledStr.java index c1cdf06ec8e..9888ea7dcea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EnabledStr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnabledStr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnforceFiniteError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnforceFiniteError.java index 266fcfce5e6..db594231c95 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EnforceFiniteError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnforceFiniteError.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolder.java index f5976eff5d9..c93e6c76e0e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolder.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolder.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolderPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolderPtr.java index 78621608088..8744b022f2d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolderPtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolderPtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE 
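Note on the EmbeddingBagMode hunk above: upstream PyTorch replaced its c10::variant polyfill with std::variant, so the generated getters now bind std::get<N> directly; only the native names change, not the Java API surface. A minimal usage sketch, assuming the value-taking constructor that JavaCPP generates elsewhere in this class (it is not visible in this hunk):

    import org.bytedeco.pytorch.EmbeddingBagMode;
    import org.bytedeco.pytorch.kMean;

    // Hypothetical sketch; EmbeddingBagMode wraps std::variant<kSum, kMean, kMax>.
    EmbeddingBagMode mode = new EmbeddingBagMode(new kMean()); // assumed value constructor
    mode.put(new kMean());       // @ValueSetter: stores the kMean alternative
    kMean active = mode.get1();  // dispatches to std::get<1>(container) on the native side
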
package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValue.java index eab2d8d5f3c..5add36fa810 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValueArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValueArrayRef.java index 9d9980f7ba9..c20bbde9aa0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValueArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValueArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumType.java index e4e10fe99a8..99079ab9316 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -37,7 +37,7 @@ public class EnumType extends NamedType { public native @SharedPtr("const torch::jit::CompilationUnit") @ByVal CompilationUnit compilation_unit(); - public native @Const @ByVal QualifiedName qualifiedClassName(); + public native @Const @ByRef QualifiedName qualifiedClassName(); public native @ByVal TypeArrayRef containedTypes(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Error.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Error.java index 66557fade74..387bf1d4910 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Error.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Error.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ErrorAlwaysShowCppStacktrace.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ErrorAlwaysShowCppStacktrace.java new file mode 100644 index 00000000000..ab4dcba52f1 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ErrorAlwaysShowCppStacktrace.java @@ -0,0 +1,29 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + // namespace WarningUtils + +// Like Error, but we always report the C++ backtrace, instead of only +// reporting when TORCH_SHOW_CPP_STACKTRACES +@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ErrorAlwaysShowCppStacktrace 
extends Error { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ErrorAlwaysShowCppStacktrace(Pointer p) { super(p); } + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ErrorReport.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ErrorReport.java index 018ab310c30..e1d4147586d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ErrorReport.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ErrorReport.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Example.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Example.java index 580c9e3d022..cd956c142c9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Example.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Example.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java index 2338312ad02..7fd3ffb9c25 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java index 76c8a9a1625..6ac7af85fdc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleOptional.java index 7507278cbc9..27c991fb625 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java index 4e3651343a9..3ccbaf22ff1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorIterator.java index 16783866d5b..719e4936861 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by 
JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java index 9503d259514..f05953814d1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionMessageValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionMessageValue.java index 5874b77c021..71623f605e9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionMessageValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionMessageValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionValue.java index 4d355f48afd..c03389fddae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutionPlan.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutionPlan.java index e3043677010..b1ce937d048 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutionPlan.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutionPlan.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutorExecutionModeOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutorExecutionModeOptional.java index d9ff64faed3..b8d26483818 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutorExecutionModeOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutorExecutionModeOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java index f848386f7d3..6fdfd2ce0e6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -73,7 +73,7 @@ private native void allocate( * their child events) and delaying CPU event start times (to * prevent overlaps), so this should not be used unless Vulkan events are * being profiled and it is ok to use this modified timestamp/duration - * information instead of the the original information. + * information instead of the original information. 
*/ public native @Cast("bool") boolean adjust_timestamps(); public native ExperimentalConfig adjust_timestamps(boolean setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Expr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Expr.java index 2cb29428e11..1c65d0402d5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Expr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Expr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExprList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprList.java index df76a941d89..8b1650910c3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExprList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExprListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprListIterator.java index 97ba2ec0ddc..5ee012a0567 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExprListIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprListIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExprMaybe.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprMaybe.java index 182f94f92bc..99c13ec7bdc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExprMaybe.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprMaybe.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExprStmt.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprStmt.java index 6c01ebf412c..fbc74c82fb4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExprStmt.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprStmt.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExtraFilesMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExtraFilesMap.java index dd3b8f226ca..cf0b84c1bff 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExtraFilesMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExtraFilesMap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FanModeType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FanModeType.java index aea2abb31bf..0a3ca7649f7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FanModeType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FanModeType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset 
@Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class FanModeType extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -29,10 +29,10 @@ public class FanModeType extends Pointer { public native @Name("operator =") @ByRef FanModeType put(@ByRef FanModeType x); public @ByRef kFanIn get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kFanIn get0(@ByRef FanModeType container); + @Namespace @Name("std::get<0>") public static native @ByRef kFanIn get0(@ByRef FanModeType container); @ValueSetter public native FanModeType put(@ByRef kFanIn value); public @ByRef kFanOut get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kFanOut get1(@ByRef FanModeType container); + @Namespace @Name("std::get<1>") public static native @ByRef kFanOut get1(@ByRef FanModeType container); @ValueSetter public native FanModeType put(@ByRef kFanOut value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutFuncOptions.java index 7592ac13fff..b2d9e41ddf2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java index e1ed3e07b58..95671dd5ebc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -44,7 +44,7 @@ public class FeatureAlphaDropoutImpl extends FeatureAlphaDropoutImplBase { public FeatureAlphaDropoutImpl(Pointer p) { super(p); } /** Downcast constructor. */ public FeatureAlphaDropoutImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java index e55118a05b3..7a45508ef1b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -25,7 +25,7 @@ public class FeatureAlphaDropoutImplBase extends FeatureAlphaDropoutImplCloneabl public FeatureAlphaDropoutImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public FeatureAlphaDropoutImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public FeatureAlphaDropoutImplBase(double p) { super((Pointer)null); allocate(p); } private native void allocate(double p); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java index 0a8266c3d94..123d8475bdd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class FeatureAlphaDropoutImplCloneable extends Module { public FeatureAlphaDropoutImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public FeatureAlphaDropoutImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FeatureAlphaDropoutImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FileLineFunc.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FileLineFunc.java index c49620ea045..5d857d712fe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FileLineFunc.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FileLineFunc.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java index 65ed7de7255..f5084538c87 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class FlattenImpl extends FlattenImplCloneable { public FlattenImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public FlattenImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public FlattenImpl(@Const @ByRef(nullValue = "torch::nn::FlattenOptions{}") FlattenOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::FlattenOptions{}") FlattenOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java index aa613b16906..57421f8fd40 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class FlattenImplCloneable extends Module { public FlattenImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public FlattenImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FlattenImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenOptions.java index 90e21b28c2a..2aa758f331f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e4m3fn.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e4m3fn.java index 0f13fd6bc27..41f33f9683c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e4m3fn.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e4m3fn.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e4m3fnuz.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e4m3fnuz.java new file mode 100644 index 00000000000..6bbbdaca43a --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e4m3fnuz.java @@ -0,0 +1,46 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; 
+import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + // namespace detail + +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class Float8_e4m3fnuz extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Float8_e4m3fnuz(Pointer p) { super(p); } + + public native @Cast("uint8_t") byte x(); public native Float8_e4m3fnuz x(byte setter); + + @Opaque public static class from_bits_t extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public from_bits_t() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public from_bits_t(Pointer p) { super(p); } + } + public static native @Const @ByVal from_bits_t from_bits(); + + public Float8_e4m3fnuz() { super((Pointer)null); allocate(); } + private native void allocate(); + + public Float8_e4m3fnuz(@Cast("uint8_t") byte bits, @ByVal from_bits_t arg1) { super((Pointer)null); allocate(bits, arg1); } + private native void allocate(@Cast("uint8_t") byte bits, @ByVal from_bits_t arg1); + public Float8_e4m3fnuz(float value) { super((Pointer)null); allocate(value); } + private native void allocate(float value); + public native @Name("operator float") float asFloat(); + public native @Cast("bool") boolean isnan(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e5m2.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e5m2.java index 17ebbe53818..a644d80cb21 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e5m2.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e5m2.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e5m2fnuz.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e5m2fnuz.java new file mode 100644 index 00000000000..1b7382cbf63 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e5m2fnuz.java @@ -0,0 +1,46 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + // namespace detail + +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class Float8_e5m2fnuz extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Float8_e5m2fnuz(Pointer p) { super(p); } + + public native @Cast("uint8_t") byte x(); public native Float8_e5m2fnuz x(byte setter); + + @Opaque public static class from_bits_t extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. 
*/ + public from_bits_t() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public from_bits_t(Pointer p) { super(p); } + } + public static native @Const @ByVal from_bits_t from_bits(); + + public Float8_e5m2fnuz() { super((Pointer)null); allocate(); } + private native void allocate(); + + public Float8_e5m2fnuz(@Cast("uint8_t") byte bits, @ByVal from_bits_t arg1) { super((Pointer)null); allocate(bits, arg1); } + private native void allocate(@Cast("uint8_t") byte bits, @ByVal from_bits_t arg1); + public Float8_e5m2fnuz(float value) { super((Pointer)null); allocate(value); } + private native void allocate(float value); + public native @Name("operator float") float asFloat(); + public native @Cast("bool") boolean isnan(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java index 55caa15934a..8b7e990794f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplex.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplex.java index 22892195598..2688414f17e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplex.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplex.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplexArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplexArrayRef.java index ad365dfa974..a45083e7b55 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplexArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplexArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatOptional.java index cf81f00dab2..a145e36197b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatType.java index b9ae15623b9..ba7faf2d5fc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatTypePtr.java index fc02126faea..e31153b746b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatTypePtr.java @@ -1,4 +1,4 
@@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java index fba4968fc83..89b0725f9b3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -37,7 +37,7 @@ public class FoldImpl extends FoldImplCloneable { public FoldImpl(Pointer p) { super(p); } /** Downcast constructor. */ public FoldImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public FoldImpl(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer output_size, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(output_size, kernel_size); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer output_size, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java index 91444ff1f94..aa55fe58cdf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class FoldImplCloneable extends Module { public FoldImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public FoldImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FoldImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldOptions.java index e1f2abc884d..92de1c40b93 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/For.java b/pytorch/src/gen/java/org/bytedeco/pytorch/For.java index e6bd11777cd..76149ae488b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/For.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/For.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ForceDispatchKeyGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ForceDispatchKeyGuard.java index cdc38a9abba..d4c24a2a5ef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ForceDispatchKeyGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ForceDispatchKeyGuard.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -26,4 +26,10 @@ public class ForceDispatchKeyGuard extends Pointer { public ForceDispatchKeyGuard(@ByVal LocalDispatchKeySet key_set) { super((Pointer)null); allocate(key_set); } private native void allocate(@ByVal LocalDispatchKeySet key_set); + public ForceDispatchKeyGuard( + @ByVal DispatchKeySet include, + @ByVal DispatchKeySet exclude) { super((Pointer)null); allocate(include, exclude); } + private native void allocate( + @ByVal DispatchKeySet include, + @ByVal DispatchKeySet exclude); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardADLevel.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardADLevel.java index e0f69aa648b..28da57541b8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardADLevel.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardADLevel.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardGrad.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardGrad.java index 8aa55bd46f1..4ad1b8ac48b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardGrad.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardGrad.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool1dOptions.java index 56ea583f8d1..c040ba6cdf2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool1dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java index eb3a208993b..95143991442 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -39,7 +39,7 @@ public class FractionalMaxPool2dImpl extends FractionalMaxPool2dImplCloneable { public FractionalMaxPool2dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public FractionalMaxPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public FractionalMaxPool2dImpl(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java index 20e4b302f50..2ba5c7c2caf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class FractionalMaxPool2dImplCloneable extends Module { public FractionalMaxPool2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public FractionalMaxPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FractionalMaxPool2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dOptions.java index 0ed11ebd548..3f28dbbc469 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java index b7eb3e35ddd..1f0d49c52c0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -39,7 +39,7 @@ public class FractionalMaxPool3dImpl extends FractionalMaxPool3dImplCloneable { public FractionalMaxPool3dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public FractionalMaxPool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public FractionalMaxPool3dImpl(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java index 0d332bcd041..a1388fdb3b1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class FractionalMaxPool3dImplCloneable extends Module { public FractionalMaxPool3dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public FractionalMaxPool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FractionalMaxPool3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dOptions.java index cf53c8a5475..47718860d4a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FullDataLoaderOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FullDataLoaderOptions.java index 16a935e0bd7..3225364ede7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FullDataLoaderOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FullDataLoaderOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java index a8258621cb0..6656758dda0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Function.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Function.java index 5c3b5704565..734549bde4c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Function.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Function.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -28,6 +28,7 @@ public class Function extends Pointer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Function(Pointer p) { super(p); } + public native @ByRef @Name("operator =") Function put(@Const @ByRef Function arg0); public native @StringView BytePointer doc_string(); public native @Cast("bool") boolean isGraphFunction(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionCrossMapLRN2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionCrossMapLRN2d.java index 6e131d2b132..60ce6592285 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionCrossMapLRN2d.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionCrossMapLRN2d.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java index df82f92d4d5..e0d5e3429e0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHookVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHookVector.java index 46101381358..1c5e670261e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHookVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHookVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java index 0f0cdb190df..19614a70941 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHookVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHookVector.java index 2c0ac1e177d..f815d7233ab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHookVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHookVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java index c673eb383bd..372aa10cb6f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaOptional.java index 708c982c371..6f7d31b1d78 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaVector.java index f340d70b8bf..80f050b3908 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionType.java index 478fad5bb7e..be838df1422 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionValue.java index 5a9993656ab..6123302af43 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionVector.java index f775a4b4d5c..8e77dd9b234 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionalityOffsetAndMask.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionalityOffsetAndMask.java index addf3301a4e..cdee8dad713 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionalityOffsetAndMask.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionalityOffsetAndMask.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FusionStrategy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FusionStrategy.java index afbe7ed8851..9a298133bf7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FusionStrategy.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FusionStrategy.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java index e63038b5037..0171c07886d 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -127,7 +127,7 @@ public native void markCompleted( - public native @ByVal Type.TypePtr elementType(); + public native @Const @ByRef Type.TypePtr elementType(); public native @StdVector Device devices(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtr.java index 24a0ae1908f..17f7e70c8a0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrArrayRef.java index 57bd2f35ce9..ad0b4d9a057 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrElementReference.java index 5ed91effd9b..e36923471fc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrElementReference.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrElementReference.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrList.java index 440089ac8fa..c75b294c5de 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrListIterator.java index f980f519e08..efebd1b6261 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrListIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrListIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FutureSingleElementType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FutureSingleElementType.java index 7f410b63aad..9c7ccd12a58 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FutureSingleElementType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FutureSingleElementType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff 
--git a/pytorch/src/gen/java/org/bytedeco/pytorch/FutureType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FutureType.java index 8971864a376..413d60c4e02 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FutureType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FutureType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java index d9cd6006b1c..551609bf9ef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -30,7 +30,7 @@ public class GELUImpl extends GELUImplCloneable { public GELUImpl(Pointer p) { super(p); } /** Downcast constructor. */ public GELUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public GELUImpl(@ByVal(nullValue = "torch::nn::GELUOptions{}") GELUOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::GELUOptions{}") GELUOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java index 29f8633d70b..3f26d87eae7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class GELUImplCloneable extends Module { public GELUImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public GELUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GELUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUOptions.java index 9dfe33c8b25..67042257a77 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java index 2d14da6de7c..78bf46ace76 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class GLUImpl extends GLUImplCloneable { public GLUImpl(Pointer p) { super(p); } /** Downcast constructor. */ public GLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public GLUImpl(@Const @ByRef(nullValue = "torch::nn::GLUOptions{}") GLUOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::GLUOptions{}") GLUOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java index 0dfa12ae47a..212e33756b5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class GLUImplCloneable extends Module { public GLUImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public GLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GLUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUOptions.java index 27ea3cb2c74..be435e2b043 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java index 9ac64428510..7614eea76f0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -39,7 +39,7 @@ public class GRUCellImpl extends GRUCellImplBase { public GRUCellImpl(Pointer p) { super(p); } /** Downcast constructor. */ public GRUCellImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public GRUCellImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java index 7573a51258a..4da554dfc41 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class GRUCellImplBase extends GRUCellImplCloneable { public GRUCellImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public GRUCellImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public GRUCellImplBase(@Const @ByRef RNNCellOptionsBase options_) { super((Pointer)null); allocate(options_); } private native void allocate(@Const @ByRef RNNCellOptionsBase options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java index 0f87bf04496..848a9936814 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class GRUCellImplCloneable extends Module { public GRUCellImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public GRUCellImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GRUCellImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellOptions.java index 89bdcb8b3e4..4970a9ffe1d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java index c648aa72450..981109759e9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -39,7 +39,7 @@ public class GRUImpl extends GRUImplBase { public GRUImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public GRUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public GRUImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java index eb855f178f1..40ba56e31e5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class GRUImplBase extends GRUImplCloneable { public GRUImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public GRUImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public GRUImplBase(@Const @ByRef RNNOptionsBase options_) { super((Pointer)null); allocate(options_); } private native void allocate(@Const @ByRef RNNOptionsBase options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java index 12c8a1be03c..8e9c2a6e5af 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class GRUImplCloneable extends Module { public GRUImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public GRUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GRUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUOptions.java index 796c3f51c23..c7975f7cce7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GatheredContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GatheredContext.java index 7bf1f7d2107..9c7ac7de8f2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GatheredContext.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GatheredContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java index 449799d31d7..b6deac700ad 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java index 98e236dc2a1..d87ae60d2ef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImplPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImplPtr.java index 31bb8b21551..c193fb90c30 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImplPtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImplPtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorOptional.java index 0c330eadfb6..c99688225dc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorType.java index e4e424b28e8..c684cd347d9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorTypePtr.java index ee2ce7ebf7d..731ae893415 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDict.java index 47964ac218d..e3a958cbf95 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDict.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDict.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictEntryRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictEntryRef.java index 1d039c0b310..97d2141c6df 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictEntryRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictEntryRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictIterator.java index d2f78af9c71..aab39186008 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericElementReference.java index a36986056b9..3f0a27bb3a4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericElementReference.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericElementReference.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericList.java index 5c717f7404e..392299e3fe5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericListIterator.java index b854408af9f..c2dee0683d9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericListIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericListIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Global.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Global.java index cb567210a6f..fbd303a1275 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Global.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Global.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GradMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GradMode.java index 1e8fc4b0acd..04d7840f3c6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GradMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GradMode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java index 3d6139c3b75..dc99d45c137 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphAttr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphAttr.java index fb4c80aa02c..591c286f94b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphAttr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphAttr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutor.java index 7dd572b7367..ae8667792d3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorImplBase.java index d3fe99ffc35..e75c1d941c2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorState.java index 612b72f9124..84ade44a006 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorState.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphFunction.java index 95d45910573..534b360e810 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphFunction.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphFunction.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphOptimizerEnabledGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphOptimizerEnabledGuard.java index cb59e71e87f..0f650fedba5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphOptimizerEnabledGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphOptimizerEnabledGuard.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphVector.java index aaa7206f344..1a01b562988 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphsAttr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphsAttr.java index 7964afb4b08..e16b610f800 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphsAttr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphsAttr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GridSampleFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GridSampleFuncOptions.java index 55a8c1161b6..482891f6dbc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GridSampleFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GridSampleFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GridSampleMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GridSampleMode.java index b6c1e8df1db..611d26543ea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GridSampleMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GridSampleMode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant") @Properties(inherit = 
org.bytedeco.pytorch.presets.torch.class) public class GridSampleMode extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -29,10 +29,10 @@ public class GridSampleMode extends Pointer { public native @Name("operator =") @ByRef GridSampleMode put(@ByRef GridSampleMode x); public @ByRef kBilinear get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kBilinear get0(@ByRef GridSampleMode container); + @Namespace @Name("std::get<0>") public static native @ByRef kBilinear get0(@ByRef GridSampleMode container); @ValueSetter public native GridSampleMode put(@ByRef kBilinear value); public @ByRef kNearest get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kNearest get1(@ByRef GridSampleMode container); + @Namespace @Name("std::get<1>") public static native @ByRef kNearest get1(@ByRef GridSampleMode container); @ValueSetter public native GridSampleMode put(@ByRef kNearest value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GridSamplePaddingMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GridSamplePaddingMode.java index 85801fe63bf..f0a0d579a16 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GridSamplePaddingMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GridSamplePaddingMode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class GridSamplePaddingMode extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ @@ -30,13 +30,13 @@ public class GridSamplePaddingMode extends Pointer { public native @Name("operator =") @ByRef GridSamplePaddingMode put(@ByRef GridSamplePaddingMode x); public @ByRef kZeros get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kZeros get0(@ByRef GridSamplePaddingMode container); + @Namespace @Name("std::get<0>") public static native @ByRef kZeros get0(@ByRef GridSamplePaddingMode container); @ValueSetter public native GridSamplePaddingMode put(@ByRef kZeros value); public @ByRef kBorder get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kBorder get1(@ByRef GridSamplePaddingMode container); + @Namespace @Name("std::get<1>") public static native @ByRef kBorder get1(@ByRef GridSamplePaddingMode container); @ValueSetter public native GridSamplePaddingMode put(@ByRef kBorder value); public @ByRef kReflection get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kReflection get2(@ByRef GridSamplePaddingMode container); + @Namespace @Name("std::get<2>") public static native @ByRef kReflection get2(@ByRef GridSamplePaddingMode container); @ValueSetter public native GridSamplePaddingMode put(@ByRef kReflection value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormFuncOptions.java index 2e65b7a201b..317d2492f44 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java index cec8a109970..eae47f85155 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -39,7 +39,7 @@ public class GroupNormImpl extends GroupNormImplCloneable { public GroupNormImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public GroupNormImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public GroupNormImpl(@Cast("int64_t") long num_groups, @Cast("int64_t") long num_channels) { super((Pointer)null); allocate(num_groups, num_channels); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long num_groups, @Cast("int64_t") long num_channels); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java index 7b5f00e5860..ada151423ef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class GroupNormImplCloneable extends Module { public GroupNormImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public GroupNormImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GroupNormImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormOptions.java index b5e5df217a2..3847a1729c6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GumbelSoftmaxFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GumbelSoftmaxFuncOptions.java index c2600fdf380..bc4d44181e8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GumbelSoftmaxFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GumbelSoftmaxFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksArgs.java index a043a948ae4..1c24ac7573b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksArgs.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksArgs.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksInterface.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksInterface.java index 7d304e7d777..af34f7bc116 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksInterface.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Half.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Half.java index 61091400e45..f82960077e1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Half.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Half.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HalfArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HalfArrayRef.java index dd142c5d4e0..3e91f84d355 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HalfArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HalfArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HalfComplex.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HalfComplex.java index fbecae5ed4c..366d336e62f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HalfComplex.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HalfComplex.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java index 356eb9c69c4..8d60bee19e9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class HardshrinkImpl extends HardshrinkImplCloneable { public HardshrinkImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public HardshrinkImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public HardshrinkImpl(@Const @ByRef(nullValue = "torch::nn::HardshrinkOptions{}") HardshrinkOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::HardshrinkOptions{}") HardshrinkOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java index 866569ba30a..fcbd6e94a18 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class HardshrinkImplCloneable extends Module { public HardshrinkImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public HardshrinkImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HardshrinkImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkOptions.java index fce32301a55..81c978e4c4b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java index f890d563379..38f17d60868 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -39,7 +39,7 @@ public class HardtanhImpl extends HardtanhImplCloneable { public HardtanhImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public HardtanhImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public HardtanhImpl(@Const @ByRef(nullValue = "torch::nn::HardtanhOptions{}") HardtanhOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::HardtanhOptions{}") HardtanhOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java index 1d78ea141ba..2409ee6a32e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class HardtanhImplCloneable extends Module { public HardtanhImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public HardtanhImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HardtanhImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhOptions.java index 42999b020a9..12d97fa8d18 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValueMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValueMap.java index 50bff800f11..e55a205d539 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValueMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValueMap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValues.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValues.java index 0af21b7c45a..c95c706878c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValues.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValues.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HermeticPyObjectTLS.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/HermeticPyObjectTLS.java index a99f9123884..e9b79e0f566 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HermeticPyObjectTLS.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HermeticPyObjectTLS.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java index ed953f2959d..bd6b1c9416c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -41,7 +41,7 @@ public class HingeEmbeddingLossImpl extends HingeEmbeddingLossImplCloneable { public HingeEmbeddingLossImpl(Pointer p) { super(p); } /** Downcast constructor. */ public HingeEmbeddingLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public HingeEmbeddingLossImpl(@ByVal(nullValue = "torch::nn::HingeEmbeddingLossOptions{}") HingeEmbeddingLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::HingeEmbeddingLossOptions{}") HingeEmbeddingLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java index da8185fa3a5..08dec534cb0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class HingeEmbeddingLossImplCloneable extends Module { public HingeEmbeddingLossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public HingeEmbeddingLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HingeEmbeddingLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossOptions.java index 4fb6d542d7c..5396d9eb270 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java index fb47d8a1536..43601fadd89 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -40,7 +40,7 @@ public class HuberLossImpl extends HuberLossImplCloneable { public HuberLossImpl(Pointer p) { super(p); } /** Downcast constructor. */ public HuberLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public HuberLossImpl(@ByVal(nullValue = "torch::nn::HuberLossOptions{}") HuberLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::HuberLossOptions{}") HuberLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java index 435163e4b24..ed3bc534847 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class HuberLossImplCloneable extends Module { public HuberLossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public HuberLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HuberLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossOptions.java index 188c8c22361..4c26ae4f9f1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IMethod.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IMethod.java index e947d134ae1..eed59590dfe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IMethod.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IMethod.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -18,12 +18,13 @@ import static org.bytedeco.pytorch.global.torch.*; -@Namespace("torch") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Namespace("torch") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class IMethod extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IMethod(Pointer p) { super(p); } + public native @ByRef @Name("operator =") IMethod put(@Const @ByRef IMethod arg0); public native @ByVal @Name("operator ()") IValue apply( @ByVal IValueVector args, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IPUHooksArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IPUHooksArgs.java new file mode 100644 index 00000000000..16128406487 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IPUHooksArgs.java @@ -0,0 +1,27 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("at") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class IPUHooksArgs extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public IPUHooksArgs() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public IPUHooksArgs(Pointer p) { super(p); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IPUHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IPUHooksInterface.java new file mode 100644 index 00000000000..c664a2f4f84 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IPUHooksInterface.java @@ -0,0 +1,46 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class IPUHooksInterface extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public IPUHooksInterface() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public IPUHooksInterface(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public IPUHooksInterface(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public IPUHooksInterface position(long position) { + return (IPUHooksInterface)super.position(position); + } + @Override public IPUHooksInterface getPointer(long i) { + return new IPUHooksInterface((Pointer)this).offsetAddress(i); + } + + + public native @Const @ByRef Generator getDefaultIPUGenerator( + @Cast("c10::DeviceIndex") byte device_index/*=-1*/); + public native @Const @ByRef Generator getDefaultIPUGenerator(); + + public native @ByVal Generator newIPUGenerator(@Cast("c10::DeviceIndex") byte device_index/*=-1*/); + public native @ByVal Generator newIPUGenerator(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IStreamAdapter.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IStreamAdapter.java index 5e6fa1b52d2..ddd551112b6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IStreamAdapter.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IStreamAdapter.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java index 35c15328725..8a03b49384b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -54,7 +54,7 @@ * // {@code my_ivalue} is tagged as an int and cannot be used as another type * torch::Tensor my_tensor = my_ivalue.toTensor(); * \endrst */ -@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class IValue extends Pointer { 
static { Loader.load(); } @@ -105,7 +105,7 @@ public class IValue extends Pointer { */ public native @Cast("bool") boolean is(@Const @ByRef IValue rhs); - /** + /** * Hashing for IValues. Returns an IValue-boxed int. * * Some notes: @@ -236,24 +236,24 @@ public class IValue extends Pointer { public IValue(@Cast("int64_t") long i) { super((Pointer)null); allocate(i); } private native void allocate(@Cast("int64_t") long i); - public IValue(@ByVal SymInt i) { super((Pointer)null); allocate(i); } - private native void allocate(@ByVal SymInt i); + public IValue(@Const @ByRef SymInt i) { super((Pointer)null); allocate(i); } + private native void allocate(@Const @ByRef SymInt i); public native @Cast("bool") boolean isSymInt(); public native @ByVal SymInt toSymInt(); - public IValue(@ByVal SymFloat i) { super((Pointer)null); allocate(i); } - private native void allocate(@ByVal SymFloat i); + public IValue(@Const @ByRef SymFloat i) { super((Pointer)null); allocate(i); } + private native void allocate(@Const @ByRef SymFloat i); public native @Cast("bool") boolean isSymFloat(); public native @ByVal SymFloat toSymFloat(); - public IValue(@ByVal SymBool i) { super((Pointer)null); allocate(i); } - private native void allocate(@ByVal SymBool i); + public IValue(@Const @ByRef SymBool i) { super((Pointer)null); allocate(i); } + private native void allocate(@Const @ByRef SymBool i); public native @Cast("bool") boolean isSymBool(); @@ -358,9 +358,12 @@ public class IValue extends Pointer { public native @ByVal GenericDict toGenericDict(); // ClassType + public IValue(@ByVal ObjPtr v) { super((Pointer)null); allocate(v); } + private native void allocate(@ByVal ObjPtr v); public native @Cast("bool") boolean isObject(); - public native @ByVal @Cast("c10::intrusive_ptr*") Pointer toObject(); + public native @ByVal ObjPtr toObject(); + public native @ByRef Object toObjectRef(); public native @Cast("bool") boolean isModule(); @@ -484,9 +487,7 @@ public class IValue extends Pointer { // This is different from `repr()` in that there is no expectation that we can // exactly reconstruct an IValue from the output; feel free to use a // concise/pretty form - private static native @Namespace @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( - @Cast("std::ostream*") @ByRef Pointer out, - @Const @ByRef IValue v); + private static native @Namespace @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef IValue v); public Pointer shiftLeft(Pointer out) { return shiftLeft(out, this); } public native @Cast("bool") boolean isPtrType(); @@ -514,4 +515,12 @@ public class IValue extends Pointer { @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); public native @ByVal IValue deepcopy( @ByRef HashAliasedIValueMap memo); + // Don't edit this just to add results for new tags; edit + // isIntrusivePtrConstexpr above. + public native @Cast("bool") boolean isIntrusivePtr(); + + // Storage and Generator were treated specially when + // is_intrusive_ptr was stored as explicit state. This getter + // preserves the old behavior for use with WeakIValue for now. 
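(Illustration, not part of the patch: the IValue hunk above keeps the long constructor and adds the intrusive-pointer queries, so the new surface can be exercised using only APIs declared in this file. A minimal sketch; the variable names are hypothetical.)

    import org.bytedeco.pytorch.IValue;

    IValue iv = new IValue(42L);                        // tagged as int64_t
    boolean sym = iv.isSymInt();                        // false for a plain int
    boolean ptr = iv.isIntrusivePtr();                  // added in this hunk
    boolean legacy = iv.isIntrusivePtrLegacyBehavior(); // old Storage/Generator behavior
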
+ public native @Cast("bool") boolean isIntrusivePtrLegacyBehavior(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueArrayRef.java index c9561763676..bf2d206c354 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptional.java index df2038f920b..98bacdca838 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptionalVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptionalVector.java index f80b6fbd52e..eb97c90a6a0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptionalVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptionalVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueVector.java index 04ab1aa72da..6d5e09970ae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Ident.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Ident.java index 986f56bf9d2..ebac329b4b4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Ident.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Ident.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentList.java index 8bce187b602..a6d436638af 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentListIterator.java index d3310410790..5413c4b6271 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentListIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentListIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java index d02743b0f6f..2d6e9f01562 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -33,7 +33,7 @@ public class IdentityImpl extends IdentityImplCloneable { @SharedPtr @Name("std::make_shared") private native void allocate(); /** Downcast constructor. */ public IdentityImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java index 18b0d2a7d32..98c59a911bb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class IdentityImplCloneable extends Module { public IdentityImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public IdentityImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr IdentityImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/If.java b/pytorch/src/gen/java/org/bytedeco/pytorch/If.java index e77ccfe3248..d8b11fd5395 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/If.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/If.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IncludeDispatchKeyGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IncludeDispatchKeyGuard.java index aee3da4b34f..ec2668bfb0c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IncludeDispatchKeyGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IncludeDispatchKeyGuard.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IndexError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IndexError.java index f9581cdb3de..df13f6e1eac 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IndexError.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/IndexError.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -16,7 +16,7 @@ import static org.bytedeco.openblas.global.openblas.*; import static org.bytedeco.pytorch.global.torch.*; - // namespace WarningUtils + // Used in ATen for out-of-bound indices that can reasonably only be detected // lazily inside a kernel (See: advanced indexing). These turn into diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java index ea7f3c815cc..af26c21b570 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InferredType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InferredType.java index e76c313fa48..7509a5dfd27 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InferredType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InferredType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStack.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStack.java index 4702d6f6253..d63e5642add 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStack.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStack.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,6 +38,18 @@ private native void allocate( @ByVal SourceRange source_range, @ByVal ModuleInstanceInfoOptional module_instance_info); + // Constructor for a leaf callstack node. + public InlinedCallStack( + Function fn, + @ByVal SourceRange source_range, + @ByVal ModuleInstanceInfoOptional module_instance_info, + @StdString @ByRef BytePointer function_name) { super((Pointer)null); allocate(fn, source_range, module_instance_info, function_name); } + private native void allocate( + Function fn, + @ByVal SourceRange source_range, + @ByVal ModuleInstanceInfoOptional module_instance_info, + @StdString @ByRef BytePointer function_name); + // Constructor for an inner callstack node. 
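(Illustration, not part of the patch: the new leaf-node overload above takes the function name at construction time. A minimal sketch; fn, range and info are assumed to be pre-existing Function, SourceRange and ModuleInstanceInfoOptional objects from these presets, and "forward" is an arbitrary name.)

    import org.bytedeco.javacpp.BytePointer;
    import org.bytedeco.pytorch.InlinedCallStack;

    InlinedCallStack leaf = new InlinedCallStack(
            fn, range, info, new BytePointer("forward"));
    BytePointer name = leaf.function_name();   // accessor declared further down in this class
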
public InlinedCallStack( @ByVal @Cast("torch::jit::InlinedCallStackPtr*") Pointer callee, @@ -59,6 +71,19 @@ private native void allocate( @ByVal SourceRange source_range, @ByVal ModuleInstanceInfoOptional module_instance_info); + public InlinedCallStack( + @ByVal @Cast("torch::jit::InlinedCallStackPtr*") Pointer callee, + Function fn, + @ByVal SourceRange source_range, + @ByVal ModuleInstanceInfoOptional module_instance_info, + @StdString @ByRef BytePointer function_name) { super((Pointer)null); allocate(callee, fn, source_range, module_instance_info, function_name); } + private native void allocate( + @ByVal @Cast("torch::jit::InlinedCallStackPtr*") Pointer callee, + Function fn, + @ByVal SourceRange source_range, + @ByVal ModuleInstanceInfoOptional module_instance_info, + @StdString @ByRef BytePointer function_name); + // Return next element in the callstack list. public native @ByVal @Cast("c10::optional*") InlinedCallStackOptional callee(); @@ -70,9 +95,6 @@ private native void allocate( public native Function function(); - public native void set_function_name(@StdString BytePointer fn_name); - public native void set_function_name(@StdString String fn_name); - public native @StdString BytePointer function_name(); // Return callstack as a vector of [Function, SourceRange] pairs. diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStackOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStackOptional.java index 7283809ba2d..0f4fcfa6110 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStackOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStackOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -22,14 +22,12 @@ public class InlinedCallStackOptional extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public InlinedCallStackOptional(Pointer p) { super(p); } - public InlinedCallStackOptional(InlinedCallStack value) { this(); put(value); } public InlinedCallStackOptional() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef InlinedCallStackOptional put(@ByRef InlinedCallStackOptional x); + public native boolean has_value(); public native void reset(); public native @Name("value") @ByRef InlinedCallStack get(); - @ValueSetter public native InlinedCallStackOptional put(@ByRef InlinedCallStack value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InputArchive.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InputArchive.java index 90812391908..648a08863a5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InputArchive.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InputArchive.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java index 2e9e6a8a2f6..1f810106ba7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -45,6 +45,6 @@ public class InstanceNorm1dImpl extends InstanceNorm1dImplBase { public InstanceNorm1dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public InstanceNorm1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java index 0e2aea3f115..ed0926b221f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -26,7 +26,7 @@ public class InstanceNorm1dImplBase extends InstanceNorm1dImplBaseBase { public InstanceNorm1dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public InstanceNorm1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java index 92f380bfb5c..457810710c8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class InstanceNorm1dImplBaseBase extends InstanceNorm1dImplCloneable { public InstanceNorm1dImplBaseBase(Pointer p) { super(p); } /** Downcast constructor. */ public InstanceNorm1dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java index f5f501313f6..fee8898b975 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class InstanceNorm1dImplCloneable extends Module { public InstanceNorm1dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public InstanceNorm1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr InstanceNorm1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java index 35251edf667..a5ff4e0c751 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -45,6 +45,6 @@ public class InstanceNorm2dImpl extends InstanceNorm2dImplBase { public InstanceNorm2dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public InstanceNorm2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java index c453f21d158..4c70fe88a62 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class InstanceNorm2dImplBase extends InstanceNorm2dImplBaseBase { public InstanceNorm2dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public InstanceNorm2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java index bb1266995db..555948f48ed 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class InstanceNorm2dImplBaseBase extends InstanceNorm2dImplCloneable { public InstanceNorm2dImplBaseBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public InstanceNorm2dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java index bc019e0b1a2..f5ad0431f1a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class InstanceNorm2dImplCloneable extends Module { public InstanceNorm2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public InstanceNorm2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr InstanceNorm2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java index 98a76d85738..a7b61a92e24 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -45,6 +45,6 @@ public class InstanceNorm3dImpl extends InstanceNorm3dImplBase { public InstanceNorm3dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public InstanceNorm3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java index 5c11cfd7191..b841fba4294 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class InstanceNorm3dImplBase extends InstanceNorm3dImplBaseBase { public InstanceNorm3dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public InstanceNorm3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java index 0c6f79aa806..690c0b76a81 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class InstanceNorm3dImplBaseBase extends InstanceNorm3dImplCloneable { public InstanceNorm3dImplBaseBase(Pointer p) { super(p); } /** Downcast constructor. */ public InstanceNorm3dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java index d8fe8b1c889..fd6487f91a3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class InstanceNorm3dImplCloneable extends Module { public InstanceNorm3dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public InstanceNorm3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr InstanceNorm3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormFuncOptions.java index 67144b3d038..0a53176a984 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormOptions.java index 4d3bb3691d4..8f3b1263360 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Instruction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Instruction.java index 58bd5f3e9bb..8985c698da5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Instruction.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Instruction.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java index 07d574429be..1dab2380e86 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IntOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IntOptional.java index 1cbaaa9612a..a84b4fe3360 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IntOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IntOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IntSizedSmallVectorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IntSizedSmallVectorBase.java index caf84d6c00e..c648b4546de 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IntSizedSmallVectorBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IntSizedSmallVectorBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by 
JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IntType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IntType.java index e797a89bf91..5d62ce747ff 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IntType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IntType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IntTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IntTypePtr.java index d59103eac5d..7cd65569478 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IntTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IntTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InterfaceType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InterfaceType.java index 20ecdb93f0f..4f0b9e1739f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InterfaceType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InterfaceType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpolateFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InterpolateFuncOptions.java index 9fb68078b66..4dcce2e7316 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpolateFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InterpolateFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpolateMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InterpolateMode.java index 78f6d03910a..8cc9b78b307 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpolateMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InterpolateMode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class InterpolateMode extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ @@ -34,25 +34,25 @@ public class InterpolateMode extends Pointer { public native @Name("operator =") @ByRef InterpolateMode put(@ByRef InterpolateMode x); public @ByRef kNearest get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kNearest get0(@ByRef InterpolateMode container); + @Namespace @Name("std::get<0>") public static native @ByRef kNearest get0(@ByRef InterpolateMode container); @ValueSetter public native InterpolateMode put(@ByRef kNearest value); public @ByRef kLinear get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kLinear get1(@ByRef InterpolateMode container); + @Namespace @Name("std::get<1>") public static native @ByRef kLinear get1(@ByRef InterpolateMode container); @ValueSetter public native InterpolateMode put(@ByRef kLinear value); public @ByRef kBilinear get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kBilinear get2(@ByRef InterpolateMode container); + @Namespace @Name("std::get<2>") public static native @ByRef kBilinear get2(@ByRef InterpolateMode container); @ValueSetter public native InterpolateMode put(@ByRef kBilinear value); public @ByRef kBicubic get3() { return get3(this); } - @Namespace @Name("c10::get<3>") public static native @ByRef kBicubic get3(@ByRef InterpolateMode container); + @Namespace @Name("std::get<3>") public static native @ByRef kBicubic get3(@ByRef InterpolateMode container); @ValueSetter public native InterpolateMode put(@ByRef kBicubic value); public @ByRef kTrilinear get4() { return get4(this); } - @Namespace @Name("c10::get<4>") public static native @ByRef kTrilinear get4(@ByRef InterpolateMode container); + @Namespace @Name("std::get<4>") public static native @ByRef kTrilinear get4(@ByRef InterpolateMode container); @ValueSetter public native InterpolateMode put(@ByRef kTrilinear value); public @ByRef kArea get5() { return get5(this); } - @Namespace @Name("c10::get<5>") public static native @ByRef kArea get5(@ByRef InterpolateMode container); + @Namespace @Name("std::get<5>") public static native @ByRef kArea get5(@ByRef InterpolateMode container); @ValueSetter public native InterpolateMode put(@ByRef kArea value); public @ByRef kNearestExact get6() { return get6(this); } - @Namespace @Name("c10::get<6>") public static native @ByRef kNearestExact get6(@ByRef InterpolateMode container); + @Namespace @Name("std::get<6>") public static native @ByRef kNearestExact get6(@ByRef InterpolateMode container); @ValueSetter public native InterpolateMode put(@ByRef kNearestExact value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java index 284163a8028..070b25c1bc2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDataset.java index 6faaeedfe5b..2026396a316 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE 
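(Illustration, not part of the patch: the InterpolateMode hunk above only renames the native accessors from c10::get<N> to std::get<N>; the Java-side put/getN surface is unchanged. A minimal sketch, assuming the wrapper and the torch enumtype classes keep their usual default constructors.)

    import org.bytedeco.pytorch.InterpolateMode;
    import org.bytedeco.pytorch.kBilinear;

    InterpolateMode mode = new InterpolateMode();   // assumed default constructor
    mode.put(new kBilinear());                      // select alternative 2
    kBilinear b = mode.get2();                      // std::get<2> natively
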
package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDatasetBase.java index c008e2fe889..41227baa469 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDatasetBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDatasetBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoader.java index 1e437376b21..295fbc3ba70 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoader.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoaderBase.java index d1235b4ecc2..488b2ee57aa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoaderBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoader.java index 39b89e40dfa..09da0d62c0a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoader.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoaderBase.java index 4ac0fc2ba51..10ca3e2a4e9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoaderBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoader.java index b99e3eefc61..1c2db328bac 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoader.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoaderBase.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoaderBase.java index 9e963c927bc..8a044a5f179 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoaderBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoader.java index 66478f19c1e..73ecc0d78d8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoader.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoaderBase.java index 76df83ddcdf..bfa7dc3d5a0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoaderBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoader.java index 1f7c83f7ae4..a65d8c5402b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoader.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoaderBase.java index 82175a8079e..dc69c0e0228 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoaderBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoader.java index 95b2f7cdd27..cc79ac4eeca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoader.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoaderBase.java index b86dc401aa5..c68992e046a 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoaderBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoader.java index 0c16fe99e8c..059f1d82b21 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoader.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoaderBase.java index dfdb896b847..1debcd6b30e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoaderBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoader.java index 002dd5161b9..a0b1d5666c6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoader.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoaderBase.java index 5bb83c71d57..ca4b35e907b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoaderBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulBatchDataset.java index b3aded08d9b..245c8fc4a08 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulBatchDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoader.java index 6b101317336..39859457979 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoader.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: 
DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoaderBase.java index b93a63c197a..dcc9d7c8fcf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoaderBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataset.java index a0ee6a439f8..261b70c0c41 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDatasetBase.java index 3306253ba28..d0855445e2f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDatasetBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDatasetBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorBatchDataset.java index 03fa9d9ced5..04b6e7ba23b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorBatchDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoader.java index 8c65279e887..7c766f7e78e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoader.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoaderBase.java index 012a27c00c7..fb7bd820ced 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoaderBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataset.java index 77808116dd5..0ba6f97f161 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDatasetBase.java index a21ec5eacc7..e62d876c52d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDatasetBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDatasetBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamBatchDataset.java index 4d144034fbc..5aa418c4099 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamBatchDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoader.java index bdeffd56c9c..8f200364f77 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoader.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoaderBase.java index 3d463fee422..96cc7a19451 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoaderBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataset.java index 143e0a84486..0e0926f475d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorBatchDataset.java index 375614d472f..6a945fbd217 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorBatchDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoader.java index 416a2bf1e16..6207a3bccda 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoader.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoaderBase.java index b8fabaa6f10..d7909b69959 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoaderBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataset.java index f13ffe1a343..bb11245e6e3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java index 2e3156a607f..df70a3e93a9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDataset.java index 2100e8302bb..18748e3c185 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDatasetBase.java index f13fa52006d..2555a2499ce 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDatasetBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDatasetBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java index db3b9fa1372..fe4519db301 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by 
JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -21,6 +21,8 @@ @Name("torch::jit::Module") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class JitModule extends JitObject { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JitModule(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public JitModule(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @@ -54,8 +56,8 @@ public JitModule( private native void allocate( @ByVal QualifiedName arg0, @SharedPtr CompilationUnit cu); - public JitModule(@ByVal @Cast("torch::jit::ModulePtr*") Pointer module_value) { super((Pointer)null); allocate(module_value); } - private native void allocate(@ByVal @Cast("torch::jit::ModulePtr*") Pointer module_value); + public JitModule(@ByVal @Cast("torch::jit::ModulePtr*") ObjPtr module_value) { super((Pointer)null); allocate(module_value); } + private native void allocate(@ByVal @Cast("torch::jit::ModulePtr*") ObjPtr module_value); public native void set_optimized(@Cast("bool") boolean o); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNode.java index a3eb66d1d9e..06face20c73 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeVector.java index d74972e6d77..6c9098e1fa1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java index 6ad559df4b6..63d14aa06cc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java index 6709f88f895..30f4cf43678 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -21,6 +21,8 @@ @Name("torch::jit::Object") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class JitObject extends Pointer { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JitObject(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}.
*/ public JitObject(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @@ -36,13 +38,13 @@ public class JitObject extends Pointer { public JitObject(@Const @ByRef JitObject arg0) { super((Pointer)null); allocate(arg0); } private native void allocate(@Const @ByRef JitObject arg0); public native @ByRef @Name("operator =") JitObject put(@Const @ByRef JitObject arg0); - public JitObject(@ByVal @Cast("torch::jit::ObjectPtr*") Pointer _ivalue) { super((Pointer)null); allocate(_ivalue); } - private native void allocate(@ByVal @Cast("torch::jit::ObjectPtr*") Pointer _ivalue); + public JitObject(@ByVal @Cast("torch::jit::ObjectPtr*") ObjPtr _ivalue) { super((Pointer)null); allocate(_ivalue); } + private native void allocate(@ByVal @Cast("torch::jit::ObjectPtr*") ObjPtr _ivalue); public JitObject(@SharedPtr CompilationUnit cu, @Const @SharedPtr("c10::ClassType") @ByRef ClassType type) { super((Pointer)null); allocate(cu, type); } private native void allocate(@SharedPtr CompilationUnit cu, @Const @SharedPtr("c10::ClassType") @ByRef ClassType type); - public native @ByVal @Cast("torch::jit::ObjectPtr*") Pointer _ivalue(); + public native @ByVal @Cast("torch::jit::ObjectPtr*") ObjPtr _ivalue(); public native @SharedPtr("c10::ClassType") @ByVal ClassType type(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitString.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitString.java index 46fef60203b..4b35fe0237b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitString.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitString.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java index 36e52859ddf..335948d04e0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -39,7 +39,7 @@ public class KLDivLossImpl extends KLDivLossImplCloneable { public KLDivLossImpl(Pointer p) { super(p); }
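Note: the JitModule and JitObject hunks above replace the untyped Pointer in the ModulePtr/ObjectPtr constructors and in _ivalue() with the typed ObjPtr class. A minimal Java sketch (ours, not part of the generated sources) of the resulting round-trip, using only members visible in these hunks; how the module is obtained in the first place (for example through the torch::jit::load binding) is outside the sketch.
import org.bytedeco.pytorch.JitModule;
import org.bytedeco.pytorch.ObjPtr;
public class ObjPtrRoundTripSketch {
    /** Re-wraps a script module from its intrusive-pointer handle. */
    static JitModule rewrap(JitModule module) {
        ObjPtr handle = module._ivalue();  // now returns a typed ObjPtr, not a raw Pointer
        return new JitModule(handle);      // ModulePtr constructor changed in the hunk above
    }
}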
/** Downcast constructor. */ public KLDivLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public KLDivLossImpl(@ByVal(nullValue = "torch::nn::KLDivLossOptions{}") KLDivLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::KLDivLossOptions{}") KLDivLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java index a5bdc1ee671..384cd017f79 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class KLDivLossImplCloneable extends Module { public KLDivLossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public KLDivLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr KLDivLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossOptions.java index 8c890a12346..f54e4656f36 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossReduction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossReduction.java index e160641bc13..0abe9659440 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossReduction.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossReduction.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class KLDivLossReduction extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ @@ -31,16 +31,16 @@ public class KLDivLossReduction extends Pointer { public native @Name("operator =") @ByRef KLDivLossReduction put(@ByRef KLDivLossReduction x); public @ByRef kNone get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kNone get0(@ByRef KLDivLossReduction container); + @Namespace @Name("std::get<0>") public static native @ByRef kNone get0(@ByRef KLDivLossReduction container); @ValueSetter public native KLDivLossReduction put(@ByRef kNone value); public @ByRef kBatchMean get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kBatchMean get1(@ByRef KLDivLossReduction container); + @Namespace @Name("std::get<1>") public static native @ByRef kBatchMean get1(@ByRef KLDivLossReduction container); @ValueSetter public native KLDivLossReduction put(@ByRef kBatchMean value); public @ByRef kSum get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kSum get2(@ByRef KLDivLossReduction container); + @Namespace @Name("std::get<2>") public static native @ByRef kSum get2(@ByRef KLDivLossReduction container); @ValueSetter public native KLDivLossReduction put(@ByRef kSum value); public @ByRef kMean get3() { return get3(this); } - @Namespace @Name("c10::get<3>") public static native @ByRef kMean get3(@ByRef KLDivLossReduction container); + @Namespace @Name("std::get<3>") public static native @ByRef kMean get3(@ByRef KLDivLossReduction container); @ValueSetter public native KLDivLossReduction put(@ByRef kMean value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KernelFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KernelFunction.java index 50953b55ac3..b7c584749f0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KernelFunction.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/KernelFunction.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java index 37fbb699088..36954004930 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -39,7 +39,7 @@ public class L1LossImpl extends L1LossImplCloneable { public L1LossImpl(Pointer p) { super(p); } /** Downcast constructor.
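Note: the KLDivLossReduction hunk above is representative of all the variant wrappers in this patch: only the @Name strings move from c10::variant/c10::get to std::variant/std::get, while the Java surface stays the same. A short sketch (ours), assuming the usual generated per-alternative constructors that sit just above the changed accessors:
import org.bytedeco.pytorch.KLDivLossReduction;
import org.bytedeco.pytorch.kBatchMean;
import org.bytedeco.pytorch.kMean;
public class ReductionVariantSketch {
    public static void main(String[] args) {
        KLDivLossReduction red = new KLDivLossReduction(new kBatchMean());
        kBatchMean tag = red.get1();  // std::get<1>(variant) after this patch
        red.put(new kMean());         // switches the active alternative
        kMean mean = red.get3();      // std::get<3>(variant)
    }
}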
*/ public L1LossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public L1LossImpl(@ByVal(nullValue = "torch::nn::L1LossOptions{}") L1LossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::L1LossOptions{}") L1LossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java index 103daafa4c8..263c18e3666 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class L1LossImplCloneable extends Module { public L1LossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public L1LossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr L1LossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossOptions.java index a64a2f24f7e..1ad151da764 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGS.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGS.java index 0e97e7d5a21..192e446e6fd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGS.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGS.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSOptions.java index 2fa78414e7b..7c7222846a3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSParamState.java index 8598cbce762..05ac147c07e 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSParamState.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java index 8d9f460c3f2..14091780b2a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -44,7 +44,7 @@ public class LPPool1dImpl extends LPPool1dImplBase { public LPPool1dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public LPPool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java index 27b976cbcde..4ffc8f3366d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -28,7 +28,7 @@ public class LPPool1dImplBase extends LPPool1dImplCloneable { public LPPool1dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public LPPool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public LPPool1dImplBase(double norm_type, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(norm_type, kernel_size); } private native void allocate(double norm_type, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java index c242dcc70ff..00aa4022bff 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class LPPool1dImplCloneable extends Module { public LPPool1dImplCloneable(Pointer p) { super(p); } /** Downcast constructor.
*/ public LPPool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LPPool1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dOptions.java index fe3cb61a102..daf4df22add 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java index d8234385ff4..eaee937df4b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -45,7 +45,7 @@ public class LPPool2dImpl extends LPPool2dImplBase { public LPPool2dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public LPPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java index 8db391f09db..1b15025d22e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class LPPool2dImplBase extends LPPool2dImplCloneable { public LPPool2dImplBase(Pointer p) { super(p); } /** Downcast constructor.
*/ public LPPool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public LPPool2dImplBase(double norm_type, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(norm_type, kernel_size); } private native void allocate(double norm_type, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java index db817a67004..fbfdabb7cbe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class LPPool2dImplCloneable extends Module { public LPPool2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public LPPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LPPool2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dOptions.java index b9022cb5e23..b582a6585be 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool3dOptions.java index a04f6b95670..db613a833f9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool3dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LRScheduler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LRScheduler.java index 465a1bd55b8..7bdf0473b43 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LRScheduler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LRScheduler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java index c5ca3b5b37e..1eb5ab36e2b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -39,7 +39,7 @@ public class LSTMCellImpl extends LSTMCellImplBase { public LSTMCellImpl(Pointer p) { super(p); } /** Downcast constructor. */ public LSTMCellImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public LSTMCellImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java index 0caddc5ebfb..474d86e2d1a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class LSTMCellImplBase extends LSTMCellImplCloneable { public LSTMCellImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public LSTMCellImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public LSTMCellImplBase(@Const @ByRef RNNCellOptionsBase options_) { super((Pointer)null); allocate(options_); } private native void allocate(@Const @ByRef RNNCellOptionsBase options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java index 482dc807b85..fcf5dc005a8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class LSTMCellImplCloneable extends Module { public LSTMCellImplCloneable(Pointer p) { super(p); } /** Downcast constructor.
*/ public LSTMCellImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LSTMCellImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellOptions.java index 9113957fc6b..88e36a2c648 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java index 2271f31869c..231a323504a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -39,7 +39,7 @@ public class LSTMImpl extends LSTMImplBase { public LSTMImpl(Pointer p) { super(p); } /** Downcast constructor. */ public LSTMImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public LSTMImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java index c20a0e3150c..27ff0b13b45 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class LSTMImplBase extends LSTMImplCloneable { public LSTMImplBase(Pointer p) { super(p); } /** Downcast constructor.
*/ public LSTMImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public LSTMImplBase(@Const @ByRef RNNOptionsBase options_) { super((Pointer)null); allocate(options_); } private native void allocate(@Const @ByRef RNNOptionsBase options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java index 98e7b1ccd7b..86c0804d600 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class LSTMImplCloneable extends Module { public LSTMImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public LSTMImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LSTMImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMOptions.java index 3c0c1af9d9a..2b8396eb447 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormFuncOptions.java index 644cb23a0ce..29ef7a627fb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java index 6d02477319f..893ce319f44 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -40,7 +40,7 @@ public class LayerNormImpl extends LayerNormImplCloneable { public LayerNormImpl(Pointer p) { super(p); } /** Downcast constructor.
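Note: the dynamic_pointer_cast to static_pointer_cast switch repeated in every *Impl, *ImplBase, and *ImplCloneable hunk affects the downcast constructor that re-wraps a generic torch::nn::Module as its concrete binding. The cast is now unchecked, so the caller must pass a Module that really has the target type. A minimal sketch (ours), using only members visible in the LSTMCell hunks above:
import org.bytedeco.pytorch.LSTMCellImpl;
import org.bytedeco.pytorch.Module;
public class DowncastSketch {
    public static void main(String[] args) {
        LSTMCellImpl cell = new LSTMCellImpl(8, 16);     // (input_size, hidden_size) constructor from the hunk
        Module generic = cell.asModule();                // upcast helper on the Cloneable base
        LSTMCellImpl again = new LSTMCellImpl(generic);  // downcast constructor; no RTTI check anymore
    }
}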
*/ public LayerNormImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public LayerNormImpl(@ByVal @Cast("std::vector*") LongVector normalized_shape) { super((Pointer)null); allocate(normalized_shape); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("std::vector*") LongVector normalized_shape); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java index bf67db1d201..07d4736aac8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class LayerNormImplCloneable extends Module { public LayerNormImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public LayerNormImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LayerNormImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormOptions.java index 02dc79d6b60..b20a2c1c111 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutEnumerationType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutEnumerationType.java index 676aea1fa4c..202dbfc1a82 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutEnumerationType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutEnumerationType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutOptional.java index c6d9d4da321..4884bac4930 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutType.java index 
0c01cb6f933..b98cbd35cae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutTypePtr.java index 848ee8edbeb..503172897cb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java index 2a16e129f3c..d8610884c51 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class LeakyReLUImpl extends LeakyReLUImplCloneable { public LeakyReLUImpl(Pointer p) { super(p); } /** Downcast constructor. */ public LeakyReLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public LeakyReLUImpl(@Const @ByRef(nullValue = "torch::nn::LeakyReLUOptions{}") LeakyReLUOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::LeakyReLUOptions{}") LeakyReLUOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java index 0e1dafedf9e..31ea3bdc611 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class LeakyReLUImplCloneable extends Module { public LeakyReLUImplCloneable(Pointer p) { super(p); } /** Downcast constructor.
*/ public LeakyReLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LeakyReLUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUOptions.java index 142cd189b5f..06a8b01e881 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LegacyTensorConstructor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LegacyTensorConstructor.java index fc4c86201ea..62d9a701436 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LegacyTensorConstructor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LegacyTensorConstructor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Library.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Library.java index cb5b50f73eb..0c021da8d99 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Library.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Library.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -137,6 +137,25 @@ private native void allocate( * m.def("add(Tensor self, Tensor other) -> Tensor"); * } * } */ + + + + /** Declares that for all operators that are subsequently def'ed, their + * abstract impls may be found in the given Python module (pymodule). + * This registers some help text that is used if the abstract impl + * cannot be found. + * + * Args: + * - pymodule: the python module + * - context: We may include this in the error message. */ + + /// + /// + public native @ByRef Library impl_abstract_pystub(@Cast("const char*") BytePointer pymodule, @Cast("const char*") BytePointer context/*=""*/); + public native @ByRef Library impl_abstract_pystub(@Cast("const char*") BytePointer pymodule); + public native @ByRef Library impl_abstract_pystub(String pymodule, String context/*=""*/); + public native @ByRef Library impl_abstract_pystub(String pymodule); + /** Define an operator for a schema and then register an implementation for * it. This is typically what you would use if you aren't planning * on making use of the dispatcher to structure your operator * registration * {@code * TORCH_LIBRARY(myops, m) { * m.def("add", add_fn); * } * } */ + /** Register an implementation for an operator.
You may register multiple * implementations for a single operator at different dispatch keys @@ -206,8 +226,10 @@ private native void allocate( // These overloads cover cases when a SelectiveStr (see Note [Selective // build]) has been disabled at compile time. In that case, don't generate // any code referencing the passed in functions at all. - public native @ByRef Library def(@ByVal DisabledStr arg0); - public native @ByRef Library def(@ByVal EnabledStr raw_schema); + + + + /** Register a fallback implementation for all operators which will be used * if there is not a specific implementation for an operator available. diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LinAlgError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LinAlgError.java index 39f65ebcf6f..bb4096bf787 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LinAlgError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LinAlgError.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java index 7bf73a1f7bb..7665a5457ca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class LinearImpl extends LinearImplCloneable { public LinearImpl(Pointer p) { super(p); } /** Downcast constructor. */ public LinearImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public LinearImpl(@Cast("int64_t") long in_features, @Cast("int64_t") long out_features) { super((Pointer)null); allocate(in_features, out_features); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long in_features, @Cast("int64_t") long out_features); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java index 17982f3c9ea..1b792be3b99 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class LinearImplCloneable extends Module { public LinearImplCloneable(Pointer p) { super(p); } /** Downcast constructor.
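Note: a hedged sketch (ours) of the impl_abstract_pystub binding added in the Library.java hunk above. The Library instance is assumed to come from native registration code, and the module and context strings are hypothetical, not from the patch:
import org.bytedeco.pytorch.Library;
public class PystubSketch {
    static void registerStub(Library m) {
        // String overload added above: impl_abstract_pystub(String pymodule, String context)
        m.impl_abstract_pystub("my_ops.abstract_impls",
                "abstract impls are registered by the my_ops Python package");
    }
}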
*/ public LinearImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LinearImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearOptions.java index 730d2f2aa7b..02f619365c6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ListComp.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ListComp.java index 371c2d054f9..3d9773dda0b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ListComp.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ListComp.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ListLiteral.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ListLiteral.java index 967667d8b2b..0847c7a6ecf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ListLiteral.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ListLiteral.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ListSingleElementType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ListSingleElementType.java index a6cf9ccc08d..6a9863187d2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ListSingleElementType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ListSingleElementType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java index ab75a0f410c..07555921026 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -50,4 +50,5 @@ public class ListType extends ListSingleElementType { public static native @SharedPtr ListType ofComplexDoubles(); public static native @SharedPtr ListType ofBools(); public static native @SharedPtr ListType ofStrings(); + public static native @SharedPtr ListType ofNumbers(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalDispatchKeySet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalDispatchKeySet.java index cbfbe6feda9..62f2fea5ea9 100644 --- 
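Note: the ListType hunk above adds an ofNumbers() factory next to the existing ofComplexDoubles()/ofBools()/ofStrings() statics. A tiny sketch (ours), using only that factory plus the base Pointer API:
import org.bytedeco.pytorch.ListType;
public class ListTypeSketch {
    public static void main(String[] args) {
        ListType numbers = ListType.ofNumbers();  // List[number] in TorchScript terms
        System.out.println(numbers.isNull() ? "unavailable" : "List[number] type created");
    }
}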
a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalDispatchKeySet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalDispatchKeySet.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java index 1d4688158cd..0f341fffa75 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -42,7 +42,7 @@ public class LocalResponseNormImpl extends LocalResponseNormImplCloneable { public LocalResponseNormImpl(Pointer p) { super(p); } /** Downcast constructor. */ public LocalResponseNormImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public LocalResponseNormImpl(@Cast("int64_t") long size) { super((Pointer)null); allocate(size); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java index f0c8eb26358..0fadc9bfa9e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class LocalResponseNormImplCloneable extends Module { public LocalResponseNormImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public LocalResponseNormImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LocalResponseNormImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormOptions.java index e0809c40ff8..b241a366ba3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java index 7fd206e28d1..aa074362dc7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -33,7 +33,7 @@ public class LogSigmoidImpl extends LogSigmoidImplCloneable { @SharedPtr @Name("std::make_shared") private native void allocate(); /** Downcast constructor. */ public LogSigmoidImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java index 130ee5f6632..84029b0f3ad 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class LogSigmoidImplCloneable extends Module { public LogSigmoidImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public LogSigmoidImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LogSigmoidImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxFuncOptions.java index 2fa6f6968e8..cd5c07922fb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java index 6a58a7d9e6b..6a77efc49d1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class LogSoftmaxImpl extends LogSoftmaxImplCloneable { public LogSoftmaxImpl(Pointer p) { super(p); } /** Downcast constructor. */ public LogSoftmaxImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public LogSoftmaxImpl(@Cast("int64_t") long dim) { super((Pointer)null); allocate(dim); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long dim); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java index 8263c965704..cf567ea3bc5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class LogSoftmaxImplCloneable extends Module { public LogSoftmaxImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public LogSoftmaxImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LogSoftmaxImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxOptions.java index 3c5e0c72141..970ae9b2ff1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java index 774c75ef640..d4b950b9ae8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java index 3e74d4df0e1..2d59151673a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongElementReference.java index 91eaa7054b4..fc7ca966b78 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongElementReference.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongElementReference.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongExpandingArrayOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongExpandingArrayOptional.java index cdedd3c782d..af466012305 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongExpandingArrayOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongExpandingArrayOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java index a32be71c61f..9fd2ff51e1a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// 
Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongListIterator.java index 1cba0a9ae82..33f3cb356d2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongListIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongListIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptional.java index 62c132e8523..6997e1a7028 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptionalArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptionalArrayRef.java index f0962313524..67d61803f21 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptionalArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptionalArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptionalVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptionalVector.java index 7a0baa51d7e..3c9e6f9e995 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptionalVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptionalVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorBase.java index 42a8184ce71..c7c35948854 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorCommon.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorCommon.java index 6ea6e9ea79d..d14ae147f24 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorCommon.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorCommon.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorImpl.java index 8220e614fdb..0854b6d714f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// 
Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java index fe7bfe54c04..e8f0e3a3d07 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVector.java index 8d8d4fb8104..91bd426e8e9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorOptional.java index 5e4fe4c9756..a16d2585006 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LossReduction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LossReduction.java index 67ff04f2039..7cd8452bd09 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LossReduction.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LossReduction.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class LossReduction extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ @@ -30,13 +30,13 @@ public class LossReduction extends Pointer { public native @Name("operator =") @ByRef LossReduction put(@ByRef LossReduction x); public @ByRef kNone get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kNone get0(@ByRef LossReduction container); + @Namespace @Name("std::get<0>") public static native @ByRef kNone get0(@ByRef LossReduction container); @ValueSetter public native LossReduction put(@ByRef kNone value); public @ByRef kMean get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kMean get1(@ByRef LossReduction container); + @Namespace @Name("std::get<1>") public static native @ByRef kMean get1(@ByRef LossReduction container); @ValueSetter public native LossReduction put(@ByRef kMean value); public @ByRef kSum get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kSum get2(@ByRef LossReduction container); + @Namespace @Name("std::get<2>") public static native @ByRef kSum get2(@ByRef LossReduction container); @ValueSetter public native LossReduction put(@ByRef kSum value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNIST.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNIST.java index 387617f8948..af750d8ff58 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNIST.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNIST.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java index b49a0dad99e..eca20ed0825 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java index 633d1c47679..21f066ab3df 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java index 85db8a57965..fa64a1a9781 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java index a4c4b8825fc..141e67dc680 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS 
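The LossReduction hunk above tracks PyTorch's migration from c10::variant to std::variant: the @Name annotations on the container and its get<N> accessors now resolve to the standard-library names instead of the c10 shims. A small sketch of the accessor pattern, restricted to methods visible in the hunk (the helper, and the assumption that kSum is default-constructible from Java, are illustrative):

    import org.bytedeco.pytorch.LossReduction;
    import org.bytedeco.pytorch.kSum;

    public class ReductionSketch {
        // put() maps to variant assignment; get2() now maps to std::get<2>,
        // which only succeeds while kSum is the active alternative.
        static kSum forceSum(LossReduction reduction) {
            reduction.put(new kSum());
            return reduction.get2();
        }
    }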
FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoader.java index 992aae326d3..2ef299f71b6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoader.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoaderBase.java index c6e256418b5..dc9d31b0d46 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoaderBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksArgs.java index 5383ed9e768..51b6a473d88 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksArgs.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksArgs.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java index 3552c1a2770..2044cfaa3af 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java index ac605fe7499..3b0d7330dc8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -39,7 +39,7 @@ public class MSELossImpl extends MSELossImplCloneable { public MSELossImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public MSELossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public MSELossImpl(@ByVal(nullValue = "torch::nn::MSELossOptions{}") MSELossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::MSELossOptions{}") MSELossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java index 17352671d59..9032c751d25 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MSELossImplCloneable extends Module { public MSELossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public MSELossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MSELossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossOptions.java index 8b440aba6d6..3ed9281e725 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MTIAHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MTIAHooksInterface.java index 6dd18cf2548..7486498dfe7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MTIAHooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MTIAHooksInterface.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MagicMethod.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MagicMethod.java index 295fb079730..fa2c94a6d65 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MagicMethod.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MagicMethod.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java index 
5e19d4249b2..27228758ca3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -42,7 +42,7 @@ public class MarginRankingLossImpl extends MarginRankingLossImplCloneable { public MarginRankingLossImpl(Pointer p) { super(p); } /** Downcast constructor. */ public MarginRankingLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public MarginRankingLossImpl(@ByVal(nullValue = "torch::nn::MarginRankingLossOptions{}") MarginRankingLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::MarginRankingLossOptions{}") MarginRankingLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java index 671fbe79ccd..8b81ca61bed 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MarginRankingLossImplCloneable extends Module { public MarginRankingLossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public MarginRankingLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MarginRankingLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossOptions.java index 762311327cc..5d5652fe12c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MatchTypeReturn.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MatchTypeReturn.java index f456fa90755..f7d25ee4b62 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MatchTypeReturn.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MatchTypeReturn.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MatchedSchema.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MatchedSchema.java index a439ed93816..90a22bf0ee7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MatchedSchema.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MatchedSchema.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java index 78b1a45cc8e..32b2ea85819 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -44,7 +44,7 @@ public class MaxPool1dImpl extends MaxPool1dImplBase { public MaxPool1dImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public MaxPool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java index a3cc438a916..3f0981fb1b0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -28,7 +28,7 @@ public class MaxPool1dImplBase extends MaxPool1dImplCloneable { public MaxPool1dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public MaxPool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java index 4f5f1c67ec7..7c237cbc398 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MaxPool1dImplCloneable extends Module { public MaxPool1dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public MaxPool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxPool1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dOptions.java index 04b2caf402b..e493d559404 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java index 6b5159ec6ad..ff540157117 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -44,7 +44,7 @@ public class MaxPool2dImpl extends MaxPool2dImplBase { public MaxPool2dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public MaxPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java index 1667f525ddf..d57d7ef0403 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MaxPool2dImplBase extends MaxPool2dImplCloneable { public MaxPool2dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public MaxPool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxPool2dImplBase(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java index 77c9fcdfb24..3baca54ae9a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MaxPool2dImplCloneable extends Module { public MaxPool2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public MaxPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxPool2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dOptions.java index 770b4a44538..2bf9af0cfa1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java index 69c69ebef0f..54f6d4de5b0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -44,7 +44,7 @@ public class MaxPool3dImpl extends MaxPool3dImplBase { public MaxPool3dImpl(Pointer p) { super(p); } /** Downcast constructor. 
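The pooling hunks leave the ExpandingArray<N> constructors untouched: an N-element window is passed as a LongPointer that JavaCPP casts to torch::ExpandingArray<N>*. A hedged construction sketch; the MaxPool2dImpl(LongPointer) constructor mirrors the MaxPool2dImplBase one shown above, and the varargs ones(long...) overload is assumed from the global torch class:

    import org.bytedeco.javacpp.LongPointer;
    import org.bytedeco.pytorch.MaxPool2dImpl;
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class PoolSketch {
        public static void main(String[] args) {
            // ExpandingArray<2> maps to a 2-element LongPointer: a 3x3 window.
            MaxPool2dImpl pool = new MaxPool2dImpl(new LongPointer(3, 3));

            // NCHW input of 1x1x6x6; stride defaults to the kernel size,
            // so the output spatial extent is 2x2.
            Tensor x = ones(1, 1, 6, 6);
            Tensor y = pool.forward(x);
            System.out.println(y.size(2) + "x" + y.size(3)); // 2x2
        }
    }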
*/ public MaxPool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java index cdc44474f56..3dfe436efa8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MaxPool3dImplBase extends MaxPool3dImplCloneable { public MaxPool3dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public MaxPool3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxPool3dImplBase(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java index 9f4f6b74783..7869295f029 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MaxPool3dImplCloneable extends Module { public MaxPool3dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public MaxPool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxPool3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dOptions.java index 75ee814c7a5..9bdefbbf38f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dFuncOptions.java index b2d93c4b3b0..6f684d8e905 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java index 4a7795a879e..b1164c14d6d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -44,7 +44,7 @@ public class MaxUnpool1dImpl extends MaxUnpool1dImplBase { public MaxUnpool1dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public MaxUnpool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward( @Const @ByRef Tensor input, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java index 06b882be119..24d3cb92c40 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -28,7 +28,7 @@ public class MaxUnpool1dImplBase extends MaxUnpool1dImplCloneable { public MaxUnpool1dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public MaxUnpool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxUnpool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java index 18ed0cc3253..09611be8a7c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MaxUnpool1dImplCloneable extends Module { public MaxUnpool1dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public MaxUnpool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxUnpool1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dOptions.java index 632f3a54b5c..a0bfd805d5f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dFuncOptions.java index c8517734900..e1333038a13 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java index 0d049090a50..3368fabf8bb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -44,7 +44,7 @@ public class MaxUnpool2dImpl extends MaxUnpool2dImplBase { 
public MaxUnpool2dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public MaxUnpool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward( @Const @ByRef Tensor input, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java index d44da26b439..91edb8e0192 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MaxUnpool2dImplBase extends MaxUnpool2dImplCloneable { public MaxUnpool2dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public MaxUnpool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxUnpool2dImplBase(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java index f67aa2eae62..2ab1cd9b7d3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MaxUnpool2dImplCloneable extends Module { public MaxUnpool2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public MaxUnpool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxUnpool2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dOptions.java index 77a382dd67c..9d1c3a449dc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dFuncOptions.java index 0591ca52113..283d9b7c1b3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java index ce07df71ff6..a912c6aac12 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -44,7 +44,7 @@ public class MaxUnpool3dImpl extends MaxUnpool3dImplBase { public MaxUnpool3dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public MaxUnpool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward( @Const @ByRef Tensor input, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java index f44e4bd2098..ef0c1fdd526 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MaxUnpool3dImplBase extends MaxUnpool3dImplCloneable { public MaxUnpool3dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public MaxUnpool3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxUnpool3dImplBase(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java index 1f30621b433..ea6b5e74825 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MaxUnpool3dImplCloneable extends Module { public MaxUnpool3dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public MaxUnpool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxUnpool3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dOptions.java index 2bf4b6acd44..f89ba13da46 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatOptional.java index 83b20dca017..43da2339a42 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatType.java index 963e4cff9fb..a59c412cd51 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormattEnumerationType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormattEnumerationType.java index 8ea5ec9b529..37e28e4c732 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormattEnumerationType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormattEnumerationType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryReportingInfoBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryReportingInfoBase.java index 7aeeaf595ad..4a93add074d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryReportingInfoBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryReportingInfoBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java index 1cca14eaa46..1acc4fc3b71 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -30,6 +30,7 @@ public class MetaBase extends Pointer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MetaBase(Pointer p) { super(p); } + public native @ByRef @Name("operator =") MetaBase put(@Const @ByRef MetaBase arg0); public native @Const @ByRef Tensor maybe_get_output(@Cast("int64_t") long output_idx); // Note: [set_output_*] diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Method.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Method.java index 42f25d359fb..c8c04f8a1ad 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Method.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Method.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MethodOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MethodOptional.java index 262b82da080..64196796a93 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MethodOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MethodOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MethodValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MethodValue.java index 80306b8432c..fc464e4dd3f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MethodValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MethodValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java index 8aaf4d0767e..cfd6d798c7c 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -33,7 +33,7 @@ public class MishImpl extends MishImplCloneable { @SharedPtr @Name("std::make_shared") private native void allocate(); /** Downcast constructor. */ public MishImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java index 64e98aeca5b..a4af5d046d6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MishImplCloneable extends Module { public MishImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public MishImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MishImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MobileCode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MobileCode.java index 0aac42d53c5..3702b1f214d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MobileCode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MobileCode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java index 84f0dad568b..14d5a8a8fbb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -79,6 +79,10 @@ public class Module extends Pointer { * time {@code .name()} is invoked. 
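 * <p>The lines added just below bind {@code torch::nn::Module}'s copy constructor and copy
 * assignment ({@code operator =}) to Java. A hedged sketch of what that enables (variable
 * names are illustrative only):
 * <pre>{@code
 * Module a = new Module();
 * Module b = new Module(a); // copy-constructs a new native module from a via make_shared
 * b.put(a);                 // copy-assigns, roughly b = a on the C++ side
 * }</pre>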
*/ public Module() { super((Pointer)null); allocate(); } @SharedPtr @Name("std::make_shared") private native void allocate(); + public Module(@Const @ByRef Module arg0) { super((Pointer)null); allocate(arg0.asModule()); } + @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef Module arg0); + public Module put(Module arg0) { return asModule()._put(arg0.asModule()); } + private native @ByRef @Name("operator =") Module _put(@Const @ByRef Module arg0); /** Returns the name of the {@code Module}. * diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java index 2b4d3218e1d..c70f0a38458 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -81,7 +81,7 @@ public class ModuleDictImpl extends ModuleDictImplCloneable { public ModuleDictImpl(Pointer p) { super(p); } /** Downcast constructor. */ public ModuleDictImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ModuleDictImpl() { super((Pointer)null); allocate(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java index 107fd861f80..c92565281cb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -31,7 +31,7 @@ public class ModuleDictImplCloneable extends Module { public ModuleDictImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ModuleDictImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ModuleDictImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfo.java index a2f98aac51e..cf7908b5b6a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfo.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfoOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfoOptional.java index 9bfa8a660ce..50d24ddc010 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfoOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfoOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java index 1093575841d..a85a4be5188 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -69,7 +69,7 @@ public class ModuleListImpl extends ModuleListImplCloneable { public ModuleListImpl(Pointer p) { super(p); } /** Downcast constructor. */ public ModuleListImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ModuleListImpl() { super((Pointer)null); allocate(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java index 7d8960308c9..c71d0ef5093 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ModuleListImplCloneable extends Module { public ModuleListImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ModuleListImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ModuleListImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModulePolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModulePolicy.java index 5dc85ae5784..bba22126f0d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModulePolicy.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModulePolicy.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java index daee022d763..39091dc6c54 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -42,7 +42,7 @@ public class MultiLabelMarginLossImpl extends MultiLabelMarginLossImplCloneable public MultiLabelMarginLossImpl(Pointer p) { super(p); } /** Downcast constructor. */ public MultiLabelMarginLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public MultiLabelMarginLossImpl(@ByVal(nullValue = "torch::nn::MultiLabelMarginLossOptions{}") MultiLabelMarginLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::MultiLabelMarginLossOptions{}") MultiLabelMarginLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java index 3e365aa398f..ad3df9b4b9b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MultiLabelMarginLossImplCloneable extends Module { public MultiLabelMarginLossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public MultiLabelMarginLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiLabelMarginLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossOptions.java index 50789fab61d..3dedd8a1525 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java index c5e761babd8..8e05af0c6cc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -42,7 +42,7 @@ public class MultiLabelSoftMarginLossImpl extends MultiLabelSoftMarginLossImplCl public MultiLabelSoftMarginLossImpl(Pointer p) { super(p); } /** Downcast constructor. */ public MultiLabelSoftMarginLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public MultiLabelSoftMarginLossImpl( @ByVal(nullValue = "torch::nn::MultiLabelSoftMarginLossOptions{}") MultiLabelSoftMarginLossOptions options_) { super((Pointer)null); allocate(options_); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java index fb36cdedda2..7f625222a72 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MultiLabelSoftMarginLossImplCloneable extends Module { public MultiLabelSoftMarginLossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public MultiLabelSoftMarginLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiLabelSoftMarginLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossOptions.java index faccf54d483..d58f92e2c41 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java index c7651d0d0aa..10ebc98b558 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -42,7 +42,7 @@ public class MultiMarginLossImpl extends MultiMarginLossImplCloneable { public MultiMarginLossImpl(Pointer p) { super(p); } /** Downcast constructor. */ public MultiMarginLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public MultiMarginLossImpl(@ByVal(nullValue = "torch::nn::MultiMarginLossOptions{}") MultiMarginLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::MultiMarginLossOptions{}") MultiMarginLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java index 93ebad6be33..cd4b1562f0f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MultiMarginLossImplCloneable extends Module { public MultiMarginLossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public MultiMarginLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiMarginLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossOptions.java index a1ba90e4ba3..454e69b02b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionForwardFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionForwardFuncOptions.java index 35dfa934617..3e978d73d58 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionForwardFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionForwardFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java index 264d11b99e7..9a70eab0562 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class MultiheadAttentionImpl extends MultiheadAttentionImplCloneable { public MultiheadAttentionImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public MultiheadAttentionImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public MultiheadAttentionImpl(@Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads) { super((Pointer)null); allocate(embed_dim, num_heads); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java index d115cd1d069..b0849693f74 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class MultiheadAttentionImplCloneable extends Module { public MultiheadAttentionImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public MultiheadAttentionImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiheadAttentionImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionOptions.java index 720d7284e0d..56efe685281 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MzZipReaderIterWrapper.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MzZipReaderIterWrapper.java new file mode 100644 index 00000000000..6a6c612829b --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MzZipReaderIterWrapper.java @@ -0,0 +1,27 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("caffe2::serialize") @Opaque 
@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class MzZipReaderIterWrapper extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public MzZipReaderIterWrapper() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public MzZipReaderIterWrapper(Pointer p) { super(p); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java index b7ce4ff3b0c..858a69d77e2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -39,7 +39,7 @@ public class NLLLossImpl extends NLLLossImplCloneable { public NLLLossImpl(Pointer p) { super(p); } /** Downcast constructor. */ public NLLLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public NLLLossImpl(@ByVal(nullValue = "torch::nn::NLLLossOptions{}") NLLLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::NLLLossOptions{}") NLLLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java index f04b772b322..15260d65061 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class NLLLossImplCloneable extends Module { public NLLLossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public NLLLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr NLLLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossOptions.java index 4fee2f8114f..00874140452 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NameMangler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NameMangler.java index d4716b71223..a5e995488b3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NameMangler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NameMangler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java index 96b42e38862..6c711b910dd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedJitModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedJitModule.java index c6bf5dfe9bc..94cdeff700e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedJitModule.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedJitModule.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java index 089808e2710..fb7bc81c397 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java index bcdca1faf95..b3aface5867 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMetaInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMetaInterface.java index a36fea9f086..99d4312ec0f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMetaInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMetaInterface.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTupleConstructor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTupleConstructor.java index f47d0c33f88..2bbe3395c5e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTupleConstructor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTupleConstructor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedType.java index 1ae6311d83c..5fe564485b6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValue.java index 08243c09f50..6d5f49773c2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueArrayRef.java index 6d06af3aed5..b404ed2d2dc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueOptional.java index e87178c0a71..276fe501dcd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamesMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamesMode.java index 181262a4e66..d180cd48b44 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamesMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamesMode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NativeResolver.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/NativeResolver.java index 8b6578885e4..65eb0601da9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NativeResolver.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NativeResolver.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java index d19b2f16b1d..c8ae1e30a43 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -112,5 +112,5 @@ private native void allocate( */ public native @ByVal Tensor get_unsafe_storage_as_tensor(); - public native @Cast("int64_t") long get_buffer_size(); + public native @Cast("size_t") long get_buffer_size(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NoGradGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NoGradGuard.java index fc0830f88bf..3205548b2dc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NoGradGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NoGradGuard.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NoNamesGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NoNamesGuard.java index 3098b707090..3fa62f8ceb4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NoNamesGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NoNamesGuard.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NoTF32Guard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NoTF32Guard.java index d1b91557961..a11a1f33570 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NoTF32Guard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NoTF32Guard.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NoTarget.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NoTarget.java index c51101d07f5..194f99fabed 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NoTarget.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NoTarget.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java index c518bd4a7d6..dd94e37e3c5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -106,7 +106,8 @@ public class Node extends 
Pointer { public native @Cast("uint32_t") @NoException(true) int add_input_metadata( @Const @ByRef TensorOptions options, @ByVal SymIntArrayRef shape, - @Cast("bool") boolean is_tensor_subclass); + @Cast("bool") boolean is_tensor_subclass, + @Cast("bool") boolean is_nested); public native @Cast("uint32_t") @NoException(true) int add_input_metadata(@Const @ByRef Tensor t); @@ -173,6 +174,8 @@ public class Node extends Pointer { * in a new thread */ public native @Cast("uint64_t") @NoException(true) long sequence_nr(); + public native void set_sequence_nr(@Cast("uint64_t") long sequence_nr); + // NOTE [ Topological Number ] // // topological_nr is used to prune branches in the DAG during autograd @@ -271,9 +274,9 @@ public class Node extends Pointer { public native void add_retains_grad_hook( @UniquePtr @ByRef(true) FunctionPreHook pre_hook, - int output_idx); + @Cast("size_t") long output_idx); - public native @UniquePtr @ByVal FunctionPreHook pop_retains_grad_hook(int output_idx); + public native @UniquePtr @ByVal FunctionPreHook pop_retains_grad_hook(@Cast("size_t") long output_idx); public native @ByRef @NoException(true) FunctionPreHookVector pre_hooks(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSet.java index 2a79b154796..55e7c49b566 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSet.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NoneType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NoneType.java index 78324cc45f4..c0937efcabd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NoneType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NoneType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NoneTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NoneTypePtr.java index b01c1d3a982..ae2e7b7df29 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NoneTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NoneTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Nonlinearity.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Nonlinearity.java index 2f4fc568394..3a2964bd9a6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Nonlinearity.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Nonlinearity.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Nonlinearity extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
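 * <p>{@code Nonlinearity} models the C++ variant of the tag types below; this hunk migrates
 * its accessors from {@code c10::variant}/{@code c10::get} to their {@code std::} equivalents.
 * A small sketch based only on the accessors visible in this file ({@code nl} is assumed to be
 * obtained or constructed elsewhere; illustrative only):
 * <pre>{@code
 * Nonlinearity nl = ...;  // an existing Nonlinearity instance
 * nl.put(new kReLU());    // store the kReLU alternative
 * kReLU r = nl.get9();    // read it back; per this file, index 9 corresponds to kReLU
 * }</pre>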
*/ @@ -38,37 +38,37 @@ public class Nonlinearity extends Pointer { public native @Name("operator =") @ByRef Nonlinearity put(@ByRef Nonlinearity x); public @ByRef kLinear get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kLinear get0(@ByRef Nonlinearity container); + @Namespace @Name("std::get<0>") public static native @ByRef kLinear get0(@ByRef Nonlinearity container); @ValueSetter public native Nonlinearity put(@ByRef kLinear value); public @ByRef kConv1D get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kConv1D get1(@ByRef Nonlinearity container); + @Namespace @Name("std::get<1>") public static native @ByRef kConv1D get1(@ByRef Nonlinearity container); @ValueSetter public native Nonlinearity put(@ByRef kConv1D value); public @ByRef kConv2D get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kConv2D get2(@ByRef Nonlinearity container); + @Namespace @Name("std::get<2>") public static native @ByRef kConv2D get2(@ByRef Nonlinearity container); @ValueSetter public native Nonlinearity put(@ByRef kConv2D value); public @ByRef kConv3D get3() { return get3(this); } - @Namespace @Name("c10::get<3>") public static native @ByRef kConv3D get3(@ByRef Nonlinearity container); + @Namespace @Name("std::get<3>") public static native @ByRef kConv3D get3(@ByRef Nonlinearity container); @ValueSetter public native Nonlinearity put(@ByRef kConv3D value); public @ByRef kConvTranspose1D get4() { return get4(this); } - @Namespace @Name("c10::get<4>") public static native @ByRef kConvTranspose1D get4(@ByRef Nonlinearity container); + @Namespace @Name("std::get<4>") public static native @ByRef kConvTranspose1D get4(@ByRef Nonlinearity container); @ValueSetter public native Nonlinearity put(@ByRef kConvTranspose1D value); public @ByRef kConvTranspose2D get5() { return get5(this); } - @Namespace @Name("c10::get<5>") public static native @ByRef kConvTranspose2D get5(@ByRef Nonlinearity container); + @Namespace @Name("std::get<5>") public static native @ByRef kConvTranspose2D get5(@ByRef Nonlinearity container); @ValueSetter public native Nonlinearity put(@ByRef kConvTranspose2D value); public @ByRef kConvTranspose3D get6() { return get6(this); } - @Namespace @Name("c10::get<6>") public static native @ByRef kConvTranspose3D get6(@ByRef Nonlinearity container); + @Namespace @Name("std::get<6>") public static native @ByRef kConvTranspose3D get6(@ByRef Nonlinearity container); @ValueSetter public native Nonlinearity put(@ByRef kConvTranspose3D value); public @ByRef kSigmoid get7() { return get7(this); } - @Namespace @Name("c10::get<7>") public static native @ByRef kSigmoid get7(@ByRef Nonlinearity container); + @Namespace @Name("std::get<7>") public static native @ByRef kSigmoid get7(@ByRef Nonlinearity container); @ValueSetter public native Nonlinearity put(@ByRef kSigmoid value); public @ByRef kTanh get8() { return get8(this); } - @Namespace @Name("c10::get<8>") public static native @ByRef kTanh get8(@ByRef Nonlinearity container); + @Namespace @Name("std::get<8>") public static native @ByRef kTanh get8(@ByRef Nonlinearity container); @ValueSetter public native Nonlinearity put(@ByRef kTanh value); public @ByRef kReLU get9() { return get9(this); } - @Namespace @Name("c10::get<9>") public static native @ByRef kReLU get9(@ByRef Nonlinearity container); + @Namespace @Name("std::get<9>") public static native @ByRef kReLU get9(@ByRef Nonlinearity container); @ValueSetter public native Nonlinearity 
put(@ByRef kReLU value); public @ByRef kLeakyReLU get10() { return get10(this); } - @Namespace @Name("c10::get<10>") public static native @ByRef kLeakyReLU get10(@ByRef Nonlinearity container); + @Namespace @Name("std::get<10>") public static native @ByRef kLeakyReLU get10(@ByRef Nonlinearity container); @ValueSetter public native Nonlinearity put(@ByRef kLeakyReLU value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NormalizeFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NormalizeFuncOptions.java index 407c3e9c5b4..dcb2c86a67e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NormalizeFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NormalizeFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NotImplementedError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NotImplementedError.java index 7d1fb5e4055..d20fe75ede2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NotImplementedError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NotImplementedError.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NumberType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NumberType.java index f702f3f6fdb..f46065557be 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NumberType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NumberType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NumberTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NumberTypePtr.java index 5a24684b97d..aa5f1d7c564 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NumberTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NumberTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksArgs.java index 82adc035548..f8ec14fbebb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksArgs.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksArgs.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksInterface.java index f6a6838dd8b..77ee5d7ae7b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksInterface.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ObjPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ObjPtr.java new file mode 100644 index 00000000000..9fa6a8d2d33 --- /dev/null +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/ObjPtr.java @@ -0,0 +1,150 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ObjPtr extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ObjPtr(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public ObjPtr(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public ObjPtr position(long position) { + return (ObjPtr)super.position(position); + } + @Override public ObjPtr getPointer(long i) { + return new ObjPtr((Pointer)this).offsetAddress(i); + } + + + public ObjPtr() { super((Pointer)null); allocate(); } + @NoException(true) private native void allocate(); + + public ObjPtr(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); + + // This constructor will not increase the ref counter for you. + // We use the tagged dispatch mechanism to explicitly mark this constructor + // to not increase the refcount + public ObjPtr(Object target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); } + @NoException(true) private native void allocate(Object target, @ByVal DontIncreaseRefcount arg1); + + + + public ObjPtr(@ByRef(true) ObjPtr rhs) { super((Pointer)null); allocate(rhs); } + @NoException(true) private native void allocate(@ByRef(true) ObjPtr rhs); + + public native @ByRef @Name("operator =") @NoException(true) ObjPtr put(@ByRef(true) ObjPtr rhs); + + public native @NoException(true) Object get(); + + public native @ByRef @Name("operator *") @NoException(true) Object multiply(); + + public native @Name("operator ->") @NoException(true) Object access(); + + public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); + + public native @NoException(true) void reset(); + + public native @NoException(true) void swap(@ByRef ObjPtr rhs); + + // We do a lot of null-pointer checks in our code, good to have this be cheap. + public native @Cast("bool") @NoException(true) boolean defined(); + + public native @Cast("size_t") @NoException(true) long use_count(); + + public native @Cast("size_t") @NoException(true) long weak_use_count(); + + public native @Cast("bool") @NoException(true) boolean unique(); + + /** + * Returns an owning (!) pointer to the underlying object and makes the + * intrusive_ptr instance invalid. That means the refcount is not decreased. + * You *must* put the returned pointer back into an intrusive_ptr using + * intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs.
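+ * <p>A hedged round-trip sketch using only the methods declared in this file
+ * ({@code obj} stands for any live ObjPtr; illustrative only):
+ * <pre>{@code
+ * Object raw = obj.release();         // obj becomes invalid; refcount is untouched
+ * ObjPtr again = ObjPtr.reclaim(raw); // hand ownership back; do not use raw afterwards
+ * }</pre>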
+ */ + public native @NoException(true) Object release(); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes + * over ownership. That means the refcount is not increased. + * This is the counterpart to intrusive_ptr::release() and the pointer + * passed in *must* have been created using intrusive_ptr::release(). + */ + public static native @ByVal ObjPtr reclaim(Object owning_ptr); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr + * representing a new reference, i.e. the raw pointer retains + * ownership. + */ + public static native @ByVal ObjPtr reclaim_copy(Object owning_ptr); + + /** + * Allocate a heap object with args and wrap it inside an intrusive_ptr and + * incref. This is a helper function to let make_intrusive() access private + * intrusive_ptr constructors. + */ + + /** + * Turn a new instance of TTarget (e.g., literally allocated + * using new TTarget(...)) into an intrusive_ptr. If possible, + * use intrusive_ptr::make instead which statically guarantees + * that the allocation was done properly. + * + * At the moment, the only reason this method exists is because + * pybind11 holder types expect to be able to allocate in + * this way (because pybind11 handles the new allocation itself). + */ + public static native @ByVal ObjPtr unsafe_steal_from_new(Object raw_ptr); + + /** + * Turn an instance of TTarget that should not be reference counted + * (e.g., allocated into an arena with placement new) into an + * intrusive_ptr. This is gratuitously unsafe and should only be + * used if you can guarantee that the pointer will not escape and be + * refcounted as normal. + * + * {@code expected_decrefs} is a debugging parameter: it indicates the + * number of strong owners the intrusive_ptr_target in question is + * expected to get. In most use cases, this will likely be 1. + * + * The reason this method exists is for manually sharing + * StorageImpls across Tensors in the static runtime. It needs + * access to private intrusive_ptr members so that the refcounts can + * be initialized to custom values. + */ + public static native @ByVal ObjPtr unsafe_adapt_non_heap_allocated( + Object raw_ptr, + @Cast("size_t") long expected_decrefs); + + /** + * Turn a **non-owning raw pointer** into an intrusive_ptr. It is + * the moral equivalent of enable_shared_from_this on a shared pointer. + * + * This method is only valid for objects that are already live. If + * you are looking for the moral equivalent of unique_ptr(T*) + * constructor, see steal_from_new.
+ * + * TODO: https://github.com/pytorch/pytorch/issues/56482 + */ + public static native @ByVal ObjPtr unsafe_reclaim_from_nonowning(Object raw_ptr); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Object.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Object.java new file mode 100644 index 00000000000..365858e9bcc --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Object.java @@ -0,0 +1,115 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// User-defined object. +@Name("c10::ivalue::Object") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class Object extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Object(Pointer p) { super(p); } + + // In general, class types hold a shared_ptr to their owning CompilationUnit, + // so that their types and methods do not get deallocated while the class exists. + // However, the CompilationUnit holds ownership of the type's graphs, so + // inserting a constant object into a Graph would create a reference cycle if + // that constant object held a shared_ptr to its CU. For these objects we + // instantiate them with non-owning references to their CU + public Object(@ByVal WeakOrStrongTypePtr type, @Cast("size_t") long numSlots) { super((Pointer)null); allocate(type, numSlots); } + private native void allocate(@ByVal WeakOrStrongTypePtr type, @Cast("size_t") long numSlots); + + public Object(@ByVal StrongTypePtr type, @Cast("size_t") long numSlots) { super((Pointer)null); allocate(type, numSlots); } + private native void allocate(@ByVal StrongTypePtr type, @Cast("size_t") long numSlots); + + public static native @ByVal ObjPtr create( + @ByVal WeakOrStrongTypePtr type, + @Cast("size_t") long numSlots); + + public static native @ByVal ObjPtr create( + @ByVal StrongTypePtr type, + @Cast("size_t") long numSlots); + + public static native @ByVal ObjPtr create(@SharedPtr("c10::ClassType") @ByVal ClassType classType, @Cast("size_t") long numSlots); + + /** + * Slot API. + * + * Attributes are stored as a simple vector so that lookups are fast at + * runtime. A "slot" is just an index into that vector, which can be computed + * statically if you have access to the class type. Use this API if you are + * writing compiler stuff. + */ + public native void setSlot(@Cast("size_t") long slot, @ByVal IValue v); + + public native @Const @ByRef IValue getSlot(@Cast("size_t") long slot); + + public native void unsafeRemoveSlot(@Cast("size_t") long slot); + + /** + * Attribute API. + * + * Wrappers around the slot stuff so that users can access attributes + * directly. Use this API if you are a user. + * + * Note: Unlike in Python, TorchScript must make a distinction between + * attributes (which are IValues) and methods (which are Methods). If you + * want a method, use {@code obj.type()->getMethod()}
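+ *
+ * <p>A minimal sketch of the attribute API (hypothetical usage; {@code obj}
+ * and the {@code IValue v} are assumed to come from elsewhere):
+ * {@code
+ * obj.setAttr("weight", v);          // write an attribute by name
+ * IValue w = obj.getAttr("weight");  // read it back
+ * obj.setSlot(0, v);                 // or address the underlying slot directly
+ * }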
 */ + public native @ByVal IValue getAttr(@StdString BytePointer name); + public native @ByVal IValue getAttr(@StdString String name); + public native void setAttr(@StdString BytePointer name, @ByVal IValue v); + public native void setAttr(@StdString String name, @ByVal IValue v); + // Remove an attribute by name; the caller is responsible for + // the safety of this operation. + // We didn't remove the attribute in the type because the type + // might be shared by multiple objects. + // Therefore, after removing an attribute, the object is in an inconsistent + // state where it has more attribute types in its Type than + // attribute slots; the user must keep the object + // consistent by removing the attribute from the type as well + public native void unsafeRemoveAttr(@StdString BytePointer name); + public native void unsafeRemoveAttr(@StdString String name); + + public native @StdString BytePointer name(); + + public native @Const @ByRef IValueVector slots(); + public native @SharedPtr("c10::ClassType") @ByVal ClassType type(); + + public native @SharedPtr CompilationUnit compilation_unit(); + + public native @ByVal ObjPtr copy_to_weak_compilation_ref(); + + public native void unsafe_make_weak_compilation_ref(); + + public native @ByVal ObjPtr copy(); + + public native @ByVal ObjPtr deepcopy( + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @ByVal ObjPtr deepcopy(); + + public native @ByVal ObjPtr deepcopy( + @ByRef HashAliasedIValueMap memo, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @ByVal ObjPtr deepcopy( + @ByRef HashAliasedIValueMap memo); + + public native @Cast("bool") boolean is_weak_compilation_ref(); + + public native @Cast("bool") boolean is_empty_strong_compilation_ref(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OnnxfiBackendSystemError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OnnxfiBackendSystemError.java index c85d23ee70c..0f850c376dd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OnnxfiBackendSystemError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OnnxfiBackendSystemError.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OpRegistrationListener.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OpRegistrationListener.java index a6151488be0..f5b427c3776 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OpRegistrationListener.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OpRegistrationListener.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java index a34da316398..94b2cf8e10a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,6 +38,10 @@ public class OperandInfo extends Pointer { public OperandInfo(@Cast({"", "c10::MaybeOwned&&"}) @StdMove
TensorBaseMaybeOwned t) { super((Pointer)null); allocate(t); } private native void allocate(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned t); + public OperandInfo(@Const @ByRef OperandInfo arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@Const @ByRef OperandInfo arg0); + public native @ByRef @Name("operator =") OperandInfo put(@Const @ByRef OperandInfo arg0); + /** The data pointer. This may be different from tensor->data_ptr() if the * iterator is split. */ public native Pointer data(); public native OperandInfo data(Pointer setter); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Operation.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Operation.java index e128bd554a0..7e14fdf7ee2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Operation.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Operation.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Operator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Operator.java index cfdb6f2773d..d663dc8eb35 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Operator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Operator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -37,7 +37,7 @@ // An Operator is a thin wrapper around either a pure JIT operator (e.g. prim // ops) or a c10 operator, allowing some common operations and abstracting away // the concrete operator nature. -@Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Operator extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
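 * <p>For instance (hypothetical usage of this JavaCPP pattern): {@code Operator op = new Operator(somePointer);}
 * reinterprets an existing native pointer as an {@code Operator} without copying the underlying object.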
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java index ae25077fffe..caf4c66c3fd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -58,10 +58,7 @@ public class OperatorHandle extends Pointer { public native @ByVal TagArrayRef getTags(); - public native void setReportErrorCallback_(@UniquePtr @ByVal SafePyObject callback); - - public native @Cast("bool") boolean hasTag(Tag tag); - public native @Cast("bool") boolean hasTag(@Cast("at::Tag") int tag); + public native @Cast("bool") boolean hasTag(@Const @ByRef Tag tag); public native void callBoxed(@Cast("c10::Stack*") IValueVector stack); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandleOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandleOptional.java index fb96ea61e37..862b0ef31f8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandleOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandleOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorKernel.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorKernel.java index 2a1a0238166..4b37ec7c434 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorKernel.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorKernel.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorName.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorName.java index d881d072370..02072e0385d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorName.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorName.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorNameOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorNameOptional.java index 7dec005e4ef..0c48f85a086 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorNameOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorNameOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorSet.java index c0c251eefaf..f4429fe1ad0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorSet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorSet.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorVector.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorVector.java index 8c9458bffcf..fa8ce574213 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Optimizer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Optimizer.java index 0a70f99235e..a589c87b0b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Optimizer.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Optimizer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradOptions.java index 9887ef4221b..ad3e66db5e4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradParamState.java index d368e7a9e21..aa7d7d64b3c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradParamState.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamOptions.java index f9e77a548d3..deb1217cc45 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamParamState.java index b800add33e7..b10d2c9ee42 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamParamState.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWOptions.java index dd5402798e5..a3d5dbd23a4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWOptions.java @@ -1,4 +1,4 @@ -// Targeted by 
JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWParamState.java index aa2837db0a9..43c575c2015 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWParamState.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSOptions.java index 66a574aee36..da7db452ea4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSParamState.java index e8c15c5a0f2..e24f54f598d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSParamState.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropOptions.java index 7f171eaf100..1c225e7da78 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropParamState.java index 69c9e6350b6..6bead639efa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropParamState.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDOptions.java index f6545c2cd7c..1a3eaf99a7b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDParamState.java index bfdd35fdca1..9e1c2c58814 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDParamState.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerOptions.java index 9e24a514768..13f688f1b71 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -21,12 +21,14 @@ @Namespace("torch::optim") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class OptimizerOptions extends Pointer { static { Loader.load(); } - /** Default native constructor. */ - public OptimizerOptions() { super((Pointer)null); allocate(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OptimizerOptions(Pointer p) { super(p); } - @UniquePtr @Name("std::make_unique") private native void allocate(); + public OptimizerOptions() { super((Pointer)null); allocate(); } + @UniquePtr @Name("std::make_unique") private native void allocate(); + public OptimizerOptions(@Const @ByRef OptimizerOptions arg0) { super((Pointer)null); allocate(arg0); } + @UniquePtr @Name("std::make_unique") private native void allocate(@Const @ByRef OptimizerOptions arg0); + public native @ByRef @Name("operator =") OptimizerOptions put(@Const @ByRef OptimizerOptions arg0); public native @UniquePtr @ByVal OptimizerOptions clone(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java index fe6e58d6136..37d07afe44e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroupVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroupVector.java index bf3adc679ae..a195ef91e54 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroupVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroupVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamState.java index a7cb2a0629b..d0ae5d9353a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamState.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT 
EDIT THIS FILE package org.bytedeco.pytorch; @@ -22,12 +22,14 @@ @Namespace("torch::optim") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class OptimizerParamState extends Pointer { static { Loader.load(); } - /** Default native constructor. */ - public OptimizerParamState() { super((Pointer)null); allocate(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OptimizerParamState(Pointer p) { super(p); } - @UniquePtr @Name("std::make_unique") private native void allocate(); + public OptimizerParamState() { super((Pointer)null); allocate(); } + @UniquePtr @Name("std::make_unique") private native void allocate(); + public OptimizerParamState(@Const @ByRef OptimizerParamState arg0) { super((Pointer)null); allocate(arg0); } + @UniquePtr @Name("std::make_unique") private native void allocate(@Const @ByRef OptimizerParamState arg0); + public native @ByRef @Name("operator =") OptimizerParamState put(@Const @ByRef OptimizerParamState arg0); public native @UniquePtr @ByVal OptimizerParamState clone(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalDeviceGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalDeviceGuard.java index 5006c64e782..fc259955adb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalDeviceGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalDeviceGuard.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalType.java index 3ed511ef0de..82a57d2e257 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -31,7 +31,7 @@ public class OptionalType extends UnionType { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public OptionalType(Pointer p) { super(p); } - public static native @SharedPtr OptionalType create(@ByVal Type.TypePtr contained); + public static native @SharedPtr OptionalType create(@Const @ByRef Type.TypePtr contained); @MemberGetter public static native TypeKind Kind(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OutOfMemoryError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OutOfMemoryError.java index c224bc743b9..12d5a6198a5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OutOfMemoryError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OutOfMemoryError.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OutputArchive.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OutputArchive.java index afd6c777b47..001aab077c7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OutputArchive.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OutputArchive.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PODLocalDispatchKeySet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PODLocalDispatchKeySet.java index 46544708e04..dff1e8e5f41 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PODLocalDispatchKeySet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PODLocalDispatchKeySet.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java index 0f1c5f218b2..aa280065541 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class PReLUImpl extends PReLUImplCloneable { public PReLUImpl(Pointer p) { super(p); } /** Downcast constructor. 
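 * <p>For instance, a hypothetical downcast of a generic {@code Module} known
 * to wrap a PReLU (e.g. one retrieved from a container):
 * {@code
 * Module m = ...;                      // known to hold a torch::nn::PReLU
 * PReLUImpl prelu = new PReLUImpl(m);  // now resolved via static_pointer_cast
 * }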
*/ public PReLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public PReLUImpl(@Const @ByRef(nullValue = "torch::nn::PReLUOptions{}") PReLUOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::PReLUOptions{}") PReLUOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java index b778518be93..dff4304aca0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class PReLUImplCloneable extends Module { public PReLUImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public PReLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PReLUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUOptions.java index 08e232ecc40..44b2e6d028c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PackedSequence.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PackedSequence.java index aa6df6bfb52..95fe12a8584 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PackedSequence.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PackedSequence.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PadFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PadFuncOptions.java index a3a3377a1e2..53348dc7aa3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PadFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PadFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PaddingMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PaddingMode.java index 9864513fe18..bb8dd0e1dd1 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/PaddingMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PaddingMode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class PaddingMode extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -31,16 +31,16 @@ public class PaddingMode extends Pointer { public native @Name("operator =") @ByRef PaddingMode put(@ByRef PaddingMode x); public @ByRef kConstant get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kConstant get0(@ByRef PaddingMode container); + @Namespace @Name("std::get<0>") public static native @ByRef kConstant get0(@ByRef PaddingMode container); @ValueSetter public native PaddingMode put(@ByRef kConstant value); public @ByRef kReflect get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kReflect get1(@ByRef PaddingMode container); + @Namespace @Name("std::get<1>") public static native @ByRef kReflect get1(@ByRef PaddingMode container); @ValueSetter public native PaddingMode put(@ByRef kReflect value); public @ByRef kReplicate get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kReplicate get2(@ByRef PaddingMode container); + @Namespace @Name("std::get<2>") public static native @ByRef kReplicate get2(@ByRef PaddingMode container); @ValueSetter public native PaddingMode put(@ByRef kReplicate value); public @ByRef kCircular get3() { return get3(this); } - @Namespace @Name("c10::get<3>") public static native @ByRef kCircular get3(@ByRef PaddingMode container); + @Namespace @Name("std::get<3>") public static native @ByRef kCircular get3(@ByRef PaddingMode container); @ValueSetter public native PaddingMode put(@ByRef kCircular value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java index b28620e6d3c..1d06dc41a3c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -40,7 +40,7 @@ public class PairwiseDistanceImpl extends PairwiseDistanceImplCloneable { public PairwiseDistanceImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public PairwiseDistanceImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public PairwiseDistanceImpl(@Const @ByRef(nullValue = "torch::nn::PairwiseDistanceOptions{}") PairwiseDistanceOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::PairwiseDistanceOptions{}") PairwiseDistanceOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java index f241fc5621b..bc37452f466 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class PairwiseDistanceImplCloneable extends Module { public PairwiseDistanceImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public PairwiseDistanceImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PairwiseDistanceImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceOptions.java index 2d8d3df8cc4..1b393c4f8e7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Param.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Param.java index aa75fe52938..ea980cf366f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Param.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Param.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParamList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParamList.java index 5c3e9da8cb5..fc71decfceb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParamList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParamList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/ParamListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParamListIterator.java index 271a4763bdb..41e53b57fc0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParamListIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParamListIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java index 9cc9a728705..c9653f15d05 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -25,7 +25,7 @@ public class ParameterDictImpl extends ParameterDictImplCloneable { public ParameterDictImpl(Pointer p) { super(p); } /** Downcast constructor. */ public ParameterDictImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ParameterDictImpl() { super((Pointer)null); allocate(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java index 97c08d9e25d..489a708f97c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ParameterDictImplCloneable extends Module { public ParameterDictImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ParameterDictImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ParameterDictImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java index c24611978ce..a8226b9074c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ParameterListImpl extends ParameterListImplCloneable { public ParameterListImpl(Pointer p) { super(p); } /** Downcast constructor. */ public ParameterListImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ParameterListImpl() { super((Pointer)null); allocate(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java index a1dcd831eb9..c3af0ff152a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ParameterListImplCloneable extends Module { public ParameterListImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ParameterListImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ParameterListImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterPolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterPolicy.java index bbee2258e1f..e6d40679f47 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterPolicy.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterPolicy.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Pass.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Pass.java index 71133483f81..9a9472aa0b0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Pass.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Pass.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java index 7a345b38839..29133e33be4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java index 0bb3d373db8..2966a3ae01a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -41,7 +41,7 @@ public class PixelShuffleImpl extends PixelShuffleImplCloneable { public PixelShuffleImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public PixelShuffleImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public PixelShuffleImpl(@Const @ByRef PixelShuffleOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef PixelShuffleOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java index 00eb931626b..645974afd88 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class PixelShuffleImplCloneable extends Module { public PixelShuffleImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public PixelShuffleImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PixelShuffleImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleOptions.java index 7a902869f5c..c13fc643205 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java index bf4dd1ea2e0..cb29e266e55 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -40,7 +40,7 @@ public class PixelUnshuffleImpl extends PixelUnshuffleImplCloneable { public PixelUnshuffleImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public PixelUnshuffleImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public PixelUnshuffleImpl(@Const @ByRef PixelUnshuffleOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef PixelUnshuffleOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java index 6ff4012d89c..bbb8a3eb53b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class PixelUnshuffleImplCloneable extends Module { public PixelUnshuffleImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public PixelUnshuffleImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PixelUnshuffleImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleOptions.java index d6e8c96f606..57313a33b26 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDeleteContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDeleteContext.java index 8e7d7399d2c..991c61bca49 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDeleteContext.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDeleteContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PointerPair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PointerPair.java index 07d3d1d5cc3..56e96e03043 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PointerPair.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PointerPair.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PointerPairOptional.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/PointerPairOptional.java index fbc6872af7f..09e453e1574 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PointerPairOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PointerPairOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java index 2450476c31b..8d226726038 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -40,7 +40,7 @@ public class PoissonNLLLossImpl extends PoissonNLLLossImplCloneable { public PoissonNLLLossImpl(Pointer p) { super(p); } /** Downcast constructor. */ public PoissonNLLLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public PoissonNLLLossImpl(@ByVal(nullValue = "torch::nn::PoissonNLLLossOptions{}") PoissonNLLLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::PoissonNLLLossOptions{}") PoissonNLLLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java index d1c5252dbcc..af8d41d4318 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class PoissonNLLLossImplCloneable extends Module { public PoissonNLLLossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public PoissonNLLLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PoissonNLLLossImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossOptions.java index dcca549a389..0731f34639f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PostAccumulateGradHook.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PostAccumulateGradHook.java index a59273e7a19..7159d95e57a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PostAccumulateGradHook.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PostAccumulateGradHook.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -28,4 +28,8 @@ public class PostAccumulateGradHook extends Pointer { // only implemented for python hooks on nodes, registers hook with compiled // autograd public native void compiled_args(@ByRef CompiledNodeArgs args); + + public native void apply_with_saved( + @Cast("torch::autograd::Variable*") @ByRef Tensor arg0, + @ByRef SwapSavedVariables arg1); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PrintValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PrintValue.java index 8249dc8a699..bee670314e7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PrintValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PrintValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksArgs.java index beb69494973..43fba130eee 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksArgs.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksArgs.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksInterface.java index 7a48384c2d6..77289c3c8ae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksInterface.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package 
org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ProfileIValueOp.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ProfileIValueOp.java index b7537abc9ed..2830a978b8b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ProfileIValueOp.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ProfileIValueOp.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ProfilerConfig.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ProfilerConfig.java index ee83032f898..e5bef484936 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ProfilerConfig.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ProfilerConfig.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Property.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Property.java index b6f504aa3f9..1e658418524 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Property.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Property.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyList.java index ea0517de1e4..7de4d3eb0ba 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListIterator.java index e7e64d7a089..74af287e720 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListMaybe.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListMaybe.java index dd2b00fbfe7..286d9661af2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListMaybe.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListMaybe.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyVector.java index b844dd7cf19..83f91b11ca8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreter.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreter.java index 9404e79dd19..e0fc645caf0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreter.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreter.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java index ff3c4484397..ffdc0777002 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -117,8 +117,8 @@ public class PyInterpreterVTable extends Pointer { public native @StdString BytePointer name(); // Run Py_DECREF on a PyObject. We DO NOT assume the GIL is held on call - // See NOTE [PyInterpreter::decref takes an `is_tensor` arg] - public native void decref(@Cast("PyObject*") Pointer pyobj, @Cast("bool") boolean is_tensor); + // See NOTE [PyInterpreter::decref takes a `has_pyobj_slot` arg] + public native void decref(@Cast("PyObject*") Pointer pyobj, @Cast("bool") boolean has_pyobj_slot); // Perform a detach by deferring to the __torch_dispatch__ implementation of // detach, which will also arrange for the PyObject to get copied in this @@ -145,6 +145,15 @@ public native void python_op_registration_trampoline( @Cast("c10::DispatchKey") short arg1, IValueVector stack); + public native void throw_abstract_impl_not_imported_error( + @StdString BytePointer opname, + @Cast("const char*") BytePointer pymodule, + @Cast("const char*") BytePointer context); + public native void throw_abstract_impl_not_imported_error( + @StdString String opname, + String pymodule, + String context); + // Invoke the Python dispatcher to handle this call public native void python_dispatcher( @Const @ByRef OperatorHandle op, @@ -160,6 +169,7 @@ public native void python_dispatcher( public native @ByVal LongArrayRef sizes(@Const TensorImpl self); public native @ByVal SymIntArrayRef sym_sizes(@Const TensorImpl self); public native Layout layout(@Const TensorImpl self); + public native @Cast("int64_t") long numel(@Const TensorImpl self); public native @ByVal SymInt sym_numel(@Const TensorImpl self); public native @ByVal SymIntArrayRef sym_strides(@Const TensorImpl self); public native @ByVal SymInt sym_storage_offset(@Const TensorImpl self); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java index 4a0544ae1f5..43f580b52c8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolderPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolderPtr.java index c41910ada8d..80491b17fd2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolderPtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolderPtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 
1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectType.java index 767996c572a..982f30e59fc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectTypePtr.java index 19868def6e5..236e94e29f8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyTorchStreamReader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyTorchStreamReader.java index e3f66953fb0..33519a27a09 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PyTorchStreamReader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyTorchStreamReader.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -28,36 +28,89 @@ public class PyTorchStreamReader extends Pointer { private native void allocate(@StdString String file_name); public PyTorchStreamReader(@Cast("std::istream*") Pointer in) { super((Pointer)null); allocate(in); } private native void allocate(@Cast("std::istream*") Pointer in); - public PyTorchStreamReader(@SharedPtr ReadAdapterInterface in) { super((Pointer)null); allocate(in); } - private native void allocate(@SharedPtr ReadAdapterInterface in); + public PyTorchStreamReader(@SharedPtr("caffe2::serialize::ReadAdapterInterface") @ByVal ReadAdapterInterface in) { super((Pointer)null); allocate(in); } + private native void allocate(@SharedPtr("caffe2::serialize::ReadAdapterInterface") @ByVal ReadAdapterInterface in); // return dataptr, size public native @ByVal T_DataPtrSizeT_T getRecord(@StdString BytePointer name); public native @ByVal T_DataPtrSizeT_T getRecord(@StdString String name); + // multi-thread getRecord + public native @ByVal T_DataPtrSizeT_T getRecord(@StdString BytePointer name, @ByRef ReadAdapterInterfaceVector additionalReaders); + public native @ByVal T_DataPtrSizeT_T getRecord(@StdString String name, @ByRef ReadAdapterInterfaceVector additionalReaders); // inplace memory writing public native @Cast("size_t") long getRecord(@StdString BytePointer name, Pointer dst, @Cast("size_t") long n); public native @Cast("size_t") long getRecord(@StdString String name, Pointer dst, @Cast("size_t") long n); + // inplace memory writing, multi-threads. + // When additionalReaders is empty, the default behavior is call getRecord(name, dst, n) with default reader + // This approach can be used for reading large tensors. 
+ public native @Cast("size_t") long getRecord(@StdString BytePointer name, Pointer dst, @Cast("size_t") long n, + @ByRef ReadAdapterInterfaceVector additionalReaders); + public native @Cast("size_t") long getRecord(@StdString String name, Pointer dst, @Cast("size_t") long n, + @ByRef ReadAdapterInterfaceVector additionalReaders); public native @Cast("size_t") long getRecord( @StdString BytePointer name, Pointer dst, @Cast("size_t") long n, @Cast("size_t") long chunk_size, Pointer buf, - @Const @ByRef MemCopyFunction memcpy_func); + @Const @ByRef(nullValue = "std::function(nullptr)") MemCopyFunction memcpy_func); + public native @Cast("size_t") long getRecord( + @StdString BytePointer name, + Pointer dst, + @Cast("size_t") long n, + @Cast("size_t") long chunk_size, + Pointer buf); public native @Cast("size_t") long getRecord( @StdString String name, Pointer dst, @Cast("size_t") long n, @Cast("size_t") long chunk_size, Pointer buf, - @Const @ByRef MemCopyFunction memcpy_func); + @Const @ByRef(nullValue = "std::function(nullptr)") MemCopyFunction memcpy_func); + public native @Cast("size_t") long getRecord( + @StdString String name, + Pointer dst, + @Cast("size_t") long n, + @Cast("size_t") long chunk_size, + Pointer buf); + + // Concurrent reading records with multiple readers. + // additionalReaders are additional clients to access the underlying record at different offsets + // and write to different trunks of buffers. + // If the overall size of the tensor is 10, and size of additionalReader is 2. + // The default thread will read [0,4), the additional reader will read [4,8). + // The default reader will read [8,10). + // The default reader will write to buffer[0,4), the additional reader will write to buffer[4,8), + // the additional reader will write to buffer[8,10). + // When additionalReaders is empty, the default behavior is call getRecord(name) with default reader + // This approach can be used for reading large tensors. 
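The comment above pins down the split only through its example; plain ceiling division of the record size across the default reader plus all additional readers reproduces it (size 10, two additional readers: 3 readers total, chunk = ceil(10/3) = 4, giving [0,4), [4,8), [8,10)). A minimal Java sketch of that partitioning, assuming ceiling division is the intended strategy — the partitionRanges helper below is illustrative only and not part of the generated bindings:

// Illustrative sketch, not part of this patch: one plausible way a record is
// split across the default reader and the additional readers, assuming plain
// ceiling division. Reproduces the [0,4), [4,8), [8,10) example above for a
// record of size 10 read with two additional readers.
static long[][] partitionRanges(long totalSize, int additionalReaders) {
    int readers = additionalReaders + 1;              // default reader + extras
    long chunk = (totalSize + readers - 1) / readers; // ceil(totalSize / readers)
    long[][] ranges = new long[readers][2];
    for (int i = 0; i < readers; i++) {
        ranges[i][0] = Math.min(i * chunk, totalSize);       // inclusive start
        ranges[i][1] = Math.min((i + 1) * chunk, totalSize); // exclusive end
    }
    return ranges;
}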
+ public native @Cast("size_t") long getRecordMultiReaders(@StdString BytePointer name, + @ByRef ReadAdapterInterfaceVector additionalReaders, + Pointer dst, @Cast("size_t") long n); + public native @Cast("size_t") long getRecordMultiReaders(@StdString String name, + @ByRef ReadAdapterInterfaceVector additionalReaders, + Pointer dst, @Cast("size_t") long n); + + public native @Cast("size_t") long getRecordSize(@StdString BytePointer name); + public native @Cast("size_t") long getRecordSize(@StdString String name); + public native @Cast("size_t") long getRecordOffset(@StdString BytePointer name); public native @Cast("size_t") long getRecordOffset(@StdString String name); public native @Cast("bool") boolean hasRecord(@StdString BytePointer name); public native @Cast("bool") boolean hasRecord(@StdString String name); public native @ByVal StringVector getAllRecords(); + + public native @ByVal ChunkRecordIterator createChunkReaderIter( + @StdString BytePointer name, + @Cast("const size_t") long recordSize, + @Cast("const size_t") long chunkSize); + public native @ByVal ChunkRecordIterator createChunkReaderIter( + @StdString String name, + @Cast("const size_t") long recordSize, + @Cast("const size_t") long chunkSize); public native @Cast("uint64_t") long version(); public native @StdString BytePointer serializationId(); public native void setShouldLoadDebugSymbol(@Cast("bool") boolean should_load_debug_symbol); + public native void setAdditionalReaderSizeThreshold(@Cast("const size_t") long size); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PythonDispatcherTLS.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PythonDispatcherTLS.java index ee5d537a691..ea87033da3b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PythonDispatcherTLS.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PythonDispatcherTLS.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PythonOp.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PythonOp.java index 65bd69b70ec..b387d8eb4cb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PythonOp.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PythonOp.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PythonTorchFunctionTLS.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PythonTorchFunctionTLS.java index ab9b69b318e..33e525e06ff 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PythonTorchFunctionTLS.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PythonTorchFunctionTLS.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -40,9 +40,9 @@ public class PythonTorchFunctionTLS extends Pointer { public static native void set_disabled_state(@Cast("at::impl::TorchFunctionDisabledState") int disabled_state_); public static native TorchFunctionDisabledState get_disabled_state(); - public static native void push_onto_stack(@SharedPtr SafePyObject mode); - public static native @SharedPtr SafePyObject pop_stack(); - public static native @SharedPtr SafePyObject get_stack_at(@Cast("int64_t") long idx); + public static native void 
push_onto_stack(@SharedPtr("c10::SafePyObject") @ByVal SafePyObject mode); + public static native @Const @SharedPtr("c10::SafePyObject") @ByVal SafePyObject pop_stack(); + public static native @Const @SharedPtr("c10::SafePyObject") @ByRef SafePyObject get_stack_at(@Cast("int64_t") long idx); public static native @Cast("int64_t") long stack_len(); public static native @Const @ByRef PythonTorchFunctionTLS get_state(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QEngineVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QEngineVector.java index afb21c29432..c0de4d12594 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QEngineVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QEngineVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeType.java index b4b49e52981..de1db939eac 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeTypePtr.java index 0e597a53469..728ced32907 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QTensorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QTensorImpl.java index 00248debc47..e57e5a8668e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QTensorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QTensorImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedName.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedName.java index 4a9ae574a95..52516bf8eb0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedName.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedName.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedNameOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedNameOptional.java index 63f595447b0..ccb808c235e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedNameOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedNameOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Quantizer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Quantizer.java index 813887026df..af4d2184621 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/Quantizer.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Quantizer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerPtr.java index 598ff1b8bd5..e90a7ec2907 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerPtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerPtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -16,7 +16,7 @@ import static org.bytedeco.openblas.global.openblas.*; import static org.bytedeco.pytorch.global.torch.*; - + // namespace detail @Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class QuantizerPtr extends Pointer { diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerType.java index 64513ec8776..b9b86e2bd23 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerTypePtr.java index 1fc2ce84808..d0827cf7ecf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSprop.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSprop.java index 3cf1a3f183c..e330bc486d6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSprop.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSprop.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropOptions.java index 8f48e4849e1..bcca215615b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropParamState.java index 9463a53647f..9f73ca865de 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropParamState.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNBaseMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNBaseMode.java index cef7d069f74..749d618fb5f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNBaseMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNBaseMode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class RNNBaseMode extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -31,16 +31,16 @@ public class RNNBaseMode extends Pointer { public native @Name("operator =") @ByRef RNNBaseMode put(@ByRef RNNBaseMode x); public @ByRef kLSTM get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kLSTM get0(@ByRef RNNBaseMode container); + @Namespace @Name("std::get<0>") public static native @ByRef kLSTM get0(@ByRef RNNBaseMode container); @ValueSetter public native RNNBaseMode put(@ByRef kLSTM value); public @ByRef kGRU get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kGRU get1(@ByRef RNNBaseMode container); + @Namespace @Name("std::get<1>") public static native @ByRef kGRU get1(@ByRef RNNBaseMode container); @ValueSetter public native RNNBaseMode put(@ByRef kGRU value); public @ByRef kRNN_TANH get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kRNN_TANH get2(@ByRef RNNBaseMode container); + @Namespace @Name("std::get<2>") public static native @ByRef kRNN_TANH get2(@ByRef RNNBaseMode container); @ValueSetter public native RNNBaseMode put(@ByRef kRNN_TANH value); public @ByRef kRNN_RELU get3() { return get3(this); } - @Namespace @Name("c10::get<3>") public static native @ByRef kRNN_RELU get3(@ByRef RNNBaseMode container); + @Namespace @Name("std::get<3>") public static native @ByRef kRNN_RELU get3(@ByRef RNNBaseMode container); @ValueSetter public native RNNBaseMode put(@ByRef kRNN_RELU value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java index 83f0aa8a8c2..c0fc9b2a0fb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -40,7 +40,7 @@ public class RNNCellImpl extends RNNCellImplBase { public RNNCellImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public RNNCellImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public RNNCellImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java index a61daef7e5a..ea4d4c567ce 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -25,7 +25,7 @@ public class RNNCellImplBase extends RNNCellImplCloneable { public RNNCellImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public RNNCellImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public RNNCellImplBase(@Const @ByRef RNNCellOptionsBase options_) { super((Pointer)null); allocate(options_); } private native void allocate(@Const @ByRef RNNCellOptionsBase options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java index ffe666ced78..23067331246 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class RNNCellImplCloneable extends Module { public RNNCellImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public RNNCellImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr RNNCellImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptions.java index 2455d591038..482658321fd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptionsBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptionsBase.java index 6078be1ef79..08fd5763686 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptionsBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptionsBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java index 44828cd8e83..269f10ae5f0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -39,7 +39,7 @@ public class RNNImpl extends RNNImplBase { public RNNImpl(Pointer p) { super(p); } /** Downcast constructor. */ public RNNImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public RNNImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java index 931320a0021..ce7177d1f03 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -25,7 +25,7 @@ public class RNNImplBase extends RNNImplCloneable { public RNNImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public RNNImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public RNNImplBase(@Const @ByRef RNNOptionsBase options_) { super((Pointer)null); allocate(options_); } private native void allocate(@Const @ByRef RNNOptionsBase options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java index fb4d2daee96..55690e589e7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class RNNImplCloneable extends Module { public RNNImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public RNNImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr RNNImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNNonlinearity.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNNonlinearity.java index 97f89cb4917..e48de79a201 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNNonlinearity.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNNonlinearity.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class RNNNonlinearity extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ @@ -29,10 +29,10 @@ public class RNNNonlinearity extends Pointer { public native @Name("operator =") @ByRef RNNNonlinearity put(@ByRef RNNNonlinearity x); public @ByRef kTanh get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kTanh get0(@ByRef RNNNonlinearity container); + @Namespace @Name("std::get<0>") public static native @ByRef kTanh get0(@ByRef RNNNonlinearity container); @ValueSetter public native RNNNonlinearity put(@ByRef kTanh value); public @ByRef kReLU get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kReLU get1(@ByRef RNNNonlinearity container); + @Namespace @Name("std::get<1>") public static native @ByRef kReLU get1(@ByRef RNNNonlinearity container); @ValueSetter public native RNNNonlinearity put(@ByRef kReLU value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptions.java index 76419cb2f7f..8081ee7e0ad 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptionsBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptionsBase.java index b9f414f006f..73a37d4191c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptionsBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptionsBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUFuncOptions.java index e9e1e2d7ab9..7e6ed3820a7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java index 64807d10fc7..44eda46bae7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class RReLUImpl extends RReLUImplCloneable { public RReLUImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public RReLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public RReLUImpl(@Const @ByRef(nullValue = "torch::nn::RReLUOptions{}") RReLUOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::RReLUOptions{}") RReLUOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java index 3657772498d..1b3ec5d033a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class RReLUImplCloneable extends Module { public RReLUImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public RReLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr RReLUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUOptions.java index 3a76ef50846..fe86aa3baf1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterface.java index 2e25c948daa..d43af897b95 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterface.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterfacePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterfacePtr.java index ff29640222b..487eb30858a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterfacePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterfacePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RRefSingleElementType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefSingleElementType.java index 13ac1e5f625..88a29c8ba2f 100644 
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/RRefSingleElementType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefSingleElementType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RRefType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefType.java index 639d7aeb45e..530d169fedb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RRefType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Raise.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Raise.java index 8f56eedf1ce..dd027cacb29 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Raise.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Raise.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RandomSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RandomSampler.java index dc3f4c8f88b..1e5d1839f17 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RandomSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RandomSampler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RangeValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RangeValue.java index 38647312c62..9738f81a748 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RangeValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RangeValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java index dbe9ee031d5..9b39b0b7d8f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class ReLU6Impl extends ReLU6ImplCloneable { public ReLU6Impl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ReLU6Impl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ReLU6Impl(@Const @ByRef(nullValue = "torch::nn::ReLU6Options{}") ReLU6Options options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::ReLU6Options{}") ReLU6Options options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java index 81fdc07ac5c..fc488ef186a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ReLU6ImplCloneable extends Module { public ReLU6ImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public ReLU6ImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReLU6ImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Options.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Options.java index 53b8e760e8e..bd301894d27 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Options.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Options.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java index 8a92b95a210..78c7e7fc9a1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class ReLUImpl extends ReLUImplCloneable { public ReLUImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ReLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ReLUImpl(@Const @ByRef(nullValue = "torch::nn::ReLUOptions{}") ReLUOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::ReLUOptions{}") ReLUOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java index 1cb4e8b95c8..dccc5ce35cb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ReLUImplCloneable extends Module { public ReLUImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public ReLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReLUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUOptions.java index 20867a5493c..4629f8fcfc6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReadAdapterInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReadAdapterInterface.java index bb70a432d59..6ea233b1063 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReadAdapterInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReadAdapterInterface.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReadAdapterInterfaceVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReadAdapterInterfaceVector.java new file mode 100644 index 00000000000..08c6e30df71 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReadAdapterInterfaceVector.java @@ -0,0 +1,90 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import 
org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ReadAdapterInterfaceVector extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ReadAdapterInterfaceVector(Pointer p) { super(p); } + public ReadAdapterInterfaceVector(ReadAdapterInterface value) { this(1); put(0, value); } + public ReadAdapterInterfaceVector(ReadAdapterInterface ... array) { this(array.length); put(array); } + public ReadAdapterInterfaceVector() { allocate(); } + public ReadAdapterInterfaceVector(long n) { allocate(n); } + private native void allocate(); + private native void allocate(@Cast("size_t") long n); + public native @Name("operator =") @ByRef ReadAdapterInterfaceVector put(@ByRef ReadAdapterInterfaceVector x); + + public boolean empty() { return size() == 0; } + public native long size(); + public void clear() { resize(0); } + public native void resize(@Cast("size_t") long n); + + public ReadAdapterInterface front() { return get(0); } + public ReadAdapterInterface back() { return get(size() - 1); } + @Index(function = "at") public native @SharedPtr("caffe2::serialize::ReadAdapterInterface") ReadAdapterInterface get(@Cast("size_t") long i); + public native ReadAdapterInterfaceVector put(@Cast("size_t") long i, ReadAdapterInterface value); + + public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr("caffe2::serialize::ReadAdapterInterface") ReadAdapterInterface value); + public native @ByVal Iterator erase(@ByVal Iterator pos); + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @SharedPtr("caffe2::serialize::ReadAdapterInterface") @Const ReadAdapterInterface get(); + } + + public ReadAdapterInterface[] get() { + ReadAdapterInterface[] array = new ReadAdapterInterface[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE]; + for (int i = 0; i < array.length; i++) { + array[i] = get(i); + } + return array; + } + @Override public String toString() { + return java.util.Arrays.toString(get()); + } + + public ReadAdapterInterface pop_back() { + long size = size(); + ReadAdapterInterface value = get(size - 1); + resize(size - 1); + return value; + } + public ReadAdapterInterfaceVector push_back(ReadAdapterInterface value) { + long size = size(); + resize(size + 1); + return put(size, value); + } + public ReadAdapterInterfaceVector put(ReadAdapterInterface value) { + if (size() != 1) { resize(1); } + return put(0, value); + } + public ReadAdapterInterfaceVector put(ReadAdapterInterface ... 
array) { + if (size() != array.length) { resize(array.length); } + for (int i = 0; i < array.length; i++) { + put(i, array[i]); + } + return this; + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunction.java index 3d57ecfb6a8..ee9a588789a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunction.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunction.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -109,9 +109,13 @@ public class RecordFunction extends Pointer { // profiling. - // Returns whether this RecordFunction corresponds to an async event orn ot. + // Returns whether this RecordFunction corresponds to an async event or not. public native @Cast("bool") boolean isAsync(); + // Returns whether this RecordFunction corresponds to NCCL metadata collection + // or not. + public native @Cast("bool") boolean isNcclMeta(); + // Internal-only, used to denote out variant used for Static Runtime execution public native @Cast("bool") boolean isStaticRuntimeOutVariant(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbacksEntry.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbacksEntry.java index aa591ba6f89..77034bfa6ce 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbacksEntry.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbacksEntry.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionGuard.java index 4453cf6f936..877c9c96092 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionGuard.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntList.java index 7a389e54bfa..929fc1c030e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntPair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntPair.java index dd1c13e6c67..0f381b8882f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntPair.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntPair.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionTLS.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionTLS.java index 7e34266cb08..04ca80241ee 
100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionTLS.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionTLS.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java index ca435ae24c3..5fdc8ea83fc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -45,6 +45,6 @@ public class ReflectionPad1dImpl extends ReflectionPad1dImplBase { public ReflectionPad1dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public ReflectionPad1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java index 7182b3704b0..508157b4619 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -26,7 +26,7 @@ public class ReflectionPad1dImplBase extends ReflectionPad1dImplCloneable { public ReflectionPad1dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public ReflectionPad1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ReflectionPad1dImplBase(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java index d176a5c53f9..06e71579768 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ReflectionPad1dImplCloneable extends Module { public ReflectionPad1dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
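 * <p>Illustrative aside, not part of the generated file: the hunks in this family swap
 * {@code dynamic_pointer_cast} for {@code static_pointer_cast} in the downcast constructor,
 * so the wrapper no longer checks the runtime type and trusts the caller to pass a module
 * of the matching concrete type. A minimal sketch, assuming an existing instance
 * (names are illustrative):
 * {@code
 *   ReflectionPad1dImplCloneable pad = ...;  // some concrete module instance
 *   Module base = pad.asModule();            // upcast, see asModule below
 *   ReflectionPad1dImplCloneable again = new ReflectionPad1dImplCloneable(base); // downcast constructor
 * }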
*/ public ReflectionPad1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReflectionPad1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dOptions.java index cf8258ef110..625d1b1a865 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java index 02821a5956d..bee6a940547 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -45,6 +45,6 @@ public class ReflectionPad2dImpl extends ReflectionPad2dImplBase { public ReflectionPad2dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public ReflectionPad2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java index f695e02c3fc..6452c2bc22b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ReflectionPad2dImplBase extends ReflectionPad2dImplCloneable { public ReflectionPad2dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ReflectionPad2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ReflectionPad2dImplBase(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java index 7c5e5bc562b..6cf80544d7e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ReflectionPad2dImplCloneable extends Module { public ReflectionPad2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public ReflectionPad2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReflectionPad2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dOptions.java index c618d695cd4..fe3298d8b54 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java index b1242a90276..ede21dcb151 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -46,6 +46,6 @@ public class ReflectionPad3dImpl extends ReflectionPad3dImplBase { public ReflectionPad3dImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ReflectionPad3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java index abce60a4be3..53d50abb9cf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ReflectionPad3dImplBase extends ReflectionPad3dImplCloneable { public ReflectionPad3dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public ReflectionPad3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ReflectionPad3dImplBase(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java index c788f95efd2..8b2c697be02 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ReflectionPad3dImplCloneable extends Module { public ReflectionPad3dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ReflectionPad3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReflectionPad3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dOptions.java index 454390f66d6..7a24110f13c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RegisterOperators.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RegisterOperators.java index bcda18aa6fe..725c5b19fcd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RegisterOperators.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RegisterOperators.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationHandleRAII.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationHandleRAII.java index 9541dc57ba5..b55abb4b3cd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationHandleRAII.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationHandleRAII.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java index d1d48939e4e..1ed82f59743 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -45,6 +45,6 @@ public class ReplicationPad1dImpl extends ReplicationPad1dImplBase { public ReplicationPad1dImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ReplicationPad1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java index 0bf0f52e85b..2e156e39800 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -28,7 +28,7 @@ public class ReplicationPad1dImplBase extends ReplicationPad1dImplCloneable { public ReplicationPad1dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public ReplicationPad1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ReplicationPad1dImplBase(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java index 55b798ae76e..93b1f2756ee 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ReplicationPad1dImplCloneable extends Module { public ReplicationPad1dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ReplicationPad1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReplicationPad1dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dOptions.java index d91e9f111dc..5346f421828 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java index 6669f3f2c71..529582c86c0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -45,6 +45,6 @@ public class ReplicationPad2dImpl extends ReplicationPad2dImplBase { public ReplicationPad2dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public ReplicationPad2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java index 33f991ca370..899b36b857d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ReplicationPad2dImplBase extends ReplicationPad2dImplCloneable { public ReplicationPad2dImplBase(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ReplicationPad2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ReplicationPad2dImplBase(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java index 479e89e402f..5e548c2d57e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ReplicationPad2dImplCloneable extends Module { public ReplicationPad2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public ReplicationPad2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReplicationPad2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dOptions.java index b727848a435..a741c82a25c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java index c4e2968464b..9c081408321 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -45,6 +45,6 @@ public class ReplicationPad3dImpl extends ReplicationPad3dImplBase { public ReplicationPad3dImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ReplicationPad3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java index 60b9f53617d..ab4bceae459 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ReplicationPad3dImplBase extends ReplicationPad3dImplCloneable { public ReplicationPad3dImplBase(Pointer p) { super(p); } /** Downcast constructor. */ public ReplicationPad3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ReplicationPad3dImplBase(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java index acc0ae8a195..578426fce46 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ReplicationPad3dImplCloneable extends Module { public ReplicationPad3dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ReplicationPad3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReplicationPad3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dOptions.java index cb0aa9a1088..96968098c38 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Resolver.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Resolver.java index 591ead5153f..3ee73638c20 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Resolver.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Resolver.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ResolverVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ResolverVector.java index 7e3f8f50908..0207a91aa71 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ResolverVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ResolverVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Result.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Result.java index 1517a56591c..e767b6e3fe0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Result.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Result.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Return.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Return.java index 260dd08154a..6ba6c2019f7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Return.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Return.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java index c01c756131a..ff7bb6acc12 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class SELUImpl extends
SELUImplCloneable { public SELUImpl(Pointer p) { super(p); } /** Downcast constructor. */ public SELUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public SELUImpl(@Const @ByRef(nullValue = "torch::nn::SELUOptions{}") SELUOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::SELUOptions{}") SELUOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java index e60c9300721..e7c4107de73 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class SELUImplCloneable extends Module { public SELUImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public SELUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SELUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUOptions.java index 15f39c08752..be137b3e09c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SGD.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SGD.java index 5a479d022dc..6f3abb3d6bc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SGD.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SGD.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SGDOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SGDOptions.java index dcffe647c70..9ea4c47e1b9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SGDOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SGDOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SGDParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SGDParamState.java index 17d534086e1..a505bf28feb 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/SGDParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SGDParamState.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyHandle.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyHandle.java index 68929bca718..f32f01de6a9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyHandle.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyHandle.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java index ac7c16f12ae..da81c93a472 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,9 +38,9 @@ public class SafePyObject extends Pointer { // Steals a reference to data public SafePyObject(@Cast("PyObject*") Pointer data, PyInterpreter pyinterpreter) { super((Pointer)null); allocate(data, pyinterpreter); } - @UniquePtr @Name("std::make_unique") private native void allocate(@Cast("PyObject*") Pointer data, PyInterpreter pyinterpreter); + @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("PyObject*") Pointer data, PyInterpreter pyinterpreter); public SafePyObject(@ByRef(true) SafePyObject other) { super((Pointer)null); allocate(other); } - @UniquePtr @Name("std::make_unique") private native void allocate(@ByRef(true) SafePyObject other); + @NoException(true) @SharedPtr @Name("std::make_shared") private native void allocate(@ByRef(true) SafePyObject other); // In principle this could be copyable if we add an incref to PyInterpreter // but for now it's easier to just disallow it. diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObjectOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObjectOptional.java new file mode 100644 index 00000000000..d768083f69d --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObjectOptional.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("c10::optional >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class SafePyObjectOptional extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
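+ * <p>Illustrative note, not generated output: this class wraps a
+ * {@code c10::optional} holding a {@code shared_ptr<c10::SafePyObject>}. A minimal
+ * usage sketch relying only on the accessors declared below:
+ * {@code
+ *   SafePyObjectOptional opt = new SafePyObjectOptional(); // starts empty
+ *   if (opt.has_value()) {
+ *       SafePyObject obj = opt.get();                      // maps c10::optional::value()
+ *   }
+ *   opt.reset();                                           // clears the optional
+ * }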
*/ + public SafePyObjectOptional(Pointer p) { super(p); } + public SafePyObjectOptional(SafePyObject value) { this(); put(value); } + public SafePyObjectOptional() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef SafePyObjectOptional put(@ByRef SafePyObjectOptional x); + + public native boolean has_value(); + public native void reset(); + public native @Name("value") @SharedPtr("c10::SafePyObject") SafePyObject get(); + @ValueSetter public native SafePyObjectOptional put(@SharedPtr("c10::SafePyObject") SafePyObject value); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Sampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Sampler.java index ab90b1c138b..c8b54ed7856 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Sampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Sampler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooks.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooks.java index 1d5f5706d0c..f83fe7f1bf4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooks.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooks.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooksTLS.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooksTLS.java index 3a7c7983349..c7b05f12613 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooksTLS.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooksTLS.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableHooks.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableHooks.java index 19761737808..d2e499a431f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableHooks.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableHooks.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java index 57ff5f782b4..f9c108d5216 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -69,6 +69,12 @@ public class Scalar extends Pointer { public Scalar(@ByVal Float8_e4m3fn vv) { super((Pointer)null); allocate(vv); } private native void allocate(@ByVal Float8_e4m3fn vv); + + public Scalar(@ByVal Float8_e5m2fnuz vv) { super((Pointer)null); allocate(vv); } + private native void allocate(@ByVal Float8_e5m2fnuz vv); + + public Scalar(@ByVal Float8_e4m3fnuz vv) { super((Pointer)null); allocate(vv); } + private native void allocate(@ByVal Float8_e4m3fnuz
vv); public Scalar(@ByVal FloatComplex vv) { super((Pointer)null); allocate(vv); } private native void allocate(@ByVal FloatComplex vv); public Scalar(@ByVal DoubleComplex vv) { super((Pointer)null); allocate(vv); } @@ -92,11 +98,14 @@ public class Scalar extends Pointer { // } else if (Tag::HAS_i == tag) { // return checked_convert(v.i, #type); // } else if (Tag::HAS_si == tag) { -// TORCH_CHECK(false, "tried to get " #name " out of SymInt") +// return checked_convert( +// toSymInt().guard_int(__FILE__, __LINE__), #type); // } else if (Tag::HAS_sd == tag) { -// TORCH_CHECK(false, "tried to get " #name " out of SymFloat") +// return checked_convert( +// toSymFloat().guard_float(__FILE__, __LINE__), #type); // } else if (Tag::HAS_sb == tag) { -// TORCH_CHECK(false, "tried to get " #name " out of SymBool") +// return checked_convert( +// toSymBool().guard_bool(__FILE__, __LINE__), #type); // } // TORCH_CHECK(false) // } @@ -117,6 +126,8 @@ public class Scalar extends Pointer { public native @ByVal BFloat16 toBFloat16(); public native @ByVal Float8_e5m2 toFloat8_e5m2(); public native @ByVal Float8_e4m3fn toFloat8_e4m3fn(); + public native @ByVal Float8_e5m2fnuz toFloat8_e5m2fnuz(); + public native @ByVal Float8_e4m3fnuz toFloat8_e4m3fnuz(); // #undef DEFINE_ACCESSOR diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarArrayRef.java index f7e51ea41c1..2ef6a5b2833 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarOptional.java index aa8a2a06330..8f023768391 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeArrayRef.java index 0afec1a60e8..d6a59213441 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeEnumerationType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeEnumerationType.java index 4957fea43c6..4fcf4aabcd0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeEnumerationType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeEnumerationType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeOptional.java index 33054235376..e3983376987 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeType.java index be1e94fe80b..5d433664e07 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeTypePtr.java index 4d6915bd0c5..5469c18274f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeVector.java index cb8fb17c37e..ad0b5fd656d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaArgument.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaArgument.java index b068c9a6d85..2f37d5b84d5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaArgument.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaArgument.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaInfo.java index 8904164d65c..c5bfa2631e5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaInfo.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Scope.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Scope.java index d8e716fcf51..2c67df9774f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Scope.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Scope.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScopeOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScopeOptional.java index 00c88f6dde5..4d679bc4ca2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScopeOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScopeOptional.java @@ 
-1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScriptTypeParser.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScriptTypeParser.java index d67bce7dea9..ef6e282f18e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScriptTypeParser.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScriptTypeParser.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Select.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Select.java index 82e33cf27e0..ee3ea869004 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Select.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Select.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Self.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Self.java index b41e6ffe4e1..71af41a423b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Self.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Self.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java index 1966af683b1..85e598b770b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -93,7 +93,7 @@ public class SequentialImpl extends SequentialImplCloneable { public SequentialImpl(Pointer p) { super(p); } /** Downcast constructor. */ public SequentialImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public SequentialImpl() { super((Pointer)null); allocate(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java index 35439865f4e..f405cc12ba8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class SequentialImplCloneable extends Module { public SequentialImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public SequentialImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SequentialImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialSampler.java index de004f50730..ca93ad89981 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialSampler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbol.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbol.java index 41f74ecce83..91687445d26 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbol.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbol.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVector.java index 78256cb5f05..6bdaafcba7d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVectorOptional.java index 95f0b7b51b6..de110f1f7d9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVectorOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedClassTypeVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedClassTypeVector.java index f96e239e964..8da367f19d3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedClassTypeVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedClassTypeVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedModuleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedModuleVector.java index 2a569a0088f..21d6dc1b331 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedModuleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedModuleVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT
EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedParserData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedParserData.java index f5a483f71e4..7736cac8459 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedParserData.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedParserData.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedSugaredValueVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedSugaredValueVector.java index 1dd76ceef41..a665207a62a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedSugaredValueVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedSugaredValueVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedType.java index 8ac2cf2f372..95cfda501a6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ShortArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ShortArrayRef.java index 6c454bb7a0b..882831086bc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ShortArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ShortArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java index e7620ed4ba5..e2330221441 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -33,7 +33,7 @@ public class SiLUImpl extends SiLUImplCloneable { @SharedPtr @Name("std::make_shared") private native void allocate(); /** Downcast constructor. 
*/ public SiLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java index c9d094b2aab..da71df7e39c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class SiLUImplCloneable extends Module { public SiLUImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public SiLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SiLUImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java index 0c08f296526..3ffe65add00 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -33,7 +33,7 @@ public class SigmoidImpl extends SigmoidImplCloneable { @SharedPtr @Name("std::make_shared") private native void allocate(); /** Downcast constructor. */ public SigmoidImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java index efe0a0e876c..3008eca0dd3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class SigmoidImplCloneable extends Module { public SigmoidImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleSelf.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleSelf.java
index 7d4a7580bb0..fcd2a016189 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleSelf.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleSelf.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleValue.java
index e3c36f784f1..aaf5530d33d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleValue.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleValue.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SingletonTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SingletonTypePtr.java
index bfa6ecc3c81..0a01e31b87e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SingletonTypePtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SingletonTypePtr.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTArrayRef.java
index b3d042c08b8..b10768b2f78 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTArrayRef.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTMatchedSchemaPair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTMatchedSchemaPair.java
index fbece756f44..c20957698ca 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTMatchedSchemaPair.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTMatchedSchemaPair.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTOptional.java
index bd54aa98426..9cff7db01a0 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTOptional.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package
org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVector.java index b1b5eef7409..6ff996f248f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVectorOptional.java index e20a0b24537..f139bd40109 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVectorOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SizesAndStrides.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SizesAndStrides.java index b68e6f17eac..fec178b15ab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SizesAndStrides.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SizesAndStrides.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Slice.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Slice.java index 4f17824b7bb..3d17fbff75b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Slice.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Slice.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SliceExpr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SliceExpr.java index 8b73494d22e..6ac128d68ff 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SliceExpr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SliceExpr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SliceValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SliceValue.java index eb322a8d770..7a0ac49bdec 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SliceValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SliceValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SlotCursor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SlotCursor.java index cb356ef081e..4434397bb6a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SlotCursor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SlotCursor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java index 
f2824581180..bf319e7e133 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -42,7 +42,7 @@ public class SmoothL1LossImpl extends SmoothL1LossImplCloneable {
     public SmoothL1LossImpl(Pointer p) { super(p); }
     /** Downcast constructor. */
     public SmoothL1LossImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::SmoothL1LossImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::SmoothL1LossImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public SmoothL1LossImpl(@ByVal(nullValue = "torch::nn::SmoothL1LossOptions{}") SmoothL1LossOptions options) { super((Pointer)null); allocate(options); }
     @SharedPtr @Name("std::make_shared<torch::nn::SmoothL1LossImpl>") private native void allocate(@ByVal(nullValue = "torch::nn::SmoothL1LossOptions{}") SmoothL1LossOptions options);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java
index cb4c5de623d..74793bb5d7b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -24,7 +24,7 @@ public class SmoothL1LossImplCloneable extends Module {
     public SmoothL1LossImplCloneable(Pointer p) { super(p); }
     /** Downcast constructor. */
     public SmoothL1LossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::Cloneable<torch::nn::SmoothL1LossImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::SmoothL1LossImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     @Override public Module asModule() { return asModule(this); }
     @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::SmoothL1LossImpl>>") Module asModule(@SharedPtr SmoothL1LossImplCloneable pointer);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossOptions.java
index 661880135c6..d346352ca1e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossOptions.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -54,5 +54,5 @@ public class SmoothL1LossOptions extends Pointer {
     public SmoothL1LossOptions(@ByVal kSum reduction) { super((Pointer)null); allocate(reduction); }
     private native void allocate(@ByVal kSum reduction);
     public native @ByRef @NoException(true) LossReduction reduction();
-    public native @ByRef @NoException(true) DoublePointer beta();
+    public native @ByRef @NoException(true) DoubleOptional beta();
 }
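With the hunk above, SmoothL1LossOptions.beta() now maps to a DoubleOptional instead of a raw DoublePointer, suggesting the underlying C++ field is now an optional<double>. A hedged usage sketch; it assumes the default SmoothL1LossOptions constructor and the has_value()/get() accessors that the other generated *Optional wrappers expose:

import org.bytedeco.pytorch.DoubleOptional;
import org.bytedeco.pytorch.SmoothL1LossOptions;

public class BetaProbe {
    public static void main(String[] args) {
        SmoothL1LossOptions opts = new SmoothL1LossOptions();
        DoubleOptional beta = opts.beta();  // previously returned a DoublePointer
        if (beta.has_value()) {             // assumed accessor, mirroring other generated optionals
            System.out.println("beta = " + beta.get());
        }
    }
}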
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java
index 2fc1abdac84..e158a3d291c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -41,7 +41,7 @@ public class SoftMarginLossImpl extends SoftMarginLossImplCloneable {
     public SoftMarginLossImpl(Pointer p) { super(p); }
     /** Downcast constructor. */
     public SoftMarginLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::SoftMarginLossImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::SoftMarginLossImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public SoftMarginLossImpl(@ByVal(nullValue = "torch::nn::SoftMarginLossOptions{}") SoftMarginLossOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared<torch::nn::SoftMarginLossImpl>") private native void allocate(@ByVal(nullValue = "torch::nn::SoftMarginLossOptions{}") SoftMarginLossOptions options_);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java
index c4424b8118c..2d30bfdf0e1 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -24,7 +24,7 @@ public class SoftMarginLossImplCloneable extends Module {
     public SoftMarginLossImplCloneable(Pointer p) { super(p); }
     /** Downcast constructor. */
     public SoftMarginLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::Cloneable<torch::nn::SoftMarginLossImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::SoftMarginLossImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     @Override public Module asModule() { return asModule(this); }
     @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::SoftMarginLossImpl>>") Module asModule(@SharedPtr SoftMarginLossImplCloneable pointer);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossOptions.java
index be886b12569..ca65ac42fdc 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossOptions.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java
index 131e5019145..21f75fff99d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -33,7 +33,7 @@ public class Softmax2dImpl extends Softmax2dImplCloneable {
     @SharedPtr @Name("std::make_shared<torch::nn::Softmax2dImpl>") private native void allocate();
     /** Downcast constructor.
*/ public Softmax2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java index b61eccfdf12..ae8fac057c6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class Softmax2dImplCloneable extends Module { public Softmax2dImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public Softmax2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Softmax2dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxFuncOptions.java index 5fd777fba53..1e022aab676 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java index e3fcced0e2e..ec2af7738da 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class SoftmaxImpl extends SoftmaxImplCloneable { public SoftmaxImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public SoftmaxImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public SoftmaxImpl(@Cast("int64_t") long dim) { super((Pointer)null); allocate(dim); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long dim); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java index 004a6c9faa6..6abcf2438d6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class SoftmaxImplCloneable extends Module { public SoftmaxImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public SoftmaxImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftmaxImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxOptions.java index bb1238e4ade..7043899e71b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminFuncOptions.java index 692925c3ec9..30a71102077 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminFuncOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java index 576c1c9749e..c07747a05f1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class SoftminImpl extends SoftminImplCloneable { public SoftminImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public SoftminImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public SoftminImpl(@Cast("int64_t") long dim) { super((Pointer)null); allocate(dim); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long dim); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java index ad8ead0b99e..3d63cc46fe2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class SoftminImplCloneable extends Module { public SoftminImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public SoftminImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftminImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminOptions.java index ef073ed8eef..f3ff8025ba4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImpl.java index e66f1f7af5a..c584cdce368 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class SoftplusImpl extends SoftplusImplCloneable { public SoftplusImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public SoftplusImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public SoftplusImpl(@Const @ByRef(nullValue = "torch::nn::SoftplusOptions{}") SoftplusOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::SoftplusOptions{}") SoftplusOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java index a962e38d300..cef1e58c41a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class SoftplusImplCloneable extends Module { public SoftplusImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public SoftplusImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftplusImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusOptions.java index 20cd84a1395..c7d54409363 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java index a658a6e6956..ab7d3c27344 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class SoftshrinkImpl extends SoftshrinkImplCloneable { public SoftshrinkImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public SoftshrinkImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public SoftshrinkImpl(@Const @ByRef(nullValue = "torch::nn::SoftshrinkOptions{}") SoftshrinkOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::SoftshrinkOptions{}") SoftshrinkOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java index e30b991c361..0cb37f3814b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class SoftshrinkImplCloneable extends Module { public SoftshrinkImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public SoftshrinkImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftshrinkImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkOptions.java index a1b8eacb1b5..8906bac6358 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java index 48320194414..9e6ed477696 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -33,7 +33,7 @@ public class SoftsignImpl extends SoftsignImplCloneable { @SharedPtr @Name("std::make_shared") private native void allocate(); /** Downcast constructor. 
*/ public SoftsignImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java index f5108a1c065..3f6b13ff1af 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class SoftsignImplCloneable extends Module { public SoftsignImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public SoftsignImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftsignImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java index 96a67e98c59..116eebdd414 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceLocation.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceLocation.java index 024f9d3453f..e870bdc2ae4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceLocation.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceLocation.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRange.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRange.java index 524b44e180d..95bcd6b0fd2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRange.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRange.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeHasher.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeHasher.java index 9c103178302..9f47c225ba4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeHasher.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeHasher.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by 
JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeOptional.java index f798eff3f7f..5d868443ed6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeUnpickler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeUnpickler.java index 53b5eb2158d..f05f7e61c7d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeUnpickler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeUnpickler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SpecialFormValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SpecialFormValue.java index b3d0b0e85f0..ab126928b51 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SpecialFormValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SpecialFormValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SplitUntil32Bit.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SplitUntil32Bit.java index fc3d9d5771e..0ef26c3ee96 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SplitUntil32Bit.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SplitUntil32Bit.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntry.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntry.java index 5e3a16fa4b1..3f293ead7a6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntry.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntry.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Starred.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Starred.java index 8d839802f1a..c6bcb28286b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Starred.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Starred.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StepLR.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StepLR.java index 6c170a6b87e..b840224646c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StepLR.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StepLR.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/Stmt.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Stmt.java
index eb6f12fd9d6..e172113c7c6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Stmt.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Stmt.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StmtList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StmtList.java
index 7b28bb6bc47..ebceea7551d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/StmtList.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StmtList.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StmtListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StmtListIterator.java
index a76557716e0..fee641ff6fa 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/StmtListIterator.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StmtListIterator.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java
index 4eb5d09abe3..f95e94083d6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -39,6 +39,23 @@ public class Storage extends Pointer {
         /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
         public use_byte_size_t(Pointer p) { super(p); }
     }
+    public static class unsafe_borrow_t extends Pointer {
+        static { Loader.load(); }
+        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+        public unsafe_borrow_t(Pointer p) { super(p); }
+        /** Native array allocator. Access with {@link Pointer#position(long)}. */
+        public unsafe_borrow_t(long size) { super((Pointer)null); allocateArray(size); }
+        private native void allocateArray(long size);
+        @Override public unsafe_borrow_t position(long position) {
+            return (unsafe_borrow_t)super.position(position);
+        }
+        @Override public unsafe_borrow_t getPointer(long i) {
+            return new unsafe_borrow_t((Pointer)this).offsetAddress(i);
+        }
+
+        public unsafe_borrow_t() { super((Pointer)null); allocate(); }
+        private native void allocate();
+    }
 
     public Storage() { super((Pointer)null); allocate(); }
     private native void allocate();
@@ -86,7 +103,6 @@ private native void allocate(
       @ByVal use_byte_size_t arg0,
       @Cast("size_t") long size_bytes,
       @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr data_ptr);
-
   // Legacy constructor for partially initialized (dtype or memory) storages
   // that can be temporarily created with Caffe2 APIs. See the note on top of
   // TensorImpl.h for details.
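The unsafe_borrow_t class added above appears to mirror the c10::Storage::unsafe_borrow_t tag type that the C++ side uses (for example through c10::MaybeOwned<Storage>) to mark non-owning borrows; the hunk only exposes constructors, so all a Java caller can do is create the tag. A minimal sketch under that assumption:

import org.bytedeco.pytorch.Storage;

public class BorrowTag {
    public static void main(String[] args) {
        // the tag carries no state of its own; constructing it is the whole API shown in the hunk
        Storage.unsafe_borrow_t tag = new Storage.unsafe_borrow_t();
        System.out.println(tag.isNull() ? "not allocated" : "tag allocated");
    }
}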
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImpl.java
index ed2f412f424..a0832d8183d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImpl.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -151,4 +151,6 @@ public native void UniqueStorageShareExternalPointer(
 
   public native void set_received_cuda(@Cast("bool") boolean received_cuda);
   public native @Cast("bool") boolean received_cuda();
+
+  public native @Cast("c10::impl::PyObjectSlot*") Pointer pyobj_slot();
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImplPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImplPtr.java
index 88c6c2329d3..38d5937fdcb 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImplPtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImplPtr.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageType.java
index 47c3df40165..7ccd9fcc4b6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageType.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageTypePtr.java
index ff7423e354e..5c50f023a2a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageTypePtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageTypePtr.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Stream.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Stream.java
index a21df61f8ab..9f95e7ed7d6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Stream.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Stream.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamData3.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamData3.java
index f506b86903b..a3683598e80 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamData3.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamData3.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjType.java
index 646cf9b792d..7eba755468e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjType.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+//
Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjTypePtr.java index cc070447ade..b4f3a8cdae6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamOptional.java index ba9c50c44d9..90573c569f4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSampler.java index 421f1471190..f064a1dd339 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSampler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Stride.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Stride.java index 4a4d696bcc8..d8c1bc41e7c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Stride.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Stride.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideArrayRef.java index a1b0b98caca..1322077a217 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideOptional.java index f55fc34eca2..ef4aea11ab6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVaryingShape.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVaryingShape.java index 6108df18051..d79b552f81f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVaryingShape.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVaryingShape.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVector.java index 065bd3d6dfa..cf2049fc741 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVectorOptional.java index d0a3cf3f98d..151d588f4b1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVectorOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDict.java index 64c008e9a6f..8706324bdde 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDict.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDict.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItem.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItem.java index 0abd790368b..f09249aa8d4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItem.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItem.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItemVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItemVector.java index 05416fd9a30..e23412834ab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItemVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItemVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModulePair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModulePair.java index 07ca2b74982..b111dc441b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModulePair.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModulePair.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleVector.java index bda16a97bfd..c30d8229297 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 
1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringArrayRef.java
index 9951f7d99f9..1506e6f6dbb 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringArrayRef.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringBoolMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringBoolMap.java
index 0d61d58f7c0..2c0fc4d0951 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringBoolMap.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringBoolMap.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringCordView.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringCordView.java
index b0b499e4555..467e294c0f1 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringCordView.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringCordView.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -53,6 +53,8 @@ private native void allocate(
 
   public native @Cast("size_t") long find(@StdString BytePointer tok, @Cast("size_t") long start);
   public native @Cast("size_t") long find(@StdString String tok, @Cast("size_t") long start);
+  public native @Cast("size_t") long find_regex(@StdString BytePointer tok, @Cast("size_t") long start);
+  public native @Cast("size_t") long find_regex(@StdString String tok, @Cast("size_t") long start);
   public native @ByVal StringCordView substr(@Cast("size_t") long start, @Cast("size_t") long size);
   public native @Cast("char") byte at(@Cast("size_t") long index);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringGenericListDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringGenericListDict.java
index 7a5b8baed0a..a0f7542777a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringGenericListDict.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringGenericListDict.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringIValueMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringIValueMap.java
index d5cd5d79dcd..32057907f6d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringIValueMap.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringIValueMap.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLiteral.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringLiteral.java
index adf3774a594..2967eae4c49 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLiteral.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringLiteral.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+//
Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongMap.java index 692bea96f5a..9e4c938de80 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongMap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongVector.java index 71b49eb766c..126c94f8108 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringOptional.java index 4b8b581fda7..7f5ed349665 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSet.java index 354efed13e5..61c649c66db 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSet.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDict.java index 740f27f6b1c..19a79b3c2f4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDict.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDict.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItem.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItem.java index 694740ce999..3c579f723c6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItem.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItem.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItemVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItemVector.java index a8f011bd405..585587b12c7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItemVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItemVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: 
DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModulePair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModulePair.java index fd92f7cbdc5..eeb91d99859 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModulePair.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModulePair.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleVector.java index 92e3ce5515d..47462e9d65a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSizeTMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSizeTMap.java index 68ccb947a62..a77da2ca5c7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSizeTMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSizeTMap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringStringMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringStringMap.java index 1374903d370..61fbcf4dd87 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringStringMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringStringMap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDict.java index 9105e77147a..6b79594bcec 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDict.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDict.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItem.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItem.java index f2d4df9d7bd..d27b3aa2d4e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItem.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItem.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItemVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItemVector.java index a4b145e5fda..8464daad7bc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItemVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItemVector.java @@ -1,4 
+1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorPair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorPair.java index f848daa8907..81a2171566c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorPair.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorPair.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorVector.java index 921337809b0..11587056be4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringType.java index 8e00e108017..288c956e58f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTypePtr.java index 94c9f0adbbf..531a4bee46a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringValueMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringValueMap.java index 817c8a206dd..64fbe7eeb0c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringValueMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringValueMap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringVector.java index 3764668c0bb..cc880cbe8d0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringVectorOptional.java index b2811b94e31..2b4fb659de5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringVectorOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by 
JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewOptional.java index a66ba79117f..5abb11ed648 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewVector.java index d906d9e953a..a93a3c87621 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewVectorOptional.java index ad7dbe09eaa..8c232cc09bb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewVectorOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StrongTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StrongTypePtr.java index 8040ac5ec31..bd40e5e88c7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StrongTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StrongTypePtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Subscript.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Subscript.java index d97b824a997..e1420624118 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Subscript.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Subscript.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredEnumClass.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredEnumClass.java index 249c4a197a2..5ae3fb8c4c7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredEnumClass.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredEnumClass.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredTupleValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredTupleValue.java index f9497684dec..66ef261fe64 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredTupleValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredTupleValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS 
FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredValue.java index 399252a8d78..fa788c64409 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SwapSavedVariables.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SwapSavedVariables.java index 4668cd07889..915246bdc66 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SwapSavedVariables.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SwapSavedVariables.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java index f74d16ed2a0..7f281dd3ea2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymBoolType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymBoolType.java index 25ccada6335..ba68b7db72b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymBoolType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymBoolType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVector.java index c68d0845c90..d062eb655a0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVectorOptional.java index 92e74112735..ce39198edce 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVectorOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloat.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloat.java index b5b71af2aae..8ec06df330a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloat.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloat.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java
index 45e85b2f037..b2efb0e97e5 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymInt.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymInt.java
index 6c6e1fb7c0b..2954f81248b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymInt.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymInt.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -106,6 +106,15 @@ public enum Unchecked {
   public native @Cast("int64_t") long guard_int(@Cast("const char*") BytePointer file, @Cast("int64_t") long line);
   public native @Cast("int64_t") long guard_int(String file, @Cast("int64_t") long line);
 
+  // Insert a guard that this SymInt must be size-like, returning true if
+  // the integer actually is >= 0. Unlike manually performing a >= 0 test,
+  // if the SymInt in question is an unbacked SymInt (or, potentially in the
+  // future, if it contains unbacked SymInts), we will also treat the
+  // unbacked SymInt as statically testing >= 2 (which will prevent us from
+  // choking on, e.g., contiguity checks.)
+  public native @Cast("bool") boolean expect_size(@Cast("const char*") BytePointer file, @Cast("int64_t") long line);
+  public native @Cast("bool") boolean expect_size(String file, @Cast("int64_t") long line);
+
   // Distinguish actual symbolic values from constants stored on the heap
   public native @Cast("bool") boolean is_symbolic();
 
@@ -142,6 +151,11 @@ public enum Unchecked {
   public native @ByVal SymInt min(@Const @ByRef SymInt sci);
   public native @ByVal SymInt max(@Const @ByRef SymInt sci);
 
+  // If both are symbolic, this checks if
+  // they share the same node.
+  // If both are not symbolic this just checks normal equality.
+  public native @Cast("bool") boolean is_same(@Const @ByRef SymInt other);
+
   public native @ByVal @Name("operator c10::SymFloat") SymFloat asSymFloat();
 
   // Don't use this. Prefer maybe_as_int instead
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntArrayRef.java
index 2b8220d5969..8ebb1c45be7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntArrayRef.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntArrayRefOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntArrayRefOptional.java
index f23cbf5ce07..9fd43403601 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntArrayRefOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntArrayRefOptional.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntOptional.java
index cf112a0205a..ee80e41b036 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntOptional.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorBase.java
index ceff1dfab75..cf30007150b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorBase.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorCommon.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorCommon.java
index 88df632f8e7..1e1916ef768 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorCommon.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorCommon.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorImpl.java
index fd9930c2262..5a4ece68c12 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorImpl.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java
index 16900bc092a..11faea38fd0 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
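The expect_size() and is_same() additions to SymInt above are easier to follow in use. A minimal, hypothetical sketch: the SymInt(long) constructor and the file/line strings are assumptions of the example (the guard APIs use file/line only for provenance), not something this patch introduces.

    import org.bytedeco.pytorch.SymInt;

    public class SymIntGuardDemo {
        public static void main(String[] args) {
            SymInt a = new SymInt(4); // constant, non-symbolic values for illustration
            SymInt b = new SymInt(4);

            // expect_size() guards that the value is size-like (>= 0); an unbacked
            // symbolic int is additionally treated as >= 2 so that contiguity-style
            // checks cannot choke on it.
            boolean sizeLike = a.expect_size("SymIntGuardDemo.java", 7);

            // is_same() compares node identity when both sides are symbolic and
            // falls back to plain value equality for constants, as here.
            System.out.println(sizeLike + " " + a.is_same(b));
        }
    }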
package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntVector.java index 14fb26a10e8..7304cb2262f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymNode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNode.java index 6ac045b1648..3e7d3f7dd21 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymNode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeArrayRef.java index b913f17ab67..ca637f4c967 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeImpl.java index 2689cd2201f..3da1ca11317 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -66,6 +66,7 @@ public class SymNodeImpl extends Pointer { public native @ByVal SymNode sym_or(@Const @ByRef SymNode other); public native @ByVal SymNode sym_and(@Const @ByRef SymNode other); public native @ByVal SymNode sym_not(); + public native @ByVal SymNode sym_ite(@Const @ByRef SymNode then_val, @Const @ByRef SymNode else_val); // NB: self is ignored here, only the arguments are used public native @ByVal SymNode is_contiguous( @ByVal SymNodeArrayRef sizes, @@ -98,13 +99,18 @@ public class SymNodeImpl extends Pointer { public native double guard_float(String file, @Cast("int64_t") long line); public native @Cast("bool") boolean expect_true(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); public native @Cast("bool") boolean expect_true(String file, @Cast("int64_t") long line); + public native @Cast("bool") boolean expect_size(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); + public native @Cast("bool") boolean expect_size(String file, @Cast("int64_t") long line); public native @Cast("int64_t") long int_(); public native @Cast("bool") boolean bool_(); public native @Cast("bool") boolean has_hint(); public native @StdString BytePointer str(); public native @ByVal LongOptional singleton_int(); + public native @ByVal LongOptional singleton_coeff(); public native @ByVal LongOptional constant_int(); public native @ByVal BoolOptional constant_bool(); public native @ByVal LongOptional maybe_as_int(); + public native @Cast("bool") boolean is_constant(); + public native @Cast("bool") boolean is_symbolic(); public native 
@Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Symbol.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Symbol.java index 0387e556627..e7c10f0506f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Symbol.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Symbol.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolArrayRef.java index 7a3d08f18c0..de45cb43205 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolSet.java index 91d93c97802..898c8c31c5b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolSet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolSet.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolVector.java index 83e46d7bacc..10e974170ff 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShape.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShape.java index cfb5a1b86c3..50e57ed246c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShape.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShape.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShapeMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShapeMeta.java index 50331358fea..20c7c391925 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShapeMeta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShapeMeta.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -18,16 +18,13 @@ import static org.bytedeco.pytorch.global.torch.*; -@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class SymbolicShapeMeta extends Pointer { static { Loader.load(); } - /** Default native constructor. */ - public SymbolicShapeMeta() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ - public SymbolicShapeMeta(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SymbolicShapeMeta(Pointer p) { super(p); } - private native void allocate(); + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public SymbolicShapeMeta(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public SymbolicShapeMeta position(long position) { return (SymbolicShapeMeta)super.position(position); @@ -36,14 +33,62 @@ public class SymbolicShapeMeta extends Pointer { return new SymbolicShapeMeta((Pointer)this).offsetAddress(i); } + // Basic metadata from which other quantities are derived public native @ByRef @NoOffset SymDimVector sizes_(); public native SymbolicShapeMeta sizes_(SymDimVector setter); public native @ByRef @NoOffset SymDimVector strides_(); public native SymbolicShapeMeta strides_(SymDimVector setter); - public native @ByRef @NoOffset SymInt numel_(); public native SymbolicShapeMeta numel_(SymInt setter); public native @ByRef @NoOffset SymInt storage_offset_(); public native SymbolicShapeMeta storage_offset_(SymInt setter); - public native @ByRef @NoOffset SymBool is_contiguous_(); public native SymbolicShapeMeta is_contiguous_(SymBool setter); - public native @ByRef @NoOffset SymBool is_channels_last_contiguous_(); public native SymbolicShapeMeta is_channels_last_contiguous_(SymBool setter); - public native @ByRef @NoOffset SymBool is_channels_last_3d_contiguous_(); public native SymbolicShapeMeta is_channels_last_3d_contiguous_(SymBool setter); - public native @ByRef @NoOffset SymBool is_channels_last_(); public native SymbolicShapeMeta is_channels_last_(SymBool setter); - public native @ByRef @NoOffset SymBool is_channels_last_3d_(); public native SymbolicShapeMeta is_channels_last_3d_(SymBool setter); - public native @ByRef @NoOffset SymBool is_non_overlapping_and_dense_(); public native SymbolicShapeMeta is_non_overlapping_and_dense_(SymBool setter); + + public native @Cast("bool") boolean strides_valid_(); public native SymbolicShapeMeta strides_valid_(boolean setter); // e.g. 
for sparse where there are no strides + + public SymbolicShapeMeta() { super((Pointer)null); allocate(); } + private native void allocate(); + public SymbolicShapeMeta(@Const @ByRef SymbolicShapeMeta other) { super((Pointer)null); allocate(other); } + private native void allocate(@Const @ByRef SymbolicShapeMeta other); + + public native void refresh_numel(); + + public native void refresh_contiguous(); + + public native @Cast("int64_t") long dim(); + + // Accessors for derived quantities, computed lazily on first access + + public native @Cast("bool") boolean has_numel(); + public native @Cast("bool") boolean has_is_contiguous(); + public native @Cast("bool") boolean has_is_channels_last_contiguous(); + public native @Cast("bool") boolean has_is_channels_last_3d_contiguous(); + public native @Cast("bool") boolean has_is_channels_last(); + public native @Cast("bool") boolean has_is_channels_last_3d(); + public native @Cast("bool") boolean has_is_non_overlapping_and_dense(); + + // Accessors to cached derived properties + // DO NOT call with mutables_ lock held + public native @Const @ByRef SymInt numel(); + + public native @Const @ByRef SymBool is_contiguous(); + + public native @Const @ByRef SymBool is_channels_last_contiguous(); + + public native @Const @ByRef SymBool is_channels_last_3d_contiguous(); + + public native @Const @ByRef SymBool is_channels_last(); + + public native @Const @ByRef SymBool is_channels_last_3d(); + + public native @Const @ByRef SymBool is_non_overlapping_and_dense(); + + // Assumptions so we can short-circuit computation + // NOTE: Don't need to lock mutables_ since these aren't const + public native void assume_contiguous(@ByVal(nullValue = "c10::SymBool(true)") SymBool val); + public native void assume_contiguous(); + public native void assume_channels_last_contiguous(@ByVal(nullValue = "c10::SymBool(true)") SymBool val); + public native void assume_channels_last_contiguous(); + public native void assume_channels_last_3d_contiguous(@ByVal(nullValue = "c10::SymBool(true)") SymBool val); + public native void assume_channels_last_3d_contiguous(); + public native void assume_channels_last(@ByVal(nullValue = "c10::SymBool(true)") SymBool val); + public native void assume_channels_last(); + public native void assume_channels_last_3d(@ByVal(nullValue = "c10::SymBool(true)") SymBool val); + public native void assume_channels_last_3d(); + public native void assume_non_overlapping_and_dense(@ByVal(nullValue = "c10::SymBool(true)") SymBool val); + public native void assume_non_overlapping_and_dense(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_DataPtrSizeT_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_DataPtrSizeT_T.java index 2bddb4a08c1..b7a301a1f5d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_DataPtrSizeT_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_DataPtrSizeT_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_IntInt_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_IntInt_T.java index 18a671c9514..7ba9a17a9c5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_IntInt_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_IntInt_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
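To make the reworked SymbolicShapeMeta surface above concrete: the per-property fields are gone, derived quantities such as numel and contiguity are now computed lazily on first access and cached, and the assume_* methods let a caller that already knows the layout short-circuit that computation. A hedged sketch; constructing the meta object directly like this is an assumption of the example (real instances are populated from a tensor's symbolic sizes and strides):

    import org.bytedeco.pytorch.SymBool;
    import org.bytedeco.pytorch.SymbolicShapeMeta;

    public class ShapeMetaDemo {
        public static void main(String[] args) {
            SymbolicShapeMeta meta = new SymbolicShapeMeta(); // assumed empty meta
            meta.refresh_numel(); // drop any cached numel so it is recomputed

            // First access computes and caches the derived SymBool.
            if (!meta.has_is_contiguous()) {
                SymBool contiguous = meta.is_contiguous();
            }

            // A caller that already knows the layout can skip the computation;
            // the no-argument form defaults to c10::SymBool(true).
            meta.assume_contiguous();
        }
    }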
a/pytorch/src/gen/java/org/bytedeco/pytorch/T_LongLong_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_LongLong_T.java index d05f954997c..0d2c5a934af 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_LongLong_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_LongLong_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_PackedSequenceT_TensorTensor_T_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_PackedSequenceT_TensorTensor_T_T.java index 00d64857451..3bbfe817361 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_PackedSequenceT_TensorTensor_T_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_PackedSequenceT_TensorTensor_T_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_PackedSequenceTensor_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_PackedSequenceTensor_T.java index cf4ea8e54f1..72aa159b1ce 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_PackedSequenceTensor_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_PackedSequenceTensor_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_SafePyObjectTorchDispatchModeKey_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_SafePyObjectTorchDispatchModeKey_T.java new file mode 100644 index 00000000000..0ab1225d177 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_SafePyObjectTorchDispatchModeKey_T.java @@ -0,0 +1,36 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::tuple,c10::impl::TorchDispatchModeKey>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_SafePyObjectTorchDispatchModeKey_T extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public T_SafePyObjectTorchDispatchModeKey_T(Pointer p) { super(p); } + public T_SafePyObjectTorchDispatchModeKey_T(@SharedPtr("c10::SafePyObject") SafePyObject value0, @ByRef TorchDispatchModeKey value1) { allocate(value0, value1); } + private native void allocate(@SharedPtr("c10::SafePyObject") SafePyObject value0, @ByRef TorchDispatchModeKey value1); + public T_SafePyObjectTorchDispatchModeKey_T() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef T_SafePyObjectTorchDispatchModeKey_T put(@ByRef T_SafePyObjectTorchDispatchModeKey_T x); + + public @SharedPtr("c10::SafePyObject") SafePyObject get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native @SharedPtr("c10::SafePyObject") SafePyObject get0(@ByRef T_SafePyObjectTorchDispatchModeKey_T container); + public @ByRef TorchDispatchModeKey get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @ByRef TorchDispatchModeKey get1(@ByRef T_SafePyObjectTorchDispatchModeKey_T container); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringSizeTSizeT_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringSizeTSizeT_T.java index 2a29b9c1a44..9ef7f8d44e3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringSizeTSizeT_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringSizeTSizeT_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringSizeTSizeT_TOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringSizeTSizeT_TOptional.java index 4ca10260372..f0dad752cd6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringSizeTSizeT_TOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringSizeTSizeT_TOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T.java index 1179153e746..6e5d6265534 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwned_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwned_T.java index b3369935cd7..0c833a91732 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwned_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwned_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorT_TensorTensor_T_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorT_TensorTensor_T_T.java index c52623cc64a..027b18508e6 100644 --- 
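The new T_SafePyObjectTorchDispatchModeKey_T above is the usual JavaCPP view over a std::tuple: get0() and get1() delegate to std::get<0> and std::get<1> on the native side. A small sketch; where the tuple comes from (for example, a dispatcher call that pops a mode) is assumed, since this patch only defines the container:

    import org.bytedeco.pytorch.*;

    public class DispatchModeTupleDemo {
        // Unpacks a tuple produced elsewhere; purely illustrative.
        static TorchDispatchModeKey keyOf(T_SafePyObjectTorchDispatchModeKey_T t) {
            SafePyObject mode = t.get0(); // std::get<0>
            return t.get1();              // std::get<1>
        }
    }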
a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorT_TensorTensor_T_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorT_TensorTensor_T_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorDoubleLong_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorDoubleLong_T.java index 16f0deddf69..ec634e8c076 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorDoubleLong_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorDoubleLong_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensorTensorTensor_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensorTensorTensor_T.java index fc22c44116f..0f8c854303b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensorTensorTensor_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensorTensorTensor_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensor_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensor_T.java index 6d68a26e718..352f5bd467f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensor_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensor_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorVector_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorVector_T.java index 685b177959b..bc7bef90512 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorVector_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorVector_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensor_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensor_T.java index b89bef3f058..04b9e533646 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensor_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensor_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensor_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensor_T.java index acedc2e3d80..b5cdae48936 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensor_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensor_T.java @@ -1,4 +1,4 @@ -// 
Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVectorTensorVector_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVectorTensorVector_T.java index 0476c197ecd..c49397fcd85 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVectorTensorVector_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVectorTensorVector_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVector_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVector_T.java index 9d2aea48666..6cb2b372068 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVector_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVector_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensor_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensor_T.java index e6cab991129..6d0206a271c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensor_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensor_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensor_TOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensor_TOptional.java index 85df0e5a9a4..e978029187b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensor_TOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensor_TOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_T.java index 9e6b759be28..5bf03e49e80 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_T.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_TOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_TOptional.java index ea3c32fc3ed..4825f37ca95 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_TOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_TOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TagArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TagArrayRef.java index 0f540efbdbc..1d6aece3ed4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TagArrayRef.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/TagArrayRef.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -59,6 +59,8 @@ public class TagArrayRef extends Pointer {
   // The enable_if stuff here makes sure that this isn't used for
   // std::vector, because ArrayRef can't work on a std::vector
   // bitfield.
+  public TagArrayRef(@ByRef TagVector vec) { super((Pointer)null); allocate(vec); }
+  private native void allocate(@ByRef TagVector vec);
 
   /** Construct an ArrayRef from a std::array */
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TagVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TagVector.java
new file mode 100644
index 00000000000..9781f5b9dd7
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TagVector.java
@@ -0,0 +1,90 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class TagVector extends Pointer {
+    static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public TagVector(Pointer p) { super(p); }
+    public TagVector(Tag value) { this(1); put(0, value); }
+    public TagVector(Tag ... array) { this(array.length); put(array); }
+    public TagVector() { allocate(); }
+    public TagVector(long n) { allocate(n); }
+    private native void allocate();
+    private native void allocate(@Cast("size_t") long n);
+    public native @Name("operator =") @ByRef TagVector put(@ByRef TagVector x);
+
+    public boolean empty() { return size() == 0; }
+    public native long size();
+    public void clear() { resize(0); }
+    public native void resize(@Cast("size_t") long n);
+
+    public Tag front() { return get(0); }
+    public Tag back() { return get(size() - 1); }
+    @Index(function = "at") public native @ByRef Tag get(@Cast("size_t") long i);
+    public native TagVector put(@Cast("size_t") long i, Tag value);
+
+    public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef Tag value);
+    public native @ByVal Iterator erase(@ByVal Iterator pos);
+    public native @ByVal Iterator begin();
+    public native @ByVal Iterator end();
+    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
+        public Iterator(Pointer p) { super(p); }
+        public Iterator() { }
+
+        public native @Name("operator ++") @ByRef Iterator increment();
+        public native @Name("operator ==") boolean equals(@ByRef Iterator it);
+        public native @Name("operator *") @ByRef @Const Tag get();
+    }
+
+    public Tag[] get() {
+        Tag[] array = new Tag[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
+        for (int i = 0; i < array.length; i++) {
+            array[i] = get(i);
+        }
+        return array;
+    }
+    @Override public String toString() {
+        return java.util.Arrays.toString(get());
+    }
+
+    public Tag pop_back() {
+        long size = size();
+        Tag value = get(size - 1);
+        resize(size - 1);
+        return value;
+    }
+    public TagVector push_back(Tag value) {
+        long size = size();
+        resize(size + 1);
+        return put(size, value);
+    }
+    public TagVector put(Tag value) {
+        if (size() != 1) { resize(1); }
+        return put(0, value);
+    }
+    public TagVector put(Tag ... array) {
+        if (size() != array.length) { resize(array.length); }
+        for (int i = 0; i < array.length; i++) {
+            put(i, array[i]);
+        }
+        return this;
+    }
+}
+
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java
index a98ed45d1c6..49ff49e6bb5 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -33,7 +33,7 @@ public class TanhImpl extends TanhImplCloneable {
   @SharedPtr @Name("std::make_shared") private native void allocate();
   /** Downcast constructor. */
   public TanhImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-  @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer);
+  @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
   public native @ByVal Tensor forward(@Const @ByRef Tensor input);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java
index b95b2f9a0e8..ebe945a4e09 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -24,7 +24,7 @@ public class TanhImplCloneable extends Module {
   public TanhImplCloneable(Pointer p) { super(p); }
   /** Downcast constructor.
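TagVector above is a plain std::vector wrapper over at::Tag, and it composes with the TagArrayRef(TagVector) constructor added in the same patch. A short sketch; the Tag values are taken as parameters because the enum's constants are not shown here, and the ArrayRef is a non-owning view, so the vector must outlive it:

    import org.bytedeco.pytorch.*;

    public class TagVectorDemo {
        static TagArrayRef viewOf(Tag first, Tag second) {
            TagVector tags = new TagVector();
            tags.push_back(first);
            tags.push_back(second);
            // Non-owning view over the vector's storage.
            return new TagArrayRef(tags);
        }
    }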
*/ public TanhImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TanhImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java index ab3122e311c..bd234d27782 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -33,7 +33,7 @@ public class TanhshrinkImpl extends TanhshrinkImplCloneable { @SharedPtr @Name("std::make_shared") private native void allocate(); /** Downcast constructor. */ public TanhshrinkImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java index 42ba50617b9..88f863f1590 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class TanhshrinkImplCloneable extends Module { public TanhshrinkImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public TanhshrinkImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TanhshrinkImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java index 22a1448e9c8..e69a10a36ff 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -417,12 +417,20 @@ private native void allocate( public native @ByVal Tensor _is_any_true(); public native @ByVal Tensor all(@Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor all(@Cast("int64_t") long dim); + public native @ByVal Tensor all(@ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal Tensor all(@ByVal LongArrayRefOptional dim); + public native @ByVal Tensor all(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal Tensor all(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); public native @ByVal Tensor all(@ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor all(@ByVal Dimname dim); public native @Cast("bool") boolean allclose(@Const @ByRef Tensor other, double rtol/*=1e-05*/, double atol/*=1e-08*/, @Cast("bool") boolean equal_nan/*=false*/); public native @Cast("bool") boolean allclose(@Const @ByRef Tensor other); public native @ByVal Tensor any(@Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor any(@Cast("int64_t") long dim); + public native @ByVal Tensor any(@ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal Tensor any(@ByVal LongArrayRefOptional dim); + public native @ByVal Tensor any(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal Tensor any(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dim); public native @ByVal Tensor any(@ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor any(@ByVal Dimname dim); public native @ByVal Tensor argmax(@ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/); @@ -860,9 +868,11 @@ private native void allocate( public native @ByVal Tensor repeat_symint(@ByVal SymIntArrayRef repeats); public native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size); public native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor repeats); + public native @ByVal Tensor repeat_interleave_symint(@Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional output_size); + public native @ByVal Tensor repeat_interleave_symint(@Const @ByRef Tensor repeats); public native @ByVal Tensor repeat_interleave(@Cast("int64_t") long repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size); public native @ByVal Tensor repeat_interleave(@Cast("int64_t") long repeats); - public native @ByVal Tensor repeat_interleave_symint(@ByVal SymInt repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size); + public native @ByVal Tensor repeat_interleave_symint(@ByVal SymInt repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional output_size); public native @ByVal Tensor repeat_interleave_symint(@ByVal SymInt repeats); public native @ByVal Tensor reshape(@ByVal LongArrayRef shape); public native @ByVal Tensor reshape(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
shape); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArg.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArg.java index e17fdf6eede..c67e4e97f25 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArg.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArg.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArgArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArgArrayRef.java index aa56ed9667f..4f0a43019d4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArgArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArgArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRef.java index 6288a049e10..b9d4f36930a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRefOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRefOptional.java index 62ab35061e3..8aeb3fa6b28 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRefOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRefOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java index fc59ff66ede..eedb5347d87 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -122,7 +122,10 @@ private native void allocate( public native void reset(); +// #if defined (_MSC_VER) public native @ByRef @Name("operator =") TensorBase put(@Const @ByRef TensorBase x); +// #else +// #endif // Ban assignment to rvalues, since at::Tensor (weirdly) performs a deep copy here @@ -260,6 +263,9 @@ private native void allocate( /** Returns if a {@code Tensor} has VE backend. */ public native @Cast("bool") boolean is_ve(); + /** Returns if a {@code Tensor} has PrivateUse1 backend. */ + public native @Cast("bool") boolean is_privateuseone(); + /** Returns if a {@code Tensor} has sparse backend. 
*/ public native @Cast("bool") boolean is_sparse(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBaseMaybeOwned.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBaseMaybeOwned.java index a79a0ecc85a..eb0dc83eb91 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBaseMaybeOwned.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBaseMaybeOwned.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBatchDataset.java index 7b6c8c4fa22..08ee99a652e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBatchDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorCastValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorCastValue.java index 93e28b486a1..bbec905ccc0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorCastValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorCastValue.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java index edea672b092..89cd1d2b34e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDatasetBase.java index 4ef29147d54..000da9038f5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDatasetBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDatasetBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDeque.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDeque.java index 9bfad8fd8b7..a450ff89c0f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDeque.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDeque.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorElementReference.java index accdaa44391..a7f7dab43f9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorElementReference.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorElementReference.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package 
org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExample.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExample.java index 1a7a0f16897..1530622a30d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExample.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExample.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleCollation.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleCollation.java index 354ec41f0fe..cf37166d34d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleCollation.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleCollation.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleIterator.java index 8e0b702239b..a8a53d0d9f2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleOptional.java index c78d47c0cfc..479a49e9a96 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVector.java index 49af53ac31d..00edc5b625a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorIterator.java index a607786769e..25ce7fb2a85 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorOptional.java index 24f25d7fd96..a3c8ade0066 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO 
NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometry.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometry.java index f211fa383d4..2fae9df1e9b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometry.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometry.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometryArg.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometryArg.java index d179c7b19fe..6bed3fe58ab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometryArg.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometryArg.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java index f1030cc791f..74e2e755a1f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -439,6 +439,8 @@ public enum SizesStridesPolicy { public native @Cast("bool") boolean is_ve(); + public native @Cast("bool") boolean is_privateuseone(); + public native @Cast("bool") boolean is_mkldnn(); public native @Cast("bool") boolean is_vulkan(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplPtr.java index 975eb31d3f4..0eae33a9c78 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplPtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplPtr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplSet.java index a9527b797f6..b65a52d0fe2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplSet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplSet.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplVector.java index 544b9c0faa2..b40f599fced 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java index f63f3bc627e..0dd21fb1971 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexArrayRef.java index 3db1af5e340..86944182ff3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexVector.java index c49e837eac5..39ead887886 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java index 01aa3626440..650ed6974c4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java index dfe623c1684..6e8a4567ca8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorConfig.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorConfig.java index c29f706c56c..98bf03c4a18 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorConfig.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorConfig.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorList.java index d9eb93236c7..1bd9225efef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorListIterator.java index 007b284117e..13f12f081f7 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorListIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorListIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaker.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaker.java index 2a47dca3f11..dfb22fa1529 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaker.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaker.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwned.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwned.java index d852d7cc1c8..c9da0abb0d3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwned.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwned.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorName.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorName.java index e5ae85466de..7104546a5a5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorName.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorName.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java index ae8caa42287..2e29e684832 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptional.java index 8c27c669fe6..96191717a14 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalArrayRef.java index 08555be3ace..ad643b53596 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalArrayRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalElementReference.java index 34ec770a006..b58622f3e23 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalElementReference.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalElementReference.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalList.java index e4bd449ba64..8d0c37f04e1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalListIterator.java index 8006c9587d7..768383c9d25 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalListIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalListIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalVector.java index cda0f3aef56..1c8c7ddfd29 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java index 56d5de8f505..e46c3e9ddc7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorType.java index e1fd06f8a84..ae7ec39e4c0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorType.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVector.java index c172f42b9fd..b2d84235aba 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorOptional.java index a004be69f8b..4d816aaefa6 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorOptional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TernaryIf.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TernaryIf.java index 2aeab297741..1e95268972d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TernaryIf.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TernaryIf.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadIdGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadIdGuard.java index 3b2a17fc9ba..25ac95cfadf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadIdGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadIdGuard.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalDebugInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalDebugInfo.java index 1178763a1ac..1ad22c0465a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalDebugInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalDebugInfo.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalPythonObjects.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalPythonObjects.java index 4d6ed9571c1..a166d5b0550 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalPythonObjects.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalPythonObjects.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -36,10 +36,10 @@ public class ThreadLocalPythonObjects extends Pointer { return new ThreadLocalPythonObjects((Pointer)this).offsetAddress(i); } - public static native void set(@StdString BytePointer key, @SharedPtr SafePyObject value); - public static native void set(@StdString String key, @SharedPtr SafePyObject value); - public static native @SharedPtr SafePyObject get(@StdString BytePointer key); - public static native @SharedPtr SafePyObject get(@StdString String key); + public static native void set(@StdString BytePointer key, @SharedPtr("c10::SafePyObject") @ByVal SafePyObject value); + public static native void set(@StdString String key, @SharedPtr("c10::SafePyObject") @ByVal SafePyObject value); + public static native @Const @SharedPtr("c10::SafePyObject") @ByRef SafePyObject get(@StdString BytePointer key); + public static native @Const @SharedPtr("c10::SafePyObject") @ByRef SafePyObject get(@StdString String key); public static native @Cast("bool") boolean contains(@StdString BytePointer key); public static native @Cast("bool") boolean contains(@StdString String key); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalState.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalState.java index c3c887272de..e24fec1e99a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalState.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateGuard.java index a4cac6b11be..f9ec90b76f8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateGuard.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java index 607a9dcf575..1cf586d9197 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -38,7 +38,7 @@ public class ThresholdImpl extends ThresholdImplCloneable { public ThresholdImpl(Pointer p) { super(p); } /** Downcast constructor. */ public ThresholdImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ThresholdImpl(double threshold, double value) { super((Pointer)null); allocate(threshold, value); } @SharedPtr @Name("std::make_shared") private native void allocate(double threshold, double value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java index 7fb2106dcee..173f4c7aa8f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class ThresholdImplCloneable extends Module { public ThresholdImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
*/ public ThresholdImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ThresholdImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdOptions.java index 2608453cd84..c106ea5ddc7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Token.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Token.java index e2baff29063..765e90313c3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Token.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Token.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TorchDispatchModeTLS.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TorchDispatchModeTLS.java index 206d9d07369..e3c580d9e6b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TorchDispatchModeTLS.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TorchDispatchModeTLS.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -36,11 +36,41 @@ public class TorchDispatchModeTLS extends Pointer { return new TorchDispatchModeTLS((Pointer)this).offsetAddress(i); } - public static native void push_onto_stack(@SharedPtr SafePyObject mode); - public static native @SharedPtr SafePyObject pop_stack(); - public static native @SharedPtr SafePyObject get_stack_at(@Cast("int64_t") long idx); + // This API is NOT invariant safe. + // It must not take in an infra mode that uses TorchDispatchModeKey + // If you're pushing an infra mode onto the stack, we expect + // you to use set_mode + public static native void push_non_infra_mode_onto_stack( + @SharedPtr("c10::SafePyObject") @ByVal SafePyObject mode); + // Pops the top mode of the stack, + // giving precedence to user modes before attempting to pop + // any infra modes + public static native @Const @SharedPtr("c10::SafePyObject") @ByVal SafePyObject pop_stack(); + // Returns the highest-priority infra mode on the stack, + // along with its mode key. 
+ public static native @Const @ByVal T_SafePyObjectTorchDispatchModeKey_T pop_highest_infra_mode(); + + public static native @Const @SharedPtr("c10::SafePyObject") @ByRef SafePyObject get_stack_at(@Cast("int64_t") long idx); public static native @Cast("int64_t") long stack_len(); + public static native @Const @ByVal SafePyObjectOptional get_mode( + TorchDispatchModeKey mode_key); + public static native @Const @ByVal SafePyObjectOptional get_mode( + @Cast("c10::impl::TorchDispatchModeKey") byte mode_key); + public static native @Const @ByVal SafePyObjectOptional unset_mode( + TorchDispatchModeKey mode_key); + public static native @Const @ByVal SafePyObjectOptional unset_mode( + @Cast("c10::impl::TorchDispatchModeKey") byte mode_key); + public static native void set_mode( + @Const @SharedPtr("c10::SafePyObject") @ByRef SafePyObject mode, + TorchDispatchModeKey mode_key); + public static native void set_mode( + @Const @SharedPtr("c10::SafePyObject") @ByRef SafePyObject mode, + @Cast("c10::impl::TorchDispatchModeKey") byte mode_key); + public static native @Const @ByRef TorchDispatchModeTLS get_state(); public static native void set_state(@ByVal TorchDispatchModeTLS state); + + public static native @Cast("bool") boolean any_modes_set(@Cast("bool") boolean skip_infra_modes/*=false*/); + public static native @Cast("bool") boolean any_modes_set(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TraceableFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TraceableFunction.java index a6851e4582b..4a7952a2485 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TraceableFunction.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TraceableFunction.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java index e3c77e144c6..00b455cd0cc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -45,7 +45,7 @@ public class TransformerDecoderImpl extends TransformerDecoderImplCloneable { public TransformerDecoderImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public TransformerDecoderImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public TransformerDecoderImpl(@ByVal TransformerDecoderOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal TransformerDecoderOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java index 5097c5d91c5..0952a34e536 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class TransformerDecoderImplCloneable extends Module { public TransformerDecoderImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public TransformerDecoderImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerDecoderImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java index 40643494e59..9b3b7fbcf30 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -46,7 +46,7 @@ public class TransformerDecoderLayerImpl extends TransformerDecoderLayerImplClon public TransformerDecoderLayerImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public TransformerDecoderLayerImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public TransformerDecoderLayerImpl(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead) { super((Pointer)null); allocate(d_model, nhead); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java index 20f91cc8c74..9a22ba5f73a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class TransformerDecoderLayerImplCloneable extends Module { public TransformerDecoderLayerImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public TransformerDecoderLayerImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerDecoderLayerImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerOptions.java index bfa2c72baa4..22e9d7ee2f9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderOptions.java index 174b7e3c5ad..28cf4012f29 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java index 0a610d176e8..2f4098f91e0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS 
FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -43,7 +43,7 @@ public class TransformerEncoderImpl extends TransformerEncoderImplCloneable { public TransformerEncoderImpl(Pointer p) { super(p); } /** Downcast constructor. */ public TransformerEncoderImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public TransformerEncoderImpl(@ByVal TransformerEncoderOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal TransformerEncoderOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java index a1fd58085cb..f02de944e70 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class TransformerEncoderImplCloneable extends Module { public TransformerEncoderImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public TransformerEncoderImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerEncoderImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java index 884dab620a8..d5e5b088b14 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -41,7 +41,7 @@ public class TransformerEncoderLayerImpl extends TransformerEncoderLayerImplClon public TransformerEncoderLayerImpl(Pointer p) { super(p); } /** Downcast constructor. 
*/ public TransformerEncoderLayerImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public TransformerEncoderLayerImpl(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead) { super((Pointer)null); allocate(d_model, nhead); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java index 7865fb53ab0..1da2de3da14 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class TransformerEncoderLayerImplCloneable extends Module { public TransformerEncoderLayerImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public TransformerEncoderLayerImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerEncoderLayerImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerOptions.java index eadc9479179..7c3ee0e1976 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderOptions.java index dd78739b93e..8c501ab53e6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java index 217032e8468..9122e5572f9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP 
version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -43,7 +43,7 @@ public class TransformerImpl extends TransformerImplCloneable { public TransformerImpl(Pointer p) { super(p); } /** Downcast constructor. */ public TransformerImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); /// diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java index b09de3f91a0..b73673a9341 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class TransformerImplCloneable extends Module { public TransformerImplCloneable(Pointer p) { super(p); } /** Downcast constructor. */ public TransformerImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerOptions.java index c98f59872f7..54e891dd753 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tree.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tree.java index 38a4ef5fc09..dc62252b6f2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tree.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tree.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRef.java index f585072f9e0..f65c9a9a987 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRef.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefStringMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefStringMap.java index deeb233a156..73ad005df39 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefStringMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefStringMap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeView.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeView.java index 601c9d530ae..3d06180de3a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeView.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeView.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java index 8729379f3bd..ae6b43a4a1f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -45,7 +45,7 @@ public class TripletMarginLossImpl extends TripletMarginLossImplCloneable { public TripletMarginLossImpl(Pointer p) { super(p); } /** Downcast constructor. */ public TripletMarginLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public TripletMarginLossImpl(@ByVal(nullValue = "torch::nn::TripletMarginLossOptions{}") TripletMarginLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::TripletMarginLossOptions{}") TripletMarginLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java index 4401290e44a..a8c325d521b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -24,7 +24,7 @@ public class TripletMarginLossImplCloneable extends Module { public TripletMarginLossImplCloneable(Pointer p) { super(p); } /** Downcast constructor. 
     public TripletMarginLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::Cloneable<torch::nn::TripletMarginLossImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::TripletMarginLossImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     @Override public Module asModule() { return asModule(this); }
     @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::TripletMarginLossImpl>>") Module asModule(@SharedPtr TripletMarginLossImplCloneable pointer);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossOptions.java
index 29adc066c8f..901deced051 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossOptions.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java
index 57c1b87376c..09468e99ad6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -47,7 +47,7 @@ public class TripletMarginWithDistanceLossImpl extends TripletMarginWithDistance
     public TripletMarginWithDistanceLossImpl(Pointer p) { super(p); }
     /** Downcast constructor. */
     public TripletMarginWithDistanceLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::TripletMarginWithDistanceLossImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::TripletMarginWithDistanceLossImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public TripletMarginWithDistanceLossImpl(
         @ByVal(nullValue = "torch::nn::TripletMarginWithDistanceLossOptions{}") TripletMarginWithDistanceLossOptions options_) { super((Pointer)null); allocate(options_); }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java
index a0e4dca4a8e..fe04daf388d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -24,7 +24,7 @@ public class TripletMarginWithDistanceLossImplCloneable extends Module {
     public TripletMarginWithDistanceLossImplCloneable(Pointer p) { super(p); }
     /** Downcast constructor. */
     public TripletMarginWithDistanceLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::Cloneable<torch::nn::TripletMarginWithDistanceLossImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::TripletMarginWithDistanceLossImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     @Override public Module asModule() { return asModule(this); }
     @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::TripletMarginWithDistanceLossImpl>>") Module asModule(@SharedPtr TripletMarginWithDistanceLossImplCloneable pointer);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossOptions.java
index 75217586b38..365bfb1b0e7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossOptions.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tuple.java
index ef9ea6b9ac1..b656e64ec66 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tuple.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tuple.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TupleElements.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TupleElements.java
index 4c3943cbe83..a4fc05fa7bc 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TupleElements.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TupleElements.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TupleLiteral.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TupleLiteral.java
index 32349bfb691..001dcc739e4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TupleLiteral.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TupleLiteral.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TuplePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TuplePtr.java
index f46a8ef15c1..57480dba941 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TuplePtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TuplePtr.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -16,7 +16,7 @@
 import static org.bytedeco.openblas.global.openblas.*;
 
 import static org.bytedeco.pytorch.global.torch.*;
 
- // namespace detail
+
 @Name("c10::intrusive_ptr<c10::ivalue::Tuple>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
 public class TuplePtr extends Pointer {
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TupleType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TupleType.java
index 26a45c6c077..845886237fd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TupleType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TupleType.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Type.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Type.java
index 625a6f5abff..3d3fa15e26d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Type.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Type.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -110,7 +110,7 @@ public class Type extends Pointer {
     public native @StdString BytePointer annotation_str();
 
     // Returns a human readable string that includes additional information like
-    // "type is inferred rather than explictly defined" to help construct more
+    // "type is inferred rather than explicitly defined" to help construct more
     // user-friendly messages.
     public native @StdString BytePointer repr_str();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeArrayRef.java
index d79cdd2035f..5a2bdc7f5a8 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeArrayRef.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeEnv.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeEnv.java
index fc18e97b06e..47c9e5905a9 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeEnv.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeEnv.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeError.java
index 3360e79dfa9..2c2eded530e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeError.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeError.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeIdentifier.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeIdentifier.java
index 6d2239386b8..089ffbe691b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeIdentifier.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeIdentifier.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java
index 298722de435..7ac2097e510 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaOptional.java
index 8ddd9657852..2e75748906a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaOptional.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypePtrOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypePtrOptional.java
index 155eeb80ee4..7c171476541 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypePtrOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypePtrOptional.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeVector.java
index 2175fa7676e..4f6a84dac40 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeVector.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeVector.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnaryOp.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnaryOp.java
index 343c7b6bffa..a77d79a23db 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnaryOp.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnaryOp.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UndefinedTensorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UndefinedTensorImpl.java
index e9c0eccd44b..4d106774c82 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/UndefinedTensorImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UndefinedTensorImpl.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java
index 24223377542..ea4acc7c462 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -40,7 +40,7 @@ public class UnflattenImpl extends UnflattenImplCloneable {
     public UnflattenImpl(Pointer p) { super(p); }
    /** Downcast constructor. */
     public UnflattenImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::UnflattenImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::UnflattenImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public UnflattenImpl(@Cast("int64_t") long dim, @ByVal @Cast("std::vector<int64_t>*") LongVector sizes) { super((Pointer)null); allocate(dim, sizes); }
     @SharedPtr @Name("std::make_shared<torch::nn::UnflattenImpl>") private native void allocate(@Cast("int64_t") long dim, @ByVal @Cast("std::vector<int64_t>*") LongVector sizes);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java
index f86543fa06d..433aed2064f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -24,7 +24,7 @@ public class UnflattenImplCloneable extends Module {
     public UnflattenImplCloneable(Pointer p) { super(p); }
     /** Downcast constructor. */
     public UnflattenImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::Cloneable<torch::nn::UnflattenImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::UnflattenImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     @Override public Module asModule() { return asModule(this); }
     @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::UnflattenImpl>>") Module asModule(@SharedPtr UnflattenImplCloneable pointer);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenOptions.java
index b8cc2ec9cf0..ed31909bb3a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenOptions.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java
index fe52c3b4faa..7ef3c6540c9 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -38,7 +38,7 @@ public class UnfoldImpl extends UnfoldImplCloneable {
     public UnfoldImpl(Pointer p) { super(p); }
     /** Downcast constructor. */
     public UnfoldImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::UnfoldImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::UnfoldImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public UnfoldImpl(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); }
     @SharedPtr @Name("std::make_shared<torch::nn::UnfoldImpl>") private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java
index 3591bc45489..8247aaeec61 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -24,7 +24,7 @@ public class UnfoldImplCloneable extends Module {
     public UnfoldImplCloneable(Pointer p) { super(p); }
     /** Downcast constructor. */
     public UnfoldImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::Cloneable<torch::nn::UnfoldImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::UnfoldImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     @Override public Module asModule() { return asModule(this); }
     @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::UnfoldImpl>>") Module asModule(@SharedPtr UnfoldImplCloneable pointer);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldOptions.java
index 9ef2c7391fb..aef2aba253a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldOptions.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnionType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnionType.java
index dbc9b92e2cd..f19f04a6334 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnionType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnionType.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UniqueVoidPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UniqueVoidPtr.java
index d873af0ee14..490a7489628 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/UniqueVoidPtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UniqueVoidPtr.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Unpickler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Unpickler.java
index 2dffd08a093..12b3e5f58cf 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Unpickler.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Unpickler.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java
index a761096a06a..3cba3256a4e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -40,7 +40,7 @@ public class UpsampleImpl extends UpsampleImplCloneable {
     public UpsampleImpl(Pointer p) { super(p); }
     /** Downcast constructor. */
     public UpsampleImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::UpsampleImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::UpsampleImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public UpsampleImpl(@Const @ByRef(nullValue = "torch::nn::UpsampleOptions{}") UpsampleOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared<torch::nn::UpsampleImpl>") private native void allocate(@Const @ByRef(nullValue = "torch::nn::UpsampleOptions{}") UpsampleOptions options_);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java
index e3f7bc70856..513a3807d5d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -24,7 +24,7 @@ public class UpsampleImplCloneable extends Module {
     public UpsampleImplCloneable(Pointer p) { super(p); }
     /** Downcast constructor. */
     public UpsampleImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::Cloneable<torch::nn::UpsampleImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::UpsampleImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     @Override public Module asModule() { return asModule(this); }
     @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::UpsampleImpl>>") Module asModule(@SharedPtr UpsampleImplCloneable pointer);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleMode.java
index a520f879de5..ec485e288b3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleMode.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleMode.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -17,7 +17,7 @@
 
 
-@NoOffset @Name("c10::variant<torch::enumtype::kNearest,torch::enumtype::kLinear,torch::enumtype::kBilinear,torch::enumtype::kBicubic,torch::enumtype::kTrilinear>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@NoOffset @Name("std::variant<torch::enumtype::kNearest,torch::enumtype::kLinear,torch::enumtype::kBilinear,torch::enumtype::kBicubic,torch::enumtype::kTrilinear>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
 public class UpsampleMode extends Pointer {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
@@ -32,19 +32,19 @@ public class UpsampleMode extends Pointer {
     public native @Name("operator =") @ByRef UpsampleMode put(@ByRef UpsampleMode x);
 
     public @ByRef kNearest get0() { return get0(this); }
-    @Namespace @Name("c10::get<0>") public static native @ByRef kNearest get0(@ByRef UpsampleMode container);
+    @Namespace @Name("std::get<0>") public static native @ByRef kNearest get0(@ByRef UpsampleMode container);
     @ValueSetter public native UpsampleMode put(@ByRef kNearest value);
     public @ByRef kLinear get1() { return get1(this); }
-    @Namespace @Name("c10::get<1>") public static native @ByRef kLinear get1(@ByRef UpsampleMode container);
+    @Namespace @Name("std::get<1>") public static native @ByRef kLinear get1(@ByRef UpsampleMode container);
     @ValueSetter public native UpsampleMode put(@ByRef kLinear value);
     public @ByRef kBilinear get2() { return get2(this); }
-    @Namespace @Name("c10::get<2>") public static native @ByRef kBilinear get2(@ByRef UpsampleMode container);
+    @Namespace @Name("std::get<2>") public static native @ByRef kBilinear get2(@ByRef UpsampleMode container);
     @ValueSetter public native UpsampleMode put(@ByRef kBilinear value);
     public @ByRef kBicubic get3() { return get3(this); }
-    @Namespace @Name("c10::get<3>") public static native @ByRef kBicubic get3(@ByRef UpsampleMode container);
+    @Namespace @Name("std::get<3>") public static native @ByRef kBicubic get3(@ByRef UpsampleMode container);
     @ValueSetter public native UpsampleMode put(@ByRef kBicubic value);
     public @ByRef kTrilinear get4() { return get4(this); }
-    @Namespace @Name("c10::get<4>") public static native @ByRef kTrilinear get4(@ByRef UpsampleMode container);
+    @Namespace @Name("std::get<4>") public static native @ByRef kTrilinear get4(@ByRef UpsampleMode container);
     @ValueSetter public native UpsampleMode put(@ByRef kTrilinear value);
 }
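Note: UpsampleMode is one of the enum variant wrappers that this patch migrates from c10::variant to std::variant; only the native @Name strings change, so the Java getN()/put() surface stays the same. A usage sketch, not part of the patch (the default constructor that these variant wrappers normally expose is an assumption here):

    // Sketch only: store and read back an interpolation mode
    UpsampleMode mode = new UpsampleMode();
    mode.put(new kNearest());        // selects alternative 0 of the variant
    kNearest nearest = mode.get0();  // resolved through std::get<0> in native code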
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleOptions.java
index 84b78849f6b..13328e5c20f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleOptions.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Use.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Use.java
index 4363871d6cd..8e7e8a61856 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Use.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Use.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Value.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Value.java
index 4702a9884d8..1d573735a53 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Value.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Value.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -31,7 +31,7 @@ public class Value extends Pointer {
     public native Value setType(@ByVal Type.TypePtr type);
     public native void inferTypeFrom(@Const @ByRef Tensor output);
     public native void inferTypeFrom(
-        @Cast("const c10::intrusive_ptr<c10::ivalue::Object>*") @ByRef Pointer output);
+        @Const @ByRef ObjPtr output);
     public native @Const @ByRef Type.TypePtr type();
     public native @Cast("bool") boolean requires_grad();
     public native @Cast("bool") boolean isCompleteTensor();
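Note: in the Value hunk above, the second inferTypeFrom overload previously took a raw Pointer with a @Cast to const c10::intrusive_ptr; binding it to the typed ObjPtr wrapper instead gives Java callers a compile-time-checked signature rather than an unchecked pointer cast.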
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueArrayRef.java
index 0cc8e2fd9b3..afb3eebf77b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueArrayRef.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueError.java
index b9678151b26..32d406a81cd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueError.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueError.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueOptional.java
index f631d933336..f3329634c87 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueOptional.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueValueMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueValueMap.java
index 806d72e545d..1ddb183b00c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueValueMap.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueValueMap.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueVector.java
index c2c3f64a731..2c1ba685423 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueVector.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueVector.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueWrap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueWrap.java
index c28d42725d7..121fe0d63c8 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueWrap.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueWrap.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Var.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Var.java
index db7a7087a18..c8be0826110 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Var.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Var.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/VarMaybe.java b/pytorch/src/gen/java/org/bytedeco/pytorch/VarMaybe.java
index ed00747b87d..44ae63116dc 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/VarMaybe.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/VarMaybe.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableHooksInterface.java
index a3ae9efa08a..7f931ca0b20 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableHooksInterface.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableHooksInterface.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableInfo.java
index 42ac7fc44f9..0b7432be9d4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableInfo.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableInfo.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableVersion.java b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableVersion.java
index abf5092123b..63a34243158 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableVersion.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableVersion.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WarnAlways.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WarnAlways.java
index 5e12a6dc607..fa982c1e7c7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/WarnAlways.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WarnAlways.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Warning.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Warning.java
index 493bd37564f..e53d5a75a31 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Warning.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Warning.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -38,39 +38,39 @@ public class Warning extends Pointer {
     }
 
     public Warning(
-        @ByVal @Cast("c10::Warning::warning_variant_t*") Nonlinearity type,
+        @ByVal WarningVariant type,
         @Const @ByRef SourceLocation source_location,
         @StdString BytePointer msg,
         @Cast("bool") boolean verbatim) { super((Pointer)null); allocate(type, source_location, msg, verbatim); }
     private native void allocate(
-        @ByVal @Cast("c10::Warning::warning_variant_t*") Nonlinearity type,
+        @ByVal WarningVariant type,
         @Const @ByRef SourceLocation source_location,
         @StdString BytePointer msg,
         @Cast("bool") boolean verbatim);
     public Warning(
-        @ByVal @Cast("c10::Warning::warning_variant_t*") Nonlinearity type,
+        @ByVal WarningVariant type,
         @Const @ByRef SourceLocation source_location,
         @StdString String msg,
         @Cast("bool") boolean verbatim) { super((Pointer)null); allocate(type, source_location, msg, verbatim); }
     private native void allocate(
-        @ByVal @Cast("c10::Warning::warning_variant_t*") Nonlinearity type,
+        @ByVal WarningVariant type,
         @Const @ByRef SourceLocation source_location,
         @StdString String msg,
         @Cast("bool") boolean verbatim);
     public Warning(
-        @ByVal @Cast("c10::Warning::warning_variant_t*") Nonlinearity type,
+        @ByVal WarningVariant type,
         @ByVal SourceLocation source_location,
         @ByVal CompileTimeEmptyString msg,
         @Cast("bool") boolean verbatim) { super((Pointer)null); allocate(type, source_location, msg, verbatim); }
     private native void allocate(
-        @ByVal @Cast("c10::Warning::warning_variant_t*") Nonlinearity type,
+        @ByVal WarningVariant type,
         @ByVal SourceLocation source_location,
         @ByVal CompileTimeEmptyString msg,
         @Cast("bool") boolean verbatim);
 
     // Getters for members
-    public native @ByVal @Cast("c10::Warning::warning_variant_t*") Nonlinearity type();
+    public native @ByVal WarningVariant type();
     public native @Const @ByRef SourceLocation source_location();
     public native @StdString BytePointer msg();
     public native @Cast("bool") boolean verbatim();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandler.java
index 2c77df71855..ffdaef86318 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandler.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandler.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandlerGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandlerGuard.java
index cf374320902..63f868ea91f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandlerGuard.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandlerGuard.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WarningVariant.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WarningVariant.java
new file mode 100644
index 00000000000..cd14f008c17
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WarningVariant.java
@@ -0,0 +1,38 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@NoOffset @Name("std::variant<c10::Warning::UserWarning,c10::Warning::DeprecationWarning>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class WarningVariant extends Pointer {
+    static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public WarningVariant(Pointer p) { super(p); }
+    public WarningVariant(Warning.UserWarning value) { this(); put(value); }
+    public WarningVariant(Warning.DeprecationWarning value) { this(); put(value); }
+    public WarningVariant() { allocate(); }
+    private native void allocate();
+    public native @Name("operator =") @ByRef WarningVariant put(@ByRef WarningVariant x);
+
+    public @ByRef Warning.UserWarning get0() { return get0(this); }
+    @Namespace @Name("std::get<0>") public static native @ByRef Warning.UserWarning get0(@ByRef WarningVariant container);
+    @ValueSetter public native WarningVariant put(@ByRef Warning.UserWarning value);
+    public @ByRef Warning.DeprecationWarning get1() { return get1(this); }
+    @Namespace @Name("std::get<1>") public static native @ByRef Warning.DeprecationWarning get1(@ByRef WarningVariant container);
+    @ValueSetter public native WarningVariant put(@ByRef Warning.DeprecationWarning value);
+}
+
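Note: WarningVariant is the new dedicated mapping of c10::Warning::warning_variant_t, replacing the mistyped Nonlinearity/@Cast combination that the Warning constructors relied on before. A hypothetical construction sketch (the sourceLocation value and the default constructors of the nested tag types are assumptions, not shown in the patch):

    // Sketch only: build a Warning with an explicit category
    WarningVariant kind = new WarningVariant(new Warning.UserWarning());
    Warning warning = new Warning(kind, sourceLocation, "input tensor is non-contiguous", false);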
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakIValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakIValue.java
index d220919ca26..19bcdcb5ddd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakIValue.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakIValue.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongCompilationUnit.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongCompilationUnit.java
index aa081267621..b2a41d9fbc3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongCompilationUnit.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongCompilationUnit.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongTypePtr.java
index 0392d657a89..d763d1ac1b6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongTypePtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongTypePtr.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorage.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorage.java
index 91dc83d8610..087d4dd4dcc 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorage.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorage.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVector.java
index d9108f7e850..d9435f2bc4c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVector.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVector.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVectorOptional.java
index 88b509e5a9e..8212007d90c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVectorOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVectorOptional.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakTypePtr.java
index 8f19d5c5eb6..f9ff9a07e07 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakTypePtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakTypePtr.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/While.java b/pytorch/src/gen/java/org/bytedeco/pytorch/While.java
index eaa50ff3071..4d061051f10 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/While.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/While.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/With.java b/pytorch/src/gen/java/org/bytedeco/pytorch/With.java
index e80d29b1ec6..ab1e12e56b2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/With.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/With.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WithItem.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WithItem.java
index d84456d6062..99d5bf5787e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/WithItem.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WithItem.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WithItemList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WithItemList.java
index 6af0571ea39..d3e36a316a5 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/WithItemList.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WithItemList.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WithItemListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WithItemListIterator.java
index 2d401ea8560..c719c705c3e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/WithItemListIterator.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WithItemListIterator.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WriteableTensorData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WriteableTensorData.java
index be2bf3ece18..49e11e88a8b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/WriteableTensorData.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WriteableTensorData.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksArgs.java
index bc6f30df385..d530dff7141 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksArgs.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksArgs.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksInterface.java
index d1e22aee59a..3d4a031af0f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksInterface.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksInterface.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -51,4 +51,12 @@ public class XPUHooksInterface extends Pointer {
         @ByRef DLDevice_ dl_device,
         @Const @ByRef Device aten_device,
         Pointer data);
+
+    public native @ByVal Generator getXPUGenerator(@Cast("c10::DeviceIndex") byte device_index/*=-1*/);
+    public native @ByVal Generator getXPUGenerator();
+
+    public native @Const @ByRef Generator getDefaultXPUGenerator(@Cast("c10::DeviceIndex") byte device_index/*=-1*/);
+    public native @Const @ByRef Generator getDefaultXPUGenerator();
+
+    public native int getNumGPUs();
 }
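Note: the XPUHooksInterface additions above bring the XPU hooks in line with the other device hooks, exposing per-device random number generators and a device count. Usage sketch (obtaining the hooks instance is outside this patch; the hooks variable is an assumption):

    // Sketch only: query XPU generators through the extended hooks interface
    Generator perDevice = hooks.getXPUGenerator((byte)0);   // generator for device 0
    Generator defaultGen = hooks.getDefaultXPUGenerator();  // default generator (device_index = -1)
    int devices = hooks.getNumGPUs();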
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImpl.java
index e6e917f506f..c9bf9acee5e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImpl.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -33,6 +33,6 @@ public class ZeroPad1dImpl extends ZeroPad1dImplBase {
     public ZeroPad1dImpl(Pointer p) { super(p); }
     /** Downcast constructor. */
     public ZeroPad1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::ZeroPad1dImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ZeroPad1dImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplBase.java
index e5e6e252e7b..7df9d347f13 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplBase.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -28,7 +28,7 @@ public class ZeroPad1dImplBase extends ZeroPad1dImplCloneable {
     public ZeroPad1dImplBase(Pointer p) { super(p); }
     /** Downcast constructor. */
     public ZeroPad1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::ZeroPadImpl<1,torch::nn::ZeroPad1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ZeroPadImpl<1,torch::nn::ZeroPad1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public ZeroPad1dImplBase(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java
index 7b63c7090fb..5f9f177afd9 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -24,7 +24,7 @@ public class ZeroPad1dImplCloneable extends Module {
     public ZeroPad1dImplCloneable(Pointer p) { super(p); }
     /** Downcast constructor. */
     public ZeroPad1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::Cloneable<torch::nn::ZeroPad1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::ZeroPad1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     @Override public Module asModule() { return asModule(this); }
     @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::ZeroPad1dImpl>>") Module asModule(@SharedPtr ZeroPad1dImplCloneable pointer);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dOptions.java
index 8faca9ffb22..c4f4943e7ed 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dOptions.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java
index 17f2a9b45a4..fc715f83c07 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -33,6 +33,6 @@ public class ZeroPad2dImpl extends ZeroPad2dImplBase {
     public ZeroPad2dImpl(Pointer p) { super(p); }
     /** Downcast constructor. */
     public ZeroPad2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::ZeroPad2dImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ZeroPad2dImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplBase.java
index a62fd67c276..5bb6045c1e9 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplBase.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -24,7 +24,7 @@ public class ZeroPad2dImplBase extends ZeroPad2dImplCloneable {
     public ZeroPad2dImplBase(Pointer p) { super(p); }
     /** Downcast constructor. */
     public ZeroPad2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::ZeroPadImpl<2,torch::nn::ZeroPad2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ZeroPadImpl<2,torch::nn::ZeroPad2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public ZeroPad2dImplBase(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java
index 8d5611bd6e9..f259214825f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -24,7 +24,7 @@ public class ZeroPad2dImplCloneable extends Module {
     public ZeroPad2dImplCloneable(Pointer p) { super(p); }
     /** Downcast constructor. */
     public ZeroPad2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::Cloneable<torch::nn::ZeroPad2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::ZeroPad2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     @Override public Module asModule() { return asModule(this); }
     @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::ZeroPad2dImpl>>") Module asModule(@SharedPtr ZeroPad2dImplCloneable pointer);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dOptions.java
index f56c7204c37..c68363f816b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dOptions.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImpl.java
index 6ae59b6a9df..be8360e1255 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImpl.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -33,6 +33,6 @@ public class ZeroPad3dImpl extends ZeroPad3dImplBase {
     public ZeroPad3dImpl(Pointer p) { super(p); }
     /** Downcast constructor. */
     public ZeroPad3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::ZeroPad3dImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ZeroPad3dImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplBase.java
index afa0aedb863..e540737da5f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplBase.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -24,7 +24,7 @@ public class ZeroPad3dImplBase extends ZeroPad3dImplCloneable {
     public ZeroPad3dImplBase(Pointer p) { super(p); }
     /** Downcast constructor. */
     public ZeroPad3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast<torch::nn::ZeroPadImpl<3,torch::nn::ZeroPad3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
+    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ZeroPadImpl<3,torch::nn::ZeroPad3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public ZeroPad3dImplBase(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java
index 5e3b981d672..0ff22ca3eaf 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
@@ -24,7 +24,7 @@ public class ZeroPad3dImplCloneable extends Module {
     public ZeroPad3dImplCloneable(Pointer p) { super(p); }
     /** Downcast constructor. */
*/ public ZeroPad3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); @Override public Module asModule() { return asModule(this); } @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ZeroPad3dImplCloneable pointer); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dOptions.java index a091cf99912..ec322266026 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java index d9c7a140e50..51a3904102f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java index acfbea5226d..03973a835b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/bits16.java b/pytorch/src/gen/java/org/bytedeco/pytorch/bits16.java index 650b8094350..f893d4ca8e6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/bits16.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/bits16.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/bits1x8.java b/pytorch/src/gen/java/org/bytedeco/pytorch/bits1x8.java index c230d31dfb4..9f0c9812b66 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/bits1x8.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/bits1x8.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/bits2x4.java b/pytorch/src/gen/java/org/bytedeco/pytorch/bits2x4.java index b41e4770037..ba5621fe38d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/bits2x4.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/bits2x4.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/bits4x2.java b/pytorch/src/gen/java/org/bytedeco/pytorch/bits4x2.java index 75eee901af5..94fb2680d16 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/bits4x2.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/bits4x2.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/bits8.java b/pytorch/src/gen/java/org/bytedeco/pytorch/bits8.java index f6133539bf2..2a9771662da 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/bits8.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/bits8.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/bitset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/bitset.java index fbfcaacc35a..3e985663e18 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/bitset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/bitset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java index 06d66e43aac..44ba87ccdf1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java index 8cd6e9f69f8..39f9f472621 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/crc64_t.java b/pytorch/src/gen/java/org/bytedeco/pytorch/crc64_t.java index 0dbe5586b5f..b3bde3b819f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/crc64_t.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/crc64_t.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ActivationDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ActivationDescriptor.java index ae6644ef656..b2592f949a2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ActivationDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ActivationDescriptor.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import 
org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AllocatorState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AllocatorState.java index 8483d5730d2..3268346800b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AllocatorState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AllocatorState.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ApproximateClockToUnixTimeConverter.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ApproximateClockToUnixTimeConverter.java new file mode 100644 index 00000000000..af3bd4b9195 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ApproximateClockToUnixTimeConverter.java @@ -0,0 +1,66 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.cuda.functions.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +// Convert `getCount` results to Nanoseconds since unix epoch. +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class ApproximateClockToUnixTimeConverter extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ApproximateClockToUnixTimeConverter(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public ApproximateClockToUnixTimeConverter(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public ApproximateClockToUnixTimeConverter position(long position) { + return (ApproximateClockToUnixTimeConverter)super.position(position); + } + @Override public ApproximateClockToUnixTimeConverter getPointer(long i) { + return new ApproximateClockToUnixTimeConverter((Pointer)this).offsetAddress(i); + } + + public ApproximateClockToUnixTimeConverter() { super((Pointer)null); allocate(); } + private native void allocate(); + + + public static class UnixAndApproximateTimePair extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public UnixAndApproximateTimePair() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public UnixAndApproximateTimePair(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public UnixAndApproximateTimePair(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public UnixAndApproximateTimePair position(long position) { + return (UnixAndApproximateTimePair)super.position(position); + } + @Override public UnixAndApproximateTimePair getPointer(long i) { + return new UnixAndApproximateTimePair((Pointer)this).offsetAddress(i); + } + + public native @Cast("c10::time_t") long t_(); public native UnixAndApproximateTimePair t_(long setter); + public native @Cast("c10::approx_time_t") long approx_t_(); public native UnixAndApproximateTimePair approx_t_(long setter); + } + public static native @ByVal UnixAndApproximateTimePair measurePair(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/BlockInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/BlockInfo.java index 39b27331fad..a7096104e7b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/BlockInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/BlockInfo.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java index f4984dd773d..587ebac4a1a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java index 168d8eaa02d..8ead036744f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; @@ -66,13 +66,21 @@ public native void recordHistory( @ByVal @Cast("c10::cuda::CUDACachingAllocator::CreateContextFn*") Pointer context_recorder, @Cast("size_t") long alloc_trace_max_entries, @Cast("c10::cuda::CUDACachingAllocator::RecordContext") int when); - public native void attachOutOfMemoryObserver(@ByVal 
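The newly generated ApproximateClockToUnixTimeConverter above exposes a static measurePair() returning the nested UnixAndApproximateTimePair holder with its t_() and approx_t_() accessors. A minimal sketch of exercising that surface from Java, using only members visible in the generated file; it is an illustration, not part of the patch:

    import org.bytedeco.pytorch.cuda.ApproximateClockToUnixTimeConverter;
    import org.bytedeco.pytorch.cuda.ApproximateClockToUnixTimeConverter.UnixAndApproximateTimePair;

    public class ClockPairSketch {
        public static void main(String[] args) {
            // Samples the unix clock and the approximate counter together,
            // via the static native measurePair() shown in the generated class.
            UnixAndApproximateTimePair pair = ApproximateClockToUnixTimeConverter.measurePair();
            System.out.println("unix time (ns):   " + pair.t_());
            System.out.println("approximate time: " + pair.approx_t_());
        }
    }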
@Cast("c10::cuda::CUDACachingAllocator::OutOfMemoryObserver*") Pointer observer); + public native void attachOutOfMemoryObserver(@ByVal OutOfMemoryObserver observer); + + // Attached AllocatorTraceTracker callbacks will be called while the + // per-device allocator lock is held. Any additional locks taken from within + // the callback must be proven to always have the lock order that never + // triggers a deadlock. In particular, Python's GIL may be held when + // calling the allocator so it is unsafe to try to acquire the GIL in this + // callback. + public native void attachAllocatorTraceTracker(@ByVal AllocatorTraceTracker tracker); public native void enablePeerAccess(int dev, int dev_to_access); // memory not allocated from cudaMalloc cannot be copied // across devices using cudaMemcpyAsync if peer to peer access is disabled. - // instead it requres cudaMemcpyAsyncPeer + // instead it requires cudaMemcpyAsyncPeer // with P2P Enabled, all combinations work // with P2P Disabled: // cudaMalloc cudaMallocAsync/cuMemMap diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAGuard.java index fb3e01b1e87..2846c39ed25 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAGuard.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfo.java index 0e8b9a2a569..e0453629f9f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfo.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfoVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfoVector.java index 7239da02abc..2f3078807ab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfoVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfoVector.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchRegistry.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchRegistry.java index 42385178732..2940f07efb4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchRegistry.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchRegistry.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAMultiStreamGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAMultiStreamGuard.java index 0434fa60eb8..fb7802e2001 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAMultiStreamGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAMultiStreamGuard.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStream.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStream.java index 5177707c034..fa2a41b8961 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStream.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStream.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java index 251e0969230..d3bb0f715da 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamCaptureModeGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamCaptureModeGuard.java index 99ab19ac016..6fa1734fe63 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamCaptureModeGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamCaptureModeGuard.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// 
Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamGuard.java index 1e6a6d83c9b..aedc694a0e6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamGuard.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CheckpointDelta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CheckpointDelta.java index 2f15e6dbdf1..f10146d51dc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CheckpointDelta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CheckpointDelta.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Constant.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Constant.java index 33a511e1587..9f918a6c310 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Constant.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Constant.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ConvolutionDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ConvolutionDescriptor.java index c70c0b9355c..bb53b77d57c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ConvolutionDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ConvolutionDescriptor.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CuDNNError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CuDNNError.java index c0b30040f02..b8e555e4dc3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CuDNNError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CuDNNError.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionData.java index a1a0100068a..cc8973cf24f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionData.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionData.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsData.java index a195344bf1a..c6a38b4110a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsData.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsData.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVector.java index fcab82e7c70..87487330427 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVector.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair.java index 20e845c21fd..efefce665ca 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java index 0d11311b576..1122dcfeef0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DropoutDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DropoutDescriptor.java index 9942394663c..fe5ce0803ca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DropoutDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DropoutDescriptor.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/FilterDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/FilterDescriptor.java index 2a1c57bb6b4..11efaba7ffb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/FilterDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/FilterDescriptor.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/PointerSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/PointerSet.java index de69d2fb9f0..3594b4e13fc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/PointerSet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/PointerSet.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import 
org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/RNNDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/RNNDescriptor.java index 77e6d56d0a4..831e24dc315 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/RNNDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/RNNDescriptor.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SegmentInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SegmentInfo.java index 3adc185e389..b22f2ccbc03 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SegmentInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SegmentInfo.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java index a48bf6abffb..19af7311a7e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SpatialTransformerDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SpatialTransformerDescriptor.java index fa6c3ae5477..40cbeffa0ac 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SpatialTransformerDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SpatialTransformerDescriptor.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java index ce50bfd5fab..aae7a155fa7 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; @@ -22,19 +22,6 @@ import static org.bytedeco.pytorch.global.torch_cuda.*; -// TODO: Turn this into an honest to goodness class. I briefly attempted to do -// this, but it was a bit irritating to figure out how to also correctly -// apply pimpl pattern so I didn't have to leak any internal implementation -// details in the header (CUDACachingAllocator could be made a pimpl, but -// you also need to appropriately define a class which is a subclass -// of Allocator. Not impossible, but required a bit more surgery than -// I wanted to do at the time.) -// -// Why is this using a namespace rather than old-style THCCachingAllocator_ -// prefix? Mostly because it made the HIPify rules easier to write; _ is -// not counted as a word boundary, so you would otherwise have to list each -// of these functions. - @Namespace("c10::cuda::CUDACachingAllocator") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) public class Stat extends Pointer { static { Loader.load(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java index 054254d298d..a602c344567 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntry.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntry.java index 31a3b2298ce..4077a77c580 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntry.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntry.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; @@ -52,51 +52,69 @@ public enum Action { } public TraceEntry( Action action, + int device, @Cast("int64_t") long addr, @Cast("size_t") long size, @Cast("cudaStream_t") Pointer stream, - @SharedPtr GatheredContext context/*=nullptr*/) { super((Pointer)null); allocate(action, addr, size, stream, context); } + @Cast("c10::approx_time_t") long time, + @SharedPtr GatheredContext context/*=nullptr*/) { super((Pointer)null); allocate(action, device, 
addr, size, stream, time, context); } private native void allocate( Action action, + int device, @Cast("int64_t") long addr, @Cast("size_t") long size, @Cast("cudaStream_t") Pointer stream, + @Cast("c10::approx_time_t") long time, @SharedPtr GatheredContext context/*=nullptr*/); public TraceEntry( Action action, + int device, @Cast("int64_t") long addr, @Cast("size_t") long size, - @Cast("cudaStream_t") Pointer stream) { super((Pointer)null); allocate(action, addr, size, stream); } + @Cast("cudaStream_t") Pointer stream, + @Cast("c10::approx_time_t") long time) { super((Pointer)null); allocate(action, device, addr, size, stream, time); } private native void allocate( Action action, + int device, @Cast("int64_t") long addr, @Cast("size_t") long size, - @Cast("cudaStream_t") Pointer stream); + @Cast("cudaStream_t") Pointer stream, + @Cast("c10::approx_time_t") long time); public TraceEntry( @Cast("c10::cuda::CUDACachingAllocator::TraceEntry::Action") int action, + int device, @Cast("int64_t") long addr, @Cast("size_t") long size, @Cast("cudaStream_t") Pointer stream, - @SharedPtr GatheredContext context/*=nullptr*/) { super((Pointer)null); allocate(action, addr, size, stream, context); } + @Cast("c10::approx_time_t") long time, + @SharedPtr GatheredContext context/*=nullptr*/) { super((Pointer)null); allocate(action, device, addr, size, stream, time, context); } private native void allocate( @Cast("c10::cuda::CUDACachingAllocator::TraceEntry::Action") int action, + int device, @Cast("int64_t") long addr, @Cast("size_t") long size, @Cast("cudaStream_t") Pointer stream, + @Cast("c10::approx_time_t") long time, @SharedPtr GatheredContext context/*=nullptr*/); public TraceEntry( @Cast("c10::cuda::CUDACachingAllocator::TraceEntry::Action") int action, + int device, @Cast("int64_t") long addr, @Cast("size_t") long size, - @Cast("cudaStream_t") Pointer stream) { super((Pointer)null); allocate(action, addr, size, stream); } + @Cast("cudaStream_t") Pointer stream, + @Cast("c10::approx_time_t") long time) { super((Pointer)null); allocate(action, device, addr, size, stream, time); } private native void allocate( @Cast("c10::cuda::CUDACachingAllocator::TraceEntry::Action") int action, + int device, @Cast("int64_t") long addr, @Cast("size_t") long size, - @Cast("cudaStream_t") Pointer stream); + @Cast("cudaStream_t") Pointer stream, + @Cast("c10::approx_time_t") long time); public native Action action_(); public native TraceEntry action_(Action setter); + public native int device_(); public native TraceEntry device_(int setter); public native @Cast("int64_t") long addr_(); public native TraceEntry addr_(long setter); // for OOM, this is the amount of free bytes reported by cuda public native @SharedPtr GatheredContext context_(); public native TraceEntry context_(GatheredContext setter); public native @Cast("cudaStream_t") Pointer stream_(); public native TraceEntry stream_(Pointer setter); public native @Cast("int64_t") long size_(); public native TraceEntry size_(long setter); + public native @ByRef trace_time_ time_(); public native TraceEntry time_(trace_time_ setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntryVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntryVector.java index 8300a48b36b..16bea8ee2f5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntryVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntryVector.java @@ -1,9 +1,9 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by 
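The TraceEntry constructors above now take a device index and a c10::approx_time_t timestamp ahead of the optional GatheredContext, and the new time_() accessor returns the trace_time_ struct. A sketch of the widened non-context form, mirroring only the allocate(action, device, addr, size, stream, time) signature visible in this hunk; Action.ALLOC stands in for whichever enum constant applies, since the enum body is elided here:

    // Hypothetical values; the Action constant is assumed, everything else
    // follows the constructor signature shown above.
    TraceEntry entry = new TraceEntry(
            TraceEntry.Action.ALLOC,     // assumed enum constant
            /* device */ 0,
            /* addr   */ 0L,
            /* size   */ 1024L,
            /* stream */ new Pointer(),  // cudaStream_t; null stream for the sketch
            /* time   */ 0L);            // c10::approx_time_t
    trace_time_ when = entry.time_();    // carries t_() and approx_t_()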
JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/trace_time_.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/trace_time_.java new file mode 100644 index 00000000000..c3db85c2c51 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/trace_time_.java @@ -0,0 +1,45 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.cuda.functions.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +@Namespace("c10::cuda::CUDACachingAllocator") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class trace_time_ extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public trace_time_() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public trace_time_(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public trace_time_(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public trace_time_ position(long position) { + return (trace_time_)super.position(position); + } + @Override public trace_time_ getPointer(long i) { + return new trace_time_((Pointer)this).offsetAddress(i); + } + + public native @Cast("c10::time_t") long t_(); public native trace_time_ t_(long setter); + public native @Cast("c10::approx_time_t") long approx_t_(); public native trace_time_ approx_t_(long setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 5df2e40fbd0..181eaaf6c76 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.global; @@ -20,6 +20,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { static { Loader.load(); } +// Targeting ../InlinedCallStackOptional.java + + +// Targeting ../BatchSizeOptional.java + + // Targeting ../BoolOptional.java @@ -164,9 +170,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../ExecutorExecutionModeOptional.java -// Targeting ../InlinedCallStackOptional.java - - // Targeting ../ScopeOptional.java @@ -209,7 +212,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../WeakStorageVectorOptional.java -// Targeting ../BatchSizeOptional.java +// Targeting ../CppSignatureOptional.java + + +// Targeting ../SafePyObjectOptional.java + + +// Targeting ../BytePointerPairOptional.java // Targeting ../ExampleOptional.java @@ -227,54 +236,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../T_TensorTensor_TOptional.java -// Targeting ../Nonlinearity.java - - -// Targeting ../FanModeType.java - - -// Targeting ../ConvPaddingMode.java - - -// Targeting ../Conv1dPadding.java - - -// Targeting ../Conv2dPadding.java - - -// Targeting ../Conv3dPadding.java - - -// Targeting ../EmbeddingBagMode.java - - -// Targeting ../PaddingMode.java - - -// Targeting ../LossReduction.java - - -// Targeting ../KLDivLossReduction.java - - -// Targeting ../GridSampleMode.java - - -// Targeting ../GridSamplePaddingMode.java - - -// Targeting ../RNNBaseMode.java - - -// Targeting ../RNNNonlinearity.java - - -// Targeting ../UpsampleMode.java - - -// Targeting ../InterpolateMode.java - - // Targeting ../TensorDeque.java @@ -437,6 +398,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../SharedSugaredValueVector.java +// Targeting ../TagVector.java + + +// Targeting ../ReadAdapterInterfaceVector.java + + // Targeting ../ExampleVector.java @@ -467,6 +434,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../SizeTMatchedSchemaPair.java +// Targeting ../BytePointerPair.java + + // Targeting ../T_DataPtrSizeT_T.java @@ -524,6 +494,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../T_TypePtrLong_T.java +// Targeting ../T_SafePyObjectTorchDispatchModeKey_T.java + + // Targeting ../HashAliasedIValueMap.java @@ -572,6 +545,57 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../DeviceTypeSet.java +// Targeting ../Nonlinearity.java + + +// Targeting 
../FanModeType.java + + +// Targeting ../ConvPaddingMode.java + + +// Targeting ../Conv1dPadding.java + + +// Targeting ../Conv2dPadding.java + + +// Targeting ../Conv3dPadding.java + + +// Targeting ../EmbeddingBagMode.java + + +// Targeting ../PaddingMode.java + + +// Targeting ../LossReduction.java + + +// Targeting ../KLDivLossReduction.java + + +// Targeting ../GridSampleMode.java + + +// Targeting ../GridSamplePaddingMode.java + + +// Targeting ../RNNBaseMode.java + + +// Targeting ../RNNNonlinearity.java + + +// Targeting ../UpsampleMode.java + + +// Targeting ../InterpolateMode.java + + +// Targeting ../WarningVariant.java + + // Parsed from torch/csrc/utils/python_stub.h // #pragma once @@ -590,6 +614,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { /* #undef C10_USE_GFLAGS */ /* #undef C10_USE_NUMA */ /* #undef C10_USE_MSVC_STATIC_RUNTIME */ +/* #undef C10_USE_ROCM_KERNEL_ASSERT */ // #endif // C10_MACROS_CMAKE_MACROS_H_ @@ -1020,7 +1045,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // CUDA_KERNEL_ASSERT checks the assertion // even when NDEBUG is defined. This is useful for important assertions in CUDA // code that would otherwise be suppressed when building Release. -// #if defined(__ANDROID__) || defined(__APPLE__) || +// #if defined(__ANDROID__) || defined(__APPLE__) || defined(__FreeBSD__) || // (defined(USE_ROCM) && ROCM_VERSION < 40100) // Those platforms do not support assert() // #define CUDA_KERNEL_ASSERT(cond) @@ -1029,6 +1054,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #else // __APPLE__, _MSC_VER // #if defined(NDEBUG) // #endif // NDEBUG +// ROCm disable kernel assert by default +// #if !defined(C10_USE_ROCM_KERNEL_ASSERT) and defined(USE_ROCM) +// #define CUDA_KERNEL_ASSERT(cond) +// #define SYCL_KERNEL_ASSERT(cond) +// #else // #define CUDA_KERNEL_ASSERT(cond) // if (C10_UNLIKELY(!(cond))) { // __assert_fail( @@ -1039,6 +1069,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // __assert_fail( // #cond, __FILE__, static_cast(__LINE__), __func__); // } +// #endif // C10_USE_ROCM_KERNEL_ASSERT and USE_ROCM // #endif // __APPLE__ // #ifdef __APPLE__ @@ -1370,71 +1401,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #endif -// Parsed from c10/util/reverse_iterator.h - -// #pragma once - -/** - * A constexpr std::reverse_iterator for C++11. - * Implementation taken from libstdc++, - * https://raw.githubusercontent.com/gcc-mirror/gcc/gcc-9_2_0-release/libstdc%2B%2B-v3/include/bits/stl_iterator.h - * adapted to our code base and constexpr'ified. - */ - -// Copyright (C) 2001-2019 Free Software Foundation, Inc. -// -// This file is part of the GNU ISO C++ Library. This library is free -// software; you can redistribute it and/or modify it under the -// terms of the GNU General Public License as published by the -// Free Software Foundation; either version 3, or (at your option) -// any later version. - -// This library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// Under Section 7 of GPL version 3, you are granted additional -// permissions described in the GCC Runtime Library Exception, version -// 3.1, as published by the Free Software Foundation. 
- -// You should have received a copy of the GNU General Public License and -// a copy of the GCC Runtime Library Exception along with this program; -// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -// . - -/* - * - * Copyright (c) 1994 - * Hewlett-Packard Company - * - * Permission to use, copy, modify, distribute and sell this software - * and its documentation for any purpose is hereby granted without fee, - * provided that the above copyright notice appear in all copies and - * that both that copyright notice and this permission notice appear - * in supporting documentation. Hewlett-Packard Company makes no - * representations about the suitability of this software for any - * purpose. It is provided "as is" without express or implied warranty. - * - * - * Copyright (c) 1996-1998 - * Silicon Graphics Computer Systems, Inc. - * - * Permission to use, copy, modify, distribute and sell this software - * and its documentation for any purpose is hereby granted without fee, - * provided that the above copyright notice appear in all copies and - * that both that copyright notice and this permission notice appear - * in supporting documentation. Silicon Graphics makes no - * representations about the suitability of this software for any - * purpose. It is provided "as is" without express or implied warranty. - */ - -// #include -// #include - - // namespace c10 - - // Parsed from c10/util/StringUtil.h // #ifndef C10_UTIL_STRINGUTIL_H_ @@ -1502,15 +1468,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #endif // C10_UTIL_STRINGUTIL_H_ -// Parsed from c10/util/in_place.h - -// #pragma once - -// #include - - // namespace c10 - - // Parsed from c10/util/Exception.h // #ifndef C10_UTIL_EXCEPTION_H_ @@ -1518,11 +1475,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include // #include // #include // #include +// #include // #include // #if defined(_MSC_VER) && _MSC_VER <= 1900 @@ -1576,6 +1533,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { +// Targeting ../ErrorAlwaysShowCppStacktrace.java + + // Targeting ../IndexError.java @@ -1600,9 +1560,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../OutOfMemoryError.java +// Targeting ../DistError.java + + // Targeting ../DistBackendError.java +// Targeting ../DistStoreError.java + + +// Targeting ../DistNetworkError.java + + // A utility function to return an exception std::string by prepending its // exception type before its what() content @@ -1623,6 +1592,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // throw ::c10::err_type( // {__func__, __FILE__, static_cast(__LINE__)}, msg) +// #define C10_BUILD_ERROR(err_type, msg) +// ::c10::err_type({__func__, __FILE__, static_cast(__LINE__)}, msg) + // Private helper macro for workaround MSVC misexpansion of nested macro // invocations involving __VA_ARGS__. See // https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly @@ -1875,6 +1847,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #define TORCH_CHECK_NOT_IMPLEMENTED(cond, ...) // TORCH_CHECK_WITH_MSG(NotImplementedError, cond, "TYPE", __VA_ARGS__) +// #define TORCH_CHECK_ALWAYS_SHOW_CPP_STACKTRACE(cond, ...) +// TORCH_CHECK_WITH_MSG( +// ErrorAlwaysShowCppStacktrace, cond, "TYPE", ##__VA_ARGS__) + // #ifdef STRIP_ERROR_MESSAGES // #define WARNING_MESSAGE_STRING(...) 
// ::c10::detail::CompileTimeEmptyString {} @@ -2044,7 +2020,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // a DispatchKeySet. The bits in the DispatchKeySet are split between the bottom // ~12 "BackendComponent" bits, while the remaining upper bits are assigned to // functionalities. When we encounter a functionality bit that is known to be -// customizeable per-backend, then we also look at the lower BackendComponent +// customizable per-backend, then we also look at the lower BackendComponent // bits and take the highest bit to determine which backend's implementation to // use. @@ -2414,37 +2390,41 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // go here. FuncTorchBatched((short)(Undefined.value + 34)), // See Note [Out-of-tree vmap+grad prototype] - FuncTorchVmapMode((short)(Undefined.value + 35)), // See Note [Out-of-tree vmap+grad prototype] + + // Dispatch key for BatchedTensorImpl wrapping a nested tensor. + BatchedNestedTensor((short)(Undefined.value + 35)), + + FuncTorchVmapMode((short)(Undefined.value + 36)), // See Note [Out-of-tree vmap+grad prototype] // This is the dispatch key for BatchedTensorImpl, which is used to implement // batching rules for vmap. - Batched((short)(Undefined.value + 36)), + Batched((short)(Undefined.value + 37)), // When we are inside a vmap, all tensors dispatch on this key. // See Note: [DispatchKey::VmapMode usage] for more details. - VmapMode((short)(Undefined.value + 37)), + VmapMode((short)(Undefined.value + 38)), - FuncTorchGradWrapper((short)(Undefined.value + 38)), // See Note [Out-of-tree vmap+grad prototype] + FuncTorchGradWrapper((short)(Undefined.value + 39)), // See Note [Out-of-tree vmap+grad prototype] // Out-of-core key for Deferred Module Initialization in torchdistx. // See https://pytorch.org/torchdistx/latest/deferred_init.html - DeferredInit((short)(Undefined.value + 39)), + DeferredInit((short)(Undefined.value + 40)), // Used by Python key logic to know the set of tls on entry to the dispatcher // This kernel assumes it is the top-most non-functorch-related DispatchKey. // If you add a key above, make sure to update the fallback implementation for // this. - PythonTLSSnapshot((short)(Undefined.value + 40)), + PythonTLSSnapshot((short)(Undefined.value + 41)), // This key should be at the very top of the dispatcher - FuncTorchDynamicLayerFrontMode((short)(Undefined.value + 41)), // See Note [Out-of-tree vmap+grad prototype] + FuncTorchDynamicLayerFrontMode((short)(Undefined.value + 42)), // See Note [Out-of-tree vmap+grad prototype] // TESTING: This is intended to be a generic testing tensor type id. // Don't use it for anything real; its only acceptable use is within a single // process test. Use it by creating a TensorImpl with this DispatchKey, and // then registering operators to operate on this type id. See // aten/src/ATen/core/dispatch/backend_fallback_test.cpp for a usage example. - TESTING_ONLY_GenericWrapper((short)(Undefined.value + 42)), + TESTING_ONLY_GenericWrapper((short)(Undefined.value + 43)), // TESTING: This is intended to be a generic testing tensor type id. // Don't use it for anything real; its only acceptable use is within a ingle @@ -2453,51 +2433,51 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // to operate on this type id. 
See // aten/src/ATen/core/dispatch/backend_fallback_test.cpp // for a usage example - TESTING_ONLY_GenericMode((short)(Undefined.value + 43)), + TESTING_ONLY_GenericMode((short)(Undefined.value + 44)), // This key is used for pre-dispatch tracing in make_fx. // It has lower priority than the PythonDispatcher key // because we use the PythonDispatcher to intercept the key from python, // and avoid having to implement it in C++. - PreDispatch((short)(Undefined.value + 44)), + PreDispatch((short)(Undefined.value + 45)), // This is a bypass that allows you to skip running the C++ dispatcher // entirely - PythonDispatcher((short)(Undefined.value + 45)), + PythonDispatcher((short)(Undefined.value + 46)), // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // - EndOfFunctionalityKeys((short)(Undefined.value + 46)), + EndOfFunctionalityKeys((short)(Undefined.value + 47)), - StartOfDenseBackends((short)(Undefined.value + 47)), - CPU((short)(Undefined.value + 48)), + StartOfDenseBackends((short)(Undefined.value + 48)), + CPU((short)(Undefined.value + 49)), - CUDA((short)(Undefined.value + 49)), + CUDA((short)(Undefined.value + 50)), - HIP((short)(Undefined.value + 50)), + HIP((short)(Undefined.value + 51)), - XLA((short)(Undefined.value + 51)), + XLA((short)(Undefined.value + 52)), - MPS((short)(Undefined.value + 52)), + MPS((short)(Undefined.value + 53)), - IPU((short)(Undefined.value + 53)), + IPU((short)(Undefined.value + 54)), - XPU((short)(Undefined.value + 54)), + XPU((short)(Undefined.value + 55)), - HPU((short)(Undefined.value + 55)), + HPU((short)(Undefined.value + 56)), - VE((short)(Undefined.value + 56)), + VE((short)(Undefined.value + 57)), - Lazy((short)(Undefined.value + 57)), + Lazy((short)(Undefined.value + 58)), - MTIA((short)(Undefined.value + 58)), + MTIA((short)(Undefined.value + 59)), - PrivateUse1((short)(Undefined.value + 59)), + PrivateUse1((short)(Undefined.value + 60)), - PrivateUse2((short)(Undefined.value + 60)), + PrivateUse2((short)(Undefined.value + 61)), - PrivateUse3((short)(Undefined.value + 61)), + PrivateUse3((short)(Undefined.value + 62)), - Meta((short)(Undefined.value + 62)), + Meta((short)(Undefined.value + 63)), EndOfDenseBackends((short)(0)), StartOfQuantizedBackends((short)(1)), QuantizedCPU((short)(2)), @@ -2805,17 +2785,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace std -// Parsed from c10/util/Array.h - -// #pragma once - -// #include -// #include - - // namespace guts - // namespace c10 - - // Parsed from c10/util/TypeTraits.h // #pragma once @@ -3165,7 +3134,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("c10") public static native @ByVal DispatchKeySet getRuntimeDispatchKeySet(@Cast("c10::DispatchKey") short t); // Resolve alias dispatch key to DispatchKeySet if applicable, -// and chek if k is a part of that set +// and check if k is a part of that set @Namespace("c10") public static native @Cast("bool") boolean runtimeDispatchKeySetHas(DispatchKey t, DispatchKey k); @Namespace("c10") public static native @Cast("bool") boolean runtimeDispatchKeySetHas(@Cast("c10::DispatchKey") short t, @Cast("c10::DispatchKey") short k); @@ -3323,7 +3292,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { SparseCsc((byte)(4)), SparseBsr((byte)(5)), SparseBsc((byte)(6)), - NumOptions((byte)(7)); + Jagged((byte)(7)), + NumOptions((byte)(8)); public final byte value; private Layout(byte v) { this.value = v; } @@ -3556,7 +3526,6 @@ public class torch extends 
org.bytedeco.pytorch.presets.torch { // #pragma once -// #include // #include // #include // #include @@ -4354,6 +4323,108 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // IWYU pragma: keep +// Parsed from c10/util/Float8_e4m3fnuz-inl.h + +// #pragma once + +// #include +// #include + +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #endif + +/** Constructors */ + + + +/** Implicit conversions */ + + + +/** Special values helper */ + + + + // namespace c10 + + // namespace std + + + +// Parsed from c10/util/Float8_e4m3fnuz.h + + +/// +/// +/// +// #pragma once + +/** Defines the Float8_e4m3fnuz type (8-bit floating-point) including + * conversions to standard C types and basic arithmetic operations. Note that + * arithmetic operations are implemented by converting to floating point and + * performing the operation in float32. + * + * Binary configuration remains the same as Float8_e4m3fn: + * s eeee mmm + * 1 sign bit + * 4 exponent bits + * 3 mantissa bits + * + * The key differences versus Float8_e4m3fn are: + * bias = 8 + * no infinities or negative zero + * NaN only when sign bit is 1, rest all 0s + * + * Implementation based on the paper https://arxiv.org/pdf/2206.02915.pdf and + * the existing Float8_e4m3fn implementation. */ + +// #include +// #include +// #include +// #include + +// #if defined(__cplusplus) && (__cplusplus >= 201103L) +// #include +// #elif !defined(__OPENCL_VERSION__) +// #include +// #include +// #endif + +// #include +// #include + +/* + * Convert a 8-bit floating-point number in fp8 E4M3FNUZ format, in bit + * representation, to a 32-bit floating-point number in IEEE single-precision + * format, in bit representation. + * + * @note The implementation doesn't use any floating-point operations. + */ +// #if defined(__CUDA_ARCH__) || defined(__HIP__) +@Namespace("c10::detail") public static native float fp8e4m3fnuz_to_fp32_value(@Cast("uint8_t") byte arg0); +// #else +// #endif + +/* + * Convert a 32-bit floating-point number in IEEE single-precision format to a + * 8-bit floating-point number in fp8 E4M3FNUZ format, in bit representation. 
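 *
 * A round-trip sketch through the generated Java bindings, kept as a comment
 * here since both directions operate on raw bit patterns; 0.5f is chosen
 * because it is exactly representable in e4m3fnuz:
 *
 *   byte bits = fp8e4m3fnuz_from_fp32_value(0.5f);
 *   float back = fp8e4m3fnuz_to_fp32_value(bits); // 0.5f again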
+ */ +@Namespace("c10::detail") public static native @Cast("uint8_t") byte fp8e4m3fnuz_from_fp32_value(float f); + + +// Targeting ../Float8_e4m3fnuz.java + + + +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( + @Cast("std::ostream*") @ByRef Pointer out, + @Const @ByRef Float8_e4m3fnuz value); + + // namespace c10 + +// #include // IWYU pragma: keep + + // Parsed from c10/util/complex_math.h // #if !defined(C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H) @@ -4615,6 +4686,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // for SYCL 2020 // #endif +// #if (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && +// !defined(__APPLE__) +// #include +// #endif + // #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") // #endif @@ -5038,6 +5114,108 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // IWYU pragma: keep +// Parsed from c10/util/Float8_e5m2fnuz-inl.h + +// #pragma once + +// #include +// #include + +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #endif + +/** Constructors */ + + + +/** Implicit conversions */ + + + +/** Special values helpers */ + + + + // namespace c10 + + // namespace std + + + +// Parsed from c10/util/Float8_e5m2fnuz.h + + +/// +/// +/// +// #pragma once + +/** Defines the Float8_e5m2fnuz type (8-bit floating-point) including + * conversions to standard C types and basic arithmetic operations. Note that + * arithmetic operations are implemented by converting to floating point and + * performing the operation in float32. + * + * Binary configuration remains the same as e5m2: + * s eeeee mm + * 1 sign bit + * 5 exponent bits + * 2 mantissa bits + * + * The key differences that e5m2fnuz brings are: + * bias = 16 + * no infinities or negative zero + * NaN only when sign bit is 1, rest all 0s + * + * Implementation based on the paper https://arxiv.org/pdf/2206.02915.pdf and + * the existing Float8_e4m3fn implementation. */ + +// #include +// #include +// #include +// #include + +// #if defined(__cplusplus) && (__cplusplus >= 201103L) +// #include +// #elif !defined(__OPENCL_VERSION__) +// #include +// #include +// #endif + +// #include +// #include + +/* + * Convert a 8-bit floating-point number in fp8 E5M2FNUZ format, in bit + * representation, to a 32-bit floating-point number in IEEE single-precision + * format, in bit representation. + * + * @note The implementation doesn't use any floating-point operations. + */ +// #if defined(__CUDA_ARCH__) || defined(__HIP__) +@Namespace("c10::detail") public static native float fp8e5m2fnuz_to_fp32_value(@Cast("uint8_t") byte arg0); +// #else +// #endif + +/* + * Convert a 32-bit floating-point number in IEEE single-precision format to a + * 8-bit floating-point number in fp8 E5M2 format, in bit representation. 
+ */ +@Namespace("c10::detail") public static native @Cast("uint8_t") byte fp8e5m2fnuz_from_fp32_value(float f); + + +// Targeting ../Float8_e5m2fnuz.java + + + +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( + @Cast("std::ostream*") @ByRef Pointer out, + @Const @ByRef Float8_e5m2fnuz value); + + // namespace c10 + +// #include // IWYU pragma: keep + + // Parsed from c10/util/bits.h // #pragma once @@ -5136,7 +5314,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include +// #include // #include // #include // #include @@ -5146,6 +5326,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include +// #include // #include // #include // #include @@ -5185,11 +5366,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // _(c10::bits16, Bits16) /* 22 */ // _(c10::Float8_e5m2, Float8_e5m2) /* 23 */ // _(c10::Float8_e4m3fn, Float8_e4m3fn) /* 24 */ +// _(c10::Float8_e5m2fnuz, Float8_e5m2fnuz) /* 25 */ +// _(c10::Float8_e4m3fnuz, Float8_e4m3fnuz) /* 26 */ // If you want to support ComplexHalf for real, add ComplexHalf // into this macro (and change the name). But beware: convert() // doesn't work for all the conversions you need... -// #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF(_) +// #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF_F8NZ(_) // _(uint8_t, Byte) // _(int8_t, Char) // _(int16_t, Short) @@ -5221,6 +5404,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // _(at::BFloat16, BFloat16) // _(at::Float8_e5m2, Float8_e5m2) // _(at::Float8_e4m3fn, Float8_e4m3fn) +// _(at::Float8_e5m2fnuz, Float8_e5m2fnuz) +// _(at::Float8_e4m3fnuz, Float8_e4m3fnuz) @Namespace("c10") public enum ScalarType { Byte((byte)(0)), /* 0 */ @@ -5247,9 +5432,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { Bits8((byte)(21)), /* 21 */ Bits16((byte)(22)), /* 22 */ Float8_e5m2((byte)(23)), /* 23 */ - Float8_e4m3fn((byte)(24)), - Undefined((byte)(25)), - NumOptions((byte)(26)); + Float8_e4m3fn((byte)(24)), /* 24 */ + Float8_e5m2fnuz((byte)(25)), /* 25 */ + Float8_e4m3fnuz((byte)(26)), + Undefined((byte)(27)), + NumOptions((byte)(28)); public final byte value; private ScalarType(byte v) { this.value = v; } @@ -5275,7 +5462,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // /* https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba */ // /* TODO: remove once the bug is fixed. 
*/ // static type t; -// }; /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* 10 */ /* 11 */ /* 12 */ /* 13 */ /* 14 */ /* 15 */ /* 16 */ /* 17 */ /* 18 */ /* 19 */ /* 20 */ /* 21 */ /* 22 */ /* 23 */ /* 24 */ +// }; /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* 10 */ /* 11 */ /* 12 */ /* 13 */ /* 14 */ /* 15 */ /* 16 */ /* 17 */ /* 18 */ /* 19 */ /* 20 */ /* 21 */ /* 22 */ /* 23 */ /* 24 */ /* 25 */ /* 26 */ // #undef SPECIALIZE_ScalarTypeToCPPType @@ -5286,7 +5473,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // struct CppTypeToScalarType // : std:: // integral_constant { -// }; /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* 10 */ /* 11 */ /* 12 */ /* 13 */ /* 14 */ /* 15 */ /* 16 */ /* 17 */ /* 18 */ /* 19 */ /* 20 */ /* 21 */ /* 22 */ /* 23 */ /* 24 */ +// }; /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* 10 */ /* 11 */ /* 12 */ /* 13 */ /* 14 */ /* 15 */ /* 16 */ /* 17 */ /* 18 */ /* 19 */ /* 20 */ /* 21 */ /* 22 */ /* 23 */ /* 24 */ /* 25 */ /* 26 */ // #undef SPECIALIZE_CppTypeToScalarType @@ -5398,6 +5585,78 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // ::c10::ScalarType::SCALARTYPE5>::t), // SCALARTYPE5) +// #define AT_FORALL_SCALAR_TYPES_AND6( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// SCALARTYPE6, +// _) +// _(uint8_t, Byte) +// _(int8_t, Char) +// _(int16_t, Short) +// _(int, Int) +// _(int64_t, Long) +// _(float, Float) +// _(double, Double) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE1>::t), +// SCALARTYPE1) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE2>::t), +// SCALARTYPE2) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE3>::t), +// SCALARTYPE3) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE4>::t), +// SCALARTYPE4) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE5>::t), +// SCALARTYPE5) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE6>::t), +// SCALARTYPE6) + +// #define AT_FORALL_SCALAR_TYPES_AND7( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// SCALARTYPE6, +// SCALARTYPE7, +// _) +// _(uint8_t, Byte) +// _(int8_t, Char) +// _(int16_t, Short) +// _(int, Int) +// _(int64_t, Long) +// _(float, Float) +// _(double, Double) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE1>::t), +// SCALARTYPE1) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE2>::t), +// SCALARTYPE2) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE3>::t), +// SCALARTYPE3) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE4>::t), +// SCALARTYPE4) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE5>::t), +// SCALARTYPE5) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE6>::t), +// SCALARTYPE6) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE7>::t), +// SCALARTYPE7) + // #define AT_FORALL_QINT_TYPES(_) // _(c10::qint8, QInt8) // _(c10::quint8, QUInt8) @@ -5437,6 +5696,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("c10") @MemberGetter public static native ScalarType 
kBits16(); /* 22 */ @Namespace("c10") @MemberGetter public static native ScalarType kFloat8_e5m2(); /* 23 */ @Namespace("c10") @MemberGetter public static native ScalarType kFloat8_e4m3fn(); /* 24 */ + @Namespace("c10") @MemberGetter public static native ScalarType kFloat8_e5m2fnuz(); /* 25 */ + @Namespace("c10") @MemberGetter public static native ScalarType kFloat8_e4m3fnuz(); /* 26 */ // #undef DEFINE_CONSTANT @Namespace("c10") public static native @Cast("const char*") BytePointer toString(ScalarType t); @@ -5447,11 +5708,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -@Namespace("c10") public static native @Cast("bool") boolean isFloatingType(ScalarType t); - @Namespace("c10") public static native @Cast("bool") boolean isFloat8Type(ScalarType t); + @Namespace("c10") public static native @Cast("bool") boolean isReducedFloatingType(ScalarType t); +@Namespace("c10") public static native @Cast("bool") boolean isFloatingType(ScalarType t); + @Namespace("c10") public static native @Cast("bool") boolean isComplexType(ScalarType t); @Namespace("c10") public static native @Cast("bool") boolean isQIntType(ScalarType t); @@ -5483,58 +5745,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 -// Parsed from c10/util/ExclusivelyOwned.h +// Parsed from c10/util/in_place.h // #pragma once -// #include - -// See example implementation in TensorBase.h and TensorBody.h. -// Synopsis: -// -// repr_type -- type to use to store an owned T in ExclusivelyOwned. -// -// pointer_type -- pointer-esque type to return from -// ExclusivelyOwned's get() and operator*() methods. -// -// const_pointer_type -- similar to pointer_type, used for the const methods. -// -// static repr_type nullRepr() -- return a null instance of repr_type. -// -// template -// static repr_type createInPlace(Args&&... args) -- used by the in-place -// ExclusivelyOwned constructor. -// -// static repr_type moveToRepr(T&& x) -- move the given x into an -// instance of repr_type. used by the ExclusivelyOwned(T&&) -// constructor. -// -// static void destroyOwned(repr_type x) -- free memory for a -// known-exclusively-owned instance of x. Replaces calling repr_type's -// destructor. Being able to implement this more efficiently than -// repr_type's destructor is the main reason to use ExclusivelyOwned -// for a type. -// -// static T take(repr_type&) -- move out of the given repr_type into an owned T. -// -// static pointer_type getImpl(const repr_type&) -- return a pointer -// to the given repr_type. May take repr_type by value if that is more -// efficient. - -/** ExclusivelyOwned is a smart-pointer-like wrapper around an - * exclusively-owned instance of some type T that normally has - * mandatory reference counting (currently just Tensor). If you have - * an isolated piece of code that knows that it has sole ownership of - * an object of one of these types (i.e., because you created it - * directly or using a factory function) and that object will not - * escape from that isolated piece of code, then moving the object - * into an ExclusivelyOwned will avoid an atomic reference count - * decrement at destruction time. - * - * If you directly create the Tensor in the first - * place, you can use the in_place constructor of ExclusivelyOwned to - * additionally avoid doing any stores to initialize the refcount & - * weakcount. 
*/ +// #include // namespace c10 @@ -5547,6 +5762,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include +// #include // #include /** MaybeOwnedTraits describes how to borrow from T. Here is how we @@ -5773,7 +5989,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include +// #include // #include // #include @@ -5853,6 +6071,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { + + // #undef DEFINE_TO // namespace c10 @@ -6020,7 +6240,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #if !defined(TORCH_PEDANTIC) // Use precomputed hashsum for std::string // Needed to workaround ambiguity in class name resolution -// into __PRETTY_FUNCION__ when abovementioned class is defined in inlined +// into __PRETTY_FUNCTION__ when abovementioned class is defined in inlined // namespace. In multi-ABI C++ library, `std::string` is an alias to // `std::__cxx11::basic_string` which depending on compiler flags can be // resolved to `basic_string` either in `std` namespace or in @@ -6031,67 +6251,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -// Parsed from c10/util/flat_hash_map.h - -// Taken from -// https://github.com/skarupke/flat_hash_map/blob/2c4687431f978f02a3780e24b8b701d22aa32d9c/flat_hash_map.hpp -// with fixes applied: -// - https://github.com/skarupke/flat_hash_map/pull/25 -// - https://github.com/skarupke/flat_hash_map/pull/26 -// - replace size_t with uint64_t to fix it for 32bit -// - add "GCC diagnostic" pragma to ignore -Wshadow -// - make sherwood_v3_table::convertible_to_iterator public because GCC5 seems -// to have issues with it otherwise -// - fix compiler warnings in operator templated_iterator - -// Copyright Malte Skarupke 2017. -// Distributed under the Boost Software License, Version 1.0. -// (See http://www.boost.org/LICENSE_1_0.txt) - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") -// #endif - -// #if defined(_MSC_VER) && !defined(__clang__) -// #pragma warning(push) -// #pragma warning(disable : 4624) // destructor was implicitly defined as deleted -// #endif - -// #ifdef _MSC_VER -// #define SKA_NOINLINE(...) __declspec(noinline) __VA_ARGS__ -// #else -// #define SKA_NOINLINE(...) __VA_ARGS__ __attribute__((noinline)) -// #endif -@Namespace("ska::detailv3") @MemberGetter public static native byte min_lookups(); -public static final byte min_lookups = min_lookups(); - -@Namespace("ska::detailv3") public static native byte log2(@Cast("uint64_t") long value); - -@Namespace("ska::detailv3") public static native @Cast("uint64_t") long next_power_of_two(@Cast("uint64_t") long i); - -// Implementation taken from http://en.cppreference.com/w/cpp/types/void_t -// (it takes CWG1558 into account and also works for older compilers) - // namespace detailv3 - - // end namespace ska - -// #if defined(_MSC_VER) && !defined(__clang__) -// #pragma warning(pop) -// #endif - - // Parsed from c10/util/irange.h // Copyright 2004-present Facebook. All Rights Reserved. 
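The scalar-type hunks in this patch thread two new 8-bit float types through the generated bindings; the hunk just below adds their metadata instances (/* 25 */ and /* 26 */). A minimal sketch of querying the regenerated ScalarType enum as declared in this file; the wrapper class is illustrative only:

    import org.bytedeco.pytorch.global.torch.ScalarType;

    public class Float8Check {
        public static void main(String[] args) {
            // The fnuz variants slot in right after Float8_e4m3fn (24),
            // pushing Undefined to 27 and NumOptions to 28.
            ScalarType t = ScalarType.Float8_e5m2fnuz;
            System.out.println(t + " = " + t.value);                 // Float8_e5m2fnuz = 25
            System.out.println(ScalarType.Float8_e4m3fnuz.intern()); // Float8_e4m3fnuz
        }
    }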
@@ -6251,6 +6410,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { /* 22 */ /* 23 */ /* 24 */ + /* 25 */ + /* 26 */ // #undef DEFINE_SCALAR_METADATA_INSTANCE @@ -6641,46 +6802,85 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 -// Parsed from c10/core/Storage.h +// Parsed from c10/util/ExclusivelyOwned.h // #pragma once -// #include -// Targeting ../Storage.java +// #include +// See example implementation in TensorBase.h and TensorBody.h. +// Synopsis: +// +// repr_type -- type to use to store an owned T in ExclusivelyOwned. +// +// pointer_type -- pointer-esque type to return from +// ExclusivelyOwned's get() and operator*() methods. +// +// const_pointer_type -- similar to pointer_type, used for the const methods. +// +// static repr_type nullRepr() -- return a null instance of repr_type. +// +// template +// static repr_type createInPlace(Args&&... args) -- used by the in-place +// ExclusivelyOwned constructor. +// +// static repr_type moveToRepr(T&& x) -- move the given x into an +// instance of repr_type. used by the ExclusivelyOwned(T&&) +// constructor. +// +// static void destroyOwned(repr_type x) -- free memory for a +// known-exclusively-owned instance of x. Replaces calling repr_type's +// destructor. Being able to implement this more efficiently than +// repr_type's destructor is the main reason to use ExclusivelyOwned +// for a type. +// +// static T take(repr_type&) -- move out of the given repr_type into an owned T. +// +// static pointer_type getImpl(const repr_type&) -- return a pointer +// to the given repr_type. May take repr_type by value if that is more +// efficient. +/** ExclusivelyOwned is a smart-pointer-like wrapper around an + * exclusively-owned instance of some type T that normally has + * mandatory reference counting (currently just Tensor). If you have + * an isolated piece of code that knows that it has sole ownership of + * an object of one of these types (i.e., because you created it + * directly or using a factory function) and that object will not + * escape from that isolated piece of code, then moving the object + * into an ExclusivelyOwned will avoid an atomic reference count + * decrement at destruction time. + * + * If you directly create the Tensor in the first + * place, you can use the in_place constructor of ExclusivelyOwned to + * additionally avoid doing any stores to initialize the refcount & + * weakcount. 
*/ // namespace c10 -// Parsed from c10/core/AutogradState.h +// Parsed from c10/core/Storage.h // #pragma once -// #include -// Targeting ../AutogradState.java +// #include +// #include + +@Namespace("c10") public static native @Cast("bool") boolean isSharedStorageAlias( + @Cast({"", "c10::Storage&&"}) @StdMove Storage storage0, + @Cast({"", "c10::Storage&&"}) @StdMove Storage storage1); +// Targeting ../Storage.java // namespace c10 -// Parsed from c10/core/GradMode.h +// Parsed from c10/core/AutogradState.h // #pragma once -// #include // #include -// Targeting ../GradMode.java - - -// Targeting ../AutoGradMode.java - - -// Targeting ../NoGradGuard.java - - -// Targeting ../AutoFwGradMode.java +// Targeting ../AutogradState.java @@ -7334,34 +7534,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 -// Parsed from c10/core/WrapDimMinimal.h - -// #pragma once - -// #include -// This template can only be specialized at int64_t and c10::SymInt; -// you'll get linker errors otherwise - // namespace detail - -@Namespace("c10") public static native @Cast("int64_t") long maybe_wrap_dim( - @Cast("int64_t") long dim, - @Cast("int64_t") long dim_post_expr, - @Cast("bool") boolean wrap_scalar/*=true*/); -@Namespace("c10") public static native @Cast("int64_t") long maybe_wrap_dim( - @Cast("int64_t") long dim, - @Cast("int64_t") long dim_post_expr); - -@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( - @ByVal SymInt dim, - @ByVal SymInt dim_post_expr, - @Cast("bool") boolean wrap_scalar/*=true*/); -@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( - @ByVal SymInt dim, - @ByVal SymInt dim_post_expr); - - // namespace c10 - - // Parsed from c10/core/impl/HermeticPyObjectTLS.h // #pragma once @@ -7485,6 +7657,50 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 +// Parsed from c10/core/SymbolicShapeMeta.h + +// #pragma once +// #include +// #include +// #include + +// #include +// #include +// Targeting ../SymbolicShapeMeta.java + + + + // namespace c10 + + +// Parsed from c10/core/WrapDimMinimal.h + +// #pragma once + +// #include +// This template can only be specialized at int64_t and c10::SymInt; +// you'll get linker errors otherwise + // namespace detail + +@Namespace("c10") public static native @Cast("int64_t") long maybe_wrap_dim( + @Cast("int64_t") long dim, + @Cast("int64_t") long dim_post_expr, + @Cast("bool") boolean wrap_scalar/*=true*/); +@Namespace("c10") public static native @Cast("int64_t") long maybe_wrap_dim( + @Cast("int64_t") long dim, + @Cast("int64_t") long dim_post_expr); + +@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( + @ByVal SymInt dim, + @ByVal SymInt dim_post_expr, + @Cast("bool") boolean wrap_scalar/*=true*/); +@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( + @ByVal SymInt dim, + @ByVal SymInt dim_post_expr); + + // namespace c10 + + // Parsed from c10/util/Logging.h // #ifndef C10_UTIL_LOGGING_H_ @@ -7841,6 +8057,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Initializes the c10 logger. 
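The declaration kept just below boots the c10 logger, and the lines added right after it introduce SetGlobalRank for distributed log prefixes. A minimal start-up sketch; the rank value 0 is illustrative:

    import org.bytedeco.pytorch.global.torch;

    public class LoggingInit {
        public static void main(String[] args) {
            torch.initLogging();     // initialize the c10 logger once at startup
            torch.SetGlobalRank(0);  // include this rank in subsequent log messages
        }
    }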
@Namespace("c10") public static native void initLogging(); +// Sets the rank, which will be included in log messages +@Namespace("c10") public static native void SetGlobalRank(@Cast("int64_t") long rank); + // namespace c10 // #endif // C10_UTIL_LOGGING_H_ @@ -7921,6 +8140,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -8006,9 +8226,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../BackendMeta.java -// Targeting ../SymbolicShapeMeta.java - - // Targeting ../VariableVersion.java @@ -8168,7 +8385,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once -// #include +// #include // #include // #include @@ -8467,8 +8684,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #include +// #include // The PtrTraits argument to the TensorAccessor/GenericPackedTensorAccessor // is used to enable the __restrict__ keyword/modifier for the data @@ -9278,6 +9495,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor +// aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor + + // aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor @@ -9287,6 +9507,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor +// aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor + + // aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor @@ -10175,13 +10398,16 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::repeat(Tensor self, SymInt[] repeats) -> Tensor -// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor +// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor + + +// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor -// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor +// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor -// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor +// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor // aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a) @@ -12424,8 +12650,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #else // #define SKA_NOINLINE(...) 
__VA_ARGS__ __attribute__((noinline)) // #endif +@Namespace("ska_ordered::detailv3") @MemberGetter public static native byte min_lookups(); +public static final byte min_lookups = min_lookups(); + +@Namespace("ska_ordered::detailv3") public static native @Cast("uint64_t") long next_power_of_two(@Cast("uint64_t") long i); // Implementation taken from http://en.cppreference.com/w/cpp/types/void_t // (it takes CWG1558 into account and also works for older compilers) @@ -12815,7 +13045,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Const @ByRef Type.TypePtr t1, @Const @ByRef Type.TypePtr t2, @Cast("bool") boolean default_to_union/*=false*/, - @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type_hint); + @Const @ByRef(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type_hint); @Namespace("c10") public static native @ByVal TypePtrOptional unifyTypes( @Const @ByRef Type.TypePtr t1, @Const @ByRef Type.TypePtr t2); @@ -12824,7 +13054,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal TypeArrayRef elements, @Cast("std::ostream*") @ByRef Pointer why_not, @Cast("bool") boolean default_to_union/*=false*/, - @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type_hint); + @Const @ByRef(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type_hint); @Namespace("c10") public static native @ByVal TypePtrOptional unifyTypeList( @ByVal TypeArrayRef elements, @Cast("std::ostream*") @ByRef Pointer why_not); @@ -12832,7 +13062,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal TypeVector elements, @Cast("std::ostream*") @ByRef Pointer why_not, @Cast("bool") boolean default_to_union/*=false*/, - @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type_hint); + @Const @ByRef(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type_hint); @Namespace("c10") public static native @ByVal TypePtrOptional unifyTypeList( @ByVal TypeVector elements, @Cast("std::ostream*") @ByRef Pointer why_not); @@ -13350,25 +13580,25 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("c10::detail") public static native @Cast("size_t") long atomic_weakcount_decrement(@Cast("std::atomic*") @ByRef LongPointer weakcount); -// Targeting ../TuplePtr.java +// Targeting ../QuantizerPtr.java -// Targeting ../FuturePtr.java +// Targeting ../GeneratorImplPtr.java -// Targeting ../ConstantStringPtr.java +// Targeting ../TuplePtr.java -// Targeting ../GeneratorImplPtr.java +// Targeting ../FuturePtr.java -// Targeting ../QuantizerPtr.java +// Targeting ../ConstantStringPtr.java // Targeting ../AwaitPtr.java -// Targeting ../RRefInterfacePtr.java +// Targeting ../ObjPtr.java // Targeting ../PyObjectHolderPtr.java @@ -13377,10 +13607,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../EnumHolderPtr.java -// Targeting ../TensorImplPtr.java +// Targeting ../RRefInterfacePtr.java -// Targeting ../TreeRef.java +// Targeting ../TensorImplPtr.java // Targeting ../StorageImplPtr.java @@ -13392,6 +13622,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../BackendMetaRef.java +// Targeting ../TreeRef.java + + // To allow intrusive_ptr inside std::map or std::set, we need operator< // Targeting ../WeakStorage.java @@ -13545,13 +13778,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Input is a list of Futures with the same target type. // Output is a Future to the List of completed Futures. 
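The hunk below narrows collectAll/collectAny to const-reference list parameters; Java call sites are unaffected, since both spellings map to a FuturePtrList argument. A fan-in sketch (the helper class and method names are hypothetical):

    import org.bytedeco.pytorch.FuturePtr;
    import org.bytedeco.pytorch.FuturePtrList;
    import static org.bytedeco.pytorch.global.torch.*;

    public class FutureFanIn {
        // srcs must hold futures of the same target type, per the surrounding comments.
        static FuturePtr awaitAll(FuturePtrList srcs) {
            return collectAll(srcs);  // completes once every source future has
        }
        static FuturePtr awaitFirst(FuturePtrList srcs) {
            return collectAny(srcs);  // completes with the first value seen
        }
    }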
@Namespace("c10") public static native @ByVal FuturePtr collectAll( - @ByVal FuturePtrList srcs); + @Const @ByRef FuturePtrList srcs); // Input is a List of Futures with the same target type. // Output is a Future that will be updated with a seen value. @Namespace("c10") public static native @ByVal FuturePtr collectAny( - @ByVal FuturePtrList srcs); + @Const @ByRef FuturePtrList srcs); +// Targeting ../Object.java + -// User-defined object. // Targeting ../PyObjectHolder.java @@ -13806,12 +14040,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #include +// #include // #include // #include // #include // #include +// #include // #include // #include // #include @@ -13834,8 +14069,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // We need a ComplexHolder because currently the payloads in the Union // only take 64 bits. Since ComplexDouble takes up 128 bits, and is too big -// to fit in the IValue directly, we indirect complex numbers through an intrusive -// pointer to ComplexHolder (which contains a c10::complex). +// to fit in the IValue directly, we indirect complex numbers through an +// intrusive pointer to ComplexHolder (which contains a c10::complex). // Similar to ComplexHolder, for StreamData3 @@ -13915,10 +14150,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { - // namespace c10 -// #include // IWYU pragma: keep +// #include // IWYU pragma: keep // Parsed from ATen/core/List_inl.h @@ -14646,8 +14880,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { - // namespace namedinference - // namespace at + // namespace at::namedinference // Parsed from ATen/NamedTensorUtils.h @@ -14661,8 +14894,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -@Namespace("at") public static native @Cast("bool") boolean has_names(@ByVal TensorArrayRef tensors); -@Namespace("at") public static native @Cast("bool") boolean has_names(@ByVal TensorVector tensors); +@Namespace("at") public static native @Cast("bool") boolean has_names(@Const @ByRef TensorArrayRef tensors); +@Namespace("at") public static native @Cast("bool") boolean has_names(@Const @ByRef TensorVector tensors); // Converts dim to an positional index. Errors if `dim` cannot be used to // refer to any dimension of tensor. @@ -15443,11 +15676,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { /// @Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable_non_differentiable_view( - @ByVal @Cast("torch::autograd::Variable*") Tensor base, + @Cast("const torch::autograd::Variable*") @ByRef Tensor base, @Const @ByRef Tensor data, @Cast("bool") boolean allow_tensor_metadata_change/*=true*/); @Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable_non_differentiable_view( - @ByVal @Cast("torch::autograd::Variable*") Tensor base, + @Cast("const torch::autograd::Variable*") @ByRef Tensor base, @Const @ByRef Tensor data); /** Creates a {@code Variable} from the given {@code Tensor}, copying its underlying @@ -15470,11 +15703,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { * specifying the function in the autograd graph, and what particular input of * that function, this variable is connected to. 
*/ @Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable( - @ByVal Tensor data, + @Const @ByRef Tensor data, @ByVal Edge gradient_edge, @Cast("bool") boolean allow_tensor_metadata_change/*=true*/); @Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable( - @ByVal Tensor data, + @Const @ByRef Tensor data, @ByVal Edge gradient_edge); @Namespace("torch::autograd::utils") public static native @Cast("bool") boolean has_same_meta(@Cast("const torch::autograd::Variable*") @ByRef Tensor base, @Cast("const torch::autograd::Variable*") @ByRef Tensor other); @@ -15689,6 +15922,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // schema as used in the compiler for resolving function calls and reporting // errors. These objects should be constructed from C10 schema once those @@ -15820,281 +16054,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -// Parsed from ATen/record_function.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include - -// #include -// #include -// #include -// #include - - -// Kind of record function scope; -@Namespace("at") public enum RecordScope { - // c10/ATen ops, autograd nodes - FUNCTION((byte)(0)), - // Functions/nodes called from the autograd - BACKWARD_FUNCTION((byte)(1)), - // TorchScript functions, methods - TORCHSCRIPT_FUNCTION((byte)(2)), - // Kernel Function dtype Tag - KERNEL_FUNCTION_DTYPE((byte)(3)), - // Torchbind custom class, - CUSTOM_CLASS((byte)(4)), - // Generic Build Feature - BUILD_FEATURE((byte)(5)), - // Kernel Function dtype Tag - LITE_INTERPRETER((byte)(6)), - // User defined scope (e.g. with record_function()) - USER_SCOPE((byte)(7)), - // Scopes for static runtime, a specialized TorchScript interpreter - STATIC_RUNTIME_OP((byte)(8)), - STATIC_RUNTIME_MODEL((byte)(9)), - NUM_SCOPES((byte)(10));// must be the last in the list - - public final byte value; - private RecordScope(byte v) { this.value = v; } - private RecordScope(RecordScope e) { this.value = e.value; } - public RecordScope intern() { for (RecordScope e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - - // namespace at - // namespace std - -// Soft limit on the number of callbacks to use; -@Namespace("at") @MemberGetter public static native @Cast("const std::size_t") long kSoftLimitCallbacks(); - -// An abstract base class for various observer contexts that can be attached to -// the RecordFunction. - -// -// PyTorch callbacks/observers API: -// - -/** - * RecordFunctionCallback represents a pair of callbacks to be used with - * RecordFunction, members: - * start, end - the callbacks to run when entering and exiting the scope; - * optionally, the start callback may return an ObserverContext which will - * be passed to the end callback, use appropriate constructor accordingly. 
- * needs_inputs - whether the callbacks need the inputs passed from the - * observed function/range; NOTE: passing the inputs incurs an additional - * overhead; sampling_probability - if not 1.0, then the callback is - * probabilistically sampled to run; NOTE: start and end callbacks always run as - * a pair and are sampled together; scopes - types of scopes to execute the - * callbacks on (see RecordScope); passing empty set means the callbacks will be - * executed for all possible scope types should_run - optional function that - * returns whether this callback should run; overwrites the effect of setting - * sampling_probability - */ - -// Notes: -// - two types of callbacks are provided: thread local and global -// - thread local callbacks are added/removed only for the given thread -// and are stored locally for each thread and separately from the list -// of the global callbacks -// - global callbacks are stored in a single per process list and are -// invoked by every RecordFunction, in addition to the thread local -// callbacks specific to the given thread -// - we allow the added callbacks to be sampled, by specifying a sampling -// probability for each callback pair, if the start callback is -// not picked to run, the corresponding end callback won't be called -// - a typical use case for the global callbacks is passive monitoring -// in the background (e.g. fleet-wide monitoring), without focusing on -// the specific piece of code -// - in contrast, thread local callbacks are enabled locally, on demand, -// for the specific piece of code (range) and are not sampled -// - a typical use case for thread local callbacks is profiler and code -// execution tracer -// - note, thread local callbacks are automatically propagated with -// ThreadLocalState across JIT continuations and async tasks (at::launch) - -@Namespace("at") @MemberGetter public static native @Cast("const at::CallbackHandle") long INVALID_CALLBACK_HANDLE(); -// Targeting ../RecordFunctionCallbacksEntry.java - - - -// Holds pairs (callbacks, unique_id) -// Targeting ../RecordFunction.java - - - -@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(RecordScope scope); -@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(@Cast("at::RecordScope") byte scope); - -@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( - RecordScope scope); -@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( - @Cast("at::RecordScope") byte scope); - - // namespace detail - -// optional argument - function's seq_no -// #define RECORD_FUNCTION_WITH_SCOPE(scope, fn, inputs, ...) -// at::RecordFunction guard(scope); -// if (guard.isActive()) { -// ::at::detail::record_function_with_scope( -// guard, fn, inputs, ##__VA_ARGS__); -// } - -// #define RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( -// scope, fn, inputs, outputs, ...) -// at::RecordFunction guard(scope); -// if (guard.isActive()) { -// if (guard.needsInputs()) { -// guard.before(fn, inputs, ##__VA_ARGS__); -// } else { -// guard.before(fn, ##__VA_ARGS__); -// } -// if (guard.needsOutputs()) { -// guard.setOutputs(outputs); -// } -// } - -// #define RECORD_FUNCTION(fn, inputs, ...) 
-// RECORD_FUNCTION_WITH_SCOPE( -// at::RecordScope::FUNCTION, fn, inputs, ##__VA_ARGS__) - -// #define RECORD_TORCHSCRIPT_FUNCTION(mn, inputs) -// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::TORCHSCRIPT_FUNCTION, mn, inputs) - -// #define RECORD_FUNCTION_WITH_INPUTS_OUTPUTS(fn, inputs, outputs, ...) -// RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( -// at::RecordScope::FUNCTION, fn, inputs, outputs, ##__VA_ARGS__) - -// Custom user scopes in C++; similar to Python's 'with record_function("..."):' -// #define RECORD_USER_SCOPE(fn) -// RECORD_FUNCTION_WITH_SCOPE( -// at::RecordScope::USER_SCOPE, fn, c10::ArrayRef{}) - -// RECORD_USER_SCOPE with inputs -// #define RECORD_USER_SCOPE_WITH_INPUTS(fn, inputs) -// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::USER_SCOPE, fn, inputs) - -// Helper macro to pass in debug handle that is used to -// post process events -// #define RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( -// scope, fn, debug_handle, inputs, ...) -// at::RecordFunction guard(scope); -// if (guard.isActive()) { -// ::at::detail::record_function_with_scope_and_debug_handle( -// guard, fn, debug_handle, inputs, ##__VA_ARGS__); -// } - -// Helper macros to record LITE INTERPETER scope events with debug handles -// #define RECORD_EDGE_SCOPE_WITH_DEBUG_HANDLE_AND_INPUTS( -// fn, debug_handle, inputs) -// RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( -// at::RecordScope::LITE_INTERPRETER, fn, debug_handle, inputs) - -// Bookend to the RECORD_FUNCTION macros. Use this after the kernel -// launch to let the profiler bind the outputs to the op that produced -// them. Note that guard is declared by RECORD_FUNCTION so this macro -// needs to be called from the same scope as RECORD_FUNCTION -// #define RECORD_OUTPUTS(outputs) -// if (guard.needsOutputs()) { -// guard.setOutputs( -// std::vector(outputs.begin(), outputs.end())); -// } - -/** - * addThreadLocalCallback adds a thread local callback to run with - * RecordFunction, returns handle to use with removeThreadLocalCallback - */ -@Namespace("at") public static native @Cast("at::CallbackHandle") long addThreadLocalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); - -/** - * hasThreadLocalCallbacks returns whether there're callbacks registered - * with addThreadLocalCallback - */ -@Namespace("at") public static native @Cast("bool") boolean hasThreadLocalCallbacks(); - -/** - * clearThreadLocalCallbacks removes all thread local callbacks - */ -@Namespace("at") public static native void clearThreadLocalCallbacks(); - -/** - * addGlobalCallback adds a global callback to run with RecordFunction: - * - * only during the program initialization - */ -@Namespace("at") public static native @Cast("at::CallbackHandle") long addGlobalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); - -/** - * removeCallback removes a callback given the handle returned by - * addThreadLocalCallback or addGlobalCallback; - * - * no other code can run simultaneously - */ -@Namespace("at") public static native void removeCallback(@Cast("at::CallbackHandle") long handle); - -/** - * Prevent the given callback from executing. If handle is invalid, - * does nothing. - */ -@Namespace("at") public static native void disableCallback(@Cast("at::CallbackHandle") long handle); - -/** - * Allow the given callback, previously disabled with disableCallback, to - * execute again. If handle is invalid, does nothing. 
- */ -@Namespace("at") public static native void reenableCallback(@Cast("at::CallbackHandle") long handle); - -/** - * hasGlobalCallbacks returns whether there're global callbacks - * registered with pushGlobalCallback - */ -@Namespace("at") public static native @Cast("bool") boolean hasGlobalCallbacks(); - -/** - * clearGlobalCallbacks removes all global callbacks - */ -@Namespace("at") public static native void clearGlobalCallbacks(); - -// for both thread local and global callbacks -@Namespace("at") public static native @Cast("bool") boolean hasCallbacks(); -@Namespace("at") public static native void clearCallbacks(); - -/** - * enableRecordFunction enables RecordFunction thread locally - */ -@Namespace("at") public static native void enableRecordFunction(@Cast("bool") boolean enable/*=true*/); -@Namespace("at") public static native void enableRecordFunction(); - -/** - * isRecordFunctionEnabled returns whether RecordFunction - * is enabled thread locally - */ -@Namespace("at") public static native @Cast("bool") boolean isRecordFunctionEnabled(); -// Targeting ../RecordFunctionGuard.java - - -// Targeting ../DisableRecordFunctionGuard.java - - -// Targeting ../RecordFunctionTLS.java - - - -@Namespace("at") public static native @Const @ByRef RecordFunctionTLS get_record_function_tls_(); - -@Namespace("at") public static native void set_record_function_tls_(@Const @ByRef RecordFunctionTLS tls); - -@Namespace("at") public static native void set_record_function_seed_for_testing(@Cast("uint32_t") int seed); - - // namespace at - - // Parsed from ATen/core/op_registration/op_allowlist.h // #pragma once @@ -16216,81 +16175,20 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 -// Parsed from c10/util/either.h - -// Originally taken from -// https://github.com/cryfs/cryfs/blob/14ad22570ddacef22d5ff139cdff68a54fc8234d/src/cpp-utils/either.h - -// #pragma once - -// #include -// #include -// #include -/** - * either is a tagged union that holds either an object of type A - * or an object of type B. - */ - // namespace c10 - - -// Parsed from torch/csrc/jit/frontend/function_schema_parser.h - -// #pragma once - -// #include -// #include -// #include -// #include - - -@Namespace("torch::jit") public static native @ByVal FunctionSchema parseSchema(@StdString BytePointer schema); -@Namespace("torch::jit") public static native @ByVal FunctionSchema parseSchema(@StdString String schema); -@Namespace("torch::jit") public static native @ByVal OperatorName parseName(@StdString BytePointer name); -@Namespace("torch::jit") public static native @ByVal OperatorName parseName(@StdString String name); - - // namespace jit - // namespace torch - - -// Parsed from c10/core/CompileTimeFunctionPointer.h +// Parsed from ATen/SequenceNumber.h // #pragma once -// #include +// #include +// #include -/** - * Represent a function pointer as a C++ type. - * This allows using the function pointer as a type - * in a template and calling it from inside the template - * allows the compiler to inline the call because it - * knows the function pointer at compile time. 
- * - * Example 1: - * int add(int a, int b) {return a + b;} - * using Add = TORCH_FN_TYPE(add); - * template struct Executor { - * int execute(int a, int b) { - * return Func::func_ptr()(a, b); - * } - * }; - * Executor executor; - * EXPECT_EQ(3, executor.execute(1, 2)); - * - * Example 2: - * int add(int a, int b) {return a + b;} - * template int execute(Func, int a, int b) { - * return Func::func_ptr()(a, b); - * } - * EXPECT_EQ(3, execute(TORCH_FN(add), 1, 2)); - */ +// A simple thread local enumeration, used to link forward and backward pass +// ops and is used by autograd and observers framework - // namespace c10 +@Namespace("at::sequence_number") public static native @Cast("uint64_t") long peek(); +@Namespace("at::sequence_number") public static native @Cast("uint64_t") long get_and_increment(); -// #define TORCH_FN_TYPE(func) -// ::c10::CompileTimeFunctionPointer< -// std::remove_pointer_t>, -// func> -// #define TORCH_FN(func) TORCH_FN_TYPE(func)() + // namespace at::sequence_number // Parsed from ATen/core/boxing/OperatorKernel.h @@ -16680,10 +16578,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // checks that T can be unboxed from an IValue into a C++ value. // - // The following specialisations of assert_is_valid_input_type are technically not - // necessary since we would hit the base case and show an error message - // there if they didn't exist, but we can show a better error message - // in some common error scenarios. + // TODO: it probably would be good to tighten this up quite a bit more with + // an explicit list for everything // // assert_is_valid_output_type @@ -16722,6 +16618,47 @@ public class torch extends org.bytedeco.pytorch.presets.torch { +// Parsed from c10/core/CompileTimeFunctionPointer.h + +// #pragma once + +// #include + +/** + * Represent a function pointer as a C++ type. + * This allows using the function pointer as a type + * in a template and calling it from inside the template + * allows the compiler to inline the call because it + * knows the function pointer at compile time. 
+ * + * Example 1: + * int add(int a, int b) {return a + b;} + * using Add = TORCH_FN_TYPE(add); + * template struct Executor { + * int execute(int a, int b) { + * return Func::func_ptr()(a, b); + * } + * }; + * Executor executor; + * EXPECT_EQ(3, executor.execute(1, 2)); + * + * Example 2: + * int add(int a, int b) {return a + b;} + * template int execute(Func, int a, int b) { + * return Func::func_ptr()(a, b); + * } + * EXPECT_EQ(3, execute(TORCH_FN(add), 1, 2)); + */ + + // namespace c10 + +// #define TORCH_FN_TYPE(func) +// ::c10::CompileTimeFunctionPointer< +// std::remove_pointer_t>, +// func> +// #define TORCH_FN(func) TORCH_FN_TYPE(func)() + + // Parsed from ATen/core/boxing/impl/WrapFunctionIntoFunctor.h // #pragma once @@ -16831,80 +16768,714 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -// Parsed from ATen/core/dispatch/CppSignature.h +// Parsed from c10/util/flat_hash_map.h + +// Taken from +// https://github.com/skarupke/flat_hash_map/blob/2c4687431f978f02a3780e24b8b701d22aa32d9c/flat_hash_map.hpp +// with fixes applied: +// - https://github.com/skarupke/flat_hash_map/pull/25 +// - https://github.com/skarupke/flat_hash_map/pull/26 +// - replace size_t with uint64_t to fix it for 32bit +// - add "GCC diagnostic" pragma to ignore -Wshadow +// - make sherwood_v3_table::convertible_to_iterator public because GCC5 seems +// to have issues with it otherwise +// - fix compiler warnings in operator templated_iterator + +// Copyright Malte Skarupke 2017. +// Distributed under the Boost Software License, Version 1.0. +// (See http://www.boost.org/LICENSE_1_0.txt) // #pragma once -// #include -// #include // #include -// #include -// #include -// Targeting ../CppSignature.java +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #endif +// #if defined(_MSC_VER) && !defined(__clang__) +// #pragma warning(push) +// #pragma warning(disable : 4624) // destructor was implicitly defined as deleted +// #endif -@Namespace("c10::impl") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef CppSignature lhs, @Const @ByRef CppSignature rhs); +// #ifdef _MSC_VER +// #define SKA_NOINLINE(...) __declspec(noinline) __VA_ARGS__ +// #else +// #define SKA_NOINLINE(...) __VA_ARGS__ __attribute__((noinline)) +// #endif +@Namespace("ska::detailv3") public static native byte log2(@Cast("uint64_t") long value); +// Implementation taken from http://en.cppreference.com/w/cpp/types/void_t +// (it takes CWG1558 into account and also works for older compilers) + // namespace detailv3 + // end namespace ska +// #if defined(_MSC_VER) && !defined(__clang__) +// #pragma warning(pop) +// #endif -// Parsed from ATen/core/dispatch/RegistrationHandleRAII.h + +// Parsed from c10/util/either.h + +// Originally taken from +// https://github.com/cryfs/cryfs/blob/14ad22570ddacef22d5ff139cdff68a54fc8234d/src/cpp-utils/either.h // #pragma once -// #include -// Targeting ../RegistrationHandleRAII.java +// #include +// #include +// #include +/** + * either is a tagged union that holds either an object of type A + * or an object of type B. + */ + // namespace c10 +// Parsed from c10/core/PyHandleCache.h +// #pragma once +// #include +// #include +// #include +// #include + +// A PyHandleCache represents a cached pointer from a C++ object to +// a Python object that represents that object analogously in Python. 
+// Upon a cache hit, the relevant object can be retrieved after a test +// and then a memory load. Two conditions must hold to be able to use this +// class: +// +// - This must truly be a cache; e.g., the caller must be able to produce +// the object some other way if the cache hit misses. +// +// - This must truly be a handle; e.g., the Python object referenced by +// this class must have static lifetime. This means we don't have to +// maintain strong ownership or deallocate the object when the C++ object +// dies. Static lifetime is a good idea in conjunction with the cache, +// since if you are producing a fresh object on miss you won't be +// maintaining object identity. If you need bidirectional ownership, +// you will want to factor out the pattern in TensorImpl with +// resurrection. +// +// This cache is expected to not improve perf under torchdeploy, as one +// interpreter will fill up the cache, and all the interpreters will be +// unable to use the slot. A potential improvement is to have multiple +// slots (one per interpreter), which will work in deployment scenarios +// where there a stable, fixed number of interpreters. You can also store +// the relevant state in the Python library, rather than in the non-Python +// library (although in many cases, this is not convenient, as there may +// not be a way to conveniently index based on the object.) + + // namespace c10 -// Parsed from ATen/core/ATenOpList.h + +// Parsed from c10/core/SafePyObject.h // #pragma once +// #include // #include +// #include +// Targeting ../SafePyObject.java -// check if an op is a custom op (i.e. did not come from native_functions.yaml) -@Namespace("at") public static native @Cast("bool") boolean is_custom_op(@Const @ByRef OperatorName opName); +// Targeting ../SafePyHandle.java -// Parsed from ATen/core/op_registration/op_registration.h + // namespace c10 + + +// Parsed from c10/util/Bitset.h + +// #pragma once + +// #include +// #include +// #include +// #if defined(_MSC_VER) +// #endif +// Targeting ../bitset.java + + + +@Namespace("c10::utils") public static native @Cast("bool") @Name("operator !=") @NoException(true) boolean notEquals(@ByVal bitset lhs, @ByVal bitset rhs); + + // namespace utils + // namespace c10 + + +// Parsed from ATen/core/Variadic.h + +// #pragma once + +// #include +// #include +// #include +// #include + +// #include +// #include + +// This class allows you to write variadic functions which +// call a (possibly overloaded) function on each argument, +// in order. This is most commonly used in autogenerated code, +// where it is convenient to have a function that can uniformly +// take arguments of different types. If your arguments +// are homogenous consider using a std::initializer_list instead. +// +// For examples of this in use, see torch/csrc/utils/variadic.h + + // namespace torch + + +// Parsed from ATen/core/dispatch/DispatchKeyExtractor.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// Take a DispatchKeySet for a Tensor and determine what the actual dispatch +// DispatchKey should be, taking into account TLS, and skipping backends which +// fall through. +// +// Unlike Tensor::key_set(), the value of this on a tensor can change depending +// on TLS. 
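A sketch of the entry point declared just below; that the single-key DispatchKeySet constructor and the has() accessor are mapped on the Java side is assumed here, not verified:

    import org.bytedeco.pytorch.DispatchKeySet;
    import static org.bytedeco.pytorch.global.torch.*;

    public class EffectiveKeys {
        public static void main(String[] args) {
            DispatchKeySet ks = new DispatchKeySet(DispatchKey.CPU);  // assumed constructor
            // Folds the TLS include/exclude sets into ks, then applies the mask.
            DispatchKeySet effective = computeDispatchKeySet(ks, ks);
            System.out.println(effective.has(DispatchKey.CPU));      // assumed accessor
        }
    }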
+// +// NB: If there is no valid dispatch key, this will return Undefined +@Namespace("c10::impl") public static native @ByVal DispatchKeySet computeDispatchKeySet( + @ByVal DispatchKeySet ks, + @ByVal DispatchKeySet key_mask +); + + + // A small gadget to extract the DispatchKeySet from types which are known + // to have it. Used to extract dispatch keys from unboxed calls. + + // NB: take by const reference (Don't do universal forwarding here! You + // don't want to move into this function!) + +// Targeting ../DispatchKeyExtractor.java + + + + + + +// Parsed from ATen/core/dispatch/OperatorEntry.h // #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include + +// #include +// #include + +// #ifdef C10_MOBILE +// #endif + +// This data structure represents a kernel that was registered to us from a +// user. Unlike KernelFunction, AnnotatedKernel contains some extra metadata +// about the kernel that isn't necessary for actual dispatching (this is why +// we don't put AnnotatedKernel in the actual DispatchTable), but is useful for +// giving good error messages. + +// This data structure represents operator schema, with metadata specifying +// where the registration of this schema occurred + +// Internal data structure that records information about a specific operator. +// It's not part of the public API; typically, users will interact with +// OperatorHandle instead. +// +// Concurrent writes to OperatorEntry are protected by the GLOBAL Dispatcher +// lock (this is important because some methods in OperatorEntry access +// dispatcher state) + + // namespace impl + // namespace c10 + + +// Parsed from ATen/record_function.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include +// #include + + +// Function name to record NCCL metadata +@Namespace("at") @MemberGetter public static native @StdString BytePointer kParamCommsCallName(); + +// Kind of record function scope; +@Namespace("at") public enum RecordScope { + // c10/ATen ops, autograd nodes + FUNCTION((byte)(0)), + // Functions/nodes called from the autograd + BACKWARD_FUNCTION((byte)(1)), + // TorchScript functions, methods + TORCHSCRIPT_FUNCTION((byte)(2)), + // Kernel Function dtype Tag + KERNEL_FUNCTION_DTYPE((byte)(3)), + // Torchbind custom class, + CUSTOM_CLASS((byte)(4)), + // Generic Build Feature + BUILD_FEATURE((byte)(5)), + // Kernel Function dtype Tag + LITE_INTERPRETER((byte)(6)), + // User defined scope (e.g. with record_function()) + USER_SCOPE((byte)(7)), + // Scopes for static runtime, a specialized TorchScript interpreter + STATIC_RUNTIME_OP((byte)(8)), + STATIC_RUNTIME_MODEL((byte)(9)), + NUM_SCOPES((byte)(10));// must be the last in the list + + public final byte value; + private RecordScope(byte v) { this.value = v; } + private RecordScope(RecordScope e) { this.value = e.value; } + public RecordScope intern() { for (RecordScope e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + + // namespace at + // namespace std + +// Soft limit on the number of callbacks to use; +@Namespace("at") @MemberGetter public static native @Cast("const std::size_t") long kSoftLimitCallbacks(); + +// An abstract base class for various observer contexts that can be attached to +// the RecordFunction. 
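// A minimal sketch (not generated content) of the RecordScope mapping above:
// the scope values become a byte-backed Java enum mirroring the native
// uint8_t, so the raw value crosses the JNI boundary cheaply. Only members
// declared above are used:
//
//   import org.bytedeco.pytorch.global.torch.RecordScope;
//
//   RecordScope scope = RecordScope.USER_SCOPE;
//   byte raw = scope.value;                          // 7, the native constant
//   assert scope.intern() == RecordScope.USER_SCOPE; // intern() maps a raw
//                                                    // value back to the
//                                                    // canonical constant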
+ +// +// PyTorch callbacks/observers API: +// + /** - * Include this file if you want to register operators. It includes all - * functionality needed to do so for you. + * RecordFunctionCallback represents a pair of callbacks to be used with + * RecordFunction, members: + * start, end - the callbacks to run when entering and exiting the scope; + * optionally, the start callback may return an ObserverContext which will + * be passed to the end callback, use appropriate constructor accordingly. + * needs_inputs - whether the callbacks need the inputs passed from the + * observed function/range; NOTE: passing the inputs incurs an additional + * overhead; sampling_probability - if not 1.0, then the callback is + * probabilistically sampled to run; NOTE: start and end callbacks always run as + * a pair and are sampled together; scopes - types of scopes to execute the + * callbacks on (see RecordScope); passing empty set means the callbacks will be + * executed for all possible scope types should_run - optional function that + * returns whether this callback should run; overwrites the effect of setting + * sampling_probability */ -// #include -// #include -// #include +// Notes: +// - two types of callbacks are provided: thread local and global +// - thread local callbacks are added/removed only for the given thread +// and are stored locally for each thread and separately from the list +// of the global callbacks +// - global callbacks are stored in a single per process list and are +// invoked by every RecordFunction, in addition to the thread local +// callbacks specific to the given thread +// - we allow the added callbacks to be sampled, by specifying a sampling +// probability for each callback pair, if the start callback is +// not picked to run, the corresponding end callback won't be called +// - a typical use case for the global callbacks is passive monitoring +// in the background (e.g. fleet-wide monitoring), without focusing on +// the specific piece of code +// - in contrast, thread local callbacks are enabled locally, on demand, +// for the specific piece of code (range) and are not sampled +// - a typical use case for thread local callbacks is profiler and code +// execution tracer +// - note, thread local callbacks are automatically propagated with +// ThreadLocalState across JIT continuations and async tasks (at::launch) + +@Namespace("at") @MemberGetter public static native @Cast("const at::CallbackHandle") long INVALID_CALLBACK_HANDLE(); +// Targeting ../RecordFunctionCallbacksEntry.java + + + +// Holds pairs (callbacks, unique_id) +// Targeting ../RecordFunction.java + + + +@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(RecordScope scope); +@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(@Cast("at::RecordScope") byte scope); + +@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( + RecordScope scope); +@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( + @Cast("at::RecordScope") byte scope); + + // namespace detail + +// optional argument - function's seq_no +// #define RECORD_FUNCTION_WITH_SCOPE(scope, fn, inputs, ...) +// at::RecordFunction guard(scope); +// if (guard.isActive()) { +// ::at::detail::record_function_with_scope( +// guard, fn, inputs, ##__VA_ARGS__); +// } + +// #define RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( +// scope, fn, inputs, outputs, ...) 
+// at::RecordFunction guard(scope); +// if (guard.isActive()) { +// if (guard.needsInputs()) { +// guard.before(fn, inputs, ##__VA_ARGS__); +// } else { +// guard.before(fn, ##__VA_ARGS__); +// } +// if (guard.needsOutputs()) { +// guard.setOutputs(outputs); +// } +// } + +// #define RECORD_FUNCTION(fn, inputs, ...) +// RECORD_FUNCTION_WITH_SCOPE( +// at::RecordScope::FUNCTION, fn, inputs, ##__VA_ARGS__) + +// #define RECORD_TORCHSCRIPT_FUNCTION(mn, inputs) +// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::TORCHSCRIPT_FUNCTION, mn, inputs) + +// #define RECORD_FUNCTION_WITH_INPUTS_OUTPUTS(fn, inputs, outputs, ...) +// RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( +// at::RecordScope::FUNCTION, fn, inputs, outputs, ##__VA_ARGS__) + +// Custom user scopes in C++; similar to Python's 'with record_function("..."):' +// #define RECORD_USER_SCOPE(fn) +// RECORD_FUNCTION_WITH_SCOPE( +// at::RecordScope::USER_SCOPE, fn, c10::ArrayRef{}) + +// RECORD_USER_SCOPE with inputs +// #define RECORD_USER_SCOPE_WITH_INPUTS(fn, inputs) +// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::USER_SCOPE, fn, inputs) + +// Helper macro to pass in debug handle that is used to +// post process events +// #define RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( +// scope, fn, debug_handle, inputs, ...) +// at::RecordFunction guard(scope); +// if (guard.isActive()) { +// ::at::detail::record_function_with_scope_and_debug_handle( +// guard, fn, debug_handle, inputs, ##__VA_ARGS__); +// } + +// Helper macros to record LITE INTERPETER scope events with debug handles +// #define RECORD_EDGE_SCOPE_WITH_DEBUG_HANDLE_AND_INPUTS( +// fn, debug_handle, inputs) +// RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( +// at::RecordScope::LITE_INTERPRETER, fn, debug_handle, inputs) + +// Bookend to the RECORD_FUNCTION macros. Use this after the kernel +// launch to let the profiler bind the outputs to the op that produced +// them. Note that guard is declared by RECORD_FUNCTION so this macro +// needs to be called from the same scope as RECORD_FUNCTION +// #define RECORD_OUTPUTS(outputs) +// if (guard.needsOutputs()) { +// guard.setOutputs( +// std::vector(outputs.begin(), outputs.end())); +// } + +/** + * addThreadLocalCallback adds a thread local callback to run with + * RecordFunction, returns handle to use with removeThreadLocalCallback + */ +@Namespace("at") public static native @Cast("at::CallbackHandle") long addThreadLocalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); + +/** + * hasThreadLocalCallbacks returns whether there're callbacks registered + * with addThreadLocalCallback + */ +@Namespace("at") public static native @Cast("bool") boolean hasThreadLocalCallbacks(); + +/** + * clearThreadLocalCallbacks removes all thread local callbacks + */ +@Namespace("at") public static native void clearThreadLocalCallbacks(); + +/** + * addGlobalCallback adds a global callback to run with RecordFunction: + * + * only during the program initialization + */ +@Namespace("at") public static native @Cast("at::CallbackHandle") long addGlobalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); + +/** + * removeCallback removes a callback given the handle returned by + * addThreadLocalCallback or addGlobalCallback; + * + * no other code can run simultaneously + */ +@Namespace("at") public static native void removeCallback(@Cast("at::CallbackHandle") long handle); + +/** + * Prevent the given callback from executing. If handle is invalid, + * does nothing. 
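 *
 * A minimal sketch of the full handle lifecycle from Java, using only the
 * bindings declared in this header; {@code cb} stands in for a
 * RecordFunctionCallback Pointer that must be obtained from native code:
 *
 *   long handle = addGlobalCallback(cb); // register
 *   disableCallback(handle);             // mute, but keep the registration
 *   reenableCallback(handle);            // resume delivery
 *   removeCallback(handle);              // unregister for good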
+ */ +@Namespace("at") public static native void disableCallback(@Cast("at::CallbackHandle") long handle); + +/** + * Allow the given callback, previously disabled with disableCallback, to + * execute again. If handle is invalid, does nothing. + */ +@Namespace("at") public static native void reenableCallback(@Cast("at::CallbackHandle") long handle); + +/** + * hasGlobalCallbacks returns whether there're global callbacks + * registered with pushGlobalCallback + */ +@Namespace("at") public static native @Cast("bool") boolean hasGlobalCallbacks(); + +/** + * clearGlobalCallbacks removes all global callbacks + */ +@Namespace("at") public static native void clearGlobalCallbacks(); + +// for both thread local and global callbacks +@Namespace("at") public static native @Cast("bool") boolean hasCallbacks(); +@Namespace("at") public static native void clearCallbacks(); + +/** + * enableRecordFunction enables RecordFunction thread locally + */ +@Namespace("at") public static native void enableRecordFunction(@Cast("bool") boolean enable/*=true*/); +@Namespace("at") public static native void enableRecordFunction(); + +/** + * isRecordFunctionEnabled returns whether RecordFunction + * is enabled thread locally + */ +@Namespace("at") public static native @Cast("bool") boolean isRecordFunctionEnabled(); +// Targeting ../RecordFunctionGuard.java + + +// Targeting ../DisableRecordFunctionGuard.java + + +// Targeting ../RecordFunctionTLS.java + + + +@Namespace("at") public static native @Const @ByRef RecordFunctionTLS get_record_function_tls_(); + +@Namespace("at") public static native void set_record_function_tls_(@Const @ByRef RecordFunctionTLS tls); + +@Namespace("at") public static native void set_record_function_seed_for_testing(@Cast("uint32_t") int seed); + + // namespace at + + +// Parsed from c10/util/Synchronized.h + +// #pragma once + +// #include + +// #include + +/** + * A very simple Synchronization class for error-free use of data + * in a multi-threaded context. See folly/docs/Synchronized.md for + * the inspiration of this class. + * + * Full URL: + * https://github.com/facebook/folly/blob/main/folly/docs/Synchronized.md + * + * This class implements a small subset of the generic functionality + * implemented by folly:Synchronized. Specifically, only withLock + * is implemented here since it's the smallest possible API that is + * able to cover a large surface area of functionality offered by + * folly::Synchronized. + */ + // end namespace c10 + + +// Parsed from c10/core/GradMode.h + +// #pragma once + +// #include +// #include +// Targeting ../GradMode.java + + +// Targeting ../AutoGradMode.java + + +// Targeting ../NoGradGuard.java + + +// Targeting ../AutoFwGradMode.java + + + + // namespace c10 + + +// Parsed from ATen/core/grad_mode.h + +// #pragma once + +// #include +// #include + + + +// Parsed from ATen/core/dispatch/Dispatcher.h + +// #pragma once + +// #include // #include +// #include +// #include // #include // #include -// #include -// #if defined(EXPOSE_C2_OPS) || !defined(CAFFE2_IS_XPLAT_BUILD) -// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include + +// #ifndef NDEBUG +// #include // #endif -// #include -// The first argument of the schema might be of type DispatchKeySet, in which case we remove it. -// We do this because every argument in a function schema is expected to be convertable -// to an ivalue, but DispatchKeySet is not a type we want the jit to be aware of. 
-// See Note [Plumbing Keys Through The Dispatcher] -// Targeting ../RegisterOperators.java +@Namespace("c10") public static native @Cast("bool") boolean show_dispatch_trace(); +@Namespace("c10") public static native void dispatch_trace_nesting_incr(); +@Namespace("c10") public static native void dispatch_trace_nesting_decr(); +@Namespace("c10") public static native @Cast("int64_t") long dispatch_trace_nesting_value(); +// Targeting ../OpRegistrationListener.java + + + +// Targeting ../Dispatcher.java + + +// Targeting ../OperatorHandle.java + + + +/** + * This is a handle to an operator schema registered with the dispatcher. + * It holds the same information as an OperatorHandle, but it is templated + * on the operator arguments and allows calling the operator in an + * unboxed way. + */ + +// CaptureKernelCall is intended to capture return values from Dispatcher +// unboxed kernel calls. A record function may request to get outputs from the +// kernel calls. For boxed kernels, it's straightforward, the returned values +// are in the stack object. The stack can be passed to record functions. For +// unboxed kernels, we need to handle different kinds of return values, cache +// them temporarily, then release the values for the actual function call +// return. + +// Handle the lvalue reference differently since it should not be moved. + + +// Handle case where the kernel returns void. + + // namespace detail + +// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && + + +// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && + + +// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && + + + + +// NB: this doesn't count as a "true" dispatcher jump, so no instrumentation + // namespace c10 - // Old-style API + + // namespace std + + +// Parsed from ATen/core/dispatch/CppSignature.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// Targeting ../CppSignature.java + + + +@Namespace("c10::impl") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef CppSignature lhs, @Const @ByRef CppSignature rhs); + + + + + +// Parsed from ATen/core/dispatch/RegistrationHandleRAII.h + +// #pragma once + +// #include +// Targeting ../RegistrationHandleRAII.java + + + @@ -16923,7 +17494,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { nondeterministic_bitwise(5), nondeterministic_seeded(6), pointwise(7), - view_copy(8); + pt2_compliant_tag(8), + view_copy(9); public final int value; private Tag(int v) { this.value = v; } @@ -16934,6 +17506,52 @@ public class torch extends org.bytedeco.pytorch.presets.torch { +// Parsed from ATen/core/ATenOpList.h + +// #pragma once + +// #include + + +// check if an op is a custom op (i.e. did not come from native_functions.yaml) +@Namespace("at") public static native @Cast("bool") boolean is_custom_op(@Const @ByRef OperatorName opName); + + + +// Parsed from ATen/core/op_registration/op_registration.h + +// #pragma once + +/** + * Include this file if you want to register operators. It includes all + * functionality needed to do so for you. + */ + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #if defined(EXPOSE_C2_OPS) || !defined(CAFFE2_IS_XPLAT_BUILD) +// #include +// #endif +// #include +// The first argument of the schema might be of type DispatchKeySet, in which case we remove it. 
+// We do this because every argument in a function schema is expected to be convertable +// to an ivalue, but DispatchKeySet is not a type we want the jit to be aware of. +// See Note [Plumbing Keys Through The Dispatcher] + +// Targeting ../RegisterOperators.java + + + + // namespace c10 + // Old-style API + + + // Parsed from ATen/core/function.h // #pragma once @@ -17070,6 +17688,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include +// #include // #include // #include @@ -17093,6 +17712,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { */ // #endif +// #define HAS_PT2_COMPLIANT_TAG + // For multipy/torchdeploy use case @Namespace("torch") public enum _RegisterOrVerify { REGISTER(0), VERIFY(1); @@ -17102,6 +17723,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { public _RegisterOrVerify intern() { for (_RegisterOrVerify e : values()) if (e.value == value) return e; return this; } @Override public String toString() { return intern().name(); } } + + +/// +// #define HAS_IMPL_ABSTRACT_PYSTUB // Targeting ../CppFunction.java @@ -17442,15 +18067,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from ATen/core/grad_mode.h - -// #pragma once - -// #include -// #include - - - // Parsed from torch/csrc/autograd/grad_mode.h // #pragma once @@ -17482,25 +18098,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // get a mutable reference to the functorch tls @Namespace("at::functorch") public static native @UniquePtr FuncTorchTLSBase functorchTLSAccessor(); - // namespace functorch - // namespace at - - -// Parsed from c10/core/SafePyObject.h - -// #pragma once - -// #include -// #include -// #include -// Targeting ../SafePyObject.java - - -// Targeting ../SafePyHandle.java - - - - // namespace c10 + // namespace at::functorch // Parsed from ATen/PythonTorchFunctionTLS.h @@ -17524,8 +18122,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at::impl") public static native @Cast("bool") boolean torch_function_mode_enabled(); - // namespace impl - // namespace at + // namespace at::impl // Parsed from ATen/SavedTensorHooks.h @@ -17562,8 +18159,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { - // namespace impl - // namespace at + // namespace at::impl // Parsed from c10/core/impl/PythonDispatcherTLS.h @@ -17589,12 +18185,28 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include + +@Namespace("c10::impl") public enum TorchDispatchModeKey { + FAKE((byte)(0)), + PROXY((byte)(1)), + FUNCTIONAL((byte)(2)), + NUM_MODE_KEYS((byte)(3)); + + public final byte value; + private TorchDispatchModeKey(byte v) { this.value = v; } + private TorchDispatchModeKey(TorchDispatchModeKey e) { this.value = e.value; } + public TorchDispatchModeKey intern() { for (TorchDispatchModeKey e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} // Targeting ../TorchDispatchModeTLS.java @Namespace("c10::impl") public static native @Cast("bool") boolean dispatch_mode_enabled(); +@Namespace("c10::impl") public static native @StdString BytePointer to_string(TorchDispatchModeKey mode_key); +@Namespace("c10::impl") public static native @StdString String to_string(@Cast("c10::impl::TorchDispatchModeKey") byte mode_key); + // namespace impl // namespace c10 @@ -17906,14 +18518,9 @@ public class torch extends 
org.bytedeco.pytorch.presets.torch { // #include // #include -// #include // #include -// #include -// #include -// #include - -// Forward-declares at::Context, at::Generator and at::cuda::NVRTC +// Forward-declares at::Generator and at::cuda::NVRTC // namespace cuda // namespace at @@ -17940,13 +18547,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include -// #include +// #include // #include // #include // #include -// #include // #include @@ -17965,6 +18571,27 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace at +// Parsed from ATen/detail/IPUHooksInterface.h + +// #pragma once + +// #include +// #include +// #include +// #include +// Targeting ../IPUHooksInterface.java + + +// Targeting ../IPUHooksArgs.java + + +// #define REGISTER_IPU_HOOKS(clsname) +// C10_REGISTER_CLASS(IPUHooksRegistry, clsname, clsname) +@Namespace("at::detail") public static native @Const @ByRef IPUHooksInterface getIPUHooks(); + // namespace detail + // namespace at + + // Parsed from ATen/detail/MPSHooksInterface.h // Copyright © 2022 Apple Inc. @@ -17977,8 +18604,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include - // Targeting ../MPSHooksInterface.java @@ -17997,17 +18622,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once -// #include // #include // #include -// #include -// #include -// #include - -// Targeting ../DLDevice_.java - +// #include @Namespace("at") @MemberGetter public static native @Cast("const char*") BytePointer MTIA_HELP(); @@ -18070,17 +18689,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include - +// #include // #include // #include // #include // #include +// Targeting ../DLDevice_.java -// We use forward declaration here instead of #include to avoid -// leaking DLPack implementation detail to every project that includes `ATen/Context.h`, which in turn -// would lead to a conflict when linked with another project using DLPack (for example TVM) @Namespace("at") @MemberGetter public static native @Cast("const char*") BytePointer XPU_HELP(); // Targeting ../XPUHooksInterface.java @@ -18178,6 +18795,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -18544,8 +19162,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal SymIntArrayRef stride, @Const @ByRef TensorOptions options); - // namespace detail - // namespace at + // namespace at::detail // Parsed from ATen/TensorGeometry.h @@ -19046,6 +19663,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // It should only affect the correctness of tracing function, because the // guard is essentially no-op when the master `setTracingState()` switch is // off. + // TODO: move this from `at::` to `jit::torch::` after // `aten/src/ATen/cpp_custom_type_hack.h` is removed. 
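// A minimal sketch of driving the tracer toggle kept by the hunk below from
// Java; the try/finally restore is illustrative, since the binding exposes
// only the raw setter:
//
//   import static org.bytedeco.pytorch.global.torch.set_dispatch_enabled;
//
//   set_dispatch_enabled(false);      // stop recording ops into the trace
//   try {
//       // ... eager-only work that must not be traced ...
//   } finally {
//       set_dispatch_enabled(true);   // restore tracing dispatch
//   }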
@@ -19053,9 +19671,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at::tracer::impl") public static native void set_dispatch_enabled(@Cast("bool") boolean enabled); - // namespace impl - // namespace tracer - // namespace at + // namespace at::tracer::impl // Parsed from ATen/core/Reduction.h @@ -20151,12 +20767,27 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim); +@Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); + // aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); // aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor all_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim); +@Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
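// A minimal sketch of the new dims overloads from Java (tensor contents are
// illustrative): the long[] variant declared above reduces over an explicit
// list of dimensions in one call, while the all_out/all_outf variants
// declared alongside write into an existing tensor instead of allocating one:
//
//   Tensor t = org.bytedeco.pytorch.global.torch.ones(2, 3);
//   Tensor r = org.bytedeco.pytorch.global.torch.all(t, new long[]{0, 1}, false);
//   // r is a 0-dim boolean tensor: true iff every element of t is nonzero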
+@Namespace("at") public static native @ByRef Tensor all_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor all_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); + // aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor @Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @ByVal Dimname dim); @@ -20455,12 +21086,27 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim); +@Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); + // aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); // aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor any_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim); +@Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
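// The same dims pattern as all above, sketched with keepdim (tensor contents
// are illustrative); the any_out/any_outf variants declared alongside take
// the result tensor as an extra argument instead:
//
//   Tensor t = org.bytedeco.pytorch.global.torch.rand(4, 5);
//   Tensor rows = org.bytedeco.pytorch.global.torch.any(t, new long[]{1}, true);
//   // rows has shape [4, 1]: one flag per row; keepdim=true keeps the
//   // reduced dimension as size 1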
+@Namespace("at") public static native @ByRef Tensor any_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor any_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); + // aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor @Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @ByVal Dimname dim); @@ -23173,15 +23819,31 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::channel_shuffle(Tensor self, int groups) -> Tensor +// aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor @Namespace("at") public static native @ByVal Tensor channel_shuffle(@Const @ByRef Tensor self, @Cast("int64_t") long groups); -// aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!) + +// aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor channel_shuffle_symint(@Const @ByRef Tensor self, @ByVal SymInt groups); + + +// aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor channel_shuffle_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long groups); -// aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor channel_shuffle_outf(@Const @ByRef Tensor self, @Cast("int64_t") long groups, @ByRef Tensor out); +// aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor channel_shuffle_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymInt groups); + + +// aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor channel_shuffle_symint_outf(@Const @ByRef Tensor self, @ByVal SymInt groups, @ByRef Tensor out); + + + // Parsed from ATen/ops/cholesky.h @@ -24139,19 +24801,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, SymInt[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor +// aten::conv1d(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] dilation=1, SymInt groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); -// aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, SymInt[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +// aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] dilation=1, SymInt groups=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups); @Namespace("at") public static native @ByVal Tensor conv1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); -// aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor +// aten::conv1d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[1] stride=1, str padding="valid", SymInt[1] dilation=1, SymInt groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding); @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); @@ -24162,6 +24823,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView String padding); +// aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, str padding="valid", SymInt[1] dilation=1, SymInt groups=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @StringView BytePointer padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups); +@Namespace("at") public static native @ByVal Tensor conv1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @StringView BytePointer padding); +@Namespace("at") public static native @ByVal Tensor conv1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @StringView String padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups); +@Namespace("at") public static native @ByVal Tensor conv1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @StringView String padding); + + + // Parsed from ATen/ops/conv2d.h @@ -24188,19 +24857,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor +// aten::conv2d(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, SymInt groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); -// aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +// aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, SymInt groups=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups); @Namespace("at") public static native @ByVal Tensor conv2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); -// aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor +// aten::conv2d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[2] stride=1, str padding="valid", SymInt[2] dilation=1, SymInt groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding); @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); @@ -24211,6 +24879,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView String padding); +// aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, str padding="valid", SymInt[2] dilation=1, SymInt groups=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @StringView BytePointer padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups); +@Namespace("at") public static native @ByVal Tensor conv2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @StringView BytePointer padding); +@Namespace("at") public static native @ByVal Tensor conv2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @StringView String padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups); +@Namespace("at") public static native @ByVal Tensor conv2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @StringView String padding); + + + // Parsed from ATen/ops/conv3d.h @@ -24237,19 +24913,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor +// aten::conv3d(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, SymInt groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); -// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, SymInt groups=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups); @Namespace("at") public static native @ByVal Tensor conv3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); -// aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor +// aten::conv3d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[3] stride=1, str padding="valid", SymInt[3] dilation=1, SymInt groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding); @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); @@ -24260,6 +24935,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView String padding); +// aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, str padding="valid", SymInt[3] dilation=1, SymInt groups=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @StringView BytePointer padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups); +@Namespace("at") public static native @ByVal Tensor conv3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @StringView BytePointer padding); +@Namespace("at") public static native @ByVal Tensor conv3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @StringView String padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups); +@Namespace("at") public static native @ByVal Tensor conv3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @StringView String padding); + + + // Parsed from ATen/ops/conv_depthwise3d.h @@ -24286,34 +24969,31 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor +// aten::conv_depthwise3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? 
bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation) -> Tensor @Namespace("at") public static native @ByVal Tensor conv_depthwise3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_depthwise3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); -// aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv_depthwise3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor conv_depthwise3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +// aten::conv_depthwise3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv_depthwise3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation); -// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!) +// aten::conv_depthwise3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor conv_depthwise3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation); @Namespace("at") public static native @ByRef Tensor conv_depthwise3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dilation); -// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!) +// aten::conv_depthwise3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor conv_depthwise3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor conv_depthwise3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); -// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation); -@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +// aten::conv_depthwise3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation); -// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); +// aten::conv_depthwise3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @ByRef Tensor out); @@ -24410,16 +25090,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor +// aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, SymInt groups=1, SymInt[1] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); @Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); -// aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? 
bias=None, int[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv_transpose1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +// aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, SymInt groups=1, SymInt[1] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv_transpose1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_transpose1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv_transpose1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); @@ -24449,16 +25128,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor +// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); @Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); -// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv_transpose2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv_transpose2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_transpose2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv_transpose2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); @@ -24488,16 +25166,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor +// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); @Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); -// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv_transpose3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv_transpose3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_transpose3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv_transpose3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); @@ -24527,34 +25204,31 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor +// aten::convolution(Tensor input, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor @Namespace("at") public static native @ByVal Tensor convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups); @Namespace("at") public static native @ByVal Tensor convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups); -// aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor convolution_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor convolution_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups); +// aten::convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor convolution_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @ByVal SymInt groups); -// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) +// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor convolution_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups); @Namespace("at") public static native @ByRef Tensor convolution_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups); -// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) +// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor convolution_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor convolution_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); -// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups); +// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @ByVal SymInt groups); -// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor convolution_symint_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor convolution_symint_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); +// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor convolution_symint_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @ByVal SymInt groups, @ByRef Tensor out); @@ -24584,40 +25258,37 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? 
bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +// aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? 
bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @ByVal SymInt groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @ByVal SymInt groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @ByVal SymInt groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); @@ -24647,18 +25318,34 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) +// aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", 
"c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + +// aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @ByVal SymInt groups, @ByVal @Cast("std::array*") BoolPointer output_mask); + + +// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + + +// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @ByVal SymInt groups, @ByVal @Cast("std::array*") BoolPointer output_mask); + + +// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @ByVal SymInt groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); + + + // Parsed from ATen/ops/convolution_overrideable.h @@ -24685,18 +25372,34 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor +// aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor @Namespace("at") public static native @ByVal Tensor convolution_overrideable(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups); @Namespace("at") public static native @ByVal Tensor convolution_overrideable(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups); -// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) + +// aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor convolution_overrideable_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @ByVal SymInt groups); + + +// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor convolution_overrideable_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups); @Namespace("at") public static native @ByRef Tensor convolution_overrideable_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups); -// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) 
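// ---------------------------------------------------------------------------
// Aside: the pattern running through this patch. Schemas that previously
// mixed int[] and SymInt[] are now SymInt throughout (stride, padding,
// dilation, output_padding, and groups), so each op gains a *_symint binding
// taking SymIntArrayRef / SymInt, while the concrete long[] overloads remain
// for fixed shapes. A SymInt holding a plain value can presumably be built
// via c10::SymInt's int64_t constructor, assumed mapped as:
SymInt groups = new SymInt(1);
// Building a SymIntArrayRef from Java is less direct than passing long[];
// the exact helper depends on the presets' mapping, so it is not sketched.
// ---------------------------------------------------------------------------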
+ + +// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor convolution_overrideable_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor convolution_overrideable_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); +// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor convolution_overrideable_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @ByVal SymInt groups); + + +// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor convolution_overrideable_symint_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @ByVal SymInt groups, @ByRef Tensor out); + + + // Parsed from ATen/ops/copy.h @@ -25405,18 +26108,34 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor +// aten::cudnn_convolution(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor @Namespace("at") public static native @ByVal Tensor cudnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); @Namespace("at") public static native @ByVal Tensor cudnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); -// aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + +// aten::cudnn_convolution(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor +@Namespace("at") public static native @ByVal Tensor cudnn_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); + + +// aten::cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor cudnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); @Namespace("at") public static native @ByRef Tensor cudnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); -// aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor cudnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor cudnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); +// aten::cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); + + +// aten::cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor cudnn_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); + + + // Parsed from ATen/ops/cudnn_convolution_add_relu.h @@ -25443,18 +26162,34 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor +// aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor @Namespace("at") public static native @ByVal Tensor cudnn_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); @Namespace("at") public static native @ByVal Tensor cudnn_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); -// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) + +// aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor cudnn_convolution_add_relu_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups); + + +// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); @Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); -// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups); + + +// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @ByRef Tensor out); + + + // Parsed from ATen/ops/cudnn_convolution_relu.h @@ -25481,18 +26216,34 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor +// aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor @Namespace("at") public static native @ByVal Tensor cudnn_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); @Namespace("at") public static native @ByVal Tensor cudnn_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); -// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) + +// aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor cudnn_convolution_relu_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups); + + +// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); @Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); -// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups); + + +// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @ByRef Tensor out); + + + // Parsed from ATen/ops/cudnn_convolution_transpose.h @@ -25519,18 +26270,34 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor +// aten::cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor @Namespace("at") public static native @ByVal Tensor cudnn_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); @Namespace("at") public static native @ByVal Tensor cudnn_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, 
@Cast("bool") boolean allow_tf32); -// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + +// aten::cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor +@Namespace("at") public static native @ByVal Tensor cudnn_convolution_transpose_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); + + +// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); @Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); -// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); +// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); + + +// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); + + + // Parsed from ATen/ops/cudnn_grid_sampler.h @@ -30102,6 +30869,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor @Namespace("at") public static native @ByVal Tensor floor_divide(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor floor_divide_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor floor_divide_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); + @@ -36503,11 +37275,44 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Tensor start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Tensor start, @Const @ByRef Tensor end, @Cast("int64_t") long steps); +// aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Tensor start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + +// aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Tensor start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Tensor start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); +// aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Tensor start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + +// aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Scalar start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Scalar start, @Const @ByRef Tensor end, @Cast("int64_t") long steps); +// aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Scalar start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + // aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor linspace_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); // aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) 
out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor linspace_outf(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, @ByRef Tensor out); +// aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linspace_out(@ByRef Tensor out, @Const @ByRef Tensor start, @Const @ByRef Tensor end, @Cast("int64_t") long steps); +// aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linspace_outf(@Const @ByRef Tensor start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, @ByRef Tensor out); + +// aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linspace_out(@ByRef Tensor out, @Const @ByRef Tensor start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); +// aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linspace_outf(@Const @ByRef Tensor start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, @ByRef Tensor out); + +// aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linspace_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Tensor end, @Cast("int64_t") long steps); +// aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linspace_outf(@Const @ByRef Scalar start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, @ByRef Tensor out); + @@ -37237,12 +38042,48 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Tensor start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, double base/*=10.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Tensor start, @Const @ByRef Tensor end, @Cast("int64_t") long steps); +// aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Tensor start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, double base, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + +// aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? 
dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Tensor start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base/*=10.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Tensor start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); +// aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Tensor start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + +// aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Scalar start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, double base/*=10.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Scalar start, @Const @ByRef Tensor end, @Cast("int64_t") long steps); +// aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Scalar start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, double base, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + // aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor logspace_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base/*=10.0*/); @Namespace("at") public static native @ByRef Tensor logspace_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); // aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor logspace_outf(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base, @ByRef Tensor out); +// aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor logspace_out(@ByRef Tensor out, @Const @ByRef Tensor start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, double base/*=10.0*/); +@Namespace("at") public static native @ByRef Tensor logspace_out(@ByRef Tensor out, @Const @ByRef Tensor start, @Const @ByRef Tensor end, @Cast("int64_t") long steps); +// aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) 
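The new Tensor_Tensor, Tensor_Scalar and Scalar_Tensor overloads of linspace and logspace accept 0-d tensors as endpoints, so a start or end produced by earlier tensor computation can be passed straight through. A sketch using the Tensor_Scalar forms declared above; scalar_tensor(Scalar) and the Scalar(double) constructor are assumed bindings, not declared in this hunk:

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

Tensor start = scalar_tensor(new Scalar(0.0));       // 0-d tensor, assumed binding
// Tensor start, Scalar end: the Tensor_Scalar overload.
Tensor ramp = linspace(start, new Scalar(1.0), 5);   // 0.00, 0.25, ..., 1.00
// logspace mirrors it; the short form declared above uses base = 10.
Tensor decades = logspace(start, new Scalar(3.0), 4); // 1, 10, 100, 1000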
+@Namespace("at") public static native @ByRef Tensor logspace_outf(@Const @ByRef Tensor start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, double base, @ByRef Tensor out); + +// aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor logspace_out(@ByRef Tensor out, @Const @ByRef Tensor start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base/*=10.0*/); +@Namespace("at") public static native @ByRef Tensor logspace_out(@ByRef Tensor out, @Const @ByRef Tensor start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); +// aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor logspace_outf(@Const @ByRef Tensor start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base, @ByRef Tensor out); + +// aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor logspace_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, double base/*=10.0*/); +@Namespace("at") public static native @ByRef Tensor logspace_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Tensor end, @Cast("int64_t") long steps); +// aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor logspace_outf(@Const @ByRef Scalar start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, double base, @ByRef Tensor out); + @@ -37732,6 +38573,42 @@ public class torch extends org.bytedeco.pytorch.presets.torch { +// Parsed from ATen/ops/masked_scatter_backward.h + +// #pragma once + +// @generated by torchgen/gen.py from Function.h + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + + + +// #include + + +// aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor +@Namespace("at") public static native @ByVal Tensor masked_scatter_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor mask, @ByVal LongArrayRef sizes); +@Namespace("at") public static native @ByVal Tensor masked_scatter_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor mask, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); + + +// aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor +@Namespace("at") public static native @ByVal Tensor masked_scatter_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor mask, @ByVal SymIntArrayRef sizes); + + + + + // Parsed from ATen/ops/masked_select.h // #pragma once @@ -38878,34 +39755,31 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor +// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor @Namespace("at") public static native @ByVal Tensor miopen_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); @Namespace("at") public static native @ByVal Tensor miopen_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor -@Namespace("at") public static native @ByVal Tensor miopen_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByVal Tensor miopen_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor +@Namespace("at") public static native @ByVal Tensor miopen_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
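MIOpen is the ROCm counterpart of cuDNN, and this hunk widens its schemas the same way: stride, dilation and groups join padding as SymInt. The long-based Java bindings are regenerated with unchanged parameter lists, so existing callers keep compiling; only code that used the previous mixed _symint overloads (SymIntArrayRef padding with long[] stride and dilation) has to adapt. A sketch of the stable concrete-shape call, assuming a ROCm build and the usual randn(long...) factory binding, with device placement omitted:

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

Tensor hin = randn(1, 3, 32, 32);   // assumed factory binding
Tensor hw  = randn(8, 3, 3, 3);
Tensor hout = miopen_convolution(hin, hw, new TensorOptional(),
        new long[]{1, 1}, new long[]{1, 1}, new long[]{1, 1},
        1, false, false);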
@Namespace("at") public static native @ByRef Tensor miopen_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); @Namespace("at") public static native @ByRef Tensor miopen_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor miopen_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor miopen_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); @@ -38935,11 +39809,16 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor +// aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor @Namespace("at") public static native @ByVal Tensor miopen_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); @Namespace("at") public static native @ByVal Tensor miopen_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); +// aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor miopen_convolution_add_relu_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups); + + + // Parsed from ATen/ops/miopen_convolution_relu.h @@ -38966,11 +39845,16 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor +// aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor @Namespace("at") public static native @ByVal Tensor miopen_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); @Namespace("at") public static native @ByVal Tensor miopen_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); +// aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor miopen_convolution_relu_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups); + + + // Parsed from ATen/ops/miopen_convolution_transpose.h @@ -38997,34 +39881,31 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor +// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor @Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); @Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor -@Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor +@Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); @Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); @@ -39054,34 +39935,31 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor +// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor @Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); @Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor -@Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor +@Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); @Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); @@ -39328,34 +40206,31 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor +// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor @Namespace("at") public static native @ByVal Tensor mkldnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); @Namespace("at") public static native @ByVal Tensor mkldnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); -// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor mkldnn_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor mkldnn_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); +// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor mkldnn_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups); -// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) +// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor mkldnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); @Namespace("at") public static native @ByRef Tensor mkldnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); -// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) +// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor mkldnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor mkldnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); +// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups); -// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? 
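The hunks above complete the same migration for mkldnn_convolution: the *_symint bindings now take SymIntArrayRef for padding/stride/dilation and a SymInt for groups, while the plain long[]/long overloads are kept unchanged. A minimal Java sketch of a call site that stays on the unchanged long[] entry point (shapes are hypothetical; torch.randn(long...) and the empty TensorOptional() constructor are assumed to be mapped as in earlier javacpp-presets releases, and aten::mkldnn_convolution itself needs an MKLDNN-enabled libtorch at runtime):

import org.bytedeco.pytorch.*;
import org.bytedeco.pytorch.global.torch;

public class MkldnnConvSketch {
    public static void main(String[] args) {
        Tensor input  = torch.randn(1, 3, 8, 8);   // NCHW input (hypothetical shape)
        Tensor weight = torch.randn(4, 3, 3, 3);   // OIHW weight (hypothetical shape)
        Tensor out = torch.mkldnn_convolution(
                input, weight,
                new TensorOptional(),   // no bias (assumed empty-optional constructor)
                new long[] {1, 1},      // padding
                new long[] {1, 1},      // stride
                new long[] {1, 1},      // dilation
                1);                     // groups: still a plain long on this overload
        System.out.println(out.dim()); // expect 4
    }
}

Only the *_symint signatures change in this patch, so code like the above keeps compiling; callers that want symbolic shapes move to the SymIntArrayRef/SymInt overloads instead.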
@@ -39698,26 +40573,44 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
 // #include

-// aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None) -> Tensor
+// aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor
 @Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional input_size);
 @Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self);
 @Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... input_size);
 @Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional input_size);
 @Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... input_size);
-// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
+
+// aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional input_size);
+@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight_symint(@Const @ByRef Tensor self);
+
+
+// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
 @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional input_size);
 @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self);
 @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... input_size);
 @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional input_size);
 @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... input_size);
-// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
+
+
+// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
 @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal LongArrayRefOptional input_size, @ByRef Tensor out);
 @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] input_size, @ByRef Tensor out);
 @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal LongArrayRefOptional input_size, @ByRef Tensor out);
 @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] input_size, @ByRef Tensor out);
+// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional input_size);
+@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self);
+
+
+// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @ByVal SymIntArrayRefOptional input_size, @ByRef Tensor out);
+
+
+

 // Parsed from ATen/ops/mkldnn_reorder_conv3d_weight.h

@@ -39744,20 +40637,38 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
 // #include

-// aten::mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor
+// aten::mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1) -> Tensor
 @Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv3d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/);
 @Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv3d_weight(@Const @ByRef Tensor self);
 @Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv3d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/);
-// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!)
+
+// aten::mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1) -> Tensor
+@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv3d_weight_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups);
+@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv3d_weight_symint(@Const @ByRef Tensor self);
+
+
+// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, *, Tensor(a!) out) -> Tensor(a!)
 @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/);
 @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self);
 @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/);
+
+
+// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, *, Tensor(a!) out) -> Tensor(a!)
 @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out);
 @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out);
+// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation, @ByVal(nullValue = "c10::SymInt(1)") SymInt groups);
+@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self);
+
+
+// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @ByRef Tensor out);
+
+
+
+@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @ByRef Tensor out); + + + // Parsed from ATen/ops/mkldnn_rnn_layer.h @@ -40010,18 +40921,34 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +// aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + +// aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @ByVal @Cast("std::array*") BoolPointer output_mask); + + +// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + + +// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); @Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @ByVal @Cast("std::array*") BoolPointer output_mask); + + +// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); + + + // Parsed from ATen/ops/mps_convolution_transpose_backward.h @@ -40048,18 +40975,34 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor) +// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); @Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) + +// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @ByVal @Cast("std::array*") BoolPointer output_mask); + + +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); @Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + + +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1); @Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @ByVal @Cast("std::array*") BoolPointer output_mask); + + +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef dilation, @ByVal SymInt groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1); + + + // Parsed from ATen/ops/mse_loss.h @@ -40955,10 +41898,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::native_channel_shuffle(Tensor self, int groups) -> Tensor +// aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor @Namespace("at") public static native @ByVal Tensor native_channel_shuffle(@Const @ByRef Tensor self, @Cast("int64_t") long groups); +// aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor native_channel_shuffle_symint(@Const @ByRef Tensor self, @ByVal SymInt groups); + + + // Parsed from ATen/ops/native_dropout.h @@ -45931,31 +46879,55 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::repeat_interleave.Tensor(Tensor repeats, *, int? 
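native_channel_shuffle shows the scalar half of the migration: the "int groups" parameter becomes "SymInt groups" in the symint binding, while the long overload stays. Assuming the SymInt(long) constructor is mapped, the two calls below are equivalent for a concrete group count (tensor shape hypothetical):

Tensor x = torch.randn(1, 4, 5, 5);
Tensor a = torch.native_channel_shuffle(x, 2);                     // unchanged long overload
Tensor b = torch.native_channel_shuffle_symint(x, new SymInt(2));  // new SymInt overload (SymInt(long) ctor assumed)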
 // Parsed from ATen/ops/native_dropout.h

@@ -45931,31 +46879,55 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
 // #include

-// aten::repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor
 @Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional output_size);
 @Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor repeats);
-// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor
+
+// aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor repeat_interleave_symint(@Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional<c10::SymInt>(c10::nullopt)") SymIntOptional output_size);
+@Namespace("at") public static native @ByVal Tensor repeat_interleave_symint(@Const @ByRef Tensor repeats);
+
+
+// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
 @Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor self, @Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional output_size);
 @Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor self, @Const @ByRef Tensor repeats);
-// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor
+
+// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor repeat_interleave_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional<c10::SymInt>(c10::nullopt)") SymIntOptional output_size);
+@Namespace("at") public static native @ByVal Tensor repeat_interleave_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor repeats);
+
+
+// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
 @Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor self, @Cast("int64_t") long repeats, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional output_size);
 @Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor self, @Cast("int64_t") long repeats);
-// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor repeat_interleave_symint(@Const @ByRef Tensor self, @ByVal SymInt repeats, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional output_size);
+// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor repeat_interleave_symint(@Const @ByRef Tensor self, @ByVal SymInt repeats, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional<c10::SymInt>(c10::nullopt)") SymIntOptional output_size);
 @Namespace("at") public static native @ByVal Tensor repeat_interleave_symint(@Const @ByRef Tensor self, @ByVal SymInt repeats);

-// aten::repeat_interleave.Tensor_out(Tensor repeats, *, int? output_size=None, Tensor(a!) out) -> Tensor(a!)
+// aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!)
 @Namespace("at") public static native @ByRef Tensor repeat_interleave_out(@ByRef Tensor out, @Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional output_size);
 @Namespace("at") public static native @ByRef Tensor repeat_interleave_out(@ByRef Tensor out, @Const @ByRef Tensor repeats);
+
+
+// aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!)
 @Namespace("at") public static native @ByRef Tensor repeat_interleave_outf(@Const @ByRef Tensor repeats, @ByVal LongOptional output_size, @ByRef Tensor out);
+// aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor repeat_interleave_symint_out(@ByRef Tensor out, @Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional<c10::SymInt>(c10::nullopt)") SymIntOptional output_size);
+@Namespace("at") public static native @ByRef Tensor repeat_interleave_symint_out(@ByRef Tensor out, @Const @ByRef Tensor repeats);
+
+
+// aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor repeat_interleave_symint_outf(@Const @ByRef Tensor repeats, @ByVal SymIntOptional output_size, @ByRef Tensor out);
+
+
+
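repeat_interleave covers the optional case: output_size moves from LongOptional to SymIntOptional in the *_symint bindings, while dim stays a LongOptional. A hedged sketch, assuming the single-value optional constructors follow the usual javacpp mapping and torch.ones(long...) is available:

Tensor t  = torch.ones(3);
Tensor r1 = torch.repeat_interleave_symint(t, new SymInt(2));  // every element repeated twice
Tensor r2 = torch.repeat_interleave_symint(t, new SymInt(2),
        new LongOptional(0),                   // dim is still int? in the schema (assumed ctor)
        new SymIntOptional(new SymInt(6)));    // output_size hint is now SymInt? (assumed ctor)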
@Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding); @Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); @Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); @Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); -// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) +// aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor slow_conv3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor slow_conv3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor out); -// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); +// aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size); -// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByRef Tensor out); +// aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByRef Tensor out); -// aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor +// aten::slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding); @Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); @Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); @Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); -// aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); -@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); -@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); +// aten::slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? 
bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); +@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size); @@ -48667,34 +49634,31 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) +// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); @Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); -// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) +// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByRef Tensor output); @Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor output); -// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) 
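
A minimal usage sketch of the concrete-int entry point kept above (the long... overload maps at::IntArrayRef); the _symint variants instead take a SymIntArrayRef for every size argument. The tensor names and shapes here are hypothetical and not part of the patch.

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

// Hypothetical 5-D input (N, C, D, H, W) and a matching 3-D convolution weight.
Tensor input = randn(1, 4, 8, 8, 8);
Tensor weight = randn(8, 4, 3, 3, 3);
// bias, stride, and padding fall back to the nullValue defaults declared above.
Tensor y = slow_conv3d(input, weight, 3, 3, 3);
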
-@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding); +// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding); -// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByRef Tensor output); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByRef Tensor output); +// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByRef Tensor output); -// aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor +// aten::slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv3d_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); @Namespace("at") public static native @ByVal Tensor slow_conv3d_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
padding); -// aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor slow_conv3d_forward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding); -@Namespace("at") public static native @ByVal Tensor slow_conv3d_forward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding); +// aten::slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv3d_forward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding); @@ -48724,42 +49688,37 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor +// aten::slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); @Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); -// aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); +// aten::slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size); -// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); @Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); -// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); -// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size); -// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @ByRef Tensor out); @@ -48789,42 +49748,37 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor +// aten::slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); @Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); -// aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); +// aten::slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size); -// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); @Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); -// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); -// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); +// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size); -// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); +// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef dilation, @ByRef Tensor out); @@ -48854,42 +49808,37 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); @Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); -// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); -// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size); -// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal SymIntArrayRef dilation, @ByRef Tensor out); -// aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor +// aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); @Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); -// aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); +// aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size); @@ -48919,42 +49868,37 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); @Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); -// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); -// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); +// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size); -// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); +// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal SymIntArrayRef dilation, @ByRef Tensor out); -// aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor +// aten::slow_conv_transpose3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? 
bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); @Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); -// aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); +// aten::slow_conv_transpose3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size); @@ -53025,8 +53969,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::sym_constrain_range_for_size(Scalar size, *, int? min, int? max) -> () -@Namespace("at") public static native void sym_constrain_range_for_size(@Const @ByRef Scalar size, @ByVal LongOptional min, @ByVal LongOptional max); +// aten::sym_constrain_range_for_size(Scalar size, *, int? min=None, int? max=None) -> () +@Namespace("at") public static native void sym_constrain_range_for_size(@Const @ByRef Scalar size, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional min, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional max); +@Namespace("at") public static native void sym_constrain_range_for_size(@Const @ByRef Scalar size); @@ -53598,22 +54543,40 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) +// aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding); @Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); @Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
padding); @Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); -// aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor thnn_conv2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor thnn_conv2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor out); -// aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor + +// aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor thnn_conv2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); +@Namespace("at") public static native @ByRef Tensor thnn_conv2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size); + + +// aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor thnn_conv2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef stride, @ByVal SymIntArrayRef padding, @ByRef Tensor out); + + +// aten::thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor @Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding); @Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); @Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); @Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); +// aten::thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor thnn_conv2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(1))") SymIntArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); +@Namespace("at") public static native @ByVal Tensor thnn_conv2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal SymIntArrayRef kernel_size); + + + // Parsed from ATen/ops/threshold.h @@ -57243,6 +58206,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -57381,6 +58345,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -57533,6 +58498,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -58009,6 +58975,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -58817,8 +59784,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at::native") public static native @Const @ByRef Tensor get_nested_sizes(@Const @ByRef Tensor tensor); - // namespace native - // namespace at + // namespace at::native // Parsed from torch/csrc/autograd/input_metadata.h @@ -58837,7 +59803,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #ifndef AT_PER_OPERATOR_HEADERS // #include @@ -58894,30 +59859,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from ATen/core/Variadic.h - -// #pragma once - -// #include -// #include -// #include -// #include - -// #include -// #include - -// This class allows you to 
write variadic functions which -// call a (possibly overloaded) function on each argument, -// in order. This is most commonly used in autogenerated code, -// where it is convenient to have a function that can uniformly -// take arguments of different types. If your arguments -// are homogenous consider using a std::initializer_list instead. -// -// For examples of this in use, see torch/csrc/utils/variadic.h - - // namespace torch - - // Parsed from torch/csrc/utils/variadic.h // #pragma once @@ -58953,23 +59894,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from ATen/SequenceNumber.h - -// #pragma once - -// #include -// #include - -// A simple thread local enumeration, used to link forward and backward pass -// ops and is used by autograd and observers framework - -@Namespace("at::sequence_number") public static native @Cast("uint64_t") long peek(); -@Namespace("at::sequence_number") public static native @Cast("uint64_t") long get_and_increment(); - - // namespace sequence_number - // namespace at - - // Parsed from torch/csrc/autograd/function.h // #pragma once @@ -58998,9 +59922,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") -// #endif - // Custom deleter to prevent stack overflows. @Namespace("torch::autograd") public static native void deleteNode(Node function); @@ -59047,7 +59968,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch - // Parsed from torch/csrc/autograd/custom_function.h // #pragma once @@ -59553,6 +60473,73 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND4( // SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__)) +// #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND5( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, SCALARTYPE5, ...) +// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) + +// #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND5( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// TYPE, +// NAME, +// ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND5( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// __VA_ARGS__)) + +// #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND6( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// SCALARTYPE6, +// ...) +// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__) + +// #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND6( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// SCALARTYPE6, +// TYPE, +// NAME, +// ...) 
+// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND6( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// SCALARTYPE6, +// __VA_ARGS__)) + // #define AT_DISPATCH_CASE_INTEGRAL_TYPES(...) // AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) // AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) @@ -59788,6 +60775,104 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // SCALARTYPE6, // __VA_ARGS__)) +// #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND7( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// SCALARTYPE6, +// SCALARTYPE7, +// ...) +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE7, __VA_ARGS__) + +// #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND7( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// SCALARTYPE6, +// SCALARTYPE7, +// TYPE, +// NAME, +// ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND7( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// SCALARTYPE6, +// SCALARTYPE7, +// __VA_ARGS__)) + +// #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND8( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// SCALARTYPE6, +// SCALARTYPE7, +// SCALARTYPE8, +// ...) +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE7, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE8, __VA_ARGS__) + +// #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND8( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// SCALARTYPE6, +// SCALARTYPE7, +// SCALARTYPE8, +// TYPE, +// NAME, +// ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND8( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// SCALARTYPE6, +// SCALARTYPE7, +// SCALARTYPE8, +// __VA_ARGS__)) + +// #define AT_DISPATCH_CASE_BIT_TYPES(...) +// AT_DISPATCH_CASE(at::ScalarType::Bits1x8, __VA_ARGS__) +// AT_DISPATCH_CASE(at::ScalarType::Bits2x4, __VA_ARGS__) +// AT_DISPATCH_CASE(at::ScalarType::Bits4x2, __VA_ARGS__) +// AT_DISPATCH_CASE(at::ScalarType::Bits8, __VA_ARGS__) +// AT_DISPATCH_CASE(at::ScalarType::Bits16, __VA_ARGS__) + +// #define AT_DISPATCH_BIT_TYPES(TYPE, NAME, ...) +// AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_BIT_TYPES(__VA_ARGS__)) + // #define AT_DISPATCH_INDEX_TYPES(TYPE, NAME, ...) // AT_DISPATCH_SWITCH( // TYPE, @@ -59831,8 +60916,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Const @ByRef Scalar s, @ByVal ScalarTypeOptional dtype_opt, @ByVal DeviceOptional device_opt); - // namespace detail - // namespace at + // namespace at::detail // This is in the c10 namespace because we use ADL to find the functions in it. 
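// The commented-out AT_DISPATCH_* entries above are C++ preprocessor macros,
// which JavaCPP cannot map: each one expands to a switch over at::ScalarType
// that selects a kernel per dtype. From Java, the same runtime branching can be
// written directly against the ScalarType enum. A minimal sketch, assuming only
// Tensor.scalar_type() from these presets; describe() is a hypothetical helper:

import org.bytedeco.pytorch.Tensor;
import org.bytedeco.pytorch.global.torch.ScalarType;

class DispatchSketch {
    // One branch per dtype, in the spirit of AT_DISPATCH_FLOATING_TYPES_AND(BFloat16, ...).
    static String describe(Tensor t) {
        ScalarType st = t.scalar_type();
        switch (st) {
            case Float:    return "fp32 path";
            case Double:   return "fp64 path";
            case BFloat16: return "bf16 path";
            default:       return "no kernel for " + st;
        }
    }
}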
@@ -59853,8 +60937,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at::native") public static native @ByVal Tensor wrapped_scalar_tensor( @Const @ByRef Scalar scalar); - // namespace native - // namespace at + // namespace at::native // Parsed from c10/util/strides.h @@ -59879,9 +60962,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy-dtor") -// #endif - // Use this to define the prototype for a meta function. There are two // versions; one that takes one argument (just the operator name), or FUNC2 // variant that takes two arguments (operator name and overload name). @@ -59936,7 +61016,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace at - // Parsed from ATen/core/Range.h // #pragma once @@ -60098,6 +61177,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { ScalarType src_type, @Const Pointer ptr); +@Namespace("c10") public static native @ByVal @Name("fetch_and_cast") Float8_e5m2fnuz fetch_and_cast_to_Float8_e5m2fnuz( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @ByVal @Name("fetch_and_cast") Float8_e4m3fnuz fetch_and_cast_to_Float8_e4m3fnuz( + ScalarType src_type, + @Const Pointer ptr); + // Cast a value with static type src_t into dynamic dest_type, and store it to // ptr. // #define CAST_AND_STORE_CASE(type, scalartype) @@ -60176,6 +61263,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { ScalarType dest_type, Pointer ptr, @ByVal Float8_e5m2 value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_Float8_e5m2fnuz( + ScalarType dest_type, + Pointer ptr, + @ByVal Float8_e5m2fnuz value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_Float8_e4m3fnuz( + ScalarType dest_type, + Pointer ptr, + @ByVal Float8_e4m3fnuz value); // #define DEFINE_UNCASTABLE(T, scalartype_) // template <> @@ -60216,11 +61311,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include - -// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") -// #endif -// #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy-dtor") -// #endif // namespace at // TensorIterator is a helper class for element-wise operations, such as @@ -60310,7 +61400,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace at - // Parsed from ATen/NativeFunctions.h // #pragma once @@ -60381,6 +61470,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -60519,6 +61609,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -60671,6 +61762,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -61147,6 +62239,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -61892,8 +62985,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Const @ByRef TensorIndexVector indices, @Const @ByRef Tensor value); - // namespace indexing - // namespace at + // namespace at::indexing // Parsed from ATen/TensorOperators.h @@ -62220,6 +63312,12 @@ public class torch extends 
org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include +// #include +// #include +// #include +// #include +// #include // #include // #include // #include @@ -62439,8 +63537,20 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch") public static native @ByVal @Name("kaiser_window") Tensor torch_kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double beta); @Namespace("torch") public static native @ByVal @Name("linspace") Tensor torch_linspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("linspace") Tensor torch_linspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); +@Namespace("torch") public static native @ByVal @Name("linspace") Tensor torch_linspace(@Const @ByRef Tensor start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("linspace") Tensor torch_linspace(@Const @ByRef Tensor start, @Const @ByRef Tensor end, @Cast("int64_t") long steps); +@Namespace("torch") public static native @ByVal @Name("linspace") Tensor torch_linspace(@Const @ByRef Tensor start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("linspace") Tensor torch_linspace(@Const @ByRef Tensor start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); +@Namespace("torch") public static native @ByVal @Name("linspace") Tensor torch_linspace(@Const @ByRef Scalar start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("linspace") Tensor torch_linspace(@Const @ByRef Scalar start, @Const @ByRef Tensor end, @Cast("int64_t") long steps); @Namespace("torch") public static native @ByVal @Name("logspace") Tensor torch_logspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base/*=10.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("logspace") Tensor torch_logspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); +@Namespace("torch") public static native @ByVal @Name("logspace") Tensor torch_logspace(@Const @ByRef Tensor start, @Const @ByRef Tensor end, @Cast("int64_t") long steps, double base/*=10.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("logspace") Tensor torch_logspace(@Const @ByRef Tensor start, @Const @ByRef Tensor end, @Cast("int64_t") long steps); +@Namespace("torch") public static native @ByVal @Name("logspace") Tensor torch_logspace(@Const @ByRef Tensor start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base/*=10.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("logspace") Tensor torch_logspace(@Const @ByRef Tensor start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); +@Namespace("torch") public static native @ByVal @Name("logspace") Tensor torch_logspace(@Const @ByRef Scalar start, @Const @ByRef Tensor end, 
@Cast("int64_t") long steps, double base/*=10.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("logspace") Tensor torch_logspace(@Const @ByRef Scalar start, @Const @ByRef Tensor end, @Cast("int64_t") long steps); @Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); @Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @@ -62606,260 +63716,23 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from c10/core/PyHandleCache.h - -// #pragma once - -// #include -// #include -// #include - -// #include - -// A PyHandleCache represents a cached pointer from a C++ object to -// a Python object that represents that object analogously in Python. -// Upon a cache hit, the relevant object can be retrieved after a test -// and then a memory load. Two conditions must hold to be able to use this -// class: -// -// - This must truly be a cache; e.g., the caller must be able to produce -// the object some other way if the cache hit misses. -// -// - This must truly be a handle; e.g., the Python object referenced by -// this class must have static lifetime. This means we don't have to -// maintain strong ownership or deallocate the object when the C++ object -// dies. Static lifetime is a good idea in conjunction with the cache, -// since if you are producing a fresh object on miss you won't be -// maintaining object identity. If you need bidirectional ownership, -// you will want to factor out the pattern in TensorImpl with -// resurrection. -// -// This cache is expected to not improve perf under torchdeploy, as one -// interpreter will fill up the cache, and all the interpreters will be -// unable to use the slot. A potential improvement is to have multiple -// slots (one per interpreter), which will work in deployment scenarios -// where there a stable, fixed number of interpreters. You can also store -// the relevant state in the Python library, rather than in the non-Python -// library (although in many cases, this is not convenient, as there may -// not be a way to conveniently index based on the object.) - - // namespace c10 - - -// Parsed from c10/util/Bitset.h - -// #pragma once - -// #include -// #include -// #include -// #if defined(_MSC_VER) -// #endif -// Targeting ../bitset.java - - - -@Namespace("c10::utils") public static native @Cast("bool") @Name("operator !=") @NoException(true) boolean notEquals(@ByVal bitset lhs, @ByVal bitset rhs); - - // namespace utils - // namespace c10 - - -// Parsed from ATen/core/dispatch/DispatchKeyExtractor.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// Take a DispatchKeySet for a Tensor and determine what the actual dispatch -// DispatchKey should be, taking into account TLS, and skipping backends which -// fall through. -// -// Unlike Tensor::key_set(), the value of this on a tensor can change depending -// on TLS. 
-// -// NB: If there is no valid dispatch key, this will return Undefined -@Namespace("c10::impl") public static native @ByVal DispatchKeySet computeDispatchKeySet( - @ByVal DispatchKeySet ks, - @ByVal DispatchKeySet key_mask -); - - - // A small gadget to extract the DispatchKeySet from types which are known - // to have it. Used to extract dispatch keys from unboxed calls. - - // NB: take by const reference (Don't do universal forwarding here! You - // don't want to move into this function!) - -// Targeting ../DispatchKeyExtractor.java - - - - - - -// Parsed from ATen/core/dispatch/OperatorEntry.h +// Parsed from torch/csrc/jit/frontend/function_schema_parser.h // #pragma once // #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #include -// #include -// #include -// #include - -// #include -// #include - -// #ifdef C10_MOBILE -// #endif - -// This data structure represents a kernel that was registered to us from a -// user. Unlike KernelFunction, AnnotatedKernel contains some extra metadata -// about the kernel that isn't necessary for actual dispatching (this is why -// we don't put AnnotatedKernel in the actual DispatchTable), but is useful for -// giving good error messages. - -// This data structure represents operator schema, with metadata specifying -// where the registration of this schema occurred - -// Internal data structure that records information about a specific operator. -// It's not part of the public API; typically, users will interact with -// OperatorHandle instead. -// -// Concurrent writes to OperatorEntry are protected by the GLOBAL Dispatcher -// lock (this is important because some methods in OperatorEntry access -// dispatcher state) - - // namespace impl - // namespace c10 - - -// Parsed from c10/util/Synchronized.h - -// #pragma once - -// #include - -// #include - -/** - * A very simple Synchronization class for error-free use of data - * in a multi-threaded context. See folly/docs/Synchronized.md for - * the inspiration of this class. - * - * Full URL: - * https://github.com/facebook/folly/blob/main/folly/docs/Synchronized.md - * - * This class implements a small subset of the generic functionality - * implemented by folly:Synchronized. Specifically, only withLock - * is implemented here since it's the smallest possible API that is - * able to cover a large surface area of functionality offered by - * folly::Synchronized. - */ - // end namespace c10 - - -// Parsed from ATen/core/dispatch/Dispatcher.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #include -// #include - -// #ifndef NDEBUG -// #include -// #endif - -@Namespace("c10") public static native @Cast("bool") boolean show_dispatch_trace(); -@Namespace("c10") public static native void dispatch_trace_nesting_incr(); -@Namespace("c10") public static native void dispatch_trace_nesting_decr(); -@Namespace("c10") public static native @Cast("int64_t") long dispatch_trace_nesting_value(); -// Targeting ../OpRegistrationListener.java - - - -// Targeting ../Dispatcher.java - - -// Targeting ../OperatorHandle.java - - - -/** - * This is a handle to an operator schema registered with the dispatcher. - * It holds the same information as an OperatorHandle, but it is templated - * on the operator arguments and allows calling the operator in an - * unboxed way. 
- */ - -// CaptureKernelCall is intended to capture return values from Dispatcher -// unboxed kernel calls. A record function may request to get outputs from the -// kernel calls. For boxed kernels, it's straightforward, the returned values -// are in the stack object. The stack can be passed to record functions. For -// unboxed kernels, we need to handle different kinds of return values, cache -// them temporarily, then release the values for the actual function call -// return. - -// Handle the lvalue reference differently since it should not be moved. - - -// Handle case where the kernel returns void. - - // namespace detail - -// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && - - -// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && - - -// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && - - - - -// NB: this doesn't count as a "true" dispatcher jump, so no instrumentation - - +// #include +// #include +// #include - // namespace c10 +@Namespace("torch::jit") public static native @ByVal FunctionSchema parseSchema(@StdString BytePointer schema); +@Namespace("torch::jit") public static native @ByVal FunctionSchema parseSchema(@StdString String schema); +@Namespace("torch::jit") public static native @ByVal OperatorName parseName(@StdString BytePointer name); +@Namespace("torch::jit") public static native @ByVal OperatorName parseName(@StdString String name); - // namespace std + // namespace jit + // namespace torch // Parsed from torch/csrc/api/include/torch/types.h @@ -63125,19 +63998,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/csrc/utils/memory.h - -// #pragma once - -// #include - -// Reference: -// https://github.com/llvm-mirror/libcxx/blob/master/include/memory#L3091 - - - // namespace torch - - // Parsed from torch/csrc/api/include/torch/data/dataloader/base.h // #pragma once @@ -63150,7 +64010,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include // #include // #include @@ -63245,8 +64104,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include - // #include // #include @@ -63297,7 +64154,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include // #include // #include @@ -63651,6 +64507,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // Targeting ../SourceRangeUnpickler.java @@ -63834,8 +64691,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/runtime/operator.h @@ -63850,6 +64706,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -63863,7 +64720,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include + + // Targeting ../Operator.java @@ -63873,6 +64733,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch::jit") public static native @Const @ByVal OperatorVector getAllOperators(); @Namespace("torch::jit") public static native @Const @ByRef OperatorVector getAllOperatorsFor( @ByVal Symbol name); +// Returns operators in the order which OpOverloadPacket 
resolves them. +@Namespace("torch::jit") public static native @ByVal OperatorVector getAllSortedOperatorsFor( + @ByVal Symbol name); // given a operator with an overload name, find the specific operator related to // it, may return nullptr if no operator exists. @@ -63905,8 +64768,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // compile-time function for the selective op registration based on schema // string. - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/utils/schema_info.h @@ -64153,8 +65015,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include @Namespace("torch::jit") public static native void setGraphExecutorOptimize(@Cast("bool") boolean o); @Namespace("torch::jit") public static native @Cast("bool") boolean getGraphExecutorOptimize(); - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/runtime/argument_spec.h @@ -64205,8 +65066,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch::jit") public static native @ByVal ByteOptional convertOptional( @Const @ByRef ScalarTypeOptional from); - // namespace jit - // namespace torch + // namespace torch::jit // namespace std @@ -64224,9 +65084,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy-dtor") -// #endif - // namespace at @@ -64261,9 +65118,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch::jit") public static native @StdVector StackEntry currentCallstack(); @Namespace("torch::jit") public static native @ByVal StringVector currentModuleHierarchy(); - // namespace jit - // namespace torch - + // namespace torch::jit // Parsed from torch/csrc/jit/runtime/variable_tensor_list.h @@ -64274,8 +65129,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // a wrapper to mark places where we expect all the at::Tensors to be // variables - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/runtime/graph_executor.h @@ -64357,8 +65211,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // with less plumbing. // namespace detail - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/api/function_impl.h @@ -64368,7 +65221,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // Targeting ../GraphFunction.java @@ -64377,8 +65229,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch::jit") public static native @NoException(true) GraphFunction tryToGraphFunction(@ByRef Function arg0); @Namespace("torch::jit") public static native @ByRef GraphFunction toGraphFunction(@ByRef Function arg0); - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/api/method.h @@ -64397,8 +65248,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // of the public API; new code should not use this type alias. // namespace script - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/api/object.h @@ -64420,8 +65270,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // We once had a `script::` namespace that was deleted. This is for backcompat // of the public API; new code should not use this type alias. 
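// The JIT API surface declared through this stretch (graph-executor switches,
// function and module wrappers) is reached from Java through the usual preset
// imports. A minimal inference sketch, assuming a TorchScript module exported
// from Python beforehand; "model.pt" is a placeholder path:

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

class JitInferenceSketch {
    public static void main(String[] args) {
        JitModule module = load("model.pt"); // torch::jit::load, declared in this file
        module.eval();
        IValueVector inputs = new IValueVector();
        inputs.push_back(new IValue(ones(1, 3))); // dummy 1x3 input tensor
        Tensor output = module.forward(inputs).toTensor();
        output.print();
    }
}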
// namespace script - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/api/include/torch/ordered_dict.h @@ -64550,7 +65399,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include // #include // #include @@ -64577,8 +65425,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // We once had a `script::` namespace that was deleted. This is for backcompat // of the public API; new code should not use this type alias. // namespace script - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/api/module.h @@ -64596,7 +65443,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #include // #include @@ -64769,8 +65615,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // of the public API; new code should not use this type alias. // namespace script - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/api/include/torch/serialize/input-archive.h @@ -65016,7 +65861,6 @@ The list of (type, depth) pairs controls the type of specializations and the num // #include // #include -// #include // #include // #include // #include @@ -65287,10 +66131,10 @@ The list of (type, depth) pairs controls the type of specializations and the num // #pragma once // #include +// #include // #include // #include -// #include // #include // #define TORCH_ENUM_DECLARE(name) @@ -65328,7 +66172,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // // ``` // struct TORCH_API SomeOptions { -// typedef c10::variant +// typedef std::variant // reduction_t; SomeOptions(reduction_t reduction = torch::kMean) : // reduction_(reduction) {} // @@ -66991,7 +67835,6 @@ The list of (type, depth) pairs controls the type of specializations and the num // #include // #include -// #include // #include // #include @@ -67105,7 +67948,6 @@ The list of (type, depth) pairs controls the type of specializations and the num // #define AT_MKLDNN_ACL_ENABLED() 0 // #define AT_MKL_ENABLED() 0 // #define AT_MKL_SEQUENTIAL() 0 -// #define AT_FFTW_ENABLED() 1 // #define AT_POCKETFFT_ENABLED() 1 // #define AT_NNPACK_ENABLED() 1 // #define CAFFE2_STATIC_LINK_CUDA() 0 @@ -67350,6 +68192,7 @@ scalar_t sf(scalar_t x, scalar_t y) // #pragma once // #include +// #include // #include // #include @@ -67398,27 +68241,6 @@ scalar_t sf(scalar_t x, scalar_t y) // #include // #include -// #ifndef _WIN32 -// #include -// #endif -// #if defined(C10_IOS) && defined(C10_MOBILE) -// #include // for gettimeofday() -// #endif - -// #if defined(__i386__) || defined(__x86_64__) || defined(__amd64__) -// #define C10_RDTSC -// #if defined(_MSC_VER) -// #elif defined(__CUDACC__) || defined(__HIPCC__) -// #elif defined(__clang__) -// `__rdtsc` is available by default. -// NB: This has to be first, because Clang will also define `__GNUC__` -// #elif defined(__GNUC__) -// #include -// #else -// #undef C10_RDTSC -// #endif -// #endif - // TODO: replace with pytorch/rfcs#43 when it is ready. // #define SOFT_ASSERT(cond, ...) 
// [&]() -> bool { @@ -67464,22 +68286,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Cast("uint32_t") int line, String cond, @ByVal CompileTimeEmptyString args); - -@Namespace("torch::profiler::impl") public static native @Cast("torch::profiler::impl::time_t") long getTimeSinceEpoch(); - -@Namespace("torch::profiler::impl") public static native @Cast("torch::profiler::impl::time_t") long getTime(@Cast("bool") boolean allow_monotonic/*=false*/); -@Namespace("torch::profiler::impl") public static native @Cast("torch::profiler::impl::time_t") long getTime(); - -// We often do not need to capture true wall times. If a fast mechanism such -// as TSC is available we can use that instead and convert back to epoch time -// during post processing. This greatly reduce the clock's contribution to -// profiling. -// http://btorpey.github.io/blog/2014/02/18/clock-sources-in-linux/ -// https://quick-bench.com/q/r8opkkGZSJMu9wM_XTbDouq-0Io -// TODO: We should use -// `https://github.com/google/benchmark/blob/main/src/cycleclock.h` - -// Convert `getCount` results to Nanoseconds since unix epoch. +@Namespace("torch::profiler::impl") @MemberGetter public static native int TENSOR_LIST_DISPLAY_LENGTH_LIMIT(); // Targeting ../FileLineFunc.java @@ -67501,6 +68308,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Cast("const bool") boolean flatten_list_enabled/*=false*/); @Namespace("torch::profiler::impl") public static native @Cast("std::vector*") @StdVector LongVector inputSizes( @Const @ByRef RecordFunction fn); +@Namespace("torch::profiler::impl") public static native @StdString BytePointer variantShapesToStr(@Cast("torch::profiler::impl::shape*") @StdVector Nonlinearity shapes); @Namespace("torch::profiler::impl") public static native @StdString BytePointer shapesToStr( @Cast("std::vector*") @StdVector LongVector shapes); @Namespace("torch::profiler::impl") public static native @StdString BytePointer strListToStr(@Const @ByRef StringVector types); @@ -67510,6 +68318,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Namespace("torch::profiler::impl") public static native @ByVal StringVector inputTypes(@Const @ByRef RecordFunction fn); @Namespace("torch::profiler::impl") public static native @ByVal StringIValueMap saveExtraArgs(@Const @ByRef RecordFunction fn); +@Namespace("torch::profiler::impl") public static native @ByVal ExtraFilesMap saveNcclMeta(@Const @ByRef RecordFunction fn); @Namespace("torch::profiler::impl") public static native @Cast("uint64_t") long computeFlops( @StdString BytePointer op_name, @@ -67518,6 +68327,8 @@ scalar_t sf(scalar_t x, scalar_t y) @StdString String op_name, @Const @ByRef StringIValueMap extra_args); +@Namespace("torch::profiler::impl") public static native @StdString BytePointer shapeToStr(@Cast("const std::vector*") @ByRef LongVector shape); + // namespace impl // namespace profiler // namespace torch @@ -69585,7 +70396,6 @@ scalar_t sf(scalar_t x, scalar_t y) // #pragma once -// #include // #include // #include // #include @@ -70631,7 +71441,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Const @ByRef Tensor input, @Const @ByRef Tensor target, @ByVal LossReduction reduction, - double beta/*=1.*/); + @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional beta_opt); @Namespace("torch::nn::functional::detail") public static native @ByVal Tensor smooth_l1_loss( @Const @ByRef Tensor input, @Const @ByRef Tensor target, @@ -70651,11 +71461,27 @@ scalar_t sf(scalar_t x, scalar_t y) /** namespace F = torch::nn::functional; /** F::smooth_l1_loss(input, target, 
F::SmoothL1LossFuncOptions(torch::kNone)); /** } */ + +/// @Namespace("torch::nn::functional") public static native @ByVal Tensor smooth_l1_loss( @Const @ByRef Tensor input, @Const @ByRef Tensor target, - @Cast("const torch::nn::functional::SmoothL1LossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::SmoothL1LossFuncOptions{}") SmoothL1LossOptions options, - double beta/*=1.*/); + @Cast("const torch::nn::functional::SmoothL1LossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::SmoothL1LossFuncOptions{}") SmoothL1LossOptions options); + +/** See + * https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.smooth_l1_loss + * about the exact behavior of this functional. + * + * Example: + *
<pre>{@code
+ *  namespace F = torch::nn::functional;
+ *  F::smooth_l1_loss(input, target, /*options=* /torch::kNone, /*beta=* /0.5);
+ *  }</pre>
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor smooth_l1_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::SmoothL1LossFuncOptions*") @ByRef SmoothL1LossOptions options, + double beta); // ============================================================================ @@ -71082,7 +71908,6 @@ scalar_t sf(scalar_t x, scalar_t y) // #pragma once -// #include // #include // #include // #include @@ -72727,7 +73552,6 @@ scalar_t sf(scalar_t x, scalar_t y) // #pragma once -// #include // #include // #include // #include @@ -73134,7 +73958,6 @@ scalar_t sf(scalar_t x, scalar_t y) // #include // #include -// #include // #include // #include @@ -73249,7 +74072,6 @@ scalar_t sf(scalar_t x, scalar_t y) // #include // #include -// #include // #include // #include @@ -77080,14 +77902,14 @@ scalar_t sf(scalar_t x, scalar_t y) public static final int TORCH_VERSION_MAJOR = 2; /** Indicates the minor version of LibTorch. */ -public static final int TORCH_VERSION_MINOR = 1; +public static final int TORCH_VERSION_MINOR = 2; /** Indicates the patch version of LibTorch. */ -public static final int TORCH_VERSION_PATCH = 2; +public static final int TORCH_VERSION_PATCH = 0; /** Indicates the version of LibTorch. */ public static final String TORCH_VERSION = - "2.1.2"; + "2.2.0"; // Parsed from torch/csrc/autograd/InferenceMode.h @@ -77834,6 +78656,12 @@ scalar_t sf(scalar_t x, scalar_t y) // other information. @Namespace("caffe2::serialize") @MemberGetter public static native @Cast("const char*") BytePointer kSerializationIdRecordName(); +// Targeting ../MzZipReaderIterWrapper.java + + +// Targeting ../ChunkRecordIterator.java + + // Targeting ../PyTorchStreamReader.java @@ -78029,19 +78857,19 @@ scalar_t sf(scalar_t x, scalar_t y) * serialized {@code Module}, exported either via {@code ScriptModule.save()} in * Python or {@code torch::jit::ExportModule} in C++. */ @Namespace("torch::jit") public static native @ByVal JitModule load( - @SharedPtr ReadAdapterInterface rai, + @SharedPtr("caffe2::serialize::ReadAdapterInterface") @ByVal ReadAdapterInterface rai, @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, @Cast("bool") boolean load_debug_files/*=true*/); @Namespace("torch::jit") public static native @ByVal JitModule load( - @SharedPtr ReadAdapterInterface rai); + @SharedPtr("caffe2::serialize::ReadAdapterInterface") @ByVal ReadAdapterInterface rai); @Namespace("torch::jit") public static native @ByVal JitModule load( - @SharedPtr ReadAdapterInterface rai, + @SharedPtr("caffe2::serialize::ReadAdapterInterface") @ByVal ReadAdapterInterface rai, @ByVal DeviceOptional device, @ByRef ExtraFilesMap extra_files, @Cast("bool") boolean load_debug_files/*=true*/); @Namespace("torch::jit") public static native @ByVal JitModule load( - @SharedPtr ReadAdapterInterface rai, + @SharedPtr("caffe2::serialize::ReadAdapterInterface") @ByVal ReadAdapterInterface rai, @ByVal DeviceOptional device, @ByRef ExtraFilesMap extra_files); @@ -78102,6 +78930,10 @@ scalar_t sf(scalar_t x, scalar_t y) @Cast("std::istream*") @ByRef Pointer in, @ByRef ExtraFilesMap extra_files); +@Namespace("torch::jit") public static native @ByVal ObjPtr ObjLoaderFunc( + @Const @ByRef StrongTypePtr type, + @ByVal IValue input); + // namespace jit // namespace torch @@ -78263,12 +79095,12 @@ scalar_t sf(scalar_t x, scalar_t y) // Register function pointer of Tensor BackendMetadata for serialization. 
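// Registration is keyed on the tensor's DeviceType: the getter/setter pair
// passed in is invoked when tensors of that backend are pickled and unpickled,
// so a backend can round-trip its extra metadata.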
@Namespace("torch::jit") public static native void TensorBackendMetaRegistry( DeviceType t, - @ByVal @Cast("torch::jit::BackendMetaPtr*") Pointer get_fptr, - @ByVal @Cast("torch::jit::BackendMetaPtr*") Pointer set_fptr); + @Cast("const torch::jit::BackendMetaPtr*") @ByRef Pointer get_fptr, + @Cast("const torch::jit::BackendMetaPtr*") @ByRef Pointer set_fptr); @Namespace("torch::jit") public static native void TensorBackendMetaRegistry( @Cast("c10::DeviceType") byte t, - @ByVal @Cast("torch::jit::BackendMetaPtr*") Pointer get_fptr, - @ByVal @Cast("torch::jit::BackendMetaPtr*") Pointer set_fptr); + @Cast("const torch::jit::BackendMetaPtr*") @ByRef Pointer get_fptr, + @Cast("const torch::jit::BackendMetaPtr*") @ByRef Pointer set_fptr); // Return a map of Tensor Metadata which including BackendMetaData for // serialization. For now, it only takes care of `conj` and `neg` bit. @@ -78285,7 +79117,7 @@ scalar_t sf(scalar_t x, scalar_t y) // NOTE: This overload is required by unpickler.cpp @Namespace("torch::jit") public static native void setTensorMetadata( @Const @ByRef Tensor t, - @ByVal GenericDict metadata_idict); + @Const @ByRef GenericDict metadata_idict); // namespace jit // namespace torch @@ -78646,6 +79478,34 @@ scalar_t sf(scalar_t x, scalar_t y) +/* + * NOTE: transforming PEP 604 union into equivalent union type + * + * NOTE: Union[int, float] parses into: + * expr:(subscript + * (variable (ident Union)) + * (list + * (variable (ident int)) + * (variable (ident float)))) + * subscript + * + * NOTE: (int | float) parses into: + * expr:(| + * (variable (ident int)) + * (variable (ident float))) + * | + */ + +@Namespace("torch::jit") public static native void _flatten_pep604_union( + @Const @ByRef Expr node, + @StdVector Expr result); + +@Namespace("torch::jit") public static native @StdVector Expr get_pep604_union_members(@Const @ByRef Expr node); + +// Flattens a PEP 604 union into a classical union. +// For example, ((x | y) | z) is transformed into Union[x, y, z]. +@Namespace("torch::jit") public static native @ByVal Expr pep604union_to_union(@Const @ByRef Expr expr); + // namespace jit // namespace torch @@ -78738,7 +79598,8 @@ scalar_t sf(scalar_t x, scalar_t y) @ByVal PickleReader reader, @ByVal TypeResolver type_resolver, @ByVal TensorArrayRef tensor_table, - TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/); + TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/, + @ByVal(nullValue = "torch::jit::ObjLoader(nullptr)") ObjLoader obj_loader); @Namespace("torch::jit") public static native @ByVal IValue unpickle( @ByVal PickleReader reader, @ByVal TypeResolver type_resolver, @@ -78747,7 +79608,8 @@ scalar_t sf(scalar_t x, scalar_t y) @ByVal PickleReader reader, @ByVal TypeResolver type_resolver, @ByVal TensorVector tensor_table, - TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/); + TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/, + @ByVal(nullValue = "torch::jit::ObjLoader(nullptr)") ObjLoader obj_loader); @Namespace("torch::jit") public static native @ByVal IValue unpickle( @ByVal PickleReader reader, @ByVal TypeResolver type_resolver, @@ -78759,6 +79621,9 @@ scalar_t sf(scalar_t x, scalar_t y) * {@code class_resolver} function must be provided. * * See {@code torch::pickle} for details. 
*/ + +/// +/// @Namespace("torch::jit") public static native @ByVal IValue unpickle( @Cast("const char*") BytePointer data, @Cast("size_t") long size, @@ -78790,6 +79655,49 @@ scalar_t sf(scalar_t x, scalar_t y) @ByVal(nullValue = "c10::ArrayRef{}") TensorArrayRef tensor_table, TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/); +/** Decode a chunk of memory containing pickled data into its {@code torch::IValue}s. + * + * If any {@code torch::IValue}s in the pickled data are {@code Object}s, then a + * {@code class_resolver} function must be provided. + * + * See {@code torch::pickle} for details. */ +@Namespace("torch::jit") public static native @ByVal IValue unpickle( + @Cast("const char*") BytePointer data, + @Cast("size_t") long size, + @ByVal ObjLoader obj_loader, + @ByVal(nullValue = "torch::jit::TypeResolver(nullptr)") TypeResolver type_resolver, + @ByVal(nullValue = "c10::ArrayRef{}") TensorArrayRef tensor_table, + TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/); +@Namespace("torch::jit") public static native @ByVal IValue unpickle( + @Cast("const char*") BytePointer data, + @Cast("size_t") long size, + @ByVal ObjLoader obj_loader); +@Namespace("torch::jit") public static native @ByVal IValue unpickle( + String data, + @Cast("size_t") long size, + @ByVal ObjLoader obj_loader, + @ByVal(nullValue = "torch::jit::TypeResolver(nullptr)") TypeResolver type_resolver, + @ByVal(nullValue = "c10::ArrayRef{}") TensorVector tensor_table, + TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/); +@Namespace("torch::jit") public static native @ByVal IValue unpickle( + String data, + @Cast("size_t") long size, + @ByVal ObjLoader obj_loader); +@Namespace("torch::jit") public static native @ByVal IValue unpickle( + @Cast("const char*") BytePointer data, + @Cast("size_t") long size, + @ByVal ObjLoader obj_loader, + @ByVal(nullValue = "torch::jit::TypeResolver(nullptr)") TypeResolver type_resolver, + @ByVal(nullValue = "c10::ArrayRef{}") TensorVector tensor_table, + TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/); +@Namespace("torch::jit") public static native @ByVal IValue unpickle( + String data, + @Cast("size_t") long size, + @ByVal ObjLoader obj_loader, + @ByVal(nullValue = "torch::jit::TypeResolver(nullptr)") TypeResolver type_resolver, + @ByVal(nullValue = "c10::ArrayRef{}") TensorArrayRef tensor_table, + TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/); + // namespace jit // namespace torch diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java index deec3af1401..e0489632c98 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java @@ -1,11 +1,11 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.global; import org.bytedeco.pytorch.cuda.*; import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.cuda.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; @@ -61,7 +61,6 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // #pragma once -// #include // #include // #include // #include @@ -105,6 +104,87 @@ public class torch_cuda extends 
org.bytedeco.pytorch.presets.torch_cuda { // namespace c10 +// Parsed from ATen/cuda/CUDAContextLight.h + +// #pragma once +// Light-weight version of CUDAContext.h with fewer transitive includes + +// #include + +// #include +// #include +// #include + +// #ifdef CUDART_VERSION +// #include +// #endif + +// #if defined(USE_ROCM) && ROCM_VERSION >= 50300 +// #include +// #endif + +// #include +// #include + + +/* +A common CUDA interface for ATen. + +This interface is distinct from CUDAHooks, which defines an interface that links +to both CPU-only and CUDA builds. That interface is intended for runtime +dispatch and should be used from files that are included in both CPU-only and +CUDA builds. + +CUDAContext, on the other hand, should be preferred by files only included in +CUDA builds. It is intended to expose CUDA functionality in a consistent +manner. + +This means there is some overlap between the CUDAContext and CUDAHooks, but +the choice of which to use is simple: use CUDAContext when in a CUDA-only file, +use CUDAHooks otherwise. + +Note that CUDAContext simply defines an interface with no associated class. +It is expected that the modules whose functions compose this interface will +manage their own state. There is only a single CUDA context/state. +*/ + +/** + * DEPRECATED: use device_count() instead + */ +@Namespace("at::cuda") public static native @Cast("int64_t") long getNumGPUs(); + +/** + * CUDA is available if we compiled with CUDA, and there are one or more + * devices. If we compiled with CUDA but there is a driver problem, etc., + * this function will report CUDA is not available (rather than raise an error.) + */ +@Namespace("at::cuda") public static native @Cast("bool") boolean is_available(); + +@Namespace("at::cuda") public static native Pointer getCurrentDeviceProperties(); + +@Namespace("at::cuda") public static native int warp_size(); + +@Namespace("at::cuda") public static native Pointer getDeviceProperties(@Cast("int64_t") long device); + +@Namespace("at::cuda") public static native @Cast("bool") boolean canDeviceAccessPeer( + @Cast("int64_t") long device, + @Cast("int64_t") long peer_device); + +@Namespace("at::cuda") public static native Allocator getCUDADeviceAllocator(); + +/* Handles */ +@Namespace("at::cuda") public static native @Cast("cusparseHandle_t") Pointer getCurrentCUDASparseHandle(); +@Namespace("at::cuda") public static native @Cast("cublasHandle_t") Pointer getCurrentCUDABlasHandle(); + +@Namespace("at::cuda") public static native void clearCublasWorkspaces(); + +// #if defined(CUDART_VERSION) || defined(USE_ROCM) && ROCM_VERSION >= 50300 +@Namespace("at::cuda") public static native @Cast("cusolverDnHandle_t") Pointer getCurrentCUDASolverDnHandle(); +// #endif + + // namespace at::cuda + + // Parsed from c10/cuda/CUDAStream.h // #pragma once @@ -234,84 +314,14 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // #pragma once -// #include - -// #include -// #include -// #include - -// #ifdef CUDART_VERSION -// #include -// #endif - -// #if defined(USE_ROCM) && ROCM_VERSION >= 50300 -// #include -// #endif +// #include -// #include +// Preserved for BC, as many files depend on these includes // #include // #include -// #include // #include // #include -/* -A common CUDA interface for ATen. - -This interface is distinct from CUDAHooks, which defines an interface that links -to both CPU-only and CUDA builds. 
That interface is intended for runtime -dispatch and should be used from files that are included in both CPU-only and -CUDA builds. - -CUDAContext, on the other hand, should be preferred by files only included in -CUDA builds. It is intended to expose CUDA functionality in a consistent -manner. - -This means there is some overlap between the CUDAContext and CUDAHooks, but -the choice of which to use is simple: use CUDAContext when in a CUDA-only file, -use CUDAHooks otherwise. - -Note that CUDAContext simply defines an interface with no associated class. -It is expected that the modules whose functions compose this interface will -manage their own state. There is only a single CUDA context/state. -*/ - -/** - * DEPRECATED: use device_count() instead - */ -@Namespace("at::cuda") public static native @Cast("int64_t") long getNumGPUs(); - -/** - * CUDA is available if we compiled with CUDA, and there are one or more - * devices. If we compiled with CUDA but there is a driver problem, etc., - * this function will report CUDA is not available (rather than raise an error.) - */ -@Namespace("at::cuda") public static native @Cast("bool") boolean is_available(); - -@Namespace("at::cuda") public static native Pointer getCurrentDeviceProperties(); - -@Namespace("at::cuda") public static native int warp_size(); - -@Namespace("at::cuda") public static native Pointer getDeviceProperties(@Cast("int64_t") long device); - -@Namespace("at::cuda") public static native @Cast("bool") boolean canDeviceAccessPeer( - @Cast("int64_t") long device, - @Cast("int64_t") long peer_device); - -@Namespace("at::cuda") public static native Allocator getCUDADeviceAllocator(); - -/* Handles */ -@Namespace("at::cuda") public static native @Cast("cusparseHandle_t") Pointer getCurrentCUDASparseHandle(); -@Namespace("at::cuda") public static native @Cast("cublasHandle_t") Pointer getCurrentCUDABlasHandle(); - -@Namespace("at::cuda") public static native void clearCublasWorkspaces(); - -// #if defined(CUDART_VERSION) || defined(USE_ROCM) && ROCM_VERSION >= 50300 -@Namespace("at::cuda") public static native @Cast("cusolverDnHandle_t") Pointer getCurrentCUDASolverDnHandle(); -// #endif - - // namespace at::cuda - // Parsed from c10/core/impl/GPUTrace.h @@ -674,6 +684,65 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // namespace c10 +// Parsed from c10/util/ApproximateClock.h + +// Copyright 2023-present Facebook. All Rights Reserved. + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include + +// #ifndef _WIN32 +// #include +// #endif +// #if defined(C10_IOS) && defined(C10_MOBILE) +// #include // for gettimeofday() +// #endif + +// #if defined(__i386__) || defined(__x86_64__) || defined(__amd64__) +// #define C10_RDTSC +// #if defined(_MSC_VER) +// #elif defined(__CUDACC__) || defined(__HIPCC__) +// #elif defined(__clang__) +// `__rdtsc` is available by default. +// NB: This has to be first, because Clang will also define `__GNUC__` +// #elif defined(__GNUC__) +// #include +// #else +// #undef C10_RDTSC +// #endif +// #endif + +@Namespace("c10") public static native @Cast("c10::time_t") long getTimeSinceEpoch(); + +@Namespace("c10") public static native @Cast("c10::time_t") long getTime(@Cast("bool") boolean allow_monotonic/*=false*/); +@Namespace("c10") public static native @Cast("c10::time_t") long getTime(); + +// We often do not need to capture true wall times. 
If a fast mechanism such
+// as TSC is available we can use that instead and convert back to epoch time
+// during post processing. This greatly reduces the clock's contribution to
+// profiling.
+// http://btorpey.github.io/blog/2014/02/18/clock-sources-in-linux/
+// https://quick-bench.com/q/r8opkkGZSJMu9wM_XTbDouq-0Io
+// TODO: We should use
+// `https://github.com/google/benchmark/blob/main/src/cycleclock.h`
+// Targeting ../cuda/ApproximateClockToUnixTimeConverter.java
+
+
+
+ // namespace c10
+
+
// Parsed from c10/cuda/CUDACachingAllocator.h
// #pragma once
@@ -683,6 +752,7 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda {
// #include
// #include
// #include
+// #include
// #include
// #include
@@ -696,6 +766,21 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda {
// #define REGISTER_FREE_MEMORY_CALLBACK(name, ...)
// C10_REGISTER_CLASS(FreeCudaMemoryCallbacksRegistry, name, __VA_ARGS__);
+
+// TODO: Turn this into an honest to goodness class. I briefly attempted to do
+// this, but it was a bit irritating to figure out how to also correctly
+// apply pimpl pattern so I didn't have to leak any internal implementation
+// details in the header (CUDACachingAllocator could be made a pimpl, but
+// you also need to appropriately define a class which is a subclass
+// of Allocator. Not impossible, but required a bit more surgery than
+// I wanted to do at the time.)
+//
+// Why is this using a namespace rather than old-style THCCachingAllocator_
+// prefix? Mostly because it made the HIPify rules easier to write; _ is
+// not counted as a word boundary, so you would otherwise have to list each
+// of these functions.
+
+@Namespace("c10::cuda::CUDACachingAllocator") @MemberGetter public static native @Cast("const size_t") long kLargeBuffer();
// Targeting ../cuda/Stat.java
@@ -724,6 +809,9 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda {
// Targeting ../cuda/AllocatorState.java
+// Targeting ../cuda/trace_time_.java
+
+
// Targeting ../cuda/TraceEntry.java
@@ -747,9 +835,6 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda {
@Override public String toString() { return intern().name(); }
}
-@Namespace("c10::cuda::CUDACachingAllocator") public static native void setAllocatorSettings(@StdString BytePointer env);
-@Namespace("c10::cuda::CUDACachingAllocator") public static native void setAllocatorSettings(@StdString String env);
-
// Size pretty-printer
// Targeting ../cuda/CUDAAllocator.java
@@ -809,6 +894,8 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda {
+@Namespace("c10::cuda::CUDACachingAllocator") public static native void attachAllocatorTraceTracker(@ByVal AllocatorTraceTracker tracker);
+
// Not part of CUDA_ALLOCATOR_BACKEND_INTERFACE
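
Taken together, the new pieces above form a callback path: the caching allocator invokes the native std::function, and JavaCPP forwards it to a Java override of AllocatorTraceTracker.call(). A minimal, hypothetical sketch of how a tracker might be registered through the new binding (the PrintingTracker name and the log text are illustrative only, not part of this patch):

    import org.bytedeco.pytorch.cuda.TraceEntry;
    import org.bytedeco.pytorch.cuda.functions.AllocatorTraceTracker;
    import static org.bytedeco.pytorch.global.torch_cuda.attachAllocatorTraceTracker;

    public class PrintingTracker extends AllocatorTraceTracker {
        // JavaCPP routes the native std::function through this override.
        @Override public void call(TraceEntry e) {
            System.out.println("CUDA caching allocator trace event");
        }

        public static void main(String[] args) {
            // Keep a strong reference while registered so the native thunk stays alive.
            PrintingTracker tracker = new PrintingTracker();
            attachAllocatorTraceTracker(tracker);
            // ... run CUDA allocations; trace entries are delivered to call() ...
        }
    }

diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/graph_node_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/graph_node_list.java
index f86f48e2f51..2b1a571e3aa 100644
--- 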
a/pytorch/src/gen/java/org/bytedeco/pytorch/graph_node_list_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/graph_node_list_iterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kArea.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kArea.java index 114ebc812cd..6bfc0a2a849 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kArea.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kArea.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kBatchMean.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kBatchMean.java index 830d6a10924..67225dd389e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kBatchMean.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kBatchMean.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kBicubic.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kBicubic.java index 8a4afe44e7e..eca2f43d968 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kBicubic.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kBicubic.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kBilinear.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kBilinear.java index 380c1e29316..5e3b7c91a77 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kBilinear.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kBilinear.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kBorder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kBorder.java index f8f14faf517..87773c1d919 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kBorder.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kBorder.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kCircular.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kCircular.java index f1525ed0a06..f0d077a959a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kCircular.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kCircular.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kConstant.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kConstant.java index 8b01be69a14..91eb3597bab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kConstant.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kConstant.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 
1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kConv1D.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kConv1D.java index 7782be14b4e..268d847555a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kConv1D.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kConv1D.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kConv2D.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kConv2D.java index 779f05b60ac..77c7837a3fe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kConv2D.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kConv2D.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kConv3D.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kConv3D.java index 04a73b53232..96c9ee7c656 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kConv3D.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kConv3D.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose1D.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose1D.java index 4f4a52d303e..2de70e78eaa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose1D.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose1D.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose2D.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose2D.java index c98529aa5e2..766614b1980 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose2D.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose2D.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose3D.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose3D.java index 12585465e77..d2668a7332b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose3D.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose3D.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kFanIn.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kFanIn.java index 3eeca795124..6f11f1020c0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kFanIn.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kFanIn.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kFanOut.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kFanOut.java index 
c9ed147a128..c8eed7e614a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kFanOut.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kFanOut.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kGELU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kGELU.java index 3e6227e1e98..592d634c9c6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kGELU.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kGELU.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kGRU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kGRU.java index 3932c0ccd49..136a4fd9acb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kGRU.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kGRU.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kLSTM.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kLSTM.java index 3ff365b670d..1f33ef09ee3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kLSTM.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kLSTM.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kLeakyReLU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kLeakyReLU.java index 4f7c4c8ace8..0eb881b01a7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kLeakyReLU.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kLeakyReLU.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kLinear.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kLinear.java index ed9dad54380..3556d45734d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kLinear.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kLinear.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kMax.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kMax.java index 2add924365a..0d0d7b310cd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kMax.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kMax.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kMean.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kMean.java index 23bd1ad3f9f..2ee10f4d68d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kMean.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kMean.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff 
--git a/pytorch/src/gen/java/org/bytedeco/pytorch/kMish.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kMish.java index ae8a0800e26..60c3f22018b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kMish.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kMish.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kNearest.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kNearest.java index 23fbdf0307b..573736b4309 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kNearest.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kNearest.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kNearestExact.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kNearestExact.java index 90055a28825..cfe4671eaa9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kNearestExact.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kNearestExact.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kNone.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kNone.java index b9efb8a3868..cb51fb7e974 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kNone.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kNone.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_RELU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_RELU.java index 870000126e4..dd86b8b7fc2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_RELU.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_RELU.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_TANH.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_TANH.java index 99c02d253a5..fbb6d2c3923 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_TANH.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_TANH.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kReLU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kReLU.java index 68f6cf7da58..ce5741e47c9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kReLU.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kReLU.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kReflect.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kReflect.java index f7f8f84ca08..9cad7f5a19c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kReflect.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kReflect.java @@ 
-1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kReflection.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kReflection.java index a6b74408ac3..5c788f8ddf9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kReflection.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kReflection.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kReplicate.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kReplicate.java index 40573df51ed..efaffc9e3c2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kReplicate.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kReplicate.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kSame.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kSame.java index 5c68d396948..188534ff2e6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kSame.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kSame.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kSiLU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kSiLU.java index 6f62971192f..8ca84a9e5a9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kSiLU.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kSiLU.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kSigmoid.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kSigmoid.java index 15cf0473ff9..0689e00947c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kSigmoid.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kSigmoid.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kSum.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kSum.java index 2e5b3b3718b..3f98d19409e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kSum.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kSum.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kTanh.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kTanh.java index b46e72fc42f..89d9fce5bef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kTanh.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kTanh.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kTrilinear.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kTrilinear.java 
index bf8da88620d..697baf3540c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kTrilinear.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kTrilinear.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kValid.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kValid.java index 79d97367aee..84e49c697bd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kValid.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kValid.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kZeros.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kZeros.java index 2f4ed51dcac..2a8bf2c2959 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kZeros.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kZeros.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/module_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/module_iterator.java index e597f9a6a45..6ff234c2286 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/module_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/module_iterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/module_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/module_list.java index 97fe287cfd2..f3e02f7ac48 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/module_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/module_list.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_data_pod.java b/pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_data_pod.java index 5a0dab03720..253ad1624a7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_data_pod.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_data_pod.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_engine.java b/pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_engine.java index 420d786ac5f..fe74487df95 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_engine.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_engine.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; @@ -25,6 +25,7 @@ public class mt19937_engine extends Pointer { public mt19937_engine(Pointer p) { super(p); } + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) public mt19937_engine(@Cast("uint64_t") long seed/*=5489*/) { super((Pointer)null); allocate(seed); } private native void allocate(@Cast("uint64_t") 
long seed/*=5489*/); public mt19937_engine() { super((Pointer)null); allocate(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java index bfeff429d9c..2d32c6b21fd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java index fe7f3c2a1de..37aaaf3b6ad 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java index 9d124f6143c..2f438ae0394 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java index 15b78496c70..f16a884a90e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_iterator.java index 05f8e8da0bb..7ab6a91fa3e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_iterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_list.java index 89fd70ba5af..71d8fc5f54e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_list.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java index 233ece0ce1e..e5b5ece1427 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java @@ -1,4 +1,4 @@ -// 
Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java index 8dc5a4b6835..90a3749f13b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java index 7e321ab5101..2f185a80acd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java index 48e188a4749..dbcf0933d30 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/pretty_tree.java b/pytorch/src/gen/java/org/bytedeco/pytorch/pretty_tree.java index 7a444dc239d..f727813364e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/pretty_tree.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/pretty_tree.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/qint32.java b/pytorch/src/gen/java/org/bytedeco/pytorch/qint32.java index fd65cec8ca8..bdcc5e401e8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/qint32.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/qint32.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/qint8.java b/pytorch/src/gen/java/org/bytedeco/pytorch/qint8.java index bb9557a51df..937a3315412 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/qint8.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/qint8.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/quint2x4.java b/pytorch/src/gen/java/org/bytedeco/pytorch/quint2x4.java index c94b0463be5..3e5e27db6f0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/quint2x4.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/quint2x4.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/quint4x2.java b/pytorch/src/gen/java/org/bytedeco/pytorch/quint4x2.java index 2f53907b6b0..4913083afba 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/quint4x2.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/quint4x2.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/quint8.java b/pytorch/src/gen/java/org/bytedeco/pytorch/quint8.java index 5e8a49920e3..24540eb2098 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/quint8.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/quint8.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/type_index.java b/pytorch/src/gen/java/org/bytedeco/pytorch/type_index.java index 68fd9d081b3..091ea09b4ed 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/type_index.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/type_index.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/TransformerActivation.java b/pytorch/src/main/java/org/bytedeco/pytorch/TransformerActivation.java index 2dac4af2ceb..98b5ad1ad65 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/TransformerActivation.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/TransformerActivation.java @@ -6,7 +6,7 @@ /* This is a modified version of the variant container without the get2 method, that would * return a std::function and not a function pointer. 
*/ -@NoOffset @Name("c10::variant >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("std::variant >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class TransformerActivation extends Pointer { static { Loader.load(); @@ -46,7 +46,7 @@ public kReLU get0() { return get0(this); } - @Namespace @Name("c10::get<0>") static native @ByRef kReLU get0(@ByRef TransformerActivation container); + @Namespace @Name("std::get<0>") static native @ByRef kReLU get0(@ByRef TransformerActivation container); @ValueSetter public native TransformerActivation put(@ByRef kReLU value); @@ -54,7 +54,7 @@ public kGELU get1() { return get1(this); } - @Namespace @Name("c10::get<1>") static native @ByRef kGELU get1(@ByRef TransformerActivation container); + @Namespace @Name("std::get<1>") static native @ByRef kGELU get1(@ByRef TransformerActivation container); @ValueSetter public native TransformerActivation put(@ByRef kGELU value); diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/cuda/functions/AllocatorTraceTracker.java b/pytorch/src/main/java/org/bytedeco/pytorch/cuda/functions/AllocatorTraceTracker.java new file mode 100644 index 00000000000..8e35350649b --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/cuda/functions/AllocatorTraceTracker.java @@ -0,0 +1,32 @@ +package org.bytedeco.pytorch.cuda.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.ByRef; +import org.bytedeco.javacpp.annotation.Const; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.pytorch.cuda.TraceEntry; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class AllocatorTraceTracker extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. + */ + public AllocatorTraceTracker(Pointer p) { + super(p); + } + + protected AllocatorTraceTracker() { + allocate(); + } + + private native void allocate(); + + // std::function + public native void call(@Const @ByRef TraceEntry e); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/cuda/functions/OutOfMemoryObserver.java b/pytorch/src/main/java/org/bytedeco/pytorch/cuda/functions/OutOfMemoryObserver.java new file mode 100644 index 00000000000..557265eb2fb --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/cuda/functions/OutOfMemoryObserver.java @@ -0,0 +1,29 @@ +package org.bytedeco.pytorch.cuda.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.Properties; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class OutOfMemoryObserver extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public OutOfMemoryObserver(Pointer p) { + super(p); + } + + protected OutOfMemoryObserver() { + allocate(); + } + + private native void allocate(); + + // std::function(const at::StrongTypePtr&, IValue)> + public native @ByVal ObjPtr call(@Const @ByRef StrongTypePtr stp, @ByVal IValue iv); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 2edce027a3d..f1b9b71f8fc 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -296,7 +296,7 @@ public void map(InfoMap infoMap) { .put(new Info().javaText("import org.bytedeco.pytorch.Module;")) .put(new Info().javaText("import org.bytedeco.javacpp.annotation.Cast;")) - .put(new Info("basic/containers").cppTypes("c10::optional", "torch::optional", "c10::variant")) + .put(new Info("basic/containers").cppTypes("c10::optional", "torch::optional")) .put(new Info("std::nullptr_t").cast().pointerTypes("PointerPointer")) .put(new Info("at::CheckedFrom").cast().valueTypes("BytePointer", "String").pointerTypes("PointerPointer")) // Alias to const char* @@ -304,7 +304,7 @@ public void map(InfoMap infoMap) { // .put(new Info("c10::IValue::operator ==").skip()) // Possible name conflict with IValue.equals .put(new Info("std::size_t", "c10::Dict::size_type", "c10::Dict::size_type").cast().valueTypes("long").pointerTypes("SizeTPointer")) - .put(new Info("approx_time_t").cast().valueTypes("long").pointerTypes("LongPointer")) + .put(new Info("c10::approx_time_t").cast().valueTypes("long").pointerTypes("LongPointer")) .put(new Info( "torch::ExpandingArray<1>", "torch::ExpandingArray<2>", "torch::ExpandingArray<3>", "torch::ExpandingArray<4>", "torch::ExpandingArray", "torch::ExpandingArray<1*2>", "torch::ExpandingArray<2*2>", "torch::ExpandingArray<3*2>").cast().pointerTypes("LongPointer")) @@ -342,6 +342,8 @@ public void map(InfoMap infoMap) { .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::BFloat16>::t)").pointerTypes("BFloat16")) .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Float8_e5m2>::t)").pointerTypes("Float8_e5m2")) .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Float8_e4m3fn>::t)").pointerTypes("Float8_e4m3fn")) + .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Float8_e5m2fnuz>::t)").pointerTypes("Float8_e5m2fnuz")) + .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Float8_e4m3fnuz>::t)").pointerTypes("Float8_e4m3fnuz")) .put(new Info("c10::DataPtr", "at::DataPtr").valueTypes("@Cast({\"\", \"c10::DataPtr&&\"}) @StdMove DataPtr").pointerTypes("DataPtr")) .put(new Info("c10::Storage", "at::Storage").valueTypes("@Cast({\"\", \"c10::Storage&&\"}) @StdMove Storage").pointerTypes("Storage")) .put(new Info("c10::ClassType").purify().pointerTypes("ClassType")) // Issue #669 @@ -426,7 +428,7 @@ public void map(InfoMap infoMap) { .put(new Info("c10::optional").pointerTypes("TypeMetaOptional").define()) .put(new Info("c10::optional").pointerTypes("ExecutorExecutionModeOptional").define()) .put(new Info("c10::optional::operator ->").skip()) // Returns a pointer to ExecutorExecutionMode, which is an enum - .put(new Info("c10::optional", + .put(new Info("const c10::optional", "c10::optional", "c10::optional").cast().pointerTypes("InlinedCallStackOptional").define()) .put(new Info("c10::optional", 
"c10::optional").cast().pointerTypes("ScopeOptional").define()) @@ -451,6 +453,9 @@ public void map(InfoMap infoMap) { .put(new Info("c10::optional >").pointerTypes("StringViewVectorOptional").define()) .put(new Info("c10::optional >", "c10::optional >")/*.cast?*/.pointerTypes("PointerPairOptional").define()) .put(new Info("c10::optional > >", "c10::optional >").pointerTypes("WeakStorageVectorOptional").define()) + .put(new Info("c10::optional").pointerTypes("CppSignatureOptional").define()) + .put(new Info("c10::optional >").pointerTypes("SafePyObjectOptional").define()) + .put(new Info("c10::optional >").pointerTypes("BytePointerPairOptional").define()) ; @@ -484,16 +489,16 @@ public void map(InfoMap infoMap) { ; - //// c10::variant + //// std::variant infoMap - .put(new Info("c10::variant", "torch::nn::init::NonlinearityType").pointerTypes("Nonlinearity").define()) - .put(new Info("c10::variant", + .put(new Info("std::variant", "torch::nn::init::FanModeType").pointerTypes("FanModeType").define()) - .put(new Info("c10::variant", + .put(new Info("std::variant", "torch::nn::ConvOptions<1>::padding_mode_t", "torch::nn::ConvOptions<2>::padding_mode_t", "torch::nn::ConvOptions<3>::padding_mode_t", @@ -501,28 +506,28 @@ public void map(InfoMap infoMap) { "torch::nn::ConvTransposeOptions<2>::padding_mode_t", "torch::nn::ConvTransposeOptions<3>::padding_mode_t", "torch::nn::detail::conv_padding_mode_t").pointerTypes("ConvPaddingMode").define()) - .put(new Info("c10::variant,torch::enumtype::kValid,torch::enumtype::kSame>", + .put(new Info("std::variant,torch::enumtype::kValid,torch::enumtype::kSame>", "torch::nn::ConvOptions<1>::padding_t", "torch::nn::detail::ConvNdOptions<1>::padding_t", "torch::nn::functional::ConvFuncOptions<1>::padding_t", "torch::nn::functional::Conv1dFuncOptions::padding_t").purify().pointerTypes("Conv1dPadding").define()) - .put(new Info("c10::variant,torch::enumtype::kValid,torch::enumtype::kSame>", + .put(new Info("std::variant,torch::enumtype::kValid,torch::enumtype::kSame>", "torch::nn::ConvOptions<2>::padding_t", "torch::nn::detail::ConvNdOptions<2>::padding_t", "torch::nn::functional::ConvFuncOptions<2>::padding_t", "torch::nn::functional::Conv2dFuncOptions::padding_t").purify().pointerTypes("Conv2dPadding").define()) - .put(new Info("c10::variant,torch::enumtype::kValid,torch::enumtype::kSame>", + .put(new Info("std::variant,torch::enumtype::kValid,torch::enumtype::kSame>", "torch::nn::ConvOptions<3>::padding_t", "torch::nn::detail::ConvNdOptions<3>::padding_t", "torch::nn::functional::ConvFuncOptions<3>::padding_t", "torch::nn::functional::Conv3dFuncOptions::padding_t").purify().pointerTypes("Conv3dPadding").define()) - .put(new Info("c10::variant", + .put(new Info("std::variant", "torch::nn::EmbeddingBagMode").pointerTypes("EmbeddingBagMode").define()) - .put(new Info("c10::variant", + .put(new Info("std::variant", "torch::nn::functional::PadFuncOptions::mode_t").pointerTypes("PaddingMode").define()) - .put(new Info("c10::variant", + .put(new Info("std::variant", "torch::nn::L1LossOptions::reduction_t", "torch::nn::functional::L1LossFuncOptions::reduction_t", "torch::nn::MSELossOptions::reduction_t", "torch::nn::functional::MSELossFuncOptions::reduction_t", "torch::nn::BCELossOptions::reduction_t", "torch::nn::functional::BinaryCrossEntropyFuncOptions::reduction_t", @@ -542,27 +547,31 @@ public void map(InfoMap infoMap) { "torch::nn::NLLLossOptions::reduction_t", "torch::nn::functional::NLLLossFuncOptions::reduction_t", 
"torch::nn::CrossEntropyLossOptions::reduction_t", "torch::nn::functional::CrossEntropyFuncOptions::reduction_t", "torch::nn::BCEWithLogitsLossOptions::reduction_t", "torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions::reduction_t").pointerTypes("LossReduction").define()) - .put(new Info("c10::variant", + .put(new Info("std::variant", "torch::nn::KLDivLossOptions::reduction_t", "torch::nn::functional::KLDivFuncOptions::reduction_t").pointerTypes("KLDivLossReduction").define()) - .put(new Info("c10::variant", + .put(new Info("std::variant", "torch::nn::functional::GridSampleFuncOptions::mode_t").pointerTypes("GridSampleMode").define()) - .put(new Info("c10::variant", + .put(new Info("std::variant", "torch::nn::functional::GridSampleFuncOptions::padding_mode_t").pointerTypes("GridSamplePaddingMode").define()) - .put(new Info("c10::variant", + .put(new Info("std::variant", "torch::nn::detail::RNNOptionsBase::rnn_options_base_mode_t").pointerTypes("RNNBaseMode").define()) - .put(new Info("c10::variant", + .put(new Info("std::variant", "torch::nn::RNNOptions::nonlinearity_t", "torch::nn::RNNCellOptions::nonlinearity_t").pointerTypes("RNNNonlinearity").define()) - .put(new Info("c10::variant", + .put(new Info("std::variant", "torch::nn::UpsampleOptions::mode_t").pointerTypes("UpsampleMode").define()) - .put(new Info("c10::variant", + .put(new Info("std::variant", "torch::nn::functional::InterpolateFuncOptions::mode_t").pointerTypes("InterpolateMode").define()) - .put(new Info("c10::variant >", + .put(new Info("std::variant >", "torch::nn::activation_t", "torch::nn::TransformerOptions::activation_t").pointerTypes("TransformerActivation")) // Defined explicitly + + .put(new Info("std::variant", "c10::Warning::warning_variant_t").pointerTypes("WarningVariant").define()) // Cannot be defined as inner class of Warning + .put(new Info("c10::Warning::UserWarning").pointerTypes("Warning.UserWarning")) + .put(new Info("c10::Warning::DeprecationWarning").pointerTypes("Warning.DeprecationWarning")) ; /* @@ -653,6 +662,8 @@ public void map(InfoMap infoMap) { .put(new Info("const std::vector").pointerTypes("FunctionSchemaVector").define()) .put(new Info("const std::vector", "std::vector").pointerTypes("DataPtrVector").define()) // Used from cuda only .put(new Info("const std::vector >", "std::vector >").pointerTypes("WeakStorageVector").define()) + .put(new Info("std::vector").pointerTypes("TagVector").define()) + .put(new Info("std::vector >").pointerTypes("ReadAdapterInterfaceVector").define()) ; @@ -887,6 +898,7 @@ public void map(InfoMap infoMap) { .put(new Info("std::tuple,std::vector >", "std::tuple,std::vector >").pointerTypes("T_TensorTensorVectorTensorVector_T").define()) .put(new Info("const std::tuple", "std::tuple").pointerTypes("T_DataPtrSizeT_T").define()) .put(new Info("std::tuple", "std::pair").pointerTypes("T_TypePtrLong_T").define()) // Parse this pair as tuple because Parser doesn't generate valid code for optional + .put(new Info("std::tuple,c10::impl::TorchDispatchModeKey>").pointerTypes("T_SafePyObjectTorchDispatchModeKey_T").define()) ; @@ -971,27 +983,29 @@ public void map(InfoMap infoMap) { .put(new Info("std::pair").pointerTypes("RecordFunctionHandleIntPair").define()) .put(new Info("std::pair", "std::pair").pointerTypes("PointerPair").define()) .put(new Info("std::pair").pointerTypes("SizeTMatchedSchemaPair").define()) + .put(new Info("std::pair").pointerTypes("BytePointerPair").define()) ; //// Intrusive pointers /* We cannot define an adapter working like 
SharedPtrAdapter since there is no public constructor of intrusive_ptr taking a T*. */ for (PointerInfo pi : new PointerInfo[]{ + new PointerInfo("at::Quantizer"), + new PointerInfo("c10::GeneratorImpl"), new PointerInfo("c10::ivalue::Tuple"), new PointerInfo("c10::ivalue::Future", "at::ivalue::Future"), new PointerInfo("c10::ivalue::ConstantString"), - new PointerInfo("c10::GeneratorImpl"), - new PointerInfo("at::Quantizer"), new PointerInfo("c10::ivalue::Await"), - new PointerInfo("c10::RRefInterface"), + new PointerInfo("c10::ivalue::Object").javaBaseName("Obj"), new PointerInfo("c10::ivalue::PyObjectHolder"), new PointerInfo("c10::ivalue::EnumHolder"), + new PointerInfo("c10::RRefInterface"), new PointerInfo("c10::TensorImpl"), new PointerInfo("c10::TensorImpl,c10::UndefinedTensorImpl").javaBaseName("TensorImpl"), - new PointerInfo("torch::jit::Tree").javaName("TreeRef"), new PointerInfo("c10::StorageImpl", "c10::StorageImpl,NullType"), new PointerInfo("c10::SymNodeImpl").javaName("SymNode"), - new PointerInfo("c10::BackendMeta").javaName("BackendMetaRef") // Warning: BackendMetaPtr is sth different + new PointerInfo("c10::BackendMeta").javaName("BackendMetaRef"), // Warning: BackendMetaPtr is sth different + new PointerInfo("torch::jit::Tree").javaName("TreeRef"), }) { String[] cppNames = new String[pi.argumentNames.length + pi.otherCppNames.length]; int i = 0; @@ -1015,7 +1029,9 @@ public void map(InfoMap infoMap) { //// Classes that Parser cannot detect as virtual - infoMap.put(new Info("c10::Error", "c10::IndexError", "c10::LinAlgError", "c10::ValueError", "c10::TypeError", "c10::NotImplementedError", "c10::EnforceFiniteError", "c10::OutOfMemoryError", + infoMap.put(new Info("c10::Error", "c10::IndexError", "c10::LinAlgError", "c10::ValueError", "c10::TypeError", + "c10::DistError", "c10::DistNetworkError", "c10::DistStoreError", + "c10::NotImplementedError", "c10::EnforceFiniteError", "c10::OutOfMemoryError", "c10::ErrorAlwaysShowCppStacktrace", "c10::OnnxfiBackendSystemError", "c10::DistBackendError", "c10::SharedType", "c10::StrongTypePtr", "c10::WeakTypePtr", "torch::autograd::CppFunctionPreHook", "torch::autograd::DifferentiableViewMeta", "torch::autograd::TraceableFunction", "torch::jit::Instruction", "torch::jit::Method", "torch::jit::ModuleInstanceInfo", @@ -1216,7 +1232,7 @@ public void map(InfoMap infoMap) { "torch::data::samplers::DistributedSampler<>" ).purify().pointerTypes("DistributedSampler")) .put(new Info( - "c10::optional" + "const c10::optional", "c10::optional" ).pointerTypes("BatchSizeOptional").define()) .put(new Info("torch::data::DataLoaderBase > >,torch::data::Example,std::vector >", @@ -1759,7 +1775,9 @@ public void map(InfoMap infoMap) { new PointerInfo("torch::nn::Module"), new PointerInfo("const at::functorch::FuncTorchTLSBase"), new PointerInfo("const torch::jit::CompilationUnit"), - new PointerInfo("torch::jit::SugaredValue") + new PointerInfo("torch::jit::SugaredValue"), + new PointerInfo("caffe2::serialize::ReadAdapterInterface"), + new PointerInfo("c10::SafePyObject"), }) { pi.makeShared(infoMap); } @@ -1786,7 +1804,6 @@ public void map(InfoMap infoMap) { new PointerInfo("torch::jit::Graph"), new PointerInfo("c10::NamedTensorMeta"), new PointerInfo("c10::FunctionSchema"), - new PointerInfo("c10::SafePyObject"), new PointerInfo("at::CPUGeneratorImpl"), new PointerInfo("at::TensorIterator"), new PointerInfo("caffe2::serialize::IStreamAdapter"), @@ -1801,6 +1818,8 @@ public void map(InfoMap infoMap) { infoMap .put(new Info("std::unique_ptr", 
"torch::jit::GraphAttr::Ptr").annotations("@UniquePtr").pointerTypes("AttributeValue")) // Ptr is really defined in AttributeValue (superclass of GraphAttr). But Parser doesn't find it. .put(new Info("torch::autograd::AutogradMeta::post_acc_grad_hooks_").annotations("@UniquePtr", "@Cast({\"\", \"\", \"std::unique_ptr&&\"})")) // See JavaCPP Issue #717 + + .put(new Info("std::unique_ptr").skip()) // A class cannot be handled by both shared and unique ptr ; @@ -1826,8 +1845,9 @@ public void map(InfoMap infoMap) { "at::TensorIteratorBase::serial_for_each", "at::TensorIteratorBase::for_each", - "torch::autograd::get_current_graph_task_exec_info" // Would need to map GraphTask, NodeExec...too much burden + "torch::autograd::get_current_graph_task_exec_info", // Would need to map GraphTask, NodeExec...too much burden + "torch::Library::def" ).skip()) ; @@ -1894,7 +1914,9 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "c10::impl::_force_tls_local_dispatch_key_set", "torch::jit::CompilationUnit::_clear_python_cu", "torch::jit::GraphFunction::_set_initial_executor_execution_mode", "torch::jit::GraphFunction::_set_ignore_amp", - "c10::detail::_str" + "c10::detail::_str", + "torch::jit::kJitOnlyOperatorTags", + "c10::IValue::Tag" // 2.2.0 make IValue::tag public, while IValue::Tag is supposed to be private. Bug ? Check if fixed in next release ).skip()); @@ -2033,7 +2055,9 @@ We need either to put an annotation info on each member, or javaName("@NoOffset {"bool", "boolean"}, {"at::BFloat16", "BFloat16"}, {"at::Float8_e4m3fn", "Float8_e4m3fn"}, - {"at::Float8_e5m2", "Float8_e5m2"} + {"at::Float8_e5m2", "Float8_e5m2"}, + {"at::Float8_e5m2fnuz", "Float8_e5m2fnuz"}, + {"at::Float8_e4m3fnuz", "Float8_e4m3fnuz"} }) { infoMap.put(new Info(template("c10::fetch_and_cast", t[0])).javaNames("fetch_and_cast_to_" + t[1])) .put(new Info(template("c10::cast_and_store", t[0])).javaNames("cast_and_store_from_" + t[1])); @@ -2087,9 +2111,11 @@ We need either to put an annotation info on each member, or javaName("@NoOffset infoMap.put(new Info("c10::ThreadPoolRegistry()", "c10::CUDAHooksRegistry()").skip()); - ///// Forward references and opaque classes + + //// Forward references and opaque classes infoMap .put(new Info("c10::Argument").pointerTypes("Argument")) // Ref in function_schema_inl.h, defined in function_schema.h + .put(new Info("c10::impl::CppSignature")) ; /* Classes that are not part of the API (no TORCH_API nor C10_API) and are not argument nor return type of API methods. @@ -2209,7 +2235,6 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "c10::impl::is_mutable_tensor_ref", "c10::in_place_t", "c10::ivalue::ComplexHolder", - "c10::ivalue::Object", "c10::ivalue::StreamData3Holder", "c10::ivalue::TupleElements::", "c10::ivalue::TupleTypeFactory", @@ -2299,6 +2324,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset ).skip()) ; + //// Functions not part of the API //// TORCH_API and the like are not honored on Linux but are on Windows. We must skip all public //// functions not marked as part of API. @@ -2333,11 +2359,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset .put(new Info("torch::Tensor", "at::Tensor")) - //// std::function passed as generic pointer because are returned by some methods. So no mapping possible. 
- .put(new Info("std::function", "torch::jit::BackendMetaPtr", "std::function&)>") - .pointerTypes("Pointer").cast()) - - //// Classes kept but passed as generic pointer + //// Classes kept but passed as generic pointer .put(new Info("c10::intrusive_ptr_target", "c10::nullopt", "c10::nullopt_t", "c10::impl::PyObjectSlot", "_object", "PyObject", "THPObjectPtr", "pyobj_list", "std::chrono::milliseconds", "std::exception_ptr", "std::type_info", @@ -2350,7 +2372,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "std::iterator >", "c10::optional", "c10::optional", "c10::intrusive_ptr", "c10::intrusive_ptr", - "c10::intrusive_ptr", "c10::ArrayRef >", + "c10::ArrayRef >", "torch::jit::DetachedBuffer::UniqueDetachedBuffer", "c10::optional", "c10::optional::ListOfOptionalElements>", "c10::optional::ListOfOptionalElements>", "c10::optional", "c10::optional >", @@ -2412,7 +2434,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset //// Function pointers // skip() is added when function pointer are parsed instead of std::function to use the class in package // functions and prevent the creation of an automatic class in main package. - // If a native function returns a std::function, no way to map it. + // If a native function returns a std::function, no way to map it. So either cast to pointer or skip. infoMap .put(new Info("void (*)(void*)", "c10::DeleterFnPtr", "torch::Deleter", "at::ContextDeleter", "caffe2::TypeMeta::Delete", "std::function").pointerTypes("PointerConsumer").valueTypes("PointerConsumer").skip()) @@ -2467,6 +2489,14 @@ We need either to put an annotation info on each member, or javaName("@NoOffset .put(new Info("void (*)(void*, size_t)", "c10::PlacementDtor", "caffe2::TypeMeta::PlacementNew", "caffe2::TypeMeta::PlacementDelete").pointerTypes("PlacementConsumer").valueTypes("PlacementConsumer").skip()) .put(new Info("void (*)(const void*, void*, size_t)", "caffe2::TypeMeta::Copy").pointerTypes("PlacementCopier").valueTypes("PlacementCopier").skip()) .put(new Info("torch::jit::Operation (*)(const torch::jit::Node*)", "torch::jit::OperationCreator").pointerTypes("OperationCreator").valueTypes("OperationCreator").skip()) + .put(new Info("c10::ApproximateClockToUnixTimeConverter::makeConverter").skip()) // Function returning a std::function + .put(new Info("std::function(const at::StrongTypePtr&,c10::IValue)>", "torch::jit::ObjLoader").pointerTypes("ObjLoader")) + + //// std::function passed as generic pointer because are returned by some methods. 
+ .put(new Info("std::function", "torch::jit::BackendMetaPtr", "std::function&)>") + .pointerTypes("Pointer").cast()) + + ; infoMap.put(new Info("caffe2::TypeMeta::deleteFn").javaText("public native @NoException(true) PointerConsumer deleteFn();")); // Parser picks up the wrong Delete diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java index 2cf50c171b6..3c84c6f06d2 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -75,7 +75,7 @@ public void map(InfoMap infoMap) { .put(new Info().enumerate().friendly()) .put(new Info().javaText("import org.bytedeco.pytorch.*;")) - .put(new Info().javaText("import org.bytedeco.pytorch.functions.*;")) + .put(new Info().javaText("import org.bytedeco.pytorch.cuda.functions.*;")) .put(new Info().javaText("import org.bytedeco.pytorch.Error;")) .put(new Info().javaText("import org.bytedeco.pytorch.global.torch.DeviceType;")) .put(new Info().javaText("import org.bytedeco.pytorch.global.torch.ScalarType;")) @@ -108,6 +108,9 @@ public void map(InfoMap infoMap) { .put(new Info("std::array", "c10::cuda::CUDACachingAllocator::StatArray").cast().pointerTypes("Stat")) //// Function pointers + .put(new Info("std::function'" //.put(new Info("std::shared_ptr (*)()", "c10::cuda::CUDACachingAllocator::CreateContextFn").pointerTypes("GatheredContextSupplier").valueTypes("GatheredContextSupplier").skip()) @@ -137,6 +140,8 @@ public void map(InfoMap infoMap) { .put(new Info("c10::optional").pointerTypes("ByteOptional")) .put(new Info("c10::IntArrayRef", "at::IntArrayRef").pointerTypes("LongArrayRef")) .put(new Info("std::vector").pointerTypes("DataPtrVector")) + .put(new Info("c10::Allocator").pointerTypes("Allocator")) + .put(new Info("CUDAContextLight.h").linePatterns("struct Allocator;").skip()) // Prevent regeneration of Allocator class in cuda package .put(new Info("c10::DeviceIndex").valueTypes("byte")) .put(new Info("c10::StreamId").valueTypes("long")) diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h index 5172bf59ff6..a359b0fe267 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h @@ -4,6 +4,7 @@ // ATen/cudnn/Descriptors.h // ATen/cudnn/Types.h // c10/cuda/CUDAGuard.h +#include "ATen/cuda/CUDAContextLight.h" #include "c10/cuda/CUDAStream.h" #include "ATen/cuda/CUDAContext.h" #include "c10/core/impl/GPUTrace.h" @@ -19,6 +20,7 @@ #include "ATen/cudnn/Utils.h" #include "ATen/cudnn/Handle.h" #include "c10/cuda/CUDAGraphsC10Utils.h" +#include "c10/util/ApproximateClock.h" #include "c10/cuda/CUDACachingAllocator.h", #include "c10/cuda/impl/CUDAGuardImpl.h" #include "ATen/cudnn/Descriptors.h" diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h index 84914ddfa90..dcaca411259 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h @@ -15,16 +15,12 @@ #include "c10/core/DeviceType.h" #include "c10/util/Deprecated.h" // #include "c10/util/string_utils.h" // Android only -// #include "c10/util/C++17.h" -#include 
"c10/util/reverse_iterator.h" // #include "c10/util/string_view.h" // Not mapped. Using custom adapter instead. #include "c10/util/StringUtil.h" -#include "c10/util/in_place.h" -// #include "c10/util/variant.h" // Not parseable and incompatible with declaring c10::variant as basic container #include "c10/util/Exception.h" #include "c10/core/Device.h" #include "c10/core/DispatchKey.h" -#include "c10/util/Array.h" +// #include "c10/util/C++17.h" #include "c10/util/TypeTraits.h" #include "c10/util/TypeList.h" // #include "c10/util/Metaprogramming.h" // Not parseable @@ -46,6 +42,8 @@ #include "c10/util/floating_point_utils.h" #include "c10/util/Float8_e4m3fn-inl.h" #include "c10/util/Float8_e4m3fn.h" +#include "c10/util/Float8_e4m3fnuz-inl.h" +#include "c10/util/Float8_e4m3fnuz.h" #include "c10/util/complex_math.h" #include "c10/util/complex_utils.h" #include "c10/util/complex.h" @@ -53,6 +51,8 @@ #include "c10/util/Half.h" #include "c10/util/Float8_e5m2-inl.h" #include "c10/util/Float8_e5m2.h" +#include "c10/util/Float8_e5m2fnuz-inl.h" +#include "c10/util/Float8_e5m2fnuz.h" #include "c10/util/bits.h" #include "c10/util/qint32.h" #include "c10/util/qint8.h" @@ -60,7 +60,8 @@ #include "c10/util/quint4x2.h" #include "c10/util/quint8.h" #include "c10/core/ScalarType.h" -#include "c10/util/ExclusivelyOwned.h" +// #include "c10/util/Optional.h" // Incompatible with declaration of c10::optional as basic container +#include "c10/util/in_place.h" #include "c10/util/MaybeOwned.h" #include "c10/core/SymNodeImpl.h" #include "c10/core/SymFloat.h" @@ -68,12 +69,10 @@ #include "c10/core/SymInt.h" #include "c10/util/TypeCast.h" #include "c10/core/Scalar.h" -// #include "c10/util/Optional.h" // Incompatible with declaration of c10::optional as basic container #include "c10/util/IdWrapper.h" #include "c10/util/Type.h" #include "c10/util/ConstexprCrc.h" #include "c10/util/TypeIndex.h" -#include "c10/util/flat_hash_map.h" #include "c10/util/irange.h" #include "c10/util/typeid.h" #include "c10/core/ScalarTypeToTypeMeta.h" @@ -82,9 +81,9 @@ #include "c10/core/Allocator.h" #include "c10/util/python_stub.h" #include "c10/core/StorageImpl.h" +#include "c10/util/ExclusivelyOwned.h" #include "c10/core/Storage.h" #include "c10/core/AutogradState.h" -#include "c10/core/GradMode.h" #include "c10/util/Registry.h" #include "c10/util/Flags.h" #include "c10/core/impl/LocalDispatchKeySet.h" @@ -92,12 +91,13 @@ #include "c10/core/SymIntArrayRef.h" #include "c10/core/DefaultDtype.h" #include "c10/core/TensorOptions.h" -#include "c10/core/WrapDimMinimal.h" #include "c10/core/impl/HermeticPyObjectTLS.h" #include "c10/core/impl/PyInterpreter.h" #include "c10/core/impl/PyObjectSlot.h" #include "c10/core/impl/SizesAndStrides.h" #include "c10/util/DimVector.h" +#include "c10/core/SymbolicShapeMeta.h" +#include "c10/core/WrapDimMinimal.h" // #include "c10/util/logging_is_not_google_glog.h" // Not parseable #include "c10/util/Logging.h" #include "c10/util/accumulate.h" @@ -173,26 +173,38 @@ #include "ATen/core/function_schema.h" #include "ATen/core/function_schema_inl.h" #include "ATen/core/op_registration/infer_schema.h" -#include "ATen/record_function.h" #include "ATen/core/op_registration/op_allowlist.h" -#include "c10/util/either.h" -#include "torch/csrc/jit/frontend/function_schema_parser.h" -#include "c10/core/CompileTimeFunctionPointer.h" +#include "ATen/SequenceNumber.h" #include "ATen/core/boxing/OperatorKernel.h" #include "ATen/core/boxing/BoxedKernel.h" #include "ATen/core/boxing/BoxedKernel_impl.h" #include 
"ATen/core/stack.h" #include "ATen/core/boxing/impl/boxing.h" #include "ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h" +#include "c10/core/CompileTimeFunctionPointer.h" #include "ATen/core/boxing/impl/WrapFunctionIntoFunctor.h" #include "ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h" #include "ATen/core/boxing/KernelFunction.h" #include "ATen/core/boxing/KernelFunction_impl.h" +#include "c10/util/flat_hash_map.h" +#include "c10/util/either.h" +#include "c10/core/PyHandleCache.h" +#include "c10/core/SafePyObject.h" +#include "c10/util/Bitset.h" +#include "ATen/core/Variadic.h" +#include "ATen/core/dispatch/DispatchKeyExtractor.h" +#include "ATen/core/dispatch/OperatorEntry.h" +#include "ATen/record_function.h" +#include "c10/util/Synchronized.h" +// #include "c10/util/LeftRight.h" // Not in API +#include "c10/core/GradMode.h" +#include "ATen/core/grad_mode.h" +#include "ATen/core/dispatch/Dispatcher.h" #include "ATen/core/dispatch/CppSignature.h" #include "ATen/core/dispatch/RegistrationHandleRAII.h" +#include "ATen/core/enum_tag.h" #include "ATen/core/ATenOpList.h" #include "ATen/core/op_registration/op_registration.h" -#include "ATen/core/enum_tag.h" #include "ATen/core/function.h" // #include "ATen/core/builtin_function.h" // Not in API #include "ATen/core/class_type.h" @@ -201,10 +213,8 @@ #include "torch/library.h" #include "torch/csrc/autograd/autograd_not_implemented_fallback.h" #include "torch/csrc/autograd/anomaly_mode.h" -#include "ATen/core/grad_mode.h" #include "torch/csrc/autograd/grad_mode.h" #include "ATen/FuncTorchTLS.h" -#include "c10/core/SafePyObject.h" #include "ATen/PythonTorchFunctionTLS.h" #include "ATen/SavedTensorHooks.h" #include "ATen/ThreadLocalPythonObjects.h" @@ -222,6 +232,7 @@ #include "ATen/core/LegacyTypeDispatch.h" #include "ATen/detail/CUDAHooksInterface.h" #include "ATen/detail/HIPHooksInterface.h" +#include "ATen/detail/IPUHooksInterface.h" #include "ATen/detail/MPSHooksInterface.h" #include "ATen/detail/MTIAHooksInterface.h" #include "ATen/detail/ORTHooksInterface.h" @@ -714,6 +725,7 @@ #include "ATen/ops/margin_ranking_loss.h" #include "ATen/ops/masked_fill.h" #include "ATen/ops/masked_scatter.h" +#include "ATen/ops/masked_scatter_backward.h" #include "ATen/ops/masked_select.h" #include "ATen/ops/masked_select_backward.h" #include "ATen/ops/matmul.h" @@ -1177,9 +1189,7 @@ #include "torch/csrc/autograd/input_metadata.h" #include "torch/csrc/autograd/saved_variable_hooks.h" #include "torch/csrc/autograd/saved_variable.h" -#include "ATen/core/Variadic.h" #include "torch/csrc/utils/variadic.h" -#include "ATen/SequenceNumber.h" #include "torch/csrc/autograd/function.h" #include "torch/csrc/autograd/custom_function.h" #include "torch/csrc/api/include/torch/autograd.h" @@ -1203,13 +1213,7 @@ #include "ATen/ATen.h" #include "torch/csrc/api/include/torch/detail/TensorDataContainer.h" #include "torch/csrc/autograd/generated/variable_factories.h" -#include "c10/core/PyHandleCache.h" -#include "c10/util/Bitset.h" -#include "ATen/core/dispatch/DispatchKeyExtractor.h" -#include "ATen/core/dispatch/OperatorEntry.h" -#include "c10/util/Synchronized.h" -// #include "c10/util/LeftRight.h" // Not in API -#include "ATen/core/dispatch/Dispatcher.h" +#include "torch/csrc/jit/frontend/function_schema_parser.h" #include "torch/csrc/api/include/torch/types.h" #include "torch/csrc/api/include/torch/data/dataloader_options.h" #include "torch/csrc/api/include/torch/data/detail/queue.h" @@ -1219,7 +1223,6 @@ #include 
"torch/csrc/api/include/torch/data/samplers/base.h" #include "torch/csrc/api/include/torch/data/samplers/random.h" #include "torch/csrc/api/include/torch/data/worker_exception.h" -#include "torch/csrc/utils/memory.h" #include "torch/csrc/api/include/torch/data/dataloader/base.h" #include "torch/csrc/api/include/torch/data/dataloader/stateful.h" #include "torch/csrc/api/include/torch/data/dataloader/stateless.h" @@ -1237,6 +1240,7 @@ #include "torch/csrc/jit/ir/scope.h" #include "torch/csrc/jit/ir/constants.h" #include "torch/csrc/jit/ir/named_value.h" +// #include "c10/util/overloaded.h" // Non parseable #include "torch/csrc/jit/runtime/operator_options.h" #include "torch/csrc/jit/runtime/operator.h" #include "torch/csrc/utils/schema_info.h" @@ -1355,7 +1359,6 @@ #include "torch/csrc/api/include/torch/nn/options/adaptive.h" #include "torch/csrc/api/include/torch/nn/modules/adaptive.h" #include "torch/csrc/api/include/torch/nn/modules/batchnorm.h" -// #include "c10/util/overloaded.h" // Non parseable #include "torch/csrc/api/include/torch/nn/modules/conv.h" #include "torch/csrc/api/include/torch/nn/modules/distance.h" #include "torch/csrc/api/include/torch/nn/modules/dropout.h" From d1d1b540690124852c0cb2afd48e9d48f4a68cc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Thu, 1 Feb 2024 15:01:19 +0100 Subject: [PATCH 04/24] Add parsing of CUDAFunctions.h --- .../org/bytedeco/pytorch/cuda/CUDAError.java | 41 ++++ .../bytedeco/pytorch/cuda/WarningState.java | 51 +++++ .../bytedeco/pytorch/global/torch_cuda.java | 197 ++++++++++++++++++ .../org/bytedeco/pytorch/presets/torch.java | 1 + .../bytedeco/pytorch/presets/torch_cuda.java | 5 +- .../pytorch/presets/torch_cuda_include.h | 6 +- 6 files changed, 296 insertions(+), 5 deletions(-) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAError.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/WarningState.java diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAError.java new file mode 100644 index 00000000000..7fedcd5b6f9 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAError.java @@ -0,0 +1,41 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.cuda.functions.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +// Note [CHECK macro] +// ~~~~~~~~~~~~~~~~~~ +// This is a macro so that AT_ERROR can get accurate __LINE__ +// and __FILE__ information. We could split this into a short +// macro and a function implementation if we pass along __LINE__ +// and __FILE__, but no one has found this worth doing. + +// Used to denote errors from CUDA framework. +// This needs to be declared here instead util/Exception.h for proper conversion +// during hipify. 
+@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class CUDAError extends Error { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public CUDAError(Pointer p) { super(p); } + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/WarningState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/WarningState.java new file mode 100644 index 00000000000..afa499d7602 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/WarningState.java @@ -0,0 +1,51 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.cuda.functions.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +// this is a holder for c10 global state (similar to at GlobalContext) +// currently it's used to store cuda synchronization warning state, +// but can be expanded to hold other related global state, e.g. to +// record stream usage +@Namespace("c10::cuda") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class WarningState extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public WarningState() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public WarningState(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public WarningState(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public WarningState position(long position) { + return (WarningState)super.position(position); + } + @Override public WarningState getPointer(long i) { + return new WarningState((Pointer)this).offsetAddress(i); + } + + public native void set_sync_debug_mode(SyncDebugMode l); + public native void set_sync_debug_mode(@Cast("c10::cuda::SyncDebugMode") int l); + + public native SyncDebugMode get_sync_debug_mode(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java index e0489632c98..8daeb49f4a5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java @@ -433,6 +433,203 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // #define C10_CUDA_BUILD_SHARED_LIBS +// Parsed from c10/cuda/CUDAMiscFunctions.h + +// #pragma once +// this file is to avoid circular dependency between CUDAFunctions.h and +// CUDAExceptions.h + +// #include + +// #include +@Namespace("c10::cuda") public static native @NoException(true) @Cast("const char*") BytePointer get_cuda_check_suffix(); + // namespace cuda + // namespace c10 + + +// Parsed from c10/cuda/CUDAException.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../cuda/CUDAError.java + + + // namespace c10 + +// #define C10_CUDA_CHECK(EXPR) +// do { +// const cudaError_t __err = EXPR; +// c10::cuda::c10_cuda_check_implementation( +// static_cast(__err), +// __FILE__, +// __func__, /* Line number data type not well-defined between \ +// compilers, so we perform an explicit cast */ +// static_cast(__LINE__), +// true); +// } while (0) + +// #define C10_CUDA_CHECK_WARN(EXPR) +// do { +// const cudaError_t __err = EXPR; +// if (C10_UNLIKELY(__err != cudaSuccess)) { +// auto error_unused C10_UNUSED = cudaGetLastError(); +// (void)error_unused; +// TORCH_WARN("CUDA warning: ", cudaGetErrorString(__err)); +// } +// } while (0) + +// Indicates that a CUDA error is handled in a non-standard way +// #define C10_CUDA_ERROR_HANDLED(EXPR) EXPR + +// Intentionally ignore a CUDA error +// #define C10_CUDA_IGNORE_ERROR(EXPR) +// do { +// const cudaError_t __err = EXPR; +// if (C10_UNLIKELY(__err != cudaSuccess)) { +// cudaError_t error_unused C10_UNUSED = cudaGetLastError(); +// (void)error_unused; +// } +// } while (0) + +// Clear the last CUDA error +// #define C10_CUDA_CLEAR_ERROR() +// do { +// cudaError_t error_unused C10_UNUSED = cudaGetLastError(); +// (void)error_unused; +// } while (0) + +// This should be used directly after every kernel launch to ensure +// the launch happened correctly and provide an early, close-to-source +// diagnostic if it didn't. +// #define C10_CUDA_KERNEL_LAUNCH_CHECK() C10_CUDA_CHECK(cudaGetLastError()) + +/** Launches a CUDA kernel appending to it all the information need to handle + * device-side assertion failures. Checks that the launch was successful. */ +// #define TORCH_DSA_KERNEL_LAUNCH( +// kernel, blocks, threads, shared_mem, stream, ...) 
+// do { +// auto& launch_registry = +// c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref(); +// kernel<<>>( +// __VA_ARGS__, +// launch_registry.get_uvm_assertions_ptr_for_current_device(), +// launch_registry.insert( +// __FILE__, __FUNCTION__, __LINE__, #kernel, stream.id())); +// C10_CUDA_KERNEL_LAUNCH_CHECK(); +// } while (0) + +/** In the event of a CUDA failure, formats a nice error message about that + * failure and also checks for device-side assertion failures */ +@Namespace("c10::cuda") public static native void c10_cuda_check_implementation( + int err, + @Cast("const char*") BytePointer filename, + @Cast("const char*") BytePointer function_name, + int line_number, + @Cast("const bool") boolean include_device_assertions); +@Namespace("c10::cuda") public static native void c10_cuda_check_implementation( + int err, + String filename, + String function_name, + int line_number, + @Cast("const bool") boolean include_device_assertions); + + // namespace cuda + // namespace c10 + + +// Parsed from c10/cuda/CUDAFunctions.h + +// #pragma once + +// This header provides C++ wrappers around commonly used CUDA API functions. +// The benefit of using C++ here is that we can raise an exception in the +// event of an error, rather than explicitly pass around error codes. This +// leads to more natural APIs. +// +// The naming convention used here matches the naming convention of torch.cuda + +// #include +// #include +// #include +// #include +// #include + +// NB: In the past, we were inconsistent about whether or not this reported +// an error if there were driver problems are not. Based on experience +// interacting with users, it seems that people basically ~never want this +// function to fail; it should just return zero if things are not working. +// Oblige them. 
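+//
+// For example, a minimal availability probe from the Java bindings (assuming
+// a static import of org.bytedeco.pytorch.global.torch_cuda):
+//
+//   byte n = device_count();          // returns 0 instead of throwing
+//   boolean cudaAvailable = n > 0;    // false when no usable driver/device
+//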
+// It still might log a warning for user first time it's invoked +@Namespace("c10::cuda") public static native @NoException(true) byte device_count(); + +// Version of device_count that throws is no devices are detected +@Namespace("c10::cuda") public static native byte device_count_ensure_non_zero(); + +@Namespace("c10::cuda") public static native byte current_device(); + +@Namespace("c10::cuda") public static native void set_device(byte device); + +@Namespace("c10::cuda") public static native void device_synchronize(); + +@Namespace("c10::cuda") public static native void warn_or_error_on_sync(); + +// Raw CUDA device management functions +@Namespace("c10::cuda") public static native @Cast("cudaError_t") int GetDeviceCount(IntPointer dev_count); +@Namespace("c10::cuda") public static native @Cast("cudaError_t") int GetDeviceCount(IntBuffer dev_count); +@Namespace("c10::cuda") public static native @Cast("cudaError_t") int GetDeviceCount(int[] dev_count); + +@Namespace("c10::cuda") public static native @Cast("cudaError_t") int GetDevice(IntPointer device); +@Namespace("c10::cuda") public static native @Cast("cudaError_t") int GetDevice(IntBuffer device); +@Namespace("c10::cuda") public static native @Cast("cudaError_t") int GetDevice(int[] device); + +@Namespace("c10::cuda") public static native @Cast("cudaError_t") int SetDevice(int device); + +@Namespace("c10::cuda") public static native @Cast("cudaError_t") int MaybeSetDevice(int device); + +@Namespace("c10::cuda") public static native int ExchangeDevice(int device); + +@Namespace("c10::cuda") public static native int MaybeExchangeDevice(int device); + +@Namespace("c10::cuda") public static native void SetTargetDevice(); + +@Namespace("c10::cuda") public enum SyncDebugMode { L_DISABLED(0), L_WARN(1), L_ERROR(2); + + public final int value; + private SyncDebugMode(int v) { this.value = v; } + private SyncDebugMode(SyncDebugMode e) { this.value = e.value; } + public SyncDebugMode intern() { for (SyncDebugMode e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// Targeting ../cuda/WarningState.java + + + +@Namespace("c10::cuda") public static native @ByRef WarningState warning_state(); +// the subsequent functions are defined in the header because for performance +// reasons we want them to be inline +@Namespace("c10::cuda") public static native void memcpy_and_sync( + Pointer dst, + @Const Pointer src, + @Cast("int64_t") long nbytes, + @Cast("cudaMemcpyKind") int kind, + @Cast("cudaStream_t") Pointer stream); + +@Namespace("c10::cuda") public static native void stream_synchronize(@Cast("cudaStream_t") Pointer stream); + +@Namespace("c10::cuda") public static native @Cast("bool") boolean hasPrimaryContext(byte device_index); +@Namespace("c10::cuda") public static native @ByVal ByteOptional getDeviceIndexWithPrimaryContext(); + + // namespace cuda + // namespace c10 + + // Parsed from ATen/cuda/Exceptions.h // #pragma once diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index f1b9b71f8fc..374fa367ba7 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -250,6 +250,7 @@ public static void sharedMap(InfoMap infoMap) { "alignas", "COMPLEX_INTEGER_OP_TEMPLATE_CONDITION", "C10_DEVICE_HOST_FUNCTION", "FORCE_INLINE_APPLE", "ERROR_UNSUPPORTED_CAST", "LEGACY_CONTIGUOUS_MEMORY_FORMAT", 
"GFLAGS_DLL_DEFINE_FLAG", "GFLAGS_DLL_DECLARE_FLAG", "AT_X", "DEFINE_KEY", "C10_DISPATCHER_INLINE_UNLESS_MOBILE", "TH_DISALLOW_COPY_AND_ASSIGN", "__device__", + "__inline__", "TORCH_DSA_KERNEL_ARGS", "TORCH_DSA_KERNEL_ARGS_PASS", "C10_CUDA_API", "C10_CUDA_IMPORT", "C10_CUDA_EXPORT", "__ubsan_ignore_float_divide_by_zero__", "__ubsan_ignore_undefined__", diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java index 3c84c6f06d2..4a98a1eaa85 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -147,7 +147,7 @@ public void map(InfoMap infoMap) { .put(new Info("c10::StreamId").valueTypes("long")) .put(new Info("c10::cuda::CaptureStatus").valueTypes("int").cast().skip()) // Enum doesn't parse .put(new Info("std::pair,std::vector >").pointerTypes("DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair").define()) - .put(new Info("c10::CuDNNError").purify()) + .put(new Info("c10::CuDNNError", "c10::CUDAError").purify()) .put(new Info("c10::impl::GPUTrace::gpuTraceState").skip()) .put(new Info("at::native::RNNDescriptor::dropout_desc_").skip()) .put(new Info("at::native::operator <<(std::ostream&, at::native::TensorDescriptor&)", @@ -183,7 +183,8 @@ public void map(InfoMap infoMap) { .put(new Info( // Enums "cudnnActivationMode_t", "cudnnLossNormalizationMode_t", "cudnnRNNInputMode_t", "cudnnDirectionMode_t", "cudnnRNNMode_t", "cudaStreamCaptureMode", "cudnnDataType_t", "cudnnNanPropagation_t", - "cusparseStatus_t", "cusolverStatus_t", "cudnnRNNAlgo_t", "cudnnNanPropagation_t", "cublasStatus_t", "cudaError_t" + "cusparseStatus_t", "cusolverStatus_t", "cudnnRNNAlgo_t", "cudnnNanPropagation_t", "cublasStatus_t", "cudaError_t", + "cudaMemcpyKind" ).valueTypes("int").cast()) ; diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h index a359b0fe267..4adfba64905 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h @@ -11,9 +11,9 @@ #include "c10/cuda/CUDADeviceAssertionHost.h" #include "c10/cuda/CUDAMacros.h" #include "c10/cuda/impl/cuda_cmake_macros.h" -// #include "c10/cuda/CUDAMiscFunctions.h", // Parsing error -// #include "c10/cuda/CUDAException.h", // Parsing error -// #include "c10/cuda/CUDAFunctions.h", // Parsing error +#include "c10/cuda/CUDAMiscFunctions.h", +#include "c10/cuda/CUDAException.h", +#include "c10/cuda/CUDAFunctions.h", #include "ATen/cuda/Exceptions.h" #include "ATen/cudnn/cudnn-wrapper.h" #include "ATen/cuda/ATenCUDAGeneral.h" From 9d0db45a5c798ad51d3529a3200c5860aff503cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Thu, 1 Feb 2024 17:30:55 +0100 Subject: [PATCH 05/24] Add AOTInductor --- pytorch/include_list.pl | 4 +- .../pytorch/AOTIModelContainerRunner.java | 58 +++++++++++++++++++ .../pytorch/AOTIModelContainerRunnerCpu.java | 40 +++++++++++++ .../org/bytedeco/pytorch/DynamicLibrary.java | 28 +++++++++ .../cuda/AOTIModelContainerRunnerCuda.java | 58 +++++++++++++++++++ .../org/bytedeco/pytorch/global/torch.java | 22 +++++++ .../bytedeco/pytorch/global/torch_cuda.java | 14 ++++- .../org/bytedeco/pytorch/presets/torch.java | 11 +++- .../bytedeco/pytorch/presets/torch_cuda.java | 3 + .../pytorch/presets/torch_cuda_include.h | 4 
+- .../bytedeco/pytorch/presets/torch_include.h | 4 ++ 11 files changed, 240 insertions(+), 6 deletions(-) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunner.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCpu.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DynamicLibrary.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AOTIModelContainerRunnerCuda.java diff --git a/pytorch/include_list.pl b/pytorch/include_list.pl index 1435c82f3ec..2b67e999ee2 100644 --- a/pytorch/include_list.pl +++ b/pytorch/include_list.pl @@ -52,7 +52,7 @@ sub go { chdir "cppbuild/linux-x86_64-gpu/pytorch/torch/include"; -go('torch/csrc/api/include/torch/torch.h', 'torch/script.h'); +go('torch/csrc/api/include/torch/torch.h', 'torch/script.h', 'torch/csrc/inductor/aoti_model_container_runner.h'); print <"}) @StdMove TensorVector run( + @Cast({"", "std::vector"}) @StdMove TensorVector inputs); + + public native @ByVal BytePointerVector get_call_spec(); + + public AOTIModelContainerRunner( + @Cast("const char*") BytePointer model_path, + @Cast("size_t") long num_models, + @Cast("bool") boolean is_cpu, + @Cast("const char*") BytePointer cubin_dir) { super((Pointer)null); allocate(model_path, num_models, is_cpu, cubin_dir); } + private native void allocate( + @Cast("const char*") BytePointer model_path, + @Cast("size_t") long num_models, + @Cast("bool") boolean is_cpu, + @Cast("const char*") BytePointer cubin_dir); + public AOTIModelContainerRunner( + String model_path, + @Cast("size_t") long num_models, + @Cast("bool") boolean is_cpu, + String cubin_dir) { super((Pointer)null); allocate(model_path, num_models, is_cpu, cubin_dir); } + private native void allocate( + String model_path, + @Cast("size_t") long num_models, + @Cast("bool") boolean is_cpu, + String cubin_dir); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCpu.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCpu.java new file mode 100644 index 00000000000..26cee36966f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCpu.java @@ -0,0 +1,40 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("torch::inductor") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class AOTIModelContainerRunnerCpu extends AOTIModelContainerRunner { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public AOTIModelContainerRunnerCpu(Pointer p) { super(p); } + + public AOTIModelContainerRunnerCpu(@Cast("const char*") BytePointer model_path, @Cast("size_t") long num_models/*=1*/) { super((Pointer)null); allocate(model_path, num_models); } + private native void allocate(@Cast("const char*") BytePointer model_path, @Cast("size_t") long num_models/*=1*/); + public AOTIModelContainerRunnerCpu(@Cast("const char*") BytePointer model_path) { super((Pointer)null); allocate(model_path); } + private native void allocate(@Cast("const char*") BytePointer model_path); + public AOTIModelContainerRunnerCpu(String model_path, @Cast("size_t") long num_models/*=1*/) { super((Pointer)null); allocate(model_path, num_models); } + private native void allocate(String model_path, @Cast("size_t") long num_models/*=1*/); + public AOTIModelContainerRunnerCpu(String model_path) { super((Pointer)null); allocate(model_path); } + private native void allocate(String model_path); + + public native @Cast({"", "std::vector"}) @StdMove TensorVector run( + @Cast({"", "std::vector"}) @StdMove TensorVector inputs); + + public native @ByVal BytePointerVector get_call_spec(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DynamicLibrary.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DynamicLibrary.java new file mode 100644 index 00000000000..a7b641d7013 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DynamicLibrary.java @@ -0,0 +1,28 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// Forward declare DynamicLibrary +@Namespace("at") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DynamicLibrary extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public DynamicLibrary() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public DynamicLibrary(Pointer p) { super(p); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AOTIModelContainerRunnerCuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AOTIModelContainerRunnerCuda.java new file mode 100644 index 00000000000..5bbc854a2be --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AOTIModelContainerRunnerCuda.java @@ -0,0 +1,58 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.cuda.functions.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +@Namespace("torch::inductor") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class AOTIModelContainerRunnerCuda extends AOTIModelContainerRunner { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public AOTIModelContainerRunnerCuda(Pointer p) { super(p); } + + public AOTIModelContainerRunnerCuda( + @Cast("const char*") BytePointer model_path, + @Cast("size_t") long num_models/*=1*/, + @Cast("const char*") BytePointer cubin_dir/*=nullptr*/) { super((Pointer)null); allocate(model_path, num_models, cubin_dir); } + private native void allocate( + @Cast("const char*") BytePointer model_path, + @Cast("size_t") long num_models/*=1*/, + @Cast("const char*") BytePointer cubin_dir/*=nullptr*/); + public AOTIModelContainerRunnerCuda( + @Cast("const char*") BytePointer model_path) { super((Pointer)null); allocate(model_path); } + private native void allocate( + @Cast("const char*") BytePointer model_path); + public AOTIModelContainerRunnerCuda( + String model_path, + @Cast("size_t") long num_models/*=1*/, + String cubin_dir/*=nullptr*/) { super((Pointer)null); allocate(model_path, num_models, cubin_dir); } + private native void allocate( + String model_path, + @Cast("size_t") long num_models/*=1*/, + String cubin_dir/*=nullptr*/); + public AOTIModelContainerRunnerCuda( + String model_path) { super((Pointer)null); allocate(model_path); } + private native void allocate( + String model_path); + + public native @Cast({"", "std::vector"}) @StdMove TensorVector run( + @Cast({"", "std::vector"}) @StdMove TensorVector inputs); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 181eaaf6c76..3929f582062 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -79702,6 +79702,28 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch +// Parsed from torch/csrc/inductor/aoti_model_container_runner.h + +// #if !defined(C10_MOBILE) && !defined(ANDROID) +// #pragma once + +// #include +// #include +// Targeting ../DynamicLibrary.java + + + +// Targeting ../AOTIModelContainerRunner.java + + +// Targeting 
../AOTIModelContainerRunnerCpu.java + + + + // namespace torch::inductor +// #endif + + // Parsed from datasets.h /* diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java index 8daeb49f4a5..66270cd5580 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java @@ -977,7 +977,7 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // not counted as a word boundary, so you would otherwise have to list each // of these functions. -@Namespace("c10::cuda::CUDACachingAllocator") @MemberGetter public static native @Cast("const size_t") long kLargeBuffer(); + // Targeting ../cuda/Stat.java @@ -1249,4 +1249,16 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // namespace c10 +// Parsed from torch/csrc/inductor/aoti_model_container_runner_cuda.h + +// #pragma once + +// #include +// Targeting ../cuda/AOTIModelContainerRunnerCuda.java + + + + // namespace torch::inductor + + } diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 374fa367ba7..3bd7139e103 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -61,6 +61,7 @@ include = { "torch/torch.h", "torch/script.h", + "torch/csrc/inductor/aoti_model_container_runner.h", // For inclusion in JNI only, not parsed (compiler needs some complete definitions) "torch/csrc/jit/runtime/instruction.h", @@ -1784,7 +1785,7 @@ public void map(InfoMap infoMap) { } - //// @UniquePtr + //// Classes handled with @UniquePtr for (String opt: new String[] { "Adagrad", "Adam", "AdamW", "LBFGS", "RMSprop", "SGD" }) { infoMap .put(new Info("torch::optim::" + opt + "Options", "torch::optim::" + opt + "ParamState")) // Help qualification @@ -1917,7 +1918,13 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "torch::jit::GraphFunction::_set_initial_executor_execution_mode", "torch::jit::GraphFunction::_set_ignore_amp", "c10::detail::_str", "torch::jit::kJitOnlyOperatorTags", - "c10::IValue::Tag" // 2.2.0 make IValue::tag public, while IValue::Tag is supposed to be private. Bug ? Check if fixed in next release + "c10::IValue::Tag", // 2.2.0 make IValue::tag public, while IValue::Tag is supposed to be private. Bug ? Check if fixed in next release + + // Optional args of AOTModelContainerRun.run. Opaque types without apparent use in 2.2.0. 
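+          // (Presumably the optional stream and proxy-executor handle
+          // parameters of AOTIModelContainerRunner.run(); the mapped
+          // runners only expose the tensor-input overload.)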
+ "AOTInductorStreamOpaque", + "AOTInductorStreamHandle", + "AOTIProxyExecutorOpaque", + "AOTIProxyExecutorHandle" ).skip()); diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java index 4a98a1eaa85..0d361c16e36 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -40,6 +40,7 @@ "ATen/cudnn/Descriptors.h", "ATen/cudnn/Types.h", "c10/cuda/CUDAGuard.h", + "torch/csrc/inductor/aoti_model_container_runner_cuda.h", // For inclusion in JNI only, not parsed "ATen/cuda/CUDAGeneratorImpl.h", @@ -157,6 +158,8 @@ public void map(InfoMap infoMap) { .put(new Info("c10::cuda::CUDACachingAllocator::CheckpointDelta").immutable()) // at::DataPtr is not constructible + .put(new Info("c10::cuda::CUDACachingAllocator::kLargeBuffer").skip()) // Triggers UnsatisfiedLinkException as of 2.2.0 + .put(new Info( "at::native::Descriptor", "at::native::Descriptor", diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h index 4adfba64905..4195ee66a89 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h @@ -4,6 +4,7 @@ // ATen/cudnn/Descriptors.h // ATen/cudnn/Types.h // c10/cuda/CUDAGuard.h +// torch/csrc/inductor/aoti_model_container_runner_cuda.h #include "ATen/cuda/CUDAContextLight.h" #include "c10/cuda/CUDAStream.h" #include "ATen/cuda/CUDAContext.h" @@ -25,4 +26,5 @@ #include "c10/cuda/impl/CUDAGuardImpl.h" #include "ATen/cudnn/Descriptors.h" #include "ATen/cudnn/Types.h" -#include "c10/cuda/CUDAGuard.h" \ No newline at end of file +#include "c10/cuda/CUDAGuard.h" +#include "torch/csrc/inductor/aoti_model_container_runner_cuda.h" \ No newline at end of file diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h index dcaca411259..81706181751 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h @@ -1,6 +1,7 @@ // All files included by // #include // #include +// #include // as listed by g++ -H torch/torch.h torch/script.h // Excluding: // - the ones that fill at::meta at::native and at::_ops namespaces @@ -1423,5 +1424,8 @@ #include "torch/csrc/jit/frontend/versioned_symbols.h" #include "torch/csrc/jit/frontend/tree_views.h" #include "torch/csrc/jit/serialization/pickle.h" +// #include "torch/csrc/inductor/aoti_torch/c/shim.h" // model.so API, not part of libtorch API +// #include "torch/csrc/inductor/aoti_runtime/interface.h" // model.so API, not part of libtorch API +#include "torch/csrc/inductor/aoti_model_container_runner.h" #include "datasets.h" \ No newline at end of file From 6e8fc4409f4aa6663a2d0f1bc554cd4059685cb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Sat, 3 Feb 2024 08:56:22 +0100 Subject: [PATCH 06/24] Fix compilation error on Windows --- pytorch/cppbuild.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pytorch/cppbuild.sh b/pytorch/cppbuild.sh index ceecc5924a7..8974d45fa10 100755 --- a/pytorch/cppbuild.sh +++ b/pytorch/cppbuild.sh @@ -53,6 +53,10 @@ git checkout v$PYTORCH_VERSION git submodule update --init --recursive git 
submodule foreach --recursive 'git reset --hard' +# Fix version of this submodule for tag v2.2.0. Or won't compile on windows. +# Probably could be remove when upgrading PyTorch +(cd third_party/pocketfft; git checkout 9d3ab05a7fffbc71a492bc6a17be034e83e8f0fe) + # https://github.com/pytorch/pytorch/pull/66219 #patch -Np1 < ../../../pytorch.patch From 82cbc2cd176cd439e6a52aee0d67eac0a57b711e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Sat, 3 Feb 2024 12:09:28 +0100 Subject: [PATCH 07/24] Cleanup cppbuild.sh --- pytorch/cppbuild.sh | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/pytorch/cppbuild.sh b/pytorch/cppbuild.sh index 8974d45fa10..be7453e3509 100755 --- a/pytorch/cppbuild.sh +++ b/pytorch/cppbuild.sh @@ -53,13 +53,10 @@ git checkout v$PYTORCH_VERSION git submodule update --init --recursive git submodule foreach --recursive 'git reset --hard' -# Fix version of this submodule for tag v2.2.0. Or won't compile on windows. -# Probably could be remove when upgrading PyTorch +# Fix version of this submodule to allow compilation on windows. +# Probably could be remove when we upgrade to next version of PyTorch. (cd third_party/pocketfft; git checkout 9d3ab05a7fffbc71a492bc6a17be034e83e8f0fe) -# https://github.com/pytorch/pytorch/pull/66219 -#patch -Np1 < ../../../pytorch.patch - CPYTHON_HOST_PATH="$INSTALL_PATH/../../../cpython/cppbuild/$PLATFORM/host/" CPYTHON_PATH="$INSTALL_PATH/../../../cpython/cppbuild/$PLATFORM/" OPENBLAS_PATH="$INSTALL_PATH/../../../openblas/cppbuild/$PLATFORM/" @@ -169,24 +166,11 @@ sedinplace 's/ build_deps()/ build_deps(); sys.exit()/g' setup.py sedinplace 's/AND NOT DEFINED ENV{CUDAHOSTCXX}//g' cmake/public/cuda.cmake sedinplace 's/CMAKE_CUDA_FLAGS "/CMAKE_CUDA_FLAGS " --use-local-env /g' CMakeLists.txt -# work around some compiler bugs -sedinplace 's/!defined(__INTEL_COMPILER))/!defined(__INTEL_COMPILER) \&\& (__GNUC__ < 11))/g' third_party/XNNPACK/src/xnnpack/intrinsics-polyfill.h sedinplace 's/using ExpandingArrayDouble/public: using ExpandingArrayDouble/g' ./torch/csrc/api/include/torch/nn/options/pooling.h -sedinplace 's/typedef c10::variant/public: typedef c10::variant/g' ./torch/csrc/api/include/torch/nn/options/upsampling.h -sedinplace 's/std::copysign/copysignf/g' aten/src/ATen/native/cuda/*.cu -sedinplace 's/std::trunc/truncf/g' aten/src/ATen/native/cuda/*.cu -sedinplace 's/std::floor/floorf/g' aten/src/ATen/native/cuda/*.cu -sedinplace 's/std::ceil/ceilf/g' aten/src/ATen/native/cuda/*.cu -sedinplace 's/round(/roundf(/g' aten/src/ATen/native/cuda/*.cu -sedinplace 's/floor(/floorf(/g' aten/src/ATen/native/cuda/*.cu -sedinplace 's/ceil(/ceilf(/g' aten/src/ATen/native/cuda/*.cu -sedinplace '/#include /a\ -#include \ -' caffe2/utils/math_gpu.cu # allow setting the build directory and passing CUDA options sedinplace "s/BUILD_DIR = .build./BUILD_DIR = os.environ['BUILD_DIR'] if 'BUILD_DIR' in os.environ else 'build'/g" tools/setup_helpers/env.py -sedinplace "s/var.startswith(('BUILD_', 'USE_', 'CMAKE_'))/var.startswith(('BUILD_', 'USE_', 'CMAKE_', 'CUDA_'))/g" tools/setup_helpers/cmake.py +sedinplace 's/var.startswith(("BUILD_", "USE_", "CMAKE_"))/var.startswith(("BUILD_", "USE_", "CMAKE_", "CUDA_"))/g' tools/setup_helpers/cmake.py # allow resizing std::vector sedinplace 's/TensorIndex(c10::nullopt_t)/TensorIndex(c10::nullopt_t none = None)/g' aten/src/ATen/TensorIndexing.h From 00b84d299c555c2695bd0f2ad8f506cce34700b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: 
Sat, 3 Feb 2024 22:21:03 +0100
Subject: [PATCH 08/24] Fix linking error on Windows

---
 pytorch/cppbuild.sh                                           | 2 +-
 pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java   | 2 +-
 pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/pytorch/cppbuild.sh b/pytorch/cppbuild.sh
index be7453e3509..fe9e23675c0 100755
--- a/pytorch/cppbuild.sh
+++ b/pytorch/cppbuild.sh
@@ -54,7 +54,7 @@ git submodule update --init --recursive
 git submodule foreach --recursive 'git reset --hard'
 
 # Fix version of this submodule to allow compilation on windows.
-# Probably could be remove when we upgrade to next version of PyTorch.
+# Probably could be removed when we upgrade to next version of PyTorch.
 (cd third_party/pocketfft; git checkout 9d3ab05a7fffbc71a492bc6a17be034e83e8f0fe)
 
 CPYTHON_HOST_PATH="$INSTALL_PATH/../../../cpython/cppbuild/$PLATFORM/host/"
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java
index 3929f582062..ca7d939c01e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java
@@ -68327,7 +68327,7 @@ scalar_t sf(scalar_t x, scalar_t y)
         @StdString String op_name,
         @Const @ByRef StringIValueMap extra_args);
 
-@Namespace("torch::profiler::impl") public static native @StdString BytePointer shapeToStr(@Cast("const std::vector*") @ByRef LongVector shape);
+
 
  // namespace impl
  // namespace profiler
diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java
index 3bd7139e103..5f6fec8043c 100644
--- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java
+++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java
@@ -2358,8 +2358,8 @@ We need either to put an annotation info on each member, or javaName("@NoOffset
                 "torch::jit::ClassDef::create",
                 "torch::jit::Code::operator <<(std::ostream&, const torch::jit::Code&)", // The friend operator is truly a member of torch::jit and not torch::jit::Code
                 "torch::profiler::impl::getNvtxStr",
-                "c10::merge_primitive" // templated function with some specializations. Will have to figure what
-                                       // instances to create if needed.
+                "torch::profiler::impl::shapeToStr",
+                "c10::merge_primitive" // templated function with some specializations. Will have to figure out what instances to create if needed.
).skip()); //// Aliases necessary because of Parser limited namespace resolution From 79348b0f817e67a07f981ae317decb04005efab1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Sun, 4 Feb 2024 09:38:56 +0100 Subject: [PATCH 09/24] Moved AOTIModelContainerRunnerCuda to main presets --- pytorch/pytorch.patch | 738 ------------------ .../AOTIModelContainerRunnerCuda.java | 20 +- .../org/bytedeco/pytorch/global/torch.java | 12 + .../bytedeco/pytorch/global/torch_cuda.java | 12 - .../org/bytedeco/pytorch/presets/torch.java | 2 +- .../bytedeco/pytorch/presets/torch_cuda.java | 1 - .../pytorch/presets/torch_cuda_include.h | 1 - .../bytedeco/pytorch/presets/torch_include.h | 3 + 8 files changed, 24 insertions(+), 765 deletions(-) delete mode 100644 pytorch/pytorch.patch rename pytorch/src/gen/java/org/bytedeco/pytorch/{cuda => }/AOTIModelContainerRunnerCuda.java (83%) diff --git a/pytorch/pytorch.patch b/pytorch/pytorch.patch deleted file mode 100644 index ef316b44f0f..00000000000 --- a/pytorch/pytorch.patch +++ /dev/null @@ -1,738 +0,0 @@ -diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml -index f33eda2f94..a92a79a684 100644 ---- a/.github/workflows/lint.yml -+++ b/.github/workflows/lint.yml -@@ -97,7 +97,7 @@ jobs: - - name: Ensure no direct cub include - if: always() - run: | -- (! git --no-pager grep -I -no $'#include - #include - -+#include -+ -+#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() -+ -+#include -+ -+#else -+ - // include cub in a safe manner, see: - // https://github.com/pytorch/pytorch/pull/55292 - #undef CUB_NS_POSTFIX //undef to avoid redefinition warnings - #undef CUB_NS_PREFIX --#define CUB_NS_PREFIX namespace at { namespace cuda { namespace detail { --#define CUB_NS_POSTFIX }}} -+#undef CUB_NS_QUALIFIER -+#define CUB_NS_PREFIX namespace at_cuda_detail { -+#define CUB_NS_POSTFIX } -+#define CUB_NS_QUALIFIER ::at_cuda_detail::cub - #include - #undef CUB_NS_POSTFIX - #undef CUB_NS_PREFIX -+#undef CUB_NS_QUALIFIER -+ -+#endif - - #include - #include -@@ -33,16 +46,40 @@ - #define NO_ROCM(x) - #else - #define NO_ROCM(x) x -+#endif - --namespace at { namespace native { -+#if !defined(USE_ROCM) && !CUB_SUPPORTS_NV_BFLOAT16() -+ -+namespace at_cuda_detail { -+// backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16 -+ -+template <> -+struct cub::FpLimits -+{ -+ static __host__ __device__ __forceinline__ c10::BFloat16 Max() { -+ unsigned short max_word = 0x7F7F; -+ return reinterpret_cast(max_word); -+ } -+ -+ static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() { -+ unsigned short lowest_word = 0xFF7F; -+ return reinterpret_cast(lowest_word); -+ } -+}; - --namespace cub = at::cuda::detail::cub; -+template <> struct cub::NumericTraits: cub::BaseTraits {}; -+} -+#endif - -+#if !defined(USE_ROCM) -+namespace at { namespace native { -+namespace cub = ::at_cuda_detail::cub; - }} - #endif - - namespace at { - namespace cuda { -+namespace cub { - - namespace detail { - -@@ -55,44 +92,17 @@ struct cuda_type { - using type = __half; - }; - --#if defined(CUDA_VERSION) && CUDA_VERSION >= 11050 --// cub sort support for __nv_bfloat16 is added to cub 1.13 in --// https://github.com/NVIDIA/cub/pull/306 and according to --// https://github.com/NVIDIA/cub#releases, 1.13 is included in --// CUDA Toolkit 11.5 -+#if CUB_SUPPORTS_NV_BFLOAT16() - --// waiting for https://github.com/NVIDIA/cub/pull/306 to land on CUDA - template<> - struct cuda_type { - using type = __nv_bfloat16; - }; - --#elif !defined(__HIP_PLATFORM_HCC__) -- --// backport 
https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16 -- --template <> --struct cub::FpLimits --{ -- static __host__ __device__ __forceinline__ c10::BFloat16 Max() { -- unsigned short max_word = 0x7F7F; -- return reinterpret_cast(max_word); -- } -- -- static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() { -- unsigned short lowest_word = 0xFF7F; -- return reinterpret_cast(lowest_word); -- } --}; -- --template <> struct cub::NumericTraits: cub::BaseTraits {}; -- - #endif - - } // namespace detail - --namespace cub { -- - inline int get_num_bits(uint64_t max_key) { - int num_bits = 1; - while (max_key > 1) { -@@ -115,11 +125,11 @@ static inline void sort_keys( - key_t_ *keys_out_ = reinterpret_cast(keys_out); - - if (descending) { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortKeysDescending, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeysDescending, - keys_in_, keys_out_, n, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); - } else { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortKeys, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeys, - keys_in_, keys_out_, n, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); - } -@@ -147,11 +157,11 @@ static inline void sort_pairs( - key_t_ *keys_out_ = reinterpret_cast(keys_out); - - if (descending) { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortPairsDescending, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairsDescending, - keys_in_, keys_out_, values_in, values_out, n, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); - } else { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortPairs, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairs, - keys_in_, keys_out_, values_in, values_out, n, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); - } -@@ -183,12 +193,12 @@ static inline void segmented_sort_pairs( - key_t_ *keys_out_ = reinterpret_cast(keys_out); - - if (descending) { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending, - keys_in_, keys_out_, values_in, values_out, - num_elements, num_segments, begin_offsets, end_offsets, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); - } else { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceSegmentedRadixSort::SortPairs, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairs, - keys_in_, keys_out_, values_in, values_out, - num_elements, num_segments, begin_offsets, end_offsets, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); -@@ -240,7 +250,7 @@ inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT - // so split at int_max/2 - constexpr int max_cub_size = std::numeric_limits::max() / 2 + 1; // 2**30 - int size_cub = std::min(num_items, max_cub_size); -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::InclusiveScan, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, - input, - output, - scan_op, -@@ -260,7 +270,7 @@ inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT - first_elem_ptr, - scan_op); - C10_CUDA_KERNEL_LAUNCH_CHECK(); -- using ArgIndexInputIterator = NO_ROCM(detail)::cub::ArgIndexInputIterator; -+ using ArgIndexInputIterator = NO_ROCM(at_cuda_detail)::cub::ArgIndexInputIterator; - using tuple = typename ArgIndexInputIterator::value_type; - auto input_iter_transform = [=] __device__ (const tuple 
&x)->input_t { - if (x.key == 0) { -@@ -269,9 +279,9 @@ inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT - return x.value; - } - }; -- auto input_ = NO_ROCM(detail)::cub::TransformInputIterator( -+ auto input_ = NO_ROCM(at_cuda_detail)::cub::TransformInputIterator( - ArgIndexInputIterator(input + i), input_iter_transform); -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::InclusiveScan, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, - input_, - output + i, - scan_op, -@@ -287,7 +297,7 @@ inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT - // so split at int_max/2 - constexpr int max_cub_size = std::numeric_limits::max() / 2 + 1; // 2**30 - int size_cub = std::min(num_items, max_cub_size); -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::ExclusiveScan, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, - input, - output, - scan_op, -@@ -309,7 +319,7 @@ inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT - C10_CUDA_KERNEL_LAUNCH_CHECK(); - auto input_ = impl::chained_iterator{ - input + i, first_elem_ptr}; -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::InclusiveScan, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, - input_, - output + i, - scan_op, -@@ -322,7 +332,7 @@ template::max(), - "cub unique does not support more than INT_MAX elements"); -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceSelect::Unique, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::Unique, - input, output, num_selected_out, num_items, at::cuda::getCurrentCUDAStream()); - } - -diff --git a/aten/src/ATen/cuda/cub_definitions.cuh b/aten/src/ATen/cuda/cub_definitions.cuh -new file mode 100644 -index 0000000000000..61119fc174587 ---- /dev/null -+++ b/aten/src/ATen/cuda/cub_definitions.cuh -@@ -0,0 +1,29 @@ -+#pragma once -+ -+#if !defined(USE_ROCM) -+#include // for CUDA_VERSION -+#endif -+ -+#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 -+#include -+#else -+#define CUB_VERSION 0 -+#endif -+ -+// cub sort support for __nv_bfloat16 is added to cub 1.13 in: -+// https://github.com/NVIDIA/cub/pull/306 -+#if CUB_VERSION >= 101300 -+#define CUB_SUPPORTS_NV_BFLOAT16() true -+#else -+#define CUB_SUPPORTS_NV_BFLOAT16() false -+#endif -+ -+// cub sort support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in: -+// https://github.com/NVIDIA/cub/pull/326 -+// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake -+// starting from CUDA 11.5 -+#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE) -+#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true -+#else -+#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false -+#endif -diff --git a/caffe2/core/context_gpu.cu b/caffe2/core/context_gpu.cu -index c2b89945ad..6d53740091 100644 ---- a/caffe2/core/context_gpu.cu -+++ b/caffe2/core/context_gpu.cu -@@ -21,6 +21,7 @@ - #include "caffe2/core/logging.h" - #include "caffe2/core/tensor.h" - #include "caffe2/utils/string_utils.h" -+#include "caffe2/utils/cub_namespace.cuh" - - C10_DEFINE_string( - caffe2_cuda_memory_pool, -diff --git a/caffe2/operators/accuracy_op.cu b/caffe2/operators/accuracy_op.cu -index f06663d71a..29df54e752 100644 ---- a/caffe2/operators/accuracy_op.cu -+++ b/caffe2/operators/accuracy_op.cu -@@ -3,6 +3,7 @@ - #include "caffe2/utils/GpuAtomics.cuh" - #include "caffe2/utils/math.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - namespace caffe2 { -diff --git a/caffe2/operators/affine_channel_op.cu 
b/caffe2/operators/affine_channel_op.cu -index adf4ac55c0..efae0a3fc6 100644 ---- a/caffe2/operators/affine_channel_op.cu -+++ b/caffe2/operators/affine_channel_op.cu -@@ -1,5 +1,6 @@ - #include "caffe2/operators/affine_channel_op.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - #include "caffe2/core/context_gpu.h" -diff --git a/caffe2/operators/arg_ops.cu b/caffe2/operators/arg_ops.cu -index 7e90d25b83..56deaa6363 100644 ---- a/caffe2/operators/arg_ops.cu -+++ b/caffe2/operators/arg_ops.cu -@@ -2,8 +2,8 @@ - - #include - -+#include "caffe2/utils/cub_namespace.cuh" - #include --#include - - #include "caffe2/core/common_gpu.h" - #include "caffe2/core/context_gpu.h" -diff --git a/caffe2/operators/batch_moments_op.cu b/caffe2/operators/batch_moments_op.cu -index 4b693b5c04..81359f6440 100644 ---- a/caffe2/operators/batch_moments_op.cu -+++ b/caffe2/operators/batch_moments_op.cu -@@ -1,5 +1,6 @@ - #include "caffe2/operators/batch_moments_op.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - #include "caffe2/core/context_gpu.h" -diff --git a/caffe2/operators/batch_sparse_to_dense_op.cu b/caffe2/operators/batch_sparse_to_dense_op.cu -index aea2035a5d..3e7ad8af9a 100644 ---- a/caffe2/operators/batch_sparse_to_dense_op.cu -+++ b/caffe2/operators/batch_sparse_to_dense_op.cu -@@ -1,5 +1,6 @@ - #include "caffe2/operators/batch_sparse_to_dense_op.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - #include "caffe2/core/context_gpu.h" -diff --git a/caffe2/operators/boolean_mask_ops.cu b/caffe2/operators/boolean_mask_ops.cu -index 214b7c13ba..501dd3b191 100644 ---- a/caffe2/operators/boolean_mask_ops.cu -+++ b/caffe2/operators/boolean_mask_ops.cu -@@ -2,8 +2,8 @@ - - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/boolean_mask_ops.h" -- - #include -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/cross_entropy_op.cu b/caffe2/operators/cross_entropy_op.cu -index 380e80399f..c23f05f8e5 100644 ---- a/caffe2/operators/cross_entropy_op.cu -+++ b/caffe2/operators/cross_entropy_op.cu -@@ -4,6 +4,7 @@ - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/cross_entropy_op.h" - #include "caffe2/operators/operator_fallback_gpu.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/distance_op.cu b/caffe2/operators/distance_op.cu -index 3a8bb337d5..a360166854 100644 ---- a/caffe2/operators/distance_op.cu -+++ b/caffe2/operators/distance_op.cu -@@ -4,6 +4,7 @@ - #include "caffe2/operators/distance_op.h" - #include "caffe2/utils/conversions.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - namespace caffe2 { -diff --git a/caffe2/operators/elementwise_div_op.cu b/caffe2/operators/elementwise_div_op.cu -index 42b103a0f1..33118a8f5e 100644 ---- a/caffe2/operators/elementwise_div_op.cu -+++ b/caffe2/operators/elementwise_div_op.cu -@@ -3,8 +3,8 @@ - #include - #include - -+#include "caffe2/utils/cub_namespace.cuh" - #include --#include - - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/elementwise_ops_utils.h" -diff --git a/caffe2/operators/elementwise_linear_op.cu b/caffe2/operators/elementwise_linear_op.cu -index cc49115bff..8f749644b2 100644 ---- a/caffe2/operators/elementwise_linear_op.cu -+++ b/caffe2/operators/elementwise_linear_op.cu -@@ -5,6 +5,7 @@ - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/operator_fallback_gpu.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - namespace 
caffe2 { -diff --git a/caffe2/operators/elementwise_mul_op.cu b/caffe2/operators/elementwise_mul_op.cu -index bdbf760cf9..1991b8b513 100644 ---- a/caffe2/operators/elementwise_mul_op.cu -+++ b/caffe2/operators/elementwise_mul_op.cu -@@ -3,8 +3,8 @@ - #include - #include - -+#include "caffe2/utils/cub_namespace.cuh" - #include --#include - - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/elementwise_ops_utils.h" -diff --git a/caffe2/operators/elementwise_ops.cu b/caffe2/operators/elementwise_ops.cu -index c9ced33cf8..932bd5dafd 100644 ---- a/caffe2/operators/elementwise_ops.cu -+++ b/caffe2/operators/elementwise_ops.cu -@@ -1,5 +1,6 @@ - #include "caffe2/operators/elementwise_ops.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - #include - #include -diff --git a/caffe2/operators/find_op.cu b/caffe2/operators/find_op.cu -index f8ff2bab16..0418a71fbc 100644 ---- a/caffe2/operators/find_op.cu -+++ b/caffe2/operators/find_op.cu -@@ -1,6 +1,7 @@ - #include - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/find_op.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/generate_proposals_op.cu b/caffe2/operators/generate_proposals_op.cu -index 64518538b6..a4207f8653 100644 ---- a/caffe2/operators/generate_proposals_op.cu -+++ b/caffe2/operators/generate_proposals_op.cu -@@ -5,6 +5,7 @@ - #include "caffe2/operators/generate_proposals_op_util_boxes.h" // BBOX_XFORM_CLIP_DEFAULT - #include "caffe2/operators/generate_proposals_op_util_nms.h" - #include "caffe2/operators/generate_proposals_op_util_nms_gpu.h" -+#include "caffe2/utils/cub_namespace.cuh" - - #ifdef __HIP_PLATFORM_HCC__ - #include -diff --git a/caffe2/operators/normalize_ops.cu b/caffe2/operators/normalize_ops.cu -index 26df05308d..e4d1f34b75 100644 ---- a/caffe2/operators/normalize_ops.cu -+++ b/caffe2/operators/normalize_ops.cu -@@ -5,6 +5,7 @@ - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/normalize_l1_op.h" - #include "caffe2/operators/normalize_op.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/one_hot_ops.cu b/caffe2/operators/one_hot_ops.cu -index e521b3dd09..87e8196765 100644 ---- a/caffe2/operators/one_hot_ops.cu -+++ b/caffe2/operators/one_hot_ops.cu -@@ -2,6 +2,7 @@ - - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/one_hot_ops.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/pack_segments.cu b/caffe2/operators/pack_segments.cu -index 7475100fd3..372638abdd 100644 ---- a/caffe2/operators/pack_segments.cu -+++ b/caffe2/operators/pack_segments.cu -@@ -1,6 +1,7 @@ - #include - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/pack_segments.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/prelu_op.cu b/caffe2/operators/prelu_op.cu -index 745a393f07..6303b70b4a 100644 ---- a/caffe2/operators/prelu_op.cu -+++ b/caffe2/operators/prelu_op.cu -@@ -1,6 +1,7 @@ - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/prelu_op.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - namespace caffe2 { -diff --git a/caffe2/operators/reduce_front_back_max_ops.cu b/caffe2/operators/reduce_front_back_max_ops.cu -index 3c6ee7f0ae..c41d5ad579 100644 ---- a/caffe2/operators/reduce_front_back_max_ops.cu -+++ b/caffe2/operators/reduce_front_back_max_ops.cu -@@ -1,6 +1,7 @@ - #include - #include "caffe2/core/context_gpu.h" 
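
Every caffe2 hunk in this stretch makes the same one-line change: include caffe2/utils/cub_namespace.cuh alongside the cub header each file already pulls in. What that buys, condensed to a sketch (the BlockReduce alias at the end is a hypothetical use for illustration, not taken from the patch):

#include <cub/block/block_reduce.cuh>   // with -DCUB_WRAPPED_NAMESPACE=at_cuda_detail
                                        // this declares ::at_cuda_detail::cub::*

// caffe2/utils/cub_namespace.cuh boils down to:
namespace caffe2 {
namespace cub = ::at_cuda_detail::cub;  // i.e. ::CUB_WRAPPED_NAMESPACE::cub
}

namespace caffe2 {
// Unqualified cub:: inside caffe2 now resolves through the alias; without it,
// lookup fails because the global ::cub namespace no longer exists.
using BlockReduceF32 = cub::BlockReduce<float, 128>;
}  // namespace caffe2
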
- #include "caffe2/operators/reduce_front_back_max_ops.h" -+#include "caffe2/utils/cub_namespace.cuh" - - #ifdef __HIP_PLATFORM_HCC__ - #include -diff --git a/caffe2/operators/reduce_front_back_sum_mean_ops.cu b/caffe2/operators/reduce_front_back_sum_mean_ops.cu -index 476596f084..a7ad6dd500 100644 ---- a/caffe2/operators/reduce_front_back_sum_mean_ops.cu -+++ b/caffe2/operators/reduce_front_back_sum_mean_ops.cu -@@ -1,6 +1,7 @@ - #include - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/reduce_front_back_sum_mean_ops.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/reduction_ops.cu b/caffe2/operators/reduction_ops.cu -index ba55a66de5..9649b85d01 100644 ---- a/caffe2/operators/reduction_ops.cu -+++ b/caffe2/operators/reduction_ops.cu -@@ -2,7 +2,7 @@ - #include "caffe2/operators/reduction_ops.h" - #include "caffe2/utils/conversions.h" - --#include -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/rmac_regions_op.cu b/caffe2/operators/rmac_regions_op.cu -index 0ec2dd351a..de2b2553a7 100644 ---- a/caffe2/operators/rmac_regions_op.cu -+++ b/caffe2/operators/rmac_regions_op.cu -@@ -1,4 +1,5 @@ - #include -+#include "caffe2/utils/cub_namespace.cuh" - - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/rmac_regions_op.h" -@@ -10,6 +11,9 @@ - #ifdef __HIP_PLATFORM_HCC__ - namespace rocprim { - #else -+#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() -+namespace at_cuda_detail { -+#endif - namespace cub { - #endif - -@@ -22,6 +26,9 @@ inline __host__ __device__ bool operator<( - } - - } // namespace cub -+#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() -+} // namespace at_cuda_detail -+#endif - - namespace caffe2 { - -diff --git a/caffe2/operators/segment_reduction_op_gpu.cuh b/caffe2/operators/segment_reduction_op_gpu.cuh -index ffe834e886..eebade352e 100644 ---- a/caffe2/operators/segment_reduction_op_gpu.cuh -+++ b/caffe2/operators/segment_reduction_op_gpu.cuh -@@ -1,3 +1,4 @@ -+#include "caffe2/utils/cub_namespace.cuh" - #include - #include - #include -diff --git a/caffe2/operators/sequence_ops.cu b/caffe2/operators/sequence_ops.cu -index cc34effd3f..2ceb5236ef 100644 ---- a/caffe2/operators/sequence_ops.cu -+++ b/caffe2/operators/sequence_ops.cu -@@ -1,6 +1,7 @@ - #include - - #include -+#include "caffe2/utils/cub_namespace.cuh" - - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/sequence_ops.h" -diff --git a/caffe2/operators/softmax_ops.cu b/caffe2/operators/softmax_ops.cu -index 51c0cbc2bf..ebf0700c9e 100644 ---- a/caffe2/operators/softmax_ops.cu -+++ b/caffe2/operators/softmax_ops.cu -@@ -5,6 +5,7 @@ - #include "caffe2/operators/softmax_op.h" - #include "caffe2/operators/softmax_with_loss_op.h" - #include "caffe2/operators/spatial_softmax_with_loss_op.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/spatial_batch_norm_op_impl.cuh b/caffe2/operators/spatial_batch_norm_op_impl.cuh -index edc076c7d7..6fdb4c63f8 100644 ---- a/caffe2/operators/spatial_batch_norm_op_impl.cuh -+++ b/caffe2/operators/spatial_batch_norm_op_impl.cuh -@@ -5,8 +5,8 @@ - - #include - -+#include "caffe2/utils/cub_namespace.cuh" - #include --#include - - #include "caffe2/core/context_gpu.h" - #include "caffe2/utils/math.h" -diff --git a/caffe2/sgd/adagrad_fused_op_gpu.cu b/caffe2/sgd/adagrad_fused_op_gpu.cu -index e2bf91c880..a7057c8737 100644 ---- a/caffe2/sgd/adagrad_fused_op_gpu.cu -+++ b/caffe2/sgd/adagrad_fused_op_gpu.cu -@@ 
-2,6 +2,7 @@ - #include - #include - -+#include "caffe2/utils/cub_namespace.cuh" - #include - #include "caffe2/sgd/adagrad_fused_op_gpu.cuh" - #include "caffe2/utils/math.h" -diff --git a/caffe2/sgd/adagrad_op_gpu.cu b/caffe2/sgd/adagrad_op_gpu.cu -index 8abb3376ca..b80d29700c 100644 ---- a/caffe2/sgd/adagrad_op_gpu.cu -+++ b/caffe2/sgd/adagrad_op_gpu.cu -@@ -4,6 +4,7 @@ - #include "caffe2/core/common_gpu.h" - #include "caffe2/core/context_gpu.h" - #include "caffe2/sgd/adagrad_op.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/sgd/adam_op_gpu.cu b/caffe2/sgd/adam_op_gpu.cu -index 42ab975faa..6f9c323420 100644 ---- a/caffe2/sgd/adam_op_gpu.cu -+++ b/caffe2/sgd/adam_op_gpu.cu -@@ -2,6 +2,7 @@ - #include "caffe2/core/common_gpu.h" - #include "caffe2/core/context_gpu.h" - #include "caffe2/sgd/adam_op.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/utils/cub_namespace.cuh b/caffe2/utils/cub_namespace.cuh -new file mode 100644 -index 0000000000000..188a9936f9c6e ---- /dev/null -+++ b/caffe2/utils/cub_namespace.cuh -@@ -0,0 +1,17 @@ -+#pragma once -+ -+// cub sort support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in: -+// https://github.com/NVIDIA/cub/pull/326 -+// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake -+// starting from CUDA 11.5 -+#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE) -+#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true -+#else -+#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false -+#endif -+ -+#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() -+namespace caffe2 { -+namespace cub = ::CUB_WRAPPED_NAMESPACE::cub; -+} -+#endif -diff --git a/caffe2/utils/math/reduce.cu b/caffe2/utils/math/reduce.cu -index 8c40c5d2b0..e8a8b768eb 100644 ---- a/caffe2/utils/math/reduce.cu -+++ b/caffe2/utils/math/reduce.cu -@@ -5,9 +5,8 @@ - #include - #include - #include -- -+#include "caffe2/utils/cub_namespace.cuh" - #include --#include - - #include - #include -diff --git a/caffe2/utils/math/reduce.cuh b/caffe2/utils/math/reduce.cuh -index 0c43ad45a3..18bdca11b9 100644 ---- a/caffe2/utils/math/reduce.cuh -+++ b/caffe2/utils/math/reduce.cuh -@@ -1,8 +1,8 @@ - #ifndef CAFFE2_UTILS_MATH_REDUCE_CUH_ - #define CAFFE2_UTILS_MATH_REDUCE_CUH_ - -+#include "caffe2/utils/cub_namespace.cuh" - #include --#include - - #include "caffe2/core/common_gpu.h" - -diff --git a/caffe2/utils/math_gpu.cu b/caffe2/utils/math_gpu.cu -index 7f3bb8eea6..54fbcca1d4 100644 ---- a/caffe2/utils/math_gpu.cu -+++ b/caffe2/utils/math_gpu.cu -@@ -7,8 +7,9 @@ - #include - #include - --#include - #include -+#include -+#include "caffe2/utils/cub_namespace.cuh" - - #include - #include -diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake -index ca560288a4..5fd189e4a8 100644 ---- a/cmake/Dependencies.cmake -+++ b/cmake/Dependencies.cmake -@@ -1622,6 +1622,12 @@ if(NOT INTERN_BUILD_MOBILE) - list(APPEND CUDA_NVCC_FLAGS "-Xcompiler" "-fPIC") - endif() - -+ # use cub in a safe manner, see: -+ # https://github.com/pytorch/pytorch/pull/55292 -+ if(NOT ${CUDA_VERSION} LESS 11.5) -+ list(APPEND CUDA_NVCC_FLAGS "-DCUB_WRAPPED_NAMESPACE=at_cuda_detail") -+ endif() -+ - if(CUDA_HAS_FP16 OR NOT ${CUDA_VERSION} LESS 7.5) - message(STATUS "Found CUDA with FP16 support, compiling with torch.cuda.HalfTensor") - list(APPEND CUDA_NVCC_FLAGS "-DCUDA_HAS_FP16=1" "-D__CUDA_NO_HALF_OPERATORS__" "-D__CUDA_NO_HALF_CONVERSIONS__" diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AOTIModelContainerRunnerCuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCuda.java similarity index 83% rename from pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AOTIModelContainerRunnerCuda.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCuda.java index 5bbc854a2be..e336d8136dd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AOTIModelContainerRunnerCuda.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCuda.java @@ -1,14 +1,12 @@ // Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE -package org.bytedeco.pytorch.cuda; - -import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.cuda.functions.*; -import org.bytedeco.pytorch.Error; -import org.bytedeco.pytorch.global.torch.DeviceType; -import org.bytedeco.pytorch.global.torch.ScalarType; -import org.bytedeco.pytorch.global.torch.MemoryFormat; +package org.bytedeco.pytorch; + import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,13 +14,11 @@ import static org.bytedeco.javacpp.presets.javacpp.*; import static org.bytedeco.openblas.global.openblas_nolapack.*; import static org.bytedeco.openblas.global.openblas.*; -import org.bytedeco.pytorch.*; -import static org.bytedeco.pytorch.global.torch.*; -import static org.bytedeco.pytorch.global.torch_cuda.*; +import static org.bytedeco.pytorch.global.torch.*; -@Namespace("torch::inductor") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +@Namespace("torch::inductor") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class AOTIModelContainerRunnerCuda extends AOTIModelContainerRunner { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
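
For reference, the C++ class behind these bindings loads a shared object compiled ahead of time by AOTInductor and runs it on CUDA. A minimal sketch, assuming the PyTorch 2.2-era signatures; the "model.so" path and the helper name run_aoti are illustrative only:

#include <ATen/ATen.h>
#include <torch/csrc/inductor/aoti_model_container_runner_cuda.h>
#include <vector>

// Load an AOTInductor-compiled model container and execute it on CUDA tensors.
std::vector<at::Tensor> run_aoti(std::vector<at::Tensor>& inputs) {
  torch::inductor::AOTIModelContainerRunnerCuda runner("model.so");
  return runner.run(inputs);  // one vector of output tensors per call
}
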
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index ca7d939c01e..7d754eefd6c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -79724,6 +79724,18 @@ scalar_t sf(scalar_t x, scalar_t y) // #endif +// Parsed from torch/csrc/inductor/aoti_model_container_runner_cuda.h + +// #pragma once + +// #include +// Targeting ../AOTIModelContainerRunnerCuda.java + + + + // namespace torch::inductor + + // Parsed from datasets.h /* diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java index 66270cd5580..82a41460be3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java @@ -1249,16 +1249,4 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // namespace c10 -// Parsed from torch/csrc/inductor/aoti_model_container_runner_cuda.h - -// #pragma once - -// #include -// Targeting ../cuda/AOTIModelContainerRunnerCuda.java - - - - // namespace torch::inductor - - } diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 5f6fec8043c..1e1439f640c 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -61,7 +61,7 @@ include = { "torch/torch.h", "torch/script.h", - "torch/csrc/inductor/aoti_model_container_runner.h", + "torch/csrc/inductor/aoti_model_container_runner_cuda.h", // For inclusion in JNI only, not parsed (compiler needs some complete definitions) "torch/csrc/jit/runtime/instruction.h", diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java index 0d361c16e36..078a94201ea 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -40,7 +40,6 @@ "ATen/cudnn/Descriptors.h", "ATen/cudnn/Types.h", "c10/cuda/CUDAGuard.h", - "torch/csrc/inductor/aoti_model_container_runner_cuda.h", // For inclusion in JNI only, not parsed "ATen/cuda/CUDAGeneratorImpl.h", diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h index 4195ee66a89..6cd5e1eabdb 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h @@ -27,4 +27,3 @@ #include "ATen/cudnn/Descriptors.h" #include "ATen/cudnn/Types.h" #include "c10/cuda/CUDAGuard.h" -#include "torch/csrc/inductor/aoti_model_container_runner_cuda.h" \ No newline at end of file diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h index 81706181751..58e3f949fb3 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h @@ -1427,5 +1427,8 @@ // #include "torch/csrc/inductor/aoti_torch/c/shim.h" // model.so API, not part of libtorch API // #include "torch/csrc/inductor/aoti_runtime/interface.h" // model.so API, 
not part of libtorch API #include "torch/csrc/inductor/aoti_model_container_runner.h" +// Not in torch_cuda_include because doesn't need cuda includes or linkage +// + protected field AOTIModelContainerRunner::model_so is not exported, so won't link on windows +#include "torch/csrc/inductor/aoti_model_container_runner_cuda.h" #include "datasets.h" \ No newline at end of file From e6e6aae3d12276c7f65959c6d44e1ab09bd461bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Sun, 4 Feb 2024 12:29:52 +0100 Subject: [PATCH 10/24] Revert "Moved AOTIModelContainerRunnerCuda to main presets" This reverts commit 79348b0f817e67a07f981ae317decb04005efab1. --- pytorch/pytorch.patch | 738 ++++++++++++++++++ .../AOTIModelContainerRunnerCuda.java | 20 +- .../org/bytedeco/pytorch/global/torch.java | 12 - .../bytedeco/pytorch/global/torch_cuda.java | 12 + .../org/bytedeco/pytorch/presets/torch.java | 2 +- .../bytedeco/pytorch/presets/torch_cuda.java | 1 + .../pytorch/presets/torch_cuda_include.h | 1 + .../bytedeco/pytorch/presets/torch_include.h | 3 - 8 files changed, 765 insertions(+), 24 deletions(-) create mode 100644 pytorch/pytorch.patch rename pytorch/src/gen/java/org/bytedeco/pytorch/{ => cuda}/AOTIModelContainerRunnerCuda.java (83%) diff --git a/pytorch/pytorch.patch b/pytorch/pytorch.patch new file mode 100644 index 00000000000..ef316b44f0f --- /dev/null +++ b/pytorch/pytorch.patch @@ -0,0 +1,738 @@ +diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml +index f33eda2f94..a92a79a684 100644 +--- a/.github/workflows/lint.yml ++++ b/.github/workflows/lint.yml +@@ -97,7 +97,7 @@ jobs: + - name: Ensure no direct cub include + if: always() + run: | +- (! git --no-pager grep -I -no $'#include + #include + ++#include ++ ++#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() ++ ++#include ++ ++#else ++ + // include cub in a safe manner, see: + // https://github.com/pytorch/pytorch/pull/55292 + #undef CUB_NS_POSTFIX //undef to avoid redefinition warnings + #undef CUB_NS_PREFIX +-#define CUB_NS_PREFIX namespace at { namespace cuda { namespace detail { +-#define CUB_NS_POSTFIX }}} ++#undef CUB_NS_QUALIFIER ++#define CUB_NS_PREFIX namespace at_cuda_detail { ++#define CUB_NS_POSTFIX } ++#define CUB_NS_QUALIFIER ::at_cuda_detail::cub + #include + #undef CUB_NS_POSTFIX + #undef CUB_NS_PREFIX ++#undef CUB_NS_QUALIFIER ++ ++#endif + + #include + #include +@@ -33,16 +46,40 @@ + #define NO_ROCM(x) + #else + #define NO_ROCM(x) x ++#endif + +-namespace at { namespace native { ++#if !defined(USE_ROCM) && !CUB_SUPPORTS_NV_BFLOAT16() ++ ++namespace at_cuda_detail { ++// backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16 ++ ++template <> ++struct cub::FpLimits ++{ ++ static __host__ __device__ __forceinline__ c10::BFloat16 Max() { ++ unsigned short max_word = 0x7F7F; ++ return reinterpret_cast(max_word); ++ } ++ ++ static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() { ++ unsigned short lowest_word = 0xFF7F; ++ return reinterpret_cast(lowest_word); ++ } ++}; + +-namespace cub = at::cuda::detail::cub; ++template <> struct cub::NumericTraits: cub::BaseTraits {}; ++} ++#endif + ++#if !defined(USE_ROCM) ++namespace at { namespace native { ++namespace cub = ::at_cuda_detail::cub; + }} + #endif + + namespace at { + namespace cuda { ++namespace cub { + + namespace detail { + +@@ -55,44 +92,17 @@ struct cuda_type { + using type = __half; + }; + +-#if defined(CUDA_VERSION) && CUDA_VERSION >= 11050 +-// cub sort support for __nv_bfloat16 is added to cub 1.13 in +-// 
https://github.com/NVIDIA/cub/pull/306 and according to +-// https://github.com/NVIDIA/cub#releases, 1.13 is included in +-// CUDA Toolkit 11.5 ++#if CUB_SUPPORTS_NV_BFLOAT16() + +-// waiting for https://github.com/NVIDIA/cub/pull/306 to land on CUDA + template<> + struct cuda_type { + using type = __nv_bfloat16; + }; + +-#elif !defined(__HIP_PLATFORM_HCC__) +- +-// backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16 +- +-template <> +-struct cub::FpLimits +-{ +- static __host__ __device__ __forceinline__ c10::BFloat16 Max() { +- unsigned short max_word = 0x7F7F; +- return reinterpret_cast(max_word); +- } +- +- static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() { +- unsigned short lowest_word = 0xFF7F; +- return reinterpret_cast(lowest_word); +- } +-}; +- +-template <> struct cub::NumericTraits: cub::BaseTraits {}; +- + #endif + + } // namespace detail + +-namespace cub { +- + inline int get_num_bits(uint64_t max_key) { + int num_bits = 1; + while (max_key > 1) { +@@ -115,11 +125,11 @@ static inline void sort_keys( + key_t_ *keys_out_ = reinterpret_cast(keys_out); + + if (descending) { +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortKeysDescending, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeysDescending, + keys_in_, keys_out_, n, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } else { +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortKeys, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeys, + keys_in_, keys_out_, n, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } +@@ -147,11 +157,11 @@ static inline void sort_pairs( + key_t_ *keys_out_ = reinterpret_cast(keys_out); + + if (descending) { +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortPairsDescending, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairsDescending, + keys_in_, keys_out_, values_in, values_out, n, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } else { +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortPairs, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairs, + keys_in_, keys_out_, values_in, values_out, n, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } +@@ -183,12 +193,12 @@ static inline void segmented_sort_pairs( + key_t_ *keys_out_ = reinterpret_cast(keys_out); + + if (descending) { +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending, + keys_in_, keys_out_, values_in, values_out, + num_elements, num_segments, begin_offsets, end_offsets, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } else { +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceSegmentedRadixSort::SortPairs, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairs, + keys_in_, keys_out_, values_in, values_out, + num_elements, num_segments, begin_offsets, end_offsets, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); +@@ -240,7 +250,7 @@ inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT + // so split at int_max/2 + constexpr int max_cub_size = std::numeric_limits::max() / 2 + 1; // 2**30 + int size_cub = std::min(num_items, max_cub_size); +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::InclusiveScan, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input, + output, + scan_op, +@@ -260,7 +270,7 @@ inline void inclusive_scan(InputIteratorT input, 
OutputIteratorT output, ScanOpT + first_elem_ptr, + scan_op); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +- using ArgIndexInputIterator = NO_ROCM(detail)::cub::ArgIndexInputIterator; ++ using ArgIndexInputIterator = NO_ROCM(at_cuda_detail)::cub::ArgIndexInputIterator; + using tuple = typename ArgIndexInputIterator::value_type; + auto input_iter_transform = [=] __device__ (const tuple &x)->input_t { + if (x.key == 0) { +@@ -269,9 +279,9 @@ inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT + return x.value; + } + }; +- auto input_ = NO_ROCM(detail)::cub::TransformInputIterator( ++ auto input_ = NO_ROCM(at_cuda_detail)::cub::TransformInputIterator( + ArgIndexInputIterator(input + i), input_iter_transform); +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::InclusiveScan, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input_, + output + i, + scan_op, +@@ -287,7 +297,7 @@ inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT + // so split at int_max/2 + constexpr int max_cub_size = std::numeric_limits::max() / 2 + 1; // 2**30 + int size_cub = std::min(num_items, max_cub_size); +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::ExclusiveScan, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, + input, + output, + scan_op, +@@ -309,7 +319,7 @@ inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT + C10_CUDA_KERNEL_LAUNCH_CHECK(); + auto input_ = impl::chained_iterator{ + input + i, first_elem_ptr}; +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::InclusiveScan, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input_, + output + i, + scan_op, +@@ -322,7 +332,7 @@ template::max(), + "cub unique does not support more than INT_MAX elements"); +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceSelect::Unique, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::Unique, + input, output, num_selected_out, num_items, at::cuda::getCurrentCUDAStream()); + } + +diff --git a/aten/src/ATen/cuda/cub_definitions.cuh b/aten/src/ATen/cuda/cub_definitions.cuh +new file mode 100644 +index 0000000000000..61119fc174587 +--- /dev/null ++++ b/aten/src/ATen/cuda/cub_definitions.cuh +@@ -0,0 +1,29 @@ ++#pragma once ++ ++#if !defined(USE_ROCM) ++#include // for CUDA_VERSION ++#endif ++ ++#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 ++#include ++#else ++#define CUB_VERSION 0 ++#endif ++ ++// cub sort support for __nv_bfloat16 is added to cub 1.13 in: ++// https://github.com/NVIDIA/cub/pull/306 ++#if CUB_VERSION >= 101300 ++#define CUB_SUPPORTS_NV_BFLOAT16() true ++#else ++#define CUB_SUPPORTS_NV_BFLOAT16() false ++#endif ++ ++// cub sort support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in: ++// https://github.com/NVIDIA/cub/pull/326 ++// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake ++// starting from CUDA 11.5 ++#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE) ++#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true ++#else ++#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false ++#endif +diff --git a/caffe2/core/context_gpu.cu b/caffe2/core/context_gpu.cu +index c2b89945ad..6d53740091 100644 +--- a/caffe2/core/context_gpu.cu ++++ b/caffe2/core/context_gpu.cu +@@ -21,6 +21,7 @@ + #include "caffe2/core/logging.h" + #include "caffe2/core/tensor.h" + #include "caffe2/utils/string_utils.h" ++#include "caffe2/utils/cub_namespace.cuh" + + C10_DEFINE_string( + caffe2_cuda_memory_pool, +diff --git a/caffe2/operators/accuracy_op.cu 
b/caffe2/operators/accuracy_op.cu +index f06663d71a..29df54e752 100644 +--- a/caffe2/operators/accuracy_op.cu ++++ b/caffe2/operators/accuracy_op.cu +@@ -3,6 +3,7 @@ + #include "caffe2/utils/GpuAtomics.cuh" + #include "caffe2/utils/math.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + + namespace caffe2 { +diff --git a/caffe2/operators/affine_channel_op.cu b/caffe2/operators/affine_channel_op.cu +index adf4ac55c0..efae0a3fc6 100644 +--- a/caffe2/operators/affine_channel_op.cu ++++ b/caffe2/operators/affine_channel_op.cu +@@ -1,5 +1,6 @@ + #include "caffe2/operators/affine_channel_op.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + + #include "caffe2/core/context_gpu.h" +diff --git a/caffe2/operators/arg_ops.cu b/caffe2/operators/arg_ops.cu +index 7e90d25b83..56deaa6363 100644 +--- a/caffe2/operators/arg_ops.cu ++++ b/caffe2/operators/arg_ops.cu +@@ -2,8 +2,8 @@ + + #include + ++#include "caffe2/utils/cub_namespace.cuh" + #include +-#include + + #include "caffe2/core/common_gpu.h" + #include "caffe2/core/context_gpu.h" +diff --git a/caffe2/operators/batch_moments_op.cu b/caffe2/operators/batch_moments_op.cu +index 4b693b5c04..81359f6440 100644 +--- a/caffe2/operators/batch_moments_op.cu ++++ b/caffe2/operators/batch_moments_op.cu +@@ -1,5 +1,6 @@ + #include "caffe2/operators/batch_moments_op.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + + #include "caffe2/core/context_gpu.h" +diff --git a/caffe2/operators/batch_sparse_to_dense_op.cu b/caffe2/operators/batch_sparse_to_dense_op.cu +index aea2035a5d..3e7ad8af9a 100644 +--- a/caffe2/operators/batch_sparse_to_dense_op.cu ++++ b/caffe2/operators/batch_sparse_to_dense_op.cu +@@ -1,5 +1,6 @@ + #include "caffe2/operators/batch_sparse_to_dense_op.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + + #include "caffe2/core/context_gpu.h" +diff --git a/caffe2/operators/boolean_mask_ops.cu b/caffe2/operators/boolean_mask_ops.cu +index 214b7c13ba..501dd3b191 100644 +--- a/caffe2/operators/boolean_mask_ops.cu ++++ b/caffe2/operators/boolean_mask_ops.cu +@@ -2,8 +2,8 @@ + + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/boolean_mask_ops.h" +- + #include ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/cross_entropy_op.cu b/caffe2/operators/cross_entropy_op.cu +index 380e80399f..c23f05f8e5 100644 +--- a/caffe2/operators/cross_entropy_op.cu ++++ b/caffe2/operators/cross_entropy_op.cu +@@ -4,6 +4,7 @@ + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/cross_entropy_op.h" + #include "caffe2/operators/operator_fallback_gpu.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/distance_op.cu b/caffe2/operators/distance_op.cu +index 3a8bb337d5..a360166854 100644 +--- a/caffe2/operators/distance_op.cu ++++ b/caffe2/operators/distance_op.cu +@@ -4,6 +4,7 @@ + #include "caffe2/operators/distance_op.h" + #include "caffe2/utils/conversions.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + + namespace caffe2 { +diff --git a/caffe2/operators/elementwise_div_op.cu b/caffe2/operators/elementwise_div_op.cu +index 42b103a0f1..33118a8f5e 100644 +--- a/caffe2/operators/elementwise_div_op.cu ++++ b/caffe2/operators/elementwise_div_op.cu +@@ -3,8 +3,8 @@ + #include + #include + ++#include "caffe2/utils/cub_namespace.cuh" + #include +-#include + + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/elementwise_ops_utils.h" +diff --git 
a/caffe2/operators/elementwise_linear_op.cu b/caffe2/operators/elementwise_linear_op.cu +index cc49115bff..8f749644b2 100644 +--- a/caffe2/operators/elementwise_linear_op.cu ++++ b/caffe2/operators/elementwise_linear_op.cu +@@ -5,6 +5,7 @@ + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/operator_fallback_gpu.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + + namespace caffe2 { +diff --git a/caffe2/operators/elementwise_mul_op.cu b/caffe2/operators/elementwise_mul_op.cu +index bdbf760cf9..1991b8b513 100644 +--- a/caffe2/operators/elementwise_mul_op.cu ++++ b/caffe2/operators/elementwise_mul_op.cu +@@ -3,8 +3,8 @@ + #include + #include + ++#include "caffe2/utils/cub_namespace.cuh" + #include +-#include + + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/elementwise_ops_utils.h" +diff --git a/caffe2/operators/elementwise_ops.cu b/caffe2/operators/elementwise_ops.cu +index c9ced33cf8..932bd5dafd 100644 +--- a/caffe2/operators/elementwise_ops.cu ++++ b/caffe2/operators/elementwise_ops.cu +@@ -1,5 +1,6 @@ + #include "caffe2/operators/elementwise_ops.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + #include + #include +diff --git a/caffe2/operators/find_op.cu b/caffe2/operators/find_op.cu +index f8ff2bab16..0418a71fbc 100644 +--- a/caffe2/operators/find_op.cu ++++ b/caffe2/operators/find_op.cu +@@ -1,6 +1,7 @@ + #include + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/find_op.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/generate_proposals_op.cu b/caffe2/operators/generate_proposals_op.cu +index 64518538b6..a4207f8653 100644 +--- a/caffe2/operators/generate_proposals_op.cu ++++ b/caffe2/operators/generate_proposals_op.cu +@@ -5,6 +5,7 @@ + #include "caffe2/operators/generate_proposals_op_util_boxes.h" // BBOX_XFORM_CLIP_DEFAULT + #include "caffe2/operators/generate_proposals_op_util_nms.h" + #include "caffe2/operators/generate_proposals_op_util_nms_gpu.h" ++#include "caffe2/utils/cub_namespace.cuh" + + #ifdef __HIP_PLATFORM_HCC__ + #include +diff --git a/caffe2/operators/normalize_ops.cu b/caffe2/operators/normalize_ops.cu +index 26df05308d..e4d1f34b75 100644 +--- a/caffe2/operators/normalize_ops.cu ++++ b/caffe2/operators/normalize_ops.cu +@@ -5,6 +5,7 @@ + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/normalize_l1_op.h" + #include "caffe2/operators/normalize_op.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/one_hot_ops.cu b/caffe2/operators/one_hot_ops.cu +index e521b3dd09..87e8196765 100644 +--- a/caffe2/operators/one_hot_ops.cu ++++ b/caffe2/operators/one_hot_ops.cu +@@ -2,6 +2,7 @@ + + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/one_hot_ops.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/pack_segments.cu b/caffe2/operators/pack_segments.cu +index 7475100fd3..372638abdd 100644 +--- a/caffe2/operators/pack_segments.cu ++++ b/caffe2/operators/pack_segments.cu +@@ -1,6 +1,7 @@ + #include + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/pack_segments.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/prelu_op.cu b/caffe2/operators/prelu_op.cu +index 745a393f07..6303b70b4a 100644 +--- a/caffe2/operators/prelu_op.cu ++++ b/caffe2/operators/prelu_op.cu +@@ -1,6 +1,7 @@ + #include "caffe2/core/context_gpu.h" + #include 
"caffe2/operators/prelu_op.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + + namespace caffe2 { +diff --git a/caffe2/operators/reduce_front_back_max_ops.cu b/caffe2/operators/reduce_front_back_max_ops.cu +index 3c6ee7f0ae..c41d5ad579 100644 +--- a/caffe2/operators/reduce_front_back_max_ops.cu ++++ b/caffe2/operators/reduce_front_back_max_ops.cu +@@ -1,6 +1,7 @@ + #include + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/reduce_front_back_max_ops.h" ++#include "caffe2/utils/cub_namespace.cuh" + + #ifdef __HIP_PLATFORM_HCC__ + #include +diff --git a/caffe2/operators/reduce_front_back_sum_mean_ops.cu b/caffe2/operators/reduce_front_back_sum_mean_ops.cu +index 476596f084..a7ad6dd500 100644 +--- a/caffe2/operators/reduce_front_back_sum_mean_ops.cu ++++ b/caffe2/operators/reduce_front_back_sum_mean_ops.cu +@@ -1,6 +1,7 @@ + #include + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/reduce_front_back_sum_mean_ops.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/reduction_ops.cu b/caffe2/operators/reduction_ops.cu +index ba55a66de5..9649b85d01 100644 +--- a/caffe2/operators/reduction_ops.cu ++++ b/caffe2/operators/reduction_ops.cu +@@ -2,7 +2,7 @@ + #include "caffe2/operators/reduction_ops.h" + #include "caffe2/utils/conversions.h" + +-#include ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/rmac_regions_op.cu b/caffe2/operators/rmac_regions_op.cu +index 0ec2dd351a..de2b2553a7 100644 +--- a/caffe2/operators/rmac_regions_op.cu ++++ b/caffe2/operators/rmac_regions_op.cu +@@ -1,4 +1,5 @@ + #include ++#include "caffe2/utils/cub_namespace.cuh" + + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/rmac_regions_op.h" +@@ -10,6 +11,9 @@ + #ifdef __HIP_PLATFORM_HCC__ + namespace rocprim { + #else ++#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() ++namespace at_cuda_detail { ++#endif + namespace cub { + #endif + +@@ -22,6 +26,9 @@ inline __host__ __device__ bool operator<( + } + + } // namespace cub ++#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() ++} // namespace at_cuda_detail ++#endif + + namespace caffe2 { + +diff --git a/caffe2/operators/segment_reduction_op_gpu.cuh b/caffe2/operators/segment_reduction_op_gpu.cuh +index ffe834e886..eebade352e 100644 +--- a/caffe2/operators/segment_reduction_op_gpu.cuh ++++ b/caffe2/operators/segment_reduction_op_gpu.cuh +@@ -1,3 +1,4 @@ ++#include "caffe2/utils/cub_namespace.cuh" + #include + #include + #include +diff --git a/caffe2/operators/sequence_ops.cu b/caffe2/operators/sequence_ops.cu +index cc34effd3f..2ceb5236ef 100644 +--- a/caffe2/operators/sequence_ops.cu ++++ b/caffe2/operators/sequence_ops.cu +@@ -1,6 +1,7 @@ + #include + + #include ++#include "caffe2/utils/cub_namespace.cuh" + + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/sequence_ops.h" +diff --git a/caffe2/operators/softmax_ops.cu b/caffe2/operators/softmax_ops.cu +index 51c0cbc2bf..ebf0700c9e 100644 +--- a/caffe2/operators/softmax_ops.cu ++++ b/caffe2/operators/softmax_ops.cu +@@ -5,6 +5,7 @@ + #include "caffe2/operators/softmax_op.h" + #include "caffe2/operators/softmax_with_loss_op.h" + #include "caffe2/operators/spatial_softmax_with_loss_op.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/spatial_batch_norm_op_impl.cuh b/caffe2/operators/spatial_batch_norm_op_impl.cuh +index edc076c7d7..6fdb4c63f8 100644 +--- a/caffe2/operators/spatial_batch_norm_op_impl.cuh ++++ 
b/caffe2/operators/spatial_batch_norm_op_impl.cuh +@@ -5,8 +5,8 @@ + + #include + ++#include "caffe2/utils/cub_namespace.cuh" + #include +-#include + + #include "caffe2/core/context_gpu.h" + #include "caffe2/utils/math.h" +diff --git a/caffe2/sgd/adagrad_fused_op_gpu.cu b/caffe2/sgd/adagrad_fused_op_gpu.cu +index e2bf91c880..a7057c8737 100644 +--- a/caffe2/sgd/adagrad_fused_op_gpu.cu ++++ b/caffe2/sgd/adagrad_fused_op_gpu.cu +@@ -2,6 +2,7 @@ + #include + #include + ++#include "caffe2/utils/cub_namespace.cuh" + #include + #include "caffe2/sgd/adagrad_fused_op_gpu.cuh" + #include "caffe2/utils/math.h" +diff --git a/caffe2/sgd/adagrad_op_gpu.cu b/caffe2/sgd/adagrad_op_gpu.cu +index 8abb3376ca..b80d29700c 100644 +--- a/caffe2/sgd/adagrad_op_gpu.cu ++++ b/caffe2/sgd/adagrad_op_gpu.cu +@@ -4,6 +4,7 @@ + #include "caffe2/core/common_gpu.h" + #include "caffe2/core/context_gpu.h" + #include "caffe2/sgd/adagrad_op.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/sgd/adam_op_gpu.cu b/caffe2/sgd/adam_op_gpu.cu +index 42ab975faa..6f9c323420 100644 +--- a/caffe2/sgd/adam_op_gpu.cu ++++ b/caffe2/sgd/adam_op_gpu.cu +@@ -2,6 +2,7 @@ + #include "caffe2/core/common_gpu.h" + #include "caffe2/core/context_gpu.h" + #include "caffe2/sgd/adam_op.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/utils/cub_namespace.cuh b/caffe2/utils/cub_namespace.cuh +new file mode 100644 +index 0000000000000..188a9936f9c6e +--- /dev/null ++++ b/caffe2/utils/cub_namespace.cuh +@@ -0,0 +1,17 @@ ++#pragma once ++ ++// cub sort support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in: ++// https://github.com/NVIDIA/cub/pull/326 ++// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake ++// starting from CUDA 11.5 ++#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE) ++#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true ++#else ++#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false ++#endif ++ ++#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() ++namespace caffe2 { ++namespace cub = ::CUB_WRAPPED_NAMESPACE::cub; ++} ++#endif +diff --git a/caffe2/utils/math/reduce.cu b/caffe2/utils/math/reduce.cu +index 8c40c5d2b0..e8a8b768eb 100644 +--- a/caffe2/utils/math/reduce.cu ++++ b/caffe2/utils/math/reduce.cu +@@ -5,9 +5,8 @@ + #include + #include + #include +- ++#include "caffe2/utils/cub_namespace.cuh" + #include +-#include + + #include + #include +diff --git a/caffe2/utils/math/reduce.cuh b/caffe2/utils/math/reduce.cuh +index 0c43ad45a3..18bdca11b9 100644 +--- a/caffe2/utils/math/reduce.cuh ++++ b/caffe2/utils/math/reduce.cuh +@@ -1,8 +1,8 @@ + #ifndef CAFFE2_UTILS_MATH_REDUCE_CUH_ + #define CAFFE2_UTILS_MATH_REDUCE_CUH_ + ++#include "caffe2/utils/cub_namespace.cuh" + #include +-#include + + #include "caffe2/core/common_gpu.h" + +diff --git a/caffe2/utils/math_gpu.cu b/caffe2/utils/math_gpu.cu +index 7f3bb8eea6..54fbcca1d4 100644 +--- a/caffe2/utils/math_gpu.cu ++++ b/caffe2/utils/math_gpu.cu +@@ -7,8 +7,9 @@ + #include + #include + +-#include + #include ++#include ++#include "caffe2/utils/cub_namespace.cuh" + + #include + #include +diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake +index ca560288a4..5fd189e4a8 100644 +--- a/cmake/Dependencies.cmake ++++ b/cmake/Dependencies.cmake +@@ -1622,6 +1622,12 @@ if(NOT INTERN_BUILD_MOBILE) + list(APPEND CUDA_NVCC_FLAGS "-Xcompiler" "-fPIC") + endif() + ++ # use cub in a safe manner, see: ++ # https://github.com/pytorch/pytorch/pull/55292 ++ if(NOT ${CUDA_VERSION} LESS 
11.5) ++ list(APPEND CUDA_NVCC_FLAGS "-DCUB_WRAPPED_NAMESPACE=at_cuda_detail") ++ endif() ++ + if(CUDA_HAS_FP16 OR NOT ${CUDA_VERSION} LESS 7.5) + message(STATUS "Found CUDA with FP16 support, compiling with torch.cuda.HalfTensor") + list(APPEND CUDA_NVCC_FLAGS "-DCUDA_HAS_FP16=1" "-D__CUDA_NO_HALF_OPERATORS__" "-D__CUDA_NO_HALF_CONVERSIONS__" diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AOTIModelContainerRunnerCuda.java similarity index 83% rename from pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCuda.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AOTIModelContainerRunnerCuda.java index e336d8136dd..5bbc854a2be 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCuda.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AOTIModelContainerRunnerCuda.java @@ -1,12 +1,14 @@ // Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE -package org.bytedeco.pytorch; - +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.cuda.functions.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -14,11 +16,13 @@ import static org.bytedeco.javacpp.presets.javacpp.*; import static org.bytedeco.openblas.global.openblas_nolapack.*; import static org.bytedeco.openblas.global.openblas.*; - +import org.bytedeco.pytorch.*; import static org.bytedeco.pytorch.global.torch.*; +import static org.bytedeco.pytorch.global.torch_cuda.*; + -@Namespace("torch::inductor") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Namespace("torch::inductor") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) public class AOTIModelContainerRunnerCuda extends AOTIModelContainerRunner { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
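
The Dependencies.cmake change and the cub.cuh hunk earlier in this patch are two halves of one mechanism for keeping cub's symbols out of the global namespace. Condensed to a sketch, simplified from the patched aten/src/ATen/cuda/cub.cuh with the include targets spelled out:

#include <ATen/cuda/cub_definitions.cuh>

#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE()
// cub >= 1.13.1 (CUDA 11.5+): nvcc is invoked with
// -DCUB_WRAPPED_NAMESPACE=at_cuda_detail, so cub wraps itself.
#include <cub/cub.cuh>
#else
// Older cub: wrap it by hand through cub's namespace hook macros.
#undef CUB_NS_PREFIX
#undef CUB_NS_POSTFIX
#undef CUB_NS_QUALIFIER
#define CUB_NS_PREFIX namespace at_cuda_detail {
#define CUB_NS_POSTFIX }
#define CUB_NS_QUALIFIER ::at_cuda_detail::cub
#include <cub/cub.cuh>
#endif

// Either way, the sort/scan wrappers qualify every call explicitly, e.g.
//   CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, ...);
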
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 7d754eefd6c..ca7d939c01e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -79724,18 +79724,6 @@ scalar_t sf(scalar_t x, scalar_t y) // #endif -// Parsed from torch/csrc/inductor/aoti_model_container_runner_cuda.h - -// #pragma once - -// #include -// Targeting ../AOTIModelContainerRunnerCuda.java - - - - // namespace torch::inductor - - // Parsed from datasets.h /* diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java index 82a41460be3..66270cd5580 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java @@ -1249,4 +1249,16 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // namespace c10 +// Parsed from torch/csrc/inductor/aoti_model_container_runner_cuda.h + +// #pragma once + +// #include +// Targeting ../cuda/AOTIModelContainerRunnerCuda.java + + + + // namespace torch::inductor + + } diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 1e1439f640c..5f6fec8043c 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -61,7 +61,7 @@ include = { "torch/torch.h", "torch/script.h", - "torch/csrc/inductor/aoti_model_container_runner_cuda.h", + "torch/csrc/inductor/aoti_model_container_runner.h", // For inclusion in JNI only, not parsed (compiler needs some complete definitions) "torch/csrc/jit/runtime/instruction.h", diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java index 078a94201ea..0d361c16e36 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -40,6 +40,7 @@ "ATen/cudnn/Descriptors.h", "ATen/cudnn/Types.h", "c10/cuda/CUDAGuard.h", + "torch/csrc/inductor/aoti_model_container_runner_cuda.h", // For inclusion in JNI only, not parsed "ATen/cuda/CUDAGeneratorImpl.h", diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h index 6cd5e1eabdb..4195ee66a89 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h @@ -27,3 +27,4 @@ #include "ATen/cudnn/Descriptors.h" #include "ATen/cudnn/Types.h" #include "c10/cuda/CUDAGuard.h" +#include "torch/csrc/inductor/aoti_model_container_runner_cuda.h" \ No newline at end of file diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h index 58e3f949fb3..81706181751 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h @@ -1427,8 +1427,5 @@ // #include "torch/csrc/inductor/aoti_torch/c/shim.h" // model.so API, not part of libtorch API // #include "torch/csrc/inductor/aoti_runtime/interface.h" // model.so API, 
not part of libtorch API #include "torch/csrc/inductor/aoti_model_container_runner.h" -// Not in torch_cuda_include because doesn't need cuda includes or linkage -// + protected field AOTIModelContainerRunner::model_so is not exported, so won't link on windows -#include "torch/csrc/inductor/aoti_model_container_runner_cuda.h" #include "datasets.h" \ No newline at end of file From 9ebb6619fad25479427ee34bdc1f769aed714da9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Sun, 4 Feb 2024 12:35:44 +0100 Subject: [PATCH 11/24] Add DynamicLibrary.h to JNI --- pytorch/pytorch.patch | 738 ------------------ .../org/bytedeco/pytorch/presets/torch.java | 1 + .../bytedeco/pytorch/presets/torch_cuda.java | 1 + 3 files changed, 2 insertions(+), 738 deletions(-) delete mode 100644 pytorch/pytorch.patch diff --git a/pytorch/pytorch.patch b/pytorch/pytorch.patch deleted file mode 100644 index ef316b44f0f..00000000000 --- a/pytorch/pytorch.patch +++ /dev/null @@ -1,738 +0,0 @@ -diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml -index f33eda2f94..a92a79a684 100644 ---- a/.github/workflows/lint.yml -+++ b/.github/workflows/lint.yml -@@ -97,7 +97,7 @@ jobs: - - name: Ensure no direct cub include - if: always() - run: | -- (! git --no-pager grep -I -no $'#include - #include - -+#include -+ -+#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() -+ -+#include -+ -+#else -+ - // include cub in a safe manner, see: - // https://github.com/pytorch/pytorch/pull/55292 - #undef CUB_NS_POSTFIX //undef to avoid redefinition warnings - #undef CUB_NS_PREFIX --#define CUB_NS_PREFIX namespace at { namespace cuda { namespace detail { --#define CUB_NS_POSTFIX }}} -+#undef CUB_NS_QUALIFIER -+#define CUB_NS_PREFIX namespace at_cuda_detail { -+#define CUB_NS_POSTFIX } -+#define CUB_NS_QUALIFIER ::at_cuda_detail::cub - #include - #undef CUB_NS_POSTFIX - #undef CUB_NS_PREFIX -+#undef CUB_NS_QUALIFIER -+ -+#endif - - #include - #include -@@ -33,16 +46,40 @@ - #define NO_ROCM(x) - #else - #define NO_ROCM(x) x -+#endif - --namespace at { namespace native { -+#if !defined(USE_ROCM) && !CUB_SUPPORTS_NV_BFLOAT16() -+ -+namespace at_cuda_detail { -+// backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16 -+ -+template <> -+struct cub::FpLimits -+{ -+ static __host__ __device__ __forceinline__ c10::BFloat16 Max() { -+ unsigned short max_word = 0x7F7F; -+ return reinterpret_cast(max_word); -+ } -+ -+ static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() { -+ unsigned short lowest_word = 0xFF7F; -+ return reinterpret_cast(lowest_word); -+ } -+}; - --namespace cub = at::cuda::detail::cub; -+template <> struct cub::NumericTraits: cub::BaseTraits {}; -+} -+#endif - -+#if !defined(USE_ROCM) -+namespace at { namespace native { -+namespace cub = ::at_cuda_detail::cub; - }} - #endif - - namespace at { - namespace cuda { -+namespace cub { - - namespace detail { - -@@ -55,44 +92,17 @@ struct cuda_type { - using type = __half; - }; - --#if defined(CUDA_VERSION) && CUDA_VERSION >= 11050 --// cub sort support for __nv_bfloat16 is added to cub 1.13 in --// https://github.com/NVIDIA/cub/pull/306 and according to --// https://github.com/NVIDIA/cub#releases, 1.13 is included in --// CUDA Toolkit 11.5 -+#if CUB_SUPPORTS_NV_BFLOAT16() - --// waiting for https://github.com/NVIDIA/cub/pull/306 to land on CUDA - template<> - struct cuda_type { - using type = __nv_bfloat16; - }; - --#elif !defined(__HIP_PLATFORM_HCC__) -- --// backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16 -- --template <> 
--struct cub::FpLimits --{ -- static __host__ __device__ __forceinline__ c10::BFloat16 Max() { -- unsigned short max_word = 0x7F7F; -- return reinterpret_cast(max_word); -- } -- -- static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() { -- unsigned short lowest_word = 0xFF7F; -- return reinterpret_cast(lowest_word); -- } --}; -- --template <> struct cub::NumericTraits: cub::BaseTraits {}; -- - #endif - - } // namespace detail - --namespace cub { -- - inline int get_num_bits(uint64_t max_key) { - int num_bits = 1; - while (max_key > 1) { -@@ -115,11 +125,11 @@ static inline void sort_keys( - key_t_ *keys_out_ = reinterpret_cast(keys_out); - - if (descending) { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortKeysDescending, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeysDescending, - keys_in_, keys_out_, n, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); - } else { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortKeys, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeys, - keys_in_, keys_out_, n, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); - } -@@ -147,11 +157,11 @@ static inline void sort_pairs( - key_t_ *keys_out_ = reinterpret_cast(keys_out); - - if (descending) { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortPairsDescending, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairsDescending, - keys_in_, keys_out_, values_in, values_out, n, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); - } else { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortPairs, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairs, - keys_in_, keys_out_, values_in, values_out, n, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); - } -@@ -183,12 +193,12 @@ static inline void segmented_sort_pairs( - key_t_ *keys_out_ = reinterpret_cast(keys_out); - - if (descending) { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending, - keys_in_, keys_out_, values_in, values_out, - num_elements, num_segments, begin_offsets, end_offsets, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); - } else { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceSegmentedRadixSort::SortPairs, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairs, - keys_in_, keys_out_, values_in, values_out, - num_elements, num_segments, begin_offsets, end_offsets, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); -@@ -240,7 +250,7 @@ inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT - // so split at int_max/2 - constexpr int max_cub_size = std::numeric_limits::max() / 2 + 1; // 2**30 - int size_cub = std::min(num_items, max_cub_size); -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::InclusiveScan, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, - input, - output, - scan_op, -@@ -260,7 +270,7 @@ inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT - first_elem_ptr, - scan_op); - C10_CUDA_KERNEL_LAUNCH_CHECK(); -- using ArgIndexInputIterator = NO_ROCM(detail)::cub::ArgIndexInputIterator; -+ using ArgIndexInputIterator = NO_ROCM(at_cuda_detail)::cub::ArgIndexInputIterator; - using tuple = typename ArgIndexInputIterator::value_type; - auto input_iter_transform = [=] __device__ (const tuple &x)->input_t { - if (x.key == 0) { -@@ -269,9 +279,9 @@ inline void 
inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT - return x.value; - } - }; -- auto input_ = NO_ROCM(detail)::cub::TransformInputIterator( -+ auto input_ = NO_ROCM(at_cuda_detail)::cub::TransformInputIterator( - ArgIndexInputIterator(input + i), input_iter_transform); -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::InclusiveScan, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, - input_, - output + i, - scan_op, -@@ -287,7 +297,7 @@ inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT - // so split at int_max/2 - constexpr int max_cub_size = std::numeric_limits::max() / 2 + 1; // 2**30 - int size_cub = std::min(num_items, max_cub_size); -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::ExclusiveScan, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, - input, - output, - scan_op, -@@ -309,7 +319,7 @@ inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT - C10_CUDA_KERNEL_LAUNCH_CHECK(); - auto input_ = impl::chained_iterator{ - input + i, first_elem_ptr}; -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::InclusiveScan, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, - input_, - output + i, - scan_op, -@@ -322,7 +332,7 @@ template::max(), - "cub unique does not support more than INT_MAX elements"); -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceSelect::Unique, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::Unique, - input, output, num_selected_out, num_items, at::cuda::getCurrentCUDAStream()); - } - -diff --git a/aten/src/ATen/cuda/cub_definitions.cuh b/aten/src/ATen/cuda/cub_definitions.cuh -new file mode 100644 -index 0000000000000..61119fc174587 ---- /dev/null -+++ b/aten/src/ATen/cuda/cub_definitions.cuh -@@ -0,0 +1,29 @@ -+#pragma once -+ -+#if !defined(USE_ROCM) -+#include // for CUDA_VERSION -+#endif -+ -+#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 -+#include -+#else -+#define CUB_VERSION 0 -+#endif -+ -+// cub sort support for __nv_bfloat16 is added to cub 1.13 in: -+// https://github.com/NVIDIA/cub/pull/306 -+#if CUB_VERSION >= 101300 -+#define CUB_SUPPORTS_NV_BFLOAT16() true -+#else -+#define CUB_SUPPORTS_NV_BFLOAT16() false -+#endif -+ -+// cub sort support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in: -+// https://github.com/NVIDIA/cub/pull/326 -+// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake -+// starting from CUDA 11.5 -+#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE) -+#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true -+#else -+#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false -+#endif -diff --git a/caffe2/core/context_gpu.cu b/caffe2/core/context_gpu.cu -index c2b89945ad..6d53740091 100644 ---- a/caffe2/core/context_gpu.cu -+++ b/caffe2/core/context_gpu.cu -@@ -21,6 +21,7 @@ - #include "caffe2/core/logging.h" - #include "caffe2/core/tensor.h" - #include "caffe2/utils/string_utils.h" -+#include "caffe2/utils/cub_namespace.cuh" - - C10_DEFINE_string( - caffe2_cuda_memory_pool, -diff --git a/caffe2/operators/accuracy_op.cu b/caffe2/operators/accuracy_op.cu -index f06663d71a..29df54e752 100644 ---- a/caffe2/operators/accuracy_op.cu -+++ b/caffe2/operators/accuracy_op.cu -@@ -3,6 +3,7 @@ - #include "caffe2/utils/GpuAtomics.cuh" - #include "caffe2/utils/math.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - namespace caffe2 { -diff --git a/caffe2/operators/affine_channel_op.cu b/caffe2/operators/affine_channel_op.cu -index adf4ac55c0..efae0a3fc6 100644 
---- a/caffe2/operators/affine_channel_op.cu -+++ b/caffe2/operators/affine_channel_op.cu -@@ -1,5 +1,6 @@ - #include "caffe2/operators/affine_channel_op.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - #include "caffe2/core/context_gpu.h" -diff --git a/caffe2/operators/arg_ops.cu b/caffe2/operators/arg_ops.cu -index 7e90d25b83..56deaa6363 100644 ---- a/caffe2/operators/arg_ops.cu -+++ b/caffe2/operators/arg_ops.cu -@@ -2,8 +2,8 @@ - - #include - -+#include "caffe2/utils/cub_namespace.cuh" - #include --#include - - #include "caffe2/core/common_gpu.h" - #include "caffe2/core/context_gpu.h" -diff --git a/caffe2/operators/batch_moments_op.cu b/caffe2/operators/batch_moments_op.cu -index 4b693b5c04..81359f6440 100644 ---- a/caffe2/operators/batch_moments_op.cu -+++ b/caffe2/operators/batch_moments_op.cu -@@ -1,5 +1,6 @@ - #include "caffe2/operators/batch_moments_op.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - #include "caffe2/core/context_gpu.h" -diff --git a/caffe2/operators/batch_sparse_to_dense_op.cu b/caffe2/operators/batch_sparse_to_dense_op.cu -index aea2035a5d..3e7ad8af9a 100644 ---- a/caffe2/operators/batch_sparse_to_dense_op.cu -+++ b/caffe2/operators/batch_sparse_to_dense_op.cu -@@ -1,5 +1,6 @@ - #include "caffe2/operators/batch_sparse_to_dense_op.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - #include "caffe2/core/context_gpu.h" -diff --git a/caffe2/operators/boolean_mask_ops.cu b/caffe2/operators/boolean_mask_ops.cu -index 214b7c13ba..501dd3b191 100644 ---- a/caffe2/operators/boolean_mask_ops.cu -+++ b/caffe2/operators/boolean_mask_ops.cu -@@ -2,8 +2,8 @@ - - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/boolean_mask_ops.h" -- - #include -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/cross_entropy_op.cu b/caffe2/operators/cross_entropy_op.cu -index 380e80399f..c23f05f8e5 100644 ---- a/caffe2/operators/cross_entropy_op.cu -+++ b/caffe2/operators/cross_entropy_op.cu -@@ -4,6 +4,7 @@ - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/cross_entropy_op.h" - #include "caffe2/operators/operator_fallback_gpu.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/distance_op.cu b/caffe2/operators/distance_op.cu -index 3a8bb337d5..a360166854 100644 ---- a/caffe2/operators/distance_op.cu -+++ b/caffe2/operators/distance_op.cu -@@ -4,6 +4,7 @@ - #include "caffe2/operators/distance_op.h" - #include "caffe2/utils/conversions.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - namespace caffe2 { -diff --git a/caffe2/operators/elementwise_div_op.cu b/caffe2/operators/elementwise_div_op.cu -index 42b103a0f1..33118a8f5e 100644 ---- a/caffe2/operators/elementwise_div_op.cu -+++ b/caffe2/operators/elementwise_div_op.cu -@@ -3,8 +3,8 @@ - #include - #include - -+#include "caffe2/utils/cub_namespace.cuh" - #include --#include - - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/elementwise_ops_utils.h" -diff --git a/caffe2/operators/elementwise_linear_op.cu b/caffe2/operators/elementwise_linear_op.cu -index cc49115bff..8f749644b2 100644 ---- a/caffe2/operators/elementwise_linear_op.cu -+++ b/caffe2/operators/elementwise_linear_op.cu -@@ -5,6 +5,7 @@ - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/operator_fallback_gpu.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - namespace caffe2 { -diff --git a/caffe2/operators/elementwise_mul_op.cu 
b/caffe2/operators/elementwise_mul_op.cu -index bdbf760cf9..1991b8b513 100644 ---- a/caffe2/operators/elementwise_mul_op.cu -+++ b/caffe2/operators/elementwise_mul_op.cu -@@ -3,8 +3,8 @@ - #include - #include - -+#include "caffe2/utils/cub_namespace.cuh" - #include --#include - - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/elementwise_ops_utils.h" -diff --git a/caffe2/operators/elementwise_ops.cu b/caffe2/operators/elementwise_ops.cu -index c9ced33cf8..932bd5dafd 100644 ---- a/caffe2/operators/elementwise_ops.cu -+++ b/caffe2/operators/elementwise_ops.cu -@@ -1,5 +1,6 @@ - #include "caffe2/operators/elementwise_ops.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - #include - #include -diff --git a/caffe2/operators/find_op.cu b/caffe2/operators/find_op.cu -index f8ff2bab16..0418a71fbc 100644 ---- a/caffe2/operators/find_op.cu -+++ b/caffe2/operators/find_op.cu -@@ -1,6 +1,7 @@ - #include - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/find_op.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/generate_proposals_op.cu b/caffe2/operators/generate_proposals_op.cu -index 64518538b6..a4207f8653 100644 ---- a/caffe2/operators/generate_proposals_op.cu -+++ b/caffe2/operators/generate_proposals_op.cu -@@ -5,6 +5,7 @@ - #include "caffe2/operators/generate_proposals_op_util_boxes.h" // BBOX_XFORM_CLIP_DEFAULT - #include "caffe2/operators/generate_proposals_op_util_nms.h" - #include "caffe2/operators/generate_proposals_op_util_nms_gpu.h" -+#include "caffe2/utils/cub_namespace.cuh" - - #ifdef __HIP_PLATFORM_HCC__ - #include -diff --git a/caffe2/operators/normalize_ops.cu b/caffe2/operators/normalize_ops.cu -index 26df05308d..e4d1f34b75 100644 ---- a/caffe2/operators/normalize_ops.cu -+++ b/caffe2/operators/normalize_ops.cu -@@ -5,6 +5,7 @@ - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/normalize_l1_op.h" - #include "caffe2/operators/normalize_op.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/one_hot_ops.cu b/caffe2/operators/one_hot_ops.cu -index e521b3dd09..87e8196765 100644 ---- a/caffe2/operators/one_hot_ops.cu -+++ b/caffe2/operators/one_hot_ops.cu -@@ -2,6 +2,7 @@ - - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/one_hot_ops.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/pack_segments.cu b/caffe2/operators/pack_segments.cu -index 7475100fd3..372638abdd 100644 ---- a/caffe2/operators/pack_segments.cu -+++ b/caffe2/operators/pack_segments.cu -@@ -1,6 +1,7 @@ - #include - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/pack_segments.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/prelu_op.cu b/caffe2/operators/prelu_op.cu -index 745a393f07..6303b70b4a 100644 ---- a/caffe2/operators/prelu_op.cu -+++ b/caffe2/operators/prelu_op.cu -@@ -1,6 +1,7 @@ - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/prelu_op.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - namespace caffe2 { -diff --git a/caffe2/operators/reduce_front_back_max_ops.cu b/caffe2/operators/reduce_front_back_max_ops.cu -index 3c6ee7f0ae..c41d5ad579 100644 ---- a/caffe2/operators/reduce_front_back_max_ops.cu -+++ b/caffe2/operators/reduce_front_back_max_ops.cu -@@ -1,6 +1,7 @@ - #include - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/reduce_front_back_max_ops.h" 
-+#include "caffe2/utils/cub_namespace.cuh" - - #ifdef __HIP_PLATFORM_HCC__ - #include -diff --git a/caffe2/operators/reduce_front_back_sum_mean_ops.cu b/caffe2/operators/reduce_front_back_sum_mean_ops.cu -index 476596f084..a7ad6dd500 100644 ---- a/caffe2/operators/reduce_front_back_sum_mean_ops.cu -+++ b/caffe2/operators/reduce_front_back_sum_mean_ops.cu -@@ -1,6 +1,7 @@ - #include - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/reduce_front_back_sum_mean_ops.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/reduction_ops.cu b/caffe2/operators/reduction_ops.cu -index ba55a66de5..9649b85d01 100644 ---- a/caffe2/operators/reduction_ops.cu -+++ b/caffe2/operators/reduction_ops.cu -@@ -2,7 +2,7 @@ - #include "caffe2/operators/reduction_ops.h" - #include "caffe2/utils/conversions.h" - --#include -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/rmac_regions_op.cu b/caffe2/operators/rmac_regions_op.cu -index 0ec2dd351a..de2b2553a7 100644 ---- a/caffe2/operators/rmac_regions_op.cu -+++ b/caffe2/operators/rmac_regions_op.cu -@@ -1,4 +1,5 @@ - #include -+#include "caffe2/utils/cub_namespace.cuh" - - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/rmac_regions_op.h" -@@ -10,6 +11,9 @@ - #ifdef __HIP_PLATFORM_HCC__ - namespace rocprim { - #else -+#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() -+namespace at_cuda_detail { -+#endif - namespace cub { - #endif - -@@ -22,6 +26,9 @@ inline __host__ __device__ bool operator<( - } - - } // namespace cub -+#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() -+} // namespace at_cuda_detail -+#endif - - namespace caffe2 { - -diff --git a/caffe2/operators/segment_reduction_op_gpu.cuh b/caffe2/operators/segment_reduction_op_gpu.cuh -index ffe834e886..eebade352e 100644 ---- a/caffe2/operators/segment_reduction_op_gpu.cuh -+++ b/caffe2/operators/segment_reduction_op_gpu.cuh -@@ -1,3 +1,4 @@ -+#include "caffe2/utils/cub_namespace.cuh" - #include - #include - #include -diff --git a/caffe2/operators/sequence_ops.cu b/caffe2/operators/sequence_ops.cu -index cc34effd3f..2ceb5236ef 100644 ---- a/caffe2/operators/sequence_ops.cu -+++ b/caffe2/operators/sequence_ops.cu -@@ -1,6 +1,7 @@ - #include - - #include -+#include "caffe2/utils/cub_namespace.cuh" - - #include "caffe2/core/context_gpu.h" - #include "caffe2/operators/sequence_ops.h" -diff --git a/caffe2/operators/softmax_ops.cu b/caffe2/operators/softmax_ops.cu -index 51c0cbc2bf..ebf0700c9e 100644 ---- a/caffe2/operators/softmax_ops.cu -+++ b/caffe2/operators/softmax_ops.cu -@@ -5,6 +5,7 @@ - #include "caffe2/operators/softmax_op.h" - #include "caffe2/operators/softmax_with_loss_op.h" - #include "caffe2/operators/spatial_softmax_with_loss_op.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/operators/spatial_batch_norm_op_impl.cuh b/caffe2/operators/spatial_batch_norm_op_impl.cuh -index edc076c7d7..6fdb4c63f8 100644 ---- a/caffe2/operators/spatial_batch_norm_op_impl.cuh -+++ b/caffe2/operators/spatial_batch_norm_op_impl.cuh -@@ -5,8 +5,8 @@ - - #include - -+#include "caffe2/utils/cub_namespace.cuh" - #include --#include - - #include "caffe2/core/context_gpu.h" - #include "caffe2/utils/math.h" -diff --git a/caffe2/sgd/adagrad_fused_op_gpu.cu b/caffe2/sgd/adagrad_fused_op_gpu.cu -index e2bf91c880..a7057c8737 100644 ---- a/caffe2/sgd/adagrad_fused_op_gpu.cu -+++ b/caffe2/sgd/adagrad_fused_op_gpu.cu -@@ -2,6 +2,7 @@ - #include - #include - -+#include 
"caffe2/utils/cub_namespace.cuh" - #include - #include "caffe2/sgd/adagrad_fused_op_gpu.cuh" - #include "caffe2/utils/math.h" -diff --git a/caffe2/sgd/adagrad_op_gpu.cu b/caffe2/sgd/adagrad_op_gpu.cu -index 8abb3376ca..b80d29700c 100644 ---- a/caffe2/sgd/adagrad_op_gpu.cu -+++ b/caffe2/sgd/adagrad_op_gpu.cu -@@ -4,6 +4,7 @@ - #include "caffe2/core/common_gpu.h" - #include "caffe2/core/context_gpu.h" - #include "caffe2/sgd/adagrad_op.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/sgd/adam_op_gpu.cu b/caffe2/sgd/adam_op_gpu.cu -index 42ab975faa..6f9c323420 100644 ---- a/caffe2/sgd/adam_op_gpu.cu -+++ b/caffe2/sgd/adam_op_gpu.cu -@@ -2,6 +2,7 @@ - #include "caffe2/core/common_gpu.h" - #include "caffe2/core/context_gpu.h" - #include "caffe2/sgd/adam_op.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/utils/cub_namespace.cuh b/caffe2/utils/cub_namespace.cuh -new file mode 100644 -index 0000000000000..188a9936f9c6e ---- /dev/null -+++ b/caffe2/utils/cub_namespace.cuh -@@ -0,0 +1,17 @@ -+#pragma once -+ -+// cub sort support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in: -+// https://github.com/NVIDIA/cub/pull/326 -+// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake -+// starting from CUDA 11.5 -+#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE) -+#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true -+#else -+#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false -+#endif -+ -+#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() -+namespace caffe2 { -+namespace cub = ::CUB_WRAPPED_NAMESPACE::cub; -+} -+#endif -diff --git a/caffe2/utils/math/reduce.cu b/caffe2/utils/math/reduce.cu -index 8c40c5d2b0..e8a8b768eb 100644 ---- a/caffe2/utils/math/reduce.cu -+++ b/caffe2/utils/math/reduce.cu -@@ -5,9 +5,8 @@ - #include - #include - #include -- -+#include "caffe2/utils/cub_namespace.cuh" - #include --#include - - #include - #include -diff --git a/caffe2/utils/math/reduce.cuh b/caffe2/utils/math/reduce.cuh -index 0c43ad45a3..18bdca11b9 100644 ---- a/caffe2/utils/math/reduce.cuh -+++ b/caffe2/utils/math/reduce.cuh -@@ -1,8 +1,8 @@ - #ifndef CAFFE2_UTILS_MATH_REDUCE_CUH_ - #define CAFFE2_UTILS_MATH_REDUCE_CUH_ - -+#include "caffe2/utils/cub_namespace.cuh" - #include --#include - - #include "caffe2/core/common_gpu.h" - -diff --git a/caffe2/utils/math_gpu.cu b/caffe2/utils/math_gpu.cu -index 7f3bb8eea6..54fbcca1d4 100644 ---- a/caffe2/utils/math_gpu.cu -+++ b/caffe2/utils/math_gpu.cu -@@ -7,8 +7,9 @@ - #include - #include - --#include - #include -+#include -+#include "caffe2/utils/cub_namespace.cuh" - - #include - #include -diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake -index ca560288a4..5fd189e4a8 100644 ---- a/cmake/Dependencies.cmake -+++ b/cmake/Dependencies.cmake -@@ -1622,6 +1622,12 @@ if(NOT INTERN_BUILD_MOBILE) - list(APPEND CUDA_NVCC_FLAGS "-Xcompiler" "-fPIC") - endif() - -+ # use cub in a safe manner, see: -+ # https://github.com/pytorch/pytorch/pull/55292 -+ if(NOT ${CUDA_VERSION} LESS 11.5) -+ list(APPEND CUDA_NVCC_FLAGS "-DCUB_WRAPPED_NAMESPACE=at_cuda_detail") -+ endif() -+ - if(CUDA_HAS_FP16 OR NOT ${CUDA_VERSION} LESS 7.5) - message(STATUS "Found CUDA with FP16 support, compiling with torch.cuda.HalfTensor") - list(APPEND CUDA_NVCC_FLAGS "-DCUDA_HAS_FP16=1" "-D__CUDA_NO_HALF_OPERATORS__" "-D__CUDA_NO_HALF_CONVERSIONS__" diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java 
index 5f6fec8043c..e7fa73b93df 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -69,6 +69,7 @@ "torch/csrc/jit/frontend/resolver.h", "torch/csrc/jit/frontend/tree_views.h", "torch/csrc/jit/serialization/storage_context.h", + "ATen/DynamicLibrary.h", "datasets.h", "pytorch_adapters.h" diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java index 0d361c16e36..13e54f8b07f 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -44,6 +44,7 @@ // For inclusion in JNI only, not parsed "ATen/cuda/CUDAGeneratorImpl.h", + "ATen/DynamicLibrary.h", }, link = { "cudart", "cusparse" }, linkpath = { From 344fee44b10eec8c7885b725bc175ca4369ab484 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Mon, 5 Feb 2024 18:28:52 +0100 Subject: [PATCH 12/24] Revert "Add DynamicLibrary.h to JNI" This reverts commit 9ebb6619fad25479427ee34bdc1f769aed714da9. --- pytorch/pytorch.patch | 738 ++++++++++++++++++ .../org/bytedeco/pytorch/presets/torch.java | 1 - .../bytedeco/pytorch/presets/torch_cuda.java | 1 - 3 files changed, 738 insertions(+), 2 deletions(-) create mode 100644 pytorch/pytorch.patch diff --git a/pytorch/pytorch.patch b/pytorch/pytorch.patch new file mode 100644 index 00000000000..ef316b44f0f --- /dev/null +++ b/pytorch/pytorch.patch @@ -0,0 +1,738 @@ +diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml +index f33eda2f94..a92a79a684 100644 +--- a/.github/workflows/lint.yml ++++ b/.github/workflows/lint.yml +@@ -97,7 +97,7 @@ jobs: + - name: Ensure no direct cub include + if: always() + run: | +- (! 
git --no-pager grep -I -no $'#include + #include + ++#include ++ ++#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() ++ ++#include ++ ++#else ++ + // include cub in a safe manner, see: + // https://github.com/pytorch/pytorch/pull/55292 + #undef CUB_NS_POSTFIX //undef to avoid redefinition warnings + #undef CUB_NS_PREFIX +-#define CUB_NS_PREFIX namespace at { namespace cuda { namespace detail { +-#define CUB_NS_POSTFIX }}} ++#undef CUB_NS_QUALIFIER ++#define CUB_NS_PREFIX namespace at_cuda_detail { ++#define CUB_NS_POSTFIX } ++#define CUB_NS_QUALIFIER ::at_cuda_detail::cub + #include + #undef CUB_NS_POSTFIX + #undef CUB_NS_PREFIX ++#undef CUB_NS_QUALIFIER ++ ++#endif + + #include + #include +@@ -33,16 +46,40 @@ + #define NO_ROCM(x) + #else + #define NO_ROCM(x) x ++#endif + +-namespace at { namespace native { ++#if !defined(USE_ROCM) && !CUB_SUPPORTS_NV_BFLOAT16() ++ ++namespace at_cuda_detail { ++// backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16 ++ ++template <> ++struct cub::FpLimits ++{ ++ static __host__ __device__ __forceinline__ c10::BFloat16 Max() { ++ unsigned short max_word = 0x7F7F; ++ return reinterpret_cast(max_word); ++ } ++ ++ static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() { ++ unsigned short lowest_word = 0xFF7F; ++ return reinterpret_cast(lowest_word); ++ } ++}; + +-namespace cub = at::cuda::detail::cub; ++template <> struct cub::NumericTraits: cub::BaseTraits {}; ++} ++#endif + ++#if !defined(USE_ROCM) ++namespace at { namespace native { ++namespace cub = ::at_cuda_detail::cub; + }} + #endif + + namespace at { + namespace cuda { ++namespace cub { + + namespace detail { + +@@ -55,44 +92,17 @@ struct cuda_type { + using type = __half; + }; + +-#if defined(CUDA_VERSION) && CUDA_VERSION >= 11050 +-// cub sort support for __nv_bfloat16 is added to cub 1.13 in +-// https://github.com/NVIDIA/cub/pull/306 and according to +-// https://github.com/NVIDIA/cub#releases, 1.13 is included in +-// CUDA Toolkit 11.5 ++#if CUB_SUPPORTS_NV_BFLOAT16() + +-// waiting for https://github.com/NVIDIA/cub/pull/306 to land on CUDA + template<> + struct cuda_type { + using type = __nv_bfloat16; + }; + +-#elif !defined(__HIP_PLATFORM_HCC__) +- +-// backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16 +- +-template <> +-struct cub::FpLimits +-{ +- static __host__ __device__ __forceinline__ c10::BFloat16 Max() { +- unsigned short max_word = 0x7F7F; +- return reinterpret_cast(max_word); +- } +- +- static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() { +- unsigned short lowest_word = 0xFF7F; +- return reinterpret_cast(lowest_word); +- } +-}; +- +-template <> struct cub::NumericTraits: cub::BaseTraits {}; +- + #endif + + } // namespace detail + +-namespace cub { +- + inline int get_num_bits(uint64_t max_key) { + int num_bits = 1; + while (max_key > 1) { +@@ -115,11 +125,11 @@ static inline void sort_keys( + key_t_ *keys_out_ = reinterpret_cast(keys_out); + + if (descending) { +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortKeysDescending, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeysDescending, + keys_in_, keys_out_, n, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } else { +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortKeys, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeys, + keys_in_, keys_out_, n, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } +@@ -147,11 +157,11 @@ static inline void sort_pairs( + key_t_ *keys_out_ = reinterpret_cast(keys_out); + + if 
(descending) { +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortPairsDescending, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairsDescending, + keys_in_, keys_out_, values_in, values_out, n, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } else { +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortPairs, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairs, + keys_in_, keys_out_, values_in, values_out, n, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } +@@ -183,12 +193,12 @@ static inline void segmented_sort_pairs( + key_t_ *keys_out_ = reinterpret_cast(keys_out); + + if (descending) { +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending, + keys_in_, keys_out_, values_in, values_out, + num_elements, num_segments, begin_offsets, end_offsets, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } else { +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceSegmentedRadixSort::SortPairs, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairs, + keys_in_, keys_out_, values_in, values_out, + num_elements, num_segments, begin_offsets, end_offsets, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); +@@ -240,7 +250,7 @@ inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT + // so split at int_max/2 + constexpr int max_cub_size = std::numeric_limits::max() / 2 + 1; // 2**30 + int size_cub = std::min(num_items, max_cub_size); +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::InclusiveScan, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input, + output, + scan_op, +@@ -260,7 +270,7 @@ inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT + first_elem_ptr, + scan_op); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +- using ArgIndexInputIterator = NO_ROCM(detail)::cub::ArgIndexInputIterator; ++ using ArgIndexInputIterator = NO_ROCM(at_cuda_detail)::cub::ArgIndexInputIterator; + using tuple = typename ArgIndexInputIterator::value_type; + auto input_iter_transform = [=] __device__ (const tuple &x)->input_t { + if (x.key == 0) { +@@ -269,9 +279,9 @@ inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT + return x.value; + } + }; +- auto input_ = NO_ROCM(detail)::cub::TransformInputIterator( ++ auto input_ = NO_ROCM(at_cuda_detail)::cub::TransformInputIterator( + ArgIndexInputIterator(input + i), input_iter_transform); +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::InclusiveScan, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input_, + output + i, + scan_op, +@@ -287,7 +297,7 @@ inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT + // so split at int_max/2 + constexpr int max_cub_size = std::numeric_limits::max() / 2 + 1; // 2**30 + int size_cub = std::min(num_items, max_cub_size); +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::ExclusiveScan, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, + input, + output, + scan_op, +@@ -309,7 +319,7 @@ inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT + C10_CUDA_KERNEL_LAUNCH_CHECK(); + auto input_ = impl::chained_iterator{ + input + i, first_elem_ptr}; +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::InclusiveScan, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input_, + output + i, + scan_op, +@@ -322,7 
+332,7 @@ template::max(), + "cub unique does not support more than INT_MAX elements"); +- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceSelect::Unique, ++ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::Unique, + input, output, num_selected_out, num_items, at::cuda::getCurrentCUDAStream()); + } + +diff --git a/aten/src/ATen/cuda/cub_definitions.cuh b/aten/src/ATen/cuda/cub_definitions.cuh +new file mode 100644 +index 0000000000000..61119fc174587 +--- /dev/null ++++ b/aten/src/ATen/cuda/cub_definitions.cuh +@@ -0,0 +1,29 @@ ++#pragma once ++ ++#if !defined(USE_ROCM) ++#include // for CUDA_VERSION ++#endif ++ ++#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 ++#include ++#else ++#define CUB_VERSION 0 ++#endif ++ ++// cub sort support for __nv_bfloat16 is added to cub 1.13 in: ++// https://github.com/NVIDIA/cub/pull/306 ++#if CUB_VERSION >= 101300 ++#define CUB_SUPPORTS_NV_BFLOAT16() true ++#else ++#define CUB_SUPPORTS_NV_BFLOAT16() false ++#endif ++ ++// cub sort support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in: ++// https://github.com/NVIDIA/cub/pull/326 ++// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake ++// starting from CUDA 11.5 ++#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE) ++#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true ++#else ++#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false ++#endif +diff --git a/caffe2/core/context_gpu.cu b/caffe2/core/context_gpu.cu +index c2b89945ad..6d53740091 100644 +--- a/caffe2/core/context_gpu.cu ++++ b/caffe2/core/context_gpu.cu +@@ -21,6 +21,7 @@ + #include "caffe2/core/logging.h" + #include "caffe2/core/tensor.h" + #include "caffe2/utils/string_utils.h" ++#include "caffe2/utils/cub_namespace.cuh" + + C10_DEFINE_string( + caffe2_cuda_memory_pool, +diff --git a/caffe2/operators/accuracy_op.cu b/caffe2/operators/accuracy_op.cu +index f06663d71a..29df54e752 100644 +--- a/caffe2/operators/accuracy_op.cu ++++ b/caffe2/operators/accuracy_op.cu +@@ -3,6 +3,7 @@ + #include "caffe2/utils/GpuAtomics.cuh" + #include "caffe2/utils/math.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + + namespace caffe2 { +diff --git a/caffe2/operators/affine_channel_op.cu b/caffe2/operators/affine_channel_op.cu +index adf4ac55c0..efae0a3fc6 100644 +--- a/caffe2/operators/affine_channel_op.cu ++++ b/caffe2/operators/affine_channel_op.cu +@@ -1,5 +1,6 @@ + #include "caffe2/operators/affine_channel_op.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + + #include "caffe2/core/context_gpu.h" +diff --git a/caffe2/operators/arg_ops.cu b/caffe2/operators/arg_ops.cu +index 7e90d25b83..56deaa6363 100644 +--- a/caffe2/operators/arg_ops.cu ++++ b/caffe2/operators/arg_ops.cu +@@ -2,8 +2,8 @@ + + #include + ++#include "caffe2/utils/cub_namespace.cuh" + #include +-#include + + #include "caffe2/core/common_gpu.h" + #include "caffe2/core/context_gpu.h" +diff --git a/caffe2/operators/batch_moments_op.cu b/caffe2/operators/batch_moments_op.cu +index 4b693b5c04..81359f6440 100644 +--- a/caffe2/operators/batch_moments_op.cu ++++ b/caffe2/operators/batch_moments_op.cu +@@ -1,5 +1,6 @@ + #include "caffe2/operators/batch_moments_op.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + + #include "caffe2/core/context_gpu.h" +diff --git a/caffe2/operators/batch_sparse_to_dense_op.cu b/caffe2/operators/batch_sparse_to_dense_op.cu +index aea2035a5d..3e7ad8af9a 100644 +--- a/caffe2/operators/batch_sparse_to_dense_op.cu ++++ b/caffe2/operators/batch_sparse_to_dense_op.cu +@@ -1,5 +1,6 @@ + #include 
"caffe2/operators/batch_sparse_to_dense_op.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + + #include "caffe2/core/context_gpu.h" +diff --git a/caffe2/operators/boolean_mask_ops.cu b/caffe2/operators/boolean_mask_ops.cu +index 214b7c13ba..501dd3b191 100644 +--- a/caffe2/operators/boolean_mask_ops.cu ++++ b/caffe2/operators/boolean_mask_ops.cu +@@ -2,8 +2,8 @@ + + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/boolean_mask_ops.h" +- + #include ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/cross_entropy_op.cu b/caffe2/operators/cross_entropy_op.cu +index 380e80399f..c23f05f8e5 100644 +--- a/caffe2/operators/cross_entropy_op.cu ++++ b/caffe2/operators/cross_entropy_op.cu +@@ -4,6 +4,7 @@ + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/cross_entropy_op.h" + #include "caffe2/operators/operator_fallback_gpu.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/distance_op.cu b/caffe2/operators/distance_op.cu +index 3a8bb337d5..a360166854 100644 +--- a/caffe2/operators/distance_op.cu ++++ b/caffe2/operators/distance_op.cu +@@ -4,6 +4,7 @@ + #include "caffe2/operators/distance_op.h" + #include "caffe2/utils/conversions.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + + namespace caffe2 { +diff --git a/caffe2/operators/elementwise_div_op.cu b/caffe2/operators/elementwise_div_op.cu +index 42b103a0f1..33118a8f5e 100644 +--- a/caffe2/operators/elementwise_div_op.cu ++++ b/caffe2/operators/elementwise_div_op.cu +@@ -3,8 +3,8 @@ + #include + #include + ++#include "caffe2/utils/cub_namespace.cuh" + #include +-#include + + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/elementwise_ops_utils.h" +diff --git a/caffe2/operators/elementwise_linear_op.cu b/caffe2/operators/elementwise_linear_op.cu +index cc49115bff..8f749644b2 100644 +--- a/caffe2/operators/elementwise_linear_op.cu ++++ b/caffe2/operators/elementwise_linear_op.cu +@@ -5,6 +5,7 @@ + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/operator_fallback_gpu.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + + namespace caffe2 { +diff --git a/caffe2/operators/elementwise_mul_op.cu b/caffe2/operators/elementwise_mul_op.cu +index bdbf760cf9..1991b8b513 100644 +--- a/caffe2/operators/elementwise_mul_op.cu ++++ b/caffe2/operators/elementwise_mul_op.cu +@@ -3,8 +3,8 @@ + #include + #include + ++#include "caffe2/utils/cub_namespace.cuh" + #include +-#include + + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/elementwise_ops_utils.h" +diff --git a/caffe2/operators/elementwise_ops.cu b/caffe2/operators/elementwise_ops.cu +index c9ced33cf8..932bd5dafd 100644 +--- a/caffe2/operators/elementwise_ops.cu ++++ b/caffe2/operators/elementwise_ops.cu +@@ -1,5 +1,6 @@ + #include "caffe2/operators/elementwise_ops.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + #include + #include +diff --git a/caffe2/operators/find_op.cu b/caffe2/operators/find_op.cu +index f8ff2bab16..0418a71fbc 100644 +--- a/caffe2/operators/find_op.cu ++++ b/caffe2/operators/find_op.cu +@@ -1,6 +1,7 @@ + #include + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/find_op.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/generate_proposals_op.cu b/caffe2/operators/generate_proposals_op.cu +index 64518538b6..a4207f8653 100644 +--- a/caffe2/operators/generate_proposals_op.cu ++++ 
b/caffe2/operators/generate_proposals_op.cu +@@ -5,6 +5,7 @@ + #include "caffe2/operators/generate_proposals_op_util_boxes.h" // BBOX_XFORM_CLIP_DEFAULT + #include "caffe2/operators/generate_proposals_op_util_nms.h" + #include "caffe2/operators/generate_proposals_op_util_nms_gpu.h" ++#include "caffe2/utils/cub_namespace.cuh" + + #ifdef __HIP_PLATFORM_HCC__ + #include +diff --git a/caffe2/operators/normalize_ops.cu b/caffe2/operators/normalize_ops.cu +index 26df05308d..e4d1f34b75 100644 +--- a/caffe2/operators/normalize_ops.cu ++++ b/caffe2/operators/normalize_ops.cu +@@ -5,6 +5,7 @@ + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/normalize_l1_op.h" + #include "caffe2/operators/normalize_op.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/one_hot_ops.cu b/caffe2/operators/one_hot_ops.cu +index e521b3dd09..87e8196765 100644 +--- a/caffe2/operators/one_hot_ops.cu ++++ b/caffe2/operators/one_hot_ops.cu +@@ -2,6 +2,7 @@ + + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/one_hot_ops.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/pack_segments.cu b/caffe2/operators/pack_segments.cu +index 7475100fd3..372638abdd 100644 +--- a/caffe2/operators/pack_segments.cu ++++ b/caffe2/operators/pack_segments.cu +@@ -1,6 +1,7 @@ + #include + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/pack_segments.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/prelu_op.cu b/caffe2/operators/prelu_op.cu +index 745a393f07..6303b70b4a 100644 +--- a/caffe2/operators/prelu_op.cu ++++ b/caffe2/operators/prelu_op.cu +@@ -1,6 +1,7 @@ + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/prelu_op.h" + ++#include "caffe2/utils/cub_namespace.cuh" + #include + + namespace caffe2 { +diff --git a/caffe2/operators/reduce_front_back_max_ops.cu b/caffe2/operators/reduce_front_back_max_ops.cu +index 3c6ee7f0ae..c41d5ad579 100644 +--- a/caffe2/operators/reduce_front_back_max_ops.cu ++++ b/caffe2/operators/reduce_front_back_max_ops.cu +@@ -1,6 +1,7 @@ + #include + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/reduce_front_back_max_ops.h" ++#include "caffe2/utils/cub_namespace.cuh" + + #ifdef __HIP_PLATFORM_HCC__ + #include +diff --git a/caffe2/operators/reduce_front_back_sum_mean_ops.cu b/caffe2/operators/reduce_front_back_sum_mean_ops.cu +index 476596f084..a7ad6dd500 100644 +--- a/caffe2/operators/reduce_front_back_sum_mean_ops.cu ++++ b/caffe2/operators/reduce_front_back_sum_mean_ops.cu +@@ -1,6 +1,7 @@ + #include + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/reduce_front_back_sum_mean_ops.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/reduction_ops.cu b/caffe2/operators/reduction_ops.cu +index ba55a66de5..9649b85d01 100644 +--- a/caffe2/operators/reduction_ops.cu ++++ b/caffe2/operators/reduction_ops.cu +@@ -2,7 +2,7 @@ + #include "caffe2/operators/reduction_ops.h" + #include "caffe2/utils/conversions.h" + +-#include ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/rmac_regions_op.cu b/caffe2/operators/rmac_regions_op.cu +index 0ec2dd351a..de2b2553a7 100644 +--- a/caffe2/operators/rmac_regions_op.cu ++++ b/caffe2/operators/rmac_regions_op.cu +@@ -1,4 +1,5 @@ + #include ++#include "caffe2/utils/cub_namespace.cuh" + + #include "caffe2/core/context_gpu.h" + 
#include "caffe2/operators/rmac_regions_op.h" +@@ -10,6 +11,9 @@ + #ifdef __HIP_PLATFORM_HCC__ + namespace rocprim { + #else ++#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() ++namespace at_cuda_detail { ++#endif + namespace cub { + #endif + +@@ -22,6 +26,9 @@ inline __host__ __device__ bool operator<( + } + + } // namespace cub ++#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() ++} // namespace at_cuda_detail ++#endif + + namespace caffe2 { + +diff --git a/caffe2/operators/segment_reduction_op_gpu.cuh b/caffe2/operators/segment_reduction_op_gpu.cuh +index ffe834e886..eebade352e 100644 +--- a/caffe2/operators/segment_reduction_op_gpu.cuh ++++ b/caffe2/operators/segment_reduction_op_gpu.cuh +@@ -1,3 +1,4 @@ ++#include "caffe2/utils/cub_namespace.cuh" + #include + #include + #include +diff --git a/caffe2/operators/sequence_ops.cu b/caffe2/operators/sequence_ops.cu +index cc34effd3f..2ceb5236ef 100644 +--- a/caffe2/operators/sequence_ops.cu ++++ b/caffe2/operators/sequence_ops.cu +@@ -1,6 +1,7 @@ + #include + + #include ++#include "caffe2/utils/cub_namespace.cuh" + + #include "caffe2/core/context_gpu.h" + #include "caffe2/operators/sequence_ops.h" +diff --git a/caffe2/operators/softmax_ops.cu b/caffe2/operators/softmax_ops.cu +index 51c0cbc2bf..ebf0700c9e 100644 +--- a/caffe2/operators/softmax_ops.cu ++++ b/caffe2/operators/softmax_ops.cu +@@ -5,6 +5,7 @@ + #include "caffe2/operators/softmax_op.h" + #include "caffe2/operators/softmax_with_loss_op.h" + #include "caffe2/operators/spatial_softmax_with_loss_op.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/operators/spatial_batch_norm_op_impl.cuh b/caffe2/operators/spatial_batch_norm_op_impl.cuh +index edc076c7d7..6fdb4c63f8 100644 +--- a/caffe2/operators/spatial_batch_norm_op_impl.cuh ++++ b/caffe2/operators/spatial_batch_norm_op_impl.cuh +@@ -5,8 +5,8 @@ + + #include + ++#include "caffe2/utils/cub_namespace.cuh" + #include +-#include + + #include "caffe2/core/context_gpu.h" + #include "caffe2/utils/math.h" +diff --git a/caffe2/sgd/adagrad_fused_op_gpu.cu b/caffe2/sgd/adagrad_fused_op_gpu.cu +index e2bf91c880..a7057c8737 100644 +--- a/caffe2/sgd/adagrad_fused_op_gpu.cu ++++ b/caffe2/sgd/adagrad_fused_op_gpu.cu +@@ -2,6 +2,7 @@ + #include + #include + ++#include "caffe2/utils/cub_namespace.cuh" + #include + #include "caffe2/sgd/adagrad_fused_op_gpu.cuh" + #include "caffe2/utils/math.h" +diff --git a/caffe2/sgd/adagrad_op_gpu.cu b/caffe2/sgd/adagrad_op_gpu.cu +index 8abb3376ca..b80d29700c 100644 +--- a/caffe2/sgd/adagrad_op_gpu.cu ++++ b/caffe2/sgd/adagrad_op_gpu.cu +@@ -4,6 +4,7 @@ + #include "caffe2/core/common_gpu.h" + #include "caffe2/core/context_gpu.h" + #include "caffe2/sgd/adagrad_op.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/sgd/adam_op_gpu.cu b/caffe2/sgd/adam_op_gpu.cu +index 42ab975faa..6f9c323420 100644 +--- a/caffe2/sgd/adam_op_gpu.cu ++++ b/caffe2/sgd/adam_op_gpu.cu +@@ -2,6 +2,7 @@ + #include "caffe2/core/common_gpu.h" + #include "caffe2/core/context_gpu.h" + #include "caffe2/sgd/adam_op.h" ++#include "caffe2/utils/cub_namespace.cuh" + + namespace caffe2 { + +diff --git a/caffe2/utils/cub_namespace.cuh b/caffe2/utils/cub_namespace.cuh +new file mode 100644 +index 0000000000000..188a9936f9c6e +--- /dev/null ++++ b/caffe2/utils/cub_namespace.cuh +@@ -0,0 +1,17 @@ ++#pragma once ++ ++// cub sort support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in: ++// https://github.com/NVIDIA/cub/pull/326 ++// CUB_WRAPPED_NAMESPACE is defined globally in 
cmake/Dependencies.cmake ++// starting from CUDA 11.5 ++#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE) ++#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true ++#else ++#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false ++#endif ++ ++#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() ++namespace caffe2 { ++namespace cub = ::CUB_WRAPPED_NAMESPACE::cub; ++} ++#endif +diff --git a/caffe2/utils/math/reduce.cu b/caffe2/utils/math/reduce.cu +index 8c40c5d2b0..e8a8b768eb 100644 +--- a/caffe2/utils/math/reduce.cu ++++ b/caffe2/utils/math/reduce.cu +@@ -5,9 +5,8 @@ + #include + #include + #include +- ++#include "caffe2/utils/cub_namespace.cuh" + #include +-#include + + #include + #include +diff --git a/caffe2/utils/math/reduce.cuh b/caffe2/utils/math/reduce.cuh +index 0c43ad45a3..18bdca11b9 100644 +--- a/caffe2/utils/math/reduce.cuh ++++ b/caffe2/utils/math/reduce.cuh +@@ -1,8 +1,8 @@ + #ifndef CAFFE2_UTILS_MATH_REDUCE_CUH_ + #define CAFFE2_UTILS_MATH_REDUCE_CUH_ + ++#include "caffe2/utils/cub_namespace.cuh" + #include +-#include + + #include "caffe2/core/common_gpu.h" + +diff --git a/caffe2/utils/math_gpu.cu b/caffe2/utils/math_gpu.cu +index 7f3bb8eea6..54fbcca1d4 100644 +--- a/caffe2/utils/math_gpu.cu ++++ b/caffe2/utils/math_gpu.cu +@@ -7,8 +7,9 @@ + #include + #include + +-#include + #include ++#include ++#include "caffe2/utils/cub_namespace.cuh" + + #include + #include +diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake +index ca560288a4..5fd189e4a8 100644 +--- a/cmake/Dependencies.cmake ++++ b/cmake/Dependencies.cmake +@@ -1622,6 +1622,12 @@ if(NOT INTERN_BUILD_MOBILE) + list(APPEND CUDA_NVCC_FLAGS "-Xcompiler" "-fPIC") + endif() + ++ # use cub in a safe manner, see: ++ # https://github.com/pytorch/pytorch/pull/55292 ++ if(NOT ${CUDA_VERSION} LESS 11.5) ++ list(APPEND CUDA_NVCC_FLAGS "-DCUB_WRAPPED_NAMESPACE=at_cuda_detail") ++ endif() ++ + if(CUDA_HAS_FP16 OR NOT ${CUDA_VERSION} LESS 7.5) + message(STATUS "Found CUDA with FP16 support, compiling with torch.cuda.HalfTensor") + list(APPEND CUDA_NVCC_FLAGS "-DCUDA_HAS_FP16=1" "-D__CUDA_NO_HALF_OPERATORS__" "-D__CUDA_NO_HALF_CONVERSIONS__" diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index e7fa73b93df..5f6fec8043c 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -69,7 +69,6 @@ "torch/csrc/jit/frontend/resolver.h", "torch/csrc/jit/frontend/tree_views.h", "torch/csrc/jit/serialization/storage_context.h", - "ATen/DynamicLibrary.h", "datasets.h", "pytorch_adapters.h" diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java index 13e54f8b07f..0d361c16e36 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -44,7 +44,6 @@ // For inclusion in JNI only, not parsed "ATen/cuda/CUDAGeneratorImpl.h", - "ATen/DynamicLibrary.h", }, link = { "cudart", "cusparse" }, linkpath = { From 4129ee0c139483c22085a71aa0a10c2727dae9cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Tue, 6 Feb 2024 12:09:46 +0100 Subject: [PATCH 13/24] Link jnitorch_cuda with cudnn --- pytorch/pytorch.patch | 738 ------------------ .../bytedeco/pytorch/presets/torch_cuda.java | 2 +- 2 files changed, 1 insertion(+), 739 deletions(-) delete mode 100644 
pytorch/pytorch.patch diff --git a/pytorch/pytorch.patch b/pytorch/pytorch.patch deleted file mode 100644 index ef316b44f0f..00000000000 --- a/pytorch/pytorch.patch +++ /dev/null @@ -1,738 +0,0 @@ -diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml -index f33eda2f94..a92a79a684 100644 ---- a/.github/workflows/lint.yml -+++ b/.github/workflows/lint.yml -@@ -97,7 +97,7 @@ jobs: - - name: Ensure no direct cub include - if: always() - run: | -- (! git --no-pager grep -I -no $'#include - #include - -+#include -+ -+#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() -+ -+#include -+ -+#else -+ - // include cub in a safe manner, see: - // https://github.com/pytorch/pytorch/pull/55292 - #undef CUB_NS_POSTFIX //undef to avoid redefinition warnings - #undef CUB_NS_PREFIX --#define CUB_NS_PREFIX namespace at { namespace cuda { namespace detail { --#define CUB_NS_POSTFIX }}} -+#undef CUB_NS_QUALIFIER -+#define CUB_NS_PREFIX namespace at_cuda_detail { -+#define CUB_NS_POSTFIX } -+#define CUB_NS_QUALIFIER ::at_cuda_detail::cub - #include - #undef CUB_NS_POSTFIX - #undef CUB_NS_PREFIX -+#undef CUB_NS_QUALIFIER -+ -+#endif - - #include - #include -@@ -33,16 +46,40 @@ - #define NO_ROCM(x) - #else - #define NO_ROCM(x) x -+#endif - --namespace at { namespace native { -+#if !defined(USE_ROCM) && !CUB_SUPPORTS_NV_BFLOAT16() -+ -+namespace at_cuda_detail { -+// backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16 -+ -+template <> -+struct cub::FpLimits -+{ -+ static __host__ __device__ __forceinline__ c10::BFloat16 Max() { -+ unsigned short max_word = 0x7F7F; -+ return reinterpret_cast(max_word); -+ } -+ -+ static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() { -+ unsigned short lowest_word = 0xFF7F; -+ return reinterpret_cast(lowest_word); -+ } -+}; - --namespace cub = at::cuda::detail::cub; -+template <> struct cub::NumericTraits: cub::BaseTraits {}; -+} -+#endif - -+#if !defined(USE_ROCM) -+namespace at { namespace native { -+namespace cub = ::at_cuda_detail::cub; - }} - #endif - - namespace at { - namespace cuda { -+namespace cub { - - namespace detail { - -@@ -55,44 +92,17 @@ struct cuda_type { - using type = __half; - }; - --#if defined(CUDA_VERSION) && CUDA_VERSION >= 11050 --// cub sort support for __nv_bfloat16 is added to cub 1.13 in --// https://github.com/NVIDIA/cub/pull/306 and according to --// https://github.com/NVIDIA/cub#releases, 1.13 is included in --// CUDA Toolkit 11.5 -+#if CUB_SUPPORTS_NV_BFLOAT16() - --// waiting for https://github.com/NVIDIA/cub/pull/306 to land on CUDA - template<> - struct cuda_type { - using type = __nv_bfloat16; - }; - --#elif !defined(__HIP_PLATFORM_HCC__) -- --// backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16 -- --template <> --struct cub::FpLimits --{ -- static __host__ __device__ __forceinline__ c10::BFloat16 Max() { -- unsigned short max_word = 0x7F7F; -- return reinterpret_cast(max_word); -- } -- -- static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() { -- unsigned short lowest_word = 0xFF7F; -- return reinterpret_cast(lowest_word); -- } --}; -- --template <> struct cub::NumericTraits: cub::BaseTraits {}; -- - #endif - - } // namespace detail - --namespace cub { -- - inline int get_num_bits(uint64_t max_key) { - int num_bits = 1; - while (max_key > 1) { -@@ -115,11 +125,11 @@ static inline void sort_keys( - key_t_ *keys_out_ = reinterpret_cast(keys_out); - - if (descending) { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortKeysDescending, -+ 
CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeysDescending, - keys_in_, keys_out_, n, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); - } else { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortKeys, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeys, - keys_in_, keys_out_, n, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); - } -@@ -147,11 +157,11 @@ static inline void sort_pairs( - key_t_ *keys_out_ = reinterpret_cast(keys_out); - - if (descending) { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortPairsDescending, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairsDescending, - keys_in_, keys_out_, values_in, values_out, n, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); - } else { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceRadixSort::SortPairs, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairs, - keys_in_, keys_out_, values_in, values_out, n, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); - } -@@ -183,12 +193,12 @@ static inline void segmented_sort_pairs( - key_t_ *keys_out_ = reinterpret_cast(keys_out); - - if (descending) { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending, - keys_in_, keys_out_, values_in, values_out, - num_elements, num_segments, begin_offsets, end_offsets, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); - } else { -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceSegmentedRadixSort::SortPairs, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairs, - keys_in_, keys_out_, values_in, values_out, - num_elements, num_segments, begin_offsets, end_offsets, - begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); -@@ -240,7 +250,7 @@ inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT - // so split at int_max/2 - constexpr int max_cub_size = std::numeric_limits::max() / 2 + 1; // 2**30 - int size_cub = std::min(num_items, max_cub_size); -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::InclusiveScan, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, - input, - output, - scan_op, -@@ -260,7 +270,7 @@ inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT - first_elem_ptr, - scan_op); - C10_CUDA_KERNEL_LAUNCH_CHECK(); -- using ArgIndexInputIterator = NO_ROCM(detail)::cub::ArgIndexInputIterator; -+ using ArgIndexInputIterator = NO_ROCM(at_cuda_detail)::cub::ArgIndexInputIterator; - using tuple = typename ArgIndexInputIterator::value_type; - auto input_iter_transform = [=] __device__ (const tuple &x)->input_t { - if (x.key == 0) { -@@ -269,9 +279,9 @@ inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT - return x.value; - } - }; -- auto input_ = NO_ROCM(detail)::cub::TransformInputIterator( -+ auto input_ = NO_ROCM(at_cuda_detail)::cub::TransformInputIterator( - ArgIndexInputIterator(input + i), input_iter_transform); -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::InclusiveScan, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, - input_, - output + i, - scan_op, -@@ -287,7 +297,7 @@ inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT - // so split at int_max/2 - constexpr int max_cub_size = std::numeric_limits::max() / 2 + 1; // 2**30 - int size_cub = std::min(num_items, max_cub_size); -- 
CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::ExclusiveScan, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, - input, - output, - scan_op, -@@ -309,7 +319,7 @@ inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT - C10_CUDA_KERNEL_LAUNCH_CHECK(); - auto input_ = impl::chained_iterator{ - input + i, first_elem_ptr}; -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceScan::InclusiveScan, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, - input_, - output + i, - scan_op, -@@ -322,7 +332,7 @@ template::max(), - "cub unique does not support more than INT_MAX elements"); -- CUB_WRAPPER(NO_ROCM(detail)::cub::DeviceSelect::Unique, -+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::Unique, - input, output, num_selected_out, num_items, at::cuda::getCurrentCUDAStream()); - } - -diff --git a/aten/src/ATen/cuda/cub_definitions.cuh b/aten/src/ATen/cuda/cub_definitions.cuh -new file mode 100644 -index 0000000000000..61119fc174587 ---- /dev/null -+++ b/aten/src/ATen/cuda/cub_definitions.cuh -@@ -0,0 +1,29 @@ -+#pragma once -+ -+#if !defined(USE_ROCM) -+#include // for CUDA_VERSION -+#endif -+ -+#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 -+#include -+#else -+#define CUB_VERSION 0 -+#endif -+ -+// cub sort support for __nv_bfloat16 is added to cub 1.13 in: -+// https://github.com/NVIDIA/cub/pull/306 -+#if CUB_VERSION >= 101300 -+#define CUB_SUPPORTS_NV_BFLOAT16() true -+#else -+#define CUB_SUPPORTS_NV_BFLOAT16() false -+#endif -+ -+// cub sort support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in: -+// https://github.com/NVIDIA/cub/pull/326 -+// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake -+// starting from CUDA 11.5 -+#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE) -+#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true -+#else -+#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false -+#endif -diff --git a/caffe2/core/context_gpu.cu b/caffe2/core/context_gpu.cu -index c2b89945ad..6d53740091 100644 ---- a/caffe2/core/context_gpu.cu -+++ b/caffe2/core/context_gpu.cu -@@ -21,6 +21,7 @@ - #include "caffe2/core/logging.h" - #include "caffe2/core/tensor.h" - #include "caffe2/utils/string_utils.h" -+#include "caffe2/utils/cub_namespace.cuh" - - C10_DEFINE_string( - caffe2_cuda_memory_pool, -diff --git a/caffe2/operators/accuracy_op.cu b/caffe2/operators/accuracy_op.cu -index f06663d71a..29df54e752 100644 ---- a/caffe2/operators/accuracy_op.cu -+++ b/caffe2/operators/accuracy_op.cu -@@ -3,6 +3,7 @@ - #include "caffe2/utils/GpuAtomics.cuh" - #include "caffe2/utils/math.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - namespace caffe2 { -diff --git a/caffe2/operators/affine_channel_op.cu b/caffe2/operators/affine_channel_op.cu -index adf4ac55c0..efae0a3fc6 100644 ---- a/caffe2/operators/affine_channel_op.cu -+++ b/caffe2/operators/affine_channel_op.cu -@@ -1,5 +1,6 @@ - #include "caffe2/operators/affine_channel_op.h" - -+#include "caffe2/utils/cub_namespace.cuh" - #include - - #include "caffe2/core/context_gpu.h" -diff --git a/caffe2/operators/arg_ops.cu b/caffe2/operators/arg_ops.cu -index 7e90d25b83..56deaa6363 100644 ---- a/caffe2/operators/arg_ops.cu -+++ b/caffe2/operators/arg_ops.cu -@@ -2,8 +2,8 @@ - - #include - -+#include "caffe2/utils/cub_namespace.cuh" - #include --#include - - #include "caffe2/core/common_gpu.h" - #include "caffe2/core/context_gpu.h" -diff --git a/caffe2/operators/batch_moments_op.cu b/caffe2/operators/batch_moments_op.cu -index 
-index 4b693b5c04..81359f6440 100644
---- a/caffe2/operators/batch_moments_op.cu
-+++ b/caffe2/operators/batch_moments_op.cu
-@@ -1,5 +1,6 @@
- #include "caffe2/operators/batch_moments_op.h"
- 
-+#include "caffe2/utils/cub_namespace.cuh"
- #include
- 
- #include "caffe2/core/context_gpu.h"
-diff --git a/caffe2/operators/batch_sparse_to_dense_op.cu b/caffe2/operators/batch_sparse_to_dense_op.cu
-index aea2035a5d..3e7ad8af9a 100644
---- a/caffe2/operators/batch_sparse_to_dense_op.cu
-+++ b/caffe2/operators/batch_sparse_to_dense_op.cu
-@@ -1,5 +1,6 @@
- #include "caffe2/operators/batch_sparse_to_dense_op.h"
- 
-+#include "caffe2/utils/cub_namespace.cuh"
- #include
- 
- #include "caffe2/core/context_gpu.h"
-diff --git a/caffe2/operators/boolean_mask_ops.cu b/caffe2/operators/boolean_mask_ops.cu
-index 214b7c13ba..501dd3b191 100644
---- a/caffe2/operators/boolean_mask_ops.cu
-+++ b/caffe2/operators/boolean_mask_ops.cu
-@@ -2,8 +2,8 @@
- 
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/operators/boolean_mask_ops.h"
--
- #include
-+#include "caffe2/utils/cub_namespace.cuh"
- 
- namespace caffe2 {
- 
-diff --git a/caffe2/operators/cross_entropy_op.cu b/caffe2/operators/cross_entropy_op.cu
-index 380e80399f..c23f05f8e5 100644
---- a/caffe2/operators/cross_entropy_op.cu
-+++ b/caffe2/operators/cross_entropy_op.cu
-@@ -4,6 +4,7 @@
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/operators/cross_entropy_op.h"
- #include "caffe2/operators/operator_fallback_gpu.h"
-+#include "caffe2/utils/cub_namespace.cuh"
- 
- namespace caffe2 {
- 
-diff --git a/caffe2/operators/distance_op.cu b/caffe2/operators/distance_op.cu
-index 3a8bb337d5..a360166854 100644
---- a/caffe2/operators/distance_op.cu
-+++ b/caffe2/operators/distance_op.cu
-@@ -4,6 +4,7 @@
- #include "caffe2/operators/distance_op.h"
- #include "caffe2/utils/conversions.h"
- 
-+#include "caffe2/utils/cub_namespace.cuh"
- #include
- 
- namespace caffe2 {
-diff --git a/caffe2/operators/elementwise_div_op.cu b/caffe2/operators/elementwise_div_op.cu
-index 42b103a0f1..33118a8f5e 100644
---- a/caffe2/operators/elementwise_div_op.cu
-+++ b/caffe2/operators/elementwise_div_op.cu
-@@ -3,8 +3,8 @@
- #include
- #include
- 
-+#include "caffe2/utils/cub_namespace.cuh"
- #include
--#include
- 
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/operators/elementwise_ops_utils.h"
-diff --git a/caffe2/operators/elementwise_linear_op.cu b/caffe2/operators/elementwise_linear_op.cu
-index cc49115bff..8f749644b2 100644
---- a/caffe2/operators/elementwise_linear_op.cu
-+++ b/caffe2/operators/elementwise_linear_op.cu
-@@ -5,6 +5,7 @@
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/operators/operator_fallback_gpu.h"
- 
-+#include "caffe2/utils/cub_namespace.cuh"
- #include
- 
- namespace caffe2 {
-diff --git a/caffe2/operators/elementwise_mul_op.cu b/caffe2/operators/elementwise_mul_op.cu
-index bdbf760cf9..1991b8b513 100644
---- a/caffe2/operators/elementwise_mul_op.cu
-+++ b/caffe2/operators/elementwise_mul_op.cu
-@@ -3,8 +3,8 @@
- #include
- #include
- 
-+#include "caffe2/utils/cub_namespace.cuh"
- #include
--#include
- 
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/operators/elementwise_ops_utils.h"
-diff --git a/caffe2/operators/elementwise_ops.cu b/caffe2/operators/elementwise_ops.cu
-index c9ced33cf8..932bd5dafd 100644
---- a/caffe2/operators/elementwise_ops.cu
-+++ b/caffe2/operators/elementwise_ops.cu
-@@ -1,5 +1,6 @@
- #include "caffe2/operators/elementwise_ops.h"
- 
-+#include "caffe2/utils/cub_namespace.cuh"
- #include
- #include
- #include
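Every caffe2 hunk above and below follows the same recipe: prepend "caffe2/utils/cub_namespace.cuh" next to the file's existing cub includes. That header (its full text appears later in this patch) defines USE_GLOBAL_CUB_WRAPPED_NAMESPACE() and, when CUB is built wrapped, adds "namespace cub = ::CUB_WRAPPED_NAMESPACE::cub;" inside namespace caffe2. A minimal sketch of why this keeps existing operator code compiling; the file name and kernel here are illustrative assumptions, not part of the patch:

// sketch_block_sum.cu -- illustrative only; assumes nvcc is invoked with
// -DCUB_WRAPPED_NAMESPACE=at_cuda_detail as arranged in cmake/Dependencies.cmake,
// so CUB's symbols live under ::at_cuda_detail::cub instead of ::cub.
#include <cub/block/block_reduce.cuh>      // declares ::at_cuda_detail::cub::BlockReduce
#include "caffe2/utils/cub_namespace.cuh"  // in namespace caffe2: namespace cub = ::CUB_WRAPPED_NAMESPACE::cub;

namespace caffe2 {

__global__ void BlockSum128(const float* x, float* y, int n) {
  // Unqualified "cub::" resolves to caffe2::cub, the alias for
  // ::at_cuda_detail::cub, so pre-existing cub:: call sites need no edits.
  using BlockReduce = cub::BlockReduce<float, 128>;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  float v = i < n ? x[i] : 0.0f;
  float sum = BlockReduce(temp_storage).Sum(v);
  if (threadIdx.x == 0) {
    y[blockIdx.x] = sum;
  }
}

}  // namespace caffe2

Without the alias, unqualified cub:: lookup in these files would either fail or silently bind to an unwrapped copy of CUB shipped by another library, which is exactly the conflict the wrapped namespace is meant to prevent.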
-diff --git a/caffe2/operators/find_op.cu b/caffe2/operators/find_op.cu
-index f8ff2bab16..0418a71fbc 100644
---- a/caffe2/operators/find_op.cu
-+++ b/caffe2/operators/find_op.cu
-@@ -1,6 +1,7 @@
- #include
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/operators/find_op.h"
-+#include "caffe2/utils/cub_namespace.cuh"
- 
- namespace caffe2 {
- 
-diff --git a/caffe2/operators/generate_proposals_op.cu b/caffe2/operators/generate_proposals_op.cu
-index 64518538b6..a4207f8653 100644
---- a/caffe2/operators/generate_proposals_op.cu
-+++ b/caffe2/operators/generate_proposals_op.cu
-@@ -5,6 +5,7 @@
- #include "caffe2/operators/generate_proposals_op_util_boxes.h" // BBOX_XFORM_CLIP_DEFAULT
- #include "caffe2/operators/generate_proposals_op_util_nms.h"
- #include "caffe2/operators/generate_proposals_op_util_nms_gpu.h"
-+#include "caffe2/utils/cub_namespace.cuh"
- 
- #ifdef __HIP_PLATFORM_HCC__
- #include
-diff --git a/caffe2/operators/normalize_ops.cu b/caffe2/operators/normalize_ops.cu
-index 26df05308d..e4d1f34b75 100644
---- a/caffe2/operators/normalize_ops.cu
-+++ b/caffe2/operators/normalize_ops.cu
-@@ -5,6 +5,7 @@
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/operators/normalize_l1_op.h"
- #include "caffe2/operators/normalize_op.h"
-+#include "caffe2/utils/cub_namespace.cuh"
- 
- namespace caffe2 {
- 
-diff --git a/caffe2/operators/one_hot_ops.cu b/caffe2/operators/one_hot_ops.cu
-index e521b3dd09..87e8196765 100644
---- a/caffe2/operators/one_hot_ops.cu
-+++ b/caffe2/operators/one_hot_ops.cu
-@@ -2,6 +2,7 @@
- 
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/operators/one_hot_ops.h"
-+#include "caffe2/utils/cub_namespace.cuh"
- 
- namespace caffe2 {
- 
-diff --git a/caffe2/operators/pack_segments.cu b/caffe2/operators/pack_segments.cu
-index 7475100fd3..372638abdd 100644
---- a/caffe2/operators/pack_segments.cu
-+++ b/caffe2/operators/pack_segments.cu
-@@ -1,6 +1,7 @@
- #include
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/operators/pack_segments.h"
-+#include "caffe2/utils/cub_namespace.cuh"
- 
- namespace caffe2 {
- 
-diff --git a/caffe2/operators/prelu_op.cu b/caffe2/operators/prelu_op.cu
-index 745a393f07..6303b70b4a 100644
---- a/caffe2/operators/prelu_op.cu
-+++ b/caffe2/operators/prelu_op.cu
-@@ -1,6 +1,7 @@
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/operators/prelu_op.h"
- 
-+#include "caffe2/utils/cub_namespace.cuh"
- #include
- 
- namespace caffe2 {
-diff --git a/caffe2/operators/reduce_front_back_max_ops.cu b/caffe2/operators/reduce_front_back_max_ops.cu
-index 3c6ee7f0ae..c41d5ad579 100644
---- a/caffe2/operators/reduce_front_back_max_ops.cu
-+++ b/caffe2/operators/reduce_front_back_max_ops.cu
-@@ -1,6 +1,7 @@
- #include
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/operators/reduce_front_back_max_ops.h"
-+#include "caffe2/utils/cub_namespace.cuh"
- 
- #ifdef __HIP_PLATFORM_HCC__
- #include
-diff --git a/caffe2/operators/reduce_front_back_sum_mean_ops.cu b/caffe2/operators/reduce_front_back_sum_mean_ops.cu
-index 476596f084..a7ad6dd500 100644
---- a/caffe2/operators/reduce_front_back_sum_mean_ops.cu
-+++ b/caffe2/operators/reduce_front_back_sum_mean_ops.cu
-@@ -1,6 +1,7 @@
- #include
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/operators/reduce_front_back_sum_mean_ops.h"
-+#include "caffe2/utils/cub_namespace.cuh"
- 
- namespace caffe2 {
- 
-diff --git a/caffe2/operators/reduction_ops.cu b/caffe2/operators/reduction_ops.cu
-index ba55a66de5..9649b85d01 100644
---- a/caffe2/operators/reduction_ops.cu
-+++ b/caffe2/operators/reduction_ops.cu
-@@ -2,7 +2,7 @@
- #include "caffe2/operators/reduction_ops.h"
- #include "caffe2/utils/conversions.h"
- 
--#include
-+#include "caffe2/utils/cub_namespace.cuh"
- 
- namespace caffe2 {
- 
-diff --git a/caffe2/operators/rmac_regions_op.cu b/caffe2/operators/rmac_regions_op.cu
-index 0ec2dd351a..de2b2553a7 100644
---- a/caffe2/operators/rmac_regions_op.cu
-+++ b/caffe2/operators/rmac_regions_op.cu
-@@ -1,4 +1,5 @@
- #include
-+#include "caffe2/utils/cub_namespace.cuh"
- 
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/operators/rmac_regions_op.h"
-@@ -10,6 +11,9 @@
- #ifdef __HIP_PLATFORM_HCC__
- namespace rocprim {
- #else
-+#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE()
-+namespace at_cuda_detail {
-+#endif
- namespace cub {
- #endif
- 
-@@ -22,6 +26,9 @@ inline __host__ __device__ bool operator<(
- }
- 
- } // namespace cub
-+#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE()
-+} // namespace at_cuda_detail
-+#endif
- 
- namespace caffe2 {
- 
-diff --git a/caffe2/operators/segment_reduction_op_gpu.cuh b/caffe2/operators/segment_reduction_op_gpu.cuh
-index ffe834e886..eebade352e 100644
---- a/caffe2/operators/segment_reduction_op_gpu.cuh
-+++ b/caffe2/operators/segment_reduction_op_gpu.cuh
-@@ -1,3 +1,4 @@
-+#include "caffe2/utils/cub_namespace.cuh"
- #include
- #include
- #include
-diff --git a/caffe2/operators/sequence_ops.cu b/caffe2/operators/sequence_ops.cu
-index cc34effd3f..2ceb5236ef 100644
---- a/caffe2/operators/sequence_ops.cu
-+++ b/caffe2/operators/sequence_ops.cu
-@@ -1,6 +1,7 @@
- #include
- 
- #include
-+#include "caffe2/utils/cub_namespace.cuh"
- 
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/operators/sequence_ops.h"
-diff --git a/caffe2/operators/softmax_ops.cu b/caffe2/operators/softmax_ops.cu
-index 51c0cbc2bf..ebf0700c9e 100644
---- a/caffe2/operators/softmax_ops.cu
-+++ b/caffe2/operators/softmax_ops.cu
-@@ -5,6 +5,7 @@
- #include "caffe2/operators/softmax_op.h"
- #include "caffe2/operators/softmax_with_loss_op.h"
- #include "caffe2/operators/spatial_softmax_with_loss_op.h"
-+#include "caffe2/utils/cub_namespace.cuh"
- 
- namespace caffe2 {
- 
-diff --git a/caffe2/operators/spatial_batch_norm_op_impl.cuh b/caffe2/operators/spatial_batch_norm_op_impl.cuh
-index edc076c7d7..6fdb4c63f8 100644
---- a/caffe2/operators/spatial_batch_norm_op_impl.cuh
-+++ b/caffe2/operators/spatial_batch_norm_op_impl.cuh
-@@ -5,8 +5,8 @@
- 
- #include
- 
-+#include "caffe2/utils/cub_namespace.cuh"
- #include
--#include
- 
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/utils/math.h"
-diff --git a/caffe2/sgd/adagrad_fused_op_gpu.cu b/caffe2/sgd/adagrad_fused_op_gpu.cu
-index e2bf91c880..a7057c8737 100644
---- a/caffe2/sgd/adagrad_fused_op_gpu.cu
-+++ b/caffe2/sgd/adagrad_fused_op_gpu.cu
-@@ -2,6 +2,7 @@
- #include
- #include
- 
-+#include "caffe2/utils/cub_namespace.cuh"
- #include
- #include "caffe2/sgd/adagrad_fused_op_gpu.cuh"
- #include "caffe2/utils/math.h"
-diff --git a/caffe2/sgd/adagrad_op_gpu.cu b/caffe2/sgd/adagrad_op_gpu.cu
-index 8abb3376ca..b80d29700c 100644
---- a/caffe2/sgd/adagrad_op_gpu.cu
-+++ b/caffe2/sgd/adagrad_op_gpu.cu
-@@ -4,6 +4,7 @@
- #include "caffe2/core/common_gpu.h"
- #include "caffe2/core/context_gpu.h"
- #include "caffe2/sgd/adagrad_op.h"
-+#include "caffe2/utils/cub_namespace.cuh"
- 
- namespace caffe2 {
- 
-diff --git a/caffe2/sgd/adam_op_gpu.cu b/caffe2/sgd/adam_op_gpu.cu
-index 42ab975faa..6f9c323420 100644
---- a/caffe2/sgd/adam_op_gpu.cu
-+++ b/caffe2/sgd/adam_op_gpu.cu
-@@ -2,6 +2,7 @@
- #include "caffe2/core/common_gpu.h"
- #include "caffe2/core/context_gpu.h" - #include "caffe2/sgd/adam_op.h" -+#include "caffe2/utils/cub_namespace.cuh" - - namespace caffe2 { - -diff --git a/caffe2/utils/cub_namespace.cuh b/caffe2/utils/cub_namespace.cuh -new file mode 100644 -index 0000000000000..188a9936f9c6e ---- /dev/null -+++ b/caffe2/utils/cub_namespace.cuh -@@ -0,0 +1,17 @@ -+#pragma once -+ -+// cub sort support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in: -+// https://github.com/NVIDIA/cub/pull/326 -+// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake -+// starting from CUDA 11.5 -+#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE) -+#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true -+#else -+#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false -+#endif -+ -+#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() -+namespace caffe2 { -+namespace cub = ::CUB_WRAPPED_NAMESPACE::cub; -+} -+#endif -diff --git a/caffe2/utils/math/reduce.cu b/caffe2/utils/math/reduce.cu -index 8c40c5d2b0..e8a8b768eb 100644 ---- a/caffe2/utils/math/reduce.cu -+++ b/caffe2/utils/math/reduce.cu -@@ -5,9 +5,8 @@ - #include - #include - #include -- -+#include "caffe2/utils/cub_namespace.cuh" - #include --#include - - #include - #include -diff --git a/caffe2/utils/math/reduce.cuh b/caffe2/utils/math/reduce.cuh -index 0c43ad45a3..18bdca11b9 100644 ---- a/caffe2/utils/math/reduce.cuh -+++ b/caffe2/utils/math/reduce.cuh -@@ -1,8 +1,8 @@ - #ifndef CAFFE2_UTILS_MATH_REDUCE_CUH_ - #define CAFFE2_UTILS_MATH_REDUCE_CUH_ - -+#include "caffe2/utils/cub_namespace.cuh" - #include --#include - - #include "caffe2/core/common_gpu.h" - -diff --git a/caffe2/utils/math_gpu.cu b/caffe2/utils/math_gpu.cu -index 7f3bb8eea6..54fbcca1d4 100644 ---- a/caffe2/utils/math_gpu.cu -+++ b/caffe2/utils/math_gpu.cu -@@ -7,8 +7,9 @@ - #include - #include - --#include - #include -+#include -+#include "caffe2/utils/cub_namespace.cuh" - - #include - #include -diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake -index ca560288a4..5fd189e4a8 100644 ---- a/cmake/Dependencies.cmake -+++ b/cmake/Dependencies.cmake -@@ -1622,6 +1622,12 @@ if(NOT INTERN_BUILD_MOBILE) - list(APPEND CUDA_NVCC_FLAGS "-Xcompiler" "-fPIC") - endif() - -+ # use cub in a safe manner, see: -+ # https://github.com/pytorch/pytorch/pull/55292 -+ if(NOT ${CUDA_VERSION} LESS 11.5) -+ list(APPEND CUDA_NVCC_FLAGS "-DCUB_WRAPPED_NAMESPACE=at_cuda_detail") -+ endif() -+ - if(CUDA_HAS_FP16 OR NOT ${CUDA_VERSION} LESS 7.5) - message(STATUS "Found CUDA with FP16 support, compiling with torch.cuda.HalfTensor") - list(APPEND CUDA_NVCC_FLAGS "-DCUDA_HAS_FP16=1" "-D__CUDA_NO_HALF_OPERATORS__" "-D__CUDA_NO_HALF_CONVERSIONS__" diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java index 0d361c16e36..3f9689096f5 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -45,7 +45,7 @@ // For inclusion in JNI only, not parsed "ATen/cuda/CUDAGeneratorImpl.h", }, - link = { "cudart", "cusparse" }, + link = { "cudart", "cusparse", "cudnn" }, linkpath = { "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.3/lib/x64/", "/usr/local/cuda-12.3/lib64/", From b63ca7834888d6b4991dcac32d6f291d5b3770fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Wed, 7 Feb 2024 09:55:21 +0100 Subject: [PATCH 14/24] Remove upcast on Module --- .../pytorch/AdaptiveAvgPool1dImpl.java | 3 - 
.../pytorch/AdaptiveAvgPool1dImplBase.java | 3 - .../AdaptiveAvgPool1dImplCloneable.java | 5 - .../pytorch/AdaptiveAvgPool2dImpl.java | 3 - .../pytorch/AdaptiveAvgPool2dImplBase.java | 3 - .../AdaptiveAvgPool2dImplCloneable.java | 5 - .../pytorch/AdaptiveAvgPool3dImpl.java | 3 - .../pytorch/AdaptiveAvgPool3dImplBase.java | 3 - .../AdaptiveAvgPool3dImplCloneable.java | 5 - .../AdaptiveLogSoftmaxWithLossImpl.java | 3 - ...aptiveLogSoftmaxWithLossImplCloneable.java | 5 - .../pytorch/AdaptiveMaxPool1dImpl.java | 3 - .../pytorch/AdaptiveMaxPool1dImplBase.java | 3 - .../AdaptiveMaxPool1dImplCloneable.java | 5 - .../pytorch/AdaptiveMaxPool2dImpl.java | 3 - .../pytorch/AdaptiveMaxPool2dImplBase.java | 3 - .../AdaptiveMaxPool2dImplCloneable.java | 5 - .../pytorch/AdaptiveMaxPool3dImpl.java | 3 - .../pytorch/AdaptiveMaxPool3dImplBase.java | 3 - .../AdaptiveMaxPool3dImplCloneable.java | 5 - .../bytedeco/pytorch/AlphaDropoutImpl.java | 3 - .../pytorch/AlphaDropoutImplBase.java | 3 - .../pytorch/AlphaDropoutImplCloneable.java | 5 - .../org/bytedeco/pytorch/AvgPool1dImpl.java | 3 - .../bytedeco/pytorch/AvgPool1dImplBase.java | 3 - .../pytorch/AvgPool1dImplCloneable.java | 5 - .../org/bytedeco/pytorch/AvgPool2dImpl.java | 3 - .../bytedeco/pytorch/AvgPool2dImplBase.java | 3 - .../pytorch/AvgPool2dImplCloneable.java | 5 - .../org/bytedeco/pytorch/AvgPool3dImpl.java | 3 - .../bytedeco/pytorch/AvgPool3dImplBase.java | 3 - .../pytorch/AvgPool3dImplCloneable.java | 5 - .../org/bytedeco/pytorch/BCELossImpl.java | 3 - .../pytorch/BCELossImplCloneable.java | 5 - .../pytorch/BCEWithLogitsLossImpl.java | 3 - .../BCEWithLogitsLossImplCloneable.java | 5 - .../org/bytedeco/pytorch/BatchNorm1dImpl.java | 3 - .../bytedeco/pytorch/BatchNorm1dImplBase.java | 3 - .../pytorch/BatchNorm1dImplBaseBase.java | 3 - .../pytorch/BatchNorm1dImplCloneable.java | 5 - .../org/bytedeco/pytorch/BatchNorm2dImpl.java | 3 - .../bytedeco/pytorch/BatchNorm2dImplBase.java | 3 - .../pytorch/BatchNorm2dImplBaseBase.java | 3 - .../pytorch/BatchNorm2dImplCloneable.java | 5 - .../org/bytedeco/pytorch/BatchNorm3dImpl.java | 3 - .../bytedeco/pytorch/BatchNorm3dImplBase.java | 3 - .../pytorch/BatchNorm3dImplBaseBase.java | 3 - .../pytorch/BatchNorm3dImplCloneable.java | 5 - .../org/bytedeco/pytorch/BilinearImpl.java | 3 - .../pytorch/BilinearImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/CELUImpl.java | 3 - .../bytedeco/pytorch/CELUImplCloneable.java | 5 - .../org/bytedeco/pytorch/CTCLossImpl.java | 3 - .../pytorch/CTCLossImplCloneable.java | 5 - .../bytedeco/pytorch/ConstantPad1dImpl.java | 3 - .../pytorch/ConstantPad1dImplBase.java | 3 - .../pytorch/ConstantPad1dImplCloneable.java | 5 - .../bytedeco/pytorch/ConstantPad2dImpl.java | 3 - .../pytorch/ConstantPad2dImplBase.java | 3 - .../pytorch/ConstantPad2dImplCloneable.java | 5 - .../bytedeco/pytorch/ConstantPad3dImpl.java | 3 - .../pytorch/ConstantPad3dImplBase.java | 3 - .../pytorch/ConstantPad3dImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/Conv1dImpl.java | 3 - .../org/bytedeco/pytorch/Conv1dImplBase.java | 3 - .../bytedeco/pytorch/Conv1dImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/Conv2dImpl.java | 3 - .../org/bytedeco/pytorch/Conv2dImplBase.java | 3 - .../bytedeco/pytorch/Conv2dImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/Conv3dImpl.java | 3 - .../org/bytedeco/pytorch/Conv3dImplBase.java | 3 - .../bytedeco/pytorch/Conv3dImplCloneable.java | 5 - .../bytedeco/pytorch/ConvTranspose1dImpl.java | 3 - .../pytorch/ConvTranspose1dImplBase.java | 3 - 
.../pytorch/ConvTranspose1dImplBaseBase.java | 3 - .../pytorch/ConvTranspose1dImplCloneable.java | 5 - .../bytedeco/pytorch/ConvTranspose2dImpl.java | 3 - .../pytorch/ConvTranspose2dImplBase.java | 3 - .../pytorch/ConvTranspose2dImplBaseBase.java | 3 - .../pytorch/ConvTranspose2dImplCloneable.java | 5 - .../bytedeco/pytorch/ConvTranspose3dImpl.java | 3 - .../pytorch/ConvTranspose3dImplBase.java | 3 - .../pytorch/ConvTranspose3dImplBaseBase.java | 3 - .../pytorch/ConvTranspose3dImplCloneable.java | 5 - .../pytorch/CosineEmbeddingLossImpl.java | 3 - .../CosineEmbeddingLossImplCloneable.java | 5 - .../pytorch/CosineSimilarityImpl.java | 3 - .../CosineSimilarityImplCloneable.java | 5 - .../pytorch/CrossEntropyLossImpl.java | 3 - .../CrossEntropyLossImplCloneable.java | 5 - .../bytedeco/pytorch/CrossMapLRN2dImpl.java | 3 - .../pytorch/CrossMapLRN2dImplCloneable.java | 5 - .../org/bytedeco/pytorch/Dropout2dImpl.java | 3 - .../bytedeco/pytorch/Dropout2dImplBase.java | 3 - .../pytorch/Dropout2dImplCloneable.java | 5 - .../org/bytedeco/pytorch/Dropout3dImpl.java | 3 - .../bytedeco/pytorch/Dropout3dImplBase.java | 3 - .../pytorch/Dropout3dImplCloneable.java | 5 - .../org/bytedeco/pytorch/DropoutImpl.java | 3 - .../org/bytedeco/pytorch/DropoutImplBase.java | 3 - .../pytorch/DropoutImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/ELUImpl.java | 3 - .../bytedeco/pytorch/ELUImplCloneable.java | 5 - .../bytedeco/pytorch/EmbeddingBagImpl.java | 3 - .../pytorch/EmbeddingBagImplCloneable.java | 5 - .../org/bytedeco/pytorch/EmbeddingImpl.java | 3 - .../pytorch/EmbeddingImplCloneable.java | 5 - .../pytorch/FeatureAlphaDropoutImpl.java | 3 - .../pytorch/FeatureAlphaDropoutImplBase.java | 3 - .../FeatureAlphaDropoutImplCloneable.java | 5 - .../org/bytedeco/pytorch/FlattenImpl.java | 3 - .../pytorch/FlattenImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/FoldImpl.java | 3 - .../bytedeco/pytorch/FoldImplCloneable.java | 5 - .../pytorch/FractionalMaxPool2dImpl.java | 3 - .../FractionalMaxPool2dImplCloneable.java | 5 - .../pytorch/FractionalMaxPool3dImpl.java | 3 - .../FractionalMaxPool3dImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/GELUImpl.java | 3 - .../bytedeco/pytorch/GELUImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/GLUImpl.java | 3 - .../bytedeco/pytorch/GLUImplCloneable.java | 5 - .../org/bytedeco/pytorch/GRUCellImpl.java | 3 - .../org/bytedeco/pytorch/GRUCellImplBase.java | 3 - .../pytorch/GRUCellImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/GRUImpl.java | 3 - .../org/bytedeco/pytorch/GRUImplBase.java | 3 - .../bytedeco/pytorch/GRUImplCloneable.java | 5 - .../org/bytedeco/pytorch/GroupNormImpl.java | 3 - .../pytorch/GroupNormImplCloneable.java | 5 - .../org/bytedeco/pytorch/HardshrinkImpl.java | 3 - .../pytorch/HardshrinkImplCloneable.java | 5 - .../org/bytedeco/pytorch/HardtanhImpl.java | 3 - .../pytorch/HardtanhImplCloneable.java | 5 - .../pytorch/HingeEmbeddingLossImpl.java | 3 - .../HingeEmbeddingLossImplCloneable.java | 5 - .../org/bytedeco/pytorch/HuberLossImpl.java | 3 - .../pytorch/HuberLossImplCloneable.java | 5 - .../org/bytedeco/pytorch/IdentityImpl.java | 3 - .../pytorch/IdentityImplCloneable.java | 5 - .../bytedeco/pytorch/InstanceNorm1dImpl.java | 3 - .../pytorch/InstanceNorm1dImplBase.java | 3 - .../pytorch/InstanceNorm1dImplBaseBase.java | 3 - .../pytorch/InstanceNorm1dImplCloneable.java | 5 - .../bytedeco/pytorch/InstanceNorm2dImpl.java | 3 - .../pytorch/InstanceNorm2dImplBase.java | 3 - .../pytorch/InstanceNorm2dImplBaseBase.java | 3 - 
.../pytorch/InstanceNorm2dImplCloneable.java | 5 - .../bytedeco/pytorch/InstanceNorm3dImpl.java | 3 - .../pytorch/InstanceNorm3dImplBase.java | 3 - .../pytorch/InstanceNorm3dImplBaseBase.java | 3 - .../pytorch/InstanceNorm3dImplCloneable.java | 5 - .../org/bytedeco/pytorch/KLDivLossImpl.java | 3 - .../pytorch/KLDivLossImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/L1LossImpl.java | 3 - .../bytedeco/pytorch/L1LossImplCloneable.java | 5 - .../org/bytedeco/pytorch/LPPool1dImpl.java | 3 - .../bytedeco/pytorch/LPPool1dImplBase.java | 3 - .../pytorch/LPPool1dImplCloneable.java | 5 - .../org/bytedeco/pytorch/LPPool2dImpl.java | 3 - .../bytedeco/pytorch/LPPool2dImplBase.java | 3 - .../pytorch/LPPool2dImplCloneable.java | 5 - .../org/bytedeco/pytorch/LSTMCellImpl.java | 3 - .../bytedeco/pytorch/LSTMCellImplBase.java | 3 - .../pytorch/LSTMCellImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/LSTMImpl.java | 3 - .../org/bytedeco/pytorch/LSTMImplBase.java | 3 - .../bytedeco/pytorch/LSTMImplCloneable.java | 5 - .../org/bytedeco/pytorch/LayerNormImpl.java | 3 - .../pytorch/LayerNormImplCloneable.java | 5 - .../org/bytedeco/pytorch/LeakyReLUImpl.java | 3 - .../pytorch/LeakyReLUImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/LinearImpl.java | 3 - .../bytedeco/pytorch/LinearImplCloneable.java | 5 - .../pytorch/LocalResponseNormImpl.java | 3 - .../LocalResponseNormImplCloneable.java | 5 - .../org/bytedeco/pytorch/LogSigmoidImpl.java | 3 - .../pytorch/LogSigmoidImplCloneable.java | 5 - .../org/bytedeco/pytorch/LogSoftmaxImpl.java | 3 - .../pytorch/LogSoftmaxImplCloneable.java | 5 - .../org/bytedeco/pytorch/MSELossImpl.java | 3 - .../pytorch/MSELossImplCloneable.java | 5 - .../pytorch/MarginRankingLossImpl.java | 3 - .../MarginRankingLossImplCloneable.java | 5 - .../org/bytedeco/pytorch/MaxPool1dImpl.java | 3 - .../bytedeco/pytorch/MaxPool1dImplBase.java | 3 - .../pytorch/MaxPool1dImplCloneable.java | 5 - .../org/bytedeco/pytorch/MaxPool2dImpl.java | 3 - .../bytedeco/pytorch/MaxPool2dImplBase.java | 3 - .../pytorch/MaxPool2dImplCloneable.java | 5 - .../org/bytedeco/pytorch/MaxPool3dImpl.java | 3 - .../bytedeco/pytorch/MaxPool3dImplBase.java | 3 - .../pytorch/MaxPool3dImplCloneable.java | 5 - .../org/bytedeco/pytorch/MaxUnpool1dImpl.java | 3 - .../bytedeco/pytorch/MaxUnpool1dImplBase.java | 3 - .../pytorch/MaxUnpool1dImplCloneable.java | 5 - .../org/bytedeco/pytorch/MaxUnpool2dImpl.java | 3 - .../bytedeco/pytorch/MaxUnpool2dImplBase.java | 3 - .../pytorch/MaxUnpool2dImplCloneable.java | 5 - .../org/bytedeco/pytorch/MaxUnpool3dImpl.java | 3 - .../bytedeco/pytorch/MaxUnpool3dImplBase.java | 3 - .../pytorch/MaxUnpool3dImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/MishImpl.java | 3 - .../bytedeco/pytorch/MishImplCloneable.java | 5 - .../gen/java/org/bytedeco/pytorch/Module.java | 145 ++++++------------ .../org/bytedeco/pytorch/ModuleDictImpl.java | 3 - .../pytorch/ModuleDictImplCloneable.java | 5 - .../org/bytedeco/pytorch/ModuleListImpl.java | 9 +- .../pytorch/ModuleListImplCloneable.java | 5 - .../pytorch/MultiLabelMarginLossImpl.java | 3 - .../MultiLabelMarginLossImplCloneable.java | 5 - .../pytorch/MultiLabelSoftMarginLossImpl.java | 3 - ...MultiLabelSoftMarginLossImplCloneable.java | 5 - .../bytedeco/pytorch/MultiMarginLossImpl.java | 3 - .../pytorch/MultiMarginLossImplCloneable.java | 5 - .../pytorch/MultiheadAttentionImpl.java | 3 - .../MultiheadAttentionImplCloneable.java | 5 - .../org/bytedeco/pytorch/NLLLossImpl.java | 3 - .../pytorch/NLLLossImplCloneable.java | 5 - 
.../java/org/bytedeco/pytorch/PReLUImpl.java | 3 - .../bytedeco/pytorch/PReLUImplCloneable.java | 5 - .../pytorch/PairwiseDistanceImpl.java | 3 - .../PairwiseDistanceImplCloneable.java | 5 - .../bytedeco/pytorch/ParameterDictImpl.java | 3 - .../pytorch/ParameterDictImplCloneable.java | 5 - .../bytedeco/pytorch/ParameterListImpl.java | 3 - .../pytorch/ParameterListImplCloneable.java | 5 - .../bytedeco/pytorch/PixelShuffleImpl.java | 3 - .../pytorch/PixelShuffleImplCloneable.java | 5 - .../bytedeco/pytorch/PixelUnshuffleImpl.java | 3 - .../pytorch/PixelUnshuffleImplCloneable.java | 5 - .../bytedeco/pytorch/PoissonNLLLossImpl.java | 3 - .../pytorch/PoissonNLLLossImplCloneable.java | 5 - .../org/bytedeco/pytorch/RNNCellImpl.java | 3 - .../org/bytedeco/pytorch/RNNCellImplBase.java | 3 - .../pytorch/RNNCellImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/RNNImpl.java | 3 - .../org/bytedeco/pytorch/RNNImplBase.java | 3 - .../bytedeco/pytorch/RNNImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/RReLUImpl.java | 3 - .../bytedeco/pytorch/RReLUImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/ReLU6Impl.java | 3 - .../bytedeco/pytorch/ReLU6ImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/ReLUImpl.java | 3 - .../bytedeco/pytorch/ReLUImplCloneable.java | 5 - .../bytedeco/pytorch/ReflectionPad1dImpl.java | 3 - .../pytorch/ReflectionPad1dImplBase.java | 3 - .../pytorch/ReflectionPad1dImplCloneable.java | 5 - .../bytedeco/pytorch/ReflectionPad2dImpl.java | 3 - .../pytorch/ReflectionPad2dImplBase.java | 3 - .../pytorch/ReflectionPad2dImplCloneable.java | 5 - .../bytedeco/pytorch/ReflectionPad3dImpl.java | 3 - .../pytorch/ReflectionPad3dImplBase.java | 3 - .../pytorch/ReflectionPad3dImplCloneable.java | 5 - .../pytorch/ReplicationPad1dImpl.java | 3 - .../pytorch/ReplicationPad1dImplBase.java | 3 - .../ReplicationPad1dImplCloneable.java | 5 - .../pytorch/ReplicationPad2dImpl.java | 3 - .../pytorch/ReplicationPad2dImplBase.java | 3 - .../ReplicationPad2dImplCloneable.java | 5 - .../pytorch/ReplicationPad3dImpl.java | 3 - .../pytorch/ReplicationPad3dImplBase.java | 3 - .../ReplicationPad3dImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/SELUImpl.java | 3 - .../bytedeco/pytorch/SELUImplCloneable.java | 5 - .../org/bytedeco/pytorch/SequentialImpl.java | 3 - .../pytorch/SequentialImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/SiLUImpl.java | 3 - .../bytedeco/pytorch/SiLUImplCloneable.java | 5 - .../org/bytedeco/pytorch/SigmoidImpl.java | 3 - .../pytorch/SigmoidImplCloneable.java | 5 - .../bytedeco/pytorch/SmoothL1LossImpl.java | 3 - .../pytorch/SmoothL1LossImplCloneable.java | 5 - .../bytedeco/pytorch/SoftMarginLossImpl.java | 3 - .../pytorch/SoftMarginLossImplCloneable.java | 5 - .../org/bytedeco/pytorch/Softmax2dImpl.java | 3 - .../pytorch/Softmax2dImplCloneable.java | 5 - .../org/bytedeco/pytorch/SoftmaxImpl.java | 3 - .../pytorch/SoftmaxImplCloneable.java | 5 - .../org/bytedeco/pytorch/SoftminImpl.java | 3 - .../pytorch/SoftminImplCloneable.java | 5 - .../org/bytedeco/pytorch/SoftplusImpl.java | 3 - .../pytorch/SoftplusImplCloneable.java | 5 - .../org/bytedeco/pytorch/SoftshrinkImpl.java | 3 - .../pytorch/SoftshrinkImplCloneable.java | 5 - .../org/bytedeco/pytorch/SoftsignImpl.java | 3 - .../pytorch/SoftsignImplCloneable.java | 5 - .../pytorch/StringSharedModuleDict.java | 6 +- .../pytorch/StringSharedModuleDictItem.java | 4 +- .../java/org/bytedeco/pytorch/TanhImpl.java | 3 - .../bytedeco/pytorch/TanhImplCloneable.java | 5 - .../org/bytedeco/pytorch/TanhshrinkImpl.java | 3 
- .../pytorch/TanhshrinkImplCloneable.java | 5 - .../org/bytedeco/pytorch/ThresholdImpl.java | 3 - .../pytorch/ThresholdImplCloneable.java | 5 - .../pytorch/TransformerDecoderImpl.java | 3 - .../TransformerDecoderImplCloneable.java | 5 - .../pytorch/TransformerDecoderLayerImpl.java | 3 - .../TransformerDecoderLayerImplCloneable.java | 5 - .../pytorch/TransformerEncoderImpl.java | 3 - .../TransformerEncoderImplCloneable.java | 5 - .../pytorch/TransformerEncoderLayerImpl.java | 3 - .../TransformerEncoderLayerImplCloneable.java | 5 - .../org/bytedeco/pytorch/TransformerImpl.java | 3 - .../pytorch/TransformerImplCloneable.java | 5 - .../pytorch/TripletMarginLossImpl.java | 3 - .../TripletMarginLossImplCloneable.java | 5 - .../TripletMarginWithDistanceLossImpl.java | 3 - ...etMarginWithDistanceLossImplCloneable.java | 5 - .../org/bytedeco/pytorch/UnflattenImpl.java | 3 - .../pytorch/UnflattenImplCloneable.java | 5 - .../java/org/bytedeco/pytorch/UnfoldImpl.java | 3 - .../bytedeco/pytorch/UnfoldImplCloneable.java | 5 - .../org/bytedeco/pytorch/UpsampleImpl.java | 3 - .../pytorch/UpsampleImplCloneable.java | 5 - .../org/bytedeco/pytorch/ZeroPad1dImpl.java | 3 - .../bytedeco/pytorch/ZeroPad1dImplBase.java | 3 - .../pytorch/ZeroPad1dImplCloneable.java | 5 - .../org/bytedeco/pytorch/ZeroPad2dImpl.java | 3 - .../bytedeco/pytorch/ZeroPad2dImplBase.java | 3 - .../pytorch/ZeroPad2dImplCloneable.java | 5 - .../org/bytedeco/pytorch/ZeroPad3dImpl.java | 3 - .../bytedeco/pytorch/ZeroPad3dImplBase.java | 3 - .../pytorch/ZeroPad3dImplCloneable.java | 5 - .../org/bytedeco/pytorch/global/torch.java | 9 +- .../org/bytedeco/pytorch/presets/torch.java | 5 +- 326 files changed, 60 insertions(+), 1338 deletions(-)

diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java
index 226e1b7a262..f8c1bb8caf7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java
@@ -44,9 +44,6 @@ private native void allocate(
         @Const @ByRef AdaptiveAvgPool1dOptions options_);
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveAvgPool1dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveAvgPool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public native @ByVal Tensor forward(@Const @ByRef Tensor input);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java
index a66fcdd7661..40b08a1ce43 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java
@@ -26,9 +26,6 @@ public class AdaptiveAvgPool1dImplBase extends AdaptiveAvgPool1dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveAvgPool1dImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveAvgPool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast,torch::nn::AdaptiveAvgPool1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public AdaptiveAvgPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size) { super((Pointer)null); allocate(output_size); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java
index 01cdc15049f..48dfd2fbd0c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java
@@ -22,11 +22,6 @@ public class AdaptiveAvgPool1dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveAvgPool1dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveAvgPool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveAvgPool1dImplCloneable pointer);
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java
index 1801896a8f0..d810f6bb144 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java
@@ -44,9 +44,6 @@ private native void allocate(
         @Const @ByRef AdaptiveAvgPool2dOptions options_);
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveAvgPool2dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveAvgPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public native @ByVal Tensor forward(@Const @ByRef Tensor input);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java
index 35587ecaa9c..276f3a01156 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java
@@ -22,9 +22,6 @@ public class AdaptiveAvgPool2dImplBase extends AdaptiveAvgPool2dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public AdaptiveAvgPool2dImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveAvgPool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast,torch::nn::AdaptiveAvgPool2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public AdaptiveAvgPool2dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java
index bfb4afc7800..7ddff12187a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java
@@ -22,11 +22,6 @@ public class AdaptiveAvgPool2dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveAvgPool2dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveAvgPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveAvgPool2dImplCloneable pointer);
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java
index 34562039864..0b4b3af9720 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java
@@ -44,9 +44,6 @@ private native void allocate(
         @Const @ByRef AdaptiveAvgPool3dOptions options_);
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveAvgPool3dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveAvgPool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public native @ByVal Tensor forward(@Const @ByRef Tensor input);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java
index 4c3fa104e86..5f1a5b4e6e3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java
@@ -22,9 +22,6 @@ public class AdaptiveAvgPool3dImplBase extends AdaptiveAvgPool3dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveAvgPool3dImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveAvgPool3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast,torch::nn::AdaptiveAvgPool3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public AdaptiveAvgPool3dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java
index 4f04de525fd..0133810ada4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java
@@ -22,11 +22,6 @@ public class AdaptiveAvgPool3dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveAvgPool3dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveAvgPool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveAvgPool3dImplCloneable pointer);
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java
index 7bb4db72f32..eda1d3583fc 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java
@@ -41,9 +41,6 @@ public class AdaptiveLogSoftmaxWithLossImpl extends AdaptiveLogSoftmaxWithLossIm
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveLogSoftmaxWithLossImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveLogSoftmaxWithLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public AdaptiveLogSoftmaxWithLossImpl(
         @Cast("int64_t") long in_features,
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java
index 457b439a882..af51f4e06e1 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java
@@ -22,11 +22,6 @@ public class AdaptiveLogSoftmaxWithLossImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveLogSoftmaxWithLossImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveLogSoftmaxWithLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveLogSoftmaxWithLossImplCloneable pointer);
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java
index 7e67b0b29db..81010dd00cc 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java
@@ -44,9 +44,6 @@ private native void allocate(
         @Const @ByRef AdaptiveMaxPool1dOptions options_);
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveMaxPool1dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveMaxPool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public native @ByVal Tensor forward(@Const @ByRef Tensor input);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java
index 5ead4bdba58..495fa7d06c0 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java
@@ -26,9 +26,6 @@ public class AdaptiveMaxPool1dImplBase extends AdaptiveMaxPool1dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveMaxPool1dImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveMaxPool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast,torch::nn::AdaptiveMaxPool1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public AdaptiveMaxPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size) { super((Pointer)null); allocate(output_size); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java
index 9671ef0ec88..418391fb82f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java
@@ -22,11 +22,6 @@ public class AdaptiveMaxPool1dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveMaxPool1dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveMaxPool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveMaxPool1dImplCloneable pointer);
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java
index ae1ecc328e7..1843987daea 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java
@@ -44,9 +44,6 @@ private native void allocate(
         @Const @ByRef AdaptiveMaxPool2dOptions options_);
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveMaxPool2dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveMaxPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public native @ByVal Tensor forward(@Const @ByRef Tensor input);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java
index af355086439..273ea117a34 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java
@@ -22,9 +22,6 @@ public class AdaptiveMaxPool2dImplBase extends AdaptiveMaxPool2dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveMaxPool2dImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveMaxPool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast,torch::nn::AdaptiveMaxPool2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public AdaptiveMaxPool2dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java
index b9e057d7585..c524e3d8712 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java
@@ -22,11 +22,6 @@ public class AdaptiveMaxPool2dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveMaxPool2dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveMaxPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveMaxPool2dImplCloneable pointer);
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java
index 891b7d7323e..ef3cec1183d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java
@@ -44,9 +44,6 @@ private native void allocate(
         @Const @ByRef AdaptiveMaxPool3dOptions options_);
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveMaxPool3dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveMaxPool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public native @ByVal Tensor forward(@Const @ByRef Tensor input);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java
index d9176dff49d..85d7563ae1e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java
@@ -22,9 +22,6 @@ public class AdaptiveMaxPool3dImplBase extends AdaptiveMaxPool3dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveMaxPool3dImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveMaxPool3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast,torch::nn::AdaptiveMaxPool3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public AdaptiveMaxPool3dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java
index f709df5b1d1..0815e37d3c7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java
@@ -22,11 +22,6 @@ public class AdaptiveMaxPool3dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdaptiveMaxPool3dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AdaptiveMaxPool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveMaxPool3dImplCloneable pointer);
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java
index 7e3ac0d1aca..8aa6f8c4e9b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java
@@ -45,9 +45,6 @@ public class AlphaDropoutImpl extends AlphaDropoutImplBase {
     private native void allocate();
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AlphaDropoutImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AlphaDropoutImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public native @ByVal Tensor forward(@Const @ByRef Tensor input);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java
index f02821cceea..18fab886e05 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java
@@ -23,9 +23,6 @@ public class AlphaDropoutImplBase extends AlphaDropoutImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public AlphaDropoutImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AlphaDropoutImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public AlphaDropoutImplBase(double p) { super((Pointer)null); allocate(p); }
     private native void allocate(double p);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java
index 4f037cea950..859ea089357 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java
@@ -22,11 +22,6 @@ public class AlphaDropoutImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AlphaDropoutImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AlphaDropoutImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AlphaDropoutImplCloneable pointer);
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java
index f231dfe7123..1937194bbdf 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java
@@ -42,9 +42,6 @@ public class AvgPool1dImpl extends AvgPool1dImplBase {
     private native void allocate(@Const @ByRef AvgPool1dOptions options_);
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AvgPool1dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AvgPool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public native @ByVal Tensor forward(@Const @ByRef Tensor input);
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java
index 1c3d2b8adf9..38ce73924f6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java
@@ -24,9 +24,6 @@ public class AvgPool1dImplBase extends AvgPool1dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AvgPool1dImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public AvgPool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public AvgPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java
index 8b0e77dbeb2..2c37a94eef8 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java
@@ -22,11 +22,6 @@ public class AvgPool1dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AvgPool1dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
*/ - public AvgPool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AvgPool1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java index 4a88680e572..af2351a0914 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java @@ -42,9 +42,6 @@ public class AvgPool2dImpl extends AvgPool2dImplBase { private native void allocate(@Const @ByRef AvgPool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool2dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public AvgPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java index 6d720bf3018..b5c44b68be7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java @@ -22,9 +22,6 @@ public class AvgPool2dImplBase extends AvgPool2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool2dImplBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public AvgPool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AvgPool2dImplBase(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java index d13fbf236c3..eeddd68607b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java @@ -22,11 +22,6 @@ public class AvgPool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool2dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public AvgPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AvgPool2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java index 599ced93035..3d88a6167c8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java @@ -42,9 +42,6 @@ public class AvgPool3dImpl extends AvgPool3dImplBase { private native void allocate(@Const @ByRef AvgPool3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool3dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public AvgPool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java index b4417c31280..e1221202c68 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java @@ -22,9 +22,6 @@ public class AvgPool3dImplBase extends AvgPool3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool3dImplBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public AvgPool3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public AvgPool3dImplBase(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java index 2f0f79146b5..b21dd9618ea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java @@ -22,11 +22,6 @@ public class AvgPool3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool3dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public AvgPool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AvgPool3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java index bdd0545b6e2..302df9666a5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java @@ -37,9 +37,6 @@ public class BCELossImpl extends BCELossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BCELossImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public BCELossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public BCELossImpl(@ByVal(nullValue = "torch::nn::BCELossOptions{}") BCELossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::BCELossOptions{}") BCELossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java index e2443f065e9..6ec0eb00023 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java @@ -22,11 +22,6 @@ public class BCELossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BCELossImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public BCELossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BCELossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java index 4d1c829b806..b4feec62624 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java @@ -41,9 +41,6 @@ public class BCEWithLogitsLossImpl extends BCEWithLogitsLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BCEWithLogitsLossImpl(Pointer p) { super(p); } - /** Downcast constructor. 
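Note that the {@code Pointer} cast constructor retained above is not a drop-in substitute for this removed overload: {@link Pointer#Pointer(Pointer)} merely re-wraps the same native address and runs no {@code static_pointer_cast} or shared-ownership bookkeeping. A hedged sketch of the difference ({@code someLossPointer} is an illustrative name, not a real field):
<pre>{@code
// someLossPointer: a Pointer already known to reference this native type
BCEWithLogitsLossImpl view = new BCEWithLogitsLossImpl(someLossPointer); // reinterprets only
}</pre>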
*/ - public BCEWithLogitsLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public BCEWithLogitsLossImpl(@ByVal(nullValue = "torch::nn::BCEWithLogitsLossOptions{}") BCEWithLogitsLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::BCEWithLogitsLossOptions{}") BCEWithLogitsLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java index 92efee160fb..b4cea865ca5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java @@ -22,11 +22,6 @@ public class BCEWithLogitsLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BCEWithLogitsLossImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public BCEWithLogitsLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BCEWithLogitsLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java index 2c72020726c..ef67cd0b4aa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java @@ -43,8 +43,5 @@ public class BatchNorm1dImpl extends BatchNorm1dImplBase { private native void allocate(@Const @ByRef BatchNormOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm1dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public BatchNorm1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java index b068097725a..bdc37ef9e6e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java @@ -24,9 +24,6 @@ public class BatchNorm1dImplBase extends BatchNorm1dImplBaseBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm1dImplBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public BatchNorm1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java index 12a05a38e11..b4ae722bcce 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java @@ -25,9 +25,6 @@ public class BatchNorm1dImplBaseBase extends BatchNorm1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm1dImplBaseBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public BatchNorm1dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java index e79f157841f..3abc8ed6f4f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java @@ -22,11 +22,6 @@ public class BatchNorm1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm1dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public BatchNorm1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BatchNorm1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java index ad69547e5f1..dca3bbd4e46 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java @@ -43,8 +43,5 @@ public class BatchNorm2dImpl extends BatchNorm2dImplBase { private native void allocate(@Const @ByRef BatchNormOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm2dImpl(Pointer p) { super(p); } - /** Downcast constructor. 
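Modules like this one remain constructible from their options object through the {@code allocate(@Const @ByRef BatchNormOptions options_)} binding shown in the surrounding hunks. A sketch of that route, assuming the usual public wrapper around that overload and a C++ {@code BatchNormOptions(int64_t num_features)} constructor (neither is visible in this hunk):
<pre>{@code
BatchNorm2dImpl bn = new BatchNorm2dImpl(new BatchNormOptions(16)); // 16 feature channels
}</pre>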
*/ - public BatchNorm2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java index 975b4b6ac52..7cee0bbb592 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java @@ -22,9 +22,6 @@ public class BatchNorm2dImplBase extends BatchNorm2dImplBaseBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm2dImplBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public BatchNorm2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java index 1f679f5d67b..098f0c726bc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java @@ -22,9 +22,6 @@ public class BatchNorm2dImplBaseBase extends BatchNorm2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm2dImplBaseBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public BatchNorm2dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java index fae34c5a3a3..5b4486f119c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java @@ -22,11 +22,6 @@ public class BatchNorm2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm2dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public BatchNorm2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BatchNorm2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java index 9c4bc5ce1ab..4c53a2502d6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java @@ -43,8 +43,5 @@ public class BatchNorm3dImpl extends BatchNorm3dImplBase { private native void allocate(@Const @ByRef BatchNormOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm3dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public BatchNorm3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java index ef80d4b6a86..aa430c11fbd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java @@ -22,9 +22,6 @@ public class BatchNorm3dImplBase extends BatchNorm3dImplBaseBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm3dImplBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public BatchNorm3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java index e78392d0ea3..51b0783d20e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java @@ -22,9 +22,6 @@ public class BatchNorm3dImplBaseBase extends BatchNorm3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm3dImplBaseBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public BatchNorm3dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java index 4226bbbeb76..1ffac3653fa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java @@ -22,11 +22,6 @@ public class BatchNorm3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm3dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public BatchNorm3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BatchNorm3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java index b0ee161545a..2ff1d54f60c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java @@ -36,9 +36,6 @@ public class BilinearImpl extends BilinearImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BilinearImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public BilinearImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public BilinearImpl(@Cast("int64_t") long in1_features, @Cast("int64_t") long in2_features, @Cast("int64_t") long out_features) { super((Pointer)null); allocate(in1_features, in2_features, out_features); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long in1_features, @Cast("int64_t") long in2_features, @Cast("int64_t") long out_features); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java index fcecb842634..20f2c3784bf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java @@ -22,11 +22,6 @@ public class BilinearImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BilinearImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
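The {@code asModule()} helper removed just below provided the opposite direction, upcasting to a plain {@code torch::nn::Module} through {@code static_pointer_cast}. A sketch of the usage that disappears with it, based on the {@code BilinearImpl(long, long, long)} constructor shown above (the feature sizes are arbitrary example values):
<pre>{@code
BilinearImpl bilinear = new BilinearImpl(20, 30, 50); // in1, in2, out features
Module base = bilinear.asModule(); // the removed upcast helper
}</pre>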
*/ - public BilinearImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BilinearImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java index 7fa9a51abd4..09ceda6e127 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java @@ -36,9 +36,6 @@ public class CELUImpl extends CELUImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CELUImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public CELUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public CELUImpl(@Const @ByRef(nullValue = "torch::nn::CELUOptions{}") CELUOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::CELUOptions{}") CELUOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java index f5d0401bd36..ca1bf9c15fd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java @@ -22,11 +22,6 @@ public class CELUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CELUImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public CELUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CELUImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java index 2ca70f1a029..72bfd641ff3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java @@ -37,9 +37,6 @@ public class CTCLossImpl extends CTCLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CTCLossImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public CTCLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public CTCLossImpl(@ByVal(nullValue = "torch::nn::CTCLossOptions{}") CTCLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::CTCLossOptions{}") CTCLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java index c543c778517..c15d39e20c2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java @@ -22,11 +22,6 @@ public class CTCLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CTCLossImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public CTCLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CTCLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java index 14b7bb75920..c756a5be903 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java @@ -42,8 +42,5 @@ public class ConstantPad1dImpl extends ConstantPad1dImplBase { private native void allocate(@Const @ByRef ConstantPad1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad1dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public ConstantPad1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java index 3b95292ab83..ba3b49901cd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java @@ -26,9 +26,6 @@ public class ConstantPad1dImplBase extends ConstantPad1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad1dImplBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public ConstantPad1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConstantPad1dImplBase(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding, double value) { super((Pointer)null); allocate(padding, value); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding, double value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java index b2393789cff..eb4a8ed8c7b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java @@ -22,11 +22,6 @@ public class ConstantPad1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad1dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public ConstantPad1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConstantPad1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java index c7a3370d1ae..120ff47601e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java @@ -42,8 +42,5 @@ public class ConstantPad2dImpl extends ConstantPad2dImplBase { private native void allocate(@Const @ByRef ConstantPad2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad2dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public ConstantPad2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java index 16e6e5f4a1b..84517d5fcb2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java @@ -22,9 +22,6 @@ public class ConstantPad2dImplBase extends ConstantPad2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad2dImplBase(Pointer p) { super(p); } - /** Downcast constructor. 
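On the constructors that remain, the C++ {@code torch::ExpandingArray<N*2>} padding parameters surface in Java as {@code LongPointer} casts, so the 2-D variant shown just below takes four values. A sketch under that reading, assuming the conventional (left, right, top, bottom) order of {@code torch::nn::functional::pad}:
<pre>{@code
LongPointer padding = new LongPointer(1L, 1L, 2L, 2L); // maps to ExpandingArray<2*2>
ConstantPad2dImplBase pad = new ConstantPad2dImplBase(padding, 0.0);
}</pre>
In practice one would construct the concrete {@code ConstantPad2dImpl}; the base class appears here only because its signature is the one visible in this hunk.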
*/ - public ConstantPad2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConstantPad2dImplBase(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding, double value) { super((Pointer)null); allocate(padding, value); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding, double value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java index b6a275de58a..81323b5e5c0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java @@ -22,11 +22,6 @@ public class ConstantPad2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad2dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public ConstantPad2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConstantPad2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java index 120b0322198..34fa4bda9f6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java @@ -42,8 +42,5 @@ public class ConstantPad3dImpl extends ConstantPad3dImplBase { private native void allocate(@Const @ByRef ConstantPad3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad3dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public ConstantPad3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java index f9802662cb1..df7449f8b7d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java @@ -22,9 +22,6 @@ public class ConstantPad3dImplBase extends ConstantPad3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad3dImplBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public ConstantPad3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConstantPad3dImplBase(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding, double value) { super((Pointer)null); allocate(padding, value); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding, double value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java index 72fc32ddbc9..18409adf29e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java @@ -22,11 +22,6 @@ public class ConstantPad3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad3dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public ConstantPad3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConstantPad3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java index 458a686abcf..9598c469633 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java @@ -36,9 +36,6 @@ public class Conv1dImpl extends Conv1dImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv1dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public Conv1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public Conv1dImpl( @Cast("int64_t") long input_channels, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java index 7fb92fdd393..f41914c9da3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java @@ -24,9 +24,6 @@ public class Conv1dImplBase extends Conv1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv1dImplBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public Conv1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public Conv1dImplBase(@ByVal DetailConv1dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv1dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java index f505c1c1385..33989a694a0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java @@ -22,11 +22,6 @@ public class Conv1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv1dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public Conv1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Conv1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java index 6c09f570c22..bcbba04019d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java @@ -36,9 +36,6 @@ public class Conv2dImpl extends Conv2dImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv2dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public Conv2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public Conv2dImpl( @Cast("int64_t") long input_channels, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java index d0ac3c79e8d..69e7a14c0b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java @@ -22,9 +22,6 @@ public class Conv2dImplBase extends Conv2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv2dImplBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public Conv2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public Conv2dImplBase(@ByVal DetailConv2dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv2dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java index 22fb1655ad2..fa81cc3a2c5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java @@ -22,11 +22,6 @@ public class Conv2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv2dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public Conv2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Conv2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java index 841d6e8f9e9..43ec04d34ab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java @@ -36,9 +36,6 @@ public class Conv3dImpl extends Conv3dImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv3dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public Conv3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public Conv3dImpl( @Cast("int64_t") long input_channels, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java index 1bd9bf61d22..466040b9962 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java @@ -22,9 +22,6 @@ public class Conv3dImplBase extends Conv3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv3dImplBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public Conv3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public Conv3dImplBase(@ByVal DetailConv3dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv3dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java index 819cb65d1d1..a67ab190ce9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java @@ -22,11 +22,6 @@ public class Conv3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv3dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public Conv3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Conv3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java index 6d2d69d4705..70c2af64001 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java @@ -38,9 +38,6 @@ public class ConvTranspose1dImpl extends ConvTranspose1dImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose1dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public ConvTranspose1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ConvTranspose1dImpl( @Cast("int64_t") long input_channels, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java index 05e994ba372..486f8e6de8c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java @@ -30,9 +30,6 @@ public class ConvTranspose1dImplBase extends ConvTranspose1dImplBaseBase { private native void allocate(@ByVal DetailConv1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose1dImplBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public ConvTranspose1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); /** Pretty prints the {@code ConvTranspose{1,2,3}d} module into the given {@code stream}. 
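The printed text comes from the module's native {@code pretty_print} implementation; for a transposed convolution it is typically a single line naming the type and its options, along the lines of {@code torch::nn::ConvTranspose1d(16, 32, kernel_size=3, stride=1)} (illustrative only; the exact format depends on the options set).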
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java index 519f8c5149c..7fc4690f041 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java @@ -22,9 +22,6 @@ public class ConvTranspose1dImplBaseBase extends ConvTranspose1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose1dImplBaseBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public ConvTranspose1dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConvTranspose1dImplBaseBase(@ByVal DetailConv1dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv1dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java index db7be0f6ce8..ca6eb38739d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java @@ -22,11 +22,6 @@ public class ConvTranspose1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose1dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public ConvTranspose1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConvTranspose1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java index 6c99250f640..f1ac7203ac1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java @@ -38,9 +38,6 @@ public class ConvTranspose2dImpl extends ConvTranspose2dImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose2dImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public ConvTranspose2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ConvTranspose2dImpl( @Cast("int64_t") long input_channels, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java index d33e2a70df2..2b6747cda3e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java @@ -26,9 +26,6 @@ public class ConvTranspose2dImplBase extends ConvTranspose2dImplBaseBase { private native void allocate(@ByVal DetailConv2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose2dImplBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public ConvTranspose2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); /** Pretty prints the {@code ConvTranspose{1,2,3}d} module into the given {@code stream}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java index 4b1053103f6..2c6a0855d50 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java @@ -22,9 +22,6 @@ public class ConvTranspose2dImplBaseBase extends ConvTranspose2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose2dImplBaseBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public ConvTranspose2dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ConvTranspose2dImplBaseBase(@ByVal DetailConv2dOptions options_) { super((Pointer)null); allocate(options_); } private native void allocate(@ByVal DetailConv2dOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java index 5196602780b..dd57a5ddc90 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java @@ -22,11 +22,6 @@ public class ConvTranspose2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose2dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public ConvTranspose2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConvTranspose2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java index 281eeaf3b46..6762020b959 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java @@ -38,9 +38,6 @@ public class ConvTranspose3dImpl extends ConvTranspose3dImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose3dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public ConvTranspose3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ConvTranspose3dImpl( @Cast("int64_t") long input_channels, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java index 7c013dd973c..ed5d75ef5ae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java @@ -26,9 +26,6 @@ public class ConvTranspose3dImplBase extends ConvTranspose3dImplBaseBase { private native void allocate(@ByVal DetailConv3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose3dImplBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public ConvTranspose3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); /** Pretty prints the {@code ConvTranspose{1,2,3}d} module into the given {@code stream}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java index ba49bf1b854..d15f5df1cb6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java @@ -22,9 +22,6 @@ public class ConvTranspose3dImplBaseBase extends ConvTranspose3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose3dImplBaseBase(Pointer p) { super(p); } - /** Downcast constructor. 
-    public ConvTranspose3dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public ConvTranspose3dImplBaseBase(@ByVal DetailConv3dOptions options_) { super((Pointer)null); allocate(options_); }
     private native void allocate(@ByVal DetailConv3dOptions options_);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java
index fd4404b9aa9..65b59c547c4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java
@@ -22,11 +22,6 @@ public class ConvTranspose3dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ConvTranspose3dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ConvTranspose3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConvTranspose3dImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java
index 6a8968f6d15..3a2da58d4a3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java
@@ -41,9 +41,6 @@ public class CosineEmbeddingLossImpl extends CosineEmbeddingLossImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public CosineEmbeddingLossImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public CosineEmbeddingLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public CosineEmbeddingLossImpl(@ByVal(nullValue = "torch::nn::CosineEmbeddingLossOptions{}") CosineEmbeddingLossOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::CosineEmbeddingLossOptions{}") CosineEmbeddingLossOptions options_);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java
index 0910a547d1b..3b8c3e0440e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java
@@ -22,11 +22,6 @@ public class CosineEmbeddingLossImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public CosineEmbeddingLossImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public CosineEmbeddingLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CosineEmbeddingLossImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java
index 937d9a6787b..827f15cb262 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java
@@ -35,9 +35,6 @@ public class CosineSimilarityImpl extends CosineSimilarityImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public CosineSimilarityImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public CosineSimilarityImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public CosineSimilarityImpl(@Const @ByRef(nullValue = "torch::nn::CosineSimilarityOptions{}") CosineSimilarityOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::CosineSimilarityOptions{}") CosineSimilarityOptions options_);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java
index d2c107901ff..8a650a3a79b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java
@@ -22,11 +22,6 @@ public class CosineSimilarityImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public CosineSimilarityImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public CosineSimilarityImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CosineSimilarityImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java
index eecd92b4b80..1172451ffa8 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java
@@ -39,9 +39,6 @@ public class CrossEntropyLossImpl extends CrossEntropyLossImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public CrossEntropyLossImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public CrossEntropyLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public CrossEntropyLossImpl(@ByVal(nullValue = "torch::nn::CrossEntropyLossOptions{}") CrossEntropyLossOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::CrossEntropyLossOptions{}") CrossEntropyLossOptions options_);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java
index b30424c29a2..4842962650d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java
@@ -22,11 +22,6 @@ public class CrossEntropyLossImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public CrossEntropyLossImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public CrossEntropyLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CrossEntropyLossImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java
index b0354584da6..6859e3b05c2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java
@@ -32,9 +32,6 @@ public class CrossMapLRN2dImpl extends CrossMapLRN2dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public CrossMapLRN2dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public CrossMapLRN2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public CrossMapLRN2dImpl(@Cast("int64_t") long size) { super((Pointer)null); allocate(size); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long size);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java
index 95491174c97..407fadcba12 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java
@@ -22,11 +22,6 @@ public class CrossMapLRN2dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public CrossMapLRN2dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public CrossMapLRN2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CrossMapLRN2dImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java
index df1a60721ea..74303a0ce19 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java
@@ -45,9 +45,6 @@ public class Dropout2dImpl extends Dropout2dImplBase {
     private native void allocate();
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public Dropout2dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public Dropout2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public native @ByVal Tensor forward(@ByVal Tensor input);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java
index 4d0c67a3c6c..3c313585b0e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java
@@ -23,9 +23,6 @@ public class Dropout2dImplBase extends Dropout2dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public Dropout2dImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public Dropout2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public Dropout2dImplBase(double p) { super((Pointer)null); allocate(p); }
     private native void allocate(double p);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java
index 665449c71ed..2002aaf28d7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java
@@ -22,11 +22,6 @@ public class Dropout2dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public Dropout2dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public Dropout2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Dropout2dImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java
index 891ea028af7..1054388faea 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java
@@ -45,9 +45,6 @@ public class Dropout3dImpl extends Dropout3dImplBase {
     private native void allocate();
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public Dropout3dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public Dropout3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public native @ByVal Tensor forward(@ByVal Tensor input);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java
index f6e94308368..f44841f3784 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java
@@ -23,9 +23,6 @@ public class Dropout3dImplBase extends Dropout3dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public Dropout3dImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public Dropout3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public Dropout3dImplBase(double p) { super((Pointer)null); allocate(p); }
     private native void allocate(double p);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java
index 5feb8ff3e3a..e351b4888c4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java
@@ -22,11 +22,6 @@ public class Dropout3dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public Dropout3dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public Dropout3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Dropout3dImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java
index 5313f5de715..a6385ff3d06 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java
@@ -45,9 +45,6 @@ public class DropoutImpl extends DropoutImplBase {
     private native void allocate();
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public DropoutImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public DropoutImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public native @ByVal Tensor forward(@ByVal Tensor input);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java
index 4bd921a2c6d..bec7b497bb7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java
@@ -23,9 +23,6 @@ public class DropoutImplBase extends DropoutImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public DropoutImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public DropoutImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public DropoutImplBase(double p) { super((Pointer)null); allocate(p); }
     private native void allocate(double p);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java
index 6539714e20f..58ba40489a7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java
@@ -22,11 +22,6 @@ public class DropoutImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public DropoutImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public DropoutImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr DropoutImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java
index 928af9d2393..179ef0707c6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java
@@ -36,9 +36,6 @@ public class ELUImpl extends ELUImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ELUImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ELUImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public ELUImpl(@Const @ByRef(nullValue = "torch::nn::ELUOptions{}") ELUOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::ELUOptions{}") ELUOptions options_);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java
index f88d7c05d83..beb5a1a7dba 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java
@@ -22,11 +22,6 @@ public class ELUImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ELUImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ELUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ELUImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java
index a8889bb6884..35275965833 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java
@@ -39,9 +39,6 @@ public class EmbeddingBagImpl extends EmbeddingBagImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public EmbeddingBagImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public EmbeddingBagImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public EmbeddingBagImpl(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim) { super((Pointer)null); allocate(num_embeddings, embedding_dim); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java
index 57b8ed85924..a453b93f3e7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java
@@ -22,11 +22,6 @@ public class EmbeddingBagImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public EmbeddingBagImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public EmbeddingBagImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr EmbeddingBagImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java
index 48cd74d50d2..1ae10adff45 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java
@@ -38,9 +38,6 @@ public class EmbeddingImpl extends EmbeddingImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public EmbeddingImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public EmbeddingImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public EmbeddingImpl(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim) { super((Pointer)null); allocate(num_embeddings, embedding_dim); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java
index c1d2ac98db4..50645956a6c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java
@@ -22,11 +22,6 @@ public class EmbeddingImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public EmbeddingImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public EmbeddingImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr EmbeddingImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java
index 95671dd5ebc..16ca75de041 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java
@@ -42,9 +42,6 @@ public class FeatureAlphaDropoutImpl extends FeatureAlphaDropoutImplBase {
     private native void allocate();
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public FeatureAlphaDropoutImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public FeatureAlphaDropoutImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public native @ByVal Tensor forward(@Const @ByRef Tensor input);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java
index 7a45508ef1b..33a4d40aa2c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java
@@ -23,9 +23,6 @@ public class FeatureAlphaDropoutImplBase extends FeatureAlphaDropoutImplCloneabl
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public FeatureAlphaDropoutImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public FeatureAlphaDropoutImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public FeatureAlphaDropoutImplBase(double p) { super((Pointer)null); allocate(p); }
     private native void allocate(double p);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java
index 123d8475bdd..978d590532d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java
@@ -22,11 +22,6 @@ public class FeatureAlphaDropoutImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public FeatureAlphaDropoutImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public FeatureAlphaDropoutImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FeatureAlphaDropoutImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java
index f5084538c87..f289d677db2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java
@@ -36,9 +36,6 @@ public class FlattenImpl extends FlattenImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public FlattenImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public FlattenImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public FlattenImpl(@Const @ByRef(nullValue = "torch::nn::FlattenOptions{}") FlattenOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::FlattenOptions{}") FlattenOptions options_);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java
index 57421f8fd40..e86931bbce1 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java
@@ -22,11 +22,6 @@ public class FlattenImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public FlattenImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public FlattenImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FlattenImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java
index 89b0725f9b3..d8eafe57d2b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java
@@ -35,9 +35,6 @@ public class FoldImpl extends FoldImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public FoldImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public FoldImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public FoldImpl(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer output_size, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(output_size, kernel_size); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer output_size, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java
index aa55fe58cdf..d5e1ffc32fb 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java
@@ -22,11 +22,6 @@ public class FoldImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public FoldImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public FoldImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FoldImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java
index 95143991442..1ec06789b17 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java
@@ -37,9 +37,6 @@ public class FractionalMaxPool2dImpl extends FractionalMaxPool2dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public FractionalMaxPool2dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public FractionalMaxPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public FractionalMaxPool2dImpl(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java
index 2ba5c7c2caf..46e7d4e434b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java
@@ -22,11 +22,6 @@ public class FractionalMaxPool2dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public FractionalMaxPool2dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public FractionalMaxPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FractionalMaxPool2dImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java
index 1f0d49c52c0..aa0e7510d90 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java
@@ -37,9 +37,6 @@ public class FractionalMaxPool3dImpl extends FractionalMaxPool3dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public FractionalMaxPool3dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public FractionalMaxPool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public FractionalMaxPool3dImpl(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java
index a1388fdb3b1..96858a56e24 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java
@@ -22,11 +22,6 @@ public class FractionalMaxPool3dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public FractionalMaxPool3dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public FractionalMaxPool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FractionalMaxPool3dImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java
index 551609bf9ef..6ff05b5f0fa 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java
@@ -28,9 +28,6 @@ public class GELUImpl extends GELUImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public GELUImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public GELUImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public GELUImpl(@ByVal(nullValue = "torch::nn::GELUOptions{}") GELUOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::GELUOptions{}") GELUOptions options_);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java
index 3f26d87eae7..d3a80abcaa3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java
@@ -22,11 +22,6 @@ public class GELUImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public GELUImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public GELUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GELUImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java
index 78bf46ace76..c5376d0194b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java
@@ -36,9 +36,6 @@ public class GLUImpl extends GLUImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public GLUImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public GLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public GLUImpl(@Const @ByRef(nullValue = "torch::nn::GLUOptions{}") GLUOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::GLUOptions{}") GLUOptions options_);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java
index 212e33756b5..6da711b14c2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java
@@ -22,11 +22,6 @@ public class GLUImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public GLUImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public GLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GLUImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java
index 7614eea76f0..4aa5b267624 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java
@@ -37,9 +37,6 @@ public class GRUCellImpl extends GRUCellImplBase {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public GRUCellImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public GRUCellImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public GRUCellImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java
index 4da554dfc41..c8cec2ad177 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java
@@ -22,9 +22,6 @@ public class GRUCellImplBase extends GRUCellImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public GRUCellImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public GRUCellImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public GRUCellImplBase(@Const @ByRef RNNCellOptionsBase options_) { super((Pointer)null); allocate(options_); }
     private native void allocate(@Const @ByRef RNNCellOptionsBase options_);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java
index 848a9936814..f2254938541 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java
@@ -22,11 +22,6 @@ public class GRUCellImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public GRUCellImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public GRUCellImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GRUCellImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java
index 981109759e9..e22f2a1275b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java
@@ -37,9 +37,6 @@ public class GRUImpl extends GRUImplBase {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public GRUImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public GRUImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public GRUImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java
index 40ba56e31e5..2616fa4f3e3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java
@@ -22,9 +22,6 @@ public class GRUImplBase extends GRUImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public GRUImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public GRUImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public GRUImplBase(@Const @ByRef RNNOptionsBase options_) { super((Pointer)null); allocate(options_); }
     private native void allocate(@Const @ByRef RNNOptionsBase options_);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java
index 8e9c2a6e5af..38524131315 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java
@@ -22,11 +22,6 @@ public class GRUImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public GRUImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public GRUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GRUImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java
index eae47f85155..c0d64cffc5c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java
@@ -37,9 +37,6 @@ public class GroupNormImpl extends GroupNormImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public GroupNormImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public GroupNormImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public GroupNormImpl(@Cast("int64_t") long num_groups, @Cast("int64_t") long num_channels) { super((Pointer)null); allocate(num_groups, num_channels); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long num_groups, @Cast("int64_t") long num_channels);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java
index ada151423ef..24c1513a983 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java
@@ -22,11 +22,6 @@ public class GroupNormImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public GroupNormImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public GroupNormImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GroupNormImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java
index 8d60bee19e9..6b03ce788e8 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java
@@ -36,9 +36,6 @@ public class HardshrinkImpl extends HardshrinkImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public HardshrinkImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public HardshrinkImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public HardshrinkImpl(@Const @ByRef(nullValue = "torch::nn::HardshrinkOptions{}") HardshrinkOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::HardshrinkOptions{}") HardshrinkOptions options_);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java
index fcbd6e94a18..49339277be2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java
@@ -22,11 +22,6 @@ public class HardshrinkImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public HardshrinkImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public HardshrinkImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HardshrinkImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java
index 38f17d60868..fbb5887439f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java
@@ -37,9 +37,6 @@ public class HardtanhImpl extends HardtanhImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public HardtanhImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public HardtanhImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public HardtanhImpl(@Const @ByRef(nullValue = "torch::nn::HardtanhOptions{}") HardtanhOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::HardtanhOptions{}") HardtanhOptions options_);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java
index 2409ee6a32e..b65b8fddb4e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java
@@ -22,11 +22,6 @@ public class HardtanhImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public HardtanhImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public HardtanhImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HardtanhImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java
index bd6b1c9416c..ffcc664973e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java
@@ -39,9 +39,6 @@ public class HingeEmbeddingLossImpl extends HingeEmbeddingLossImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public HingeEmbeddingLossImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public HingeEmbeddingLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer);
 
     public HingeEmbeddingLossImpl(@ByVal(nullValue = "torch::nn::HingeEmbeddingLossOptions{}") HingeEmbeddingLossOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::HingeEmbeddingLossOptions{}") HingeEmbeddingLossOptions options_);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java
index 08dec534cb0..d4da78bd6d5 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java
@@ -22,11 +22,6 @@ public class HingeEmbeddingLossImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public HingeEmbeddingLossImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public HingeEmbeddingLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HingeEmbeddingLossImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java
index 43601fadd89..72b653abdc6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java
@@ -38,9 +38,6 @@ public class HuberLossImpl extends HuberLossImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public HuberLossImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
*/ - public HuberLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public HuberLossImpl(@ByVal(nullValue = "torch::nn::HuberLossOptions{}") HuberLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::HuberLossOptions{}") HuberLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java index ed3bc534847..6cf018deaca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java @@ -22,11 +22,6 @@ public class HuberLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public HuberLossImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public HuberLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HuberLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java index 2d6e9f01562..6d2f39723ff 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java @@ -31,9 +31,6 @@ public class IdentityImpl extends IdentityImplCloneable { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IdentityImpl(Pointer p) { super(p); } @SharedPtr @Name("std::make_shared") private native void allocate(); - /** Downcast constructor. */ - public IdentityImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java index 98c59a911bb..a72416f178b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java @@ -22,11 +22,6 @@ public class IdentityImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IdentityImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public IdentityImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr IdentityImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java index 1f810106ba7..f7cb4fd68f7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java @@ -43,8 +43,5 @@ public class InstanceNorm1dImpl extends InstanceNorm1dImplBase { private native void allocate(@Const @ByRef InstanceNormOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm1dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public InstanceNorm1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java index ed0926b221f..266988ad110 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java @@ -24,9 +24,6 @@ public class InstanceNorm1dImplBase extends InstanceNorm1dImplBaseBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm1dImplBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public InstanceNorm1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java index 457810710c8..52692223f6f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java @@ -22,9 +22,6 @@ public class InstanceNorm1dImplBaseBase extends InstanceNorm1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm1dImplBaseBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public InstanceNorm1dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java index fee8898b975..43d71c7e334 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java @@ -22,11 +22,6 @@ public class InstanceNorm1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm1dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public InstanceNorm1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr InstanceNorm1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java index a5ff4e0c751..0503dd3ce99 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java @@ -43,8 +43,5 @@ public class InstanceNorm2dImpl extends InstanceNorm2dImplBase { private native void allocate(@Const @ByRef InstanceNormOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm2dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public InstanceNorm2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java index 4c70fe88a62..0799736eeed 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java @@ -22,9 +22,6 @@ public class InstanceNorm2dImplBase extends InstanceNorm2dImplBaseBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm2dImplBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public InstanceNorm2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java index 555948f48ed..48ec9f48536 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java @@ -22,9 +22,6 @@ public class InstanceNorm2dImplBaseBase extends InstanceNorm2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm2dImplBaseBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public InstanceNorm2dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java index f5ad0431f1a..0af04b948d5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java @@ -22,11 +22,6 @@ public class InstanceNorm2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm2dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public InstanceNorm2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr InstanceNorm2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java index a7b61a92e24..439452c1063 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java @@ -43,8 +43,5 @@ public class InstanceNorm3dImpl extends InstanceNorm3dImplBase { private native void allocate(@Const @ByRef InstanceNormOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm3dImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public InstanceNorm3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java index b841fba4294..eff6a757165 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java @@ -22,9 +22,6 @@ public class InstanceNorm3dImplBase extends InstanceNorm3dImplBaseBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm3dImplBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public InstanceNorm3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java index 690c0b76a81..ae5f7ccd368 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java @@ -22,9 +22,6 @@ public class InstanceNorm3dImplBaseBase extends InstanceNorm3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm3dImplBaseBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public InstanceNorm3dImplBaseBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java index fd6487f91a3..000c2598f24 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java @@ -22,11 +22,6 @@ public class InstanceNorm3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm3dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public InstanceNorm3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr InstanceNorm3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java index 335948d04e0..ae9b837f78c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java @@ -37,9 +37,6 @@ public class KLDivLossImpl extends KLDivLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public KLDivLossImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public KLDivLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public KLDivLossImpl(@ByVal(nullValue = "torch::nn::KLDivLossOptions{}") KLDivLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::KLDivLossOptions{}") KLDivLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java index 384cd017f79..af8dbb9e3de 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java @@ -22,11 +22,6 @@ public class KLDivLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public KLDivLossImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public KLDivLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr KLDivLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java index 36954004930..cf520d1d944 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java @@ -37,9 +37,6 @@ public class L1LossImpl extends L1LossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public L1LossImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public L1LossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public L1LossImpl(@ByVal(nullValue = "torch::nn::L1LossOptions{}") L1LossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::L1LossOptions{}") L1LossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java index 263c18e3666..cfe41ff633c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java @@ -22,11 +22,6 @@ public class L1LossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public L1LossImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public L1LossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr L1LossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java index 14091780b2a..1729542d918 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java @@ -42,9 +42,6 @@ public class LPPool1dImpl extends LPPool1dImplBase { private native void allocate(@Const @ByRef LPPool1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LPPool1dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public LPPool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java index 4ffc8f3366d..776a2469992 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java @@ -26,9 +26,6 @@ public class LPPool1dImplBase extends LPPool1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LPPool1dImplBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public LPPool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public LPPool1dImplBase(double norm_type, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(norm_type, kernel_size); } private native void allocate(double norm_type, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java index 00aa4022bff..dd514681b85 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java @@ -22,11 +22,6 @@ public class LPPool1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LPPool1dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public LPPool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LPPool1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java index eaee937df4b..28d8d92c014 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java @@ -43,9 +43,6 @@ public class LPPool2dImpl extends LPPool2dImplBase { private native void allocate(@Const @ByRef LPPool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LPPool2dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public LPPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java index 1b15025d22e..0601d774cdd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java @@ -22,9 +22,6 @@ public class LPPool2dImplBase extends LPPool2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LPPool2dImplBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public LPPool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public LPPool2dImplBase(double norm_type, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(norm_type, kernel_size); } private native void allocate(double norm_type, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java index fbfdabb7cbe..33ce8a87e87 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java @@ -22,11 +22,6 @@ public class LPPool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LPPool2dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public LPPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LPPool2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java index 1eb5ab36e2b..ef4b2bd1f80 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java @@ -37,9 +37,6 @@ public class LSTMCellImpl extends LSTMCellImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LSTMCellImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public LSTMCellImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public LSTMCellImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java index 474d86e2d1a..5001ee130b5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java @@ -22,9 +22,6 @@ public class LSTMCellImplBase extends LSTMCellImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LSTMCellImplBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public LSTMCellImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public LSTMCellImplBase(@Const @ByRef RNNCellOptionsBase options_) { super((Pointer)null); allocate(options_); } private native void allocate(@Const @ByRef RNNCellOptionsBase options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java index fcf5dc005a8..e73e97f049c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java @@ -22,11 +22,6 @@ public class LSTMCellImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LSTMCellImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public LSTMCellImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LSTMCellImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java index 231a323504a..6ae33ab51f3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java @@ -37,9 +37,6 @@ public class LSTMImpl extends LSTMImplBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LSTMImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public LSTMImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public LSTMImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java index 27ff0b13b45..e5eef332d8b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java @@ -22,9 +22,6 @@ public class LSTMImplBase extends LSTMImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LSTMImplBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public LSTMImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public LSTMImplBase(@Const @ByRef RNNOptionsBase options_) { super((Pointer)null); allocate(options_); } private native void allocate(@Const @ByRef RNNOptionsBase options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java index 86c0804d600..4e991af3709 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java @@ -22,11 +22,6 @@ public class LSTMImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LSTMImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public LSTMImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LSTMImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java index 893ce319f44..63b866d074c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java @@ -38,9 +38,6 @@ public class LayerNormImpl extends LayerNormImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LayerNormImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public LayerNormImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public LayerNormImpl(@ByVal @Cast("std::vector*") LongVector normalized_shape) { super((Pointer)null); allocate(normalized_shape); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("std::vector*") LongVector normalized_shape); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java index 07d4736aac8..fc65da9c28b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java @@ -22,11 +22,6 @@ public class LayerNormImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LayerNormImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public LayerNormImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LayerNormImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java index d8610884c51..0ef956903d6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java @@ -36,9 +36,6 @@ public class LeakyReLUImpl extends LeakyReLUImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LeakyReLUImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public LeakyReLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public LeakyReLUImpl(@Const @ByRef(nullValue = "torch::nn::LeakyReLUOptions{}") LeakyReLUOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::LeakyReLUOptions{}") LeakyReLUOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java index 31ea3bdc611..c6994a89ce4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java @@ -22,11 +22,6 @@ public class LeakyReLUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LeakyReLUImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public LeakyReLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LeakyReLUImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java index 7665a5457ca..662ec8c9c82 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java @@ -36,9 +36,6 @@ public class LinearImpl extends LinearImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LinearImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public LinearImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public LinearImpl(@Cast("int64_t") long in_features, @Cast("int64_t") long out_features) { super((Pointer)null); allocate(in_features, out_features); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long in_features, @Cast("int64_t") long out_features); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java index 1b792be3b99..37729690823 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java @@ -22,11 +22,6 @@ public class LinearImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LinearImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public LinearImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LinearImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java index 0f341fffa75..69250a439db 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java @@ -40,9 +40,6 @@ public class LocalResponseNormImpl extends LocalResponseNormImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LocalResponseNormImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public LocalResponseNormImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public LocalResponseNormImpl(@Cast("int64_t") long size) { super((Pointer)null); allocate(size); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java index 0fadc9bfa9e..79d128d5ebc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java @@ -22,11 +22,6 @@ public class LocalResponseNormImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LocalResponseNormImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public LocalResponseNormImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LocalResponseNormImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java index aa074362dc7..0669be66e06 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java @@ -31,9 +31,6 @@ public class LogSigmoidImpl extends LogSigmoidImplCloneable { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LogSigmoidImpl(Pointer p) { super(p); } @SharedPtr @Name("std::make_shared") private native void allocate(); - /** Downcast constructor. */ - public LogSigmoidImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java index 84029b0f3ad..3191c9891e5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java @@ -22,11 +22,6 @@ public class LogSigmoidImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LogSigmoidImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public LogSigmoidImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LogSigmoidImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java index 6a77efc49d1..85f20ecb888 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java @@ -36,9 +36,6 @@ public class LogSoftmaxImpl extends LogSoftmaxImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LogSoftmaxImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public LogSoftmaxImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public LogSoftmaxImpl(@Cast("int64_t") long dim) { super((Pointer)null); allocate(dim); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long dim); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java index cf567ea3bc5..970ca32f670 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java @@ -22,11 +22,6 @@ public class LogSoftmaxImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LogSoftmaxImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public LogSoftmaxImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LogSoftmaxImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java index 3b0d7330dc8..4b4f624553e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java @@ -37,9 +37,6 @@ public class MSELossImpl extends MSELossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MSELossImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public MSELossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public MSELossImpl(@ByVal(nullValue = "torch::nn::MSELossOptions{}") MSELossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::MSELossOptions{}") MSELossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java index 9032c751d25..92f02ef33f7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java @@ -22,11 +22,6 @@ public class MSELossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MSELossImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public MSELossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MSELossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java index 27228758ca3..d3b8366f568 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java @@ -40,9 +40,6 @@ public class MarginRankingLossImpl extends MarginRankingLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MarginRankingLossImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public MarginRankingLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public MarginRankingLossImpl(@ByVal(nullValue = "torch::nn::MarginRankingLossOptions{}") MarginRankingLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::MarginRankingLossOptions{}") MarginRankingLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java index 8b81ca61bed..5210bd84cbb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java @@ -22,11 +22,6 @@ public class MarginRankingLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MarginRankingLossImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public MarginRankingLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MarginRankingLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java index 32b2ea85819..7bf93d33438 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java @@ -42,9 +42,6 @@ public class MaxPool1dImpl extends MaxPool1dImplBase { private native void allocate(@Const @ByRef MaxPool1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool1dImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public MaxPool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java index 3f0981fb1b0..e4afe51fb26 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java @@ -26,9 +26,6 @@ public class MaxPool1dImplBase extends MaxPool1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool1dImplBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public MaxPool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java index 7c237cbc398..e17b2b98218 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java @@ -22,11 +22,6 @@ public class MaxPool1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool1dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public MaxPool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxPool1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java index ff540157117..5be87cfbcde 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java @@ -42,9 +42,6 @@ public class MaxPool2dImpl extends MaxPool2dImplBase { private native void allocate(@Const @ByRef MaxPool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool2dImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public MaxPool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java index d57d7ef0403..1d4de48238c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java @@ -22,9 +22,6 @@ public class MaxPool2dImplBase extends MaxPool2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool2dImplBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public MaxPool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxPool2dImplBase(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java index 3baca54ae9a..ccb503c260b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java @@ -22,11 +22,6 @@ public class MaxPool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool2dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public MaxPool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxPool2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java index 54f6d4de5b0..eb14f2d9965 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java @@ -42,9 +42,6 @@ public class MaxPool3dImpl extends MaxPool3dImplBase { private native void allocate(@Const @ByRef MaxPool3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool3dImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public MaxPool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java index 3dfe436efa8..6f188ab9d93 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java @@ -22,9 +22,6 @@ public class MaxPool3dImplBase extends MaxPool3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool3dImplBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public MaxPool3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxPool3dImplBase(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java index 7869295f029..bf4f52396fe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java @@ -22,11 +22,6 @@ public class MaxPool3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool3dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public MaxPool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxPool3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java index b1164c14d6d..44b27cb1227 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java @@ -42,9 +42,6 @@ public class MaxUnpool1dImpl extends MaxUnpool1dImplBase { private native void allocate(@Const @ByRef MaxUnpool1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool1dImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public MaxUnpool1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward( @Const @ByRef Tensor input, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java index 24d3cb92c40..6f1072bc6ee 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java @@ -26,9 +26,6 @@ public class MaxUnpool1dImplBase extends MaxUnpool1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool1dImplBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public MaxUnpool1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxUnpool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java index 09611be8a7c..63128464f52 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java @@ -22,11 +22,6 @@ public class MaxUnpool1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool1dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public MaxUnpool1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxUnpool1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java index 3368fabf8bb..d3c968e285f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java @@ -42,9 +42,6 @@ public class MaxUnpool2dImpl extends MaxUnpool2dImplBase { private native void allocate(@Const @ByRef MaxUnpool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool2dImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public MaxUnpool2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward( @Const @ByRef Tensor input, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java index 91edb8e0192..f4250e755ec 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java @@ -22,9 +22,6 @@ public class MaxUnpool2dImplBase extends MaxUnpool2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool2dImplBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public MaxUnpool2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxUnpool2dImplBase(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java index 2ab1cd9b7d3..9c97ee57290 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java @@ -22,11 +22,6 @@ public class MaxUnpool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool2dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public MaxUnpool2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxUnpool2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java index a912c6aac12..7ee872cbbeb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java @@ -42,9 +42,6 @@ public class MaxUnpool3dImpl extends MaxUnpool3dImplBase { private native void allocate(@Const @ByRef MaxUnpool3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool3dImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public MaxUnpool3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward( @Const @ByRef Tensor input, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java index ef0c1fdd526..b0e6926b8ba 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java @@ -22,9 +22,6 @@ public class MaxUnpool3dImplBase extends MaxUnpool3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool3dImplBase(Pointer p) { super(p); } - /** Downcast constructor. */ - public MaxUnpool3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public MaxUnpool3dImplBase(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java index ea6b5e74825..7fc5ee12c9c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java @@ -22,11 +22,6 @@ public class MaxUnpool3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool3dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public MaxUnpool3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxUnpool3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java index cfd6d798c7c..b05a929a1df 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java @@ -31,9 +31,6 @@ public class MishImpl extends MishImplCloneable { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MishImpl(Pointer p) { super(p); } @SharedPtr @Name("std::make_shared") private native void allocate(); - /** Downcast constructor. 
*/ - public MishImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java index a4af5d046d6..313d70c96a5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java @@ -22,11 +22,6 @@ public class MishImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MishImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public MishImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MishImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java index 14d5a8a8fbb..3531035392f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java @@ -65,7 +65,6 @@ public class Module extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Module(Pointer p) { super(p); } - public Module asModule() { return this; } /** Tells the base {@code Module} about the name of the submodule. */ @@ -79,10 +78,9 @@ public class Module extends Pointer { * time {@code .name()} is invoked. */ public Module() { super((Pointer)null); allocate(); } @SharedPtr @Name("std::make_shared") private native void allocate(); - public Module(@Const @ByRef Module arg0) { super((Pointer)null); allocate(arg0.asModule()); } + public Module(@Const @ByRef Module arg0) { super((Pointer)null); allocate(arg0); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef Module arg0); - public Module put(Module arg0) { return asModule()._put(arg0.asModule()); } - private native @ByRef @Name("operator =") Module _put(@Const @ByRef Module arg0); + public native @ByRef @Name("operator =") Module put(@Const @ByRef Module arg0); /** Returns the name of the {@code Module}. * @@ -96,8 +94,7 @@ public class Module extends Pointer { /// /// - public BytePointer name() { return asModule()._name(); } - private native @StdString @NoException(true) @Name("name") BytePointer _name(); + public native @StdString @NoException(true) BytePointer name(); /** Performs a recursive deep copy of the module and all its registered * parameters, buffers and submodules. 
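
From here on, Module's own API is rebound: the old pattern of a public Java wrapper delegating to a private native method through asModule() (put/_put, name/_name, and so on below) collapses into a single public native method per member. A sketch of the resulting call pattern; the getString() conversion is the usual JavaCPP BytePointer idiom, assumed here rather than taken from the patch:

    import org.bytedeco.pytorch.Module;

    public class NameDemo {
        public static void main(String[] args) {
            Module m = new Module();
            // name() now calls straight into torch::nn::Module::name().
            System.out.println(m.name().getString());
        }
    }
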
@@ -117,8 +114,7 @@ public class Module extends Pointer { * \endrst */ /// - public Module clone(DeviceOptional device) { return asModule()._clone(device); } - private native @SharedPtr("torch::nn::Module") @ByVal @Virtual(subclasses=false, method="clone") @Cast({"", "std::shared_ptr"}) @Const({false, false, true}) @Name("clone") Module _clone( + public native @SharedPtr("torch::nn::Module") @ByVal @Virtual(subclasses=false, method="clone") @Cast({"", "std::shared_ptr"}) @Const({false, false, true}) Module clone( @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); /** Applies the {@code function} to the {@code Module} and recursively to every submodule. @@ -133,8 +129,7 @@ public class Module extends Pointer { * \endrst */ /// - public void apply(ModuleApplyFunction function) { asModule()._apply(function); } - private native @Name("apply") void _apply(@Const @ByRef ModuleApplyFunction function); + public native void apply(@Const @ByRef ModuleApplyFunction function); /** Applies the {@code function} to the {@code Module} and recursively to every submodule. * The function must accept a {@code const Module&}. @@ -162,15 +157,12 @@ public class Module extends Pointer { * \endrst */ /// - public void apply(NamedModuleApplyFunction function, BytePointer name_prefix) { asModule()._apply(function, name_prefix); } - private native @Name("apply") void _apply( + public native void apply( @Const @ByRef NamedModuleApplyFunction function, @StdString BytePointer name_prefix/*=std::string()*/); - public void apply(NamedModuleApplyFunction function) { asModule()._apply(function); } - private native @Name("apply") void _apply( + public native void apply( @Const @ByRef NamedModuleApplyFunction function); - public void apply(NamedModuleApplyFunction function, String name_prefix) { asModule()._apply(function, name_prefix); } - private native @Name("apply") void _apply( + public native void apply( @Const @ByRef NamedModuleApplyFunction function, @StdString String name_prefix/*=std::string()*/); @@ -200,8 +192,7 @@ public class Module extends Pointer { * \endrst */ /// - public void apply(SharedModuleApplyFunction function) { asModule()._apply(function); } - private native @Name("apply") void _apply(@Cast("const torch::nn::Module::ModulePointerApplyFunction*") @ByRef SharedModuleApplyFunction function); + public native void apply(@Cast("const torch::nn::Module::ModulePointerApplyFunction*") @ByRef SharedModuleApplyFunction function); /** Applies the {@code function} to the {@code Module} and recursively to every submodule. 
* The function must accept a {@code const std::string&} for the key of the module, @@ -218,47 +209,36 @@ public class Module extends Pointer { * std::cout << key << ": " << module->name() << std::endl; * }); * \endrst */ - public void apply(NamedSharedModuleApplyFunction function, BytePointer name_prefix) { asModule()._apply(function, name_prefix); } - private native @Name("apply") void _apply( + public native void apply( @Const @ByRef NamedSharedModuleApplyFunction function, @StdString BytePointer name_prefix/*=std::string()*/); - public void apply(NamedSharedModuleApplyFunction function) { asModule()._apply(function); } - private native @Name("apply") void _apply( + public native void apply( @Const @ByRef NamedSharedModuleApplyFunction function); - public void apply(NamedSharedModuleApplyFunction function, String name_prefix) { asModule()._apply(function, name_prefix); } - private native @Name("apply") void _apply( + public native void apply( @Const @ByRef NamedSharedModuleApplyFunction function, @StdString String name_prefix/*=std::string()*/); /** Returns the parameters of this {@code Module} and if {@code recurse} is true, also * recursively of every submodule. */ - public TensorVector parameters(boolean recurse) { return asModule()._parameters(recurse); } - private native @Name("parameters") @Cast({"", "std::vector"}) @StdMove TensorVector _parameters(@Cast("bool") boolean recurse/*=true*/); - public TensorVector parameters() { return asModule()._parameters(); } - private native @Name("parameters") @Cast({"", "std::vector"}) @StdMove TensorVector _parameters(); + public native @Cast({"", "std::vector"}) @StdMove TensorVector parameters(@Cast("bool") boolean recurse/*=true*/); + public native @Cast({"", "std::vector"}) @StdMove TensorVector parameters(); /** Returns an {@code OrderedDict} with the parameters of this {@code Module} along with * their keys, and if {@code recurse} is true also recursively of every submodule. */ - public StringTensorDict named_parameters(boolean recurse) { return asModule()._named_parameters(recurse); } - private native @ByVal @Name("named_parameters") StringTensorDict _named_parameters(@Cast("bool") boolean recurse/*=true*/); - public StringTensorDict named_parameters() { return asModule()._named_parameters(); } - private native @ByVal @Name("named_parameters") StringTensorDict _named_parameters(); + public native @ByVal StringTensorDict named_parameters(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal StringTensorDict named_parameters(); /** Returns the buffers of this {@code Module} and if {@code recurse} is true, also * recursively of every submodule. */ - public TensorVector buffers(boolean recurse) { return asModule()._buffers(recurse); } - private native @Name("buffers") @Cast({"", "std::vector"}) @StdMove TensorVector _buffers(@Cast("bool") boolean recurse/*=true*/); - public TensorVector buffers() { return asModule()._buffers(); } - private native @Name("buffers") @Cast({"", "std::vector"}) @StdMove TensorVector _buffers(); + public native @Cast({"", "std::vector"}) @StdMove TensorVector buffers(@Cast("bool") boolean recurse/*=true*/); + public native @Cast({"", "std::vector"}) @StdMove TensorVector buffers(); /** Returns an {@code OrderedDict} with the buffers of this {@code Module} along with * their keys, and if {@code recurse} is true also recursively of every submodule. 
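
The accessors rebound above (parameters, named_parameters, buffers) keep both generated overloads, with and without the defaulted recurse flag. A sketch of walking the flat TensorVector, assuming the size()/get(long) accessors that JavaCPP's std::vector adapters normally expose:

    import org.bytedeco.pytorch.Module;
    import org.bytedeco.pytorch.TensorVector;

    public class ParamWalk {
        public static void main(String[] args) {
            Module m = new Module();
            TensorVector params = m.parameters(); // recurse defaults to true
            for (long i = 0; i < params.size(); i++) {
                System.out.println(params.get(i).dim()); // rank of each parameter tensor
            }
        }
    }
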
*/ /// - public StringTensorDict named_buffers(boolean recurse) { return asModule()._named_buffers(recurse); } - private native @ByVal @Name("named_buffers") StringTensorDict _named_buffers(@Cast("bool") boolean recurse/*=true*/); - public StringTensorDict named_buffers() { return asModule()._named_buffers(); } - private native @ByVal @Name("named_buffers") StringTensorDict _named_buffers(); + public native @ByVal StringTensorDict named_buffers(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal StringTensorDict named_buffers(); /** Returns the submodules of this {@code Module} (the entire submodule hierarchy) * and if {@code include_self} is true, also inserts a {@code shared_ptr} to this module @@ -273,10 +253,8 @@ public class Module extends Pointer { * \endrst */ /// - public SharedModuleVector modules(boolean include_self) { return asModule()._modules(include_self); } - private native @ByVal @Name("modules") SharedModuleVector _modules(@Cast("bool") boolean include_self/*=true*/); - public SharedModuleVector modules() { return asModule()._modules(); } - private native @ByVal @Name("modules") SharedModuleVector _modules(); + public native @ByVal SharedModuleVector modules(@Cast("bool") boolean include_self/*=true*/); + public native @ByVal SharedModuleVector modules(); /** Returns an {@code OrderedDict} of the submodules of this {@code Module} (the entire * submodule hierarchy) and their keys, and if {@code include_self} is true, also @@ -291,36 +269,29 @@ public class Module extends Pointer { * this method with {@code include_self} set to false if your {@code Module} is not * stored in a {@code shared_ptr}. * \endrst */ - public StringSharedModuleDict named_modules(BytePointer name_prefix, boolean include_self) { return asModule()._named_modules(name_prefix, include_self); } - private native @ByVal @Name("named_modules") StringSharedModuleDict _named_modules( + public native @ByVal StringSharedModuleDict named_modules( @StdString BytePointer name_prefix/*=std::string()*/, @Cast("bool") boolean include_self/*=true*/); - public StringSharedModuleDict named_modules() { return asModule()._named_modules(); } - private native @ByVal @Name("named_modules") StringSharedModuleDict _named_modules(); - public StringSharedModuleDict named_modules(String name_prefix, boolean include_self) { return asModule()._named_modules(name_prefix, include_self); } - private native @ByVal @Name("named_modules") StringSharedModuleDict _named_modules( + public native @ByVal StringSharedModuleDict named_modules(); + public native @ByVal StringSharedModuleDict named_modules( @StdString String name_prefix/*=std::string()*/, @Cast("bool") boolean include_self/*=true*/); /** Returns the direct submodules of this {@code Module}. */ - public SharedModuleVector children() { return asModule()._children(); } - private native @ByVal @Name("children") SharedModuleVector _children(); + public native @ByVal SharedModuleVector children(); /** Returns an {@code OrderedDict} of the direct submodules of this {@code Module} and * their keys. */ - public StringSharedModuleDict named_children() { return asModule()._named_children(); } - private native @ByVal @Name("named_children") StringSharedModuleDict _named_children(); + public native @ByVal StringSharedModuleDict named_children(); /** Enables "training" mode. 
*/ - public void train(boolean on) { asModule()._train(on); } - private native @Virtual(subclasses=false, method="train") @Name("train") void _train(@Cast("bool") boolean on/*=true*/); + public native @Virtual(subclasses=false, method="train") void train(@Cast("bool") boolean on/*=true*/); /** Calls train(false) to enable "eval" mode. * Do not override this method, override {@code train()} instead. */ /// - public void eval() { asModule()._eval(); } - private native @Name("eval") void _eval(); + public native void eval(); /** True if the module is in training mode. * @@ -333,8 +304,7 @@ public class Module extends Pointer { * depending on this property. */ /// - public boolean is_training() { return asModule()._is_training(); } - private native @Cast("bool") @Virtual(subclasses=false, method="is_training") @NoException(true) @Const({false, false, true}) @Name("is_training") boolean _is_training(); + public native @Cast("bool") @Virtual(subclasses=false, method="is_training") @NoException(true) @Const({false, false, true}) boolean is_training(); /** Recursively casts all parameters to the given {@code dtype} and {@code device}. * @@ -344,8 +314,7 @@ public class Module extends Pointer { * effect. */ /// - public void to(Device device, ScalarType dtype, boolean non_blocking) { asModule()._to(device, dtype, non_blocking); } - private native @Virtual(subclasses=false, method="to") @Name("to") void _to( + public native @Virtual(subclasses=false, method="to") void to( @ByVal Device device, ScalarType dtype, @Cast("bool") boolean non_blocking/*=false*/); @@ -358,8 +327,7 @@ public class Module extends Pointer { * effect. */ /// - public void to(ScalarType dtype, boolean non_blocking) { asModule()._to(dtype, non_blocking); } - private native @Virtual(subclasses=false, method="to") @Name("to") void _to(ScalarType dtype, @Cast("bool") boolean non_blocking/*=false*/); + public native @Virtual(subclasses=false, method="to") void to(ScalarType dtype, @Cast("bool") boolean non_blocking/*=false*/); /** Recursively moves all parameters to the given device. * @@ -367,16 +335,14 @@ public class Module extends Pointer { * destination is on the GPU or vice versa, the copy is performed * asynchronously with respect to the host. Otherwise, the argument has no * effect. */ - public void to(Device device, boolean non_blocking) { asModule()._to(device, non_blocking); } - private native @Virtual(subclasses=false, method="to") @Name("to") void _to(@ByVal Device device, @Cast("bool") boolean non_blocking/*=false*/); + public native @Virtual(subclasses=false, method="to") void to(@ByVal Device device, @Cast("bool") boolean non_blocking/*=false*/); /** Recursively zeros out the {@code grad} value of each registered parameter. */ /// /// /// - public void zero_grad(boolean set_to_none) { asModule()._zero_grad(set_to_none); } - private native @Virtual(subclasses=false, method="zero_grad") @Name("zero_grad") void _zero_grad(@Cast("bool") boolean set_to_none/*=true*/); + public native @Virtual(subclasses=false, method="zero_grad") void zero_grad(@Cast("bool") boolean set_to_none/*=true*/); /** Attempts to cast this {@code Module} to the given {@code ModuleType}. * @@ -451,8 +417,7 @@ public class Module extends Pointer { * {@code nn::Functional}), those submodules are skipped when serializing. 
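
train, eval, is_training, the three to(...) overloads, and zero_grad get the same direct-native rewrite while keeping their @Virtual binding. A small usage sketch; new Device("cpu") assumes the string constructor the presets expose for torch::Device:

    import org.bytedeco.pytorch.Device;
    import org.bytedeco.pytorch.Module;

    public class ModeDemo {
        public static void main(String[] args) {
            Module net = new Module();
            net.train(true);
            System.out.println(net.is_training());       // true
            net.to(new Device("cpu"), /*non_blocking=*/ false);
            net.zero_grad(/*set_to_none=*/ true);
            net.eval();                                  // same as train(false)
        }
    }
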
*/ /// - public void save(OutputArchive archive) { asModule()._save(archive); } - private native @Virtual(subclasses=false, method="save") @Const({false, false, true}) @Name("save") void _save(@ByRef OutputArchive archive); + public native @Virtual(subclasses=false, method="save") @Const({false, false, true}) void save(@ByRef OutputArchive archive); /** Deserializes the {@code Module} from the given {@code InputArchive}. * @@ -461,8 +426,7 @@ public class Module extends Pointer { * {@code InputArchive} when deserializing. */ /// - public void load(InputArchive archive) { asModule()._load(archive); } - private native @Virtual(subclasses=false, method="load") @Name("load") void _load(@ByRef InputArchive archive); + public native @Virtual(subclasses=false, method="load") void load(@ByRef InputArchive archive); /** Streams a pretty representation of the {@code Module} into the given {@code stream}. * By default, this representation will be the name of the module (taken from @@ -471,8 +435,7 @@ public class Module extends Pointer { * * Override this method to change the pretty print. The input * {@code stream} should be returned from the method, to allow easy chaining. */ - public void pretty_print(Pointer stream) { asModule()._pretty_print(stream); } - private native @Virtual(subclasses=false, method="pretty_print") @Const({false, false, true}) @Name("pretty_print") void _pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); + public native @Virtual(subclasses=false, method="pretty_print") @Const({false, false, true}) void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); /** Returns whether the {@code Module} is serializable. */ @@ -480,8 +443,7 @@ public class Module extends Pointer { /// /// /// - public boolean is_serializable() { return asModule()._is_serializable(); } - private native @Cast("bool") @Virtual(subclasses=false, method="is_serializable") @Const({false, false, true}) @Name("is_serializable") boolean _is_serializable(); + public native @Cast("bool") @Virtual(subclasses=false, method="is_serializable") @Const({false, false, true}) boolean is_serializable(); /** Registers a parameter with this {@code Module}. 
* @@ -504,22 +466,18 @@ public class Module extends Pointer { /// /// /// - public Tensor register_parameter(BytePointer name, Tensor tensor, boolean requires_grad) { return asModule()._register_parameter(name, tensor, requires_grad); } - private native @ByRef @Name("register_parameter") Tensor _register_parameter( + public native @ByRef Tensor register_parameter( @StdString BytePointer name, @ByVal Tensor tensor, @Cast("bool") boolean requires_grad/*=true*/); - public Tensor register_parameter(BytePointer name, Tensor tensor) { return asModule()._register_parameter(name, tensor); } - private native @ByRef @Name("register_parameter") Tensor _register_parameter( + public native @ByRef Tensor register_parameter( @StdString BytePointer name, @ByVal Tensor tensor); - public Tensor register_parameter(String name, Tensor tensor, boolean requires_grad) { return asModule()._register_parameter(name, tensor, requires_grad); } - private native @ByRef @Name("register_parameter") Tensor _register_parameter( + public native @ByRef Tensor register_parameter( @StdString String name, @ByVal Tensor tensor, @Cast("bool") boolean requires_grad/*=true*/); - public Tensor register_parameter(String name, Tensor tensor) { return asModule()._register_parameter(name, tensor); } - private native @ByRef @Name("register_parameter") Tensor _register_parameter( + public native @ByRef Tensor register_parameter( @StdString String name, @ByVal Tensor tensor); @@ -540,10 +498,8 @@ public class Module extends Pointer { /// /// /// - public Tensor register_buffer(BytePointer name, Tensor tensor) { return asModule()._register_buffer(name, tensor); } - private native @ByRef @Name("register_buffer") Tensor _register_buffer(@StdString BytePointer name, @ByVal Tensor tensor); - public Tensor register_buffer(String name, Tensor tensor) { return asModule()._register_buffer(name, tensor); } - private native @ByRef @Name("register_buffer") Tensor _register_buffer(@StdString String name, @ByVal Tensor tensor); + public native @ByRef Tensor register_buffer(@StdString BytePointer name, @ByVal Tensor tensor); + public native @ByRef Tensor register_buffer(@StdString String name, @ByVal Tensor tensor); /** Registers a submodule with this {@code Module}. * @@ -563,9 +519,9 @@ public class Module extends Pointer { /// /// private native @Name("register_module") void _register_module(@StdString BytePointer name, @SharedPtr @ByVal Module module); - public M register_module(BytePointer name, M module) { asModule()._register_module(name, module.asModule()); return module; } + public M register_module(BytePointer name, M module) { _register_module(name, module); return module; } private native @Name("register_module") void _register_module(@StdString String name, @SharedPtr @ByVal Module module); - public M register_module(String name, M module) { asModule()._register_module(name, module.asModule()); return module; } + public M register_module(String name, M module) { _register_module(name, module); return module; } /** Registers a submodule with this {@code Module}. * @@ -608,12 +564,9 @@ public class Module extends Pointer { /** Unregisters a submodule from this {@code Module}. If there is no such module * with {@code name} an exception is thrown. 
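
register_parameter, register_buffer, and the generic register_module keep their shapes, minus the indirection; register_module still hands back the module it was given, so the field-assignment idiom from the presets' samples keeps working when subclassing Module from Java. A sketch along those lines; LinearImpl and the statically imported ones(...) factory come from elsewhere in the presets, not from this patch:

    import org.bytedeco.pytorch.LinearImpl;
    import org.bytedeco.pytorch.Module;
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.ones;

    public class TinyNet extends Module {
        final LinearImpl fc;
        final Tensor extraBias;
        public TinyNet() {
            fc = register_module("fc", new LinearImpl(4, 2));       // returns the LinearImpl
            extraBias = register_parameter("extra_bias", ones(2));  // tracked, requires_grad=true
        }
    }
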
*/ - public void unregister_module(BytePointer name) { asModule()._unregister_module(name); } - private native @Name("unregister_module") void _unregister_module(@StdString BytePointer name); - public void unregister_module(String name) { asModule()._unregister_module(name); } - private native @Name("unregister_module") void _unregister_module(@StdString String name); - private static Pointer shiftLeft(Pointer stream, Module module) { return _shiftLeft(stream, module.asModule()); } - private static native @Namespace @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer _shiftLeft( + public native void unregister_module(@StdString BytePointer name); + public native void unregister_module(@StdString String name); + private static native @Namespace @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( @Cast("std::ostream*") @ByRef Pointer stream, @Const @ByRef Module module); public Pointer shiftLeft(Pointer stream) { return shiftLeft(stream, this); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java index c70f0a38458..79bd8186836 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java @@ -79,9 +79,6 @@ public class ModuleDictImpl extends ModuleDictImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ModuleDictImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public ModuleDictImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ModuleDictImpl() { super((Pointer)null); allocate(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java index c92565281cb..06c3324d321 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java @@ -29,11 +29,6 @@ public class ModuleDictImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ModuleDictImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public ModuleDictImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ModuleDictImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java index a85a4be5188..c3371957dc4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java @@ -67,9 +67,6 @@ public class ModuleListImpl extends ModuleListImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public ModuleListImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public ModuleListImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ModuleListImpl() { super((Pointer)null); allocate(); } @@ -90,8 +87,7 @@ public class ModuleListImpl extends ModuleListImplCloneable { /** Pretty prints the {@code ModuleList} module into the given {@code stream}. */ public native void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); - public void push_back(Module module) { _push_back(module.asModule()); } - private native @Name("push_back") void _push_back(@SharedPtr("torch::nn::Module") @ByVal Module module); + public native void push_back(@SharedPtr("torch::nn::Module") @ByVal Module module); /** Adds a new {@code Module} to the {@code ModuleList} container, moving or copying * it into a {@code shared_ptr} internally. This method allows passing value types, @@ -138,8 +134,7 @@ public class ModuleListImpl extends ModuleListImplCloneable { /** True if there are no modules in the {@code ModuleList}. */ public native @Cast("bool") @NoException(true) boolean is_empty(); - public void insert(long index, Module module) { _insert(index, module.asModule()); } - private native @Name("insert") void _insert(@Cast("size_t") long index, @SharedPtr("torch::nn::Module") @ByVal Module module); + public native void insert(@Cast("size_t") long index, @SharedPtr("torch::nn::Module") @ByVal Module module); /** Unwraps the contained module of a {@code ModuleHolder} and inserts it in the * {@code ModuleList}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java index c71d0ef5093..c97573f06eb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java @@ -22,11 +22,6 @@ public class ModuleListImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ModuleListImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public ModuleListImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ModuleListImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java index 39091dc6c54..eac7e76e484 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java @@ -40,9 +40,6 @@ public class MultiLabelMarginLossImpl extends MultiLabelMarginLossImplCloneable static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiLabelMarginLossImpl(Pointer p) { super(p); } - /** Downcast constructor. 
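
For ModuleListImpl, push_back and insert likewise become plain natives taking the module by shared_ptr value. A usage sketch; LinearImpl and ReLUImpl are assumed from the wider presets:

    import org.bytedeco.pytorch.LinearImpl;
    import org.bytedeco.pytorch.ModuleListImpl;
    import org.bytedeco.pytorch.ReLUImpl;

    public class ListDemo {
        public static void main(String[] args) {
            ModuleListImpl layers = new ModuleListImpl();
            layers.push_back(new LinearImpl(8, 8));
            layers.push_back(new ReLUImpl());
            layers.insert(1, new LinearImpl(8, 8)); // splice between the first two
            System.out.println(layers.is_empty());  // false
        }
    }
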
*/ - public MultiLabelMarginLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public MultiLabelMarginLossImpl(@ByVal(nullValue = "torch::nn::MultiLabelMarginLossOptions{}") MultiLabelMarginLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::MultiLabelMarginLossOptions{}") MultiLabelMarginLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java index ad3df9b4b9b..322b864e5b3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java @@ -22,11 +22,6 @@ public class MultiLabelMarginLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiLabelMarginLossImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public MultiLabelMarginLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiLabelMarginLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java index 8e05af0c6cc..5dc30d04fca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java @@ -40,9 +40,6 @@ public class MultiLabelSoftMarginLossImpl extends MultiLabelSoftMarginLossImplCl static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiLabelSoftMarginLossImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public MultiLabelSoftMarginLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public MultiLabelSoftMarginLossImpl( @ByVal(nullValue = "torch::nn::MultiLabelSoftMarginLossOptions{}") MultiLabelSoftMarginLossOptions options_) { super((Pointer)null); allocate(options_); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java index 7f625222a72..e71b9f2b738 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java @@ -22,11 +22,6 @@ public class MultiLabelSoftMarginLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public MultiLabelSoftMarginLossImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public MultiLabelSoftMarginLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiLabelSoftMarginLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java index 10ebc98b558..1b4818b71ba 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java @@ -40,9 +40,6 @@ public class MultiMarginLossImpl extends MultiMarginLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiMarginLossImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public MultiMarginLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public MultiMarginLossImpl(@ByVal(nullValue = "torch::nn::MultiMarginLossOptions{}") MultiMarginLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::MultiMarginLossOptions{}") MultiMarginLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java index cd4b1562f0f..7eb0e9097c6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java @@ -22,11 +22,6 @@ public class MultiMarginLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiMarginLossImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public MultiMarginLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiMarginLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java index 9a70eab0562..3d5886a7b29 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java @@ -36,9 +36,6 @@ public class MultiheadAttentionImpl extends MultiheadAttentionImplCloneable { static { Loader.load(); } /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiheadAttentionImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public MultiheadAttentionImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public MultiheadAttentionImpl(@Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads) { super((Pointer)null); allocate(embed_dim, num_heads); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java index b0849693f74..625b9aa2001 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java @@ -22,11 +22,6 @@ public class MultiheadAttentionImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiheadAttentionImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public MultiheadAttentionImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiheadAttentionImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java index 858a69d77e2..94915e4af24 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java @@ -37,9 +37,6 @@ public class NLLLossImpl extends NLLLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NLLLossImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public NLLLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public NLLLossImpl(@ByVal(nullValue = "torch::nn::NLLLossOptions{}") NLLLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::NLLLossOptions{}") NLLLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java index 15260d65061..b49ff7957b2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java @@ -22,11 +22,6 @@ public class NLLLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NLLLossImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
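
The attention and loss modules in this stretch lose only the downcast boilerplate; their real constructors are untouched, like the embed_dim/num_heads overload of MultiheadAttentionImpl visible above. A construction sketch, assuming the no-argument overload that javacpp generates for the defaulted NLLLossOptions:

    import org.bytedeco.pytorch.MultiheadAttentionImpl;
    import org.bytedeco.pytorch.NLLLossImpl;

    public class CtorDemo {
        public static void main(String[] args) {
            MultiheadAttentionImpl attn = new MultiheadAttentionImpl(512, 8); // embed_dim, num_heads
            NLLLossImpl nll = new NLLLossImpl();                              // NLLLossOptions{}
            System.out.println(!attn.isNull() && !nll.isNull());              // true
        }
    }
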
*/ - public NLLLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr NLLLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java index aa280065541..03028fde6b1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java @@ -36,9 +36,6 @@ public class PReLUImpl extends PReLUImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PReLUImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public PReLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public PReLUImpl(@Const @ByRef(nullValue = "torch::nn::PReLUOptions{}") PReLUOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::PReLUOptions{}") PReLUOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java index dff4304aca0..1fe1edfa502 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java @@ -22,11 +22,6 @@ public class PReLUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PReLUImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public PReLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PReLUImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java index 1d06dc41a3c..0749d9b5525 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java @@ -38,9 +38,6 @@ public class PairwiseDistanceImpl extends PairwiseDistanceImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PairwiseDistanceImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/
-    public PairwiseDistanceImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::PairwiseDistanceImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public PairwiseDistanceImpl(@Const @ByRef(nullValue = "torch::nn::PairwiseDistanceOptions{}") PairwiseDistanceOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared<torch::nn::PairwiseDistanceImpl>") private native void allocate(@Const @ByRef(nullValue = "torch::nn::PairwiseDistanceOptions{}") PairwiseDistanceOptions options_);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java
index bc37452f466..574eb297a8d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java
@@ -22,11 +22,6 @@ public class PairwiseDistanceImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public PairwiseDistanceImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public PairwiseDistanceImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::PairwiseDistanceImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::PairwiseDistanceImpl>>") Module asModule(@SharedPtr PairwiseDistanceImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java
index c9653f15d05..98b36a68eb2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java
@@ -23,9 +23,6 @@ public class ParameterDictImpl extends ParameterDictImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ParameterDictImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ParameterDictImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ParameterDictImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public ParameterDictImpl() { super((Pointer)null); allocate(); }
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java
index 489a708f97c..933a91bec93 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java
@@ -22,11 +22,6 @@ public class ParameterDictImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ParameterDictImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ParameterDictImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::ParameterDictImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::ParameterDictImpl>>") Module asModule(@SharedPtr ParameterDictImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java
index a8226b9074c..8db377e6ffa 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java
@@ -22,9 +22,6 @@ public class ParameterListImpl extends ParameterListImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ParameterListImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ParameterListImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ParameterListImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public ParameterListImpl() { super((Pointer)null); allocate(); }
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java
index c3af0ff152a..b60913f11ad 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java
@@ -22,11 +22,6 @@ public class ParameterListImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ParameterListImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ParameterListImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::ParameterListImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::ParameterListImpl>>") Module asModule(@SharedPtr ParameterListImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java
index 2966a3ae01a..15559a0c629 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java
@@ -39,9 +39,6 @@ public class PixelShuffleImpl extends PixelShuffleImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public PixelShuffleImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public PixelShuffleImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::PixelShuffleImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public PixelShuffleImpl(@Const @ByRef PixelShuffleOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared<torch::nn::PixelShuffleImpl>") private native void allocate(@Const @ByRef PixelShuffleOptions options_);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java
index 645974afd88..a92efd00b13 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java
@@ -22,11 +22,6 @@ public class PixelShuffleImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public PixelShuffleImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public PixelShuffleImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::PixelShuffleImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::PixelShuffleImpl>>") Module asModule(@SharedPtr PixelShuffleImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java
index cb29e266e55..5872807b44d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java
@@ -38,9 +38,6 @@ public class PixelUnshuffleImpl extends PixelUnshuffleImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public PixelUnshuffleImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public PixelUnshuffleImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::PixelUnshuffleImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public PixelUnshuffleImpl(@Const @ByRef PixelUnshuffleOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared<torch::nn::PixelUnshuffleImpl>") private native void allocate(@Const @ByRef PixelUnshuffleOptions options_);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java
index bbb8a3eb53b..4cbbb13342c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java
@@ -22,11 +22,6 @@ public class PixelUnshuffleImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public PixelUnshuffleImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public PixelUnshuffleImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::PixelUnshuffleImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::PixelUnshuffleImpl>>") Module asModule(@SharedPtr PixelUnshuffleImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java
index 8d226726038..984c1441432 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java
@@ -38,9 +38,6 @@ public class PoissonNLLLossImpl extends PoissonNLLLossImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public PoissonNLLLossImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public PoissonNLLLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::PoissonNLLLossImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public PoissonNLLLossImpl(@ByVal(nullValue = "torch::nn::PoissonNLLLossOptions{}") PoissonNLLLossOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared<torch::nn::PoissonNLLLossImpl>") private native void allocate(@ByVal(nullValue = "torch::nn::PoissonNLLLossOptions{}") PoissonNLLLossOptions options_);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java
index af8d41d4318..30d27fcf1dc 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java
@@ -22,11 +22,6 @@ public class PoissonNLLLossImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public PoissonNLLLossImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public PoissonNLLLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::PoissonNLLLossImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::PoissonNLLLossImpl>>") Module asModule(@SharedPtr PoissonNLLLossImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
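[Explanatory note, not part of the patch.] Every pair of hunks in this series repeats one mechanical change: the generated downcast constructor taking a type-erased Module, together with its native SHARED_PTR_NAMESPACE::static_pointer_cast allocator, is removed, while the Pointer-cast and regular allocating constructors stay. A rough Java sketch of what the removed path allowed, assuming the usual no-argument overload generated for the defaulted options (variable names are hypothetical):

    PoissonNLLLossImpl original = new PoissonNLLLossImpl();  // assumed no-arg overload of the options constructor
    Module erased = original;                                // plain Java upcast; same native shared_ptr
    PoissonNLLLossImpl back = new PoissonNLLLossImpl(erased); // removed by this patch: native static_pointer_cast downcast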
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java
index c0fc9b2a0fb..cd339533abd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java
@@ -38,9 +38,6 @@ public class RNNCellImpl extends RNNCellImplBase {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public RNNCellImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public RNNCellImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::RNNCellImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public RNNCellImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); }
     @SharedPtr @Name("std::make_shared<torch::nn::RNNCellImpl>") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java
index ea4d4c567ce..d583b23f38c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java
@@ -23,9 +23,6 @@ public class RNNCellImplBase extends RNNCellImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public RNNCellImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public RNNCellImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::detail::RNNCellImplBase<torch::nn::RNNCellImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public RNNCellImplBase(@Const @ByRef RNNCellOptionsBase options_) { super((Pointer)null); allocate(options_); }
     private native void allocate(@Const @ByRef RNNCellOptionsBase options_);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java
index 23067331246..3522281f3a9 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java
@@ -22,11 +22,6 @@ public class RNNCellImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public RNNCellImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public RNNCellImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::RNNCellImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::RNNCellImpl>>") Module asModule(@SharedPtr RNNCellImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java
index 269f10ae5f0..05783f8520a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java
@@ -37,9 +37,6 @@ public class RNNImpl extends RNNImplBase {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public RNNImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public RNNImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::RNNImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public RNNImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); }
     @SharedPtr @Name("std::make_shared<torch::nn::RNNImpl>") private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java
index ce7177d1f03..9929e281b00 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java
@@ -23,9 +23,6 @@ public class RNNImplBase extends RNNImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public RNNImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public RNNImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::detail::RNNImplBase<torch::nn::RNNImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public RNNImplBase(@Const @ByRef RNNOptionsBase options_) { super((Pointer)null); allocate(options_); }
     private native void allocate(@Const @ByRef RNNOptionsBase options_);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java
index 55690e589e7..776ee61e974 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java
@@ -22,11 +22,6 @@ public class RNNImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public RNNImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public RNNImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::RNNImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::RNNImpl>>") Module asModule(@SharedPtr RNNImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java
index 44eda46bae7..62a8fd6ea67 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java
@@ -36,9 +36,6 @@ public class RReLUImpl extends RReLUImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public RReLUImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public RReLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::RReLUImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public RReLUImpl(@Const @ByRef(nullValue = "torch::nn::RReLUOptions{}") RReLUOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared<torch::nn::RReLUImpl>") private native void allocate(@Const @ByRef(nullValue = "torch::nn::RReLUOptions{}") RReLUOptions options_);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java
index 1b3ec5d033a..608e5eeb68c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java
@@ -22,11 +22,6 @@ public class RReLUImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public RReLUImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public RReLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::RReLUImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::RReLUImpl>>") Module asModule(@SharedPtr RReLUImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java
index 9b39b0b7d8f..c6e998fd3b1 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java
@@ -36,9 +36,6 @@ public class ReLU6Impl extends ReLU6ImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReLU6Impl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReLU6Impl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ReLU6Impl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public ReLU6Impl(@Const @ByRef(nullValue = "torch::nn::ReLU6Options{}") ReLU6Options options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared<torch::nn::ReLU6Impl>") private native void allocate(@Const @ByRef(nullValue = "torch::nn::ReLU6Options{}") ReLU6Options options_);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java
index fc488ef186a..3869b5543be 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java
@@ -22,11 +22,6 @@ public class ReLU6ImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReLU6ImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReLU6ImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::ReLU6Impl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::ReLU6Impl>>") Module asModule(@SharedPtr ReLU6ImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java
index 78c7e7fc9a1..611e4434ba2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java
@@ -36,9 +36,6 @@ public class ReLUImpl extends ReLUImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReLUImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ReLUImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public ReLUImpl(@Const @ByRef(nullValue = "torch::nn::ReLUOptions{}") ReLUOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared<torch::nn::ReLUImpl>") private native void allocate(@Const @ByRef(nullValue = "torch::nn::ReLUOptions{}") ReLUOptions options_);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java
index dccc5ce35cb..97c8c8e0155 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java
@@ -22,11 +22,6 @@ public class ReLUImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReLUImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::ReLUImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::ReLUImpl>>") Module asModule(@SharedPtr ReLUImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java
index 5fdc8ea83fc..270f2240625 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java
@@ -43,8 +43,5 @@ public class ReflectionPad1dImpl extends ReflectionPad1dImplBase {
     private native void allocate(@Const @ByRef ReflectionPad1dOptions options_);
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReflectionPad1dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReflectionPad1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ReflectionPad1dImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java
index 508157b4619..db124763911 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java
@@ -24,9 +24,6 @@ public class ReflectionPad1dImplBase extends ReflectionPad1dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReflectionPad1dImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReflectionPad1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ReflectionPadImpl<1,torch::nn::ReflectionPad1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public ReflectionPad1dImplBase(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java
index 06e71579768..fa912b87928 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java
@@ -22,11 +22,6 @@ public class ReflectionPad1dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReflectionPad1dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReflectionPad1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::ReflectionPad1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::ReflectionPad1dImpl>>") Module asModule(@SharedPtr ReflectionPad1dImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java
index bee6a940547..ae1160fe8ae 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java
@@ -43,8 +43,5 @@ public class ReflectionPad2dImpl extends ReflectionPad2dImplBase {
     private native void allocate(@Const @ByRef ReflectionPad2dOptions options_);
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReflectionPad2dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReflectionPad2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ReflectionPad2dImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java
index 6452c2bc22b..898e7143f99 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java
@@ -22,9 +22,6 @@ public class ReflectionPad2dImplBase extends ReflectionPad2dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReflectionPad2dImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReflectionPad2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ReflectionPadImpl<2,torch::nn::ReflectionPad2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public ReflectionPad2dImplBase(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java
index 6cf80544d7e..27f9c65e64b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java
@@ -22,11 +22,6 @@ public class ReflectionPad2dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReflectionPad2dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReflectionPad2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::ReflectionPad2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::ReflectionPad2dImpl>>") Module asModule(@SharedPtr ReflectionPad2dImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java
index ede21dcb151..7d5e9807ad8 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java
@@ -44,8 +44,5 @@ public class ReflectionPad3dImpl extends ReflectionPad3dImplBase {
     private native void allocate(@Const @ByRef ReflectionPad3dOptions options_);
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReflectionPad3dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReflectionPad3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ReflectionPad3dImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java
index 53d50abb9cf..fe6fb57060b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java
@@ -22,9 +22,6 @@ public class ReflectionPad3dImplBase extends ReflectionPad3dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReflectionPad3dImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReflectionPad3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ReflectionPadImpl<3,torch::nn::ReflectionPad3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public ReflectionPad3dImplBase(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java
index 8b2c697be02..e42b10962e3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java
@@ -22,11 +22,6 @@ public class ReflectionPad3dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReflectionPad3dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReflectionPad3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::ReflectionPad3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::ReflectionPad3dImpl>>") Module asModule(@SharedPtr ReflectionPad3dImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java
index 1ed82f59743..63fc3e48554 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java
@@ -43,8 +43,5 @@ public class ReplicationPad1dImpl extends ReplicationPad1dImplBase {
     private native void allocate(@Const @ByRef ReplicationPad1dOptions options_);
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReplicationPad1dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReplicationPad1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ReplicationPad1dImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java
index 2e156e39800..bc52ab36db2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java
@@ -26,9 +26,6 @@ public class ReplicationPad1dImplBase extends ReplicationPad1dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReplicationPad1dImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReplicationPad1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ReplicationPadImpl<1,torch::nn::ReplicationPad1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public ReplicationPad1dImplBase(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java
index 93b1f2756ee..f472e1a83d8 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java
@@ -22,11 +22,6 @@ public class ReplicationPad1dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReplicationPad1dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReplicationPad1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::ReplicationPad1dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::ReplicationPad1dImpl>>") Module asModule(@SharedPtr ReplicationPad1dImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
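[Explanatory note, not part of the patch.] The padding base classes keep their surviving constructors unchanged: torch::ExpandingArray<1*2>, a pair of left/right padding values, arrives as a JavaCPP LongPointer. A minimal, illustrative use of the constructor these hunks leave in place (values chosen arbitrarily; this sketch assumes LongPointer's varargs constructor):

    // two longs for ExpandingArray<1*2>: {left, right}
    ReflectionPad1dImplBase pad = new ReflectionPad1dImplBase(new LongPointer(1, 1));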
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java
index 529582c86c0..30640e3f68f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java
@@ -43,8 +43,5 @@ public class ReplicationPad2dImpl extends ReplicationPad2dImplBase {
     private native void allocate(@Const @ByRef ReplicationPad2dOptions options_);
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReplicationPad2dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReplicationPad2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ReplicationPad2dImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java
index 899b36b857d..d25e4f54982 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java
@@ -22,9 +22,6 @@ public class ReplicationPad2dImplBase extends ReplicationPad2dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReplicationPad2dImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReplicationPad2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ReplicationPadImpl<2,torch::nn::ReplicationPad2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public ReplicationPad2dImplBase(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java
index 5e548c2d57e..2ba10fe5f5d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java
@@ -22,11 +22,6 @@ public class ReplicationPad2dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReplicationPad2dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReplicationPad2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::ReplicationPad2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::ReplicationPad2dImpl>>") Module asModule(@SharedPtr ReplicationPad2dImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java
index 9c081408321..360f499b0cc 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java
@@ -43,8 +43,5 @@ public class ReplicationPad3dImpl extends ReplicationPad3dImplBase {
     private native void allocate(@Const @ByRef ReplicationPad3dOptions options_);
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReplicationPad3dImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReplicationPad3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ReplicationPad3dImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java
index ab4bceae459..062cdc1f134 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java
@@ -22,9 +22,6 @@ public class ReplicationPad3dImplBase extends ReplicationPad3dImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReplicationPad3dImplBase(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReplicationPad3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::ReplicationPadImpl<3,torch::nn::ReplicationPad3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public ReplicationPad3dImplBase(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); }
     private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java
index 578426fce46..0e9beedb2b6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java
@@ -22,11 +22,6 @@ public class ReplicationPad3dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ReplicationPad3dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public ReplicationPad3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::ReplicationPad3dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::ReplicationPad3dImpl>>") Module asModule(@SharedPtr ReplicationPad3dImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java
index ff7bb6acc12..db2f93a4c7a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java
@@ -36,9 +36,6 @@ public class SELUImpl extends SELUImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SELUImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public SELUImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::SELUImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public SELUImpl(@Const @ByRef(nullValue = "torch::nn::SELUOptions{}") SELUOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared<torch::nn::SELUImpl>") private native void allocate(@Const @ByRef(nullValue = "torch::nn::SELUOptions{}") SELUOptions options_);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java
index e7c4107de73..548d481eb01 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java
@@ -22,11 +22,6 @@ public class SELUImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SELUImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public SELUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::SELUImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::SELUImpl>>") Module asModule(@SharedPtr SELUImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java
index 85e598b770b..329dab60ef1 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java
@@ -91,9 +91,6 @@ public class SequentialImpl extends SequentialImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SequentialImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public SequentialImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::SequentialImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public SequentialImpl() { super((Pointer)null); allocate(); }
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java
index f405cc12ba8..f87ed1c64d5 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java
@@ -22,11 +22,6 @@ public class SequentialImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SequentialImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public SequentialImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::SequentialImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::SequentialImpl>>") Module asModule(@SharedPtr SequentialImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java
index e2330221441..f4043e63c10 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java
@@ -31,9 +31,6 @@ public class SiLUImpl extends SiLUImplCloneable {
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SiLUImpl(Pointer p) { super(p); }
     @SharedPtr @Name("std::make_shared<torch::nn::SiLUImpl>") private native void allocate();
-    /** Downcast constructor. */
-    public SiLUImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::SiLUImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public native @ByVal Tensor forward(@Const @ByRef Tensor input);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java
index da71df7e39c..6f1b7102450 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java
@@ -22,11 +22,6 @@ public class SiLUImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SiLUImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public SiLUImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::SiLUImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::SiLUImpl>>") Module asModule(@SharedPtr SiLUImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
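[Explanatory note, not part of the patch.] Alongside the downcast constructor, each *ImplCloneable hunk also drops the asModule() override and its static native helper, i.e. the cast from the concrete binding back to a torch::nn::Module sharing the same native shared_ptr. A sketch of the removed round trip (SiLUImpl's no-argument constructor is implied by the allocate() visible in its hunk above):

    SiLUImpl silu = new SiLUImpl();
    Module m = silu.asModule();   // removed by this patch; the plain Java upcast `Module m2 = silu;` still works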
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java
index 3ffe65add00..4499765c75e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java
@@ -31,9 +31,6 @@ public class SigmoidImpl extends SigmoidImplCloneable {
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SigmoidImpl(Pointer p) { super(p); }
     @SharedPtr @Name("std::make_shared<torch::nn::SigmoidImpl>") private native void allocate();
-    /** Downcast constructor. */
-    public SigmoidImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::SigmoidImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public native @ByVal Tensor forward(@Const @ByRef Tensor input);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java
index 3008eca0dd3..5bdcf66cf60 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java
@@ -22,11 +22,6 @@ public class SigmoidImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SigmoidImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public SigmoidImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::SigmoidImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::SigmoidImpl>>") Module asModule(@SharedPtr SigmoidImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java
index bf319e7e133..f1248ab6a61 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java
@@ -40,9 +40,6 @@ public class SmoothL1LossImpl extends SmoothL1LossImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SmoothL1LossImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public SmoothL1LossImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::SmoothL1LossImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public SmoothL1LossImpl(@ByVal(nullValue = "torch::nn::SmoothL1LossOptions{}") SmoothL1LossOptions options) { super((Pointer)null); allocate(options); }
     @SharedPtr @Name("std::make_shared<torch::nn::SmoothL1LossImpl>") private native void allocate(@ByVal(nullValue = "torch::nn::SmoothL1LossOptions{}") SmoothL1LossOptions options);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java
index 74793bb5d7b..1a110aeb13f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java
@@ -22,11 +22,6 @@ public class SmoothL1LossImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SmoothL1LossImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public SmoothL1LossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::SmoothL1LossImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::SmoothL1LossImpl>>") Module asModule(@SharedPtr SmoothL1LossImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java
index e158a3d291c..946407d682e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java
@@ -39,9 +39,6 @@ public class SoftMarginLossImpl extends SoftMarginLossImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SoftMarginLossImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public SoftMarginLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::SoftMarginLossImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public SoftMarginLossImpl(@ByVal(nullValue = "torch::nn::SoftMarginLossOptions{}") SoftMarginLossOptions options_) { super((Pointer)null); allocate(options_); }
     @SharedPtr @Name("std::make_shared<torch::nn::SoftMarginLossImpl>") private native void allocate(@ByVal(nullValue = "torch::nn::SoftMarginLossOptions{}") SoftMarginLossOptions options_);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java
index 2d30bfdf0e1..b0ca1eb8581 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java
@@ -22,11 +22,6 @@ public class SoftMarginLossImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SoftMarginLossImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public SoftMarginLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::SoftMarginLossImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::SoftMarginLossImpl>>") Module asModule(@SharedPtr SoftMarginLossImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java
index 21f75fff99d..69a87bcb6e5 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java
@@ -31,9 +31,6 @@ public class Softmax2dImpl extends Softmax2dImplCloneable {
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public Softmax2dImpl(Pointer p) { super(p); }
     @SharedPtr @Name("std::make_shared<torch::nn::Softmax2dImpl>") private native void allocate();
-    /** Downcast constructor. */
-    public Softmax2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Softmax2dImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
 
     public native @ByVal Tensor forward(@Const @ByRef Tensor input);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java
index ae8fac057c6..348109edafb 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java
@@ -22,11 +22,6 @@ public class Softmax2dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public Softmax2dImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public Softmax2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::Softmax2dImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::Softmax2dImpl>>") Module asModule(@SharedPtr Softmax2dImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java
index ec2af7738da..81e6488b75e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java
@@ -36,9 +36,6 @@ public class SoftmaxImpl extends SoftmaxImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SoftmaxImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public SoftmaxImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::SoftmaxImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public SoftmaxImpl(@Cast("int64_t") long dim) { super((Pointer)null); allocate(dim); }
     @SharedPtr @Name("std::make_shared<torch::nn::SoftmaxImpl>") private native void allocate(@Cast("int64_t") long dim);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java
index 6abcf2438d6..09ec3d76ded 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java
@@ -22,11 +22,6 @@ public class SoftmaxImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SoftmaxImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public SoftmaxImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::SoftmaxImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::SoftmaxImpl>>") Module asModule(@SharedPtr SoftmaxImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java
index c07747a05f1..588200ee4a7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java
@@ -36,9 +36,6 @@ public class SoftminImpl extends SoftminImplCloneable {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SoftminImpl(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public SoftminImpl(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::SoftminImpl, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
     public SoftminImpl(@Cast("int64_t") long dim) { super((Pointer)null); allocate(dim); }
     @SharedPtr @Name("std::make_shared<torch::nn::SoftminImpl>") private native void allocate(@Cast("int64_t") long dim);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java
index 3d63cc46fe2..9e001c47610 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java
@@ -22,11 +22,6 @@ public class SoftminImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SoftminImplCloneable(Pointer p) { super(p); }
-    /** Downcast constructor. */
-    public SoftminImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); }
-    @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Cloneable<torch::nn::SoftminImpl>, torch::nn::Module>") void allocate(@SharedPtr Module pointer);
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::SoftminImpl>>") Module asModule(@SharedPtr SoftminImplCloneable pointer);
 
 
     /** {@code reset()} must perform initialization of all members with reference
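[Explanatory note, not part of the patch.] The dimension-taking modules are untouched apart from the downcast removal: SoftmaxImpl and SoftminImpl keep the constructor that maps int64_t dim to a Java long via @Cast. Usage is unchanged by this patch (sketch; forward(Tensor) is assumed from the rest of the generated class, and logits is a placeholder input):

    SoftmaxImpl softmax = new SoftmaxImpl(1);   // softmax over dimension 1
    Tensor probs = softmax.forward(logits);     // logits: some input Tensor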
*/ - public SoftplusImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public SoftplusImpl(@Const @ByRef(nullValue = "torch::nn::SoftplusOptions{}") SoftplusOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::SoftplusOptions{}") SoftplusOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java index cef1e58c41a..14e360fd8d1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java @@ -22,11 +22,6 @@ public class SoftplusImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftplusImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public SoftplusImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftplusImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java index ab7d3c27344..916f0f2470d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java @@ -36,9 +36,6 @@ public class SoftshrinkImpl extends SoftshrinkImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftshrinkImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public SoftshrinkImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public SoftshrinkImpl(@Const @ByRef(nullValue = "torch::nn::SoftshrinkOptions{}") SoftshrinkOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::SoftshrinkOptions{}") SoftshrinkOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java index 0cb37f3814b..d56e6f52bfb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java @@ -22,11 +22,6 @@ public class SoftshrinkImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftshrinkImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public SoftshrinkImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftshrinkImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java index 9e6ed477696..21821550f3a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java @@ -31,9 +31,6 @@ public class SoftsignImpl extends SoftsignImplCloneable { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftsignImpl(Pointer p) { super(p); } @SharedPtr @Name("std::make_shared") private native void allocate(); - /** Downcast constructor. */ - public SoftsignImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java index 3f6b13ff1af..50f209625d9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java @@ -22,11 +22,6 @@ public class SoftsignImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftsignImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public SoftsignImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftsignImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDict.java index 19a79b3c2f4..db63d62880d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDict.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDict.java @@ -149,10 +149,8 @@ public class StringSharedModuleDict extends Pointer { /** Inserts a new {@code (key, value)} pair into the {@code OrderedDict}. Throws an * exception if the key is already present. If insertion is successful, * immediately returns a reference to the inserted value. 
*/ - public Module insert(BytePointer key, Module value) { return _insert(key, value.asModule()); } - private native @SharedPtr("torch::nn::Module") @ByRef @Name("insert") Module _insert(@StdString BytePointer key, @SharedPtr("torch::nn::Module") @ByRef(true) Module value); - public Module insert(String key, Module value) { return _insert(key, value.asModule()); } - private native @SharedPtr("torch::nn::Module") @ByRef @Name("insert") Module _insert(@StdString String key, @SharedPtr("torch::nn::Module") @ByRef(true) Module value); + public native @SharedPtr("torch::nn::Module") @ByRef Module insert(@StdString BytePointer key, @SharedPtr("torch::nn::Module") @ByRef(true) Module value); + public native @SharedPtr("torch::nn::Module") @ByRef Module insert(@StdString String key, @SharedPtr("torch::nn::Module") @ByRef(true) Module value); /** Inserts all items from {@code other} into this {@code OrderedDict}. If any key from * {@code other} is already present in this {@code OrderedDict}, an exception is thrown. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItem.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItem.java index 3c579f723c6..1500915d00d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItem.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItem.java @@ -25,9 +25,9 @@ public class StringSharedModuleDictItem extends Pointer { public StringSharedModuleDictItem(Pointer p) { super(p); } /** Constructs a new item. */ - public StringSharedModuleDictItem(@StdString BytePointer key, @SharedPtr("torch::nn::Module") @ByVal Module value) { super((Pointer)null); allocate(key, value.asModule()); } + public StringSharedModuleDictItem(@StdString BytePointer key, @SharedPtr("torch::nn::Module") @ByVal Module value) { super((Pointer)null); allocate(key, value); } private native void allocate(@StdString BytePointer key, @SharedPtr("torch::nn::Module") @ByVal Module value); - public StringSharedModuleDictItem(@StdString String key, @SharedPtr("torch::nn::Module") @ByVal Module value) { super((Pointer)null); allocate(key, value.asModule()); } + public StringSharedModuleDictItem(@StdString String key, @SharedPtr("torch::nn::Module") @ByVal Module value) { super((Pointer)null); allocate(key, value); } private native void allocate(@StdString String key, @SharedPtr("torch::nn::Module") @ByVal Module value); /** Returns a reference to the value. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java index 49ff49e6bb5..1cc7b4be3b9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java @@ -31,9 +31,6 @@ public class TanhImpl extends TanhImplCloneable { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TanhImpl(Pointer p) { super(p); } @SharedPtr @Name("std::make_shared") private native void allocate(); - /** Downcast constructor. 
*/ - public TanhImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java index ebe945a4e09..d47b9e29c2c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java @@ -22,11 +22,6 @@ public class TanhImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TanhImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public TanhImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TanhImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java index bd234d27782..6fe1ce3447a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java @@ -31,9 +31,6 @@ public class TanhshrinkImpl extends TanhshrinkImplCloneable { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TanhshrinkImpl(Pointer p) { super(p); } @SharedPtr @Name("std::make_shared") private native void allocate(); - /** Downcast constructor. */ - public TanhshrinkImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java index 88f863f1590..59023b4125e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java @@ -22,11 +22,6 @@ public class TanhshrinkImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TanhshrinkImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public TanhshrinkImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TanhshrinkImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java index 1cf586d9197..190eccca7cf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java @@ -36,9 +36,6 @@ public class ThresholdImpl extends ThresholdImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ThresholdImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public ThresholdImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public ThresholdImpl(double threshold, double value) { super((Pointer)null); allocate(threshold, value); } @SharedPtr @Name("std::make_shared") private native void allocate(double threshold, double value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java index 173f4c7aa8f..2850f512aa9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java @@ -22,11 +22,6 @@ public class ThresholdImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ThresholdImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public ThresholdImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ThresholdImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java index 00b455cd0cc..a3e2fa4533a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java @@ -43,9 +43,6 @@ public class TransformerDecoderImpl extends TransformerDecoderImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerDecoderImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public TransformerDecoderImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public TransformerDecoderImpl(@ByVal TransformerDecoderOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal TransformerDecoderOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java index 0952a34e536..e0a09023b31 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java @@ -22,11 +22,6 @@ public class TransformerDecoderImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerDecoderImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public TransformerDecoderImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerDecoderImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java index 9b3b7fbcf30..37d65fa799d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java @@ -44,9 +44,6 @@ public class TransformerDecoderLayerImpl extends TransformerDecoderLayerImplClon static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerDecoderLayerImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public TransformerDecoderLayerImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public TransformerDecoderLayerImpl(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead) { super((Pointer)null); allocate(d_model, nhead); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java index 9a22ba5f73a..4be3281c13e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java @@ -22,11 +22,6 @@ public class TransformerDecoderLayerImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerDecoderLayerImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public TransformerDecoderLayerImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerDecoderLayerImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java index 2f4098f91e0..f2ec2b0690a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java @@ -41,9 +41,6 @@ public class TransformerEncoderImpl extends TransformerEncoderImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerEncoderImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public TransformerEncoderImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public TransformerEncoderImpl(@ByVal TransformerEncoderOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal TransformerEncoderOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java index f02de944e70..84dd60da29c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java @@ -22,11 +22,6 @@ public class TransformerEncoderImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerEncoderImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public TransformerEncoderImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerEncoderImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java index d5e5b088b14..51d24df51b4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java @@ -39,9 +39,6 @@ public class TransformerEncoderLayerImpl extends TransformerEncoderLayerImplClon static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerEncoderLayerImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public TransformerEncoderLayerImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public TransformerEncoderLayerImpl(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead) { super((Pointer)null); allocate(d_model, nhead); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java index 1da2de3da14..4fdea95b083 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java @@ -22,11 +22,6 @@ public class TransformerEncoderLayerImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerEncoderLayerImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public TransformerEncoderLayerImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerEncoderLayerImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java index 9122e5572f9..072bb4e5ad3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java @@ -41,9 +41,6 @@ public class TransformerImpl extends TransformerImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public TransformerImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); /// diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java index b73673a9341..777bb01d885 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java @@ -22,11 +22,6 @@ public class TransformerImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public TransformerImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java index ae6b43a4a1f..5122ca9d766 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java @@ -43,9 +43,6 @@ public class TripletMarginLossImpl extends TripletMarginLossImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TripletMarginLossImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public TripletMarginLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public TripletMarginLossImpl(@ByVal(nullValue = "torch::nn::TripletMarginLossOptions{}") TripletMarginLossOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal(nullValue = "torch::nn::TripletMarginLossOptions{}") TripletMarginLossOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java index a8c325d521b..ad737e830b5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java @@ -22,11 +22,6 @@ public class TripletMarginLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TripletMarginLossImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public TripletMarginLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TripletMarginLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java index 09468e99ad6..242a2cf4aa6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java @@ -45,9 +45,6 @@ public class TripletMarginWithDistanceLossImpl extends TripletMarginWithDistance static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public TripletMarginWithDistanceLossImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public TripletMarginWithDistanceLossImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public TripletMarginWithDistanceLossImpl( @ByVal(nullValue = "torch::nn::TripletMarginWithDistanceLossOptions{}") TripletMarginWithDistanceLossOptions options_) { super((Pointer)null); allocate(options_); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java index fe04daf388d..843c61c63c4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java @@ -22,11 +22,6 @@ public class TripletMarginWithDistanceLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TripletMarginWithDistanceLossImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public TripletMarginWithDistanceLossImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TripletMarginWithDistanceLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java index ea4acc7c462..cbf9f84ecf5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java @@ -38,9 +38,6 @@ public class UnflattenImpl extends UnflattenImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public UnflattenImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public UnflattenImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public UnflattenImpl(@Cast("int64_t") long dim, @ByVal @Cast("std::vector*") LongVector sizes) { super((Pointer)null); allocate(dim, sizes); } @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("int64_t") long dim, @ByVal @Cast("std::vector*") LongVector sizes); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java index 433aed2064f..00044bc8d64 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java @@ -22,11 +22,6 @@ public class UnflattenImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public UnflattenImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public UnflattenImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr UnflattenImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java index 7ef3c6540c9..e0b6dd05e50 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java @@ -36,9 +36,6 @@ public class UnfoldImpl extends UnfoldImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public UnfoldImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public UnfoldImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public UnfoldImpl(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java index 8247aaeec61..00ab692334a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java @@ -22,11 +22,6 @@ public class UnfoldImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public UnfoldImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public UnfoldImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr UnfoldImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java index 3cba3256a4e..8747042ea58 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java @@ -38,9 +38,6 @@ public class UpsampleImpl extends UpsampleImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public UpsampleImpl(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public UpsampleImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); public UpsampleImpl(@Const @ByRef(nullValue = "torch::nn::UpsampleOptions{}") UpsampleOptions options_) { super((Pointer)null); allocate(options_); } @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef(nullValue = "torch::nn::UpsampleOptions{}") UpsampleOptions options_); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java index 513a3807d5d..5c93181455a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java @@ -22,11 +22,6 @@ public class UpsampleImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public UpsampleImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public UpsampleImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr UpsampleImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImpl.java index c9bf9acee5e..aa149e13097 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImpl.java @@ -31,8 +31,5 @@ public class ZeroPad1dImpl extends ZeroPad1dImplBase { private native void allocate(@Const @ByRef ZeroPad1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ZeroPad1dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public ZeroPad1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplBase.java index 7df9d347f13..be9f231d9b1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplBase.java @@ -26,9 +26,6 @@ public class ZeroPad1dImplBase extends ZeroPad1dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ZeroPad1dImplBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public ZeroPad1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ZeroPad1dImplBase(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java index 5f9f177afd9..e3cc2eea0c2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java @@ -22,11 +22,6 @@ public class ZeroPad1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ZeroPad1dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public ZeroPad1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ZeroPad1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java index fc715f83c07..def78305b92 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java @@ -31,8 +31,5 @@ public class ZeroPad2dImpl extends ZeroPad2dImplBase { private native void allocate(@Const @ByRef ZeroPad2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ZeroPad2dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public ZeroPad2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplBase.java index 5bb6045c1e9..bb513e4baf7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplBase.java @@ -22,9 +22,6 @@ public class ZeroPad2dImplBase extends ZeroPad2dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ZeroPad2dImplBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public ZeroPad2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ZeroPad2dImplBase(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java index f259214825f..a9d0b15cdcf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java @@ -22,11 +22,6 @@ public class ZeroPad2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ZeroPad2dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public ZeroPad2dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ZeroPad2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImpl.java index be8360e1255..2840fc1bec9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImpl.java @@ -31,8 +31,5 @@ public class ZeroPad3dImpl extends ZeroPad3dImplBase { private native void allocate(@Const @ByRef ZeroPad3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ZeroPad3dImpl(Pointer p) { super(p); } - /** Downcast constructor. */ - public ZeroPad3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast") void allocate(@SharedPtr Module pointer); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplBase.java index e540737da5f..0b1d5bac38c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplBase.java @@ -22,9 +22,6 @@ public class ZeroPad3dImplBase extends ZeroPad3dImplCloneable { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ZeroPad3dImplBase(Pointer p) { super(p); } - /** Downcast constructor. 
*/ - public ZeroPad3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); public ZeroPad3dImplBase(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java index 0ff22ca3eaf..ef5c990d605 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java @@ -22,11 +22,6 @@ public class ZeroPad3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ZeroPad3dImplCloneable(Pointer p) { super(p); } - /** Downcast constructor. */ - public ZeroPad3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } - @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ZeroPad3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index ca7d939c01e..c00e43d2653 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -67894,19 +67894,16 @@ The list of (type, depth) pairs controls the type of specializations and the num // Targeting ../Module.java -@Namespace("torch::nn") public static Pointer shiftLeft(Pointer stream, Module module) { return _shiftLeft(stream, module.asModule()); } -private static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer _shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Const @ByRef Module module); +@Namespace("torch::nn") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Const @ByRef Module module); /** Serialize a {@code Module} pointer into an {@code OutputArchive}. */ -@Namespace("torch::nn") public static OutputArchive shiftLeft(OutputArchive archive, Module module) { return _shiftLeft(archive, module.asModule()); } -private static native @ByRef @Name("operator <<") OutputArchive _shiftLeft( +@Namespace("torch::nn") public static native @ByRef @Name("operator <<") OutputArchive shiftLeft( @ByRef OutputArchive archive, @Const @SharedPtr("torch::nn::Module") @ByRef Module module); /** Deserializes a {@code Module} from an {@code InputArchive}. 
*/ -@Namespace("torch::nn") public static InputArchive shiftRight(InputArchive archive, Module module) { return _shiftRight(archive, module.asModule()); } -private static native @ByRef @Name("operator >>") InputArchive _shiftRight( +@Namespace("torch::nn") public static native @ByRef @Name("operator >>") InputArchive shiftRight( @ByRef InputArchive archive, @Const @SharedPtr("torch::nn::Module") @ByRef Module module); diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 5f6fec8043c..41006e58beb 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -1568,11 +1568,10 @@ public void map(InfoMap infoMap) { // native subclasses. .put(new Info("torch::nn::Module::register_module").javaText( "private native @Name(\"register_module\") void _register_module(@StdString BytePointer name, @SharedPtr @ByVal Module module);\n" + - "public M register_module(BytePointer name, M module) { asModule()._register_module(name, module.asModule()); return module; }\n" + + "public M register_module(BytePointer name, M module) { _register_module(name, module); return module; }\n" + "private native @Name(\"register_module\") void _register_module(@StdString String name, @SharedPtr @ByVal Module module);\n" + - "public M register_module(String name, M module) { asModule()._register_module(name, module.asModule()); return module; }" + "public M register_module(String name, M module) { _register_module(name, module); return module; }" )) - .put(new Info("torch::nn::Module").upcast()) ; String[] virtuals = {"train", "is_training", "to", "zero_grad", "save", "load", "pretty_print", "is_serializable"}; for (String m : virtuals) From 84bfcec2a7ba4f490ed300780f2dc42c3f017adb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Thu, 8 Feb 2024 08:07:31 +0100 Subject: [PATCH 15/24] Virtualize FunctionPreHook and FunctionPostHook. Remove @StdMove on TensorVector valueTypes. 
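
With apply() now declared @Virtual(true) and a default constructor
allocated through std::make_unique, these hook classes are no longer
opaque: the autograd engine can dispatch into a Java override, so pre-
and post-hooks can be implemented directly in Java. As a rough sketch of
the kind of code this enables (the class name is invented for
illustration, and attaching the hook to a Node is not shown in these
hunks), a pass-through pre-hook that only reports how many gradients
reach a node might look like:

    import org.bytedeco.pytorch.FunctionPreHook;
    import org.bytedeco.pytorch.TensorVector;

    public class LoggingPreHook extends FunctionPreHook {
        // Called by the engine before the node's backward function runs.
        @Override public TensorVector apply(TensorVector grads) {
            System.out.println("node received " + grads.size() + " gradient(s)");
            return grads; // returned by value, gradients pass through unchanged
        }
    }

Removing @StdMove from the TensorVector value types is the second half
of the change: parameters such as the optimizers' params and
AutogradContext::save_for_backward's to_save become plain @ByVal or
@Const @ByRef vectors, as the hunks below show.
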
--- .../java/org/bytedeco/pytorch/Adagrad.java | 8 +- .../gen/java/org/bytedeco/pytorch/Adam.java | 8 +- .../gen/java/org/bytedeco/pytorch/AdamW.java | 8 +- .../org/bytedeco/pytorch/AutogradContext.java | 8 +- .../bytedeco/pytorch/FunctionPostHook.java | 11 +- .../org/bytedeco/pytorch/FunctionPreHook.java | 7 +- .../org/bytedeco/pytorch/GRUImplBase.java | 2 +- .../gen/java/org/bytedeco/pytorch/IValue.java | 2 +- .../gen/java/org/bytedeco/pytorch/LBFGS.java | 8 +- .../org/bytedeco/pytorch/LSTMImplBase.java | 2 +- .../gen/java/org/bytedeco/pytorch/Module.java | 8 +- .../gen/java/org/bytedeco/pytorch/Node.java | 6 +- .../java/org/bytedeco/pytorch/Optimizer.java | 2 +- .../bytedeco/pytorch/OptimizerParamGroup.java | 8 +- .../bytedeco/pytorch/ParameterDictImpl.java | 2 +- .../java/org/bytedeco/pytorch/Pickler.java | 2 +- .../org/bytedeco/pytorch/PyObjectHolder.java | 2 +- .../java/org/bytedeco/pytorch/RMSprop.java | 8 +- .../org/bytedeco/pytorch/RNNImplBase.java | 2 +- .../gen/java/org/bytedeco/pytorch/SGD.java | 4 +- .../bytedeco/pytorch/StringTensorDict.java | 2 +- .../T_TensorTensorTensorTensorVector_T.java | 8 +- .../T_TensorTensorVectorTensorVector_T.java | 12 +- .../pytorch/T_TensorTensorVector_T.java | 8 +- .../gen/java/org/bytedeco/pytorch/Tensor.java | 110 +++---- .../org/bytedeco/pytorch/TensorArrayRef.java | 2 +- .../org/bytedeco/pytorch/TensorDataset.java | 4 +- .../java/org/bytedeco/pytorch/TensorList.java | 2 +- .../org/bytedeco/pytorch/TensorVector.java | 2 +- .../pytorch/TensorVectorOptional.java | 6 +- .../org/bytedeco/pytorch/global/torch.java | 272 +++++++++--------- .../org/bytedeco/pytorch/presets/torch.java | 35 ++- 32 files changed, 298 insertions(+), 273 deletions(-) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java index 17d93d7c548..01a7f182ea6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java @@ -35,10 +35,10 @@ public Adagrad( private native void allocate( @ByVal OptimizerParamGroupVector param_groups); - public Adagrad(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults) { super((Pointer)null); allocate(params, defaults); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults); - public Adagrad(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params); + public Adagrad(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults) { super((Pointer)null); allocate(params, defaults); } + private native void allocate(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults); + public Adagrad(@ByVal TensorVector params) { super((Pointer)null); allocate(params); } + private native void allocate(@ByVal TensorVector params); public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure); public native @ByVal Tensor step(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java index 90a85d786dd..9b3ddfe5273 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java @@ -34,10 +34,10 @@ public Adam( @ByVal OptimizerParamGroupVector param_groups) { super((Pointer)null); allocate(param_groups); } private native void allocate( @ByVal OptimizerParamGroupVector param_groups); - public Adam(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults) { super((Pointer)null); allocate(params, defaults); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults); - public Adam(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params); + public Adam(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults) { super((Pointer)null); allocate(params, defaults); } + private native void allocate(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults); + public Adam(@ByVal TensorVector params) { super((Pointer)null); allocate(params); } + private native void allocate(@ByVal TensorVector params); public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure); public native @ByVal Tensor step(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java index 89fde2d5d67..44de72027df 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java @@ -34,10 +34,10 @@ public AdamW( @ByVal OptimizerParamGroupVector param_groups) { super((Pointer)null); allocate(param_groups); } private native void allocate( @ByVal OptimizerParamGroupVector param_groups); - public AdamW(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults) { super((Pointer)null); allocate(params, defaults); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults); - public AdamW(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params); + public AdamW(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults) { super((Pointer)null); allocate(params, defaults); } + private native void allocate(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults); + public AdamW(@ByVal TensorVector params) { super((Pointer)null); allocate(params); } + private native void allocate(@ByVal TensorVector params); public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure); public native @ByVal Tensor step(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java index 20d33e18457..d7cc6f65b8c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java @@ -46,15 +46,15 @@ public class AutogradContext extends Pointer { /** Saves the list of variables for a future call to {@code backward}. 
This * should be called at most once from inside of {@code forward}. */ - public native void save_for_backward(@Cast({"", "std::vector"}) @StdMove TensorVector to_save); + public native void save_for_backward(@ByVal TensorVector to_save); /** Marks variables in the list as modified in an in-place operation. This * should be called at most once from inside of {@code forward} and all arguments * should be inputs. */ - public native void mark_dirty(@Cast({"", "std::vector"}) @StdMove TensorVector inputs); + public native void mark_dirty(@Const @ByRef TensorVector inputs); /** Marks outputs in the list as not requiring gradients. This should be * called at most once from inside of {@code forward} and all arguments should be * outputs. */ - public native void mark_non_differentiable(@Cast({"", "std::vector"}) @StdMove TensorVector outputs); + public native void mark_non_differentiable(@Const @ByRef TensorVector outputs); // Sets whether undefined output grad tensors should be expanded to tensors // full of zeros before calling backward function. Default value is true. public native void set_materialize_grads(@Cast("bool") boolean value); @@ -62,7 +62,7 @@ public class AutogradContext extends Pointer { /** Get the list of variables that were saved in {@code forward} using * {@code save_for_backward()}. Before returning them to the user, a check is made * to ensure that they were not modified by any in-place operations. */ - public native @Cast({"", "std::vector"}) @StdMove TensorVector get_saved_variables(); + public native @ByVal TensorVector get_saved_variables(); public native @Const @ByRef TensorImplSet get_and_bump_dirty(); public native @Const @ByRef TensorImplSet get_non_differentiable(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java index e0d5e3429e0..c6810f6921a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java @@ -21,12 +21,15 @@ @Namespace("torch::autograd") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class FunctionPostHook extends Pointer { static { Loader.load(); } + /** Default native constructor. */ + public FunctionPostHook() { super((Pointer)null); allocate(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public FunctionPostHook(Pointer p) { super(p); } + @UniquePtr @Name("std::make_unique") private native void allocate(); - public native @Name("operator ()") @Cast({"", "std::vector"}) @StdMove TensorVector apply( - @Cast({"", "std::vector"}) @StdMove TensorVector outputs, - @Cast({"", "std::vector"}) @StdMove TensorVector inputs); + @Virtual(true) public native @ByVal @Name("operator ()") TensorVector apply( + @Const @ByRef TensorVector outputs, + @Const @ByRef TensorVector inputs); // only implemented for python hooks, registers hook with compiled autograd - public native void compiled_args(@ByRef CompiledNodeArgs args); + @Virtual public native void compiled_args(@ByRef CompiledNodeArgs args); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java index 19614a70941..ebc7e2bb154 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java @@ -21,10 +21,13 @@ @Namespace("torch::autograd") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class FunctionPreHook extends Pointer { static { Loader.load(); } + /** Default native constructor. */ + public FunctionPreHook() { super((Pointer)null); allocate(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FunctionPreHook(Pointer p) { super(p); } + @UniquePtr @Name("std::make_unique") private native void allocate(); - public native @Name("operator ()") @Cast({"", "std::vector"}) @StdMove TensorVector apply(@Cast({"", "std::vector"}) @StdMove TensorVector grads); + @Virtual(true) public native @ByVal @Name("operator ()") TensorVector apply(@Const @ByRef TensorVector grads); // only implemented for python hooks, registers hook with compiled autograd - public native void compiled_args(@ByRef CompiledNodeArgs args); + @Virtual public native void compiled_args(@ByRef CompiledNodeArgs args); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java index 2616fa4f3e3..fb09dff96af 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java @@ -55,7 +55,7 @@ public class GRUImplBase extends GRUImplCloneable { * called once upon construction, inside {@code reset()}. */ public native void flatten_parameters(); - public native @Cast({"", "std::vector"}) @StdMove TensorVector all_weights(); + public native @ByVal TensorVector all_weights(); /** The RNN's options. 
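The FunctionPreHook and FunctionPostHook changes above are more than cosmetic: the added default constructor (allocated through std::make_unique) together with the @Virtual markers is what lets these hooks be subclassed from Java, with native autograd code dispatching into the Java override through the vtable. A minimal sketch of a pass-through pre-hook; the class name is invented, and registering the hook with a Node is not shown here:

    import org.bytedeco.pytorch.*;

    public class LoggingPreHook extends FunctionPreHook {
        // Overrides the pure-virtual operator(); invoked from native code.
        @Override public TensorVector apply(TensorVector grads) {
            System.out.println("pre-hook saw " + grads.size() + " gradient(s)");
            return grads; // forward the gradients unchanged
        }
    }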
*/ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java index 8a03b49384b..d6605db4342 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java @@ -318,7 +318,7 @@ public class IValue extends Pointer { public native @Cast("bool") boolean isTensorList(); public native @ByVal TensorList toTensorList(); - public native @Cast({"", "std::vector"}) @StdMove TensorVector toTensorVector(); + public native @ByVal TensorVector toTensorVector(); // OptionalTensorList public native @Cast("bool") boolean isOptionalTensorList(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGS.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGS.java index 192e446e6fd..92b66abae10 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGS.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGS.java @@ -34,10 +34,10 @@ public LBFGS( @ByVal OptimizerParamGroupVector param_groups) { super((Pointer)null); allocate(param_groups); } private native void allocate( @ByVal OptimizerParamGroupVector param_groups); - public LBFGS(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::LBFGSOptions{}") LBFGSOptions defaults) { super((Pointer)null); allocate(params, defaults); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::LBFGSOptions{}") LBFGSOptions defaults); - public LBFGS(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params); + public LBFGS(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::LBFGSOptions{}") LBFGSOptions defaults) { super((Pointer)null); allocate(params, defaults); } + private native void allocate(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::LBFGSOptions{}") LBFGSOptions defaults); + public LBFGS(@ByVal TensorVector params) { super((Pointer)null); allocate(params); } + private native void allocate(@ByVal TensorVector params); public native @ByVal Tensor step(@ByVal LossClosure closure); public native void save(@ByRef OutputArchive archive); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java index e5eef332d8b..967a7e27eb3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java @@ -55,7 +55,7 @@ public class LSTMImplBase extends LSTMImplCloneable { * called once upon construction, inside {@code reset()}. */ public native void flatten_parameters(); - public native @Cast({"", "std::vector"}) @StdMove TensorVector all_weights(); + public native @ByVal TensorVector all_weights(); /** The RNN's options. */ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java index 3531035392f..fc0e895c829 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java @@ -220,8 +220,8 @@ public native void apply( /** Returns the parameters of this {@code Module} and if {@code recurse} is true, also * recursively of every submodule. 
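Following the IValue hunk above, toTensorVector() now returns the vector @ByVal. A short sketch of unpacking a Tensor[] result; iv is assumed to have been produced elsewhere, for example by a TorchScript module call:

    if (iv.isTensorList()) {
        TensorVector tensors = iv.toTensorVector(); // by-value copy of the list
        for (long i = 0; i < tensors.size(); i++) {
            Tensor t = tensors.get(i);
            // ... use t ...
        }
    }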
*/ - public native @Cast({"", "std::vector"}) @StdMove TensorVector parameters(@Cast("bool") boolean recurse/*=true*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector parameters(); + public native @ByVal TensorVector parameters(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal TensorVector parameters(); /** Returns an {@code OrderedDict} with the parameters of this {@code Module} along with * their keys, and if {@code recurse} is true also recursively of every submodule. */ @@ -230,8 +230,8 @@ public native void apply( /** Returns the buffers of this {@code Module} and if {@code recurse} is true, also * recursively of every submodule. */ - public native @Cast({"", "std::vector"}) @StdMove TensorVector buffers(@Cast("bool") boolean recurse/*=true*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector buffers(); + public native @ByVal TensorVector buffers(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal TensorVector buffers(); /** Returns an {@code OrderedDict} with the buffers of this {@code Module} along with * their keys, and if {@code recurse} is true also recursively of every submodule. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java index dd94e37e3c5..d4e91331d20 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java @@ -85,7 +85,7 @@ public class Node extends Pointer { public native @SharedPtr Node getptr(); /** Evaluates the function on the given inputs and returns the result of the * function call. */ - public native @Name("operator ()") @Cast({"", "std::vector"}) @StdMove TensorVector apply(@Cast({"", "std::vector"}) @StdMove TensorVector inputs); + public native @ByVal @Name("operator ()") TensorVector apply(@ByRef(true) TensorVector inputs); // Graph Connectivity API //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -324,7 +324,7 @@ public native void add_retains_grad_hook( // Used by compiled autograd to call apply() with different saved tensors // Implementations should call saved.before() on all attrs, then apply(), then // saved.after() on all attrs in the same order. - public native @Cast({"", "std::vector"}) @StdMove TensorVector apply_with_saved( - @Cast({"", "std::vector"}) @StdMove TensorVector inputs, + public native @ByVal TensorVector apply_with_saved( + @Const @ByRef TensorVector inputs, @ByRef SwapSavedVariables saved); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Optimizer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Optimizer.java index a589c87b0b7..a5465d88544 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Optimizer.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Optimizer.java @@ -37,7 +37,7 @@ public class Optimizer extends Pointer { public native @ByVal Tensor step(); /** Adds the given vector of parameters to the optimizer's parameter list. */ - public native void add_parameters(@Cast({"", "std::vector"}) @StdMove TensorVector parameters); + public native void add_parameters(@Const @ByRef TensorVector parameters); /** Zeros out the gradients of all parameters. 
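The Module and Optimizer hunks above combine naturally: parameters() hands back a TensorVector by value, and add_parameters() now borrows a vector by const reference. A minimal sketch under those assumptions, reusing the generated LinearImpl module as a stand-in for any Module:

    LinearImpl encoder = new LinearImpl(10, 5);
    LinearImpl head = new LinearImpl(5, 1);
    // parameters() returns a fresh TensorVector by value.
    Adagrad opt = new Adagrad(encoder.parameters());
    // add_parameters() takes the vector by const reference and copies from it.
    opt.add_parameters(head.parameters());
    opt.zero_grad();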
*/ public native void zero_grad(@Cast("bool") boolean set_to_none/*=true*/); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java index 37d07afe44e..2d695459a30 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java @@ -30,13 +30,13 @@ public class OptimizerParamGroup extends Pointer { // be copy-constructible. public OptimizerParamGroup(@Const @ByRef OptimizerParamGroup param_group) { super((Pointer)null); allocate(param_group); } private native void allocate(@Const @ByRef OptimizerParamGroup param_group); - public OptimizerParamGroup(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params); + public OptimizerParamGroup(@ByVal TensorVector params) { super((Pointer)null); allocate(params); } + private native void allocate(@ByVal TensorVector params); public OptimizerParamGroup( - @Cast({"", "std::vector"}) @StdMove TensorVector params, + @ByVal TensorVector params, @UniquePtr @ByVal OptimizerOptions options) { super((Pointer)null); allocate(params, options); } private native void allocate( - @Cast({"", "std::vector"}) @StdMove TensorVector params, + @ByVal TensorVector params, @UniquePtr @ByVal OptimizerOptions options); public native @Cast("bool") boolean has_options(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java index 98b36a68eb2..c0a10faa889 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java @@ -55,7 +55,7 @@ public ParameterDictImpl( public native @ByVal StringVector keys(); /** Return the Values in the dict */ - public native @Cast({"", "std::vector"}) @StdMove TensorVector values(); + public native @ByVal TensorVector values(); /** Return an iterator to the start of ParameterDict */ public native @ByVal @Cast("torch::nn::ParameterDictImpl::Iterator*") StringTensorDictItemVector.Iterator begin(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java index 29133e33be4..0bbb1c43dc9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java @@ -65,7 +65,7 @@ private native void allocate( public native void startTuple(); public native void endTuple(); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensorData(); + public native @Const @ByRef TensorVector tensorData(); public native void pushDict(@Const @ByRef IValue ivalue); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java index 43f580b52c8..f7716d9b2b5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java @@ -32,5 +32,5 @@ public class PyObjectHolder extends Pointer { public native @ByVal IValue toIValue(@Const @ByRef Type.TypePtr type, @ByVal(nullValue = "c10::optional(c10::nullopt)") IntOptional N); public native @ByVal IValue toIValue(@Const @ByRef Type.TypePtr type); public native @StdString BytePointer toStr(); - public native @Cast({"", "std::vector"}) @StdMove TensorVector 
extractTensors(); + public native @ByVal TensorVector extractTensors(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSprop.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSprop.java index e330bc486d6..f7fa66c1fe9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSprop.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSprop.java @@ -35,10 +35,10 @@ public RMSprop( private native void allocate( @ByVal OptimizerParamGroupVector param_groups); - public RMSprop(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::RMSpropOptions{}") RMSpropOptions defaults) { super((Pointer)null); allocate(params, defaults); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::RMSpropOptions{}") RMSpropOptions defaults); - public RMSprop(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params); + public RMSprop(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::RMSpropOptions{}") RMSpropOptions defaults) { super((Pointer)null); allocate(params, defaults); } + private native void allocate(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::RMSpropOptions{}") RMSpropOptions defaults); + public RMSprop(@ByVal TensorVector params) { super((Pointer)null); allocate(params); } + private native void allocate(@ByVal TensorVector params); public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure); public native @ByVal Tensor step(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java index 9929e281b00..bce83d6acf2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java @@ -56,7 +56,7 @@ public class RNNImplBase extends RNNImplCloneable { * called once upon construction, inside {@code reset()}. */ public native void flatten_parameters(); - public native @Cast({"", "std::vector"}) @StdMove TensorVector all_weights(); + public native @ByVal TensorVector all_weights(); /** The RNN's options. 
*/ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SGD.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SGD.java index 6f3abb3d6bc..3e3a716c706 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SGD.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SGD.java @@ -31,8 +31,8 @@ private native void allocate( @ByVal OptimizerParamGroupVector param_groups, @ByVal SGDOptions defaults); - public SGD(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal SGDOptions defaults) { super((Pointer)null); allocate(params, defaults); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal SGDOptions defaults); + public SGD(@ByVal TensorVector params, @ByVal SGDOptions defaults) { super((Pointer)null); allocate(params, defaults); } + private native void allocate(@ByVal TensorVector params, @ByVal SGDOptions defaults); public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure); public native @ByVal Tensor step(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDict.java index 6b79594bcec..e3e9e0f9b56 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDict.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDict.java @@ -179,7 +179,7 @@ public class StringTensorDict extends Pointer { /** Returns a newly allocated vector and copies all values from this * {@code OrderedDict} into the vector. */ - public native @Cast({"", "std::vector"}) @StdMove TensorVector values(); + public native @ByVal TensorVector values(); /** Returns a newly allocated vector and copies all keys and values from this * {@code OrderedDict} into a vector of {@code std::pair}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorVector_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorVector_T.java index bc7bef90512..123c62399be 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorVector_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorVector_T.java @@ -22,8 +22,8 @@ public class T_TensorTensorTensorTensorVector_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
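Note that SGD, unlike the optimizers above, keeps no default-options overload: SGDOptions has no default learning rate, so the options argument stays mandatory. A one-liner for comparison, with a throwaway module standing in for real parameters:

    SGD sgd = new SGD(new LinearImpl(4, 2).parameters(), new SGDOptions(0.01));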
*/ public T_TensorTensorTensorTensorVector_T(Pointer p) { super(p); } - public T_TensorTensorTensorTensorVector_T(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @Cast({"", "std::vector"}) @StdMove TensorVector value3) { allocate(value0, value1, value2, value3); } - private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @Cast({"", "std::vector"}) @StdMove TensorVector value3); + public T_TensorTensorTensorTensorVector_T(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef TensorVector value3) { allocate(value0, value1, value2, value3); } + private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef TensorVector value3); public T_TensorTensorTensorTensorVector_T() { allocate(); } private native void allocate(); public native @Name("operator =") @ByRef T_TensorTensorTensorTensorVector_T put(@ByRef T_TensorTensorTensorTensorVector_T x); @@ -34,7 +34,7 @@ public class T_TensorTensorTensorTensorVector_T extends Pointer { @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensorTensorTensorVector_T container); public @ByRef Tensor get2() { return get2(this); } @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef T_TensorTensorTensorTensorVector_T container); - public @Cast({"", "std::vector"}) @StdMove TensorVector get3() { return get3(this); } - @Namespace @Name("std::get<3>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get3(@ByRef T_TensorTensorTensorTensorVector_T container); + public @ByRef TensorVector get3() { return get3(this); } + @Namespace @Name("std::get<3>") public static native @ByRef TensorVector get3(@ByRef T_TensorTensorTensorTensorVector_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVectorTensorVector_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVectorTensorVector_T.java index c49397fcd85..843f570a9b2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVectorTensorVector_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVectorTensorVector_T.java @@ -22,17 +22,17 @@ public class T_TensorTensorVectorTensorVector_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public T_TensorTensorVectorTensorVector_T(Pointer p) { super(p); } - public T_TensorTensorVectorTensorVector_T(@ByRef Tensor value0, @Cast({"", "std::vector"}) @StdMove TensorVector value1, @Cast({"", "std::vector"}) @StdMove TensorVector value2) { allocate(value0, value1, value2); } - private native void allocate(@ByRef Tensor value0, @Cast({"", "std::vector"}) @StdMove TensorVector value1, @Cast({"", "std::vector"}) @StdMove TensorVector value2); + public T_TensorTensorVectorTensorVector_T(@ByRef Tensor value0, @ByRef TensorVector value1, @ByRef TensorVector value2) { allocate(value0, value1, value2); } + private native void allocate(@ByRef Tensor value0, @ByRef TensorVector value1, @ByRef TensorVector value2); public T_TensorTensorVectorTensorVector_T() { allocate(); } private native void allocate(); public native @Name("operator =") @ByRef T_TensorTensorVectorTensorVector_T put(@ByRef T_TensorTensorVectorTensorVector_T x); public @ByRef Tensor get0() { return get0(this); } @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorVectorTensorVector_T container); - public @Cast({"", "std::vector"}) @StdMove TensorVector get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get1(@ByRef T_TensorTensorVectorTensorVector_T container); - public @Cast({"", "std::vector"}) @StdMove TensorVector get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get2(@ByRef T_TensorTensorVectorTensorVector_T container); + public @ByRef TensorVector get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @ByRef TensorVector get1(@ByRef T_TensorTensorVectorTensorVector_T container); + public @ByRef TensorVector get2() { return get2(this); } + @Namespace @Name("std::get<2>") public static native @ByRef TensorVector get2(@ByRef T_TensorTensorVectorTensorVector_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVector_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVector_T.java index 6cb2b372068..ee1cd5741d9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVector_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVector_T.java @@ -22,15 +22,15 @@ public class T_TensorTensorVector_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
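The tuple wrappers above now hand their TensorVector members out @ByRef rather than through the cast/move annotation. A toy round trip through T_TensorTensorVectorTensorVector_T, built directly with the constructor shown above (assuming the static factories of org.bytedeco.pytorch.global.torch are imported):

    Tensor first = ones(2, 2);
    TensorVector a = new TensorVector(zeros(2, 2));
    TensorVector b = new TensorVector(zeros(2, 2));
    T_TensorTensorVectorTensorVector_T triple =
        new T_TensorTensorVectorTensorVector_T(first, a, b);
    Tensor t0 = triple.get0();       // the Tensor element
    TensorVector v1 = triple.get1(); // now a reference, not a moved-out copy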
*/ public T_TensorTensorVector_T(Pointer p) { super(p); } - public T_TensorTensorVector_T(@ByRef Tensor value0, @Cast({"", "std::vector"}) @StdMove TensorVector value1) { allocate(value0, value1); } - private native void allocate(@ByRef Tensor value0, @Cast({"", "std::vector"}) @StdMove TensorVector value1); + public T_TensorTensorVector_T(@ByRef Tensor value0, @ByRef TensorVector value1) { allocate(value0, value1); } + private native void allocate(@ByRef Tensor value0, @ByRef TensorVector value1); public T_TensorTensorVector_T() { allocate(); } private native void allocate(); public native @Name("operator =") @ByRef T_TensorTensorVector_T put(@ByRef T_TensorTensorVector_T x); public @ByRef Tensor get0() { return get0(this); } @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorVector_T container); - public @Cast({"", "std::vector"}) @StdMove TensorVector get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get1(@ByRef T_TensorTensorVector_T container); + public @ByRef TensorVector get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @ByRef TensorVector get1(@ByRef T_TensorTensorVector_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java index e69a10a36ff..317818af8eb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java @@ -503,22 +503,22 @@ private native void allocate( public native @ByVal Tensor broadcast_to_symint(@ByVal SymIntArrayRef size); public native @ByVal Tensor ceil(); public native @ByRef Tensor ceil_(); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_chunk(@Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_chunk(@Cast("int64_t") long chunks); - public native @Cast({"", "std::vector"}) @StdMove TensorVector chunk(@Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector chunk(@Cast("int64_t") long chunks); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Cast("int64_t") long sections, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Cast("int64_t") long sections); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@ByVal SymInt sections, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@ByVal SymInt sections); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal LongArrayRef indices, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal LongArrayRef indices); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] indices, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
indices); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@ByVal SymIntArrayRef indices, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@ByVal SymIntArrayRef indices); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor tensor_indices_or_sections, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor tensor_indices_or_sections); + public native @ByVal TensorVector unsafe_chunk(@Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector unsafe_chunk(@Cast("int64_t") long chunks); + public native @ByVal TensorVector chunk(@Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector chunk(@Cast("int64_t") long chunks); + public native @ByVal TensorVector tensor_split(@Cast("int64_t") long sections, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector tensor_split(@Cast("int64_t") long sections); + public native @ByVal TensorVector tensor_split_symint(@ByVal SymInt sections, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector tensor_split_symint(@ByVal SymInt sections); + public native @ByVal TensorVector tensor_split(@ByVal LongArrayRef indices, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector tensor_split(@ByVal LongArrayRef indices); + public native @ByVal TensorVector tensor_split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] indices, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector tensor_split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
indices); + public native @ByVal TensorVector tensor_split_symint(@ByVal SymIntArrayRef indices, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector tensor_split_symint(@ByVal SymIntArrayRef indices); + public native @ByVal TensorVector tensor_split(@Const @ByRef Tensor tensor_indices_or_sections, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector tensor_split(@Const @ByRef Tensor tensor_indices_or_sections); public native @ByVal Tensor clamp(@Const @ByRef ScalarOptional min, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional max); public native @ByVal Tensor clamp(@Const @ByRef ScalarOptional min); public native @ByVal Tensor clamp(@Const @ByRef(nullValue = "c10::optional{}") TensorOptional min, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional max); @@ -934,41 +934,41 @@ private native void allocate( public native @ByVal Tensor softmax(@Cast("int64_t") long dim); public native @ByVal Tensor softmax(@ByVal Dimname dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); public native @ByVal Tensor softmax(@ByVal Dimname dim); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split(@Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split(@Cast("int64_t") long split_size); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_symint(@ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_symint(@ByVal SymInt split_size); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Cast("int64_t") long split_size); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@ByVal SymInt split_size); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal LongArrayRef split_size, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal LongArrayRef split_size); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_size, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
split_size); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@ByVal SymIntArrayRef split_size, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@ByVal SymIntArrayRef split_size); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal LongArrayRef split_sizes); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... split_sizes); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal LongArrayRef split_sizes); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... split_sizes); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes); - public native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Cast("int64_t") long sections); - public native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@ByVal LongArrayRef indices); - public native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... indices); - public native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Cast("int64_t") long sections); - public native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@ByVal LongArrayRef indices); - public native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... indices); - public native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Cast("int64_t") long sections); - public native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@ByVal LongArrayRef indices); - public native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
indices); + public native @ByVal TensorVector unsafe_split(@Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector unsafe_split(@Cast("int64_t") long split_size); + public native @ByVal TensorVector unsafe_split_symint(@ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector unsafe_split_symint(@ByVal SymInt split_size); + public native @ByVal TensorVector split(@Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector split(@Cast("int64_t") long split_size); + public native @ByVal TensorVector split_symint(@ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector split_symint(@ByVal SymInt split_size); + public native @ByVal TensorVector split(@ByVal LongArrayRef split_size, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector split(@ByVal LongArrayRef split_size); + public native @ByVal TensorVector split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_size, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... split_size); + public native @ByVal TensorVector split_symint(@ByVal SymIntArrayRef split_size, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector split_symint(@ByVal SymIntArrayRef split_size); + public native @ByVal TensorVector unsafe_split_with_sizes(@ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector unsafe_split_with_sizes(@ByVal LongArrayRef split_sizes); + public native @ByVal TensorVector unsafe_split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector unsafe_split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... split_sizes); + public native @ByVal TensorVector unsafe_split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector unsafe_split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes); + public native @ByVal TensorVector split_with_sizes(@ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector split_with_sizes(@ByVal LongArrayRef split_sizes); + public native @ByVal TensorVector split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... split_sizes); + public native @ByVal TensorVector split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes); + public native @ByVal TensorVector hsplit(@Cast("int64_t") long sections); + public native @ByVal TensorVector hsplit(@ByVal LongArrayRef indices); + public native @ByVal TensorVector hsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
indices); + public native @ByVal TensorVector vsplit(@Cast("int64_t") long sections); + public native @ByVal TensorVector vsplit(@ByVal LongArrayRef indices); + public native @ByVal TensorVector vsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... indices); + public native @ByVal TensorVector dsplit(@Cast("int64_t") long sections); + public native @ByVal TensorVector dsplit(@ByVal LongArrayRef indices); + public native @ByVal TensorVector dsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... indices); public native @ByVal Tensor squeeze(); public native @ByVal Tensor squeeze(@Cast("int64_t") long dim); public native @ByVal Tensor squeeze(@ByVal Dimname dim); @@ -1158,9 +1158,9 @@ private native void allocate( public native @ByVal Tensor col_indices(); public native @ByVal Tensor ccol_indices(); public native @ByVal Tensor row_indices(); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@ByVal Dimname dim); + public native @ByVal TensorVector unbind(@Cast("int64_t") long dim/*=0*/); + public native @ByVal TensorVector unbind(); + public native @ByVal TensorVector unbind(@ByVal Dimname dim); public native @ByVal Tensor to_sparse(@Cast("int64_t") long sparse_dim); public native @ByVal Tensor _to_sparse(@Cast("int64_t") long sparse_dim); public native @ByVal Tensor to_sparse(@ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); @@ -1421,7 +1421,7 @@ private native void allocate( public native @ByVal Tensor nonzero(); public native @ByVal Tensor nonzero_static(@Cast("int64_t") long size, @Cast("int64_t") long fill_value/*=-1*/); public native @ByVal Tensor nonzero_static(@Cast("int64_t") long size); - public native @Cast({"", "std::vector"}) @StdMove TensorVector nonzero_numpy(); + public native @ByVal TensorVector nonzero_numpy(); public native @ByVal Tensor argwhere(); public native @ByVal Tensor gather(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad/*=false*/); public native @ByVal Tensor gather(@Cast("int64_t") long dim, @Const @ByRef Tensor index); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRef.java index b9d4f36930a..83b64f68655 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRef.java @@ -129,7 +129,7 @@ public class TensorArrayRef extends Pointer { /** \} * \name Expensive Operations * \{ */ - public native @Cast({"", "std::vector"}) @StdMove TensorVector vec(); + public native @ByVal TensorVector vec(); /** \} */ } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java index 89cd1d2b34e..74d52d0aa40 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java @@ -27,8 +27,8 @@ public class TensorDataset extends TensorDatasetBase { public TensorDataset(Pointer p) { super(p); } /** Creates a {@code TensorDataset} from a vector of tensors. 
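The Tensor hunks above switch the whole chunk/split family to returning TensorVector @ByVal, and the TensorDataset constructor just below now borrows that vector by const reference. A minimal sketch chaining the two; the get()/data() accessor names follow the generated dataset API and should be treated as assumptions:

    Tensor data = randn(8, 4);
    // chunk() now returns a by-value TensorVector of views into 'data'.
    TensorVector parts = data.chunk(4);
    // The constructor below takes the vector by const reference.
    TensorDataset dataset = new TensorDataset(parts);
    TensorExample example = dataset.get(0); // one tensor per index
    Tensor t = example.data();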
*/ - public TensorDataset(@Cast({"", "std::vector"}) @StdMove TensorVector tensors) { super((Pointer)null); allocate(tensors); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector tensors); + public TensorDataset(@Const @ByRef TensorVector tensors) { super((Pointer)null); allocate(tensors); } + private native void allocate(@Const @ByRef TensorVector tensors); public TensorDataset(@ByVal Tensor tensor) { super((Pointer)null); allocate(tensor); } private native void allocate(@ByVal Tensor tensor); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorList.java index 1bd9225efef..0077b8ebd2a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorList.java @@ -225,7 +225,7 @@ public class TensorList extends Pointer { */ public native @Cast("bool") boolean is(@Const @ByRef TensorList rhs); - public native @Cast({"", "std::vector"}) @StdMove TensorVector vec(); + public native @ByVal TensorVector vec(); /** * Returns the number of Lists currently pointing to this same list. diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVector.java index b2d84235aba..1f466cf6542 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVector.java @@ -28,7 +28,7 @@ public class TensorVector extends Pointer { public TensorVector(long n) { allocate(n); } private native void allocate(); private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef TensorVector put(@ByRef @Cast({"", "std::vector"}) @StdMove TensorVector x); + public native @Name("operator =") @ByRef TensorVector put(@ByRef TensorVector x); public boolean empty() { return size() == 0; } public native long size(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorOptional.java index 4d816aaefa6..c3c45025ab4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorOptional.java @@ -22,14 +22,14 @@ public class TensorVectorOptional extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public TensorVectorOptional(Pointer p) { super(p); } - public TensorVectorOptional(@Cast({"", "std::vector"}) @StdMove TensorVector value) { this(); put(value); } + public TensorVectorOptional(TensorVector value) { this(); put(value); } public TensorVectorOptional() { allocate(); } private native void allocate(); public native @Name("operator =") @ByRef TensorVectorOptional put(@ByRef TensorVectorOptional x); public native boolean has_value(); public native void reset(); - public native @Name("value") @Cast({"", "std::vector"}) @StdMove TensorVector get(); - @ValueSetter public native TensorVectorOptional put(@Cast({"", "std::vector"}) @StdMove TensorVector value); + public native @Name("value") @ByRef TensorVector get(); + @ValueSetter public native TensorVectorOptional put(@ByRef TensorVector value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index c00e43d2653..8a954c0c585 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -15769,13 +15769,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { /// /// @Namespace("torch::autograd") public static native void backward( - @Cast({"", "std::vector"}) @StdMove TensorVector tensors, - @Cast({"", "std::vector"}) @StdMove TensorVector grad_tensors/*={}*/, + @Const @ByRef TensorVector tensors, + @Const @ByRef(nullValue = "torch::autograd::variable_list{}") TensorVector grad_tensors, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional retain_graph, @Cast("bool") boolean create_graph/*=false*/, - @Cast({"", "std::vector"}) @StdMove TensorVector inputs/*={}*/); + @Const @ByRef(nullValue = "torch::autograd::variable_list{}") TensorVector inputs); @Namespace("torch::autograd") public static native void backward( - @Cast({"", "std::vector"}) @StdMove TensorVector tensors); + @Const @ByRef TensorVector tensors); /** Computes and returns the sum of gradients of outputs with respect to the * inputs. @@ -15803,16 +15803,16 @@ public class torch extends org.bytedeco.pytorch.presets.torch { * @param allow_unused If {@code }false{@code }, specifying inputs that were not * used when computing outputs (and therefore their grad is always zero) * is an error. Defaults to {@code }false{@code }. */ -@Namespace("torch::autograd") public static native @Cast({"", "std::vector"}) @StdMove TensorVector grad( - @Cast({"", "std::vector"}) @StdMove TensorVector outputs, - @Cast({"", "std::vector"}) @StdMove TensorVector inputs, - @Cast({"", "std::vector"}) @StdMove TensorVector grad_outputs/*={}*/, +@Namespace("torch::autograd") public static native @ByVal TensorVector grad( + @Const @ByRef TensorVector outputs, + @Const @ByRef TensorVector inputs, + @Const @ByRef(nullValue = "torch::autograd::variable_list{}") TensorVector grad_outputs, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional retain_graph, @Cast("bool") boolean create_graph/*=false*/, @Cast("bool") boolean allow_unused/*=false*/); -@Namespace("torch::autograd") public static native @Cast({"", "std::vector"}) @StdMove TensorVector grad( - @Cast({"", "std::vector"}) @StdMove TensorVector outputs, - @Cast({"", "std::vector"}) @StdMove TensorVector inputs); +@Namespace("torch::autograd") public static native @ByVal TensorVector grad( + @Const @ByRef TensorVector outputs, + @Const @ByRef TensorVector inputs); /** Creates a new dual level and returns its index. 
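The torch::autograd::backward and grad bindings above likewise drop the cast/move annotations in favor of plain const references and a by-value return. A scalar-output sketch of the two-argument grad() overload shown above (scalar outputs need no grad_outputs; static members of org.bytedeco.pytorch.global.torch are assumed imported):

    Tensor x = ones(2, 2).set_requires_grad(true);
    Tensor y = x.mul(x).sum();  // scalar output
    TensorVector grads = grad(new TensorVector(y), new TensorVector(x));
    Tensor dydx = grads.get(0); // equals 2 * x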
This level index should then * be used to call into the other functions below. This API supports entering a @@ -20705,8 +20705,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::align_tensors(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector align_tensors(@ByVal TensorArrayRef tensors); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector align_tensors(@ByVal TensorVector tensors); +@Namespace("at") public static native @ByVal TensorVector align_tensors(@ByVal TensorArrayRef tensors); +@Namespace("at") public static native @ByVal TensorVector align_tensors(@ByVal TensorVector tensors); @@ -21981,8 +21981,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor atleast_1d(@Const @ByRef Tensor self); // aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_1d(@ByVal TensorArrayRef tensors); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_1d(@ByVal TensorVector tensors); +@Namespace("at") public static native @ByVal TensorVector atleast_1d(@ByVal TensorArrayRef tensors); +@Namespace("at") public static native @ByVal TensorVector atleast_1d(@ByVal TensorVector tensors); @@ -22015,8 +22015,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor atleast_2d(@Const @ByRef Tensor self); // aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_2d(@ByVal TensorArrayRef tensors); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_2d(@ByVal TensorVector tensors); +@Namespace("at") public static native @ByVal TensorVector atleast_2d(@ByVal TensorArrayRef tensors); +@Namespace("at") public static native @ByVal TensorVector atleast_2d(@ByVal TensorVector tensors); @@ -22049,8 +22049,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor atleast_3d(@Const @ByRef Tensor self); // aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_3d(@ByVal TensorArrayRef tensors); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_3d(@ByVal TensorVector tensors); +@Namespace("at") public static native @ByVal TensorVector atleast_3d(@ByVal TensorArrayRef tensors); +@Namespace("at") public static native @ByVal TensorVector atleast_3d(@ByVal TensorVector tensors); @@ -23316,8 +23316,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::broadcast_tensors(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector broadcast_tensors(@ByVal TensorArrayRef tensors); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector broadcast_tensors(@ByVal TensorVector tensors); +@Namespace("at") public static native @ByVal TensorVector broadcast_tensors(@ByVal TensorArrayRef tensors); +@Namespace("at") public static native @ByVal TensorVector broadcast_tensors(@ByVal TensorVector tensors); @@ -24012,8 +24012,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // 
aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks); +@Namespace("at") public static native @ByVal TensorVector chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks); @@ -26797,8 +26797,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor dequantize(@Const @ByRef Tensor self); // aten::dequantize.tensors(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dequantize(@ByVal TensorArrayRef tensors); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dequantize(@ByVal TensorVector tensors); +@Namespace("at") public static native @ByVal TensorVector dequantize(@ByVal TensorArrayRef tensors); +@Namespace("at") public static native @ByVal TensorVector dequantize(@ByVal TensorVector tensors); // aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor dequantize_out(@ByRef Tensor out, @Const @ByRef Tensor self); @@ -27491,11 +27491,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Const @ByRef Tensor self, @Cast("int64_t") long sections); +@Namespace("at") public static native @ByVal TensorVector dsplit(@Const @ByRef Tensor self, @Cast("int64_t") long sections); // aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... indices); +@Namespace("at") public static native @ByVal TensorVector dsplit(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); +@Namespace("at") public static native @ByVal TensorVector dsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... indices); @@ -32098,42 +32098,42 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? 
dim=None, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self); // aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal LongArrayRef dim); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dim); // aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing); // aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal LongArrayRef dim); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dim); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorVector spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorVector spacing); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorVector spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorVector spacing); // aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal LongArrayRef dim); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorVector spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorVector spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dim); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorVector spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @ByVal TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorVector spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); @@ -33174,11 +33174,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Const @ByRef Tensor self, @Cast("int64_t") long sections); +@Namespace("at") public static native @ByVal TensorVector hsplit(@Const @ByRef Tensor self, @Cast("int64_t") long sections); // aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... indices); +@Namespace("at") public static native @ByVal TensorVector hsplit(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); +@Namespace("at") public static native @ByVal TensorVector hsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
indices); @@ -39551,14 +39551,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::meshgrid(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal TensorArrayRef tensors); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal TensorVector tensors); +@Namespace("at") public static native @ByVal TensorVector meshgrid(@ByVal TensorArrayRef tensors); +@Namespace("at") public static native @ByVal TensorVector meshgrid(@ByVal TensorVector tensors); // aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal TensorArrayRef tensors, @StringView BytePointer indexing); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal TensorVector tensors, @StringView String indexing); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal TensorVector tensors, @StringView BytePointer indexing); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal TensorArrayRef tensors, @StringView String indexing); +@Namespace("at") public static native @ByVal TensorVector meshgrid(@ByVal TensorArrayRef tensors, @StringView BytePointer indexing); +@Namespace("at") public static native @ByVal TensorVector meshgrid(@ByVal TensorVector tensors, @StringView String indexing); +@Namespace("at") public static native @ByVal TensorVector meshgrid(@ByVal TensorVector tensors, @StringView BytePointer indexing); +@Namespace("at") public static native @ByVal TensorVector meshgrid(@ByVal TensorArrayRef tensors, @StringView String indexing); @@ -43074,7 +43074,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::nonzero_numpy(Tensor self) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector nonzero_numpy(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal TensorVector nonzero_numpy(@Const @ByRef Tensor self); @@ -44843,8 +44843,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor quantize_per_tensor(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, ScalarType dtype); // aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector quantize_per_tensor(@ByVal TensorArrayRef tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector quantize_per_tensor(@ByVal TensorVector tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype); +@Namespace("at") public static native @ByVal TensorVector quantize_per_tensor(@ByVal TensorArrayRef tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype); +@Namespace("at") public static native @ByVal TensorVector quantize_per_tensor(@ByVal TensorVector tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype); // aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor quantize_per_tensor_out(@ByRef Tensor out, @Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, ScalarType dtype); @@ -52982,25 +52982,25 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size); +@Namespace("at") public static native @ByVal TensorVector split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size); // aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size); +@Namespace("at") public static native @ByVal TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size); // aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal LongArrayRef split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal LongArrayRef split_size); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... split_size); +@Namespace("at") public static native @ByVal TensorVector split(@Const @ByRef Tensor self, @ByVal LongArrayRef split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector split(@Const @ByRef Tensor self, @ByVal LongArrayRef split_size); +@Namespace("at") public static native @ByVal TensorVector split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
split_size); // aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_size); +@Namespace("at") public static native @ByVal TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_size); @@ -53031,13 +53031,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_copy(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_copy(@Const @ByRef Tensor self, @Cast("int64_t") long split_size); +@Namespace("at") public static native @ByVal TensorVector split_copy(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector split_copy(@Const @ByRef Tensor self, @Cast("int64_t") long split_size); // aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_copy_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_copy_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size); +@Namespace("at") public static native @ByVal TensorVector split_copy_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector split_copy_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size); // aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () @@ -53092,15 +53092,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
split_sizes); +@Namespace("at") public static native @ByVal TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes); +@Namespace("at") public static native @ByVal TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... split_sizes); // aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes); +@Namespace("at") public static native @ByVal TensorVector split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes); @@ -53131,15 +53131,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... split_sizes); +@Namespace("at") public static native @ByVal TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes); +@Namespace("at") public static native @ByVal TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
split_sizes); // aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes); +@Namespace("at") public static native @ByVal TensorVector split_with_sizes_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector split_with_sizes_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes); // aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () @@ -54369,30 +54369,30 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @Cast("int64_t") long sections, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @Cast("int64_t") long sections); +@Namespace("at") public static native @ByVal TensorVector tensor_split(@Const @ByRef Tensor self, @Cast("int64_t") long sections, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector tensor_split(@Const @ByRef Tensor self, @Cast("int64_t") long sections); // aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymInt sections, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymInt sections); +@Namespace("at") public static native @ByVal TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymInt sections, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymInt sections); // aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal LongArrayRef indices, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] indices, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
indices); +@Namespace("at") public static native @ByVal TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal LongArrayRef indices, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); +@Namespace("at") public static native @ByVal TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] indices, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... indices); // aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef indices, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef indices); +@Namespace("at") public static native @ByVal TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef indices, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef indices); // aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor_indices_or_sections, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor_indices_or_sections); +@Namespace("at") public static native @ByVal TensorVector tensor_split(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor_indices_or_sections, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector tensor_split(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor_indices_or_sections); @@ -55639,11 +55639,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal TensorVector unbind(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector unbind(@Const @ByRef Tensor self); // aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@Const @ByRef Tensor self, @ByVal Dimname dim); +@Namespace("at") public static native @ByVal TensorVector unbind(@Const @ByRef Tensor self, @ByVal Dimname dim); @@ -55673,8 +55673,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) 
@StdMove TensorVector unbind_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unbind_copy(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal TensorVector unbind_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector unbind_copy(@Const @ByRef Tensor self); // aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> () @Namespace("at") public static native void unbind_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/); @@ -55759,8 +55759,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unflatten_dense_tensors(@Const @ByRef Tensor flat, @ByVal TensorArrayRef tensors); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unflatten_dense_tensors(@Const @ByRef Tensor flat, @ByVal TensorVector tensors); +@Namespace("at") public static native @ByVal TensorVector unflatten_dense_tensors(@Const @ByRef Tensor flat, @ByVal TensorArrayRef tensors); +@Namespace("at") public static native @ByVal TensorVector unflatten_dense_tensors(@Const @ByRef Tensor flat, @ByVal TensorVector tensors); @@ -56055,8 +56055,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks); +@Namespace("at") public static native @ByVal TensorVector unsafe_chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector unsafe_chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks); @@ -56086,13 +56086,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size); +@Namespace("at") public static native @ByVal TensorVector unsafe_split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector unsafe_split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size); // aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size); 
+@Namespace("at") public static native @ByVal TensorVector unsafe_split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector unsafe_split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size); // aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () @@ -56147,15 +56147,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... split_sizes); +@Namespace("at") public static native @ByVal TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes); +@Namespace("at") public static native @ByVal TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
split_sizes); // aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes); +@Namespace("at") public static native @ByVal TensorVector unsafe_split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal TensorVector unsafe_split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes); // aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () @@ -57747,11 +57747,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Const @ByRef Tensor self, @Cast("int64_t") long sections); +@Namespace("at") public static native @ByVal TensorVector vsplit(@Const @ByRef Tensor self, @Cast("int64_t") long sections); // aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... indices); +@Namespace("at") public static native @ByVal TensorVector vsplit(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); +@Namespace("at") public static native @ByVal TensorVector vsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... indices); @@ -57836,7 +57836,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor where(@Const @ByRef Tensor condition, @Const @ByRef Scalar self, @Const @ByRef Scalar other); // aten::where(Tensor condition) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector where(@Const @ByRef Tensor condition); +@Namespace("at") public static native @ByVal TensorVector where(@Const @ByRef Tensor condition); @@ -59668,8 +59668,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector expand_outplace(@ByVal TensorArrayRef to_expand); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector expand_outplace(@ByVal TensorVector to_expand); +@Namespace("at") public static native @ByVal TensorVector expand_outplace(@ByVal TensorArrayRef to_expand); +@Namespace("at") public static native @ByVal TensorVector expand_outplace(@ByVal TensorVector to_expand); @Namespace("at") public static native @ByVal Tensor sum_to( @ByVal Tensor tensor, @@ -59960,7 +59960,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @SharedPtr Node function); /** Return true if any of the variables in the list require a gradient. 
*/ -@Namespace("torch::autograd") public static native @Cast("bool") boolean any_variable_requires_grad(@Cast({"", "std::vector"}) @StdMove TensorVector variables); +@Namespace("torch::autograd") public static native @Cast("bool") boolean any_variable_requires_grad(@Const @ByRef TensorVector variables); /** Return the next edges of all the given variables, or tuples of variables. */ @@ -62807,7 +62807,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at::indexing::impl") public static native @ByVal TensorOptionalList typeConvertIndices( @Const @ByRef Tensor arg0, - @Cast({"", "std::vector"}) @StdMove TensorVector indices); + @ByRef(true) TensorVector indices); // NOTE: Why do we mirror instead of replace the `count_specified_dimensions` // function in torch/csrc/autograd/python_variable_indexing.cpp? It's because @@ -62906,11 +62906,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at::indexing") public static native @ByVal Tensor dispatch_index( @Const @ByRef Tensor self, - @Cast({"", "std::vector"}) @StdMove TensorVector indices); + @ByRef(true) TensorVector indices); @Namespace("at::indexing") public static native @ByVal Tensor dispatch_index_put_( @ByRef Tensor self, - @Cast({"", "std::vector"}) @StdMove TensorVector indices, + @ByRef(true) TensorVector indices, @Const @ByRef Tensor value); // NOTE [ Setting `disable_slice_optimization` when calling C++ tensor indexing @@ -76224,12 +76224,12 @@ scalar_t sf(scalar_t x, scalar_t y) // sense!) in order to return a CPU-side `double`. This C++ version therefore // cannot be run fully asynchronously w.r.t. the device of the gradients. @Namespace("torch::nn::utils") public static native double clip_grad_norm_( - @Cast({"", "std::vector"}) @StdMove TensorVector parameters, + @Const @ByRef TensorVector parameters, double max_norm, double norm_type/*=2.0*/, @Cast("bool") boolean error_if_nonfinite/*=false*/); @Namespace("torch::nn::utils") public static native double clip_grad_norm_( - @Cast({"", "std::vector"}) @StdMove TensorVector parameters, + @Const @ByRef TensorVector parameters, double max_norm); // A wrapper around clip_grad_norm_ that allows us to call the function with a @@ -76251,7 +76251,7 @@ scalar_t sf(scalar_t x, scalar_t y) // See https://pytorch.org/docs/stable/nn.html#clip-grad-value // for more details about this module. 
@Namespace("torch::nn::utils") public static native void clip_grad_value_( - @Cast({"", "std::vector"}) @StdMove TensorVector parameters, + @Const @ByRef TensorVector parameters, double clip_value); // A wrapper around clip_grad_value_ that allows us to call the function with a @@ -76283,12 +76283,12 @@ scalar_t sf(scalar_t x, scalar_t y) // Convert parameters to one vector @Namespace("torch::nn::utils") public static native @ByVal Tensor parameters_to_vector( - @Cast({"", "std::vector"}) @StdMove TensorVector parameters); + @Const @ByRef TensorVector parameters); // Convert one vector to the parameters @Namespace("torch::nn::utils") public static native void vector_to_parameters( @Const @ByRef Tensor vec, - @Cast({"", "std::vector"}) @StdMove TensorVector parameters); + @Const @ByRef TensorVector parameters); // namespace utils // namespace nn diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 41006e58beb..3e8af46c1da 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -632,9 +632,9 @@ public void map(InfoMap infoMap) { .put(new Info("std::vector").pointerTypes("ShapeSymbolVector").define()) .put(new Info("std::vector").pointerTypes("TensorImplVector").define()) .put(new Info("std::vector", "torch::autograd::edge_list") // Used in Node constructor - .valueTypes("@Cast({\"\", \"std::vector\"}) @StdMove EdgeVector").pointerTypes("EdgeVector").define()) + .valueTypes("@Cast({\"\", \"std::vector\"}) @StdMove EdgeVector").pointerTypes("EdgeVector").define()) .put(new Info("std::vector", "std::vector", "std::vector", "torch::autograd::variable_list") - .valueTypes("@Cast({\"\", \"std::vector\"}) @StdMove TensorVector").pointerTypes("TensorVector").define()) + .pointerTypes("TensorVector").define()) .put(new Info("std::vector", "std::vector").pointerTypes("TensorIndexVector").define()) .put(new Info("std::vector >").pointerTypes("TensorOptionalVector").define()) .put(new Info("const std::vector >", @@ -1808,10 +1808,10 @@ public void map(InfoMap infoMap) { new PointerInfo("at::CPUGeneratorImpl"), new PointerInfo("at::TensorIterator"), new PointerInfo("caffe2::serialize::IStreamAdapter"), - new PointerInfo("torch::autograd::FunctionPreHook"), - new PointerInfo("torch::autograd::FunctionPostHook"), - // Other classes passed as unique ptr ar abstract, so not instantiated from Java: - // ReadAdapterInterface, PostAccumulateGradHook, FunctionPreHook, FunctionPostHook, FuncTorchTLSBase, AutogradMetaInterface, + new PointerInfo("torch::autograd::FunctionPreHook").virtualize(), + new PointerInfo("torch::autograd::FunctionPostHook").virtualize(), + // Other classes passed as unique ptr are abstract, so not instantiated from Java: + // ReadAdapterInterface, PostAccumulateGradHook, FuncTorchTLSBase, AutogradMetaInterface, // GeneratorImpl, OpRegistrationListener, AttributeValue }) { pi.makeUnique(infoMap); @@ -2717,6 +2717,7 @@ static class PointerInfo { String javaName; final String[] argumentNames; String[] otherCppNames = new String[0]; + boolean virtualize = false; PointerInfo(String... 
an) { argumentNames = an; @@ -2738,6 +2739,11 @@ PointerInfo javaName(String jn) { return this; } + PointerInfo virtualize() { + virtualize = true; + return this; + } + void makeShared(InfoMap infoMap) { // See issue #670 String[] cppNames = new String[argumentNames.length + otherCppNames.length]; @@ -2751,7 +2757,15 @@ void makeShared(InfoMap infoMap) { // Also annotate constructor of target class to ensure only one shared_ptr exists for each instance String n = argumentNames[0].substring(argumentNames[0].lastIndexOf(' ') + 1); // Remove possible const - String n2 = n.equals("torch::nn::Module") ? "JavaCPP_torch_0003a_0003ann_0003a_0003aModule" : n; + String n2 = n; + if (virtualize) { + n2 = mangle(n2); + infoMap.put(new Info(n).virtualize()); + } else if (n.equals("torch::nn::Module")) { + // We don't set virtualize on Module since we don't want all virtual + // member functions to be annotated @Virtual (clone_, ...) + n2 = mangle(n2); + } infoMap.put(new Info(n + n.substring(n.lastIndexOf("::"))).annotations("@SharedPtr", "@Name(\"std::make_shared<" + n2 + ">\")")); } @@ -2764,7 +2778,12 @@ void makeUnique(InfoMap infoMap) { infoMap.put(new Info(cppNames).annotations("@UniquePtr").pointerTypes(javaBaseName)); String n = argumentNames[0].substring(argumentNames[0].lastIndexOf(' ') + 1); // Remove possible const - infoMap.put(new Info(n + n.substring(n.lastIndexOf("::"))).annotations("@UniquePtr", "@Name(\"std::make_unique<" + n + ">\")")); + String n2 = n; + if (virtualize) { + n2 = mangle(n2); + infoMap.put(new Info(n).virtualize()); + } + infoMap.put(new Info(n + n.substring(n.lastIndexOf("::"))).annotations("@UniquePtr", "@Name(\"std::make_unique<" + n2 + ">\")")); } } From d381ebadb930aac12a8ce703dee4846b4a2e88a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Thu, 8 Feb 2024 08:41:56 +0100 Subject: [PATCH 16/24] Remove @StdMove on Storage valueTypes. 
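With @StdMove, Storage arguments and return values were cast to
c10::Storage&&, which does not match natives that take a Storage by
value or return it by const reference. Map them @ByVal, @ByRef(true),
or @Const @ByRef instead, following each native signature.

A minimal sketch of the borrow semantics this gives the Java API.
Hypothetical usage, not part of this patch; it assumes a factory such
as ones(long...) from the generated bindings:

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    Tensor t = ones(2, 3);
    Storage s1 = t.storage();   // @Const @ByRef: borrows the storage
    Storage s2 = t.storage();   // safe to call again, nothing moved out
    System.out.println(s1.is_alias_of(s2));  // expected: true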
--- .../gen/java/org/bytedeco/pytorch/IValue.java | 6 +-- .../bytedeco/pytorch/NestedTensorImpl.java | 4 +- .../java/org/bytedeco/pytorch/Storage.java | 4 +- .../gen/java/org/bytedeco/pytorch/Tensor.java | 14 +++--- .../java/org/bytedeco/pytorch/TensorBase.java | 2 +- .../java/org/bytedeco/pytorch/TensorImpl.java | 28 +++++------ .../org/bytedeco/pytorch/global/torch.java | 46 +++++++++---------- .../org/bytedeco/pytorch/presets/torch.java | 3 +- 8 files changed, 53 insertions(+), 54 deletions(-) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java index d6605db4342..8befb02b029 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java @@ -155,11 +155,11 @@ public class IValue extends Pointer { public native @ByRef Tensor toTensor(); public native TensorImpl unsafeToTensorImpl(); - public IValue(@Cast({"", "c10::Storage&&"}) @StdMove Storage s) { super((Pointer)null); allocate(s); } - private native void allocate(@Cast({"", "c10::Storage&&"}) @StdMove Storage s); + public IValue(@ByVal Storage s) { super((Pointer)null); allocate(s); } + private native void allocate(@ByVal Storage s); public native @Cast("bool") boolean isStorage(); - public native @Cast({"", "c10::Storage&&"}) @StdMove Storage toStorage(); + public native @ByVal Storage toStorage(); public native @ByRef IValue toIValue(); /** \private [doxygen private] */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java index c8ae1e30a43..d5bda553fad 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java @@ -25,14 +25,14 @@ public class NestedTensorImpl extends TensorImpl { public NestedTensorImpl(Pointer p) { super(p); } public NestedTensorImpl( - @Cast({"", "c10::Storage&&"}) @StdMove Storage storage, + @ByVal Storage storage, @ByVal DispatchKeySet key_set, @Const @ByVal TypeMeta data_type, @ByVal Tensor nested_sizes, @ByVal Tensor nested_strides, @ByVal Tensor storage_offsets) { super((Pointer)null); allocate(storage, key_set, data_type, nested_sizes, nested_strides, storage_offsets); } private native void allocate( - @Cast({"", "c10::Storage&&"}) @StdMove Storage storage, + @ByVal Storage storage, @ByVal DispatchKeySet key_set, @Const @ByVal TypeMeta data_type, @ByVal Tensor nested_sizes, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java index f95e94083d6..50220dbdd1d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java @@ -106,7 +106,7 @@ private native void allocate( // Legacy constructor for partially initialized (dtype or memory) storages // that can be temporarily created with Caffe2 APIs. See the note on top of // TensorImpl.h for details. - public static native @Cast({"", "c10::Storage&&"}) @StdMove Storage create_legacy(@ByVal Device device); + public static native @ByVal Storage create_legacy(@ByVal Device device); // Mimic create_legacy, but without requiring a newly-created StorageImpl. 
public native void reset_legacy(); @@ -154,7 +154,7 @@ private native void allocate( public native @Cast("bool") boolean unique(); - public native @Cast("bool") boolean is_alias_of(@Cast({"", "c10::Storage&&"}) @StdMove Storage other); + public native @Cast("bool") boolean is_alias_of(@Const @ByRef Storage other); public native void UniqueStorageShareExternalPointer( Pointer src, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java index 317818af8eb..6109db6aac6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java @@ -1215,13 +1215,13 @@ private native void allocate( public native @ByVal Tensor to(@Const @ByRef Tensor other, @Cast("bool") boolean non_blocking/*=false*/, @Cast("bool") boolean copy/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); public native @ByVal Tensor to(@Const @ByRef Tensor other); public native @ByVal Scalar item(); - public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source); - public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride); - public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size); - public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); - public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); - public native @ByRef Tensor set__symint(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntArrayRef stride); - public native @ByRef Tensor set__symint(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size); + public native @ByRef Tensor set_(@ByVal Storage source); + public native @ByRef Tensor set_(@ByVal Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride); + public native @ByRef Tensor set_(@ByVal Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size); + public native @ByRef Tensor set_(@ByVal Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); + public native @ByRef Tensor set_(@ByVal Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); + public native @ByRef Tensor set__symint(@ByVal Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntArrayRef stride); + public native @ByRef Tensor set__symint(@ByVal Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size); public native @ByRef Tensor set_(@Const @ByRef Tensor source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride); public native @ByRef Tensor set_(@Const @ByRef Tensor source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size); public native @ByRef Tensor set_(@Const @ByRef Tensor source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java index eedb5347d87..86de38accf1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java @@ -181,7 +181,7 @@ private native void allocate( public native @ByVal DispatchKeySet key_set(); public native ScalarType scalar_type(); public native @Cast("bool") boolean has_storage(); - public native @Cast({"", "c10::Storage&&"}) @StdMove Storage storage(); + public native @Const @ByRef Storage storage(); public native @Cast("bool") boolean is_alias_of(@Const @ByRef TensorBase other); // Move the storage backend to shm based diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java index 74e2e755a1f..1996b4d0733 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java @@ -116,33 +116,33 @@ public enum ImplType { VIEW(0); * Construct a 1-dim 0-size tensor backed by the given storage. 
*/ public TensorImpl( - @Cast({"", "c10::Storage&&"}) @StdMove Storage storage, + @ByRef(true) Storage storage, @ByVal DispatchKeySet arg1, @Const @ByVal TypeMeta data_type) { super((Pointer)null); allocate(storage, arg1, data_type); } private native void allocate( - @Cast({"", "c10::Storage&&"}) @StdMove Storage storage, + @ByRef(true) Storage storage, @ByVal DispatchKeySet arg1, @Const @ByVal TypeMeta data_type); // See Note [Enum ImplType] public TensorImpl( ImplType arg0, - @Cast({"", "c10::Storage&&"}) @StdMove Storage storage, + @ByRef(true) Storage storage, @ByVal DispatchKeySet arg2, @Const @ByVal TypeMeta data_type) { super((Pointer)null); allocate(arg0, storage, arg2, data_type); } private native void allocate( ImplType arg0, - @Cast({"", "c10::Storage&&"}) @StdMove Storage storage, + @ByRef(true) Storage storage, @ByVal DispatchKeySet arg2, @Const @ByVal TypeMeta data_type); public TensorImpl( @Cast("c10::TensorImpl::ImplType") int arg0, - @Cast({"", "c10::Storage&&"}) @StdMove Storage storage, + @ByRef(true) Storage storage, @ByVal DispatchKeySet arg2, @Const @ByVal TypeMeta data_type) { super((Pointer)null); allocate(arg0, storage, arg2, data_type); } private native void allocate( @Cast("c10::TensorImpl::ImplType") int arg0, - @Cast({"", "c10::Storage&&"}) @StdMove Storage storage, + @ByRef(true) Storage storage, @ByVal DispatchKeySet arg2, @Const @ByVal TypeMeta data_type); @@ -161,19 +161,19 @@ private native void allocate( // Legacy constructors so I don't have to go update call sites. // TODO: When Variable is added, delete these constructors public TensorImpl( - @Cast({"", "c10::Storage&&"}) @StdMove Storage storage, + @ByRef(true) Storage storage, DispatchKey dispatch_key, @Const @ByVal TypeMeta data_type) { super((Pointer)null); allocate(storage, dispatch_key, data_type); } private native void allocate( - @Cast({"", "c10::Storage&&"}) @StdMove Storage storage, + @ByRef(true) Storage storage, DispatchKey dispatch_key, @Const @ByVal TypeMeta data_type); public TensorImpl( - @Cast({"", "c10::Storage&&"}) @StdMove Storage storage, + @ByRef(true) Storage storage, @Cast("c10::DispatchKey") short dispatch_key, @Const @ByVal TypeMeta data_type) { super((Pointer)null); allocate(storage, dispatch_key, data_type); } private native void allocate( - @Cast({"", "c10::Storage&&"}) @StdMove Storage storage, + @ByRef(true) Storage storage, @Cast("c10::DispatchKey") short dispatch_key, @Const @ByVal TypeMeta data_type); public TensorImpl( @@ -399,14 +399,14 @@ public enum SizesStridesPolicy { * Avoid using this method if possible; try to use only Tensor APIs to perform * operations. */ - public native @Cast({"", "c10::Storage&&"}) @StdMove Storage storage(); + public native @Const @ByRef Storage storage(); /** * Return the underlying storage, unsafely assuming this is a basic strided * tensor. In cases where {@code storage} access would throw, this returns a * default-constructed Storage. */ - public native @Cast({"", "c10::Storage&&"}) @StdMove Storage unsafe_storage(); + public native @Const @ByRef Storage unsafe_storage(); public native @Cast("bool") boolean unique_version(); // Whether a tensor is sparse COO or not. 
@@ -993,10 +993,10 @@ public native void ShareExternalPointer( */ public native @Cast("bool") @NoException(true) boolean dtype_initialized(); - public native void set_storage_keep_dtype(@Cast({"", "c10::Storage&&"}) @StdMove Storage storage); + public native void set_storage_keep_dtype(@ByVal Storage storage); public native void set_storage_and_dtype( - @Cast({"", "c10::Storage&&"}) @StdMove Storage storage, + @ByVal Storage storage, @Const @ByVal TypeMeta data_type); public native void empty_tensor_restride_symint(MemoryFormat memory_format); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 8a954c0c585..e0dbf2c20a9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -6866,8 +6866,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include @Namespace("c10") public static native @Cast("bool") boolean isSharedStorageAlias( - @Cast({"", "c10::Storage&&"}) @StdMove Storage storage0, - @Cast({"", "c10::Storage&&"}) @StdMove Storage storage1); + @Const @ByRef Storage storage0, + @Const @ByRef Storage storage1); // Targeting ../Storage.java @@ -8768,11 +8768,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { */ @Namespace("at") public static native void storage_copy( @ByRef Storage dst, - @Cast({"", "c10::Storage&&"}) @StdMove Storage src, + @Const @ByRef Storage src, @Cast("bool") boolean non_blocking/*=false*/); @Namespace("at") public static native void storage_copy( @ByRef Storage dst, - @Cast({"", "c10::Storage&&"}) @StdMove Storage src); + @Const @ByRef Storage src); /** * In place change the storage to shm based. @@ -48829,44 +48829,44 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source); +@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Storage source); // aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @ByVal Storage source, @ByRef Tensor out); // aten::set.source_Storage(Tensor self, Storage source) -> Tensor -@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source); +@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @ByVal Storage source); // aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); -@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); +@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride); +@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); +@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @ByVal Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @ByVal Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByRef Tensor out); // aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor set_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntArrayRef stride); -@Namespace("at") public static native @ByRef Tensor set_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size); +@Namespace("at") public static native @ByRef Tensor set_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntArrayRef stride); +@Namespace("at") public static native @ByRef Tensor set_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size); // aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor set_symint_outf(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor set_symint_outf(@Const @ByRef Tensor self, @ByVal Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByRef Tensor out); // aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor -@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); -@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); +@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @ByVal Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride); +@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @ByVal Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @ByVal Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); +@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @ByVal Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); // aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor -@Namespace("at") public static native @ByVal Tensor set_symint(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntArrayRef stride); -@Namespace("at") public static native @ByVal Tensor set_symint(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size); +@Namespace("at") public static native @ByVal Tensor set_symint(@Const @ByRef Tensor self, @ByVal Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntArrayRef stride); +@Namespace("at") public static native @ByVal Tensor set_symint(@Const @ByRef Tensor self, @ByVal Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size); // aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!) @@ -63108,7 +63108,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor unsafeTensorFromTH(Pointer th_pointer, @Cast("bool") boolean retain); -@Namespace("at") public static native @Cast({"", "c10::Storage&&"}) @StdMove Storage unsafeStorageFromTH(Pointer th_pointer, @Cast("bool") boolean retain); +@Namespace("at") public static native @ByVal Storage unsafeStorageFromTH(Pointer th_pointer, @Cast("bool") boolean retain); diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 3e8af46c1da..c288d60c168 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -346,8 +346,7 @@ public void map(InfoMap infoMap) { .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Float8_e4m3fn>::t)").pointerTypes("Float8_e4m3fn")) .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Float8_e5m2fnuz>::t)").pointerTypes("Float8_e5m2fnuz")) .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Float8_e4m3fnuz>::t)").pointerTypes("Float8_e4m3fnuz")) - .put(new Info("c10::DataPtr", "at::DataPtr").valueTypes("@Cast({\"\", \"c10::DataPtr&&\"}) @StdMove DataPtr").pointerTypes("DataPtr")) - .put(new Info("c10::Storage", "at::Storage").valueTypes("@Cast({\"\", \"c10::Storage&&\"}) @StdMove Storage").pointerTypes("Storage")) + .put(new Info("c10::DataPtr", "at::DataPtr").valueTypes("@Cast({\"\", \"c10::DataPtr&&\"}) @StdMove DataPtr").pointerTypes("DataPtr")) // DataPtr::operator= deleted .put(new Info("c10::ClassType").purify().pointerTypes("ClassType")) // Issue #669 .put(new Info("c10::EnumType").purify().pointerTypes("EnumType")) // Issue #669 .put(new Info("c10::NamedType").purify().pointerTypes("NamedType")) // Issue #669 From 97e1ce614dafe85f68e5b02333f05f387db41968 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Thu, 8 Feb 2024 09:06:21 +0100 Subject: [PATCH 17/24] Remove `@StdMove` on `MaybeOwned` --- .../org/bytedeco/pytorch/OperandInfo.java | 8 +-- ...nedTensorMaybeOwnedTensorMaybeOwned_T.java | 16 ++--- .../T_TensorMaybeOwnedTensorMaybeOwned_T.java | 12 ++-- .../gen/java/org/bytedeco/pytorch/Tensor.java | 6 +- .../pytorch/TensorBaseMaybeOwned.java | 10 ++-- 
.../bytedeco/pytorch/TensorMaybeOwned.java | 10 ++-- .../org/bytedeco/pytorch/global/torch.java | 18 +++--- .../org/bytedeco/pytorch/presets/torch.java | 60 ++++++++++--------- 8 files changed, 71 insertions(+), 69 deletions(-) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java index 94b2cf8e10a..053afe6bfb7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java @@ -35,8 +35,8 @@ public class OperandInfo extends Pointer { public OperandInfo() { super((Pointer)null); allocate(); } private native void allocate(); - public OperandInfo(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned t) { super((Pointer)null); allocate(t); } - private native void allocate(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned t); + public OperandInfo(@ByRef(true) TensorBaseMaybeOwned t) { super((Pointer)null); allocate(t); } + private native void allocate(@ByRef(true) TensorBaseMaybeOwned t); public OperandInfo(@Const @ByRef OperandInfo arg0) { super((Pointer)null); allocate(arg0); } private native void allocate(@Const @ByRef OperandInfo arg0); @@ -81,7 +81,7 @@ public class OperandInfo extends Pointer { * coalescing. */ public native @Const @ByRef Tensor tensor(); public native @Const @ByRef TensorBase tensor_base(); - public native void tensor(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned tensor); + public native void tensor(@ByRef(true) TensorBaseMaybeOwned tensor); // Save the original tensor operand in cases when an output is modified // (e.g. if dtype is changed) @@ -91,7 +91,7 @@ public class OperandInfo extends Pointer { // Set tensor to a new value, and store the old tensor value in // original_tensor Should only ever be called once for the lifetime of an // operand - public native void exchange_tensor(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned new_tensor); + public native void exchange_tensor(@ByRef(true) TensorBaseMaybeOwned new_tensor); // Move original_tensor back into tensor, exchange_tensor must have been // called before diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T.java index 6e5d6265534..d6ebd89d280 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T.java @@ -22,17 +22,17 @@ public class T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T extends Pointe static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T(Pointer p) { super(p); } - public T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value0, @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value1, @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value2) { allocate(value0, value1, value2); } - private native void allocate(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value0, @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value1, @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value2); + public T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T(@ByRef TensorMaybeOwned value0, @ByRef TensorMaybeOwned value1, @ByRef TensorMaybeOwned value2) { allocate(value0, value1, value2); } + private native void allocate(@ByRef TensorMaybeOwned value0, @ByRef TensorMaybeOwned value1, @ByRef TensorMaybeOwned value2); public T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T() { allocate(); } private native void allocate(); public native @Name("operator =") @ByRef T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T put(@ByRef T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T x); - public @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get0(@ByRef T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T container); - public @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get1(@ByRef T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T container); - public @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get2(@ByRef T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T container); + public @ByRef TensorMaybeOwned get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native @ByRef TensorMaybeOwned get0(@ByRef T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T container); + public @ByRef TensorMaybeOwned get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @ByRef TensorMaybeOwned get1(@ByRef T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T container); + public @ByRef TensorMaybeOwned get2() { return get2(this); } + @Namespace @Name("std::get<2>") public static native @ByRef TensorMaybeOwned get2(@ByRef T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwned_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwned_T.java index 0c833a91732..ab9e6cced67 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwned_T.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwned_T.java @@ -22,15 +22,15 @@ public class T_TensorMaybeOwnedTensorMaybeOwned_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public T_TensorMaybeOwnedTensorMaybeOwned_T(Pointer p) { super(p); } - public T_TensorMaybeOwnedTensorMaybeOwned_T(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value0, @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value1) { allocate(value0, value1); } - private native void allocate(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value0, @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value1); + public T_TensorMaybeOwnedTensorMaybeOwned_T(@ByRef TensorMaybeOwned value0, @ByRef TensorMaybeOwned value1) { allocate(value0, value1); } + private native void allocate(@ByRef TensorMaybeOwned value0, @ByRef TensorMaybeOwned value1); public T_TensorMaybeOwnedTensorMaybeOwned_T() { allocate(); } private native void allocate(); public native @Name("operator =") @ByRef T_TensorMaybeOwnedTensorMaybeOwned_T put(@ByRef T_TensorMaybeOwnedTensorMaybeOwned_T x); - public @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get0(@ByRef T_TensorMaybeOwnedTensorMaybeOwned_T container); - public @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get1(@ByRef T_TensorMaybeOwnedTensorMaybeOwned_T container); + public @ByRef TensorMaybeOwned get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native @ByRef TensorMaybeOwned get0(@ByRef T_TensorMaybeOwnedTensorMaybeOwned_T container); + public @ByRef TensorMaybeOwned get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @ByRef TensorMaybeOwned get1(@ByRef T_TensorMaybeOwnedTensorMaybeOwned_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java index 6109db6aac6..f7cbc6eba3d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java @@ -85,9 +85,9 @@ private native void allocate( * increment/decrement if *this is already contiguous, at the cost * in all cases of an extra pointer of stack usage, an extra branch * to access, and an extra branch at destruction time. */ - public native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expect_contiguous(MemoryFormat memory_format/*=c10::MemoryFormat::Contiguous*/); - public native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expect_contiguous(); - public native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expect_contiguous(@Cast("c10::MemoryFormat") byte memory_format/*=c10::MemoryFormat::Contiguous*/); + public native @ByVal TensorMaybeOwned expect_contiguous(MemoryFormat memory_format/*=c10::MemoryFormat::Contiguous*/); + public native @ByVal TensorMaybeOwned expect_contiguous(); + public native @ByVal TensorMaybeOwned expect_contiguous(@Cast("c10::MemoryFormat") byte memory_format/*=c10::MemoryFormat::Contiguous*/); // Use .contiguous() instead. Trying to borrow from a prvalue Tensor // will only lead to trouble and dangling references. 
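For context, a minimal sketch of the resulting MaybeOwned usage (not part of the patch; hypothetical names, assuming only the pytorch presets on the classpath and the methods visible in this diff — expect_contiguous(), TensorMaybeOwned.borrowed(), and the copy constructor):

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class MaybeOwnedSketch {            // hypothetical name, for illustration only
        public static void main(String[] args) {
            Tensor t = rand(3, 4);
            // Now returned @ByVal: a borrow if t is already contiguous,
            // an owned contiguous copy otherwise.
            TensorMaybeOwned m = t.expect_contiguous();
            // Explicit borrow of an lvalue, per the factory above.
            TensorMaybeOwned borrowed = TensorMaybeOwned.borrowed(t);
            // Copying an owned value yields another owned value:
            // no chains of borrowing by default.
            TensorMaybeOwned copy = new TensorMaybeOwned(m);
        }
    }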
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBaseMaybeOwned.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBaseMaybeOwned.java index eb0dc83eb91..9df03681b61 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBaseMaybeOwned.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBaseMaybeOwned.java @@ -39,14 +39,14 @@ public class TensorBaseMaybeOwned extends Pointer { // T*. Copying an owned T yields another owned T for safety: no // chains of borrowing by default! (Note you could get that behavior // with MaybeOwned::borrowed(*rhs) if you wanted it.) - public TensorBaseMaybeOwned(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned rhs) { super((Pointer)null); allocate(rhs); } - private native void allocate(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned rhs); + public TensorBaseMaybeOwned(@Const @ByRef TensorBaseMaybeOwned rhs) { super((Pointer)null); allocate(rhs); } + private native void allocate(@Const @ByRef TensorBaseMaybeOwned rhs); - public native @ByRef @Name("operator =") TensorBaseMaybeOwned put(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned rhs); + public native @ByRef @Name("operator =") TensorBaseMaybeOwned put(@Const @ByRef TensorBaseMaybeOwned rhs); - public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned borrowed(@Const @ByRef TensorBase t); + public static native @ByVal TensorBaseMaybeOwned borrowed(@Const @ByRef TensorBase t); - public static native @NoException(true) @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned owned(@ByRef(true) TensorBase t); + public static native @ByVal @NoException(true) TensorBaseMaybeOwned owned(@ByRef(true) TensorBase t); // This is an implementation detail! You should know what you're doing // if you are testing this. If you just want to guarantee ownership move diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwned.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwned.java index c9da0abb0d3..6ba8cb2ca3d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwned.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwned.java @@ -48,14 +48,14 @@ public class TensorMaybeOwned extends Pointer { // T*. Copying an owned T yields another owned T for safety: no // chains of borrowing by default! (Note you could get that behavior // with MaybeOwned::borrowed(*rhs) if you wanted it.) - public TensorMaybeOwned(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned rhs) { super((Pointer)null); allocate(rhs); } - private native void allocate(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned rhs); + public TensorMaybeOwned(@Const @ByRef TensorMaybeOwned rhs) { super((Pointer)null); allocate(rhs); } + private native void allocate(@Const @ByRef TensorMaybeOwned rhs); - public native @ByRef @Name("operator =") TensorMaybeOwned put(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned rhs); + public native @ByRef @Name("operator =") TensorMaybeOwned put(@Const @ByRef TensorMaybeOwned rhs); - public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned borrowed(@Const @ByRef Tensor t); + public static native @ByVal TensorMaybeOwned borrowed(@Const @ByRef Tensor t); - public static native @NoException(true) @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned owned(@ByRef(true) Tensor t); + public static native @ByVal @NoException(true) TensorMaybeOwned owned(@ByRef(true) Tensor t); // This is an implementation detail! 
You should know what you're doing // if you are testing this. If you just want to guarantee ownership move diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index e0dbf2c20a9..1698a36cf84 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -59547,17 +59547,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // resulting from a function call, but it is still possible to make a // mistake. -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_inplace( +@Namespace("at") public static native @ByVal TensorMaybeOwned expand_inplace( @Const @ByRef Tensor tensor, @Const @ByRef Tensor to_expand); -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_inplace( +@Namespace("at") public static native @ByVal TensorMaybeOwned expand_inplace( @Const @ByRef Tensor tensor, @Const @ByRef Tensor to_expand, @Cast("const char*") BytePointer api_name); -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_inplace( +@Namespace("at") public static native @ByVal TensorMaybeOwned expand_inplace( @Const @ByRef Tensor tensor, @Const @ByRef Tensor to_expand, String api_name); @@ -59640,28 +59640,28 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( +@Namespace("at") public static native @ByVal TensorMaybeOwned expand_size( @Const @ByRef Tensor to_expand, @ByVal LongArrayRef sizes); -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( +@Namespace("at") public static native @ByVal TensorMaybeOwned expand_size( @Const @ByRef Tensor to_expand, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
sizes); -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( +@Namespace("at") public static native @ByVal TensorMaybeOwned expand_size( @Const @ByRef Tensor to_expand, @ByVal LongArrayRef sizes, @Cast("const char*") BytePointer api_name); -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( +@Namespace("at") public static native @ByVal TensorMaybeOwned expand_size( @Const @ByRef Tensor to_expand, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, String api_name); -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( +@Namespace("at") public static native @ByVal TensorMaybeOwned expand_size( @Const @ByRef Tensor to_expand, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, @Cast("const char*") BytePointer api_name); -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( +@Namespace("at") public static native @ByVal TensorMaybeOwned expand_size( @Const @ByRef Tensor to_expand, @ByVal LongArrayRef sizes, String api_name); diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index c288d60c168..7756d4ec2f5 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -307,37 +307,14 @@ public void map(InfoMap infoMap) { .put(new Info("std::size_t", "c10::Dict::size_type", "c10::Dict::size_type").cast().valueTypes("long").pointerTypes("SizeTPointer")) .put(new Info("c10::approx_time_t").cast().valueTypes("long").pointerTypes("LongPointer")) - .put(new Info( - "torch::ExpandingArray<1>", "torch::ExpandingArray<2>", "torch::ExpandingArray<3>", "torch::ExpandingArray<4>", - "torch::ExpandingArray", "torch::ExpandingArray<1*2>", "torch::ExpandingArray<2*2>", "torch::ExpandingArray<3*2>").cast().pointerTypes("LongPointer")) - .put(new Info("torch::ExpandingArray<1,double>", "torch::ExpandingArray<2,double>", "torch::ExpandingArray<3,double>").cast().pointerTypes("DoublePointer")) - .put(new Info("torch::ExpandingArrayWithOptionalElem<2>", "torch::ExpandingArrayWithOptionalElem<3>").cast().pointerTypes("LongOptional")) - .put(new Info("std::pair").pointerTypes("EnumNameValue").define()) .put(new Info("c10::ClassType::Property").pointerTypes("ClassType.Property")) - .put(new Info("std::list >").pointerTypes("RecordFunctionHandleIntList").define()) .put(new Info("at::RecordFunctionHandle").valueTypes("long")) .put(new Info("c10::ivalue::Future::FutureError::FutureError").skip()) // This constructor takes a std::string&& but parser sends a std::string& .put(new Info("operator const std::string&()").javaText( // Hopefully targets the one in ConstantString only "public native @Const @ByRef @Name(\"operator const std::string&\") @StdString @Override String toString();" )) - .put(new Info("c10::weak_intrusive_ptr").pointerTypes("WeakStorage")) - - .put(new Info("torch::monitor::Stat").pointerTypes("DoubleStat")) - .put(new Info("torch::monitor::Stat").pointerTypes("LongStat")) - .put(new Info("torch::jit::generic_graph_node_list").pointerTypes("graph_node_list")) - .put(new Info("torch::jit::generic_graph_node_list_iterator").pointerTypes("graph_node_list_iterator")) - .put(new 
Info("torch::autograd::Function").pointerTypes("FunctionCrossMapLRN2d")) - .put(new Info("strong::type,strong::hashable>").pointerTypes("Pointer")) - - .put(new Info("c10::VaryingShape").pointerTypes("LongVaryingShape")) - .put(new Info("c10::VaryingShape").pointerTypes("StrideVaryingShape")) - .put(new Info("torch::detail::SelectiveStr").pointerTypes("DisabledStr")) - .put(new Info("torch::detail::SelectiveStr").pointerTypes("EnabledStr")) - .put(new Info("torch::detail::SelectiveStr::operator const char*", - "torch::detail::SelectiveStr::operator const char*"). - javaText("public native @Name(\"operator const char*\") @Cast(\"const char*\") BytePointer asBytePointer();"))// Fixes bug where constexpr prevents addition of const in @Name .put(new Info("fbgemm::bfloat16", "__nv_bfloat16", "sycl::ext::oneapi::bfloat16").pointerTypes("BFloat16").valueTypes("short", "short", "short")) .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)").cast().valueTypes("boolean").pointerTypes("BoolPointer")) .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Half>::t)").pointerTypes("Half")) @@ -350,14 +327,9 @@ public void map(InfoMap infoMap) { .put(new Info("c10::ClassType").purify().pointerTypes("ClassType")) // Issue #669 .put(new Info("c10::EnumType").purify().pointerTypes("EnumType")) // Issue #669 .put(new Info("c10::NamedType").purify().pointerTypes("NamedType")) // Issue #669 - .put(new Info("c10::MaybeOwned").valueTypes("@Cast({\"\", \"c10::MaybeOwned&&\"}) @StdMove TensorMaybeOwned").pointerTypes("TensorMaybeOwned")) - .put(new Info("c10::MaybeOwned").valueTypes("@Cast({\"\", \"c10::MaybeOwned&&\"}) @StdMove TensorBaseMaybeOwned").pointerTypes("TensorBaseMaybeOwned")) - .put(new Info("at::InferExpandGeometryResult").pointerTypes("DimVectorInferExpandGeometryResult")) .put(new Info("at::namedinference::TensorName").valueTypes("@Cast({\"\", \"at::namedinference::TensorName&&\"}) @StdMove TensorName").pointerTypes("TensorName")) .put(new Info("c10::remove_symint::type").valueTypes("long")) .put(new Info("std::aligned_storage_t").pointerTypes("Pointer")) - .put(new Info("c10::TensorImpl::identity").pointerTypes("SymIntIdentity")) - .put(new Info("c10::TensorImpl::identity").pointerTypes("LongIdentity")) .put(new Info("c10::requires_grad", "at::range", "at::bernoulli_out", "at::normal_out", "at::stft").skipDefaults()) .put(new Info("c10::prim::requires_grad").javaNames("requires_grad")) .put(new Info("c10::aten::clone").javaNames("_clone")) @@ -985,6 +957,7 @@ public void map(InfoMap infoMap) { .put(new Info("std::pair", "std::pair").pointerTypes("PointerPair").define()) .put(new Info("std::pair").pointerTypes("SizeTMatchedSchemaPair").define()) .put(new Info("std::pair").pointerTypes("BytePointerPair").define()) + .put(new Info("std::pair").pointerTypes("EnumNameValue").define()) ; //// Intrusive pointers @@ -1997,7 +1970,36 @@ We need either to put an annotation info on each member, or javaName("@NoOffset ; - //// Instantiation of templated functions. + //// Instantiation of misc class templates. 
+ infoMap + .put(new Info("std::list >").pointerTypes("RecordFunctionHandleIntList").define()) + .put(new Info( + "torch::ExpandingArray<1>", "torch::ExpandingArray<2>", "torch::ExpandingArray<3>", "torch::ExpandingArray<4>", + "torch::ExpandingArray", "torch::ExpandingArray<1*2>", "torch::ExpandingArray<2*2>", "torch::ExpandingArray<3*2>").cast().pointerTypes("LongPointer")) + .put(new Info("torch::ExpandingArray<1,double>", "torch::ExpandingArray<2,double>", "torch::ExpandingArray<3,double>").cast().pointerTypes("DoublePointer")) + .put(new Info("torch::ExpandingArrayWithOptionalElem<2>", "torch::ExpandingArrayWithOptionalElem<3>").cast().pointerTypes("LongOptional")) + .put(new Info("c10::VaryingShape").pointerTypes("LongVaryingShape")) + .put(new Info("c10::VaryingShape").pointerTypes("StrideVaryingShape")) + .put(new Info("c10::MaybeOwned").pointerTypes("TensorMaybeOwned")) + .put(new Info("c10::MaybeOwned").pointerTypes("TensorBaseMaybeOwned")) + .put(new Info("at::InferExpandGeometryResult").pointerTypes("DimVectorInferExpandGeometryResult")) + .put(new Info("c10::TensorImpl::identity").pointerTypes("SymIntIdentity")) + .put(new Info("c10::TensorImpl::identity").pointerTypes("LongIdentity")) + .put(new Info("torch::detail::SelectiveStr").pointerTypes("DisabledStr")) + .put(new Info("torch::detail::SelectiveStr").pointerTypes("EnabledStr")) + .put(new Info("torch::detail::SelectiveStr::operator const char*", + "torch::detail::SelectiveStr::operator const char*"). + javaText("public native @Name(\"operator const char*\") @Cast(\"const char*\") BytePointer asBytePointer();"))// Fixes bug where constexpr prevents addition of const in @Name + .put(new Info("c10::weak_intrusive_ptr").pointerTypes("WeakStorage")) + + .put(new Info("torch::monitor::Stat").pointerTypes("DoubleStat")) + .put(new Info("torch::monitor::Stat").pointerTypes("LongStat")) + .put(new Info("torch::jit::generic_graph_node_list").pointerTypes("graph_node_list")) + .put(new Info("torch::jit::generic_graph_node_list_iterator").pointerTypes("graph_node_list_iterator")) + .put(new Info("torch::autograd::Function").pointerTypes("FunctionCrossMapLRN2d")) + ; + + //// Instantiation of function templates. for (String op : new String[]{"exp", "log", "log10", "log2", "sqrt", "pow", "sin", "cos", "tan", "asin", "acos", "atan", "sinh", "cosh", "tanh", "asinh", "acosh", "atanh", "log1p" }) { infoMap.put(new Info("c10_complex_math::" + op + "").javaNames(op)) From a41a0c134ee2e097a9443b912ecc53e0e115faa7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Thu, 8 Feb 2024 09:30:08 +0100 Subject: [PATCH 18/24] Remove `@StdMove` on `TensorName` --- pytorch/src/gen/java/org/bytedeco/pytorch/TensorName.java | 6 +++--- pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java | 2 +- .../src/main/java/org/bytedeco/pytorch/presets/torch.java | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorName.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorName.java index 7104546a5a5..fa4ff2f48a9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorName.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorName.java @@ -51,12 +51,12 @@ public class TensorName extends Pointer { private native void allocate(@ByVal DimnameVector origin, int origin_idx); // op_name is only used for error reporting. 
- public native @Cast({"", "at::namedinference::TensorName&&"}) @StdMove TensorName unify(@Cast({"", "at::namedinference::TensorName&&"}) @StdMove TensorName other, @Cast("const char*") BytePointer op_name); - public native @Cast({"", "at::namedinference::TensorName&&"}) @StdMove TensorName unify(@Cast({"", "at::namedinference::TensorName&&"}) @StdMove TensorName other, String op_name); + public native @Const @ByRef TensorName unify(@Const @ByRef TensorName other, @Cast("const char*") BytePointer op_name); + public native @Const @ByRef TensorName unify(@Const @ByRef TensorName other, String op_name); public native @ByVal Dimname toDimname(); private static native @Namespace @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( @Cast("std::ostream*") @ByRef Pointer out, - @Cast({"", "at::namedinference::TensorName&&"}) @StdMove TensorName tensorname); + @Const @ByRef TensorName tensorname); public Pointer shiftLeft(Pointer out) { return shiftLeft(out, this); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java index 2e29e684832..86c050de12d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java @@ -49,6 +49,6 @@ public class TensorNames extends Pointer { public native void checkUnique(@Cast("const char*") BytePointer op_name); public native void checkUnique(String op_name); - public native void append(@Cast({"", "at::namedinference::TensorName&&"}) @StdMove TensorName name); + public native void append(@ByRef(true) TensorName name); public native @StdMove DimnameVector toDimnameVec(); } diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 7756d4ec2f5..108b3a3e086 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -327,7 +327,7 @@ public void map(InfoMap infoMap) { .put(new Info("c10::ClassType").purify().pointerTypes("ClassType")) // Issue #669 .put(new Info("c10::EnumType").purify().pointerTypes("EnumType")) // Issue #669 .put(new Info("c10::NamedType").purify().pointerTypes("NamedType")) // Issue #669 - .put(new Info("at::namedinference::TensorName").valueTypes("@Cast({\"\", \"at::namedinference::TensorName&&\"}) @StdMove TensorName").pointerTypes("TensorName")) + .put(new Info("at::namedinference::TensorName").pointerTypes("TensorName")) .put(new Info("c10::remove_symint::type").valueTypes("long")) .put(new Info("std::aligned_storage_t").pointerTypes("Pointer")) .put(new Info("c10::requires_grad", "at::range", "at::bernoulli_out", "at::normal_out", "at::stft").skipDefaults()) From fff554f7a4bd1bf5387b74969d7e89c7839cfeb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Thu, 8 Feb 2024 10:32:11 +0100 Subject: [PATCH 19/24] Remove `@StdMove` on `EdgeVector` and `DimnameVector` --- .../org/bytedeco/pytorch/DimnameArrayRef.java | 2 +- .../java/org/bytedeco/pytorch/EdgeVector.java | 2 +- .../org/bytedeco/pytorch/NamedTensorMeta.java | 2 +- .../gen/java/org/bytedeco/pytorch/Node.java | 2 +- .../org/bytedeco/pytorch/TensorNames.java | 2 +- .../org/bytedeco/pytorch/global/torch.java | 44 +++++++++---------- .../org/bytedeco/pytorch/presets/torch.java | 8 +--- 7 files changed, 29 insertions(+), 33 deletions(-) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameArrayRef.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameArrayRef.java index 66a373aac38..040428171f0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameArrayRef.java @@ -130,7 +130,7 @@ public class DimnameArrayRef extends Pointer { /** \} * \name Expensive Operations * \{ */ - public native @StdMove DimnameVector vec(); + public native @ByVal DimnameVector vec(); /** \} */ } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EdgeVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EdgeVector.java index 1625f734810..bdbd027698b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EdgeVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EdgeVector.java @@ -28,7 +28,7 @@ public class EdgeVector extends Pointer { public EdgeVector(long n) { allocate(n); } private native void allocate(); private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef EdgeVector put(@ByRef @Cast({"", "std::vector"}) @StdMove EdgeVector x); + public native @Name("operator =") @ByRef EdgeVector put(@ByRef EdgeVector x); public boolean empty() { return size() == 0; } public native long size(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java index b3aface5867..e45972725d1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java @@ -73,5 +73,5 @@ public enum HAS_NON_WILDCARD { public native void set_names(@Cast("at::NamedTensorMeta::HAS_NON_WILDCARD") int arg0, @ByVal DimnameArrayRef new_names); // INVARIANT: at least one Dimname is non-WILDCARD - public native @StdMove DimnameVector names_(); public native NamedTensorMeta names_(DimnameVector setter); + public native @ByRef DimnameVector names_(); public native NamedTensorMeta names_(DimnameVector setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java index d4e91331d20..2f95bdc593d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java @@ -140,7 +140,7 @@ public class Node extends Pointer { public native void add_next_edge(@ByVal Edge edge); - public native void set_next_edges(@Cast({"", "std::vector"}) @StdMove EdgeVector next_edges); + public native void set_next_edges(@ByRef(true) EdgeVector next_edges); public native @Const @ByRef @NoException(true) Edge next_edge(@Cast("size_t") long index); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java index 86c050de12d..5ba13495d98 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java @@ -50,5 +50,5 @@ public class TensorNames extends Pointer { public native void checkUnique(String op_name); public native void append(@ByRef(true) TensorName name); - public native @StdMove DimnameVector toDimnameVec(); + public native @ByVal DimnameVector toDimnameVec(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 1698a36cf84..9ebc248db96 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -413,9 +413,6 @@ public class torch extends 
org.bytedeco.pytorch.presets.torch { // Targeting ../ExampleVector.java -// Targeting ../EnumNameValue.java - - // Targeting ../StringTensorPair.java @@ -437,6 +434,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../BytePointerPair.java +// Targeting ../EnumNameValue.java + + // Targeting ../T_DataPtrSizeT_T.java @@ -8622,7 +8622,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Sets the names of `tensor` to be `names`. @Namespace("at") public static native @Const @ByRef TensorBase internal_set_names_inplace(@Const @ByRef TensorBase tensor, @ByVal DimnameListOptional names); -@Namespace("at") public static native @Const @ByRef TensorBase internal_set_names_inplace(@Const @ByRef TensorBase tensor, @StdMove DimnameVector names, @Cast("bool") boolean validate_names); +@Namespace("at") public static native @Const @ByRef TensorBase internal_set_names_inplace(@Const @ByRef TensorBase tensor, @ByRef(true) DimnameVector names, @Cast("bool") boolean validate_names); @Namespace("at") @MemberGetter public static native @Cast("const size_t") long kMaxNamedTensorDim(); @@ -8631,7 +8631,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Some helper functions on TensorImpl. Useful for working with names in TH. // XXX: Ideally these would exist as methods on TensorImpl @Namespace("at::impl") public static native void internal_set_names_inplace(TensorImpl impl, @ByVal DimnameListOptional names, @Cast("bool") boolean validate_names); -@Namespace("at::impl") public static native void internal_set_names_inplace(TensorImpl impl, @StdMove DimnameVector names, @Cast("bool") boolean validate_names); +@Namespace("at::impl") public static native void internal_set_names_inplace(TensorImpl impl, @ByRef(true) DimnameVector names, @Cast("bool") boolean validate_names); @@ -14916,25 +14916,25 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // the same index from the right in other. // 3) The output names are obtained by unifying the names individually from the // right. 
-@Namespace("at") public static native @StdMove DimnameVector unify_from_right( +@Namespace("at") public static native @ByVal DimnameVector unify_from_right( @ByVal DimnameArrayRef names, @ByVal DimnameArrayRef other, @Cast("const char*") BytePointer action/*="broadcast"*/); -@Namespace("at") public static native @StdMove DimnameVector unify_from_right( +@Namespace("at") public static native @ByVal DimnameVector unify_from_right( @ByVal DimnameArrayRef names, @ByVal DimnameArrayRef other); -@Namespace("at") public static native @StdMove DimnameVector unify_from_right( +@Namespace("at") public static native @ByVal DimnameVector unify_from_right( @ByVal DimnameVector names, @ByVal DimnameVector other, String action/*="broadcast"*/); -@Namespace("at") public static native @StdMove DimnameVector unify_from_right( +@Namespace("at") public static native @ByVal DimnameVector unify_from_right( @ByVal DimnameVector names, @ByVal DimnameVector other); -@Namespace("at") public static native @StdMove DimnameVector unify_from_right( +@Namespace("at") public static native @ByVal DimnameVector unify_from_right( @ByVal DimnameVector names, @ByVal DimnameVector other, @Cast("const char*") BytePointer action/*="broadcast"*/); -@Namespace("at") public static native @StdMove DimnameVector unify_from_right( +@Namespace("at") public static native @ByVal DimnameVector unify_from_right( @ByVal DimnameArrayRef names, @ByVal DimnameArrayRef other, String action/*="broadcast"*/); @@ -15038,34 +15038,34 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Const @ByRef Tensor result, @Const @ByRef Tensor self); -@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_broadcast_outnames( +@Namespace("at::namedinference") public static native @ByVal DimnameVector compute_broadcast_outnames( @Const @ByRef Tensor self, @Const @ByRef Tensor other); -@Namespace("at::namedinference") public static native @StdMove DimnameVector broadcast_to_outnames( +@Namespace("at::namedinference") public static native @ByVal DimnameVector broadcast_to_outnames( @Const @ByRef Tensor tensor, @Const @ByRef Tensor reference_tensor, @Cast("const char*") BytePointer op_name); -@Namespace("at::namedinference") public static native @StdMove DimnameVector broadcast_to_outnames( +@Namespace("at::namedinference") public static native @ByVal DimnameVector broadcast_to_outnames( @Const @ByRef Tensor tensor, @Const @ByRef Tensor reference_tensor, String op_name); -@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_matmul_outnames( +@Namespace("at::namedinference") public static native @ByVal DimnameVector compute_matmul_outnames( @Const @ByRef Tensor self, @Const @ByRef Tensor other); -@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_cdist_outnames( +@Namespace("at::namedinference") public static native @ByVal DimnameVector compute_cdist_outnames( @Const @ByRef Tensor self, @Const @ByRef Tensor other); -@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_bmm_outnames( +@Namespace("at::namedinference") public static native @ByVal DimnameVector compute_bmm_outnames( @Const @ByRef Tensor result, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_squeeze_outnames(@Const @ByRef Tensor tensor); -@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_squeeze_outnames( 
+@Namespace("at::namedinference") public static native @ByVal DimnameVector compute_squeeze_outnames(@Const @ByRef Tensor tensor); +@Namespace("at::namedinference") public static native @ByVal DimnameVector compute_squeeze_outnames( @Const @ByRef Tensor tensor, long dims); @@ -15140,19 +15140,19 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Const @ByRef TensorBase src); // result = m1 @ m2 + bias -@Namespace("at::namedinference") public static native @StdMove DimnameVector propagate_names_for_addmm( +@Namespace("at::namedinference") public static native @ByVal DimnameVector propagate_names_for_addmm( @Const @ByRef Tensor m1, @Const @ByRef Tensor m2, @Const @ByRef Tensor bias); -@Namespace("at::namedinference") public static native @StdMove DimnameVector propagate_names_for_addmv( +@Namespace("at::namedinference") public static native @ByVal DimnameVector propagate_names_for_addmv( @Const @ByRef Tensor mat, @Const @ByRef Tensor vec, @Const @ByRef Tensor bias); @Namespace("at::namedinference") public static native void check_names_for_dot(TensorImpl vec1, TensorImpl vec2); -@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_baddbmm_outnames( +@Namespace("at::namedinference") public static native @ByVal DimnameVector compute_baddbmm_outnames( @Const @ByRef Tensor result, @Const @ByRef Tensor self, @Const @ByRef Tensor other, diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 108b3a3e086..712cf3cdcc6 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -598,12 +598,11 @@ public void map(InfoMap infoMap) { .put(new Info("std::vector >", "std::vector").pointerTypes("SharedClassTypeVector").define()) .put(new Info("std::vector >", "std::vector", "std::vector", "c10::AliasTypeSet").pointerTypes("TypeVector").define()) - .put(new Info("const std::vector", "std::vector").valueTypes("@StdMove DimnameVector").pointerTypes("DimnameVector").define()) + .put(new Info("const std::vector", "std::vector").pointerTypes("DimnameVector").define()) .put(new Info("std::vector").pointerTypes("StrideVector").define()) .put(new Info("std::vector").pointerTypes("ShapeSymbolVector").define()) .put(new Info("std::vector").pointerTypes("TensorImplVector").define()) - .put(new Info("std::vector", "torch::autograd::edge_list") // Used in Node constructor - .valueTypes("@Cast({\"\", \"std::vector\"}) @StdMove EdgeVector").pointerTypes("EdgeVector").define()) + .put(new Info("std::vector", "torch::autograd::edge_list").pointerTypes("EdgeVector").define()) // Used in Node constructor .put(new Info("std::vector", "std::vector", "std::vector", "torch::autograd::variable_list") .pointerTypes("TensorVector").define()) .put(new Info("std::vector", "std::vector").pointerTypes("TensorIndexVector").define()) @@ -942,9 +941,6 @@ public void map(InfoMap infoMap) { ; } - // What is the use for this ? 
- //.put(new Info("torch::OrderedDict") - // .valueTypes("@Cast({\"\", \"torch::OrderedDict&&\"}) @StdMove StringAnyModuleDict")) //// std::pair infoMap From 425fe392ec94e27f6c2b5f53f99683ec41313a48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Thu, 8 Feb 2024 21:01:14 +0100 Subject: [PATCH 20/24] Disable AOTInductor --- pytorch/include_list.pl | 8 ++- .../pytorch/AOTIModelContainerRunner.java | 58 ------------------- .../pytorch/AOTIModelContainerRunnerCpu.java | 40 ------------- .../org/bytedeco/pytorch/DynamicLibrary.java | 28 --------- .../cuda/AOTIModelContainerRunnerCuda.java | 58 ------------------- .../org/bytedeco/pytorch/global/torch.java | 22 ------- .../bytedeco/pytorch/global/torch_cuda.java | 12 ---- .../org/bytedeco/pytorch/presets/torch.java | 3 +- .../bytedeco/pytorch/presets/torch_cuda.java | 3 +- .../pytorch/presets/torch_cuda_include.h | 3 +- .../bytedeco/pytorch/presets/torch_include.h | 3 +- 11 files changed, 14 insertions(+), 224 deletions(-) delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunner.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCpu.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DynamicLibrary.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AOTIModelContainerRunnerCuda.java diff --git a/pytorch/include_list.pl b/pytorch/include_list.pl index 2b67e999ee2..a5153bb3f1d 100644 --- a/pytorch/include_list.pl +++ b/pytorch/include_list.pl @@ -52,7 +52,9 @@ sub go { chdir "cppbuild/linux-x86_64-gpu/pytorch/torch/include"; -go('torch/csrc/api/include/torch/torch.h', 'torch/script.h', 'torch/csrc/inductor/aoti_model_container_runner.h'); +# Doesn't compile on Windows. Waiting for 2.2.1. 
+#go('torch/csrc/api/include/torch/torch.h', 'torch/script.h', 'torch/csrc/inductor/aoti_model_container_runner.h'); +go('torch/csrc/api/include/torch/torch.h', 'torch/script.h'); print <"}) @StdMove TensorVector run( - @Cast({"", "std::vector"}) @StdMove TensorVector inputs); - - public native @ByVal BytePointerVector get_call_spec(); - - public AOTIModelContainerRunner( - @Cast("const char*") BytePointer model_path, - @Cast("size_t") long num_models, - @Cast("bool") boolean is_cpu, - @Cast("const char*") BytePointer cubin_dir) { super((Pointer)null); allocate(model_path, num_models, is_cpu, cubin_dir); } - private native void allocate( - @Cast("const char*") BytePointer model_path, - @Cast("size_t") long num_models, - @Cast("bool") boolean is_cpu, - @Cast("const char*") BytePointer cubin_dir); - public AOTIModelContainerRunner( - String model_path, - @Cast("size_t") long num_models, - @Cast("bool") boolean is_cpu, - String cubin_dir) { super((Pointer)null); allocate(model_path, num_models, is_cpu, cubin_dir); } - private native void allocate( - String model_path, - @Cast("size_t") long num_models, - @Cast("bool") boolean is_cpu, - String cubin_dir); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCpu.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCpu.java deleted file mode 100644 index 26cee36966f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCpu.java +++ /dev/null @@ -1,40 +0,0 @@ -// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::inductor") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AOTIModelContainerRunnerCpu extends AOTIModelContainerRunner { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/
-    public AOTIModelContainerRunnerCpu(Pointer p) { super(p); }
-
-    public AOTIModelContainerRunnerCpu(@Cast("const char*") BytePointer model_path, @Cast("size_t") long num_models/*=1*/) { super((Pointer)null); allocate(model_path, num_models); }
-    private native void allocate(@Cast("const char*") BytePointer model_path, @Cast("size_t") long num_models/*=1*/);
-    public AOTIModelContainerRunnerCpu(@Cast("const char*") BytePointer model_path) { super((Pointer)null); allocate(model_path); }
-    private native void allocate(@Cast("const char*") BytePointer model_path);
-    public AOTIModelContainerRunnerCpu(String model_path, @Cast("size_t") long num_models/*=1*/) { super((Pointer)null); allocate(model_path, num_models); }
-    private native void allocate(String model_path, @Cast("size_t") long num_models/*=1*/);
-    public AOTIModelContainerRunnerCpu(String model_path) { super((Pointer)null); allocate(model_path); }
-    private native void allocate(String model_path);
-
-    public native @Cast({"", "std::vector"}) @StdMove TensorVector run(
-        @Cast({"", "std::vector"}) @StdMove TensorVector inputs);
-
-    public native @ByVal BytePointerVector get_call_spec();
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DynamicLibrary.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DynamicLibrary.java
deleted file mode 100644
index a7b641d7013..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DynamicLibrary.java
+++ /dev/null
@@ -1,28 +0,0 @@
-// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
-import org.bytedeco.pytorch.Module;
-import org.bytedeco.javacpp.annotation.Cast;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-
-// Forward declare DynamicLibrary
-@Namespace("at") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class DynamicLibrary extends Pointer {
-    /** Empty constructor. Calls {@code super((Pointer)null)}. */
-    public DynamicLibrary() { super((Pointer)null); }
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ - public DynamicLibrary(Pointer p) { super(p); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AOTIModelContainerRunnerCuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AOTIModelContainerRunnerCuda.java deleted file mode 100644 index 5bbc854a2be..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AOTIModelContainerRunnerCuda.java +++ /dev/null @@ -1,58 +0,0 @@ -// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch.cuda; - -import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.cuda.functions.*; -import org.bytedeco.pytorch.Error; -import org.bytedeco.pytorch.global.torch.DeviceType; -import org.bytedeco.pytorch.global.torch.ScalarType; -import org.bytedeco.pytorch.global.torch.MemoryFormat; -import org.bytedeco.pytorch.Allocator; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; -import org.bytedeco.pytorch.*; -import static org.bytedeco.pytorch.global.torch.*; - -import static org.bytedeco.pytorch.global.torch_cuda.*; - - -@Namespace("torch::inductor") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) -public class AOTIModelContainerRunnerCuda extends AOTIModelContainerRunner { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public AOTIModelContainerRunnerCuda(Pointer p) { super(p); } - - public AOTIModelContainerRunnerCuda( - @Cast("const char*") BytePointer model_path, - @Cast("size_t") long num_models/*=1*/, - @Cast("const char*") BytePointer cubin_dir/*=nullptr*/) { super((Pointer)null); allocate(model_path, num_models, cubin_dir); } - private native void allocate( - @Cast("const char*") BytePointer model_path, - @Cast("size_t") long num_models/*=1*/, - @Cast("const char*") BytePointer cubin_dir/*=nullptr*/); - public AOTIModelContainerRunnerCuda( - @Cast("const char*") BytePointer model_path) { super((Pointer)null); allocate(model_path); } - private native void allocate( - @Cast("const char*") BytePointer model_path); - public AOTIModelContainerRunnerCuda( - String model_path, - @Cast("size_t") long num_models/*=1*/, - String cubin_dir/*=nullptr*/) { super((Pointer)null); allocate(model_path, num_models, cubin_dir); } - private native void allocate( - String model_path, - @Cast("size_t") long num_models/*=1*/, - String cubin_dir/*=nullptr*/); - public AOTIModelContainerRunnerCuda( - String model_path) { super((Pointer)null); allocate(model_path); } - private native void allocate( - String model_path); - - public native @Cast({"", "std::vector"}) @StdMove TensorVector run( - @Cast({"", "std::vector"}) @StdMove TensorVector inputs); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 9ebc248db96..b825dc8f7fd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -79699,28 +79699,6 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/csrc/inductor/aoti_model_container_runner.h - -// #if !defined(C10_MOBILE) && !defined(ANDROID) -// #pragma once - -// #include -// #include -// Targeting ../DynamicLibrary.java - - - -// Targeting ../AOTIModelContainerRunner.java - - -// Targeting 
../AOTIModelContainerRunnerCpu.java
-
-
-
- // namespace torch::inductor
-// #endif
-
-
 // Parsed from datasets.h
 
 /*
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java
index 66270cd5580..82a41460be3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java
@@ -1249,16 +1249,4 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda {
  // namespace c10
 
 
-// Parsed from torch/csrc/inductor/aoti_model_container_runner_cuda.h
-
-// #pragma once
-
-// #include
-// Targeting ../cuda/AOTIModelContainerRunnerCuda.java
-
-
-
- // namespace torch::inductor
-
-
 }
diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java
index 712cf3cdcc6..e5a4146e425 100644
--- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java
+++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java
@@ -61,7 +61,8 @@
         include = {
             "torch/torch.h",
             "torch/script.h",
-            "torch/csrc/inductor/aoti_model_container_runner.h",
+            // Doesn't compile on Windows. Waiting for 2.2.1.
+            //"torch/csrc/inductor/aoti_model_container_runner.h",
 
             // For inclusion in JNI only, not parsed (compiler needs some complete definitions)
             "torch/csrc/jit/runtime/instruction.h",
diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java
index 3f9689096f5..2b4e5466a0e 100644
--- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java
+++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java
@@ -40,7 +40,8 @@
             "ATen/cudnn/Descriptors.h",
             "ATen/cudnn/Types.h",
             "c10/cuda/CUDAGuard.h",
-            "torch/csrc/inductor/aoti_model_container_runner_cuda.h",
+            // Doesn't compile on Windows. Waiting for 2.2.1
+            // "torch/csrc/inductor/aoti_model_container_runner_cuda.h",
 
             // For inclusion in JNI only, not parsed
             "ATen/cuda/CUDAGeneratorImpl.h",
diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h
index 4195ee66a89..c1cb1af0ebb 100644
--- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h
+++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h
@@ -27,4 +27,5 @@
 #include "ATen/cudnn/Descriptors.h"
 #include "ATen/cudnn/Types.h"
 #include "c10/cuda/CUDAGuard.h"
-#include "torch/csrc/inductor/aoti_model_container_runner_cuda.h"
\ No newline at end of file
+// Doesn't compile on Windows. Waiting for 2.2.1.
+// #include "torch/csrc/inductor/aoti_model_container_runner_cuda.h"
\ No newline at end of file
diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h
index 81706181751..516c6889e7d 100644
--- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h
+++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h
@@ -1426,6 +1426,7 @@
 #include "torch/csrc/jit/serialization/pickle.h"
 // #include "torch/csrc/inductor/aoti_torch/c/shim.h" // model.so API, not part of libtorch API
 // #include "torch/csrc/inductor/aoti_runtime/interface.h" // model.so API, not part of libtorch API
-#include "torch/csrc/inductor/aoti_model_container_runner.h"
+// Doesn't compile on Windows.
Waiting for 2.2.1. +// #include "torch/csrc/inductor/aoti_model_container_runner.h" #include "datasets.h" \ No newline at end of file From 605704a2e5b7a122d24bfb0017d1e81fc02c4924 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Sun, 11 Feb 2024 18:34:31 +0100 Subject: [PATCH 21/24] Add Module.asX --- .../gen/java/org/bytedeco/pytorch/Module.java | 650 ++++++++++++++++++ .../org/bytedeco/pytorch/presets/torch.java | 1 + 2 files changed, 651 insertions(+) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java index fc0e895c829..063755fac57 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java @@ -393,6 +393,656 @@ public native void apply( * MyModule module; * module.apply(initialize_weights); * \endrst */ + + /// + /// + /// + public native @Name("as") @NoException(true) ModuleDictImpl asModuleDict(); + + /// + /// + /// + public native @Name("as") @NoException(true) ModuleListImpl asModuleList(); + + /// + /// + /// + public native @Name("as") @NoException(true) SequentialImpl asSequential(); + + /// + /// + /// + public native @Name("as") @NoException(true) ParameterDictImpl asParameterDict(); + + /// + /// + /// + public native @Name("as") @NoException(true) ParameterListImpl asParameterList(); + + /// + /// + /// + public native @Name("as") @NoException(true) AdaptiveLogSoftmaxWithLossImpl asAdaptiveLogSoftmaxWithLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) BatchNorm1dImpl asBatchNorm1d(); + + /// + /// + /// + public native @Name("as") @NoException(true) InstanceNorm1dImpl asInstanceNorm1d(); + + /// + /// + /// + public native @Name("as") @NoException(true) Conv1dImpl asConv1d(); + + /// + /// + /// + public native @Name("as") @NoException(true) ConvTranspose1dImpl asConvTranspose1d(); + + /// + /// + /// + public native @Name("as") @NoException(true) DropoutImpl asDropout(); + + /// + /// + /// + public native @Name("as") @NoException(true) BatchNorm2dImpl asBatchNorm2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) InstanceNorm2dImpl asInstanceNorm2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) Conv2dImpl asConv2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) ConvTranspose2dImpl asConvTranspose2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) Dropout2dImpl asDropout2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) BatchNorm3dImpl asBatchNorm3d(); + + /// + /// + /// + public native @Name("as") @NoException(true) InstanceNorm3dImpl asInstanceNorm3d(); + + /// + /// + /// + public native @Name("as") @NoException(true) Conv3dImpl asConv3d(); + + /// + /// + /// + public native @Name("as") @NoException(true) ConvTranspose3dImpl asConvTranspose3d(); + + /// + /// + /// + public native @Name("as") @NoException(true) Dropout3dImpl asDropout3d(); + + /// + /// + /// + public native @Name("as") @NoException(true) AlphaDropoutImpl asAlphaDropout(); + + /// + /// + /// + public native @Name("as") @NoException(true) FeatureAlphaDropoutImpl asFeatureAlphaDropout(); + + /// + /// + /// + public native @Name("as") @NoException(true) CosineSimilarityImpl asCosineSimilarity(); + + /// + /// + /// + public native @Name("as") @NoException(true) PairwiseDistanceImpl asPairwiseDistance(); + + /// + /// + /// + public native @Name("as") @NoException(true) EmbeddingImpl asEmbedding(); + + /// + /// 
+ /// + public native @Name("as") @NoException(true) EmbeddingBagImpl asEmbeddingBag(); + + /// + /// + /// + public native @Name("as") @NoException(true) FoldImpl asFold(); + + /// + /// + /// + public native @Name("as") @NoException(true) UnfoldImpl asUnfold(); + + /// + /// + /// + public native @Name("as") @NoException(true) IdentityImpl asIdentity(); + + /// + /// + /// + public native @Name("as") @NoException(true) LinearImpl asLinear(); + + /// + /// + /// + public native @Name("as") @NoException(true) BilinearImpl asBilinear(); + + /// + /// + /// + public native @Name("as") @NoException(true) FlattenImpl asFlatten(); + + /// + /// + /// + public native @Name("as") @NoException(true) UnflattenImpl asUnflatten(); + + /// + /// + /// + public native @Name("as") @NoException(true) L1LossImpl asL1Loss(); + + /// + /// + /// + public native @Name("as") @NoException(true) KLDivLossImpl asKLDivLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) MSELossImpl asMSELoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) BCELossImpl asBCELoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) HingeEmbeddingLossImpl asHingeEmbeddingLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) MultiMarginLossImpl asMultiMarginLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) CosineEmbeddingLossImpl asCosineEmbeddingLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) SmoothL1LossImpl asSmoothL1Loss(); + + /// + /// + /// + public native @Name("as") @NoException(true) HuberLossImpl asHuberLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) MultiLabelMarginLossImpl asMultiLabelMarginLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) SoftMarginLossImpl asSoftMarginLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) MultiLabelSoftMarginLossImpl asMultiLabelSoftMarginLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) TripletMarginLossImpl asTripletMarginLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) TripletMarginWithDistanceLossImpl asTripletMarginWithDistanceLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) CTCLossImpl asCTCLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) PoissonNLLLossImpl asPoissonNLLLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) MarginRankingLossImpl asMarginRankingLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) NLLLossImpl asNLLLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) CrossEntropyLossImpl asCrossEntropyLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) BCEWithLogitsLossImpl asBCEWithLogitsLoss(); + + /// + /// + /// + public native @Name("as") @NoException(true) ReflectionPad1dImpl asReflectionPad1d(); + + /// + /// + /// + public native @Name("as") @NoException(true) ReplicationPad1dImpl asReplicationPad1d(); + + /// + /// + /// + public native @Name("as") @NoException(true) ConstantPad1dImpl asConstantPad1d(); + + /// + /// + /// + public native @Name("as") @NoException(true) ZeroPad1dImpl asZeroPad1d(); + + /// + /// + /// + public native @Name("as") @NoException(true) AvgPool1dImpl asAvgPool1d(); + + /// + /// + /// + public native @Name("as") @NoException(true) MaxPool1dImpl asMaxPool1d(); + + /// + /// + /// + public native @Name("as") @NoException(true) 
AdaptiveAvgPool1dImpl asAdaptiveAvgPool1d(); + + /// + /// + /// + public native @Name("as") @NoException(true) AdaptiveMaxPool1dImpl asAdaptiveMaxPool1d(); + + /// + /// + /// + public native @Name("as") @NoException(true) MaxUnpool1dImpl asMaxUnpool1d(); + + /// + /// + /// + public native @Name("as") @NoException(true) LPPool1dImpl asLPPool1d(); + + /// + /// + /// + public native @Name("as") @NoException(true) ReflectionPad2dImpl asReflectionPad2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) ReplicationPad2dImpl asReplicationPad2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) ConstantPad2dImpl asConstantPad2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) ZeroPad2dImpl asZeroPad2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) AvgPool2dImpl asAvgPool2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) MaxPool2dImpl asMaxPool2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) AdaptiveAvgPool2dImpl asAdaptiveAvgPool2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) AdaptiveMaxPool2dImpl asAdaptiveMaxPool2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) MaxUnpool2dImpl asMaxUnpool2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) FractionalMaxPool2dImpl asFractionalMaxPool2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) LPPool2dImpl asLPPool2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) ReflectionPad3dImpl asReflectionPad3d(); + + /// + /// + /// + public native @Name("as") @NoException(true) ReplicationPad3dImpl asReplicationPad3d(); + + /// + /// + /// + public native @Name("as") @NoException(true) ConstantPad3dImpl asConstantPad3d(); + + /// + /// + /// + public native @Name("as") @NoException(true) ZeroPad3dImpl asZeroPad3d(); + + /// + /// + /// + public native @Name("as") @NoException(true) AvgPool3dImpl asAvgPool3d(); + + /// + /// + /// + public native @Name("as") @NoException(true) MaxPool3dImpl asMaxPool3d(); + + /// + /// + /// + public native @Name("as") @NoException(true) AdaptiveAvgPool3dImpl asAdaptiveAvgPool3d(); + + /// + /// + /// + public native @Name("as") @NoException(true) AdaptiveMaxPool3dImpl asAdaptiveMaxPool3d(); + + /// + /// + /// + public native @Name("as") @NoException(true) MaxUnpool3dImpl asMaxUnpool3d(); + + /// + /// + /// + public native @Name("as") @NoException(true) FractionalMaxPool3dImpl asFractionalMaxPool3d(); + + /// + /// + /// + public native @Name("as") @NoException(true) RNNImpl asRNN(); + + /// + /// + /// + public native @Name("as") @NoException(true) LSTMImpl asLSTM(); + + /// + /// + /// + public native @Name("as") @NoException(true) GRUImpl asGRU(); + + /// + /// + /// + public native @Name("as") @NoException(true) RNNCellImpl asRNNCell(); + + /// + /// + /// + public native @Name("as") @NoException(true) LSTMCellImpl asLSTMCell(); + + /// + /// + /// + public native @Name("as") @NoException(true) GRUCellImpl asGRUCell(); + + /// + /// + /// + public native @Name("as") @NoException(true) PixelShuffleImpl asPixelShuffle(); + + /// + /// + /// + public native @Name("as") @NoException(true) PixelUnshuffleImpl asPixelUnshuffle(); + + /// + /// + /// + public native @Name("as") @NoException(true) UpsampleImpl asUpsample(); + + /// + /// + /// + public native @Name("as") @NoException(true) ELUImpl asELU(); + + /// + /// + /// + public native @Name("as") @NoException(true) SELUImpl asSELU(); + + 
/// + /// + /// + public native @Name("as") @NoException(true) HardshrinkImpl asHardshrink(); + + /// + /// + /// + public native @Name("as") @NoException(true) HardtanhImpl asHardtanh(); + + /// + /// + /// + public native @Name("as") @NoException(true) LeakyReLUImpl asLeakyReLU(); + + /// + /// + /// + public native @Name("as") @NoException(true) LogSigmoidImpl asLogSigmoid(); + + /// + /// + /// + public native @Name("as") @NoException(true) SoftmaxImpl asSoftmax(); + + /// + /// + /// + public native @Name("as") @NoException(true) SoftminImpl asSoftmin(); + + /// + /// + /// + public native @Name("as") @NoException(true) LogSoftmaxImpl asLogSoftmax(); + + /// + /// + /// + public native @Name("as") @NoException(true) Softmax2dImpl asSoftmax2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) PReLUImpl asPReLU(); + + /// + /// + /// + public native @Name("as") @NoException(true) ReLUImpl asReLU(); + + /// + /// + /// + public native @Name("as") @NoException(true) ReLU6Impl asReLU6(); + + /// + /// + /// + public native @Name("as") @NoException(true) RReLUImpl asRReLU(); + + /// + /// + /// + public native @Name("as") @NoException(true) CELUImpl asCELU(); + + /// + /// + /// + public native @Name("as") @NoException(true) GLUImpl asGLU(); + + /// + /// + /// + public native @Name("as") @NoException(true) GELUImpl asGELU(); + + /// + /// + /// + public native @Name("as") @NoException(true) SiLUImpl asSiLU(); + + /// + /// + /// + public native @Name("as") @NoException(true) MishImpl asMish(); + + /// + /// + /// + public native @Name("as") @NoException(true) SigmoidImpl asSigmoid(); + + /// + /// + /// + public native @Name("as") @NoException(true) SoftplusImpl asSoftplus(); + + /// + /// + /// + public native @Name("as") @NoException(true) SoftshrinkImpl asSoftshrink(); + + /// + /// + /// + public native @Name("as") @NoException(true) SoftsignImpl asSoftsign(); + + /// + /// + /// + public native @Name("as") @NoException(true) TanhImpl asTanh(); + + /// + /// + /// + public native @Name("as") @NoException(true) TanhshrinkImpl asTanhshrink(); + + /// + /// + /// + public native @Name("as") @NoException(true) ThresholdImpl asThreshold(); + + /// + /// + /// + public native @Name("as") @NoException(true) MultiheadAttentionImpl asMultiheadAttention(); + + /// + /// + /// + public native @Name("as") @NoException(true) LayerNormImpl asLayerNorm(); + + /// + /// + /// + public native @Name("as") @NoException(true) LocalResponseNormImpl asLocalResponseNorm(); + + /// + /// + /// + public native @Name("as") @NoException(true) CrossMapLRN2dImpl asCrossMapLRN2d(); + + /// + /// + /// + public native @Name("as") @NoException(true) GroupNormImpl asGroupNorm(); + + /// + /// + /// + public native @Name("as") @NoException(true) TransformerEncoderLayerImpl asTransformerEncoderLayer(); + + /// + /// + /// + public native @Name("as") @NoException(true) TransformerDecoderLayerImpl asTransformerDecoderLayer(); + + /// + /// + /// + public native @Name("as") @NoException(true) TransformerEncoderImpl asTransformerEncoder(); + + /// + /// + /// + public native @Name("as") @NoException(true) TransformerDecoderImpl asTransformerDecoder(); + + /// + /// + /// + public native @Name("as") @NoException(true) TransformerImpl asTransformer(); /** Attempts to cast this {@code Module} to the given {@code ModuleType}. 
  *
diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java
index e5a4146e425..ad874abba5a 100644
--- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java
+++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java
@@ -215,6 +215,7 @@ public void mapModule(InfoMap infoMap, String name, String base, String baseBase
                 .put(new Info("torch::nn::Cloneable").pointerTypes(name + "ImplCloneable").purify())
                 .put(new Info("torch::nn::ModuleHolder").skip())
                 .put(new Info("torch::nn::" + name).skip())
+                .put(new Info("torch::nn::Module::as").javaNames("as" + name));
         ;
 
         if (anyModuleCompatible) {
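For illustration only, and not part of the patch itself: a minimal sketch of how the as-casts generated above might be used from Java. It assumes the resulting 2.2.1 presets on the classpath, that LinearImpl extends Module as in the generated class hierarchy, and that a failed cast maps the native nullptr of torch::nn::Module::as to a Java null; the class name, variable names, and layer sizes are made up, and weight().size(long) is an assumed Tensor accessor.

import org.bytedeco.pytorch.*;

public class AsCastSketch {
    public static void main(String[] args) {
        // A concrete module held through the generic Module base class,
        // e.g. as obtained while iterating over a model's children.
        Module m = new LinearImpl(4, 2);

        // Typed downcast via the new generated methods: returns the Impl,
        // or null when the underlying module is not of the requested type.
        LinearImpl linear = m.asLinear();
        if (linear != null) {
            System.out.println("weight rows: " + linear.weight().size(0)); // 2
            System.out.println("weight cols: " + linear.weight().size(1)); // 4
        }
        System.out.println("asConv2d() == null? " + (m.asConv2d() == null)); // true
    }
}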
"_120_0" : "64_12"; } else { From 7cb77c8fa64025200ecc9cc4f44d6f3ee4c7ec60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Fri, 23 Feb 2024 00:16:01 +0100 Subject: [PATCH 23/24] Update to Pytorch 2.2.1 --- pytorch/cppbuild.sh | 2 +- pytorch/platform/gpu/pom.xml | 2 +- pytorch/platform/pom.xml | 2 +- pytorch/pom.xml | 2 +- pytorch/samples/pom.xml | 4 ++-- pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java | 4 ++-- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pytorch/cppbuild.sh b/pytorch/cppbuild.sh index fe9e23675c0..8968c4e1f6c 100755 --- a/pytorch/cppbuild.sh +++ b/pytorch/cppbuild.sh @@ -35,7 +35,7 @@ if [[ $PLATFORM == windows* ]]; then export PYTHON_BIN_PATH=$(which python.exe) fi -PYTORCH_VERSION=2.2.0 +PYTORCH_VERSION=2.2.1 export PYTORCH_BUILD_VERSION="$PYTORCH_VERSION" export PYTORCH_BUILD_NUMBER=1 diff --git a/pytorch/platform/gpu/pom.xml b/pytorch/platform/gpu/pom.xml index a6d719c9d43..1e75fc89896 100644 --- a/pytorch/platform/gpu/pom.xml +++ b/pytorch/platform/gpu/pom.xml @@ -12,7 +12,7 @@ org.bytedeco pytorch-platform-gpu - 2.2.0-${project.parent.version} + 2.2.1-${project.parent.version} JavaCPP Presets Platform GPU for PyTorch diff --git a/pytorch/platform/pom.xml b/pytorch/platform/pom.xml index 7be6c3a7ca7..cdfa017a97a 100644 --- a/pytorch/platform/pom.xml +++ b/pytorch/platform/pom.xml @@ -12,7 +12,7 @@ org.bytedeco pytorch-platform - 2.2.0-${project.parent.version} + 2.2.1-${project.parent.version} JavaCPP Presets Platform for PyTorch diff --git a/pytorch/pom.xml b/pytorch/pom.xml index 676d8a70fb1..82a35b6b3cb 100644 --- a/pytorch/pom.xml +++ b/pytorch/pom.xml @@ -11,7 +11,7 @@ org.bytedeco pytorch - 2.2.0-${project.parent.version} + 2.2.1-${project.parent.version} JavaCPP Presets for PyTorch diff --git a/pytorch/samples/pom.xml b/pytorch/samples/pom.xml index 6ed2a5735d4..35eac3291b5 100644 --- a/pytorch/samples/pom.xml +++ b/pytorch/samples/pom.xml @@ -12,14 +12,14 @@ org.bytedeco pytorch-platform - 2.2.0-1.5.11-SNAPSHOT + 2.2.1-1.5.11-SNAPSHOT org.bytedeco pytorch-platform-gpu - 2.2.0-1.5.11-SNAPSHOT + 2.2.1-1.5.11-SNAPSHOT diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index b825dc8f7fd..497fbf2d99c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -77902,11 +77902,11 @@ scalar_t sf(scalar_t x, scalar_t y) public static final int TORCH_VERSION_MINOR = 2; /** Indicates the patch version of LibTorch. */ -public static final int TORCH_VERSION_PATCH = 0; +public static final int TORCH_VERSION_PATCH = 1; /** Indicates the version of LibTorch. 
From 7cb77c8fa64025200ecc9cc4f44d6f3ee4c7ec60 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Herv=C3=A9=20Guillemet?=
Date: Fri, 23 Feb 2024 00:16:01 +0100
Subject: [PATCH 23/24] Update to Pytorch 2.2.1

---
 pytorch/cppbuild.sh                                          | 2 +-
 pytorch/platform/gpu/pom.xml                                 | 2 +-
 pytorch/platform/pom.xml                                     | 2 +-
 pytorch/pom.xml                                              | 2 +-
 pytorch/samples/pom.xml                                      | 4 ++--
 pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java  | 4 ++--
 6 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/pytorch/cppbuild.sh b/pytorch/cppbuild.sh
index fe9e23675c0..8968c4e1f6c 100755
--- a/pytorch/cppbuild.sh
+++ b/pytorch/cppbuild.sh
@@ -35,7 +35,7 @@ if [[ $PLATFORM == windows* ]]; then
     export PYTHON_BIN_PATH=$(which python.exe)
 fi
 
-PYTORCH_VERSION=2.2.0
+PYTORCH_VERSION=2.2.1
 
 export PYTORCH_BUILD_VERSION="$PYTORCH_VERSION"
 export PYTORCH_BUILD_NUMBER=1
diff --git a/pytorch/platform/gpu/pom.xml b/pytorch/platform/gpu/pom.xml
index a6d719c9d43..1e75fc89896 100644
--- a/pytorch/platform/gpu/pom.xml
+++ b/pytorch/platform/gpu/pom.xml
@@ -12,7 +12,7 @@
   <groupId>org.bytedeco</groupId>
   <artifactId>pytorch-platform-gpu</artifactId>
-  <version>2.2.0-${project.parent.version}</version>
+  <version>2.2.1-${project.parent.version}</version>
   <name>JavaCPP Presets Platform GPU for PyTorch</name>
diff --git a/pytorch/platform/pom.xml b/pytorch/platform/pom.xml
index 7be6c3a7ca7..cdfa017a97a 100644
--- a/pytorch/platform/pom.xml
+++ b/pytorch/platform/pom.xml
@@ -12,7 +12,7 @@
   <groupId>org.bytedeco</groupId>
   <artifactId>pytorch-platform</artifactId>
-  <version>2.2.0-${project.parent.version}</version>
+  <version>2.2.1-${project.parent.version}</version>
   <name>JavaCPP Presets Platform for PyTorch</name>
diff --git a/pytorch/pom.xml b/pytorch/pom.xml
index 676d8a70fb1..82a35b6b3cb 100644
--- a/pytorch/pom.xml
+++ b/pytorch/pom.xml
@@ -11,7 +11,7 @@
   <groupId>org.bytedeco</groupId>
   <artifactId>pytorch</artifactId>
-  <version>2.2.0-${project.parent.version}</version>
+  <version>2.2.1-${project.parent.version}</version>
   <name>JavaCPP Presets for PyTorch</name>
diff --git a/pytorch/samples/pom.xml b/pytorch/samples/pom.xml
index 6ed2a5735d4..35eac3291b5 100644
--- a/pytorch/samples/pom.xml
+++ b/pytorch/samples/pom.xml
@@ -12,14 +12,14 @@
     <dependency>
       <groupId>org.bytedeco</groupId>
       <artifactId>pytorch-platform</artifactId>
-      <version>2.2.0-1.5.11-SNAPSHOT</version>
+      <version>2.2.1-1.5.11-SNAPSHOT</version>
     </dependency>
     <dependency>
      <groupId>org.bytedeco</groupId>
      <artifactId>pytorch-platform-gpu</artifactId>
-     <version>2.2.0-1.5.11-SNAPSHOT</version>
+     <version>2.2.1-1.5.11-SNAPSHOT</version>
    </dependency>
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java
index b825dc8f7fd..497fbf2d99c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java
@@ -77902,11 +77902,11 @@ scalar_t sf(scalar_t x, scalar_t y)
 public static final int TORCH_VERSION_MINOR = 2;
 
 /** Indicates the patch version of LibTorch. */
-public static final int TORCH_VERSION_PATCH = 0;
+public static final int TORCH_VERSION_PATCH = 1;
 
 /** Indicates the version of LibTorch. */
 public static final String TORCH_VERSION =
-    "2.2.0";
+    "2.2.1";
 
 // Parsed from torch/csrc/autograd/InferenceMode.h

From 04ccfc97f151b616ea559d5f46cc9bdf9c747bc9 Mon Sep 17 00:00:00 2001
From: Samuel Audet
Date: Sat, 2 Mar 2024 21:29:48 +0900
Subject: [PATCH 24/24] Update CHANGELOG.md and fix nits

---
 CHANGELOG.md      |  2 ++
 platform/pom.xml  |  2 +-
 pytorch/README.md | 10 +++++-----
 3 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 01cea8c5fd1..0a289c2c04a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,6 @@
+ * Upgrade presets for PyTorch 2.2.1 ([pull #1466](https://github.com/bytedeco/javacpp-presets/pull/1466))
+
 ### January 29, 2024 version 1.5.10
  * Introduce `macosx-arm64` builds for PyTorch ([pull #1463](https://github.com/bytedeco/javacpp-presets/pull/1463))
  * Reenable `linux-arm64` builds for CPython and NumPy ([pull #1386](https://github.com/bytedeco/javacpp-presets/pull/1386))
diff --git a/platform/pom.xml b/platform/pom.xml
index aad8a9aca6d..84fa45b000b 100644
--- a/platform/pom.xml
+++ b/platform/pom.xml
@@ -292,7 +292,7 @@
     <dependency>
       <groupId>org.bytedeco</groupId>
       <artifactId>pytorch-platform</artifactId>
-      <version>2.1.2-${project.version}</version>
+      <version>2.2.1-${project.version}</version>
     </dependency>
     <dependency>
       <groupId>org.bytedeco</groupId>
diff --git a/pytorch/README.md b/pytorch/README.md
index 0a90dd2002c..387204919f3 100644
--- a/pytorch/README.md
+++ b/pytorch/README.md
@@ -9,7 +9,7 @@ Introduction
 ------------
 This directory contains the JavaCPP Presets module for:
 
- * PyTorch 2.2.0  https://pytorch.org/
+ * PyTorch 2.2.1  https://pytorch.org/
 
 Please refer to the parent README.md file for more detailed information about the JavaCPP
 Presets.
@@ -48,28 +48,28 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic
     <dependency>
       <groupId>org.bytedeco</groupId>
      <artifactId>pytorch-platform</artifactId>
-      <version>2.2.0-1.5.11-SNAPSHOT</version>
+      <version>2.2.1-1.5.11-SNAPSHOT</version>
     </dependency>
     <dependency>
       <groupId>org.bytedeco</groupId>
       <artifactId>pytorch-platform-gpu</artifactId>
-      <version>2.2.0-1.5.11-SNAPSHOT</version>
+      <version>2.2.1-1.5.11-SNAPSHOT</version>
     </dependency>
     <dependency>
       <groupId>org.bytedeco</groupId>
       <artifactId>cuda-platform-redist</artifactId>
-      <version>12.3-8.9-1.5.10</version>
+      <version>12.3-8.9-1.5.11-SNAPSHOT</version>
     </dependency>
     <dependency>
       <groupId>org.bytedeco</groupId>
       <artifactId>mkl-platform-redist</artifactId>
-      <version>2024.0-1.5.10</version>
+      <version>2024.0-1.5.11-SNAPSHOT</version>
    </dependency>
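As a quick end-to-end check of the upgrade, a sketch (not part of the patches) that reads the constants shown in the global/torch.java hunk of PATCH 23. It assumes a pytorch-platform 2.2.1-1.5.11-SNAPSHOT dependency on the classpath as in the README hunk above; the class name is made up.

import static org.bytedeco.pytorch.global.torch.*;

public class VersionCheck {
    public static void main(String[] args) {
        // TORCH_VERSION and TORCH_VERSION_PATCH appear verbatim in the hunk above.
        System.out.println(TORCH_VERSION);                   // "2.2.1"
        System.out.println("patch: " + TORCH_VERSION_PATCH); // 1
    }
}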