From 2ff8ec4db848ce052b4e74928bb30f18903658f8 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Wed, 10 Sep 2025 15:18:11 +0900 Subject: [PATCH 001/101] SixLabors.ImageSharp version bump. Otherwise, dotnet build will fail. --- src/Examples.Utils/Examples.Utils.csproj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Examples.Utils/Examples.Utils.csproj b/src/Examples.Utils/Examples.Utils.csproj index a542b181d..620d5b487 100644 --- a/src/Examples.Utils/Examples.Utils.csproj +++ b/src/Examples.Utils/Examples.Utils.csproj @@ -17,7 +17,7 @@ - + From 5b821cb2f0b1b50e4ccac752feda7f3de73c8a68 Mon Sep 17 00:00:00 2001 From: ds5678 <49847914+ds5678@users.noreply.github.com> Date: Thu, 9 Jan 2025 19:50:26 -0800 Subject: [PATCH 002/101] Dispose `Scalar`s implicitly created in `Tensor` operators --- src/TorchSharp/Tensor/Tensor.Operators.cs | 225 ++++++++++++++++++---- 1 file changed, 184 insertions(+), 41 deletions(-) diff --git a/src/TorchSharp/Tensor/Tensor.Operators.cs b/src/TorchSharp/Tensor/Tensor.Operators.cs index 31e8f7449..e74664cd7 100644 --- a/src/TorchSharp/Tensor/Tensor.Operators.cs +++ b/src/TorchSharp/Tensor/Tensor.Operators.cs @@ -12,66 +12,209 @@ public partial class Tensor public static Tensor operator +(Tensor left, Scalar right) => left.add(right); public static Tensor operator +(Scalar left, Tensor right) => right.add(left); - public static Tensor operator +(Tensor left, int right) => left.add(right); - public static Tensor operator +(Tensor left, long right) => left.add(right); - public static Tensor operator +(Tensor left, float right) => left.add(right); - public static Tensor operator +(Tensor left, double right) => left.add(right); - - public static Tensor operator +(int left, Tensor right) => right.add(left); - public static Tensor operator +(long left, Tensor right) => right.add(left); - public static Tensor operator +(float left, Tensor right) => right.add(left); - public static Tensor operator +(double left, Tensor right) => right.add(left); + public static Tensor operator +(Tensor left, int right) + { + using Scalar scalar = right; + return left.add(scalar); + } + public static Tensor operator +(Tensor left, long right) + { + using Scalar scalar = right; + return left.add(scalar); + } + public static Tensor operator +(Tensor left, float right) + { + using Scalar scalar = right; + return left.add(scalar); + } + public static Tensor operator +(Tensor left, double right) + { + using Scalar scalar = right; + return left.add(scalar); + } + + public static Tensor operator +(int left, Tensor right) + { + using Scalar scalar = left; + return right.add(scalar); + } + public static Tensor operator +(long left, Tensor right) + { + using Scalar scalar = left; + return right.add(scalar); + } + public static Tensor operator +(float left, Tensor right) + { + using Scalar scalar = left; + return right.add(scalar); + } + public static Tensor operator +(double left, Tensor right) + { + using Scalar scalar = left; + return right.add(scalar); + } public static Tensor operator *(Tensor left, Tensor right) => left.mul(right); public static Tensor operator *(Tensor left, Scalar right) => left.mul(right); public static Tensor operator *(Scalar left, Tensor right) => right.mul(left); - public static Tensor operator *(Tensor left, int right) => left.mul(right); - public static Tensor operator *(Tensor left, long right) => left.mul(right); - public static Tensor operator *(Tensor left, float right) => left.mul(right); - public static Tensor operator *(Tensor left, double 
right) => left.mul(right); - - public static Tensor operator *(int left, Tensor right) => right.mul(left); - public static Tensor operator *(long left, Tensor right) => right.mul(left); - public static Tensor operator *(float left, Tensor right) => right.mul(left); - public static Tensor operator *(double left, Tensor right) => right.mul(left); + public static Tensor operator *(Tensor left, int right) + { + using Scalar scalar = right; + return left.mul(scalar); + } + public static Tensor operator *(Tensor left, long right) + { + using Scalar scalar = right; + return left.mul(scalar); + } + public static Tensor operator *(Tensor left, float right) + { + using Scalar scalar = right; + return left.mul(scalar); + } + public static Tensor operator *(Tensor left, double right) + { + using Scalar scalar = right; + return left.mul(scalar); + } + + public static Tensor operator *(int left, Tensor right) + { + using Scalar scalar = left; + return right.mul(scalar); + } + public static Tensor operator *(long left, Tensor right) + { + using Scalar scalar = left; + return right.mul(scalar); + } + public static Tensor operator *(float left, Tensor right) + { + using Scalar scalar = left; + return right.mul(scalar); + } + public static Tensor operator *(double left, Tensor right) + { + using Scalar scalar = left; + return right.mul(scalar); + } public static Tensor operator -(Tensor left, Tensor right) => left.sub(right); public static Tensor operator -(Tensor left, Scalar right) => left.sub(right); public static Tensor operator -(Scalar left, Tensor right) => right.negative().add(left); - public static Tensor operator -(Tensor left, int right) => left.sub(right); - public static Tensor operator -(Tensor left, long right) => left.sub(right); - public static Tensor operator -(Tensor left, float right) => left.sub(right); - public static Tensor operator -(Tensor left, double right) => left.sub(right); - - public static Tensor operator -(int left, Tensor right) => right.negative().add(left); - public static Tensor operator -(long left, Tensor right) => right.negative().add(left); - public static Tensor operator -(float left, Tensor right) => right.negative().add(left); - public static Tensor operator -(double left, Tensor right) => right.negative().add(left); + public static Tensor operator -(Tensor left, int right) + { + using Scalar scalar = right; + return left.sub(scalar); + } + public static Tensor operator -(Tensor left, long right) + { + using Scalar scalar = right; + return left.sub(scalar); + } + public static Tensor operator -(Tensor left, float right) + { + using Scalar scalar = right; + return left.sub(scalar); + } + public static Tensor operator -(Tensor left, double right) + { + using Scalar scalar = right; + return left.sub(scalar); + } + + public static Tensor operator -(int left, Tensor right) + { + using Scalar scalar = left; + return right.negative().add(scalar); + } + public static Tensor operator -(long left, Tensor right) + { + using Scalar scalar = left; + return right.negative().add(scalar); + } + public static Tensor operator -(float left, Tensor right) + { + using Scalar scalar = left; + return right.negative().add(scalar); + } + public static Tensor operator -(double left, Tensor right) + { + using Scalar scalar = left; + return right.negative().add(scalar); + } public static Tensor operator /(Tensor left, Tensor right) => left.div(right); public static Tensor operator /(Tensor left, Scalar right) => left.div(right); public static Tensor operator /(Scalar left, Tensor right) => 
right.reciprocal().mul(left); - public static Tensor operator /(Tensor left, int right) => left.div(right); - public static Tensor operator /(Tensor left, long right) => left.div(right); - public static Tensor operator /(Tensor left, float right) => left.div(right); - public static Tensor operator /(Tensor left, double right) => left.div(right); - - public static Tensor operator /(int left, Tensor right) => right.reciprocal().mul(left); - public static Tensor operator /(long left, Tensor right) => right.reciprocal().mul(left); - public static Tensor operator /(float left, Tensor right) => right.reciprocal().mul(left); - public static Tensor operator /(double left, Tensor right) => right.reciprocal().mul(left); - + public static Tensor operator /(Tensor left, int right) + { + using Scalar scalar = right; + return left.div(scalar); + } + public static Tensor operator /(Tensor left, long right) + { + using Scalar scalar = right; + return left.div(scalar); + } + public static Tensor operator /(Tensor left, float right) + { + using Scalar scalar = right; + return left.div(scalar); + } + public static Tensor operator /(Tensor left, double right) + { + using Scalar scalar = right; + return left.div(scalar); + } + + public static Tensor operator /(int left, Tensor right) + { + using Scalar scalar = left; + return right.reciprocal().mul(scalar); + } + public static Tensor operator /(long left, Tensor right) + { + using Scalar scalar = left; + return right.reciprocal().mul(scalar); + } + public static Tensor operator /(float left, Tensor right) + { + using Scalar scalar = left; + return right.reciprocal().mul(scalar); + } + public static Tensor operator /(double left, Tensor right) + { + using Scalar scalar = left; + return right.reciprocal().mul(scalar); + } public static Tensor operator %(Tensor left, Tensor right) => left.remainder(right); public static Tensor operator %(Tensor left, Scalar right) => left.remainder(right); - public static Tensor operator %(Tensor left, int right) => left.remainder(right); - public static Tensor operator %(Tensor left, long right) => left.remainder(right); - public static Tensor operator %(Tensor left, float right) => left.remainder(right); - public static Tensor operator %(Tensor left, double right) => left.remainder(right); + public static Tensor operator %(Tensor left, int right) + { + using Scalar scalar = right; + return left.remainder(scalar); + } + public static Tensor operator %(Tensor left, long right) + { + using Scalar scalar = right; + return left.remainder(scalar); + } + public static Tensor operator %(Tensor left, float right) + { + using Scalar scalar = right; + return left.remainder(scalar); + } + public static Tensor operator %(Tensor left, double right) + { + using Scalar scalar = right; + return left.remainder(scalar); + } public static Tensor operator &(Tensor left, Tensor right) => left.bitwise_and(right); From 069bb0d1652aab7762980fbf772b483948e88fc9 Mon Sep 17 00:00:00 2001 From: ds5678 <49847914+ds5678@users.noreply.github.com> Date: Thu, 9 Jan 2025 19:56:27 -0800 Subject: [PATCH 003/101] Use Scalar operators in the primitive overloads for clarity and maintainability --- src/TorchSharp/Tensor/Tensor.Operators.cs | 72 +++++++++++------------ 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/src/TorchSharp/Tensor/Tensor.Operators.cs b/src/TorchSharp/Tensor/Tensor.Operators.cs index e74664cd7..564d1da42 100644 --- a/src/TorchSharp/Tensor/Tensor.Operators.cs +++ b/src/TorchSharp/Tensor/Tensor.Operators.cs @@ -15,43 +15,43 @@ public 
partial class Tensor public static Tensor operator +(Tensor left, int right) { using Scalar scalar = right; - return left.add(scalar); + return left + scalar; } public static Tensor operator +(Tensor left, long right) { using Scalar scalar = right; - return left.add(scalar); + return left + scalar; } public static Tensor operator +(Tensor left, float right) { using Scalar scalar = right; - return left.add(scalar); + return left + scalar; } public static Tensor operator +(Tensor left, double right) { using Scalar scalar = right; - return left.add(scalar); + return left + scalar; } public static Tensor operator +(int left, Tensor right) { using Scalar scalar = left; - return right.add(scalar); + return scalar + right; } public static Tensor operator +(long left, Tensor right) { using Scalar scalar = left; - return right.add(scalar); + return scalar + right; } public static Tensor operator +(float left, Tensor right) { using Scalar scalar = left; - return right.add(scalar); + return scalar + right; } public static Tensor operator +(double left, Tensor right) { using Scalar scalar = left; - return right.add(scalar); + return scalar + right; } public static Tensor operator *(Tensor left, Tensor right) => left.mul(right); @@ -61,43 +61,43 @@ public partial class Tensor public static Tensor operator *(Tensor left, int right) { using Scalar scalar = right; - return left.mul(scalar); + return left * scalar; } public static Tensor operator *(Tensor left, long right) { using Scalar scalar = right; - return left.mul(scalar); + return left * scalar; } public static Tensor operator *(Tensor left, float right) { using Scalar scalar = right; - return left.mul(scalar); + return left * scalar; } public static Tensor operator *(Tensor left, double right) { using Scalar scalar = right; - return left.mul(scalar); + return left * scalar; } public static Tensor operator *(int left, Tensor right) { using Scalar scalar = left; - return right.mul(scalar); + return scalar * right; } public static Tensor operator *(long left, Tensor right) { using Scalar scalar = left; - return right.mul(scalar); + return scalar * right; } public static Tensor operator *(float left, Tensor right) { using Scalar scalar = left; - return right.mul(scalar); + return scalar * right; } public static Tensor operator *(double left, Tensor right) { using Scalar scalar = left; - return right.mul(scalar); + return scalar * right; } public static Tensor operator -(Tensor left, Tensor right) => left.sub(right); @@ -107,43 +107,43 @@ public partial class Tensor public static Tensor operator -(Tensor left, int right) { using Scalar scalar = right; - return left.sub(scalar); + return left - scalar; } public static Tensor operator -(Tensor left, long right) { using Scalar scalar = right; - return left.sub(scalar); + return left - scalar; } public static Tensor operator -(Tensor left, float right) { using Scalar scalar = right; - return left.sub(scalar); + return left - scalar; } public static Tensor operator -(Tensor left, double right) { using Scalar scalar = right; - return left.sub(scalar); + return left - scalar; } public static Tensor operator -(int left, Tensor right) { using Scalar scalar = left; - return right.negative().add(scalar); + return scalar - right; } public static Tensor operator -(long left, Tensor right) { using Scalar scalar = left; - return right.negative().add(scalar); + return scalar - right; } public static Tensor operator -(float left, Tensor right) { using Scalar scalar = left; - return right.negative().add(scalar); + 
return scalar - right; } public static Tensor operator -(double left, Tensor right) { using Scalar scalar = left; - return right.negative().add(scalar); + return scalar - right; } public static Tensor operator /(Tensor left, Tensor right) => left.div(right); @@ -153,43 +153,43 @@ public partial class Tensor public static Tensor operator /(Tensor left, int right) { using Scalar scalar = right; - return left.div(scalar); + return left / scalar; } public static Tensor operator /(Tensor left, long right) { using Scalar scalar = right; - return left.div(scalar); + return left / scalar; } public static Tensor operator /(Tensor left, float right) { using Scalar scalar = right; - return left.div(scalar); + return left / scalar; } public static Tensor operator /(Tensor left, double right) { using Scalar scalar = right; - return left.div(scalar); + return left / scalar; } public static Tensor operator /(int left, Tensor right) { using Scalar scalar = left; - return right.reciprocal().mul(scalar); + return scalar / right; } public static Tensor operator /(long left, Tensor right) { using Scalar scalar = left; - return right.reciprocal().mul(scalar); + return scalar / right; } public static Tensor operator /(float left, Tensor right) { using Scalar scalar = left; - return right.reciprocal().mul(scalar); + return scalar / right; } public static Tensor operator /(double left, Tensor right) { using Scalar scalar = left; - return right.reciprocal().mul(scalar); + return scalar / right; } public static Tensor operator %(Tensor left, Tensor right) => left.remainder(right); @@ -198,22 +198,22 @@ public partial class Tensor public static Tensor operator %(Tensor left, int right) { using Scalar scalar = right; - return left.remainder(scalar); + return left % scalar; } public static Tensor operator %(Tensor left, long right) { using Scalar scalar = right; - return left.remainder(scalar); + return left % scalar; } public static Tensor operator %(Tensor left, float right) { using Scalar scalar = right; - return left.remainder(scalar); + return left % scalar; } public static Tensor operator %(Tensor left, double right) { using Scalar scalar = right; - return left.remainder(scalar); + return left % scalar; } public static Tensor operator &(Tensor left, Tensor right) => left.bitwise_and(right); From 5c79729ba97cca7a4ea5cc5d792e1057926f3cb4 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Wed, 10 Sep 2025 16:31:12 +0900 Subject: [PATCH 004/101] Introduce TorchSharp.ScalarLeakDetector. In the following case, at least 266 exceptions are observed. * allowImplicitConversionOperator = false * dotnet test /p:SkipCuda=true /p:SkipNetFxBuild=true --blame test\TorchSharpTest\TorchSharpTest.csproj -c Release * Update src/TorchSharp/Scalar.cs. + Introduce ScalarLeakDetector. + Update Scalar. - Use ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed. --- src/TorchSharp/Scalar.cs | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/src/TorchSharp/Scalar.cs b/src/TorchSharp/Scalar.cs index cfe92cd98..4ca217f7c 100644 --- a/src/TorchSharp/Scalar.cs +++ b/src/TorchSharp/Scalar.cs @@ -5,6 +5,28 @@ #nullable enable namespace TorchSharp { + /// + /// Represents a leak detector for Scalar. + /// + public static partial class ScalarLeakDetector + { + /// + /// Allows implicit conversion from a .NET scalar value to Scalar.
+ /// FIXME: Defaults to true for compatibility with 0.105.1 and earlier.
+ public static bool allowImplicitConversionOperator { get; set; } = true; + /// + /// Throws an exception if implicit conversion is not allowed. + /// + /// + public static void ThrowIfImplicitConversionNotAllowed() + { + if (!allowImplicitConversionOperator) + { + throw new InvalidCastException("Unexpected implicit conversion to Scalar."); + } + } + } /// /// Represents a dynamically typed scalar value to the LibTorch runtime. /// @@ -31,6 +53,7 @@ internal Scalar(IntPtr handle) /// The scalar value. public static implicit operator Scalar(byte value) { + ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed(); return value.ToScalar(); } @@ -40,6 +63,7 @@ public static implicit operator Scalar(byte value) /// The scalar value. public static implicit operator Scalar(sbyte value) { + ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed(); return value.ToScalar(); } @@ -49,6 +73,7 @@ public static implicit operator Scalar(sbyte value) /// The scalar value. public static implicit operator Scalar(short value) { + ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed(); return value.ToScalar(); } @@ -58,6 +83,7 @@ public static implicit operator Scalar(short value) /// The scalar value. public static implicit operator Scalar(int value) { + ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed(); return value.ToScalar(); } @@ -67,6 +93,7 @@ public static implicit operator Scalar(int value) /// The scalar value. public static implicit operator Scalar(long value) { + ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed(); return value.ToScalar(); } @@ -77,6 +104,7 @@ public static implicit operator Scalar(long value) /// The scalar value. public static implicit operator Scalar(Half value) { + ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed(); return value.ToScalar(); } #endif @@ -87,6 +115,7 @@ public static implicit operator Scalar(Half value) /// The scalar value. public static implicit operator Scalar(float value) { + ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed(); return value.ToScalar(); } @@ -96,6 +125,7 @@ public static implicit operator Scalar(float value) /// The scalar value. public static implicit operator Scalar(double value) { + ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed(); return value.ToScalar(); } @@ -105,6 +135,7 @@ public static implicit operator Scalar(double value) /// The scalar value. public static implicit operator Scalar(bool value) { + ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed(); return value.ToScalar(); } @@ -114,6 +145,7 @@ public static implicit operator Scalar(bool value) /// The scalar value. public static implicit operator Scalar((float, float) value) { + ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed(); return value.ToScalar(); } @@ -123,6 +155,7 @@ public static implicit operator Scalar((float, float) value) /// The scalar value. public static implicit operator Scalar(System.Numerics.Complex value) { + ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed(); return value.ToScalar(); } From fb0ff439c7298e72a659c581a5857a4ab740dfec Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Wed, 10 Sep 2025 17:08:41 +0900 Subject: [PATCH 005/101] Introduce TorchSharp.TensorLeakDetector. In the following case, at least 45 exceptions are observed. * allowImplicitConversionOperator = false * dotnet test /p:SkipCuda=true /p:SkipNetFxBuild=true --blame test\TorchSharpTest\TorchSharpTest.csproj -c Release * Update src/TorchSharp/Tensor/Tensor.cs + Introduce TensorLeakDetector. + Update Tensor. 
- Use TensorLeakDetector.ThrowIfImplicitConversionNotAllowed. --- src/TorchSharp/Tensor/Tensor.cs | 40 +++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index c17995a52..94dbc5008 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -14,6 +14,24 @@ #nullable enable namespace TorchSharp { + public static partial class TensorLeakDetector + { + /// + /// Allows implicit conversion to torch.Tensor.
+ /// FIXME: Defaults to true for compatibility with 0.105.1 and earlier.
+ public static bool allowImplicitConversionOperator { get; set; } = true; + /// + /// Throws an exception if implicit conversion is not allowed. + /// + /// + public static void ThrowIfImplicitConversionNotAllowed() + { + if (!allowImplicitConversionOperator) { + throw new InvalidCastException("Unexpected implicit conversion to torch.Tensor."); + } + } + } public static partial class torch { /// @@ -6321,6 +6339,7 @@ public Tensor where(Tensor condition, Tensor y) /// The numeric value. public static implicit operator Tensor(byte value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6330,6 +6349,7 @@ public static implicit operator Tensor(byte value) /// The numeric value. public static implicit operator Tensor(sbyte value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6339,6 +6359,7 @@ public static implicit operator Tensor(sbyte value) /// The numeric value. public static implicit operator Tensor(short value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6348,6 +6369,7 @@ public static implicit operator Tensor(short value) /// The numeric value. public static implicit operator Tensor(int value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6357,6 +6379,7 @@ public static implicit operator Tensor(int value) /// The numeric value. public static implicit operator Tensor(long value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6366,6 +6389,7 @@ public static implicit operator Tensor(long value) /// The numeric value. public static implicit operator Tensor(float value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6375,6 +6399,7 @@ public static implicit operator Tensor(float value) /// The numeric value. public static implicit operator Tensor(double value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6384,6 +6409,7 @@ public static implicit operator Tensor(double value) /// The numeric value. public static implicit operator Tensor(bool value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6393,6 +6419,7 @@ public static implicit operator Tensor(bool value) /// The numeric value. public static implicit operator Tensor((float, float) value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6402,6 +6429,7 @@ public static implicit operator Tensor((float, float) value) /// The numeric value. public static implicit operator Tensor(System.Numerics.Complex value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6411,6 +6439,7 @@ public static implicit operator Tensor(System.Numerics.Complex value) /// The numeric value array. public static implicit operator Tensor(byte[] value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6420,6 +6449,7 @@ public static implicit operator Tensor(byte[] value) /// The numeric value array. public static implicit operator Tensor(sbyte[] value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6429,6 +6459,7 @@ public static implicit operator Tensor(sbyte[] value) /// The numeric value array. 
public static implicit operator Tensor(short[] value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6438,6 +6469,7 @@ public static implicit operator Tensor(short[] value) /// The numeric value array. public static implicit operator Tensor(int[] value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6447,6 +6479,7 @@ public static implicit operator Tensor(int[] value) /// The numeric value array. public static implicit operator Tensor(long[] value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6456,6 +6489,7 @@ public static implicit operator Tensor(long[] value) /// The numeric value array. public static implicit operator Tensor(float[] value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6465,6 +6499,7 @@ public static implicit operator Tensor(float[] value) /// The numeric value array. public static implicit operator Tensor(double[] value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6474,6 +6509,7 @@ public static implicit operator Tensor(double[] value) /// The numeric value array. public static implicit operator Tensor(bool[] value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6483,6 +6519,7 @@ public static implicit operator Tensor(bool[] value) /// The numeric value array. public static implicit operator Tensor((float, float)[] value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6492,6 +6529,7 @@ public static implicit operator Tensor((float, float)[] value) /// The numeric value array. public static implicit operator Tensor(System.Numerics.Complex[] value) { + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return torch.tensor(value); } @@ -6502,6 +6540,7 @@ public static implicit operator Tensor(System.Numerics.Complex[] value) public static implicit operator Tensor(Scalar scalar) { _throw(); + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return new Tensor(IntPtr.Zero); } @@ -7246,6 +7285,7 @@ public static implicit operator TensorIndex(long value) public static implicit operator Tensor(TensorIndex value) { _throw(); + TensorLeakDetector.ThrowIfImplicitConversionNotAllowed(); return new Tensor(IntPtr.Zero); } From 512a2ba90e041dbc51c30f1e9460f05e5b693a40 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Thu, 11 Sep 2025 11:44:53 +0900 Subject: [PATCH 006/101] Declare TorchSharp.Scalar more explicitly. * Update src/TorchSharp/Tensor/Tensor.Operators.cs. + Declare TorchSharp.Scalar more explicitly. - Use prefix for left or right. - Call ToScalar explicitly. 
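
A free-standing sketch of the pattern this commit applies (the helper name and the constant are illustrative, not taken from the diff): convert with an explicit ToScalar() call so the native allocation is visible, give the temporary a left_/right_ prefixed name, and let the `using` declaration dispose it deterministically.

    using TorchSharp;
    using static TorchSharp.torch;

    static Tensor AddFive(Tensor left)
    {
        // Explicit conversion with a prefixed name; disposed at end of scope
        // instead of being left to the finalizer.
        using Scalar right_scalar = 5.ToScalar();
        return left + right_scalar;   // resolves to operator +(Tensor, Scalar)
    }

    using Tensor x = torch.ones(3);
    using Tensor y = AddFive(x);      // no implicitly created Scalar survives the call
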
--- src/TorchSharp/Tensor/Tensor.Operators.cs | 144 +++++++++++----------- 1 file changed, 72 insertions(+), 72 deletions(-) diff --git a/src/TorchSharp/Tensor/Tensor.Operators.cs b/src/TorchSharp/Tensor/Tensor.Operators.cs index 564d1da42..bd12e56d6 100644 --- a/src/TorchSharp/Tensor/Tensor.Operators.cs +++ b/src/TorchSharp/Tensor/Tensor.Operators.cs @@ -14,44 +14,44 @@ public partial class Tensor public static Tensor operator +(Tensor left, int right) { - using Scalar scalar = right; - return left + scalar; + using Scalar right_scalar = right.ToScalar(); + return left + right_scalar; } public static Tensor operator +(Tensor left, long right) { - using Scalar scalar = right; - return left + scalar; + using Scalar right_scalar = right.ToScalar(); + return left + right_scalar; } public static Tensor operator +(Tensor left, float right) { - using Scalar scalar = right; - return left + scalar; + using Scalar right_scalar = right.ToScalar(); + return left + right_scalar; } public static Tensor operator +(Tensor left, double right) { - using Scalar scalar = right; - return left + scalar; + using Scalar right_scalar = right.ToScalar(); + return left + right_scalar; } public static Tensor operator +(int left, Tensor right) { - using Scalar scalar = left; - return scalar + right; + using Scalar left_scalar = left.ToScalar(); + return left_scalar + right; } public static Tensor operator +(long left, Tensor right) { - using Scalar scalar = left; - return scalar + right; + using Scalar left_scalar = left.ToScalar(); + return left_scalar + right; } public static Tensor operator +(float left, Tensor right) { - using Scalar scalar = left; - return scalar + right; + using Scalar left_scalar = left.ToScalar(); + return left_scalar + right; } public static Tensor operator +(double left, Tensor right) { - using Scalar scalar = left; - return scalar + right; + using Scalar left_scalar = left.ToScalar(); + return left_scalar + right; } public static Tensor operator *(Tensor left, Tensor right) => left.mul(right); @@ -60,44 +60,44 @@ public partial class Tensor public static Tensor operator *(Tensor left, int right) { - using Scalar scalar = right; - return left * scalar; + using Scalar right_scalar = right.ToScalar(); + return left * right_scalar; } public static Tensor operator *(Tensor left, long right) { - using Scalar scalar = right; - return left * scalar; + using Scalar right_scalar = right.ToScalar(); + return left * right_scalar; } public static Tensor operator *(Tensor left, float right) { - using Scalar scalar = right; - return left * scalar; + using Scalar right_scalar = right.ToScalar(); + return left * right_scalar; } public static Tensor operator *(Tensor left, double right) { - using Scalar scalar = right; - return left * scalar; + using Scalar right_scalar = right.ToScalar(); + return left * right_scalar; } public static Tensor operator *(int left, Tensor right) { - using Scalar scalar = left; - return scalar * right; + using Scalar left_scalar = left.ToScalar(); + return left_scalar * right; } public static Tensor operator *(long left, Tensor right) { - using Scalar scalar = left; - return scalar * right; + using Scalar left_scalar = left.ToScalar(); + return left_scalar * right; } public static Tensor operator *(float left, Tensor right) { - using Scalar scalar = left; - return scalar * right; + using Scalar left_scalar = left.ToScalar(); + return left_scalar * right; } public static Tensor operator *(double left, Tensor right) { - using Scalar scalar = left; - return scalar * right; + using 
Scalar left_scalar = left.ToScalar(); + return left_scalar * right; } public static Tensor operator -(Tensor left, Tensor right) => left.sub(right); @@ -106,44 +106,44 @@ public partial class Tensor public static Tensor operator -(Tensor left, int right) { - using Scalar scalar = right; - return left - scalar; + using Scalar right_scalar = right.ToScalar(); + return left - right_scalar; } public static Tensor operator -(Tensor left, long right) { - using Scalar scalar = right; - return left - scalar; + using Scalar right_scalar = right.ToScalar(); + return left - right_scalar; } public static Tensor operator -(Tensor left, float right) { - using Scalar scalar = right; - return left - scalar; + using Scalar right_scalar = right.ToScalar(); + return left - right_scalar; } public static Tensor operator -(Tensor left, double right) { - using Scalar scalar = right; - return left - scalar; + using Scalar right_scalar = right.ToScalar(); + return left - right_scalar; } public static Tensor operator -(int left, Tensor right) { - using Scalar scalar = left; - return scalar - right; + using Scalar left_scalar = left.ToScalar(); + return left_scalar - right; } public static Tensor operator -(long left, Tensor right) { - using Scalar scalar = left; - return scalar - right; + using Scalar left_scalar = left.ToScalar(); + return left_scalar - right; } public static Tensor operator -(float left, Tensor right) { - using Scalar scalar = left; - return scalar - right; + using Scalar left_scalar = left.ToScalar(); + return left_scalar - right; } public static Tensor operator -(double left, Tensor right) { - using Scalar scalar = left; - return scalar - right; + using Scalar left_scalar = left.ToScalar(); + return left_scalar - right; } public static Tensor operator /(Tensor left, Tensor right) => left.div(right); @@ -152,44 +152,44 @@ public partial class Tensor public static Tensor operator /(Tensor left, int right) { - using Scalar scalar = right; - return left / scalar; + using Scalar right_scalar = right.ToScalar(); + return left / right_scalar; } public static Tensor operator /(Tensor left, long right) { - using Scalar scalar = right; - return left / scalar; + using Scalar right_scalar = right.ToScalar(); + return left / right_scalar; } public static Tensor operator /(Tensor left, float right) { - using Scalar scalar = right; - return left / scalar; + using Scalar right_scalar = right.ToScalar(); + return left / right_scalar; } public static Tensor operator /(Tensor left, double right) { - using Scalar scalar = right; - return left / scalar; + using Scalar right_scalar = right.ToScalar(); + return left / right_scalar; } public static Tensor operator /(int left, Tensor right) { - using Scalar scalar = left; - return scalar / right; + using Scalar left_scalar = left.ToScalar(); + return left_scalar / right; } public static Tensor operator /(long left, Tensor right) { - using Scalar scalar = left; - return scalar / right; + using Scalar left_scalar = left.ToScalar(); + return left_scalar / right; } public static Tensor operator /(float left, Tensor right) { - using Scalar scalar = left; - return scalar / right; + using Scalar left_scalar = left.ToScalar(); + return left_scalar / right; } public static Tensor operator /(double left, Tensor right) { - using Scalar scalar = left; - return scalar / right; + using Scalar left_scalar = left.ToScalar(); + return left_scalar / right; } public static Tensor operator %(Tensor left, Tensor right) => left.remainder(right); @@ -197,23 +197,23 @@ public partial class 
Tensor public static Tensor operator %(Tensor left, int right) { - using Scalar scalar = right; - return left % scalar; + using Scalar right_scalar = right.ToScalar(); + return left % right_scalar; } public static Tensor operator %(Tensor left, long right) { - using Scalar scalar = right; - return left % scalar; + using Scalar right_scalar = right.ToScalar(); + return left % right_scalar; } public static Tensor operator %(Tensor left, float right) { - using Scalar scalar = right; - return left % scalar; + using Scalar right_scalar = right.ToScalar(); + return left % right_scalar; } public static Tensor operator %(Tensor left, double right) { - using Scalar scalar = right; - return left % scalar; + using Scalar right_scalar = right.ToScalar(); + return left % right_scalar; } public static Tensor operator &(Tensor left, Tensor right) => left.bitwise_and(right); From 06b9e45722008ff9f9694d04d342af9d27216099 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Thu, 11 Sep 2025 13:42:52 +0900 Subject: [PATCH 007/101] Declare TorchSharp.Scalar explicitly. * Update src/TorchSharp/Tensor/Tensor.Math.cs. + Declare TorchSharp.Scalar explicitly. + Add FIXMEs. --- src/TorchSharp/Tensor/Tensor.Math.cs | 30 ++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/src/TorchSharp/Tensor/Tensor.Math.cs b/src/TorchSharp/Tensor/Tensor.Math.cs index fb7207638..829d686ce 100644 --- a/src/TorchSharp/Tensor/Tensor.Math.cs +++ b/src/TorchSharp/Tensor/Tensor.Math.cs @@ -55,7 +55,10 @@ public Tensor abs_() /// public Tensor add(Tensor target) { - return add(target, 1); + // FIXME: Consider implement another THSTensor_add variant that takes no alpha? + // at::Tensor::add has default c10::Scalar alpha = 1. + using Scalar one_scalar = 1.ToScalar(); + return add(target, one_scalar); } /// @@ -79,7 +82,10 @@ public Tensor add(Tensor target, Scalar alpha) /// public Tensor add(Scalar scalar) { - return add(scalar, 1); + // FIXME: Consider implement another THSTensor_add_scalar variant that takes no alpha? + // at::Tensor::add has default c10::Scalar alpha = 1. + using Scalar one_scalar = 1.ToScalar(); + return add(scalar, one_scalar); } /// @@ -103,7 +109,10 @@ public Tensor add(Scalar scalar, Scalar alpha) /// public Tensor add_(Tensor target) { - return add_(target, 1); + // FIXME: Consider implement another THSTensor_add_ variant that takes no alpha? + // at::Tensor::add_ has default c10::Scalar alpha = 1. + using Scalar one_scalar = 1.ToScalar(); + return add_(target, one_scalar); } /// @@ -126,7 +135,10 @@ public Tensor add_(Tensor target, Scalar alpha) /// public Tensor add_(Scalar scalar) { - return add_(scalar, 1); + // FIXME: Consider implement another THSTensor_add_scalar_ variant that takes no alpha? + // at::Tensor::add_ has default c10::Scalar alpha = 1. + using Scalar one_scalar = 1.ToScalar(); + return add_(scalar, one_scalar); } /// @@ -200,7 +212,10 @@ public Tensor addcdiv(Tensor tensor1, Tensor tensor2, Scalar value) /// public Tensor addcdiv(Tensor tensor1, Tensor tensor2) { - return addcdiv(tensor1, tensor2, 1); + // FIXME: Consider implement another THSTensor_addcdiv variant that takes no value? + // at::Tensor::addcdiv has default c10::Scalar value = 1. 
+ using Scalar one_scalar = 1.ToScalar(); + return addcdiv(tensor1, tensor2, one_scalar); } /// @@ -225,7 +240,10 @@ public Tensor addcdiv_(Tensor tensor1, Tensor tensor2, Scalar value) /// public Tensor addcdiv_(Tensor tensor1, Tensor tensor2) { - return addcdiv_(tensor1, tensor2, 1); + // FIXME: Consider implement another THSTensor_addcdiv variant that takes no value? + // at::Tensor::addcdiv has default c10::Scalar value = 1. + using Scalar one_scalar = 1.ToScalar(); + return addcdiv_(tensor1, tensor2, one_scalar); } /// From 0fb172da3335558f7dea6fe2d2b5360e774a47e6 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Thu, 11 Sep 2025 15:01:49 +0900 Subject: [PATCH 008/101] Update Adadelta.step. * Update src/TorchSharp/Optimizers/Adadelta.cs. + Update Adadelta.step. - Declare TorchSharp.Scalar explicitly. - Add FIXME for possible unused weight_decay_scalar. - Cache weight_decay != 0 explicitly. --- src/TorchSharp/Optimizers/Adadelta.cs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/TorchSharp/Optimizers/Adadelta.cs b/src/TorchSharp/Optimizers/Adadelta.cs index 924dcb468..c0f10da21 100644 --- a/src/TorchSharp/Optimizers/Adadelta.cs +++ b/src/TorchSharp/Optimizers/Adadelta.cs @@ -129,10 +129,15 @@ public override Tensor step(Func closure = null) var options = group.Options as Options; var rho = options.rho.Value; - var eps = options.eps.Value; + using var rho_scalar = rho.ToScalar(); + using var rho_bar_scalar = (1 - rho).ToScalar(); + using var eps_scalar = options.eps.Value.ToScalar(); var weight_decay = options.weight_decay.Value; + var need_weight_decay = (weight_decay != 0); + using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay? var maximize = options.maximize.Value; var lr = options.LearningRate.Value; + using var negative_lr_scalar = (-lr).ToScalar(); foreach (var param in group.Parameters) { @@ -149,17 +154,17 @@ public override Tensor step(Func closure = null) var square_avg = state.square_avg; var acc_delta = state.acc_delta; - grad = (weight_decay != 0) - ? grad.add(param, alpha: weight_decay) + grad = (need_weight_decay) + ? grad.add(param, alpha: weight_decay_scalar) : grad.alias(); - square_avg.mul_(rho).addcmul_(grad, grad, 1 - rho); + square_avg.mul_(rho_scalar).addcmul_(grad, grad, rho_bar_scalar); - var std = square_avg.add(eps).sqrt_(); - var delta = acc_delta.add(eps).sqrt_().div_(std).mul_(grad); + var std = square_avg.add(eps_scalar).sqrt_(); + var delta = acc_delta.add(eps_scalar).sqrt_().div_(std).mul_(grad); - param.add_(delta, alpha: -lr); - acc_delta.mul_(rho).addcmul_(delta, delta, 1 - rho); + param.add_(delta, alpha: negative_lr_scalar); + acc_delta.mul_(rho_scalar).addcmul_(delta, delta, rho_bar_scalar); } }, closure); } From 645523c587e776946edb191638d5e0309c1bf845 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Thu, 11 Sep 2025 15:18:58 +0900 Subject: [PATCH 009/101] Update Adagrad.step. * Update src/TorchSharp/Optimizers/Adagrad.cs. + Update Adagrad.step. + Declare TorchSharp.Scalar explicitly. - Add FIXME for possible unused weight_decay_scalar. + Add FIXME for possible unsued initial_accumulator_value. + Cache weight_decay != 0. 
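
The weight-decay bullets above follow one shape across this series; a hedged, free-standing sketch of it (the helper and its name are hypothetical): test `weight_decay != 0` once, and skip the Scalar allocation entirely on the no-decay path, which is what the FIXME suggests.

    using TorchSharp;
    using static TorchSharp.torch;

    static Tensor ApplyWeightDecay(Tensor grad, Tensor param, double weight_decay)
    {
        bool need_weight_decay = weight_decay != 0;
        if (!need_weight_decay) return grad.alias(); // no Scalar is ever created
        using Scalar weight_decay_scalar = weight_decay.ToScalar();
        return grad.add(param, alpha: weight_decay_scalar);
    }
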
--- src/TorchSharp/Optimizers/Adagrad.cs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/TorchSharp/Optimizers/Adagrad.cs b/src/TorchSharp/Optimizers/Adagrad.cs index a4d4b70fc..2c9c9c8d4 100644 --- a/src/TorchSharp/Optimizers/Adagrad.cs +++ b/src/TorchSharp/Optimizers/Adagrad.cs @@ -139,9 +139,12 @@ public override Tensor step(Func closure = null) var options = group.Options as Options; var lr_decay = options.lr_decay.Value; var weight_decay = options.weight_decay.Value; - var eps = options.eps.Value; - var initial_accumulator_value = options.initial_accumulator_value.Value; + var need_weight_decay = weight_decay != 0; + using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay? + using var eps_scalar = options.eps.Value.ToScalar(); + var initial_accumulator_value = options.initial_accumulator_value.Value; // FIXME: Unused? var lr = options.LearningRate.Value; + using var one_scalar = 1.ToScalar(); foreach (var param in group.Parameters) { @@ -153,20 +156,19 @@ public override Tensor step(Func closure = null) state.step += 1; - if (weight_decay != 0) { - grad = grad.add(param, alpha: weight_decay); - } + if (need_weight_decay) grad = grad.add(param, alpha: weight_decay_scalar); var clr = lr / (1 + (state.step - 1) * lr_decay); + using var negative_clr_scalar = (-clr).ToScalar(); if (grad.is_sparse) throw new NotImplementedException("Adagrad optimization over sparse parameters"); if (torch.is_complex(grad)) throw new NotImplementedException("Adagrad optimization over complex parameters"); - state.sum.addcmul_(grad, grad, value: 1); - var std = state.sum.sqrt().add_(eps); - param.addcdiv_(grad, std, value: -clr); + state.sum.addcmul_(grad, grad, value: one_scalar); + var std = state.sum.sqrt().add_(eps_scalar); + param.addcdiv_(grad, std, value: negative_clr_scalar); } From 3f0d8062d733e30a83684e90bda6f33c1acc94ce Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Thu, 11 Sep 2025 15:37:05 +0900 Subject: [PATCH 010/101] Update Adam.step. * Update src/TorchSharp/Optimizers/Adam.cs. + Update Adam.step. - Declare TorchSharp.Scalar explicitly. - Add FIXME for possible unused weight_decay_scalar. - Cache weight_decay != 0. - Add FIXME for possible no denom disposing. --- src/TorchSharp/Optimizers/Adam.cs | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/src/TorchSharp/Optimizers/Adam.cs b/src/TorchSharp/Optimizers/Adam.cs index 2e04fc6ef..222a63439 100644 --- a/src/TorchSharp/Optimizers/Adam.cs +++ b/src/TorchSharp/Optimizers/Adam.cs @@ -154,10 +154,18 @@ public override Tensor step(Func closure = null) var options = group.Options as Options; var beta1 = options.beta1.Value; var beta2 = options.beta2.Value; + using var beta1_scalar = beta1.ToScalar(); + using var beta2_scalar = beta2.ToScalar(); + var beta1_bar = 1 - beta1; + var beta2_bar = 1 - beta2; + using var beta1_bar_scalar = beta1_bar.ToScalar(); + using var beta2_bar_scalar = beta2_bar.ToScalar(); var weight_decay = options.weight_decay.Value; + var need_weight_decay = weight_decay != 0; + using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay? 
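
The `denom` FIXME above can be answered with a `using` declaration on the temporary; a hedged sketch of the amsgrad-free branch in isolation (the signature and state handling are simplified, and intermediates of the chained expression are still left to the ambient dispose scope, as in the patch):

    using System;
    using TorchSharp;
    using static TorchSharp.torch;

    static void AdamApply(Tensor param, Tensor exp_avg, Tensor exp_avg_sq,
                          double bias_correction1, double bias_correction2,
                          double eps, double lr)
    {
        using Scalar eps_scalar = eps.ToScalar();
        double step_size = lr / bias_correction1;
        using Scalar negative_step_size_scalar = (-step_size).ToScalar();
        // Bound the denominator's lifetime to this call instead of the finalizer.
        using Tensor denom = (exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps_scalar);
        param.addcdiv_(exp_avg, denom, value: negative_step_size_scalar);
    }
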
var amsgrad = options.amsgrad.Value; var maximize = options.maximize.Value; - var eps = options.eps.Value; + using var eps_scalar = options.eps.Value.ToScalar(); var lr = options.LearningRate.Value; foreach (var param in group.Parameters) { @@ -175,25 +183,24 @@ public override Tensor step(Func closure = null) var bias_correction1 = 1 - Math.Pow(beta1, state.step); var bias_correction2 = 1 - Math.Pow(beta2, state.step); - if (weight_decay != 0) { - grad = grad.add(param, alpha: weight_decay); - } + if (need_weight_decay) grad = grad.add(param, alpha: weight_decay_scalar); - state.exp_avg.mul_(beta1).add_(grad, alpha: 1 - beta1); - state.exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value: 1 - beta2); + state.exp_avg.mul_(beta1_scalar).add_(grad, alpha: beta1_bar_scalar); + state.exp_avg_sq.mul_(beta2_scalar).addcmul_(grad, grad.conj(), value: beta2_bar_scalar); - Tensor denom = null; + Tensor denom = null; // FIXME: Need dispose? if (amsgrad) { var t0 = state.max_exp_avg_sq; state.max_exp_avg_sq = torch.maximum(t0, state.exp_avg_sq).DetachFromDisposeScope(); t0.Dispose(); - denom = (state.max_exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps); + denom = (state.max_exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps_scalar); } else { - denom = (state.exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps); + denom = (state.exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps_scalar); } var step_size = lr / bias_correction1; - param.addcdiv_(state.exp_avg, denom, value: -step_size); + using var negative_step_size_scalar = (-step_size).ToScalar(); + param.addcdiv_(state.exp_avg, denom, value: negative_step_size_scalar); } }, closure); } From 19d87cc762219ea412ebada4b6eb331f517e07f5 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Thu, 11 Sep 2025 15:51:45 +0900 Subject: [PATCH 011/101] Update Adamax.step. * Update src/TorchSharp/Optimizers/Adamax.cs. + Update Adamax.step. - Declare TorchSharp.Scalar explicitly. - Add FIXME for possible unused weight_decay_scalar. - Cache weight_decay != 0. - Add FIXME for CA1806. --- src/TorchSharp/Optimizers/Adamax.cs | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/src/TorchSharp/Optimizers/Adamax.cs b/src/TorchSharp/Optimizers/Adamax.cs index e09ef9170..c54505070 100644 --- a/src/TorchSharp/Optimizers/Adamax.cs +++ b/src/TorchSharp/Optimizers/Adamax.cs @@ -142,8 +142,14 @@ public override Tensor step(Func closure = null) var options = group.Options as Options; var beta1 = options.beta1.Value; var beta2 = options.beta2.Value; - var eps = options.eps.Value; + using var beta1_scalar = beta1.ToScalar(); + using var beta2_scalar = beta2.ToScalar(); + var beta1_bar = 1 - beta1; + using var beta1_bar_scalar = beta1_bar.ToScalar(); + using var eps_scalar = options.eps.Value.ToScalar(); var weight_decay = options.weight_decay.Value; + var need_weight_decay = weight_decay != 0; + using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay? var lr = options.LearningRate.Value; foreach (var param in group.Parameters) { @@ -161,21 +167,22 @@ public override Tensor step(Func closure = null) var exp_avg = state.exp_avg; var exp_inf = state.exp_inf; - grad = (weight_decay != 0) - ? grad.add(param, alpha: weight_decay) + grad = (need_weight_decay) + ? 
grad.add(param, alpha: weight_decay_scalar) : grad.alias(); - exp_avg.mul_(beta1).add_(grad, alpha: 1 - beta1); + exp_avg.mul_(beta1_scalar).add_(grad, alpha: beta1_bar_scalar); var norm_buf = torch.cat(new Tensor[] { - exp_inf.mul_(beta2).unsqueeze(0), - grad.abs().add_(eps).unsqueeze_(0) + exp_inf.mul_(beta2_scalar).unsqueeze(0), + grad.abs().add_(eps_scalar).unsqueeze_(0) }, 0); - torch.amax(norm_buf, new long[] { 0 }, false, exp_inf); + torch.amax(norm_buf, new long[] { 0 }, false, exp_inf); // FIXME: CA1806? var clr = lr / (1 - Math.Pow(beta1, state.step)); - param.addcdiv_(exp_avg, exp_inf, value: -clr); + using var negative_clr_scalar = (-clr).ToScalar(); + param.addcdiv_(exp_avg, exp_inf, value: negative_clr_scalar); } }, closure); } From 68f00b2d72f27c8702045896a4c8221d5979244f Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Thu, 11 Sep 2025 16:12:57 +0900 Subject: [PATCH 012/101] Update ASGD.step. * Update src/TorchSharp/Optimizers/ASGD.cs. + Update ASGD.step. - Declare TorchSharp.Scalar explicitly. - Add FIXME for possible unsed weight_decay_scalar. - Cache weight_decay != 0. --- src/TorchSharp/Optimizers/ASGD.cs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/TorchSharp/Optimizers/ASGD.cs b/src/TorchSharp/Optimizers/ASGD.cs index 260810aa0..4e9fb852c 100644 --- a/src/TorchSharp/Optimizers/ASGD.cs +++ b/src/TorchSharp/Optimizers/ASGD.cs @@ -140,6 +140,8 @@ public override Tensor step(Func closure = null) var lambd = options.lambd.Value; var alpha = options.alpha.Value; var weight_decay = options.weight_decay.Value; + var need_weight_decay = weight_decay != 0; + using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay? var t0 = options.t0.Value; var lr = options.LearningRate.Value; @@ -157,15 +159,19 @@ public override Tensor step(Func closure = null) state.step += 1; - grad = (weight_decay != 0) - ? grad.add(param, alpha: weight_decay) + grad = (need_weight_decay) + ? grad.add(param, alpha: weight_decay_scalar) : grad.alias(); - param.mul_(1 - lambd * state.eta); - param.add_(grad, alpha: -state.eta); + var lambd_eta_bar = 1 - lambd * state.eta; + using var lambd_eta_bar_scalar = lambd_eta_bar.ToScalar(); + param.mul_(lambd_eta_bar_scalar); + using var negative_eta_scalar = (-state.eta).ToScalar(); + param.add_(grad, alpha: negative_eta_scalar); if (state.mu != 1) { - state.ax.add_(param.sub(state.ax).mul(state.mu)); + using var mu_scalar = state.mu.ToScalar(); + state.ax.add_(param.sub(state.ax).mul(mu_scalar)); } else { state.ax.copy_(param); } From 4cc71c6763bb731ff7a8258dedfd9afcc1d820b3 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Thu, 11 Sep 2025 16:37:28 +0900 Subject: [PATCH 013/101] Update NAdam.step. * Update src/TorchSharp/Optimizers/NAdam.cs. + Update NAdam.step. - Declare TorchSharp.Scalar explicitly. - Add FIXME for possible unused weight_decay_scalar. - Cache weight_decay != 0. - Add FIXME for possible no denom disposing. 
--- src/TorchSharp/Optimizers/NAdam.cs | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/src/TorchSharp/Optimizers/NAdam.cs b/src/TorchSharp/Optimizers/NAdam.cs index 6118cc5d1..ed5add3e4 100644 --- a/src/TorchSharp/Optimizers/NAdam.cs +++ b/src/TorchSharp/Optimizers/NAdam.cs @@ -147,8 +147,16 @@ public override Tensor step(Func closure = null) var options = group.Options as Options; var beta1 = options.beta1.Value; var beta2 = options.beta2.Value; - var eps = options.eps.Value; + using var beta1_scalar = beta1.ToScalar(); + using var beta2_scalar = beta2.ToScalar(); + var beta1_bar = 1 - beta1; + var beta2_bar = 1 - beta2; + using var beta1_bar_scalar = beta1_bar.ToScalar(); + using var beta2_bar_scalar = beta2_bar.ToScalar(); + using var eps_scalar = options.eps.Value.ToScalar(); var weight_decay = options.weight_decay.Value; + var need_weight_decay = weight_decay != 0; + using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay? var momentum_decay = options.momentum_decay.Value; var lr = options.LearningRate.Value; @@ -166,9 +174,10 @@ public override Tensor step(Func closure = null) var exp_avg_sq = state.exp_avg_sq; var bias_correction2 = 1 - Math.Pow(beta2, state.step); + using var bias_correction2_scalar = bias_correction2.ToScalar(); - grad = (weight_decay != 0) - ? grad.add(param, alpha: weight_decay) + grad = (need_weight_decay) + ? grad.add(param, alpha: weight_decay_scalar) : grad.alias(); var mu = beta1 * (1.0 - 0.5 * Math.Pow(0.96, state.step * momentum_decay)); @@ -177,13 +186,17 @@ public override Tensor step(Func closure = null) var mu_product = state.mu_product * mu; var mu_product_next = mu_product * mu_next; - exp_avg.mul_(beta1).add_(grad, alpha: 1 - beta1); - exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value: 1 - beta2); + exp_avg.mul_(beta1_scalar).add_(grad, alpha: beta1_bar_scalar); + exp_avg_sq.mul_(beta2_scalar).addcmul_(grad, grad, value: beta2_bar_scalar); - var denom = exp_avg_sq.div(bias_correction2).sqrt_().add_(eps); + var denom = exp_avg_sq.div(bias_correction2_scalar).sqrt_().add_(eps_scalar); // FIXME: Need dispose? - param.addcdiv_(grad, denom, value: -lr * (1 - mu) / (1 - mu_product)); - param.addcdiv_(exp_avg, denom, value: -lr * mu_next / (1 - mu_product_next)); + var scaled_lr = lr * (1 - mu) / (1 - mu_product); + using var negative_scaled_scalar = (-scaled_lr).ToScalar(); + param.addcdiv_(grad, denom, value: negative_scaled_scalar); + var scaled_lr_next = lr * mu_next / (1 - mu_product_next); + using var negative_scaled_lr_next_scalar = (-scaled_lr_next).ToScalar(); + param.addcdiv_(exp_avg, denom, value: negative_scaled_lr_next_scalar); state.mu_product = mu_product; } From 997c679c7184de25840c2f7288f4324c1ca91bfa Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Thu, 11 Sep 2025 16:59:15 +0900 Subject: [PATCH 014/101] Update RAdam.step. * Update src/TorchSharp/Optimizers/RAdam.cs. + Update RAdam.step. - Declare TorchSharp.Scalar explicitly. - Add FIXME for possible unused weight_decay_scalar. - Cache weight_decay != 0. - Add FIXME for possible torch.Tensor.sub_ use. - Add FIXME for possible no dispose for torch.Tensor. - bias_corrected_exp_avg - t6 - adaptive_lr and its intermediates and derives - Add FIXME for possible no dispose on param.add_ if rho_t > 5. 
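
On the `torch.Tensor.sub_` FIXME above: `add_` with an alpha of -1 and `sub_` should be numerically equivalent, and the latter needs no Scalar at all. A hedged comparison sketch:

    using TorchSharp;
    using static TorchSharp.torch;

    using Tensor b = torch.rand(3);
    using Tensor a_by_add = torch.ones(3);
    using Tensor a_by_sub = torch.ones(3);

    using Scalar negative_one_scalar = (-1.0).ToScalar();
    a_by_add.add_(b, alpha: negative_one_scalar); // a + (-1) * b, allocates a Scalar
    a_by_sub.sub_(b);                             // a - b, no Scalar involved
    // a_by_add.allclose(a_by_sub) is expected to be true.
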
--- src/TorchSharp/Optimizers/RAdam.cs | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/src/TorchSharp/Optimizers/RAdam.cs b/src/TorchSharp/Optimizers/RAdam.cs index d64416196..ed12e328a 100644 --- a/src/TorchSharp/Optimizers/RAdam.cs +++ b/src/TorchSharp/Optimizers/RAdam.cs @@ -141,9 +141,19 @@ public override Tensor step(Func closure = null) var options = group.Options as Options; var beta1 = options.beta1.Value; var beta2 = options.beta2.Value; - var eps = options.eps.Value; + using var beta1_scalar = beta1.ToScalar(); + using var beta2_scalar = beta2.ToScalar(); + var beta1_bar = 1 - beta1; + var beta2_bar = 1 - beta2; + using var beta1_bar_scalar = beta1_bar.ToScalar(); + using var beta2_bar_scalar = beta2_bar.ToScalar(); + using var eps_scalar = options.eps.Value.ToScalar(); var weight_decay = options.weight_decay.Value; + var need_weight_decay = weight_decay != 0; + using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay? var lr = options.LearningRate.Value; + using var lr_scalar = lr.ToScalar(); + using var negative_one_scalar = (-1.0).ToScalar(); // FIXME: Use torch.Tensor.sub_ instead? foreach (var param in group.Parameters) { @@ -161,27 +171,27 @@ public override Tensor step(Func closure = null) var bias_correction1 = 1 - Math.Pow(beta1, state.step); var bias_correction2 = 1 - Math.Pow(beta2, state.step); - grad = (weight_decay != 0) - ? grad.add(param, alpha: weight_decay) + grad = (need_weight_decay) + ? grad.add(param, alpha: weight_decay_scalar) : grad.alias(); - exp_avg.mul_(beta1).add_(grad, alpha: 1 - beta1); - exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value: 1 - beta2); + exp_avg.mul_(beta1_scalar).add_(grad, alpha: beta1_bar_scalar); + exp_avg_sq.mul_(beta2_scalar).addcmul_(grad, grad, value: beta2_bar_scalar); - var bias_corrected_exp_avg = exp_avg / bias_correction1; + var bias_corrected_exp_avg = exp_avg / bias_correction1; // FIXME: Need dispose? var rho_inf = 2 / (1 - beta2) - 1; var rho_t = rho_inf - 2 * state.step * Math.Pow(beta2, state.step) / bias_correction2; - var t6 = bias_corrected_exp_avg * lr; + var t6 = bias_corrected_exp_avg.mul(lr_scalar); // FIXME: Need dispose? if (rho_t > 5) { var rect = Math.Sqrt((rho_t - 4) * (rho_t - 2) * rho_inf / ((rho_inf - 4) * (rho_inf - 2) * rho_t)); - var adaptive_lr = Math.Sqrt(bias_correction2) / exp_avg_sq.sqrt().add_(eps); + var adaptive_lr = Math.Sqrt(bias_correction2) / exp_avg_sq.sqrt().add_(eps_scalar); // FIXME: Need dispose? - param.add_(t6 * lr * adaptive_lr * rect, alpha: -1.0); + param.add_(t6 * lr * adaptive_lr * rect, alpha: negative_one_scalar); // FIXME: Need dispose? Use inplace ops? } else { - param.add_(t6, alpha: -1.0); + param.add_(t6, alpha: negative_one_scalar); } } }, closure); From bc130ff87874093d98bc2265761a464f0d3c6f6b Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Thu, 11 Sep 2025 17:17:29 +0900 Subject: [PATCH 015/101] Update RMSProp.step. * Update src/TorchSharp/Optimizers/RMSprop.cs. + Update RMSProp.step. - Declare TorchSharp.Scalar explicitly. - Add FIXME for possible unused momentum_scalar. - Add FIXME for possible unused weight_decay_scalar. - Cache momentum > 0. - Cache weight_decay != 0. - Add FIXME for possible no avg dispose. 
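
For reference, the mul_/addcmul_ chain this commit touches is the exponential moving average of the squared gradient, square_avg := alpha * square_avg + (1 - alpha) * grad * grad. A hedged, self-contained sketch of that one step with the hoisted Scalars:

    using TorchSharp;
    using static TorchSharp.torch;

    double alpha = 0.99;
    using Scalar alpha_scalar = alpha.ToScalar();
    using Scalar alpha_bar_scalar = (1 - alpha).ToScalar();

    using Tensor grad = torch.rand(4);
    using Tensor square_avg = torch.zeros(4);

    // addcmul_(t1, t2, value: v) computes x += v * t1 * t2 element-wise, so after
    // this line square_avg holds alpha * old_value + (1 - alpha) * grad * grad.
    square_avg.mul_(alpha_scalar).addcmul_(grad, grad, value: alpha_bar_scalar);
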
--- src/TorchSharp/Optimizers/RMSprop.cs | 33 +++++++++++++++++----------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/src/TorchSharp/Optimizers/RMSprop.cs b/src/TorchSharp/Optimizers/RMSprop.cs index 9bc77f95f..8333832bb 100644 --- a/src/TorchSharp/Optimizers/RMSprop.cs +++ b/src/TorchSharp/Optimizers/RMSprop.cs @@ -152,11 +152,20 @@ public override Tensor step(Func closure = null) var options = group.Options as Options; var maximize = options.maximize.Value; var momentum = options.momentum.Value; + var need_momentum = momentum > 0; + using var momentum_scalar = momentum.ToScalar(); // FIXME: Omit if not need_momentum? var alpha = options.alpha.Value; + var alpha_bar = 1 - alpha; + using var alpha_scalar = alpha.ToScalar(); + using var alpha_bar_scalar = alpha_bar.ToScalar(); var weight_decay = options.weight_decay.Value; + var need_weight_decay = weight_decay != 0; + using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay? var centered = options.centered.Value; - var eps = options.eps.Value; + using var negative_one_scalar = (-1).ToScalar(); + using var eps_scalar = options.eps.Value.ToScalar(); var lr = options.LearningRate.Value; + using var negative_lr_scalar = (-lr).ToScalar(); foreach (var param in group.Parameters) { @@ -170,28 +179,26 @@ public override Tensor step(Func closure = null) state.step += 1; - if (weight_decay != 0) { - grad = grad.add(param, alpha: weight_decay); - } + if (need_weight_decay) grad = grad.add(param, alpha: weight_decay_scalar); - state.square_avg.mul_(alpha).addcmul_(grad, grad, value: 1 - alpha); + state.square_avg.mul_(alpha_scalar).addcmul_(grad, grad, value: alpha_bar_scalar); - Tensor avg = null; + Tensor avg = null; // FIXME: Need dispose? if (centered) { var grad_avg = state.grad_avg; - grad_avg.mul_(alpha).add_(grad, alpha: 1 - alpha); - avg = state.square_avg.addcmul(grad_avg, grad_avg, value: -1).sqrt_().add_(eps); + grad_avg.mul_(alpha_scalar).add_(grad, alpha: alpha_bar_scalar); + avg = state.square_avg.addcmul(grad_avg, grad_avg, value: negative_one_scalar).sqrt_().add_(eps_scalar); } else { - avg = state.square_avg.sqrt().add_(eps); + avg = state.square_avg.sqrt().add_(eps_scalar); } - if (momentum > 0) { + if (need_momentum) { var buf = state.momentum_buffer; - buf.mul_(momentum).addcdiv_(grad, avg); - param.add_(buf, alpha: -lr); + buf.mul_(momentum_scalar).addcdiv_(grad, avg); + param.add_(buf, alpha: negative_lr_scalar); } else { - param.addcdiv_(grad, avg, -lr); + param.addcdiv_(grad, avg, negative_lr_scalar); } } }, closure); From 64520bc4fbda85cb4f7b0c5069b3bee6e81985c5 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Thu, 11 Sep 2025 17:36:01 +0900 Subject: [PATCH 016/101] Update SGD.step. * Update src/TorchSharp/Optimizers/SGD.cs. + Update SGD.step. - Declare TorchSharp.Scalar explicitly. - Cache momentum != 0. - Cache dampening != 1. - Cache weight_decay != 0. - Omit unused TorchSharp.Scalar construction. 
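The new wrinkle relative to the previous patches is skipping construction entirely when an option is off. A reduced sketch of the idiom, assuming TorchSharp.Scalar is a reference type implementing IDisposable (names mirror the diff below):

    var need_weight_decay = weight_decay != 0;
    using var weight_decay_scalar = need_weight_decay ? weight_decay.ToScalar() : null;
    // A using declaration tolerates null (Dispose is simply skipped), so no
    // native handle is created at all on the weight_decay == 0 path.
    if (need_weight_decay) grad = grad.add(param, alpha: weight_decay_scalar!);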
--- src/TorchSharp/Optimizers/SGD.cs | 36 +++++++++++++++++++------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/src/TorchSharp/Optimizers/SGD.cs b/src/TorchSharp/Optimizers/SGD.cs index fed1f912b..580a4cfeb 100644 --- a/src/TorchSharp/Optimizers/SGD.cs +++ b/src/TorchSharp/Optimizers/SGD.cs @@ -137,14 +137,21 @@ public SGD(IEnumerable parameters, double lr, double momentum = 0.0, public override Tensor step(Func closure = null) { return _step(group => { - + #nullable enable var options = group.Options; - var momentum = options.momentum.Value; - var dampening = options.dampening.Value; - var weight_decay = options.weight_decay.Value; - var nesterov = options.nesterov.Value; - var maximize = options.maximize.Value; - var lr = options.LearningRate.Value; + var momentum = options.momentum!.Value; + var need_momentum = momentum != 0; + using var momentum_scalar = (need_momentum) ? momentum.ToScalar() : null; + var dampening = options.dampening!.Value; + var need_dampening = dampening != 1; + using var dampening_bar_scalar = (need_momentum && need_dampening) ? (1 - dampening).ToScalar() : null; + var weight_decay = options.weight_decay!.Value; + var need_weight_decay = weight_decay != 0; + using var weight_decay_scalar = (need_weight_decay) ? weight_decay.ToScalar() : null; + var nesterov = options.nesterov!.Value; + var maximize = options.maximize!.Value; + var lr = options.LearningRate!.Value; + using var signed_lr_scalar = ((maximize) ? lr : -lr).ToScalar(); foreach (var param in group.Parameters) { @@ -154,22 +161,21 @@ public override Tensor step(Func closure = null) if (grad is null) continue; - if (weight_decay != 0) { - grad = grad.add(param, alpha: weight_decay); - } + if (need_weight_decay) grad = grad.add(param, alpha: weight_decay_scalar!); - if (momentum != 0) { + if (need_momentum) { var buf = state.momentum_buffer; if (buf is null) { buf = grad.clone().detach().DetachFromDisposeScope(); state.momentum_buffer = buf; } else { - buf.mul_(momentum).add_(grad, alpha: (1 - dampening)); + buf.mul_(momentum_scalar!); + if (need_dampening) buf.add_(grad, alpha: dampening_bar_scalar!); } if (nesterov) { - grad = grad.add(buf, alpha: momentum); + grad = grad.add(buf, alpha: momentum_scalar!); } else { grad = buf; } @@ -177,10 +183,10 @@ public override Tensor step(Func closure = null) state.momentum_buffer = buf; } - var alpha = maximize ? lr : -lr; - param.add_(grad, alpha: alpha); + param.add_(grad, alpha: signed_lr_scalar); } + #nullable disable }, closure); } From 2f3c9e3245ce9465458cefcc9ffd39662a9b9bbb Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Thu, 11 Sep 2025 18:43:30 +0900 Subject: [PATCH 017/101] Update griffinlim. * Update src/TorchAudio/Functional.cs. + Update griffinlim. - Declare TorchSharp.Scalar explicitly. - Introduce eps_scalar. 
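The eps_scalar change is about allocation count as much as disposal: hoisted above the loop, a single native Scalar serves all n_iter iterations, where angles.abs().add(1e-16) previously manufactured a fresh temporary on every pass. Reduced sketch (illustrative):

    using var eps_scalar = (1e-16).ToScalar();
    for (int i = 0; i < n_iter; i++) {
        // one shared handle instead of n_iter implicit conversions
        angles = angles.div(angles.abs().add(eps_scalar));
    }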
--- src/TorchAudio/Functional.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchAudio/Functional.cs b/src/TorchAudio/Functional.cs index fdbfb37be..a80959bfd 100644 --- a/src/TorchAudio/Functional.cs +++ b/src/TorchAudio/Functional.cs @@ -197,6 +197,7 @@ public static Tensor griffinlim(Tensor specgram, Tensor window, long n_fft, long // And initialize the previous iterate to 0 var tprev = torch.tensor(0.0, dtype: specgram.dtype, device: specgram.device); + using var eps_scalar = (1e-16).ToScalar(); for (int i = 0; i < n_iter; i++) { // Invert with our current estimate of the phases var inverse = torch.istft( @@ -221,7 +222,7 @@ public static Tensor griffinlim(Tensor specgram, Tensor window, long n_fft, long if (momentum > 0.0) { angles = angles - tprev.mul_(momentum); } - angles = angles.div(angles.abs().add(1e-16)); + angles = angles.div(angles.abs().add(eps_scalar)); // Store the previous iterate tprev = rebuilt; From d1681619f72c1a7dd9ab8445bf933fec1a003144 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Thu, 11 Sep 2025 19:13:14 +0900 Subject: [PATCH 018/101] Update AdamW.step. * Update src/TorchSharp/Optimizers/AdamW.cs. + Update AdamW.step. - Declare TorchSharp.Scalar explicitly. - Dispose denom explicitly. --- src/TorchSharp/Optimizers/AdamW.cs | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/src/TorchSharp/Optimizers/AdamW.cs b/src/TorchSharp/Optimizers/AdamW.cs index 4f6c10d85..6dd77a2c3 100644 --- a/src/TorchSharp/Optimizers/AdamW.cs +++ b/src/TorchSharp/Optimizers/AdamW.cs @@ -154,11 +154,18 @@ public override Tensor step(Func closure = null) var options = group.Options as Options; var beta1 = options.beta1.Value; var beta2 = options.beta2.Value; + using var beta1_scalar = beta1.ToScalar(); + using var beta2_scalar = beta2.ToScalar(); + var beta1_bar = 1 - beta1; + var beta2_bar = 1 - beta2; + using var beta1_bar_scalar = beta1_bar.ToScalar(); + using var beta2_bar_scalar = beta2_bar.ToScalar(); var weight_decay = options.weight_decay.Value; var amsgrad = options.amsgrad.Value; var maximize = options.maximize.Value; - var eps = options.eps.Value; + using var eps_scalar = options.eps.Value.ToScalar(); var lr = options.LearningRate.Value; + using var lr_weight_decay_bar_scalar = (1 - lr * weight_decay).ToScalar(); foreach (var param in group.Parameters) { @@ -172,26 +179,28 @@ public override Tensor step(Func closure = null) state.step += 1; - param.mul_(1 - lr * weight_decay); + param.mul_(lr_weight_decay_bar_scalar); var bias_correction1 = 1 - Math.Pow(beta1, state.step); var bias_correction2 = 1 - Math.Pow(beta2, state.step); - state.exp_avg.mul_(beta1).add_(grad, alpha: 1 - beta1); - state.exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value: 1 - beta2); + state.exp_avg.mul_(beta1_scalar).add_(grad, alpha: beta1_bar_scalar); + state.exp_avg_sq.mul_(beta2_scalar).addcmul_(grad, grad, value: beta2_bar_scalar); Tensor denom = null; if (amsgrad) { var t0 = state.max_exp_avg_sq; state.max_exp_avg_sq = torch.maximum(t0, state.exp_avg_sq).DetachFromDisposeScope(); t0.Dispose(); - denom = (state.max_exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps); + denom = (state.max_exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps_scalar); } else { - denom = (state.exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps); + denom = (state.exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps_scalar); } var step_size = lr / bias_correction1; - param.addcdiv_(state.exp_avg, denom, value: -step_size); + 
using var negative_step_size_scalar = (-step_size).ToScalar(); + param.addcdiv_(state.exp_avg, denom, value: negative_step_size_scalar); + denom.Dispose(); } }, closure); } From 90f406569d124ab20d06b891b32032756350638d Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 11:38:30 +0900 Subject: [PATCH 019/101] Update BatchNorm.forward. * Update src/TorchSharp/NN/Normalization/BatchNorm.cs. + Update BatchNorm.forward. - Declare TorchSharp.Scalar explicitly. - Add FIXME for cache over training. --- src/TorchSharp/NN/Normalization/BatchNorm.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/NN/Normalization/BatchNorm.cs b/src/TorchSharp/NN/Normalization/BatchNorm.cs index 398eae63c..0bdc68f56 100644 --- a/src/TorchSharp/NN/Normalization/BatchNorm.cs +++ b/src/TorchSharp/NN/Normalization/BatchNorm.cs @@ -37,7 +37,8 @@ public override Tensor forward(Tensor input) { if (num_batches_tracked is not null) { - num_batches_tracked.add_(1); + using var one_scalar = 1.ToScalar(); // FIXME: Cache over training? + num_batches_tracked.add_(one_scalar); exponential_average_factor = (this.momentum is null) ? (1.0 / (double)num_batches_tracked) : momentum.Value; } } From a86d88024e57e5308455ddd20e405eecd70b84f9 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 11:43:25 +0900 Subject: [PATCH 020/101] Update torch.normal. * Update src/TorchSharp/Tensor/Factories/Tensor.Factories.cs. + Update torch.normal. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Tensor/Factories/Tensor.Factories.cs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Tensor/Factories/Tensor.Factories.cs b/src/TorchSharp/Tensor/Factories/Tensor.Factories.cs index b306c0cd7..657803e58 100644 --- a/src/TorchSharp/Tensor/Factories/Tensor.Factories.cs +++ b/src/TorchSharp/Tensor/Factories/Tensor.Factories.cs @@ -117,8 +117,10 @@ public static Tensor eye(long rows, long columns = -1L, ScalarType? dtype = null /// public static Tensor normal(double mean, double std, ReadOnlySpan size, ScalarType? dtype = null, Device? device = null, bool requires_grad = false, Generator? generator = null, string[]? names = null) { + using var mean_scalar = mean.ToScalar(); + using var std_scalar = std.ToScalar(); return randn(size, dtype, device, requires_grad: false, generator, names) - .mul_(std).add_(mean).requires_grad_(requires_grad); + .mul_(std_scalar).add_(mean_scalar).requires_grad_(requires_grad); } /// From 6e546948c8c70cf4566bef1134b93dbfb7c52faf Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 13:05:47 +0900 Subject: [PATCH 021/101] Update torchvision.utils.save_image. * Update src/TorchVision/Utils.cs. + Update torchvision.utils.save_image. - Declare TorchSharp.Scalar explicitly. - Add FIXME for possible torch.Tensor.round_ use. - Add FIXME for no torch.min_int_value. --- src/TorchVision/Utils.cs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/TorchVision/Utils.cs b/src/TorchVision/Utils.cs index 623f332b3..8bd8d231d 100644 --- a/src/TorchVision/Utils.cs +++ b/src/TorchVision/Utils.cs @@ -217,7 +217,11 @@ public static void save_image( using var _ = torch.NewDisposeScope(); var grid = make_grid(tensor, nrow, padding, normalize, value_range, scale_each, pad_value); // Add 0.5 after unnormalizing to [0, 255] to round to nearest integer - var narr = grid.mul(255).add_(0.5).clamp_(0, 255).to(uint8, CPU); + // FIXME: Why not torch.Tensor.round_? 
+ using var uint8_min_scalar = 0.ToScalar(); // FIXME: No torch.min_int_value? + using var uint8_max_scalar = torch.max_int_value(uint8).ToScalar(); + using var eps_scalar = 0.5.ToScalar(); + var narr = grid.mul(uint8_max_scalar).add_(eps_scalar).clamp_(uint8_min_scalar, uint8_max_scalar).to(uint8, CPU); (imager ?? DefaultImager).EncodeImage(narr, format, filestream); } } From 67300a9bc88ead2797590ef2097fe58a15fc74f9 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 13:51:44 +0900 Subject: [PATCH 022/101] Update Rprop.step. * Update src/TorchSharp/Optimizers/Rprop.cs. + Update Rprop.step. - Declare TorchSharp.Scalar explicitly. - Add FIXME for unused lr. - Add FIXME for possible torch.Tensor.sign_ use. - Cache eta{minus,plus} and 1 as torch.Tensor. --- src/TorchSharp/Optimizers/Rprop.cs | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/src/TorchSharp/Optimizers/Rprop.cs b/src/TorchSharp/Optimizers/Rprop.cs index abe9d736e..acf62eb63 100644 --- a/src/TorchSharp/Optimizers/Rprop.cs +++ b/src/TorchSharp/Optimizers/Rprop.cs @@ -138,9 +138,17 @@ public override Tensor step(Func closure = null) var maximize = options.maximize.Value; var etaminus = options.etaminus.Value; var etaplus = options.etaplus.Value; - var min_step = options.min_step.Value; - var max_step = options.max_step.Value; - var lr = options.LearningRate.Value; + using var etaminus_scalar = etaminus.ToScalar(); + using var etaplus_scalar = etaplus.ToScalar(); + using var etaminus_tensor = torch.tensor(etaminus); + using var etaplus_tensor = torch.tensor(etaplus); + using var min_step_scalar = options.min_step.Value.ToScalar(); + using var max_step_scalar = options.max_step.Value.ToScalar(); + var lr = options.LearningRate.Value; // FIXME: Unused? + using var zero_scalar = 0.ToScalar(); + using var one_scalar = 1.ToScalar(); + using var negative_one_scalar = (-1).ToScalar(); + using var one_tensor = torch.tensor(1); foreach (var param in group.Parameters) { @@ -156,18 +164,18 @@ public override Tensor step(Func closure = null) state.step += 1; - var sign = grad.mul(state.prev).sign(); - sign[sign.gt(0)] = (Tensor)etaplus; - sign[sign.lt(0)] = (Tensor)etaminus; - sign[sign.eq(0)] = (Tensor)1; + var sign = grad.mul(state.prev).sign(); // FIXME: Use torch.Tensor.sign_? + sign[sign.gt(zero_scalar)] = etaplus_tensor; + sign[sign.lt(zero_scalar)] = etaminus_tensor; + sign[sign.eq(zero_scalar)] = one_tensor; - state.step_size.mul_(sign).clamp_(min_step, max_step); + state.step_size.mul_(sign).clamp_(min_step_scalar, max_step_scalar); grad = grad.clone(); - grad.index_put_(0, sign.eq(etaminus)); + grad.index_put_(zero_scalar, sign.eq(etaminus_scalar)); - param.addcmul_(grad.sign(), state.step_size, -1); + param.addcmul_(grad.sign(), state.step_size, negative_one_scalar); state.prev.copy_(grad); } From f2523835a885b378733c29f257f24a4bec39cc7c Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 15:00:53 +0900 Subject: [PATCH 023/101] Update torchvision.ops.stochastic_depth. * Update src/TorchVision/Ops/StochasticDepth.cs. + Update torchvision.ops.stochastic_depth. - Declare TorchSharp.Scalar explicitly. 
--- src/TorchVision/Ops/StochasticDepth.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchVision/Ops/StochasticDepth.cs b/src/TorchVision/Ops/StochasticDepth.cs index cb615bc2b..64c450232 100644 --- a/src/TorchVision/Ops/StochasticDepth.cs +++ b/src/TorchVision/Ops/StochasticDepth.cs @@ -53,7 +53,8 @@ public static Tensor stochastic_depth(Tensor input, double p, StochasticDepth.Mo noise.bernoulli_(survival_rate); if (survival_rate > 0) { - noise.div_(survival_rate); + using var survival_rate_scalar = survival_rate.ToScalar(); + noise.div_(survival_rate_scalar); } return input * noise; } From 15233867b17152881e51c8d8cc9b6a7a73caaff8 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 15:12:52 +0900 Subject: [PATCH 024/101] Update torchvision.utils.make_grid. * Update src/TorchVision/Utils.cs. + Update torchvision.utils.make_grid. - Declare TorchSharp.Scalar explicitly. --- src/TorchVision/Utils.cs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/TorchVision/Utils.cs b/src/TorchVision/Utils.cs index 8bd8d231d..4d543a326 100644 --- a/src/TorchVision/Utils.cs +++ b/src/TorchVision/Utils.cs @@ -87,8 +87,11 @@ public static Tensor make_grid( tensor = tensor.clone(); // avoid modifying tensor in-place void norm_ip(Tensor img, double low, double high) { - img.clamp_(min: low, max: high); - img.sub_(low).div_(Math.Max(high - low, 1e-5)); + using var low_scalar = low.ToScalar(); + using var high_scalar = high.ToScalar(); + using var denom_scalar = Math.Max(high - low, 1e-5).ToScalar(); + img.clamp_(min: low_scalar, max: high_scalar); + img.sub_(low_scalar).div_(denom_scalar); } void norm_range(Tensor t, (double low, double high)? range) @@ -129,7 +132,8 @@ void norm_range(Tensor t, (double low, double high)? range) var height = tensor.size(2) + padding; var num_channels = tensor.size(1); - var grid = tensor.new_full(new[] { num_channels, height * ymaps + padding, width * xmaps + padding }, pad_value); + using var pad_value_scalar = pad_value.ToScalar(); + var grid = tensor.new_full(new[] { num_channels, height * ymaps + padding, width * xmaps + padding }, pad_value_scalar); var k = 0L; for (long y = 0; y < ymaps; ++y) { From bfc1f2b9829261b23346b97be202852a6c26d27d Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 15:35:47 +0900 Subject: [PATCH 025/101] Update TorchSharp.Modules.ExpRelaxedCategorical.log_prob. * Update src/TorchSharp/Distributions/ExpRelaxedCategorical.cs. + Update TorchSharp.Modules.ExpRelaxedCategorical.log_prob. - Declare TorchSharp.Scalar explicitly. - Add FIXME for possible inplace ops use. --- src/TorchSharp/Distributions/ExpRelaxedCategorical.cs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Distributions/ExpRelaxedCategorical.cs b/src/TorchSharp/Distributions/ExpRelaxedCategorical.cs index 7c42c0548..964460149 100644 --- a/src/TorchSharp/Distributions/ExpRelaxedCategorical.cs +++ b/src/TorchSharp/Distributions/ExpRelaxedCategorical.cs @@ -134,7 +134,9 @@ public override Tensor log_prob(Tensor value) var logitsValue = broadcast_tensors(_logits, value); var logits = logitsValue[0]; value = logitsValue[1]; - var log_scale = (torch.full_like(_temperature, K).lgamma() - _temperature.log().mul(-(K - 1))); + using var K_scalar = K.ToScalar(); + using var negative_Ksub1_scalar = (-(K - 1)).ToScalar(); + var log_scale = torch.full_like(_temperature, K_scalar).lgamma() - _temperature.log().mul(negative_Ksub1_scalar); // FIXME: Use inplace ops? 
var score = logits - value.mul(_temperature); score = (score - score.logsumexp(dim: -1, keepdim: true)).sum(-1); return (score + log_scale).MoveToOuterDisposeScope(); From 3ec44e40adf879c99f767d13e2f18b86d96eff15 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 15:46:15 +0900 Subject: [PATCH 026/101] Update torchvision.transforms.functional.convert_image_dtype. * Update src/TorchVision/Functional.cs. + Update torchvision.transforms.functional.convert_image_dtype. - Declare TorchSharp.Scalar explicitly. --- src/TorchVision/Functional.cs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/TorchVision/Functional.cs b/src/TorchVision/Functional.cs index b26d4715f..23f9d9ca4 100644 --- a/src/TorchVision/Functional.cs +++ b/src/TorchVision/Functional.cs @@ -344,7 +344,9 @@ public static Tensor convert_image_dtype(Tensor image, ScalarType dtype = Scalar } var eps = 1e-3; - using var result = image.mul(output_max + 1.0 - eps); + var factor = output_max + 1.0 - eps; + using var factor_scalar = factor.ToScalar(); + using var result = image.mul(factor_scalar); return result.to_type(dtype); } else { @@ -359,7 +361,8 @@ public static Tensor convert_image_dtype(Tensor image, ScalarType dtype = Scalar if (input_max > output_max) { var factor = (input_max + 1) / (output_max + 1); - using var t0 = torch.div(image, factor); + using var factor_scalar = factor.ToScalar(); + using var t0 = torch.div(image, factor_scalar); return t0.to_type(dtype); } else { var factor = (output_max + 1) / (input_max + 1); From ed5c72df4213ea290bada457e288a280c8b2bdd6 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 16:15:06 +0900 Subject: [PATCH 027/101] Update torchaudio.functional.griffinlim. * Update src/TorchAudio/Functional.cs. + Update torchaudio.functional.griffinlim. - Declare TorchSharp.Scalar explicitly. - Cache momentum > 0.0. - Add FIXME for possible inplace ops use. - Simplify angles initialization. - Use torch.ones instead. --- src/TorchAudio/Functional.cs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/TorchAudio/Functional.cs b/src/TorchAudio/Functional.cs index a80959bfd..7182b751b 100644 --- a/src/TorchAudio/Functional.cs +++ b/src/TorchAudio/Functional.cs @@ -180,20 +180,20 @@ public static Tensor griffinlim(Tensor specgram, Tensor window, long n_fft, long throw new ArgumentOutOfRangeException($"momentum must be in range [0, 1). Found: {momentum}"); } momentum = momentum / (1 + momentum); + var need_momentum = momentum > 0.0; + using var momentum_scalar = (need_momentum) ? momentum.ToScalar() : null; // pack batch var shape = specgram.size(); specgram = specgram.reshape(new long[] { -1, shape[shape.Length - 2], shape[shape.Length - 1] }); - specgram = specgram.pow(1 / power); + using var exponent_scalar = (1 / power).ToScalar(); + specgram = specgram.pow(exponent_scalar); // FIXME: Use inplace ops? Skip if power == 1? // initialize the phase - Tensor angles; - if (rand_init) { - angles = torch.rand(specgram.size(), dtype: _get_complex_dtype(specgram.dtype), device: specgram.device); - } else { - angles = torch.full(specgram.size(), 1, dtype: _get_complex_dtype(specgram.dtype), device: specgram.device); - } + var angles = (rand_init) + ? 
torch.rand(specgram.size(), dtype: _get_complex_dtype(specgram.dtype), device: specgram.device) + : torch.ones(specgram.size(), dtype: _get_complex_dtype(specgram.dtype), device: specgram.device); // And initialize the previous iterate to 0 var tprev = torch.tensor(0.0, dtype: specgram.dtype, device: specgram.device); @@ -219,8 +219,8 @@ public static Tensor griffinlim(Tensor specgram, Tensor window, long n_fft, long // Update our phase estimates angles = rebuilt; - if (momentum > 0.0) { - angles = angles - tprev.mul_(momentum); + if (need_momentum) { + angles = angles - tprev.mul_(momentum_scalar!); // FIXME: Use inplace ops? } angles = angles.div(angles.abs().add(eps_scalar)); From c8fb482acafc0bdfe2060f53209342c38fb0d7fd Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 16:37:52 +0900 Subject: [PATCH 028/101] Update torchaudio.functional._get_sinc_resample_kernel. * Update src/TorchAudio/Functional.cs. + Update torchaudio.functional._get_sinc_resample_kernel. - Declare TorchSharp.Scalar explicitly. --- src/TorchAudio/Functional.cs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/TorchAudio/Functional.cs b/src/TorchAudio/Functional.cs index 7182b751b..340c160ec 100644 --- a/src/TorchAudio/Functional.cs +++ b/src/TorchAudio/Functional.cs @@ -529,6 +529,8 @@ internal static (torch.Tensor, int) _get_sinc_resample_kernel(int orig_freq, int if (lowpass_filter_width <= 0) { throw new ArgumentOutOfRangeException(); } + using var min_scalar = (-lowpass_filter_width).ToScalar(); + using var max_scalar = lowpass_filter_width.ToScalar(); var kernels_list = new List(); double base_freq = Math.Min(orig_freq, new_freq); @@ -536,11 +538,14 @@ internal static (torch.Tensor, int) _get_sinc_resample_kernel(int orig_freq, int var width = (int)Math.Ceiling(((double)lowpass_filter_width) * orig_freq / base_freq); var idx_dtype = dtype ?? torch.float64; - var idx = torch.arange(-width, width + orig_freq, device: device, dtype: idx_dtype); + using var start_scalar = (-width).ToScalar(); + using var stop_scalar = (width + orig_freq).ToScalar(); + var idx = torch.arange(start_scalar, stop_scalar, device: device, dtype: idx_dtype); + using var zero_scalar = 0.ToScalar(); for (int i = 0; i < new_freq; i++) { var t = (-i / new_freq + idx / orig_freq) * base_freq; - t = t.clamp_(-lowpass_filter_width, lowpass_filter_width); + t = t.clamp_(min_scalar, max_scalar); torch.Tensor window; if (resampling_method == ResamplingMethod.sinc_interpolation) { @@ -555,13 +560,14 @@ internal static (torch.Tensor, int) _get_sinc_resample_kernel(int orig_freq, int } t *= Math.PI; // Tensor.to(Tensor) of TorchSharp doesn't change dtype. - var kernel = torch.where(t == 0, torch.tensor(1.0).to(t).type_as(t), torch.sin(t) / t); + var kernel = torch.where(t == zero_scalar, torch.tensor(1.0).to(t).type_as(t), torch.sin(t) / t); kernel.mul_(window); kernels_list.Add(kernel); } var scale = ((double)base_freq) / orig_freq; - var kernels = torch.stack(kernels_list.ToArray()).view(new_freq, 1, -1).mul_(scale); + using var scale_scalar = scale.ToScalar(); + var kernels = torch.stack(kernels_list.ToArray()).view(new_freq, 1, -1).mul_(scale_scalar); if (dtype == null) { kernels = kernels.to(torch.float32); } From ae57dd6938651c8719907c6cba21af565a5c2127 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 17:18:16 +0900 Subject: [PATCH 029/101] Use THSTensor_square{,_}. * Update src/Native/LibTorchSharp/THSTensor.h. + Declare THSTensor_square{,_}. 
* Update src/Native/LibTorchSharp/THSTensorMath.cpp. + Implement THSTensor_square{,_}. * Update src/TorchSharp/PInvoke/LibTorchSharp.THSTensor.cs. + Declare THSTensor_square{,_}. * Update src/TorchSharp/Tensor/Tensor.Math.cs. + Update torch.Tensor.square. - Use THSTensor_square{,_}. - Improves compatibility with libtorch. - Removes extra cost for TorchSharp.Scalar. * Update src/TorchSharp/Tensor/torch.PointwiseOps.cs. + Introduce torch.square_. --- src/Native/LibTorchSharp/THSTensor.h | 4 +++ src/Native/LibTorchSharp/THSTensorMath.cpp | 10 +++++++ .../PInvoke/LibTorchSharp.THSTensor.cs | 6 ++++ src/TorchSharp/Tensor/Tensor.Math.cs | 28 +++++++++++++++---- src/TorchSharp/Tensor/torch.PointwiseOps.cs | 7 +++++ 5 files changed, 49 insertions(+), 6 deletions(-) diff --git a/src/Native/LibTorchSharp/THSTensor.h b/src/Native/LibTorchSharp/THSTensor.h index 0925cd4e0..86e603690 100644 --- a/src/Native/LibTorchSharp/THSTensor.h +++ b/src/Native/LibTorchSharp/THSTensor.h @@ -1220,6 +1220,10 @@ EXPORT_API(Tensor) THSTensor_sqrt(const Tensor tensor); EXPORT_API(void) THSTensor_sqrt_(const Tensor tensor); +EXPORT_API(Tensor) THSTensor_square(const Tensor tensor); + +EXPORT_API(void) THSTensor_square_(const Tensor tensor); + EXPORT_API(Tensor) THSTensor_std(const Tensor tensor, const bool unbiased); EXPORT_API(Tensor) THSTensor_std_along_dimensions(const Tensor tensor, const int64_t* dimensions, int length, bool unbiased, bool keepdim); diff --git a/src/Native/LibTorchSharp/THSTensorMath.cpp b/src/Native/LibTorchSharp/THSTensorMath.cpp index b1f0e3a23..9d4ac75f1 100644 --- a/src/Native/LibTorchSharp/THSTensorMath.cpp +++ b/src/Native/LibTorchSharp/THSTensorMath.cpp @@ -910,6 +910,16 @@ void THSTensor_sqrt_(const Tensor tensor) { CATCH(tensor->sqrt_();) } +Tensor THSTensor_square(const Tensor tensor) +{ + CATCH_TENSOR(tensor->square()); +} + +void THSTensor_square_(const Tensor tensor) +{ + CATCH(tensor->square_();) +} + Tensor THSTensor_sign(const Tensor tensor) { CATCH_TENSOR(tensor->sign()); diff --git a/src/TorchSharp/PInvoke/LibTorchSharp.THSTensor.cs b/src/TorchSharp/PInvoke/LibTorchSharp.THSTensor.cs index bb568ae68..9afcbd103 100644 --- a/src/TorchSharp/PInvoke/LibTorchSharp.THSTensor.cs +++ b/src/TorchSharp/PInvoke/LibTorchSharp.THSTensor.cs @@ -1917,6 +1917,9 @@ internal static extern IntPtr THSTensor_upsample_nearest3d(IntPtr input, [DllImport("LibTorchSharp")] internal static extern IntPtr THSTensor_sqrt(IntPtr tensor); + [DllImport("LibTorchSharp")] + internal static extern IntPtr THSTensor_square(IntPtr tensor); + [DllImport("LibTorchSharp")] internal static extern IntPtr THSTensor_float_power(IntPtr tensor, IntPtr trg); @@ -1998,6 +2001,9 @@ internal static extern IntPtr THSTensor_upsample_nearest3d(IntPtr input, [DllImport("LibTorchSharp")] internal static extern void THSTensor_sqrt_(IntPtr tensor); + [DllImport("LibTorchSharp")] + internal static extern void THSTensor_square_(IntPtr tensor); + [DllImport("LibTorchSharp")] internal static extern IntPtr THSTensor_sign(IntPtr tensor); diff --git a/src/TorchSharp/Tensor/Tensor.Math.cs b/src/TorchSharp/Tensor/Tensor.Math.cs index 829d686ce..e8342467c 100644 --- a/src/TorchSharp/Tensor/Tensor.Math.cs +++ b/src/TorchSharp/Tensor/Tensor.Math.cs @@ -1560,12 +1560,6 @@ public Tensor rsqrt_() return this; } - /// - /// Computes the element-wise square - /// - public Tensor square() => pow(2); - /// /// Computes the element-wise square root /// @@ -1588,6 +1582,28 @@ public Tensor sqrt_() return this; } + /// + /// Computes the element-wise 
square + /// + public Tensor square() + { + var res = THSTensor_square(Handle); + if (res == IntPtr.Zero) { CheckForErrors(); } + return new Tensor(res); + } + + /// + /// Computes the element-wise square, in place + /// + public Tensor square_() + { + THSTensor_square_(Handle); + CheckForErrors(); + return this; + } + /// /// Returns a new tensor with the signs (-1, 0, 1) of the elements of input. /// diff --git a/src/TorchSharp/Tensor/torch.PointwiseOps.cs b/src/TorchSharp/Tensor/torch.PointwiseOps.cs index 0fccbd8ce..de22fe9dc 100644 --- a/src/TorchSharp/Tensor/torch.PointwiseOps.cs +++ b/src/TorchSharp/Tensor/torch.PointwiseOps.cs @@ -1552,6 +1552,13 @@ public static Tensor quantized_max_pool2d(Tensor input, long[] kernel_size, long /// The input tensor. [Pure]public static Tensor square(Tensor input) => input.square(); + // https://pytorch.org/docs/stable/generated/torch.square + /// + /// Computes the element-wise square, in place + /// + /// The input tensor. + public static Tensor square_(Tensor input) => input.square_(); + // https://pytorch.org/docs/stable/generated/torch.sub /// /// Element-wise subtraction From 9cf47bf1083a25f2fe3fc45c405eeffba50340ad Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 17:34:22 +0900 Subject: [PATCH 030/101] Update torchaudio.functional.spectrogram. * Update src/TorchAudio/Functional.cs. + Update torchaudio.functional.spectrogram. - Use torch.Tensor.square. - Declare TorchSharp.Scalar explicitly. - Add FIXME for possible torch.Tensor.square use. --- src/TorchAudio/Functional.cs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/TorchAudio/Functional.cs b/src/TorchAudio/Functional.cs index 340c160ec..2a6d31443 100644 --- a/src/TorchAudio/Functional.cs +++ b/src/TorchAudio/Functional.cs @@ -73,14 +73,15 @@ public static torch.Tensor spectrogram(torch.Tensor waveform, long pad, torch.Te spec_f = spec_f.reshape(spec_shape); if (normalized) { - spec_f /= window.pow(2.0).sum().sqrt(); + spec_f /= window.square().sum().sqrt(); } if (power.HasValue) { if (power.Value == 1.0) { spec_f = spec_f.abs(); } else { - spec_f = spec_f.abs().pow(power.Value); + using var power_scalar = power.Value.ToScalar(); + spec_f = spec_f.abs().pow(power_scalar); // FIXME: Call torch.Tensor.square if power.Value == 2.0? } } From b9e9da468b99ef046820f331a2a283e5bf3a3496 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 17:40:39 +0900 Subject: [PATCH 031/101] Update torchaudio.functional.inverse_spectrogram. * Update src/TorchAudio/Functional.cs. + Update torchaudio.functional.inverse_spectrogram. - Use torch.Tensor.square. --- src/TorchAudio/Functional.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TorchAudio/Functional.cs b/src/TorchAudio/Functional.cs index 2a6d31443..0ebb0c479 100644 --- a/src/TorchAudio/Functional.cs +++ b/src/TorchAudio/Functional.cs @@ -113,7 +113,7 @@ public static torch.Tensor inverse_spectrogram(torch.Tensor spectrogram, long? l using (var d = torch.NewDisposeScope()) { if (normalized) { - spectrogram = spectrogram * window.pow(2.0).sum().sqrt(); + spectrogram = spectrogram * window.square().sum().sqrt(); } // pack batch From 36983ab338659455e6129412a0ce44d0712f7e55 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 17:47:30 +0900 Subject: [PATCH 032/101] Use torch.Tensor.square(). 
--- src/TorchAudio/Modules/HuBERTPretrainModel.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TorchAudio/Modules/HuBERTPretrainModel.cs b/src/TorchAudio/Modules/HuBERTPretrainModel.cs index 7c6e121c8..a0b84edae 100644 --- a/src/TorchAudio/Modules/HuBERTPretrainModel.cs +++ b/src/TorchAudio/Modules/HuBERTPretrainModel.cs @@ -91,7 +91,7 @@ public override (Tensor?, Tensor?, Tensor) forward( if (this.feature_grad_mult != null && this.feature_grad_mult < 1.0) { x = Wav2Vec2Model.GradMultiply.apply(x, this.feature_grad_mult.Value); } - var features_pen = x.@float().pow(2).mean(); + var features_pen = x.@float().square().mean(); if (lengths is not null) { padding_mask = Wav2Vec2Model._get_padding_mask(x, lengths); } else { From 6e0c00b2304d480d3b172aae37a6a3524d990f36 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 17:54:39 +0900 Subject: [PATCH 033/101] Update InverseMelScale.forward. * Update src/TorchAudio/Transforms/InverseMelScale.cs. + Update InverseMelScale.forward. - Declare TorchSharp.Scalar explicitly. - Use torch.Tensor.square. --- src/TorchAudio/Transforms/InverseMelScale.cs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/TorchAudio/Transforms/InverseMelScale.cs b/src/TorchAudio/Transforms/InverseMelScale.cs index fa3196d23..a3032e43c 100644 --- a/src/TorchAudio/Transforms/InverseMelScale.cs +++ b/src/TorchAudio/Transforms/InverseMelScale.cs @@ -96,18 +96,19 @@ public override Tensor forward(Tensor melspec) learningRate: 0.1, momentum: 0.9); var loss = float.PositiveInfinity; + using var zero_scalar = 0.ToScalar(); for (long i = 0; i < this.max_iter; i++) { using var d2 = torch.NewDisposeScope(); optim.zero_grad(); var diff = melspec - specgram.matmul(this.fb); - var new_loss = diff.pow(2).sum(dim: -1).mean(); + var new_loss = diff.square().sum(dim: -1).mean(); // take sum over mel-frequency then average over other dimensions // so that loss threshold is applied per unit timeframe new_loss.backward(); optim.step(); using (torch.no_grad()) - specgram.set_(specgram.clamp(min: 0)); + specgram.set_(specgram.clamp(min: zero_scalar)); var new_loss_value = new_loss.item(); if (new_loss_value < this.tolerance_loss || Math.Abs(loss - new_loss_value) < this.tolerance_change) { @@ -117,7 +118,7 @@ public override Tensor forward(Tensor melspec) } specgram.requires_grad_(false); - var specgram_tensor = specgram.clamp(min: 0).transpose(-1, -2); + var specgram_tensor = specgram.clamp(min: zero_scalar).transpose(-1, -2); // unpack batch shape[shape.Length - 2] = freq; From d2a533f37d4d33b1685ff9e5d25a03adf0da4b5f Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 18:12:40 +0900 Subject: [PATCH 034/101] Use torch.Tensor.square. 
--- src/TorchSharp/Distributions/Beta.cs | 2 +- src/TorchSharp/Distributions/Cauchy.cs | 2 +- src/TorchSharp/Distributions/Dirichlet.cs | 2 +- src/TorchSharp/Distributions/Exponential.cs | 2 +- src/TorchSharp/Distributions/FisherSnedecor.cs | 2 +- src/TorchSharp/Distributions/Gamma.cs | 2 +- src/TorchSharp/Distributions/GumbEL.cs | 2 +- src/TorchSharp/Distributions/HalfNormal.cs | 2 +- src/TorchSharp/Distributions/Laplace.cs | 2 +- src/TorchSharp/Distributions/LogNormal.cs | 4 ++-- src/TorchSharp/Distributions/MultiVariateNormal.cs | 4 ++-- src/TorchSharp/Distributions/Normal.cs | 6 +++--- src/TorchSharp/Distributions/Pareto.cs | 2 +- src/TorchSharp/Distributions/Uniform.cs | 2 +- src/TorchSharp/Distributions/Weibull.cs | 2 +- src/TorchVision/Functional.cs | 2 +- src/TorchVision/Ops/Boxes.cs | 4 ++-- src/TorchVision/Ops/Utils.cs | 4 ++-- 18 files changed, 24 insertions(+), 24 deletions(-) diff --git a/src/TorchSharp/Distributions/Beta.cs b/src/TorchSharp/Distributions/Beta.cs index 67b4d0d6c..155ab21ba 100644 --- a/src/TorchSharp/Distributions/Beta.cs +++ b/src/TorchSharp/Distributions/Beta.cs @@ -28,7 +28,7 @@ public override Tensor variance { get { using var _ = NewDisposeScope(); var total = concentration0 + concentration1; - return (concentration1 * concentration0 / (total.pow(2) * (total + 1))).MoveToOuterDisposeScope(); + return (concentration1 * concentration0 / (total.square() * (total + 1))).MoveToOuterDisposeScope(); } } diff --git a/src/TorchSharp/Distributions/Cauchy.cs b/src/TorchSharp/Distributions/Cauchy.cs index 516152c80..3017f53ce 100644 --- a/src/TorchSharp/Distributions/Cauchy.cs +++ b/src/TorchSharp/Distributions/Cauchy.cs @@ -78,7 +78,7 @@ public override Tensor rsample(params long[] sample_shape) /// /// public override Tensor log_prob(Tensor value) => - WrappedTensorDisposeScope(() => -Math.Log(Math.PI) - scale.log() - (((value - loc) / scale).pow(2)).log1p()); + WrappedTensorDisposeScope(() => -Math.Log(Math.PI) - scale.log() - (((value - loc) / scale).square()).log1p()); /// /// Returns entropy of distribution, batched over batch_shape. 
diff --git a/src/TorchSharp/Distributions/Dirichlet.cs b/src/TorchSharp/Distributions/Dirichlet.cs index 68fbbcfa8..98d72e2b6 100644 --- a/src/TorchSharp/Distributions/Dirichlet.cs +++ b/src/TorchSharp/Distributions/Dirichlet.cs @@ -40,7 +40,7 @@ public override Tensor variance { get { using var _ = NewDisposeScope(); var con0 = concentration.sum(-1, true); - return (concentration * (con0 - concentration) / (con0.pow(2) * (con0 + 1))).MoveToOuterDisposeScope(); + return (concentration * (con0 - concentration) / (con0.square() * (con0 + 1))).MoveToOuterDisposeScope(); } } diff --git a/src/TorchSharp/Distributions/Exponential.cs b/src/TorchSharp/Distributions/Exponential.cs index 93e28d589..21b512d7c 100644 --- a/src/TorchSharp/Distributions/Exponential.cs +++ b/src/TorchSharp/Distributions/Exponential.cs @@ -24,7 +24,7 @@ public class Exponential : torch.distributions.ExponentialFamily /// /// The variance of the distribution /// - public override Tensor variance => rate.pow(2); + public override Tensor variance => rate.square(); /// /// The standard deviation of the distribution diff --git a/src/TorchSharp/Distributions/FisherSnedecor.cs b/src/TorchSharp/Distributions/FisherSnedecor.cs index d9bc40a4e..036b383fd 100644 --- a/src/TorchSharp/Distributions/FisherSnedecor.cs +++ b/src/TorchSharp/Distributions/FisherSnedecor.cs @@ -32,7 +32,7 @@ public override Tensor variance { using var _ = torch.NewDisposeScope(); var df2 = this.df2.clone(); df2[df2 <= 4] = torch.tensor(float.NaN); - return (2 * df2.pow(2) * (this.df1 + df2 - 2) / (this.df1 * (df2 - 2).pow(2) * (df2 - 4))).MoveToOuterDisposeScope(); + return (2 * df2.square() * (this.df1 + df2 - 2) / (this.df1 * (df2 - 2).square() * (df2 - 4))).MoveToOuterDisposeScope(); } } diff --git a/src/TorchSharp/Distributions/Gamma.cs b/src/TorchSharp/Distributions/Gamma.cs index 9ee82a5e3..ab5a4751f 100644 --- a/src/TorchSharp/Distributions/Gamma.cs +++ b/src/TorchSharp/Distributions/Gamma.cs @@ -24,7 +24,7 @@ public class Gamma : torch.distributions.ExponentialFamily /// /// The variance of the distribution /// - public override Tensor variance => WrappedTensorDisposeScope(() => concentration / rate.pow(2)); + public override Tensor variance => WrappedTensorDisposeScope(() => concentration / rate.square()); /// /// Constructor diff --git a/src/TorchSharp/Distributions/GumbEL.cs b/src/TorchSharp/Distributions/GumbEL.cs index 1ca1845f4..6efdbefda 100644 --- a/src/TorchSharp/Distributions/GumbEL.cs +++ b/src/TorchSharp/Distributions/GumbEL.cs @@ -30,7 +30,7 @@ internal Gumbel(Tensor loc, Tensor scale, Distribution base_distribution, torch. 
public override Tensor mode => loc; - public override Tensor variance => stddev.pow(2); + public override Tensor variance => stddev.square(); public override Tensor stddev => pioversqrtsix * scale; diff --git a/src/TorchSharp/Distributions/HalfNormal.cs b/src/TorchSharp/Distributions/HalfNormal.cs index 563d8d91e..526e21112 100644 --- a/src/TorchSharp/Distributions/HalfNormal.cs +++ b/src/TorchSharp/Distributions/HalfNormal.cs @@ -26,7 +26,7 @@ internal HalfNormal(Tensor scale, torch.Generator generator = null) : public override Tensor mode => torch.zeros_like(scale); - public override Tensor variance => scale.pow(2) * (1 - 2 / Math.PI); + public override Tensor variance => scale.square() * (1 - 2 / Math.PI); public override Tensor log_prob(Tensor value) { diff --git a/src/TorchSharp/Distributions/Laplace.cs b/src/TorchSharp/Distributions/Laplace.cs index 4ebb48053..b9b53f528 100644 --- a/src/TorchSharp/Distributions/Laplace.cs +++ b/src/TorchSharp/Distributions/Laplace.cs @@ -31,7 +31,7 @@ public class Laplace : torch.distributions.Distribution /// /// The variance of the distribution /// - public override Tensor variance => 2 * scale.pow(2); + public override Tensor variance => 2 * scale.square(); /// diff --git a/src/TorchSharp/Distributions/LogNormal.cs b/src/TorchSharp/Distributions/LogNormal.cs index 944792a1e..2cd6e08d2 100644 --- a/src/TorchSharp/Distributions/LogNormal.cs +++ b/src/TorchSharp/Distributions/LogNormal.cs @@ -22,11 +22,11 @@ internal LogNormal(Tensor loc, Tensor scale, torch.Generator generator = null) : public Tensor scale { get; private set; } - public override Tensor mean => torch.WrappedTensorDisposeScope(() => (loc + scale.pow(2) / 2).exp()); + public override Tensor mean => torch.WrappedTensorDisposeScope(() => (loc + scale.square() / 2).exp()); public override Tensor mode => torch.WrappedTensorDisposeScope(() => (loc - scale.square()).exp()); - public override Tensor variance => torch.WrappedTensorDisposeScope(() => (scale.pow(2).exp() - 1) * (2 * loc + scale.pow(2)).exp()); + public override Tensor variance => torch.WrappedTensorDisposeScope(() => (scale.square().exp() - 1) * (2 * loc + scale.square()).exp()); protected override void Dispose(bool disposing) { diff --git a/src/TorchSharp/Distributions/MultiVariateNormal.cs b/src/TorchSharp/Distributions/MultiVariateNormal.cs index 721c0a5de..1ccc0c437 100644 --- a/src/TorchSharp/Distributions/MultiVariateNormal.cs +++ b/src/TorchSharp/Distributions/MultiVariateNormal.cs @@ -34,7 +34,7 @@ public class MultivariateNormal : torch.distributions.Distribution /// The variance of the distribution /// public override Tensor variance => - WrappedTensorDisposeScope(() => _unbroadcasted_scale_tril.pow(2).sum(-1).expand(batch_shape + event_shape)); + WrappedTensorDisposeScope(() => _unbroadcasted_scale_tril.square().sum(-1).expand(batch_shape + event_shape)); /// /// Constructor @@ -241,7 +241,7 @@ private Tensor BatchMahalanobis(Tensor bL, Tensor bx) var flat_x = bx.reshape(-1, flat_L.size(0), n); var flat_x_swap = flat_x.permute(1, 2, 0); - var M_swap = torch.linalg.solve_triangular(flat_L, flat_x_swap, upper: false).pow(2).sum(-2); + var M_swap = torch.linalg.solve_triangular(flat_L, flat_x_swap, upper: false).square().sum(-2); var M = M_swap.t(); var permuted_M = M.reshape(TakeAllBut(bx.shape, 1)); diff --git a/src/TorchSharp/Distributions/Normal.cs b/src/TorchSharp/Distributions/Normal.cs index fb2f34aa1..921272210 100644 --- a/src/TorchSharp/Distributions/Normal.cs +++ b/src/TorchSharp/Distributions/Normal.cs @@ 
-31,7 +31,7 @@ public class Normal : distributions.Distribution /// /// The variance of the distribution /// - public override Tensor variance => scale.pow(2); + public override Tensor variance => scale.square(); /// /// Constructor @@ -91,9 +91,9 @@ public override Tensor rsample(params long[] sample_shape) public override Tensor log_prob(Tensor value) { using var _ = NewDisposeScope(); - var v = scale.pow(2); + var v = scale.square(); var log_scale = scale.log(); - return (-((value - loc).pow(2)) / (2 * v) - log_scale - Math.Log(Math.Sqrt(2 * Math.PI))).MoveToOuterDisposeScope(); + return (-((value - loc).square()) / (2 * v) - log_scale - Math.Log(Math.Sqrt(2 * Math.PI))).MoveToOuterDisposeScope(); } /// diff --git a/src/TorchSharp/Distributions/Pareto.cs b/src/TorchSharp/Distributions/Pareto.cs index 2ff0e22f2..aa3e1d21e 100644 --- a/src/TorchSharp/Distributions/Pareto.cs +++ b/src/TorchSharp/Distributions/Pareto.cs @@ -36,7 +36,7 @@ public override Tensor variance { get { using var _ = torch.NewDisposeScope(); var a = alpha.clamp(min: 2); - return (scale.pow(2) * a / ((a - 1).pow(2) * (a - 2))).MoveToOuterDisposeScope(); + return (scale.square() * a / ((a - 1).square() * (a - 2))).MoveToOuterDisposeScope(); } } diff --git a/src/TorchSharp/Distributions/Uniform.cs b/src/TorchSharp/Distributions/Uniform.cs index 671ec0825..b15b61a47 100644 --- a/src/TorchSharp/Distributions/Uniform.cs +++ b/src/TorchSharp/Distributions/Uniform.cs @@ -25,7 +25,7 @@ public class Uniform : torch.distributions.Distribution /// The variance of the distribution /// public override Tensor variance => - WrappedTensorDisposeScope(() => (high - low).pow(2) / 12); + WrappedTensorDisposeScope(() => (high - low).square() / 12); /// /// Constructor diff --git a/src/TorchSharp/Distributions/Weibull.cs b/src/TorchSharp/Distributions/Weibull.cs index 131b8ec64..13bcff3f2 100644 --- a/src/TorchSharp/Distributions/Weibull.cs +++ b/src/TorchSharp/Distributions/Weibull.cs @@ -44,7 +44,7 @@ protected override void Dispose(bool disposing) public override Tensor variance => WrappedTensorDisposeScope(() => - scale.pow(2) * (torch.exp(torch.lgamma(1 + 2 * concentration_reciprocal)) - torch.exp(2 * torch.lgamma(1 + concentration_reciprocal))) + scale.square() * (torch.exp(torch.lgamma(1 + 2 * concentration_reciprocal)) - torch.exp(2 * torch.lgamma(1 + concentration_reciprocal))) ); /// diff --git a/src/TorchVision/Functional.cs b/src/TorchVision/Functional.cs index 23f9d9ca4..e6ba173f3 100644 --- a/src/TorchVision/Functional.cs +++ b/src/TorchVision/Functional.cs @@ -930,7 +930,7 @@ private static Tensor GetGaussianKernel1d(long size, float sigma) using var x = torch.linspace(-ksize_half, ksize_half, size); using var t0 = x / sigma; using var t1 = -t0; - using var t2 = t1.pow(2); + using var t2 = t1.square(); using var pdf = t2 * 0.5f; using var sum = pdf.sum(); diff --git a/src/TorchVision/Ops/Boxes.cs b/src/TorchVision/Ops/Boxes.cs index 134e89a63..9a2524cf4 100644 --- a/src/TorchVision/Ops/Boxes.cs +++ b/src/TorchVision/Ops/Boxes.cs @@ -192,14 +192,14 @@ private static Tensor _box_diou_iou(Tensor boxes1, Tensor boxes2, out Tensor iou var lti = torch.min(boxes1[colon, None, (null, 2)], boxes2[colon, (null, 2)]); var rbi = torch.max(boxes1[colon, None, (2, null)], boxes2[colon, (2, null)]); var whi = _upcast(rbi - lti).clamp(min: 0); // [N,M,2]; - var diagonal_distance_squared = whi[colon, colon, 0].pow(2) + whi[colon, colon, 1].pow(2) + eps; + var diagonal_distance_squared = whi[colon, colon, 0].square() + whi[colon, colon, 
1].square() + eps; // centers of boxes var x_p = (boxes1[colon, 0] + boxes1[colon, 2]) / 2; var y_p = (boxes1[colon, 1] + boxes1[colon, 3]) / 2; var x_g = (boxes2[colon, 0] + boxes2[colon, 2]) / 2; var y_g = (boxes2[colon, 1] + boxes2[colon, 3]) / 2; // The distance between boxes' centers squared. - var centers_distance_squared = _upcast((x_p[colon, None] - x_g[None, colon])).pow(2) + _upcast((y_p[colon, None] - y_g[None, colon])).pow(2); + var centers_distance_squared = _upcast((x_p[colon, None] - x_g[None, colon])).square() + _upcast((y_p[colon, None] - y_g[None, colon])).square(); // The distance IoU is the IoU penalized by a normalized // distance between boxes' centers squared. return iou - (centers_distance_squared / diagonal_distance_squared); diff --git a/src/TorchVision/Ops/Utils.cs b/src/TorchVision/Ops/Utils.cs index cbc74acf4..5227ede57 100644 --- a/src/TorchVision/Ops/Utils.cs +++ b/src/TorchVision/Ops/Utils.cs @@ -76,7 +76,7 @@ internal static (Tensor, Tensor) _diou_iou_loss(Tensor boxes1, Tensor boxes2, do var xc2 = max(x2, x2g); var yc2 = max(y2, y2g); - var diagonal_distance_squared = (xc2 - xc1).pow(2) + (yc2 - yc1).pow(2) + eps; + var diagonal_distance_squared = (xc2 - xc1).square() + (yc2 - yc1).square() + eps; // centers of boxes var x_p = (x2 + x1) / 2; var y_p = (y2 + y1) / 2; @@ -84,7 +84,7 @@ internal static (Tensor, Tensor) _diou_iou_loss(Tensor boxes1, Tensor boxes2, do var y_g = (y1g + y2g) / 2; // The distance between boxes' centers squared. - var centers_distance_squared = (x_p - x_g).pow(2) + (y_p - y_g).pow(2); + var centers_distance_squared = (x_p - x_g).square() + (y_p - y_g).square(); // The distance IoU is the IoU penalized by a normalized // distance between boxes' centers squared. var loss = 1 - iou + (centers_distance_squared / diagonal_distance_squared); From f9f81c51d8ba0b722e6e8dff151abf623fe37361 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Fri, 12 Sep 2025 18:32:59 +0900 Subject: [PATCH 035/101] Use torch.Tensor.square. These changes are exceptional in that they touch unit tests; to my understanding, the tests attach no special meaning to the use of torch.Tensor.pow there. 
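For anyone auditing the substitution: torch.Tensor.square computes the same element-wise values as pow with exponent 2, just without routing the exponent through a Scalar. A quick spot check along these lines (illustrative, not part of the test changes):

    using var two_scalar = 2.ToScalar();
    var x = torch.rand(3, 4);
    var same = x.square().allclose(x.pow(two_scalar)); // expected: true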
--- test/TorchSharpTest/TestAutogradFunction.cs | 2 +- test/TorchSharpTest/TestTorchTensor.cs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test/TorchSharpTest/TestAutogradFunction.cs b/test/TorchSharpTest/TestAutogradFunction.cs index 075306d3d..f2043ddca 100644 --- a/test/TorchSharpTest/TestAutogradFunction.cs +++ b/test/TorchSharpTest/TestAutogradFunction.cs @@ -76,7 +76,7 @@ private float TrainXOR(Device device) var input = torch.tensor(new float[] { i, j }, device: device).unsqueeze(0); var output = LinearFunction.apply(input, weight1); output = LinearFunction.apply(torch.nn.functional.tanh(input), weight2); - var loss = (output - (i ^ j)).pow(2); + var loss = (output - (i ^ j)).square(); loss.backward(); optim.step(); lastLoss = loss.item(); diff --git a/test/TorchSharpTest/TestTorchTensor.cs b/test/TorchSharpTest/TestTorchTensor.cs index 69ba732f3..4181dd7ac 100644 --- a/test/TorchSharpTest/TestTorchTensor.cs +++ b/test/TorchSharpTest/TestTorchTensor.cs @@ -3980,9 +3980,9 @@ public void SquareEuclideanDistance() var ones = torch.ones(new long[] { 1, 9 }); var centroids = torch.cat(new Tensor[] { zeros, ones }, 0); - var distanceFromZero = input.reshape(new long[] { -1, 1, 9 }).sub(zeros).pow(2.ToScalar()).sum(new long[] { 2 }); - var distanceFromOne = input.reshape(new long[] { -1, 1, 9 }).sub(ones).pow(2.ToScalar()).sum(new long[] { 2 }); - var distanceFromCentroids = input.reshape(new long[] { -1, 1, 9 }).sub(centroids).pow(2.ToScalar()).sum(new long[] { 2 }); + var distanceFromZero = input.reshape(new long[] { -1, 1, 9 }).sub(zeros).square().sum(new long[] { 2 }); + var distanceFromOne = input.reshape(new long[] { -1, 1, 9 }).sub(ones).square().sum(new long[] { 2 }); + var distanceFromCentroids = input.reshape(new long[] { -1, 1, 9 }).sub(centroids).square().sum(new long[] { 2 }); Assert.True(true); } From 2be26a3df150c3de0bfe9b892060fd2bb303efdb Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 09:27:05 +0900 Subject: [PATCH 036/101] Update TorchSharp.torchvision.AdjustGamma.call. * Update src/TorchVision/AdjustGamma.cs. + Update TorchSharp.torchvision.AdjustGamma.call. - Declare TorchSharp.Scalar explicitly. --- src/TorchVision/AdjustGamma.cs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/TorchVision/AdjustGamma.cs b/src/TorchVision/AdjustGamma.cs index 89cd0b5ae..8a08554b7 100644 --- a/src/TorchVision/AdjustGamma.cs +++ b/src/TorchVision/AdjustGamma.cs @@ -22,7 +22,10 @@ public Tensor call(Tensor img) if (!torch.is_floating_point(img)) img = transforms.ConvertImageDtype(torch.float32).call(img); - img = (gain * img.pow(gamma)).clamp(0, 1); + using var gamma_scalar = gamma.ToScalar(); + using var zero_scalar = 0.ToScalar(); + using var one_scalar = 1.ToScalar(); + img = (gain * img.pow(gamma_scalar)).clamp(zero_scalar, one_scalar); return transforms.ConvertImageDtype(dtype).call(img); ; } From c717fb3ad812741be069eb539990443209cd31ad Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 09:33:11 +0900 Subject: [PATCH 037/101] Update torchvision.transforms.functional.adjust_gamma. * Update src/TorchVision/Functional.cs. + Update torchvision.transforms.functional.adjust_gamma. - Declare TorchSharp.Scalar explicitly. 
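For context, both this and the previous patch compute the same mapping, out = clamp(gain * in^gamma, 0, 1), on a floating-point image before converting back to the requested dtype. A compressed sketch of the flow with the Scalars made explicit (illustrative; img, gamma, gain and dtype stand in for the method's own locals):

    using var gamma_scalar = gamma.ToScalar();
    using var zero_scalar = 0.ToScalar();
    using var one_scalar = 1.ToScalar();
    var img_f = convert_image_dtype(img, torch.float32);   // gamma is applied in float
    var outp = (gain * img_f.pow(gamma_scalar)).clamp(zero_scalar, one_scalar);
    var result = convert_image_dtype(outp, dtype);         // back to the original dtype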
--- src/TorchVision/Functional.cs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/TorchVision/Functional.cs b/src/TorchVision/Functional.cs index e6ba173f3..641590100 100644 --- a/src/TorchVision/Functional.cs +++ b/src/TorchVision/Functional.cs @@ -110,9 +110,12 @@ public static Tensor adjust_gamma(Tensor img, double gamma, double gain = 1.0) img = img.alias(); } - using var t0 = img.pow(gamma); + using var gamma_scalar = gamma.ToScalar(); + using var t0 = img.pow(gamma_scalar); using var t1 = gain * t0; - using var t2 = t1.clamp(0, 1); + using var zero_scalar = 0.ToScalar(); + using var one_scalar = 1.ToScalar(); + using var t2 = t1.clamp(zero_scalar, one_scalar); return convert_image_dtype(t2, dtype); } From b266f022648e9d8cf053ca26e6e378d99f506608 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 09:37:12 +0900 Subject: [PATCH 038/101] Update torchvision.ops.sigmoid_focal_loss. * Update src/TorchVision/Ops.cs. + Update torchvision.ops.sigmoid_focal_loss. - Declare TorchSharp.Scalar. --- src/TorchVision/Ops.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchVision/Ops.cs b/src/TorchVision/Ops.cs index 987f022f7..a88b341d8 100644 --- a/src/TorchVision/Ops.cs +++ b/src/TorchVision/Ops.cs @@ -18,7 +18,8 @@ public static Tensor sigmoid_focal_loss(Tensor inputs, Tensor targets, float alp var ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction: reduction); var p_t = p * targets + (1 - p) * (1 - targets); - var loss = ce_loss * (1 - p_t).pow(gamma); + using var gamma_scalar = gamma.ToScalar(); + var loss = ce_loss * (1 - p_t).pow(gamma_scalar); if (alpha >= 0) { var alpha_t = alpha * targets + (1 - alpha) * (1 - targets); From 39b5091c16d6818dc095e1eca370aa5a017752ac Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 09:42:43 +0900 Subject: [PATCH 039/101] Update torchvision.ops.sigmoid_focal_loss. * Update src/TorchVision/Ops/Losses.cs. + Update torchvision.ops.sigmoid_focal_loss. - Declare TorchSharp.Scalar explicitly. --- src/TorchVision/Ops/Losses.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchVision/Ops/Losses.cs b/src/TorchVision/Ops/Losses.cs index a01824120..4481f8906 100644 --- a/src/TorchVision/Ops/Losses.cs +++ b/src/TorchVision/Ops/Losses.cs @@ -36,7 +36,8 @@ public static Tensor sigmoid_focal_loss( var p = torch.sigmoid(inputs); var ce_loss = binary_cross_entropy_with_logits(inputs, targets, reduction: nn.Reduction.None); var p_t = p * targets + (1 - p) * (1 - targets); - var loss = ce_loss * (1 - p_t).pow(gamma); + using var gamma_scalar = gamma.ToScalar(); + var loss = ce_loss * (1 - p_t).pow(gamma_scalar); if (alpha >= 0) { var alpha_t = alpha * targets + (1 - alpha) * (1 - targets); From 359e846838d9fb0c9f6593b82b75b54d86072839 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 10:28:14 +0900 Subject: [PATCH 040/101] Update TorchSharp.Modules.PReLU constructor. * Update src/TorchSharp/NN/Activation/PReLU.cs. + Update TorchSharp.Modules.PReLU constructor. - Declare TorchSharp.Scalar explicitly. - Use torch.full. --- src/TorchSharp/NN/Activation/PReLU.cs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/TorchSharp/NN/Activation/PReLU.cs b/src/TorchSharp/NN/Activation/PReLU.cs index 2b48b4a6b..5f3133ed9 100644 --- a/src/TorchSharp/NN/Activation/PReLU.cs +++ b/src/TorchSharp/NN/Activation/PReLU.cs @@ -20,9 +20,9 @@ internal PReLU(long num_parameters, double init, Device? 
device = null, ScalarTy { this.init = init; this.num_parameters = num_parameters; - - var w = torch.empty(num_parameters, device:device, dtype:dtype); - w.fill_(init); + + using var init_scalar = init.ToScalar(); + var w = torch.full(num_parameters, init_scalar, device: device, dtype: dtype); this.weight = new Parameter(w); } From e4ffef23dcd9e1edd1d8f36166b4d6f522ce313d Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 10:34:35 +0900 Subject: [PATCH 041/101] Update TorchSharp.Modules.Rprop.State.Initialize. * Update src/TorchSharp/Optimizers/Rprop.cs. + Update TorchSharp.Modules.Rprop.State.Initialize. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Optimizers/Rprop.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Optimizers/Rprop.cs b/src/TorchSharp/Optimizers/Rprop.cs index acf62eb63..fd4f7f2b3 100644 --- a/src/TorchSharp/Optimizers/Rprop.cs +++ b/src/TorchSharp/Optimizers/Rprop.cs @@ -316,7 +316,8 @@ public override void Initialize(OptimizerOptions options) this.step = 0; this.prev = torch.zeros_like(_parameter).DetachFromDisposeScope(); - this.step_size = _parameter.new_empty(_parameter.shape).fill_((options as Options).LearningRate).DetachFromDisposeScope(); + using var lr_scalar = ((double)(options as Options).LearningRate!).ToScalar(); + this.step_size = _parameter.new_empty(_parameter.shape).fill_(lr_scalar).DetachFromDisposeScope(); } } From 995ab18b9cf9e8ce94a94c3804244039862dd649 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 10:48:58 +0900 Subject: [PATCH 042/101] Update torchvision.transforms.functional.autocontrast. * Update src/TorchVision/Functional.cs. + Update torchvision.transforms.functional.autocontrast. - Declare TorchSharp.Scalar explicitly. --- src/TorchVision/Functional.cs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/TorchVision/Functional.cs b/src/TorchVision/Functional.cs index 641590100..33d1bac98 100644 --- a/src/TorchVision/Functional.cs +++ b/src/TorchVision/Functional.cs @@ -279,8 +279,10 @@ public static Tensor autocontrast(Tensor input) var t3 = t2.nonzero_as_list(); var eq_idxs = t3[0]; - using var t4 = minimum.index_put_(0, eq_idxs); - using var t5 = maximum.index_put_(bound, eq_idxs); + using var zero_scalar = 0.ToScalar(); + using var t4 = minimum.index_put_(zero_scalar, eq_idxs); + using var bound_scalar = bound.ToScalar(); + using var t5 = maximum.index_put_(bound_scalar, eq_idxs); using var t6 = (maximum - minimum); using var t7 = torch.tensor(bound, float32); @@ -289,7 +291,7 @@ public static Tensor autocontrast(Tensor input) using var t8 = (input - minimum); using var t9 = t8 * scale; - using var t10 = t9.clamp(0, bound); + using var t10 = t9.clamp(zero_scalar, bound_scalar); return t10.to(input.dtype); } From c830437be4d60e8b5b530839d0b43a1cc7d5aa8d Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 11:03:26 +0900 Subject: [PATCH 043/101] Update torch.nn.functional.threshold. * Update src/TorchSharp/NN/Activation/Threshold.cs. + Update torch.nn.functional.threshold. - Declare TorchSharp.Scalar explicitly. 
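The threshold change that follows is the template for the activation-function patches: convert each double argument to a Scalar once, use the same handle on both the in-place and out-of-place branches, and let `using var` dispose it when the method returns. The `.alias()` on the in-place branch follows the convention visible throughout these diffs; it appears to hand back a fresh wrapper over the same native tensor so the caller can dispose the result without touching the input. A sketch under those assumptions:

    using TorchSharp;
    using static TorchSharp.torch;

    static Tensor ThresholdLike(Tensor x, double threshold, double value, bool inplace = false)
    {
        using var threshold_scalar = threshold.ToScalar();
        using var value_scalar = value.ToScalar();
        return inplace
            ? x.threshold_(threshold_scalar, value_scalar).alias() // mutate x, return a disposable view
            : x.threshold(threshold_scalar, value_scalar);         // fresh result tensor
    }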
--- src/TorchSharp/NN/Activation/Threshold.cs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/NN/Activation/Threshold.cs b/src/TorchSharp/NN/Activation/Threshold.cs index 6ebd606be..9e8181d9a 100644 --- a/src/TorchSharp/NN/Activation/Threshold.cs +++ b/src/TorchSharp/NN/Activation/Threshold.cs @@ -61,7 +61,9 @@ public static partial class functional /// Do the operation in-place public static Tensor threshold(Tensor x, double threshold, double value, bool inplace = false) { - return inplace ? x.threshold_(threshold, value).alias() : x.threshold(threshold, value); + using var threshold_scalar = threshold.ToScalar(); + using var value_scalar = value.ToScalar(); + return inplace ? x.threshold_(threshold_scalar, value_scalar).alias() : x.threshold(threshold_scalar, value_scalar); } /// From 81d021a838ad1b952e80c17712223f5a41b1a781 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 11:26:06 +0900 Subject: [PATCH 044/101] Update torch.Tensor.softplus. * Update src/TorchSharp/Tensor/Tensor.cs. + Update torch.Tensor.softplus. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Tensor/Tensor.cs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index 94dbc5008..feb4e2efb 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -2765,8 +2765,12 @@ public Tensor softmax(long dim, ScalarType? dtype = null) => torch.special.softmax(this, dim, dtype); - public Tensor softplus(double beta = 1, double threshold = 20) => - softplus1(beta, threshold); + public Tensor softplus(double beta = 1, double threshold = 20) + { + using var beta_scalar = beta.ToScalar(); + using var threshold_scalar = threshold.ToScalar(); + return softplus1(beta_scalar, threshold_scalar); + } private Tensor softplus1(Scalar beta, Scalar threshold) { From 3761a603526eccd446f73ac06a93a5c6ef1f6de3 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 11:39:40 +0900 Subject: [PATCH 045/101] Update torch.Tensor.celu{,_}. * Update src/TorchSharp/Tensor/Tensor.cs. + Update torch.Tensor.celu{,_}. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Tensor/Tensor.cs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index feb4e2efb..9f8c7b6c5 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -2838,9 +2838,17 @@ public Tensor rrelu_(double lower = one_eighth, double upper = one_third) return this; } - public Tensor celu() => this.celu(1.0); + public Tensor celu() + { + using var one_scalar = 1.0.ToScalar(); + return this.celu(one_scalar); + } - public Tensor celu_() => this.celu_(1.0); + public Tensor celu_() + { + using var one_scalar = 1.0.ToScalar(); + return this.celu_(one_scalar); + } public Tensor celu(Scalar alpha) { From c69d041571d089e3c33e895c4760be45ece79641 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 11:41:08 +0900 Subject: [PATCH 046/101] Update torch.nn.functional.celu. * Update src/TorchSharp/NN/Activation/CELU.cs. + Update torch.nn.functional.celu. - Declare TorchSharp.Scalar explicitly. 
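Note that celu() and celu_() now construct a fresh Scalar for the default alpha on every call. A later commit message in this series flags that construction cost as unresolved; if it ever mattered, one hypothetical alternative (not what these patches do) is a process-lifetime cached handle, trading per-call churn for an allocation that is never reclaimed:

    using TorchSharp;

    // Illustrative only; the name is invented, and the cached Scalar
    // deliberately outlives every caller.
    static class ScalarCache
    {
        public static readonly Scalar One = 1.0.ToScalar();
    }
    // usage: x.celu(ScalarCache.One) instead of rebuilding 1.0.ToScalar()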
--- src/TorchSharp/NN/Activation/CELU.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/NN/Activation/CELU.cs b/src/TorchSharp/NN/Activation/CELU.cs index ecb85dd47..9241255af 100644 --- a/src/TorchSharp/NN/Activation/CELU.cs +++ b/src/TorchSharp/NN/Activation/CELU.cs @@ -56,7 +56,8 @@ public static partial class functional /// public static Tensor celu(Tensor x, double alpha, bool inplace = false) { - return inplace ? x.celu_(alpha).alias() : x.celu(alpha); + using var alpha_scalar = alpha.ToScalar(); + return inplace ? x.celu_(alpha_scalar).alias() : x.celu(alpha_scalar); } } } From 5daa318f145adb8f0199c9be1c6e6973b20ad6c0 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 11:48:47 +0900 Subject: [PATCH 047/101] Use torch.Tensor.elu_. --- src/TorchSharp/Tensor/Tensor.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index 9f8c7b6c5..d7ed9afd9 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -2867,7 +2867,7 @@ public Tensor celu_(Scalar alpha) public Tensor elu(double alpha = 1) => elu(alpha, 1.0, 1.0); - public Tensor elu_(double alpha = 1) => elu(alpha, 1.0, 1.0); + public Tensor elu_(double alpha = 1) => elu_(alpha, 1.0, 1.0); public Tensor elu(Scalar alpha, Scalar scale, Scalar input_scale) { From fdf7c925410873deddbecff4715361221efbdcce Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 11:59:14 +0900 Subject: [PATCH 048/101] Update torch.Tensor.elu{,_}. * Update src/TorchSharp/Tensor/Tensor.cs. + Update torch.Tensor.elu{,_}. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Tensor/Tensor.cs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index d7ed9afd9..d0825651a 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -2865,9 +2865,19 @@ public Tensor celu_(Scalar alpha) return this; } - public Tensor elu(double alpha = 1) => elu(alpha, 1.0, 1.0); + public Tensor elu(double alpha = 1) + { + using var alpha_scalar = alpha.ToScalar(); + using var one_scalar = 1.0.ToScalar(); + return this.elu(alpha_scalar, one_scalar, one_scalar); + } - public Tensor elu_(double alpha = 1) => elu_(alpha, 1.0, 1.0); + public Tensor elu_(double alpha = 1) + { + using var alpha_scalar = alpha.ToScalar(); + using var one_scalar = 1.0.ToScalar(); + return this.elu_(alpha_scalar, one_scalar, one_scalar); + } public Tensor elu(Scalar alpha, Scalar scale, Scalar input_scale) { From 3e38c0f07abc186a327e1fb0c6efdb4e104a2e31 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 12:59:01 +0900 Subject: [PATCH 049/101] Update torch.nn.functional.hardtanh. * Update src/TorchSharp/NN/Activation/Hardtanh.cs. + Update torch.nn.functional.hardtanh. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/NN/Activation/Hardtanh.cs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/NN/Activation/Hardtanh.cs b/src/TorchSharp/NN/Activation/Hardtanh.cs index fc5683986..cb7613800 100644 --- a/src/TorchSharp/NN/Activation/Hardtanh.cs +++ b/src/TorchSharp/NN/Activation/Hardtanh.cs @@ -65,7 +65,9 @@ public static partial class functional /// public static Tensor hardtanh(Tensor x, double min_val = -1.0, double max_val = 1.0, bool inplace = false) { - return inplace ? 
x.hardtanh_(min_val, max_val).alias() : x.hardtanh(min_val, max_val); + using var min_val_scalar = min_val.ToScalar(); + using var max_val_scalar = max_val.ToScalar(); + return inplace ? x.hardtanh_(min_val_scalar, max_val_scalar).alias() : x.hardtanh(min_val_scalar, max_val_scalar); } /// From 275eb77ef8cf832130af1d49e15218f4e11bf96a Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 13:08:57 +0900 Subject: [PATCH 050/101] Update torch.nn.functional.leaky_relu. * Update src/TorchSharp/NN/Activation/LeakyReLU.cs. + Update torch.nn.functional.leaky_relu. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/NN/Activation/LeakyReLU.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/NN/Activation/LeakyReLU.cs b/src/TorchSharp/NN/Activation/LeakyReLU.cs index 8851c0da7..8bf1c15a9 100644 --- a/src/TorchSharp/NN/Activation/LeakyReLU.cs +++ b/src/TorchSharp/NN/Activation/LeakyReLU.cs @@ -56,7 +56,8 @@ public static partial class functional /// public static Tensor leaky_relu(Tensor input, double negative_slope = 0.01, bool inplace = false) { - return inplace ? input.leaky_relu_(negative_slope).alias() : input.leaky_relu(negative_slope); + using var negative_slope_scalar = negative_slope.ToScalar(); + return inplace ? input.leaky_relu_(negative_slope_scalar).alias() : input.leaky_relu(negative_slope_scalar); } } } From 8483baa127db64d8929b46aae9a69b8ba04ab963 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 13:19:42 +0900 Subject: [PATCH 051/101] Update AdversarialExampleGeneration.Attack. * Update src/Examples/AdversarialExampleGeneration.cs. + Update AdversarialExampleGeneration.Attack. - Declare TorchSharp.Scalar explicitly. --- src/Examples/AdversarialExampleGeneration.cs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Examples/AdversarialExampleGeneration.cs b/src/Examples/AdversarialExampleGeneration.cs index 7bfc174b2..8a21662d7 100644 --- a/src/Examples/AdversarialExampleGeneration.cs +++ b/src/Examples/AdversarialExampleGeneration.cs @@ -105,7 +105,9 @@ internal static void Main(string[] args) private static Tensor Attack(Tensor image, double ε, Tensor data_grad) { using (var sign = data_grad.sign()) { - var perturbed = (image + ε * sign).clamp(0.0, 1.0); + using var zero_scalar = 0.0.ToScalar(); + using var one_scalar = 1.0.ToScalar(); + var perturbed = (image + ε * sign).clamp(zero_scalar, one_scalar); return perturbed; } } From af54440c24e94f3e25013800b2a55f38655595e5 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 13:26:10 +0900 Subject: [PATCH 052/101] Update TorchSharp.Modules.Dirichlet.mode. * Update src/TorchSharp/Distributions/Dirichlet.cs. + Update TorchSharp.Modules.Dirichlet.mode. - Declare TorchSharp.Scalar explicitly. 
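For the AdversarialExampleGeneration change above: Attack implements the FGSM perturbation step, and only the clamp bounds needed explicit scoping, since the `image + ε * sign` arithmetic already goes through Tensor's operator overloads. A usage sketch with made-up MNIST-shaped inputs, written as if called from inside the example class because the helper is private:

    using static TorchSharp.torch;

    var image = torch.rand(1, 1, 28, 28);        // values already in [0, 1]
    var grad = torch.rand(1, 1, 28, 28) - 0.5;   // stand-in for a real gradient
    var adversarial = Attack(image, 0.25, grad); // perturbed, re-clamped to [0, 1]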
--- src/TorchSharp/Distributions/Dirichlet.cs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/TorchSharp/Distributions/Dirichlet.cs b/src/TorchSharp/Distributions/Dirichlet.cs index 98d72e2b6..a456824fa 100644 --- a/src/TorchSharp/Distributions/Dirichlet.cs +++ b/src/TorchSharp/Distributions/Dirichlet.cs @@ -25,9 +25,11 @@ public override Tensor mode { get { using var _ = NewDisposeScope(); - var concentrationm1 = (concentration - 1).clamp(min: 0.0); + using var zero_scalar = 0.0.ToScalar(); + var concentrationm1 = (concentration - 1).clamp(min: zero_scalar); var mode = concentrationm1 / concentrationm1.sum(-1, true); - var mask = (concentration < 1).all(dim: -1); + using var one_scalar = 1.ToScalar(); + var mask = (concentration < one_scalar).all(dim: -1); mode[mask] = torch.nn.functional.one_hot(mode[mask].argmax(dim: -1), concentrationm1.shape[concentrationm1.ndim-1]).to(mode); return mode.MoveToOuterDisposeScope(); } From 7de5fa3aa34e9428f0a36d789825a21f29e2fe38 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 13:31:09 +0900 Subject: [PATCH 053/101] Update torch.distributions.Distribution.ClampProbs. * Update src/TorchSharp/Distributions/Distribution.cs. + Update torch.distributions.Distribution.ClampProbs. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Distributions/Distribution.cs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Distributions/Distribution.cs b/src/TorchSharp/Distributions/Distribution.cs index 1ee988899..4a7d9fd17 100644 --- a/src/TorchSharp/Distributions/Distribution.cs +++ b/src/TorchSharp/Distributions/Distribution.cs @@ -183,7 +183,9 @@ protected Tensor ProbsToLogits(Tensor probs, bool isBinary = false) protected Tensor ClampProbs(Tensor probs) { var eps = torch.finfo(probs.dtype).eps; - return probs.clamp(eps, 1 - eps); + using var eps_scalar = eps.ToScalar(); + using var eps_bar_scalar = (1 - eps).ToScalar(); + return probs.clamp(eps_scalar, eps_bar_scalar); } protected Tensor ClampByZero(Tensor x) => (x.clamp_min(0) + x - x.clamp_max(0)) / 2; From 9ce88891a0106d4718d45a7c789043a318280753 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 13:38:56 +0900 Subject: [PATCH 054/101] Update TorchSharp.Modules.NegativeBinomial.mode. * Update src/TorchSharp/Distributions/NegativeBinomial.cs. + Update TorchSharp.Modules.NegativeBinomial.mode. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Distributions/NegativeBinomial.cs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Distributions/NegativeBinomial.cs b/src/TorchSharp/Distributions/NegativeBinomial.cs index 36506d620..a829a3e44 100644 --- a/src/TorchSharp/Distributions/NegativeBinomial.cs +++ b/src/TorchSharp/Distributions/NegativeBinomial.cs @@ -26,7 +26,10 @@ public class NegativeBinomial : torch.distributions.Distribution /// Mode of the negative binomial distribution. /// public override Tensor mode => - WrappedTensorDisposeScope(() => ((total_count - 1) * logits.exp()).floor_().clamp(min: 0)); + WrappedTensorDisposeScope(() => { + using var zero_scalar = 0.ToScalar(); + return ((total_count - 1) * logits.exp()).floor_().clamp(min: zero_scalar); + }); /// /// The variance of the distribution From b6dd968ef23f196c8c64fb61a4594129b413b3c2 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 13:42:18 +0900 Subject: [PATCH 055/101] Update TorchSharp.Modules.Pareto.mean. * Update src/TorchSharp/Distributions/Pareto.cs. 
+ Update TorchSharp.Modules.Pareto.mean. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Distributions/Pareto.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Distributions/Pareto.cs b/src/TorchSharp/Distributions/Pareto.cs index aa3e1d21e..4e31f81f9 100644 --- a/src/TorchSharp/Distributions/Pareto.cs +++ b/src/TorchSharp/Distributions/Pareto.cs @@ -25,7 +25,8 @@ internal Pareto(Tensor scale, Tensor alpha, Distribution base_distribution, torc public override Tensor mean { get { using var _ = torch.NewDisposeScope(); - var a = alpha.clamp(min: 1); + using var one_scalar = 1.ToScalar(); + var a = alpha.clamp(min: one_scalar); return (a * scale / (a - 1)).MoveToOuterDisposeScope(); } } From afcee2f37726642520f61297bc1fef1ae8d1cb0f Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 13:44:28 +0900 Subject: [PATCH 056/101] Update TorchSharp.Modules.Pareto.variance. * Update src/TorchSharp/Distributions/Pareto.cs. + Update TorchSharp.Modules.Pareto.variance. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Distributions/Pareto.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Distributions/Pareto.cs b/src/TorchSharp/Distributions/Pareto.cs index 4e31f81f9..4ab27684a 100644 --- a/src/TorchSharp/Distributions/Pareto.cs +++ b/src/TorchSharp/Distributions/Pareto.cs @@ -36,7 +36,8 @@ public override Tensor mean { public override Tensor variance { get { using var _ = torch.NewDisposeScope(); - var a = alpha.clamp(min: 2); + using var two_scalar = 2.ToScalar(); + var a = alpha.clamp(min: two_scalar); return (scale.square() * a / ((a - 1).square() * (a - 2))).MoveToOuterDisposeScope(); } } From e5fb29dbbb80373c1f9bb2feb5bb7600611c4200 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 13:55:51 +0900 Subject: [PATCH 057/101] Update torch.distributions.transforms.SigmoidTransform. * Update src/TorchSharp/Distributions/Transforms.cs. + Update torch.distributions.transforms.SigmoidTransform. - Use torch.tensor. - Use torch.Tensor.clamp. - Declare TorchSharp.Scalar explicitly. 
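The `_sign()` change in the SigmoidTransform patch below deserves a gloss: the old body returned the integer literal 1 from a Tensor-returning method, leaning on an implicit numeric-to-Tensor conversion that hides a 0-d tensor allocation. Making the allocation visible is the whole fix:

    using static TorchSharp.torch;

    static Tensor SignImplicit() => 1;               // compiles via implicit int -> Tensor
    static Tensor SignExplicit() => torch.tensor(1); // same 0-d tensor, stated outright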
--- src/TorchSharp/Distributions/Transforms.cs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/TorchSharp/Distributions/Transforms.cs b/src/TorchSharp/Distributions/Transforms.cs index 48f58f31f..363ab9775 100644 --- a/src/TorchSharp/Distributions/Transforms.cs +++ b/src/TorchSharp/Distributions/Transforms.cs @@ -517,21 +517,25 @@ public class SigmoidTransform : Transform public override bool bijective => true; - protected internal override Tensor _sign() => 1; + protected internal override Tensor _sign() => torch.tensor(1); protected internal override Tensor log_abs_det_jacobian(Tensor x, Tensor y) => -nn.functional.softplus(-x) - nn.functional.softplus(x); protected internal override Tensor _call(Tensor x) { var finfo = torch.finfo(x.dtype); - return torch.WrappedTensorDisposeScope(() => torch.clamp(torch.sigmoid(x), min: finfo.tiny, max: 1 - finfo.eps)); + using var tiny_scalar = finfo.tiny.ToScalar(); + using var eps_bar_scalar = (1 - finfo.eps).ToScalar(); + return torch.WrappedTensorDisposeScope(() => torch.sigmoid(x).clamp(min: tiny_scalar, max: eps_bar_scalar)); } protected internal override Tensor _inverse(Tensor y) { using var _ = torch.NewDisposeScope(); var finfo = torch.finfo(y.dtype); - y = y.clamp(min: finfo.tiny, max: 1 - finfo.eps); + using var tiny_scalar = finfo.tiny.ToScalar(); + using var eps_bar_scalar = (1 - finfo.eps).ToScalar(); + y = y.clamp(min: tiny_scalar, max: eps_bar_scalar); return (y.log() - (-y).log1p()).MoveToOuterDisposeScope(); } } From ce655926871e0e492ea973220b1858d8664b3936 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 14:04:13 +0900 Subject: [PATCH 058/101] Update torchvision.transforms.functional.Blend. * Update src/TorchVision/Functional.cs. + Update torchvision.transforms.functional.Blend. - Declare TorchSharp.Scalar explicitly. --- src/TorchVision/Functional.cs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/TorchVision/Functional.cs b/src/TorchVision/Functional.cs index 33d1bac98..22b175e1b 100644 --- a/src/TorchVision/Functional.cs +++ b/src/TorchVision/Functional.cs @@ -906,7 +906,9 @@ private static Tensor Blend(Tensor img1, Tensor img2, double ratio) using var t0 = img1 * ratio; using var t2 = img2 * (1.0 - ratio); using var t3 = (t0 + t2); - using var t4 = t3.clamp(0, bound); + using var zero_scalar = 0.ToScalar(); + using var bound_scalar = bound.ToScalar(); + using var t4 = t3.clamp(zero_scalar, bound_scalar); return t4.to(img1.dtype); } From 3d7c114ab0b6baa79f47ff43e1dfd53ff871f190 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 14:18:45 +0900 Subject: [PATCH 059/101] Update torchvision.ops.nms. * Update src/TorchVision/Ops.cs. + Update torchvision.ops.nms. - Declare TorchSharp.Scalar explicitly. 
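One detail in the nms diff below: the zero, one, and threshold Scalars are hoisted above the while loop, so each constant is converted once per call rather than once per surviving box. The same hoisting applies wherever a Scalar-taking op sits inside a loop; a small sketch:

    using TorchSharp;
    using static TorchSharp.torch;

    using var zero_scalar = 0.ToScalar();                     // one native handle...
    for (var i = 0; i < 100; i++) {
        using var t = torch.rand(10).clamp(min: zero_scalar); // ...reused on every pass
        // ... consume t ...
    }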
--- src/TorchVision/Ops.cs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/TorchVision/Ops.cs b/src/TorchVision/Ops.cs index a88b341d8..8e8c34780 100644 --- a/src/TorchVision/Ops.cs +++ b/src/TorchVision/Ops.cs @@ -53,6 +53,9 @@ public static Tensor nms(Tensor boxes, Tensor scores, double iou_threshold = 0.5 var areas = (x2 - x1) * (y2 - y1); var (_, order) = scores.sort(0, descending: true); + using var zero_scalar = 0.ToScalar(); + using var one_scalar = 1.ToScalar(); + using var iou_threshold_scalar = iou_threshold.ToScalar(); var keep = new List(); while (order.numel() > 0) { long i; @@ -65,16 +68,17 @@ public static Tensor nms(Tensor boxes, Tensor scores, double iou_threshold = 0.5 keep.Add(i); } - var indices = torch.arange(1, order.shape[0], dtype: ScalarType.Int64); + using var stop_scalar = order.shape[0].ToScalar(); + var indices = torch.arange(one_scalar, stop_scalar, dtype: ScalarType.Int64); order = order[indices]; var xx1 = x1[order].clamp(min: x1[i]); var yy1 = y1[order].clamp(min: y1[i]); var xx2 = x2[order].clamp(max: x2[i]); var yy2 = y2[order].clamp(max: y2[i]); - var inter = (xx2 - xx1).clamp(min: 0) * (yy2 - yy1).clamp(min: 0); + var inter = (xx2 - xx1).clamp(min: zero_scalar) * (yy2 - yy1).clamp(min: zero_scalar); var iou = inter / (areas[i] + areas[order] - inter); - var idx = (iou <= iou_threshold).nonzero().squeeze(); + var idx = (iou <= iou_threshold_scalar).nonzero().squeeze(); if (idx.numel() == 0) { break; } From 51d71c4943e9150ab332611ae14ce9e7eae9dc74 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 14:24:58 +0900 Subject: [PATCH 060/101] Update torchvision.ops.generalized_box_iou. * Update src/TorchVision/Ops/Boxes.cs. + Update torchvision.ops.generalized_box_iou. - Declare TorchSharp.Scalar. --- src/TorchVision/Ops/Boxes.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchVision/Ops/Boxes.cs b/src/TorchVision/Ops/Boxes.cs index 9a2524cf4..98c6bc700 100644 --- a/src/TorchVision/Ops/Boxes.cs +++ b/src/TorchVision/Ops/Boxes.cs @@ -91,7 +91,8 @@ public static Tensor generalized_box_iou(Tensor boxes1, Tensor boxes2) var lti = torch.min(boxes1[colon, None, (null, 2)], boxes2[colon, (null, 2)]); var rbi = torch.max(boxes1[colon, None, (2, null)], boxes2[colon, (2, null)]); - var whi = _upcast(rbi - lti).clamp(min: 0); // [N,M,2] + using var zero_scalar = 0.ToScalar(); + var whi = _upcast(rbi - lti).clamp(min: zero_scalar); // [N,M,2] var areai = whi[colon, colon, 0] * whi[colon, colon, 1]; return (iou - (areai - union) / areai).MoveToOuterDisposeScope(); From 6d45becbb10abde27d7f1147aea9dcf44e489055 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 14:30:37 +0900 Subject: [PATCH 061/101] Update torchvision.ops._box_inter_union. * Update src/TorchVision/Ops/Boxes.cs. + Update torchvision.ops._box_inter_union. - Declare TorchSharp.Scalar explicitly. 
--- src/TorchVision/Ops/Boxes.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchVision/Ops/Boxes.cs b/src/TorchVision/Ops/Boxes.cs index 98c6bc700..bbaaf354b 100644 --- a/src/TorchVision/Ops/Boxes.cs +++ b/src/TorchVision/Ops/Boxes.cs @@ -180,7 +180,8 @@ private static Tensor _box_inter_union(Tensor boxes1, Tensor boxes2, out Tensor var lt = torch.max(boxes1[colon, None, (null, 2)], boxes2[colon, (null, 2)]); // [N,M,2]; var rb = torch.min(boxes1[colon, None, (2, null)], boxes2[colon, (2, null)]); // [N,M,2]; - var wh = _upcast(rb - lt).clamp(min: 0); // [N,M,2]; + using var zero_scalar = 0.ToScalar(); + var wh = _upcast(rb - lt).clamp(min: zero_scalar); // [N,M,2]; var inter = wh[colon, colon, 0] * wh[colon, colon, 1]; // [N,M]; union = area1[colon, None] + area2 - inter; From 4dd9545b6e889c486849d8808d5d4a060be8f0d4 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 14:34:07 +0900 Subject: [PATCH 062/101] Update torchvision.ops._box_diou_iou. * Update src/TorchVision/Ops/Boxes.cs. + Update torchvision.ops._box_diou_iou. - Declare TorchSharp.Scalar explicitly. --- src/TorchVision/Ops/Boxes.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchVision/Ops/Boxes.cs b/src/TorchVision/Ops/Boxes.cs index bbaaf354b..f71048d0c 100644 --- a/src/TorchVision/Ops/Boxes.cs +++ b/src/TorchVision/Ops/Boxes.cs @@ -193,7 +193,8 @@ private static Tensor _box_diou_iou(Tensor boxes1, Tensor boxes2, out Tensor iou iou = box_iou(boxes1, boxes2); var lti = torch.min(boxes1[colon, None, (null, 2)], boxes2[colon, (null, 2)]); var rbi = torch.max(boxes1[colon, None, (2, null)], boxes2[colon, (2, null)]); - var whi = _upcast(rbi - lti).clamp(min: 0); // [N,M,2]; + using var zero_scalar = 0.ToScalar(); + var whi = _upcast(rbi - lti).clamp(min: zero_scalar); // [N,M,2]; var diagonal_distance_squared = whi[colon, colon, 0].square() + whi[colon, colon, 1].square() + eps; // centers of boxes var x_p = (boxes1[colon, 0] + boxes1[colon, 2]) / 2; From d4a7d2df3d5ad81e7fd647d9fa8d3416ebdc3644 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 14:52:01 +0900 Subject: [PATCH 063/101] Update torch.utils.tensorboard.Summary.image. * Update src/TorchSharp/Utils/tensorboard/Summary.cs. + Update torch.utils.tensorboard.Summary.image. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Utils/tensorboard/Summary.cs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Utils/tensorboard/Summary.cs b/src/TorchSharp/Utils/tensorboard/Summary.cs index dd43d825d..0d2a2291c 100644 --- a/src/TorchSharp/Utils/tensorboard/Summary.cs +++ b/src/TorchSharp/Utils/tensorboard/Summary.cs @@ -177,7 +177,9 @@ public static Tensorboard.Summary image(string tag, Tensor tensor, double rescal tensor = utils.convert_to_HWC(tensor, dataformats); int scale_factor = calc_scale_factor(tensor); tensor = tensor.to_type(ScalarType.Float32); - tensor = (tensor * scale_factor).clip(0, 255).to_type(ScalarType.Byte); + using var min_byte_scalar = 0.ToScalar(); // FIXME: No torch.min_int_value? 
+ using var max_byte_scalar = torch.max_int_value(ScalarType.Byte).ToScalar(); + tensor = (tensor * scale_factor).clip(min_byte_scalar, max_byte_scalar).to_type(ScalarType.Byte); Tensorboard.Summary.Types.Image image = make_image(tensor, rescale); var summary = new Tensorboard.Summary(); summary.Value.Add(new Tensorboard.Summary.Types.Value() { Tag = tag, Image = image }); From 1bf7c451a8013b8de6f9f8c14d92e5279ecb3aaa Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 14:55:23 +0900 Subject: [PATCH 064/101] Update torch.utils.tensorboard.Summary.video. * Update src/TorchSharp/Utils/tensorboard/Summary.cs. + Update torch.utils.tensorboard.Summary.video. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Utils/tensorboard/Summary.cs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Utils/tensorboard/Summary.cs b/src/TorchSharp/Utils/tensorboard/Summary.cs index 0d2a2291c..f08a35e86 100644 --- a/src/TorchSharp/Utils/tensorboard/Summary.cs +++ b/src/TorchSharp/Utils/tensorboard/Summary.cs @@ -243,7 +243,9 @@ public static Tensorboard.Summary video(string tag, Tensor tensor, int fps) tensor = utils.prepare_video(tensor); int scale_factor = calc_scale_factor(tensor); tensor = tensor.to_type(ScalarType.Float32); - tensor = (tensor * scale_factor).clip(0, 255).to_type(ScalarType.Byte); + using var min_byte_scalar = 0.ToScalar(); // FIXME: No torch.min_int_value? + using var max_byte_scalar = torch.max_int_value(ScalarType.Byte).ToScalar(); + tensor = (tensor * scale_factor).clip(min_byte_scalar, max_byte_scalar).to_type(ScalarType.Byte); Tensorboard.Summary.Types.Image video = make_video(tensor, fps); var summary = new Tensorboard.Summary(); summary.Value.Add(new Tensorboard.Summary.Types.Value() { Tag = tag, Image = video }); From 19fd88fa9e690881bf1f5ec84745771728eec1c5 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 15:03:06 +0900 Subject: [PATCH 065/101] Update TorchSharp.Modules.FisherSnedecor.rsample. * Update src/TorchSharp/Distributions/FisherSnedecor.cs. + Update TorchSharp.Modules.FisherSnedecor.rsample. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Distributions/FisherSnedecor.cs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/TorchSharp/Distributions/FisherSnedecor.cs b/src/TorchSharp/Distributions/FisherSnedecor.cs index 036b383fd..91f9ae706 100644 --- a/src/TorchSharp/Distributions/FisherSnedecor.cs +++ b/src/TorchSharp/Distributions/FisherSnedecor.cs @@ -80,11 +80,11 @@ public override Tensor rsample(params long[] sample_shape) var X1 = gamma1.rsample(sample_shape).view(shape); var X2 = gamma2.rsample(sample_shape).view(shape); - var tiny = torch.finfo(X2.dtype).tiny; - X2.clamp_(min: tiny); + using var tiny_scalar = torch.finfo(X2.dtype).tiny.ToScalar(); + X2.clamp_(min: tiny_scalar); var Y = X1 / X2; - return Y.clamp_(min: tiny).MoveToOuterDisposeScope(); + return Y.clamp_(min: tiny_scalar).MoveToOuterDisposeScope(); } /// From 29d5d2b231f625d01a61e4a7758a7ec76709078f Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 15:07:48 +0900 Subject: [PATCH 066/101] Update TorchSharp.Modules.Gamma.mode. * Update src/TorchSharp/Distributions/Gamma.cs. + Update TorchSharp.Modules.Gamma.mode. - Declare TorchSharp.Scalar explicitly. 
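The rsample fixes in this stretch share a guard built on torch.finfo: the smallest positive normal value of the floating dtype becomes a clamp floor, so downstream divisions and logarithms cannot hit zero. The guard in isolation:

    using TorchSharp;
    using static TorchSharp.torch;

    var x = torch.rand(4); // float32 by default
    using var tiny_scalar = torch.finfo(x.dtype).tiny.ToScalar();
    x.clamp_(min: tiny_scalar); // in place: no element remains below tiny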
--- src/TorchSharp/Distributions/Gamma.cs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Distributions/Gamma.cs b/src/TorchSharp/Distributions/Gamma.cs index ab5a4751f..5f16718d9 100644 --- a/src/TorchSharp/Distributions/Gamma.cs +++ b/src/TorchSharp/Distributions/Gamma.cs @@ -19,7 +19,10 @@ public class Gamma : torch.distributions.ExponentialFamily /// public override Tensor mean => WrappedTensorDisposeScope(() => concentration / rate); - public override Tensor mode => WrappedTensorDisposeScope(() => ((concentration - 1) / rate).clamp_(min: 0)); + public override Tensor mode => WrappedTensorDisposeScope(() => { + using var zero_scalar = 0.ToScalar(); + return ((concentration - 1) / rate).clamp_(min: zero_scalar); + }); /// /// The variance of the distribution From 1bc5cb1e06fd26cb8ac3e0cf6496db0d7f2e43b8 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 15:14:00 +0900 Subject: [PATCH 067/101] Update TorchSharp.Modules.Gamma.rsample. * Update src/TorchSharp/Distributions/Gamma.cs. + Update TorchSharp.Modules.Gamma.rsample. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Distributions/Gamma.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Distributions/Gamma.cs b/src/TorchSharp/Distributions/Gamma.cs index 5f16718d9..35cf34917 100644 --- a/src/TorchSharp/Distributions/Gamma.cs +++ b/src/TorchSharp/Distributions/Gamma.cs @@ -65,7 +65,8 @@ public override Tensor rsample(params long[] sample_shape) using var _ = torch.NewDisposeScope(); var shape = ExtendedShape(sample_shape); var value = torch._standard_gamma(concentration.expand(shape), generator: generator) / rate.expand(shape); - return value.detach().clamp_(min: torch.finfo(value.dtype).tiny).MoveToOuterDisposeScope(); + using var tiny_scalar = torch.finfo(value.dtype).tiny.ToScalar(); + return value.detach().clamp_(min: tiny_scalar).MoveToOuterDisposeScope(); } /// From 93eb240db8e299e797bb599936802360670d85ac Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 15:18:16 +0900 Subject: [PATCH 068/101] Update TorchSharp.Modules.Uniform.cdf. * Update src/TorchSharp/Distributions/Uniform.cs. + Update TorchSharp.Modules.Uniform.cdf. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Distributions/Uniform.cs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Distributions/Uniform.cs b/src/TorchSharp/Distributions/Uniform.cs index b15b61a47..5dfa209ce 100644 --- a/src/TorchSharp/Distributions/Uniform.cs +++ b/src/TorchSharp/Distributions/Uniform.cs @@ -83,7 +83,9 @@ public override Tensor log_prob(Tensor value) /// public override Tensor cdf(Tensor value) { - return torch.WrappedTensorDisposeScope(() => ((value - low) / (high - low)).clamp_(0, 1)); + using var zero_scalar = 0.ToScalar(); + using var one_scalar = 1.ToScalar(); + return torch.WrappedTensorDisposeScope(() => ((value - low) / (high - low)).clamp_(zero_scalar, one_scalar)); } /// From b70ed58b180c7e507d34711a2846bfe7cdec34a5 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 15:29:26 +0900 Subject: [PATCH 069/101] Use torch.Tensor.clamp_{max,min}_. 
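The diff that follows fixes a one-character delegation bug: the static torch.clamp_max_ and torch.clamp_min_ wrappers forwarded to the out-of-place clamp_max/clamp_min, so "in-place" callers received a fresh tensor while their input stayed unmodified. A quick check of the corrected behavior:

    using TorchSharp;
    using static TorchSharp.torch;

    var t = torch.ones(3);
    using var half_scalar = 0.5.ToScalar();
    torch.clamp_max_(t, half_scalar); // now mutates t itself
    // t is [0.5, 0.5, 0.5]; before the fix it was still [1, 1, 1]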
--- src/TorchSharp/Tensor/torch.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/TorchSharp/Tensor/torch.cs b/src/TorchSharp/Tensor/torch.cs index 6892d2b69..fd7c37105 100644 --- a/src/TorchSharp/Tensor/torch.cs +++ b/src/TorchSharp/Tensor/torch.cs @@ -112,11 +112,11 @@ public static Tensor row_stack(IList tensors) public static Tensor clamp_max(Tensor input, Scalar max) => input.clamp_max(max); - public static Tensor clamp_max_(Tensor input, Scalar max) => input.clamp_max(max); + public static Tensor clamp_max_(Tensor input, Scalar max) => input.clamp_max_(max); public static Tensor clamp_min(Tensor input, Scalar min) => input.clamp_min(min); - public static Tensor clamp_min_(Tensor input, Scalar min) => input.clamp_min(min); + public static Tensor clamp_min_(Tensor input, Scalar min) => input.clamp_min_(min); /// /// Expands the dimension dim of the self tensor over multiple dimensions of sizes given by sizes. From ccb1678c0627f45b645ddaba85fdd18e1b6f7f38 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 15:34:05 +0900 Subject: [PATCH 070/101] Declare TorchSharp.Scalar explicitly. * Update src/TorchSharp/Distributions/Distribution.cs. + Update torch.distributions.Distribution.ClampByZero. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Distributions/Distribution.cs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Distributions/Distribution.cs b/src/TorchSharp/Distributions/Distribution.cs index 4a7d9fd17..c381a078b 100644 --- a/src/TorchSharp/Distributions/Distribution.cs +++ b/src/TorchSharp/Distributions/Distribution.cs @@ -188,7 +188,11 @@ protected Tensor ClampProbs(Tensor probs) return probs.clamp(eps_scalar, eps_bar_scalar); } - protected Tensor ClampByZero(Tensor x) => (x.clamp_min(0) + x - x.clamp_max(0)) / 2; + protected Tensor ClampByZero(Tensor x) + { + using var zero_scalar = 0.ToScalar(); + return (x.clamp_min(zero_scalar) + x - x.clamp_max(zero_scalar)) / 2; + } protected torch.Generator generator; bool disposedValue; From 34cd0ac1b6b63a9808d921bed33caaff01ee345f Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 15:45:26 +0900 Subject: [PATCH 071/101] Update TorchSharp.Modules.GaussianNLLLoss.forward. * Update src/TorchSharp/NN/Losses.cs. + Update TorchSharp.Modules.GaussianNLLLoss.forward. - Declare TorchSharp.Scalar explicitly. 
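The ClampByZero rewrite above encodes max(x, 0) without branching: for x >= 0 the expression evaluates to (x + x - 0) / 2 = x, and for x < 0 to (0 + x - x) / 2 = 0. A numeric check:

    using TorchSharp;
    using static TorchSharp.torch;

    var x = torch.tensor(new float[] { -2f, 0f, 3f });
    using var zero_scalar = 0.ToScalar();
    var y = (x.clamp_min(zero_scalar) + x - x.clamp_max(zero_scalar)) / 2;
    // y is [0, 0, 3], elementwise max(x, 0)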
--- src/TorchSharp/NN/Losses.cs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/TorchSharp/NN/Losses.cs b/src/TorchSharp/NN/Losses.cs index 5e514bef5..b90a00c05 100644 --- a/src/TorchSharp/NN/Losses.cs +++ b/src/TorchSharp/NN/Losses.cs @@ -999,10 +999,12 @@ public override Tensor forward(Tensor input, Tensor target, Tensor variance) variance = variance.view(target.shape[0], -1); if (variance.shape[1] != input.shape[1] && variance.shape[1] != 1) throw new ArgumentException("variance has the wrong shape"); - if ((variance < 0).any().cpu().item()) throw new ArgumentException("variance has negative entry/entries"); + using var zero_scalar = 0.ToScalar(); + if ((variance < zero_scalar).any().cpu().item()) throw new ArgumentException("variance has negative entry/entries"); using (var _ = torch.no_grad()) - variance = variance.clamp_min(eps); + using (var eps_scalar = eps.ToScalar()) + variance = variance.clamp_min(eps_scalar); var loss = 0.5 * (variance.log() + (input - target).square() / variance).view(input.shape[0], -1).sum(dim: stackalloc long[] { 1 }); From fd3b78243b6972eb7ed8cc9d67a7f553cb529a3d Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 16:01:30 +0900 Subject: [PATCH 072/101] Use torch.tensor. --- src/TorchSharp/Tensor/Tensor.cs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index d0825651a..de8fb7240 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -3538,7 +3538,7 @@ public Tensor erfinv_() public Tensor eq(Tensor target) { - if (target is null) return false; + if (target is null) return torch.tensor(false); var res = NativeMethods.THSTensor_eq(Handle, target.Handle); if (res == IntPtr.Zero) { CheckForErrors(); } return new Tensor(res); @@ -3548,7 +3548,7 @@ public Tensor eq(Tensor target) public Tensor eq_(Tensor target) { - if (target is null) return false; + if (target is null) return torch.tensor(false); NativeMethods.THSTensor_eq_(Handle, target.Handle); CheckForErrors(); return this; @@ -3556,7 +3556,7 @@ public Tensor eq_(Tensor target) public Tensor eq(Scalar target) { - if (target is null) return false; + if (target is null) return torch.tensor(false); var res = NativeMethods.THSTensor_eq_scalar(Handle, target.Handle); if (res == IntPtr.Zero) { CheckForErrors(); } return new Tensor(res); @@ -3564,7 +3564,7 @@ public Tensor eq(Scalar target) public Tensor eq_(Scalar target) { - if (target is null) return false; + if (target is null) return torch.tensor(false); NativeMethods.THSTensor_eq_scalar_(Handle, target.Handle); CheckForErrors(); return this; From 9336c227505412c8e9d7ceeb93beb578a63d5323 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 16:05:36 +0900 Subject: [PATCH 073/101] Update torch.distributions.constraints._OneHot.check. * Update src/TorchSharp/Distributions/Constraints.cs. + Update torch.distributions.constraints._OneHot.check. - Declare TorchSharp.Scalar explicitly. 
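Spelled out on a tiny batch, the check patched below accepts a row only when every entry is 0 or 1 and the row sums to exactly 1; the second condition is what rejects multi-hot rows:

    using TorchSharp;
    using static TorchSharp.torch;

    var v = torch.tensor(new float[,] { { 0, 1, 0 }, { 1, 1, 0 } });
    using var zero_scalar = 0.ToScalar();
    using var one_scalar = 1.ToScalar();
    var is_boolean = ((v == zero_scalar) | (v == one_scalar)).all(-1);
    var is_normalized = v.sum(-1).eq(one_scalar);
    var ok = is_boolean & is_normalized; // [true, false]: the second row sums to 2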
--- src/TorchSharp/Distributions/Constraints.cs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/TorchSharp/Distributions/Constraints.cs b/src/TorchSharp/Distributions/Constraints.cs index 0c4aa26f5..14d1a1339 100644 --- a/src/TorchSharp/Distributions/Constraints.cs +++ b/src/TorchSharp/Distributions/Constraints.cs @@ -137,8 +137,10 @@ public _OneHot() : base(true, 1) { } public override Tensor check(Tensor value) { - var is_boolean = (value == 0) | (value == 1); - var is_normalized = value.sum(-1).eq(1); + using var zero_scalar = 0.ToScalar(); + using var one_scalar = 1.ToScalar(); + var is_boolean = (value == zero_scalar) | (value == one_scalar); + var is_normalized = value.sum(-1).eq(one_scalar); return is_boolean.all(-1) & is_normalized; } } From 1d80bf8237c827f23389bb0cd4e8911b6a42a1e3 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 16:10:28 +0900 Subject: [PATCH 074/101] Update torch.distributions.constraints._PositiveDefinite.check. * Update src/TorchSharp/Distributions/Constraints.cs. + Update torch.distributions.constraints._PositiveDefinite.check. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Distributions/Constraints.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Distributions/Constraints.cs b/src/TorchSharp/Distributions/Constraints.cs index 14d1a1339..321a791b9 100644 --- a/src/TorchSharp/Distributions/Constraints.cs +++ b/src/TorchSharp/Distributions/Constraints.cs @@ -505,7 +505,8 @@ public override Tensor check(Tensor value) var sym_check = base.check(value); if (!sym_check.all().item()) return sym_check; - return torch.linalg.cholesky_ex(value).info.eq(0); + using var zero_scalar = 0.ToScalar(); + return torch.linalg.cholesky_ex(value).info.eq(zero_scalar); } } From 5c2e631ccb9699c750d2a0430fa463e3ee3c295d Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 16:22:53 +0900 Subject: [PATCH 075/101] Use torch.tensor. 
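Every Tensor.cs hunk below replaces `return false;` inside a Tensor-returning comparison helper. The old form compiled only because of the implicit bool-to-Tensor conversion, which dresses a hidden 0-d tensor allocation up as a boolean early-out; torch.tensor(false) states the intent. The shape of the guard, with a hypothetical helper name:

    using static TorchSharp.torch;

    static Tensor GeOrFalse(Tensor left, Tensor? right)
        => right is null ? torch.tensor(false) : left.ge(right);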
--- src/TorchSharp/Tensor/Tensor.cs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index de8fb7240..926e17cc0 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -3595,7 +3595,7 @@ public bool allclose(Tensor target, double rtol = 1e-05, double atol = 1e-08, bo public Tensor ge(Tensor target) { - if (target is null) return false; + if (target is null) return torch.tensor(false); var res = NativeMethods.THSTensor_ge(Handle, target.Handle); if (res == IntPtr.Zero) { CheckForErrors(); } return new Tensor(res); @@ -3605,7 +3605,7 @@ public Tensor ge(Tensor target) public Tensor ge_(Tensor target) { - if (target is null) return false; + if (target is null) return torch.tensor(false); NativeMethods.THSTensor_ge_(Handle, target.Handle); CheckForErrors(); return this; @@ -3613,7 +3613,7 @@ public Tensor ge_(Tensor target) public Tensor ge(Scalar target) { - if (target is null) return false; + if (target is null) return torch.tensor(false); var res = NativeMethods.THSTensor_ge_scalar(Handle, target.Handle); if (res == IntPtr.Zero) { CheckForErrors(); } return new Tensor(res); @@ -3621,7 +3621,7 @@ public Tensor ge(Scalar target) public Tensor ge_(Scalar target) { - if (target is null) return false; + if (target is null) return torch.tensor(false); NativeMethods.THSTensor_ge_scalar_(Handle, target.Handle); CheckForErrors(); return this; @@ -3629,7 +3629,7 @@ public Tensor ge_(Scalar target) public Tensor gt(Tensor target) { - if (target is null) return false; + if (target is null) return torch.tensor(false); var res = NativeMethods.THSTensor_gt(Handle, target.Handle); if (res == IntPtr.Zero) { CheckForErrors(); } return new Tensor(res); @@ -3639,7 +3639,7 @@ public Tensor gt(Tensor target) public Tensor gt_(Tensor target) { - if (target is null) return false; + if (target is null) return torch.tensor(false); NativeMethods.THSTensor_gt_(Handle, target.Handle); CheckForErrors(); return this; @@ -3647,7 +3647,7 @@ public Tensor gt_(Tensor target) public Tensor gt(Scalar target) { - if (target is null) return false; + if (target is null) return torch.tensor(false); var res = NativeMethods.THSTensor_gt_scalar(Handle, target.Handle); if (res == IntPtr.Zero) { CheckForErrors(); } return new Tensor(res); @@ -3655,7 +3655,7 @@ public Tensor gt(Scalar target) public Tensor gt_(Scalar target) { - if (target is null) return false; + if (target is null) return torch.tensor(false); NativeMethods.THSTensor_gt_scalar_(Handle, target.Handle); CheckForErrors(); return this; From fc1b8932f0744c4c4c542ce7082b82acf592d5ab Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 16:24:49 +0900 Subject: [PATCH 076/101] Update torch.distributions.constraints._PositiveSemiDefinite.check. * Update src/TorchSharp/Distributions/Constraints.cs. + Update torch.distributions.constraints._PositiveSemiDefinite.check. - Declare TorchSharp.Scalar explicitly. 
--- src/TorchSharp/Distributions/Constraints.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Distributions/Constraints.cs b/src/TorchSharp/Distributions/Constraints.cs index 321a791b9..fabb5c2c1 100644 --- a/src/TorchSharp/Distributions/Constraints.cs +++ b/src/TorchSharp/Distributions/Constraints.cs @@ -491,7 +491,8 @@ public override Tensor check(Tensor value) var sym_check = base.check(value); if (!sym_check.all().item()) return sym_check; - return torch.linalg.eigvalsh(value).ge(0).all(-1); + using var zero_scalar = 0.ToScalar(); + return torch.linalg.eigvalsh(value).ge(zero_scalar).all(-1); } } From d552c5a21c052790ef72ba828f6c42e2b2d8e673 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 17:04:14 +0900 Subject: [PATCH 077/101] Update torch.distributions.constraints._CorrCholesky.check. * Update src/TorchSharp/Distributions/Constraints.cs. + Update torch.distributions.constraints._CorrCholesky.check. - Declare TorchSharp.Scalar explicitly. --- src/TorchSharp/Distributions/Constraints.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/TorchSharp/Distributions/Constraints.cs b/src/TorchSharp/Distributions/Constraints.cs index fabb5c2c1..59fe22892 100644 --- a/src/TorchSharp/Distributions/Constraints.cs +++ b/src/TorchSharp/Distributions/Constraints.cs @@ -435,9 +435,9 @@ public _CorrCholesky() : base(false, 2) { } public override Tensor check(Tensor value) { - var tol = torch.finfo(value.dtype).eps * value.size(-1) * 10; // 10 is an adjustable fudge factor + using var tol_scalar = (torch.finfo(value.dtype).eps * value.size(-1) * 10).ToScalar(); // 10 is an adjustable fudge factor var row_norm = torch.linalg.norm(value.detach(), dims: new[] { -1L }); - var unit_row_norm = (row_norm - 1.0).abs().le(tol).all(dim: -1); + var unit_row_norm = (row_norm - 1.0).abs().le(tol_scalar).all(dim: -1); return lc.check(value) & unit_row_norm; } From 636303ae8a315f74cb787771160c88e0de208abf Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 17:17:58 +0900 Subject: [PATCH 078/101] Update TransformerModel.GenerateSquareSubsequentMask. * Update src/Examples/SequenceToSequence.cs. + Update TransformerModel.GenerateSquareSubsequentMask. - Declare TorchSharp.Scalar explicitly. --- src/Examples/SequenceToSequence.cs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/Examples/SequenceToSequence.cs b/src/Examples/SequenceToSequence.cs index 436c05a67..be7e9672b 100644 --- a/src/Examples/SequenceToSequence.cs +++ b/src/Examples/SequenceToSequence.cs @@ -234,10 +234,14 @@ public TransformerModel(long ntokens, long ninputs, long nheads, long nhidden, l public Tensor GenerateSquareSubsequentMask(long size) { - var mask = (torch.ones(new long[] { size, size }) == 1).triu().transpose(0, 1); + using var zero_scalar = 0.ToScalar(); + using var one_scalar = 1.ToScalar(); + using var float_negative_infinity_scalar = float.NegativeInfinity.ToScalar(); + using var float_zero_scalar = 0.0f.ToScalar(); // FIXME: Equivalent to zero_scalar? 
+ var mask = (torch.ones(new long[] { size, size }) == one_scalar).triu().transpose(0, 1); return mask.to_type(ScalarType.Float32) - .masked_fill(mask == 0, float.NegativeInfinity) - .masked_fill(mask == 1, 0.0f).to(device); + .masked_fill(mask == zero_scalar, float_negative_infinity_scalar) + .masked_fill(mask == one_scalar, float_zero_scalar).to(device); } private void InitWeights() From 6c8656a36f21c1383366771d4689e14b7cdb4dc3 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 17:24:00 +0900 Subject: [PATCH 079/101] Update TorchSharp.Modules.Tacotron2.forward. * Update src/TorchAudio/Modules/Tacotron2.cs. + Update TorchSharp.Modules.Tacotron2.forward. - Declare TorchSharp.Scalar explicitly. --- src/TorchAudio/Modules/Tacotron2.cs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/TorchAudio/Modules/Tacotron2.cs b/src/TorchAudio/Modules/Tacotron2.cs index edf3d06b4..d38919a70 100644 --- a/src/TorchAudio/Modules/Tacotron2.cs +++ b/src/TorchAudio/Modules/Tacotron2.cs @@ -147,9 +147,11 @@ public override (Tensor, Tensor, Tensor, Tensor) forward( mask = mask.expand(this.n_mels, mask.size(0), mask.size(1)); mask = mask.permute(1, 0, 2); - mel_specgram = mel_specgram.masked_fill(mask, 0.0); - mel_specgram_postnet = mel_specgram_postnet.masked_fill(mask, 0.0); - gate_outputs = gate_outputs.masked_fill(mask[TensorIndex.Colon, 0, TensorIndex.Colon], 1e3); + using var zero_scalar = 0.0.ToScalar(); + mel_specgram = mel_specgram.masked_fill(mask, zero_scalar); + mel_specgram_postnet = mel_specgram_postnet.masked_fill(mask, zero_scalar); + using var eps_scalar = 1e3.ToScalar(); + gate_outputs = gate_outputs.masked_fill(mask[TensorIndex.Colon, 0, TensorIndex.Colon], eps_scalar); } return (mel_specgram, mel_specgram_postnet, gate_outputs, alignments); From bb92724ecd0c2aa7c81eff2ba2f6e3c7b07b7fbe Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 17:28:57 +0900 Subject: [PATCH 080/101] Update TorchSharp.Modules.Tacotron2.Attention.forward. * Update src/TorchAudio/Modules/Tacotron2.cs. + Update TorchSharp.Modules.Tacotron2.Attention.forward. - Declare TorchSharp.Scalar explicitly. --- src/TorchAudio/Modules/Tacotron2.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchAudio/Modules/Tacotron2.cs b/src/TorchAudio/Modules/Tacotron2.cs index d38919a70..1421a53a8 100644 --- a/src/TorchAudio/Modules/Tacotron2.cs +++ b/src/TorchAudio/Modules/Tacotron2.cs @@ -336,7 +336,8 @@ public override (Tensor, Tensor) forward( { var alignment = this._get_alignment_energies(attention_hidden_state, processed_memory, attention_weights_cat); - alignment = alignment.masked_fill(mask, this.score_mask_value); + using var score_mask_value_scalar = this.score_mask_value.ToScalar(); + alignment = alignment.masked_fill(mask, score_mask_value_scalar); var attention_weights = F.softmax(alignment, dim: 1); var attention_context = torch.bmm(attention_weights.unsqueeze(1), memory); From 0858332612c4b5306226e2e2645ac0f9620bbca5 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 17:32:54 +0900 Subject: [PATCH 081/101] Update TorchSharp.Modules.NegativeBinomial.log_prob. * Update src/TorchSharp/Distributions/NegativeBinomial.cs. + Update TorchSharp.Modules.NegativeBinomial.log_prob. - Declare TorchSharp.Scalar explicitly. 
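GenerateSquareSubsequentMask, patched above, builds the standard additive causal mask: zeros on and below the diagonal, negative infinity above it, so position i attends only to positions at or before i. For size = 3 the returned mask is:

    // [ 0, -inf, -inf ]
    // [ 0,    0, -inf ]
    // [ 0,    0,    0 ]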
--- src/TorchSharp/Distributions/NegativeBinomial.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/TorchSharp/Distributions/NegativeBinomial.cs b/src/TorchSharp/Distributions/NegativeBinomial.cs index a829a3e44..4b4b737ff 100644 --- a/src/TorchSharp/Distributions/NegativeBinomial.cs +++ b/src/TorchSharp/Distributions/NegativeBinomial.cs @@ -104,7 +104,8 @@ public override Tensor log_prob(Tensor value) using var _ = NewDisposeScope(); var log_unnormalized_prob = (total_count * (-_logits).log_sigmoid() + value * logits.log_sigmoid()); var log_normalization = (-torch.lgamma(total_count + value) + torch.lgamma(1.0 + value) + torch.lgamma(total_count)); - log_normalization = log_normalization.masked_fill(total_count + value == 0, 0); + using var zero_scalar = 0.ToScalar(); + log_normalization = log_normalization.masked_fill(total_count + value == zero_scalar, zero_scalar); return (log_unnormalized_prob - log_normalization).MoveToOuterDisposeScope(); } From 8880435d3a0367ac14196c461448b9134feba379 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 18:11:15 +0900 Subject: [PATCH 082/101] Simplify. --- src/TorchSharp/Tensor/Tensor.cs | 90 +++++++-------------------------- 1 file changed, 18 insertions(+), 72 deletions(-) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index 926e17cc0..feafa4029 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -6265,95 +6265,41 @@ public Tensor where(Tensor condition, Tensor y) // Operators overloading - public static Tensor operator ==(Tensor left, Tensor right) - { - return left.eq(right); - } + public static Tensor operator ==(Tensor left, Tensor right) => left.eq(right); - public static Tensor operator ==(Tensor left, Scalar right) - { - return left.eq(right); - } + public static Tensor operator ==(Tensor left, Scalar right) => left.eq(right); - public static Tensor operator ==(Scalar left, Tensor right) - { - return right.eq(left); - } + public static Tensor operator ==(Scalar left, Tensor right) => right.eq(left); - public static Tensor operator !=(Tensor left, Tensor right) - { - return left.ne(right); - } + public static Tensor operator !=(Tensor left, Tensor right) => left.ne(right); - public static Tensor operator !=(Tensor left, Scalar right) - { - return left.ne(right); - } + public static Tensor operator !=(Tensor left, Scalar right) => left.ne(right); - public static Tensor operator !=(Scalar left, Tensor right) - { - return right.ne(left); - } + public static Tensor operator !=(Scalar left, Tensor right) => right.ne(left); - public static Tensor operator <(Tensor left, Tensor right) - { - return left.lt(right); - } + public static Tensor operator <(Tensor left, Tensor right) => left.lt(right); - public static Tensor operator <(Tensor left, Scalar right) - { - return left.lt(right); - } + public static Tensor operator <(Tensor left, Scalar right) => left.lt(right); - public static Tensor operator <(Scalar left, Tensor right) - { - return right.gt(left); - } + public static Tensor operator <(Scalar left, Tensor right) => right.gt(left); - public static Tensor operator <=(Tensor left, Tensor right) - { - return left.le(right); - } + public static Tensor operator <=(Tensor left, Tensor right) => left.le(right); - public static Tensor operator <=(Tensor left, Scalar right) - { - return left.le(right); - } + public static Tensor operator <=(Tensor left, Scalar right) => left.le(right); - public static Tensor operator <=(Scalar left, Tensor right) 
- { - return right.ge(left); - } + public static Tensor operator <=(Scalar left, Tensor right) => right.ge(left); - public static Tensor operator >(Tensor left, Tensor right) - { - return left.gt(right); - } + public static Tensor operator >(Tensor left, Tensor right) => left.gt(right); - public static Tensor operator >(Tensor left, Scalar right) - { - return left.gt(right); - } + public static Tensor operator >(Tensor left, Scalar right) => left.gt(right); - public static Tensor operator >(Scalar left, Tensor right) - { - return right.lt(left); - } + public static Tensor operator >(Scalar left, Tensor right) => right.lt(left); - public static Tensor operator >=(Tensor left, Tensor right) - { - return left.ge(right); - } + public static Tensor operator >=(Tensor left, Tensor right) => left.ge(right); - public static Tensor operator >=(Tensor left, Scalar right) - { - return left.ge(right); - } + public static Tensor operator >=(Tensor left, Scalar right) => left.ge(right); - public static Tensor operator >=(Scalar left, Tensor right) - { - return right.le(left); - } + public static Tensor operator >=(Scalar left, Tensor right) => right.le(left); /// /// Useful when assigning a .NET numeric value to an index of a Tensor. From 23f60dc3a83276cf597a7a43668bab2e2d2de426 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Tue, 16 Sep 2025 18:49:52 +0900 Subject: [PATCH 083/101] Add more operators. This is an exceptional change. Since comparison operators are widely used, I'd have to give up fix one by one for now. Introducing operators take {int,long,float,double} would cover most cases to prevent missing TorchSharp.Scalar.Dispose. However, these would leave TorchSharp.Scalar construction cost as is. * Update src/TorchSharp/Tensor/Tensor.cs. + Add more operator ==. + Add more operator !=. + Add more operator <. + Add more operator <=. + Add more operator >. + Add more operator >=. 
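Concretely, the new overloads change nothing at call sites; they reroute existing expressions away from the implicit conversion so the temporary Scalar gets disposed. And, as the message concedes, the construction cost remains per call, so hoisting the handle by hand is still the only way to amortize it:

    using TorchSharp;
    using static TorchSharp.torch;

    var logits = torch.rand(1000);
    var hits = logits > 0.5; // now operator >(Tensor, double): the temporary Scalar is scoped

    using var half_scalar = 0.5.ToScalar();
    for (var i = 0; i < 10; i++) {
        using var h = logits > half_scalar; // one Scalar shared by all iterations
    }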
--- src/TorchSharp/Tensor/Tensor.cs | 246 ++++++++++++++++++++++++++++++++ 1 file changed, 246 insertions(+) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index feafa4029..855cf93e9 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -6271,36 +6271,282 @@ public Tensor where(Tensor condition, Tensor y) public static Tensor operator ==(Scalar left, Tensor right) => right.eq(left); + public static Tensor operator ==(Tensor left, int right) + { + using Scalar right_scalar = right.ToScalar(); + return left == right_scalar; + } + public static Tensor operator ==(Tensor left, long right) + { + using Scalar right_scalar = right.ToScalar(); + return left == right_scalar; + } + public static Tensor operator ==(Tensor left, float right) + { + using Scalar right_scalar = right.ToScalar(); + return left == right_scalar; + } + public static Tensor operator ==(Tensor left, double right) + { + using Scalar right_scalar = right.ToScalar(); + return left == right_scalar; + } + public static Tensor operator ==(int left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar == right; + } + public static Tensor operator ==(long left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar == right; + } + public static Tensor operator ==(float left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar == right; + } + public static Tensor operator ==(double left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar == right; + } + public static Tensor operator !=(Tensor left, Tensor right) => left.ne(right); public static Tensor operator !=(Tensor left, Scalar right) => left.ne(right); public static Tensor operator !=(Scalar left, Tensor right) => right.ne(left); + public static Tensor operator !=(Tensor left, int right) + { + using Scalar right_scalar = right.ToScalar(); + return left != right_scalar; + } + public static Tensor operator !=(Tensor left, long right) + { + using Scalar right_scalar = right.ToScalar(); + return left != right_scalar; + } + public static Tensor operator !=(Tensor left, float right) + { + using Scalar right_scalar = right.ToScalar(); + return left != right_scalar; + } + public static Tensor operator !=(Tensor left, double right) + { + using Scalar right_scalar = right.ToScalar(); + return left != right_scalar; + } + public static Tensor operator !=(int left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar != right; + } + public static Tensor operator !=(long left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar != right; + } + public static Tensor operator !=(float left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar != right; + } + public static Tensor operator !=(double left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar != right; + } + public static Tensor operator <(Tensor left, Tensor right) => left.lt(right); public static Tensor operator <(Tensor left, Scalar right) => left.lt(right); public static Tensor operator <(Scalar left, Tensor right) => right.gt(left); + public static Tensor operator <(Tensor left, int right) + { + using Scalar right_scalar = right.ToScalar(); + return left < right_scalar; + } + public static Tensor operator <(Tensor left, long right) + { + using Scalar right_scalar = right.ToScalar(); + return left < right_scalar; 
+ } + public static Tensor operator <(Tensor left, float right) + { + using Scalar right_scalar = right.ToScalar(); + return left < right_scalar; + } + public static Tensor operator <(Tensor left, double right) + { + using Scalar right_scalar = right.ToScalar(); + return left < right_scalar; + } + public static Tensor operator <(int left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar < right; + } + public static Tensor operator <(long left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar < right; + } + public static Tensor operator <(float left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar < right; + } + public static Tensor operator <(double left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar < right; + } + public static Tensor operator <=(Tensor left, Tensor right) => left.le(right); public static Tensor operator <=(Tensor left, Scalar right) => left.le(right); public static Tensor operator <=(Scalar left, Tensor right) => right.ge(left); + public static Tensor operator <=(Tensor left, int right) + { + using Scalar right_scalar = right.ToScalar(); + return left <= right_scalar; + } + public static Tensor operator <=(Tensor left, long right) + { + using Scalar right_scalar = right.ToScalar(); + return left <= right_scalar; + } + public static Tensor operator <=(Tensor left, float right) + { + using Scalar right_scalar = right.ToScalar(); + return left <= right_scalar; + } + public static Tensor operator <=(Tensor left, double right) + { + using Scalar right_scalar = right.ToScalar(); + return left <= right_scalar; + } + public static Tensor operator <=(int left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar <= right; + } + public static Tensor operator <=(long left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar <= right; + } + public static Tensor operator <=(float left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar <= right; + } + public static Tensor operator <=(double left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar <= right; + } + public static Tensor operator >(Tensor left, Tensor right) => left.gt(right); public static Tensor operator >(Tensor left, Scalar right) => left.gt(right); public static Tensor operator >(Scalar left, Tensor right) => right.lt(left); + public static Tensor operator >(Tensor left, int right) + { + using Scalar right_scalar = right.ToScalar(); + return left > right_scalar; + } + public static Tensor operator >(Tensor left, long right) + { + using Scalar right_scalar = right.ToScalar(); + return left > right_scalar; + } + public static Tensor operator >(Tensor left, float right) + { + using Scalar right_scalar = right.ToScalar(); + return left > right_scalar; + } + public static Tensor operator >(Tensor left, double right) + { + using Scalar right_scalar = right.ToScalar(); + return left > right_scalar; + } + public static Tensor operator >(int left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar > right; + } + public static Tensor operator >(long left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar > right; + } + public static Tensor operator >(float left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar > right; + } + public static Tensor 
operator >(double left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar > right; + } + public static Tensor operator >=(Tensor left, Tensor right) => left.ge(right); public static Tensor operator >=(Tensor left, Scalar right) => left.ge(right); public static Tensor operator >=(Scalar left, Tensor right) => right.le(left); + public static Tensor operator >=(Tensor left, int right) + { + using Scalar right_scalar = right.ToScalar(); + return left >= right_scalar; + } + public static Tensor operator >=(Tensor left, long right) + { + using Scalar right_scalar = right.ToScalar(); + return left >= right_scalar; + } + public static Tensor operator >=(Tensor left, float right) + { + using Scalar right_scalar = right.ToScalar(); + return left >= right_scalar; + } + public static Tensor operator >=(Tensor left, double right) + { + using Scalar right_scalar = right.ToScalar(); + return left >= right_scalar; + } + public static Tensor operator >=(int left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar >= right; + } + public static Tensor operator >=(long left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar >= right; + } + public static Tensor operator >=(float left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar >= right; + } + public static Tensor operator >=(double left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar >= right; + } + /// /// Useful when assigning a .NET numeric value to an index of a Tensor. /// From f5f7c5c68e7814e03b93182613c5308a66def30f Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Wed, 17 Sep 2025 11:43:08 +0900 Subject: [PATCH 084/101] Add more operators. Support other types which are covered by implicit conversion to TorchSharp.Scalar; byte, sbyte, short, Half, bool, (float, float), System.Numerics.Complex. * Update src/TorchSharp/Tensor/Tensor.cs. + Add more operator ==. + Add more operator !=. + Add more operator <. + Add more operator <=. + Add more operator >. + Add more operator >=. 
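For orientation, a minimal call-site sketch (the values and the `torch.tensor(short[])` array factory are illustrative assumptions; `ToBoolean` is the extension method revisited in patch 086 below). The point of the typed overloads is that the Scalar implicitly created from the literal is disposed before the operator returns instead of waiting for the finalizer:

    // Sketch: binds to the new operator ==(Tensor, short); the Scalar wrapping
    // (short)2 is created and disposed inside the operator itself.
    using TorchSharp;
    using static TorchSharp.torch;

    Tensor t = tensor(new short[] { 1, 2, 3 });
    using Tensor mask = t == (short)2;
    bool found = mask.any().ToBoolean();   // true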
--- src/TorchSharp/Tensor/Tensor.cs | 444 ++++++++++++++++++++++++++++++++ 1 file changed, 444 insertions(+) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index 855cf93e9..e59963bdf 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -6271,6 +6271,21 @@ public Tensor where(Tensor condition, Tensor y) public static Tensor operator ==(Scalar left, Tensor right) => right.eq(left); + public static Tensor operator ==(Tensor left, byte right) + { + using Scalar right_scalar = right.ToScalar(); + return left == right_scalar; + } + public static Tensor operator ==(Tensor left, sbyte right) + { + using Scalar right_scalar = right.ToScalar(); + return left == right_scalar; + } + public static Tensor operator ==(Tensor left, short right) + { + using Scalar right_scalar = right.ToScalar(); + return left == right_scalar; + } public static Tensor operator ==(Tensor left, int right) { using Scalar right_scalar = right.ToScalar(); @@ -6281,6 +6296,13 @@ public Tensor where(Tensor condition, Tensor y) using Scalar right_scalar = right.ToScalar(); return left == right_scalar; } +#if NET6_0_OR_GREATER + public static Tensor operator ==(Tensor left, Half right) + { + using Scalar right_scalar = right.ToScalar(); + return left == right_scalar; + } +#endif public static Tensor operator ==(Tensor left, float right) { using Scalar right_scalar = right.ToScalar(); @@ -6291,6 +6313,36 @@ public Tensor where(Tensor condition, Tensor y) using Scalar right_scalar = right.ToScalar(); return left == right_scalar; } + public static Tensor operator ==(Tensor left, bool right) + { + using Scalar right_scalar = right.ToScalar(); + return left == right_scalar; + } + public static Tensor operator ==(Tensor left, (float, float) right) + { + using Scalar right_scalar = right.ToScalar(); + return left == right_scalar; + } + public static Tensor operator ==(Tensor left, System.Numerics.Complex right) + { + using Scalar right_scalar = right.ToScalar(); + return left == right_scalar; + } + public static Tensor operator ==(byte left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar == right; + } + public static Tensor operator ==(sbyte left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar == right; + } + public static Tensor operator ==(short left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar == right; + } public static Tensor operator ==(int left, Tensor right) { using Scalar left_scalar = left.ToScalar(); @@ -6301,6 +6353,13 @@ public Tensor where(Tensor condition, Tensor y) using Scalar left_scalar = left.ToScalar(); return left_scalar == right; } +#if NET6_0_OR_GREATER + public static Tensor operator ==(Half left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar == right; + } +#endif public static Tensor operator ==(float left, Tensor right) { using Scalar left_scalar = left.ToScalar(); @@ -6311,6 +6370,21 @@ public Tensor where(Tensor condition, Tensor y) using Scalar left_scalar = left.ToScalar(); return left_scalar == right; } + public static Tensor operator ==(bool left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar == right; + } + public static Tensor operator ==((float, float) left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar == right; + } + public static Tensor operator ==(System.Numerics.Complex left, Tensor right) + { + using 
Scalar left_scalar = left.ToScalar(); + return left_scalar == right; + } public static Tensor operator !=(Tensor left, Tensor right) => left.ne(right); @@ -6318,6 +6392,21 @@ public Tensor where(Tensor condition, Tensor y) public static Tensor operator !=(Scalar left, Tensor right) => right.ne(left); + public static Tensor operator !=(Tensor left, byte right) + { + using Scalar right_scalar = right.ToScalar(); + return left != right_scalar; + } + public static Tensor operator !=(Tensor left, sbyte right) + { + using Scalar right_scalar = right.ToScalar(); + return left != right_scalar; + } + public static Tensor operator !=(Tensor left, short right) + { + using Scalar right_scalar = right.ToScalar(); + return left != right_scalar; + } public static Tensor operator !=(Tensor left, int right) { using Scalar right_scalar = right.ToScalar(); @@ -6328,6 +6417,13 @@ public Tensor where(Tensor condition, Tensor y) using Scalar right_scalar = right.ToScalar(); return left != right_scalar; } +#if NET6_0_OR_GREATER + public static Tensor operator !=(Tensor left, Half right) + { + using Scalar right_scalar = right.ToScalar(); + return left != right_scalar; + } +#endif public static Tensor operator !=(Tensor left, float right) { using Scalar right_scalar = right.ToScalar(); @@ -6338,6 +6434,36 @@ public Tensor where(Tensor condition, Tensor y) using Scalar right_scalar = right.ToScalar(); return left != right_scalar; } + public static Tensor operator !=(Tensor left, bool right) + { + using Scalar right_scalar = right.ToScalar(); + return left != right_scalar; + } + public static Tensor operator !=(Tensor left, (float, float) right) + { + using Scalar right_scalar = right.ToScalar(); + return left != right_scalar; + } + public static Tensor operator !=(Tensor left, System.Numerics.Complex right) + { + using Scalar right_scalar = right.ToScalar(); + return left != right_scalar; + } + public static Tensor operator !=(byte left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar != right; + } + public static Tensor operator !=(sbyte left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar != right; + } + public static Tensor operator !=(short left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar != right; + } public static Tensor operator !=(int left, Tensor right) { using Scalar left_scalar = left.ToScalar(); @@ -6348,6 +6474,13 @@ public Tensor where(Tensor condition, Tensor y) using Scalar left_scalar = left.ToScalar(); return left_scalar != right; } +#if NET6_0_OR_GREATER + public static Tensor operator !=(Half left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar != right; + } +#endif public static Tensor operator !=(float left, Tensor right) { using Scalar left_scalar = left.ToScalar(); @@ -6358,6 +6491,21 @@ public Tensor where(Tensor condition, Tensor y) using Scalar left_scalar = left.ToScalar(); return left_scalar != right; } + public static Tensor operator !=(bool left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar != right; + } + public static Tensor operator !=((float, float) left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar != right; + } + public static Tensor operator !=(System.Numerics.Complex left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar != right; + } public static Tensor operator <(Tensor left, Tensor right) => 
left.lt(right); @@ -6365,6 +6513,21 @@ public Tensor where(Tensor condition, Tensor y) public static Tensor operator <(Scalar left, Tensor right) => right.gt(left); + public static Tensor operator <(Tensor left, byte right) + { + using Scalar right_scalar = right.ToScalar(); + return left < right_scalar; + } + public static Tensor operator <(Tensor left, sbyte right) + { + using Scalar right_scalar = right.ToScalar(); + return left < right_scalar; + } + public static Tensor operator <(Tensor left, short right) + { + using Scalar right_scalar = right.ToScalar(); + return left < right_scalar; + } public static Tensor operator <(Tensor left, int right) { using Scalar right_scalar = right.ToScalar(); @@ -6375,6 +6538,13 @@ public Tensor where(Tensor condition, Tensor y) using Scalar right_scalar = right.ToScalar(); return left < right_scalar; } +#if NET6_0_OR_GREATER + public static Tensor operator <(Tensor left, Half right) + { + using Scalar right_scalar = right.ToScalar(); + return left < right_scalar; + } +#endif public static Tensor operator <(Tensor left, float right) { using Scalar right_scalar = right.ToScalar(); @@ -6385,6 +6555,36 @@ public Tensor where(Tensor condition, Tensor y) using Scalar right_scalar = right.ToScalar(); return left < right_scalar; } + public static Tensor operator <(Tensor left, bool right) // FIXME: Well defined? + { + using Scalar right_scalar = right.ToScalar(); + return left < right_scalar; + } + public static Tensor operator <(Tensor left, (float, float) right) // FIXME: Well defined? + { + using Scalar right_scalar = right.ToScalar(); + return left < right_scalar; + } + public static Tensor operator <(Tensor left, System.Numerics.Complex right) // FIXME: Well defined? + { + using Scalar right_scalar = right.ToScalar(); + return left < right_scalar; + } + public static Tensor operator <(byte left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar < right; + } + public static Tensor operator <(sbyte left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar < right; + } + public static Tensor operator <(short left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar < right; + } public static Tensor operator <(int left, Tensor right) { using Scalar left_scalar = left.ToScalar(); @@ -6395,6 +6595,13 @@ public Tensor where(Tensor condition, Tensor y) using Scalar left_scalar = left.ToScalar(); return left_scalar < right; } +#if NET6_0_OR_GREATER + public static Tensor operator <(Half left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar < right; + } +#endif public static Tensor operator <(float left, Tensor right) { using Scalar left_scalar = left.ToScalar(); @@ -6405,6 +6612,21 @@ public Tensor where(Tensor condition, Tensor y) using Scalar left_scalar = left.ToScalar(); return left_scalar < right; } + public static Tensor operator <(bool left, Tensor right) // FIXME: Well defined? + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar < right; + } + public static Tensor operator <((float, float) left, Tensor right) // FIXME: Well defined? + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar < right; + } + public static Tensor operator <(System.Numerics.Complex left, Tensor right) // FIXME: Well defined? 
+ { + using Scalar left_scalar = left.ToScalar(); + return left_scalar < right; + } public static Tensor operator <=(Tensor left, Tensor right) => left.le(right); @@ -6412,6 +6634,21 @@ public Tensor where(Tensor condition, Tensor y) public static Tensor operator <=(Scalar left, Tensor right) => right.ge(left); + public static Tensor operator <=(Tensor left, byte right) + { + using Scalar right_scalar = right.ToScalar(); + return left <= right_scalar; + } + public static Tensor operator <=(Tensor left, sbyte right) + { + using Scalar right_scalar = right.ToScalar(); + return left <= right_scalar; + } + public static Tensor operator <=(Tensor left, short right) + { + using Scalar right_scalar = right.ToScalar(); + return left <= right_scalar; + } public static Tensor operator <=(Tensor left, int right) { using Scalar right_scalar = right.ToScalar(); @@ -6422,6 +6659,13 @@ public Tensor where(Tensor condition, Tensor y) using Scalar right_scalar = right.ToScalar(); return left <= right_scalar; } +#if NET6_0_OR_GREATER + public static Tensor operator <=(Tensor left, Half right) + { + using Scalar right_scalar = right.ToScalar(); + return left <= right_scalar; + } +#endif public static Tensor operator <=(Tensor left, float right) { using Scalar right_scalar = right.ToScalar(); @@ -6432,6 +6676,36 @@ public Tensor where(Tensor condition, Tensor y) using Scalar right_scalar = right.ToScalar(); return left <= right_scalar; } + public static Tensor operator <=(Tensor left, bool right) // FIXME: Well defined? + { + using Scalar right_scalar = right.ToScalar(); + return left <= right_scalar; + } + public static Tensor operator <=(Tensor left, (float, float) right) // FIXME: Well defined? + { + using Scalar right_scalar = right.ToScalar(); + return left <= right_scalar; + } + public static Tensor operator <=(Tensor left, System.Numerics.Complex right) // FIXME: Well defined? + { + using Scalar right_scalar = right.ToScalar(); + return left <= right_scalar; + } + public static Tensor operator <=(byte left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar <= right; + } + public static Tensor operator <=(sbyte left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar <= right; + } + public static Tensor operator <=(short left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar <= right; + } public static Tensor operator <=(int left, Tensor right) { using Scalar left_scalar = left.ToScalar(); @@ -6442,6 +6716,13 @@ public Tensor where(Tensor condition, Tensor y) using Scalar left_scalar = left.ToScalar(); return left_scalar <= right; } +#if NET6_0_OR_GREATER + public static Tensor operator <=(Half left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar <= right; + } +#endif public static Tensor operator <=(float left, Tensor right) { using Scalar left_scalar = left.ToScalar(); @@ -6452,6 +6733,21 @@ public Tensor where(Tensor condition, Tensor y) using Scalar left_scalar = left.ToScalar(); return left_scalar <= right; } + public static Tensor operator <=(bool left, Tensor right) // FIXME: Well defined? + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar <= right; + } + public static Tensor operator <=((float, float) left, Tensor right) // FIXME: Well defined? + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar <= right; + } + public static Tensor operator <=(System.Numerics.Complex left, Tensor right) // FIXME: Well defined? 
+ { + using Scalar left_scalar = left.ToScalar(); + return left_scalar <= right; + } public static Tensor operator >(Tensor left, Tensor right) => left.gt(right); @@ -6459,6 +6755,21 @@ public Tensor where(Tensor condition, Tensor y) public static Tensor operator >(Scalar left, Tensor right) => right.lt(left); + public static Tensor operator >(Tensor left, byte right) + { + using Scalar right_scalar = right.ToScalar(); + return left > right_scalar; + } + public static Tensor operator >(Tensor left, sbyte right) + { + using Scalar right_scalar = right.ToScalar(); + return left > right_scalar; + } + public static Tensor operator >(Tensor left, short right) + { + using Scalar right_scalar = right.ToScalar(); + return left > right_scalar; + } public static Tensor operator >(Tensor left, int right) { using Scalar right_scalar = right.ToScalar(); @@ -6469,6 +6780,13 @@ public Tensor where(Tensor condition, Tensor y) using Scalar right_scalar = right.ToScalar(); return left > right_scalar; } +#if NET6_0_OR_GREATER + public static Tensor operator >(Tensor left, Half right) + { + using Scalar right_scalar = right.ToScalar(); + return left > right_scalar; + } +#endif public static Tensor operator >(Tensor left, float right) { using Scalar right_scalar = right.ToScalar(); @@ -6479,6 +6797,36 @@ public Tensor where(Tensor condition, Tensor y) using Scalar right_scalar = right.ToScalar(); return left > right_scalar; } + public static Tensor operator >(Tensor left, bool right) // FIXME: Well defined? + { + using Scalar right_scalar = right.ToScalar(); + return left > right_scalar; + } + public static Tensor operator >(Tensor left, (float, float) right) // FIXME: Well defined? + { + using Scalar right_scalar = right.ToScalar(); + return left > right_scalar; + } + public static Tensor operator >(Tensor left, System.Numerics.Complex right) // FIXME: Well defined? + { + using Scalar right_scalar = right.ToScalar(); + return left > right_scalar; + } + public static Tensor operator >(byte left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar > right; + } + public static Tensor operator >(sbyte left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar > right; + } + public static Tensor operator >(short left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar > right; + } public static Tensor operator >(int left, Tensor right) { using Scalar left_scalar = left.ToScalar(); @@ -6489,6 +6837,13 @@ public Tensor where(Tensor condition, Tensor y) using Scalar left_scalar = left.ToScalar(); return left_scalar > right; } +#if NET6_0_OR_GREATER + public static Tensor operator >(Half left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar > right; + } +#endif public static Tensor operator >(float left, Tensor right) { using Scalar left_scalar = left.ToScalar(); @@ -6499,6 +6854,21 @@ public Tensor where(Tensor condition, Tensor y) using Scalar left_scalar = left.ToScalar(); return left_scalar > right; } + public static Tensor operator >(bool left, Tensor right) // FIXME: Well defined? + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar > right; + } + public static Tensor operator >((float, float) left, Tensor right) // FIXME: Well defined? + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar > right; + } + public static Tensor operator >(System.Numerics.Complex left, Tensor right) // FIXME: Well defined? 
+ { + using Scalar left_scalar = left.ToScalar(); + return left_scalar > right; + } public static Tensor operator >=(Tensor left, Tensor right) => left.ge(right); @@ -6506,6 +6876,21 @@ public Tensor where(Tensor condition, Tensor y) public static Tensor operator >=(Scalar left, Tensor right) => right.le(left); + public static Tensor operator >=(Tensor left, byte right) + { + using Scalar right_scalar = right.ToScalar(); + return left >= right_scalar; + } + public static Tensor operator >=(Tensor left, sbyte right) + { + using Scalar right_scalar = right.ToScalar(); + return left >= right_scalar; + } + public static Tensor operator >=(Tensor left, short right) + { + using Scalar right_scalar = right.ToScalar(); + return left >= right_scalar; + } public static Tensor operator >=(Tensor left, int right) { using Scalar right_scalar = right.ToScalar(); @@ -6516,6 +6901,13 @@ public Tensor where(Tensor condition, Tensor y) using Scalar right_scalar = right.ToScalar(); return left >= right_scalar; } +#if NET6_0_OR_GREATER + public static Tensor operator >=(Tensor left, Half right) + { + using Scalar right_scalar = right.ToScalar(); + return left >= right_scalar; + } +#endif public static Tensor operator >=(Tensor left, float right) { using Scalar right_scalar = right.ToScalar(); @@ -6526,6 +6918,36 @@ public Tensor where(Tensor condition, Tensor y) using Scalar right_scalar = right.ToScalar(); return left >= right_scalar; } + public static Tensor operator >=(Tensor left, bool right) // FIXME: Well defined? + { + using Scalar right_scalar = right.ToScalar(); + return left >= right_scalar; + } + public static Tensor operator >=(Tensor left, (float, float) right) // FIXME: Well defined? + { + using Scalar right_scalar = right.ToScalar(); + return left >= right_scalar; + } + public static Tensor operator >=(Tensor left, System.Numerics.Complex right) // FIXME: Well defined? + { + using Scalar right_scalar = right.ToScalar(); + return left >= right_scalar; + } + public static Tensor operator >=(byte left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar >= right; + } + public static Tensor operator >=(sbyte left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar >= right; + } + public static Tensor operator >=(short left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar >= right; + } public static Tensor operator >=(int left, Tensor right) { using Scalar left_scalar = left.ToScalar(); @@ -6536,6 +6958,13 @@ public Tensor where(Tensor condition, Tensor y) using Scalar left_scalar = left.ToScalar(); return left_scalar >= right; } +#if NET6_0_OR_GREATER + public static Tensor operator >=(Half left, Tensor right) + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar >= right; + } +#endif public static Tensor operator >=(float left, Tensor right) { using Scalar left_scalar = left.ToScalar(); @@ -6546,6 +6975,21 @@ public Tensor where(Tensor condition, Tensor y) using Scalar left_scalar = left.ToScalar(); return left_scalar >= right; } + public static Tensor operator >=(bool left, Tensor right) // FIXME: Well defined? + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar >= right; + } + public static Tensor operator >=((float, float) left, Tensor right) // FIXME: Well defined? + { + using Scalar left_scalar = left.ToScalar(); + return left_scalar >= right; + } + public static Tensor operator >=(System.Numerics.Complex left, Tensor right) // FIXME: Well defined? 
+ { + using Scalar left_scalar = left.ToScalar(); + return left_scalar >= right; + } /// /// Useful when assigning a .NET numeric value to an index of a Tensor. From c2da2a724c1ea2a55778892a01b862bb928ad1d4 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Wed, 17 Sep 2025 13:17:30 +0900 Subject: [PATCH 085/101] Call PrintValue w/ explicitly declared TorchSharp.Scalar. * Update src/TorchSharp/Tensor/Tensor.cs. + Call PrintValue w/ explicitly declared TorchSharp.Scalar. --- src/TorchSharp/Tensor/Tensor.cs | 42 ++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index e59963bdf..5762d2116 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -7297,7 +7297,8 @@ private static string ToNumpyString(Tensor t, long mdim, bool isFCreate, string var sb = new StringBuilder(isFCreate ? string.Join("", Enumerable.Repeat(' ', (int)(mdim - dim))) : ""); if (dim == 0) { - PrintValue(sb, t.dtype, t.ToScalar(), fltFormat, actualCulturInfo); + using (var scalar = t.ToScalar()) + PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo); return sb.ToString(); ; } @@ -7309,25 +7310,30 @@ private static string ToNumpyString(Tensor t, long mdim, bool isFCreate, string else if (dim == 1) { if (currentSize <= torch.maxColumns) { for (var i = 0; i < currentSize - 1; i++) { - PrintValue(sb, t.dtype, t[i].ToScalar(), fltFormat, actualCulturInfo); + using (var scalar = t[i].ToScalar()) + PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo); sb.Append(',').Append(' '); } - PrintValue(sb, t.dtype, t[currentSize - 1].ToScalar(), fltFormat, actualCulturInfo); + using (var scalar = t[currentSize - 1].ToScalar()) + PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo); } else { for (var i = 0; i < leadingCols; i++) { - PrintValue(sb, t.dtype, t[i].ToScalar(), fltFormat, actualCulturInfo); + using (var scalar = t[i].ToScalar()) + PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo); sb.Append(',').Append(' '); } sb.Append("... 
"); for (var i = currentSize - trailingCols; i < currentSize - 1; i++) { - PrintValue(sb, t.dtype, t[i].ToScalar(), fltFormat, actualCulturInfo); + using (var scalar = t[i].ToScalar()) + PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo); sb.Append(',').Append(' '); } - PrintValue(sb, t.dtype, t[currentSize - 1].ToScalar(), fltFormat, actualCulturInfo); + using (var scalar = t[currentSize - 1].ToScalar()) + PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo); } } else { var newline = string.Join("", Enumerable.Repeat(newLine, (int)dim - 1).ToList()); @@ -7385,7 +7391,8 @@ private static string ToCSharpString(Tensor t, long mdim, bool isFCreate, string sb.Append(", value = "); if (t.Dimensions == 0) { - PrintValue(sb, t.dtype, t.ToScalar(), fltFormat, actualCulturInfo); + using (var scalar = t.ToScalar()) + PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo); return sb.ToString(); ; } } @@ -7459,16 +7466,19 @@ private static string ToCSharpString(Tensor t, long mdim, bool isFCreate, string else if (dim == 1) { if (currentSize <= torch.maxColumns) { for (var i = 0; i < currentSize - 1; i++) { - PrintValue(sb, t.dtype, t[i].ToScalar(), fltFormat, actualCulturInfo); + using (var scalar = t[i].ToScalar()) + PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo); sb.Append(appendChar); sb.Append(',').Append(' '); } - PrintValue(sb, t.dtype, t[currentSize - 1].ToScalar(), fltFormat, actualCulturInfo); + using (var scalar = t[currentSize - 1].ToScalar()) + PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo); sb.Append(appendChar); } else { for (var i = 0; i < leadingCols; i++) { - PrintValue(sb, t.dtype, t[i].ToScalar(), fltFormat, actualCulturInfo); + using (var scalar = t[i].ToScalar()) + PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo); sb.Append(appendChar); sb.Append(',').Append(' '); } @@ -7476,12 +7486,14 @@ private static string ToCSharpString(Tensor t, long mdim, bool isFCreate, string sb.Append("... "); for (var i = currentSize - trailingCols; i < currentSize - 1; i++) { - PrintValue(sb, t.dtype, t[i].ToScalar(), fltFormat, actualCulturInfo); + using (var scalar = t[i].ToScalar()) + PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo); sb.Append(appendChar); sb.Append(',').Append(' '); } - PrintValue(sb, t.dtype, t[currentSize - 1].ToScalar(), fltFormat, actualCulturInfo); + using (var scalar = t[currentSize - 1].ToScalar()) + PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo); sb.Append(appendChar); } } else { @@ -7540,7 +7552,8 @@ private string ToJuliaString(string fltFormat, int width, CultureInfo? cultureIn if (Dimensions == 0) { builder.Append(", value = "); - PrintValue(builder, dtype, this.ToScalar(), fltFormat, actualCulturInfo); + using (var scalar = this.ToScalar()) + PrintValue(builder, dtype, scalar, fltFormat, actualCulturInfo); } else if (Dimensions == 1) { @@ -7671,7 +7684,8 @@ private static void BuildRow(List row, Tensor t, int width, string fltFo for (int i = 0; i < t.shape[0]; i++) { var builder = new StringBuilder(); - PrintValue(builder, type, t[i].ToScalar(), fltFormat, cultureInfo); + using (var scalar = t[i].ToScalar()) + PrintValue(builder, type, scalar, fltFormat, cultureInfo); var str = builder.ToString(); From 2c469dafbf81e547703e481cc518fe4cf0782793 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Wed, 17 Sep 2025 13:49:21 +0900 Subject: [PATCH 086/101] Update TensorExtensionMethods.To*(this Tensor value). * Update src/TorchSharp/Tensor/TensorExtensionMethods.cs. 
+ Update TensorExtensionMethods.To*(this Tensor value). - Declare TorchSharp.Scalar explicitly. --- .../Tensor/TensorExtensionMethods.cs | 70 +++++++++++++++---- 1 file changed, 57 insertions(+), 13 deletions(-) diff --git a/src/TorchSharp/Tensor/TensorExtensionMethods.cs b/src/TorchSharp/Tensor/TensorExtensionMethods.cs index 2f4fa81dc..89046dafb 100644 --- a/src/TorchSharp/Tensor/TensorExtensionMethods.cs +++ b/src/TorchSharp/Tensor/TensorExtensionMethods.cs @@ -597,82 +597,126 @@ public static Tensor ToTensor(this T scalar, Device? device = null, bool requ /// Explicitly convert a singleton tensor to a .NET scalar value. /// /// The input tensor - public static (float Real, float Imaginary) ToComplexFloat32(this Tensor value) => value.ToScalar().ToComplexFloat32(); + public static (float Real, float Imaginary) ToComplexFloat32(this Tensor value) + { + using var scalar = value.ToScalar(); + return scalar.ToComplexFloat32(); + } /// /// Explicitly convert a singleton tensor to a .NET scalar value. /// /// The input tensor - public static System.Numerics.Complex ToComplexFloat64(this Tensor value) => value.ToScalar().ToComplexFloat64(); + public static System.Numerics.Complex ToComplexFloat64(this Tensor value) + { + using var scalar = value.ToScalar(); + return scalar.ToComplexFloat64(); + } #if NET6_0_OR_GREATER /// /// Explicitly convert a singleton tensor to a .NET scalar value. /// /// The input tensor - public static Half ToHalf(this Tensor value) => value.ToScalar().ToHalf(); + public static Half ToHalf(this Tensor value) + { + using var scalar = value.ToScalar(); + return scalar.ToHalf(); + } #endif /// /// Explicitly convert a singleton tensor to a .NET scalar value. /// /// The input tensor - public static float ToSingle(this Tensor value) => value.ToScalar().ToSingle(); + public static float ToSingle(this Tensor value) + { + using var scalar = value.ToScalar(); + return scalar.ToSingle(); + } /// /// Explicitly convert a singleton tensor to a .NET scalar value. /// /// The input tensor - public static double ToDouble(this Tensor value) => value.ToScalar().ToDouble(); + public static double ToDouble(this Tensor value) + { + using var scalar = value.ToScalar(); + return scalar.ToDouble(); + } /// /// Explicitly convert a singleton tensor to a .NET scalar value. /// /// The input tensor - public static sbyte ToSByte(this Tensor value) => value.ToScalar().ToSByte(); + public static sbyte ToSByte(this Tensor value) + { + using var scalar = value.ToScalar(); + return scalar.ToSByte(); + } /// /// Explicitly convert a singleton tensor to a .NET scalar value. /// /// The input tensor - public static byte ToByte(this Tensor value) => value.ToScalar().ToByte(); + public static byte ToByte(this Tensor value) + { + using var scalar = value.ToScalar(); + return scalar.ToByte(); + } /// /// Explicitly convert a singleton tensor to a .NET scalar value. /// /// The input tensor - public static short ToInt16(this Tensor value) => value.ToScalar().ToInt16(); + public static short ToInt16(this Tensor value) + { + using var scalar = value.ToScalar(); + return scalar.ToInt16(); + } /// /// Explicitly convert a singleton tensor to a .NET scalar value. /// /// The input tensor - public static int ToInt32(this Tensor value) => value.ToScalar().ToInt32(); + public static int ToInt32(this Tensor value) + { + using var scalar = value.ToScalar(); + return scalar.ToInt32(); + } /// /// Explicitly convert a singleton tensor to a .NET scalar value. 
/// /// The input tensor - public static long ToInt64(this Tensor value) => value.ToScalar().ToInt64(); + public static long ToInt64(this Tensor value) + { + using var scalar = value.ToScalar(); + return scalar.ToInt64(); + } /// /// Explicitly convert a singleton tensor to a .NET scalar value. /// /// The input tensor - public static bool ToBoolean(this Tensor value) => value.ToScalar().ToBoolean(); + public static bool ToBoolean(this Tensor value) + { + using var scalar = value.ToScalar(); + return scalar.ToBoolean(); + } /// /// Explicitly convert a singleton tensor to a .NET scalar value. /// /// The input tensor - public static (float Real, float Imaginary) ToComplex32(this Tensor value) => value.ToScalar().ToComplexFloat32(); + public static (float Real, float Imaginary) ToComplex32(this Tensor value) => ToComplexFloat32(value); /// /// Explicitly convert a singleton tensor to a .NET scalar value. /// /// The input tensor - public static System.Numerics.Complex ToComplex64(this Tensor value) => value.ToScalar().ToComplexFloat64(); + public static System.Numerics.Complex ToComplex64(this Tensor value) => ToComplexFloat64(value); /// /// Multiply the dimensions of a tensor shape to provide a complete size. From f52c6c46dfd5b1209b7980726db962d37783d7e2 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Wed, 17 Sep 2025 14:41:22 +0900 Subject: [PATCH 087/101] Introduce torch.Tensor.fill_ overloads. * Update src/TorchSharp/Tensor/Tensor.cs. + Introduce torch.Tensor.fill_ overloads. --- src/TorchSharp/Tensor/Tensor.cs | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index 5762d2116..58e611648 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -670,6 +670,31 @@ public Tensor fill_(Scalar value) return this; } + /// + public Tensor fill_(byte value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); } + /// + public Tensor fill_(sbyte value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); } + /// + public Tensor fill_(short value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); } + /// + public Tensor fill_(int value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); } + /// + public Tensor fill_(long value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); } +#if NET6_0_OR_GREATER + /// + public Tensor fill_(Half value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); } +#endif + /// + public Tensor fill_(float value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); } + /// + public Tensor fill_(double value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); } + /// + public Tensor fill_(bool value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); } + /// + public Tensor fill_((float, float) value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); } + /// + public Tensor fill_(System.Numerics.Complex value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); } + /// /// Gets the type of the tensor elements. /// From cee753b4c110e35c0c4b25638d6479d68fba5db0 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Wed, 17 Sep 2025 15:08:19 +0900 Subject: [PATCH 088/101] Update TorchSharp.Modules.Rprop. * Update src/TorchSharp/Optimizers/Rprop.cs. + Update TorchSharp.Modules.Rprop.step. - Cosmetic. 
- Use torch.Tensor.masked_fill_. + Update TorchSharp.Modules.Rprop.State.Initialize. - Use a torch.Tensor.fill_ overload. --- src/TorchSharp/Optimizers/Rprop.cs | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/src/TorchSharp/Optimizers/Rprop.cs b/src/TorchSharp/Optimizers/Rprop.cs index fd4f7f2b3..6dae2a11a 100644 --- a/src/TorchSharp/Optimizers/Rprop.cs +++ b/src/TorchSharp/Optimizers/Rprop.cs @@ -136,19 +136,14 @@ public override Tensor step(Func closure = null) var options = group.Options as Options; var maximize = options.maximize.Value; - var etaminus = options.etaminus.Value; - var etaplus = options.etaplus.Value; - using var etaminus_scalar = etaminus.ToScalar(); - using var etaplus_scalar = etaplus.ToScalar(); - using var etaminus_tensor = torch.tensor(etaminus); - using var etaplus_tensor = torch.tensor(etaplus); + using var etaminus_scalar = options.etaminus.Value.ToScalar(); + using var etaplus_scalar = options.etaplus.Value.ToScalar(); using var min_step_scalar = options.min_step.Value.ToScalar(); using var max_step_scalar = options.max_step.Value.ToScalar(); var lr = options.LearningRate.Value; // FIXME: Unused? using var zero_scalar = 0.ToScalar(); using var one_scalar = 1.ToScalar(); using var negative_one_scalar = (-1).ToScalar(); - using var one_tensor = torch.tensor(1); foreach (var param in group.Parameters) { @@ -165,9 +160,9 @@ public override Tensor step(Func closure = null) state.step += 1; var sign = grad.mul(state.prev).sign(); // FIXME: Use torch.Tensor.sign_? - sign[sign.gt(zero_scalar)] = etaplus_tensor; - sign[sign.lt(zero_scalar)] = etaminus_tensor; - sign[sign.eq(zero_scalar)] = one_tensor; + sign.masked_fill_(sign.gt(zero_scalar), etaplus_scalar); + sign.masked_fill_(sign.lt(zero_scalar), etaminus_scalar); + sign.masked_fill_(sign.eq(zero_scalar), one_scalar); state.step_size.mul_(sign).clamp_(min_step_scalar, max_step_scalar); @@ -316,8 +311,7 @@ public override void Initialize(OptimizerOptions options) this.step = 0; this.prev = torch.zeros_like(_parameter).DetachFromDisposeScope(); - using var lr_scalar = ((double)(options as Options).LearningRate!).ToScalar(); - this.step_size = _parameter.new_empty(_parameter.shape).fill_(lr_scalar).DetachFromDisposeScope(); + this.step_size = _parameter.new_empty(_parameter.shape).fill_((double)(options as Options).LearningRate!).DetachFromDisposeScope(); } } From b077e6a564d87646463a82a0b7d6d92cbfcbcf13 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Wed, 17 Sep 2025 15:52:23 +0900 Subject: [PATCH 089/101] Introduce torch.Tensor.index_put_ overloads. * Update src/TorchSharp/Tensor/Tensor.cs. + Introduce torch.Tensor.index_put_ overloads. 
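A short sketch of how the new overloads read at a call site (shape and values are hypothetical; `TensorIndex.Single` and `TensorIndex.Colon` are the existing indexing helpers):

    // Sketch: write 1.0 into the whole first row; the double overload wraps the
    // value in a Scalar and disposes it before returning.
    using static TorchSharp.torch;

    var m = zeros(3, 4);
    m.index_put_(1.0, TensorIndex.Single(0), TensorIndex.Colon);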
--- src/TorchSharp/Tensor/Tensor.cs | 50 +++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index 58e611648..fcd6e2e8d 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -1708,6 +1708,31 @@ public Tensor index_put_(Scalar value, params TensorIndex[] indices) } } + /// + public Tensor index_put_(byte value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_(sbyte value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_(short value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_(int value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_(long value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } +#if NET6_0_OR_GREATER + /// + public Tensor index_put_(Half value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } +#endif + /// + public Tensor index_put_(float value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_(double value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_(bool value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_((float, float) value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_(System.Numerics.Complex value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// /// Index into the tensor using Python-like indexing expressions and place a scalar tensor at the index. 
/// @@ -1716,6 +1741,31 @@ public Tensor index_put_(Scalar value, params Tensor[] indices) return index_put_(value, indices.Select(t => TensorIndex.Tensor(t)).ToArray()); } + /// + public Tensor index_put_(byte value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_(sbyte value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_(short value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_(int value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_(long value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } +#if NET6_0_OR_GREATER + /// + public Tensor index_put_(Half value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } +#endif + /// + public Tensor index_put_(float value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_(double value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_(bool value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_((float, float) value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// + public Tensor index_put_(System.Numerics.Complex value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); } + /// /// Returns a new tensor which indexes the input tensor along dimension dim using the entries in index which is a LongTensor. /// From ea787402f2c9cad6347dc787dc35e7a88bd13fd4 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Wed, 17 Sep 2025 16:20:56 +0900 Subject: [PATCH 090/101] Introduce torch.Tensor.index_add{,_} overloads. * Update src/TorchSharp/Tensor/Tensor.cs. + Introduce torch.Tensor.index_add{,_} overloads. 
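A matching sketch for the int-alpha overload (tensors and values are hypothetical):

    // Sketch: rows 0 and 2 of t accumulate 2 * src; the int alpha is wrapped in
    // a Scalar that is disposed before index_add_ returns.
    using static TorchSharp.torch;

    var t = ones(3, 4);
    var idx = tensor(new long[] { 0, 2 });
    var src = ones(2, 4);
    t.index_add_(0, idx, src, 2);   // rows 0 and 2 are now 3.0 everywhere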
--- src/TorchSharp/Tensor/Tensor.cs | 50 +++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index fcd6e2e8d..b9fe501dd 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -1884,6 +1884,31 @@ public Tensor index_add(long dim, Tensor index, Tensor source, Scalar alpha) return new Tensor(res); } + /// + public Tensor index_add(long dim, Tensor index, Tensor source, byte alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); } + /// + public Tensor index_add(long dim, Tensor index, Tensor source, sbyte alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); } + /// + public Tensor index_add(long dim, Tensor index, Tensor source, short alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); } + /// + public Tensor index_add(long dim, Tensor index, Tensor source, int alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); } + /// + public Tensor index_add(long dim, Tensor index, Tensor source, long alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); } +#if NET6_0_OR_GREATER + /// + public Tensor index_add(long dim, Tensor index, Tensor source, Half alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); } +#endif + /// + public Tensor index_add(long dim, Tensor index, Tensor source, float alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); } + /// + public Tensor index_add(long dim, Tensor index, Tensor source, double alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); } + /// + public Tensor index_add(long dim, Tensor index, Tensor source, bool alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); } // FIXME: Well defined? + /// + public Tensor index_add(long dim, Tensor index, Tensor source, (float, float) alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); } + /// + public Tensor index_add(long dim, Tensor index, Tensor source, System.Numerics.Complex alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); } + /// /// Accumulate, in place, the elements of alpha times source into the input tensor by adding to the indices in the order given in index. 
/// @@ -1904,6 +1929,31 @@ public Tensor index_add_(long dim, Tensor index, Tensor source, Scalar alpha) return this; } + /// + public Tensor index_add_(long dim, Tensor index, Tensor source, byte alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); } + /// + public Tensor index_add_(long dim, Tensor index, Tensor source, sbyte alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); } + /// + public Tensor index_add_(long dim, Tensor index, Tensor source, short alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); } + /// + public Tensor index_add_(long dim, Tensor index, Tensor source, int alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); } + /// + public Tensor index_add_(long dim, Tensor index, Tensor source, long alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); } +#if NET6_0_OR_GREATER + /// + public Tensor index_add_(long dim, Tensor index, Tensor source, Half alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); } +#endif + /// + public Tensor index_add_(long dim, Tensor index, Tensor source, float alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); } + /// + public Tensor index_add_(long dim, Tensor index, Tensor source, double alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); } + /// + public Tensor index_add_(long dim, Tensor index, Tensor source, bool alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); } // FIXME: Well defined? + /// + public Tensor index_add_(long dim, Tensor index, Tensor source, (float, float) alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); } + /// + public Tensor index_add_(long dim, Tensor index, Tensor source, System.Numerics.Complex alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); } + /// /// Copies the elements of the source tensor into the input tensor by selecting the indices in the order given in index. /// From 16f7e9bd20fa83714a545a7094b868bdf07d33d8 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Wed, 17 Sep 2025 17:22:10 +0900 Subject: [PATCH 091/101] Introduce torch.Tensor.index_fill{,_} overloads. * Update src/TorchSharp/Tensor/Tensor.cs. + Introduce torch.Tensor.index_fill{,_} overloads. 
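And the fill variant, under the same illustrative assumptions:

    // Sketch: set column 1 of every row to -1 via the new int overload; the
    // implicit Scalar is again disposed inside the call.
    using static TorchSharp.torch;

    var t = zeros(2, 3);
    using var idx = tensor(new long[] { 1 });
    t.index_fill_(1, idx, -1);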
--- src/TorchSharp/Tensor/Tensor.cs | 50 +++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index b9fe501dd..f8d7173af 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -2013,6 +2013,31 @@ public Tensor index_fill(long dim, Tensor index, Scalar value) return new Tensor(res); } + /// + public Tensor index_fill(long dim, Tensor index, byte value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); } + /// + public Tensor index_fill(long dim, Tensor index, sbyte value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); } + /// + public Tensor index_fill(long dim, Tensor index, short value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); } + /// + public Tensor index_fill(long dim, Tensor index, int value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); } + /// + public Tensor index_fill(long dim, Tensor index, long value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); } +#if NET6_0_OR_GREATER + /// + public Tensor index_fill(long dim, Tensor index, Half value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); } +#endif + /// + public Tensor index_fill(long dim, Tensor index, float value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); } + /// + public Tensor index_fill(long dim, Tensor index, double value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); } + /// + public Tensor index_fill(long dim, Tensor index, bool value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); } + /// + public Tensor index_fill(long dim, Tensor index, (float, float) value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); } + /// + public Tensor index_fill(long dim, Tensor index, System.Numerics.Complex value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); } + /// /// Fills, in place, the elements of the input tensor with value value by selecting the indices in the order given in index. 
/// @@ -2032,6 +2057,31 @@ public Tensor index_fill_(long dim, Tensor index, Scalar value) return this; } + /// + public Tensor index_fill_(long dim, Tensor index, byte value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); } + /// + public Tensor index_fill_(long dim, Tensor index, sbyte value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); } + /// + public Tensor index_fill_(long dim, Tensor index, short value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); } + /// + public Tensor index_fill_(long dim, Tensor index, int value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); } + /// + public Tensor index_fill_(long dim, Tensor index, long value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); } +#if NET6_0_OR_GREATER + /// + public Tensor index_fill_(long dim, Tensor index, Half value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); } +#endif + /// + public Tensor index_fill_(long dim, Tensor index, float value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); } + /// + public Tensor index_fill_(long dim, Tensor index, double value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); } + /// + public Tensor index_fill_(long dim, Tensor index, bool value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); } + /// + public Tensor index_fill_(long dim, Tensor index, (float, float) value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); } + /// + public Tensor index_fill_(long dim, Tensor index, System.Numerics.Complex value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); } + /// /// Returns a tensor with the same data and number of elements as the input tensor but with the specified shape. /// From effda51b3b10b3427a59390becf5b31d77e82181 Mon Sep 17 00:00:00 2001 From: Masaru Kimura Date: Thu, 18 Sep 2025 11:24:48 +0900 Subject: [PATCH 092/101] Introduce torch.Tensor.threshold{,_} overloads. * Update src/TorchSharp/Tensor/Tensor.cs. + Introduce torch.Tensor.threshold{,_} overloads. --- src/TorchSharp/Tensor/Tensor.cs | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs index f8d7173af..11a8a6647 100644 --- a/src/TorchSharp/Tensor/Tensor.cs +++ b/src/TorchSharp/Tensor/Tensor.cs @@ -2404,6 +2404,21 @@ public Tensor threshold(Scalar threshold, Scalar value) return new Tensor(res); } + // FIXME: Consider in cases of threshold and value are not same typed? 
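+ // Note: same-typed pairs also cover mixed numeric calls; e.g.
+ // t.threshold(1, 2.0) binds through C#'s implicit int-to-double widening to
+ // the (double, double) overload below, and mixes with no common numeric type
+ // fall back to the (Scalar, Scalar) overload above.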
From effda51b3b10b3427a59390becf5b31d77e82181 Mon Sep 17 00:00:00 2001
From: Masaru Kimura
Date: Thu, 18 Sep 2025 11:24:48 +0900
Subject: [PATCH 092/101] Introduce torch.Tensor.threshold{,_} overloads.

* Update src/TorchSharp/Tensor/Tensor.cs.
  + Introduce torch.Tensor.threshold{,_} overloads.
---
 src/TorchSharp/Tensor/Tensor.cs | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs
index f8d7173af..11a8a6647 100644
--- a/src/TorchSharp/Tensor/Tensor.cs
+++ b/src/TorchSharp/Tensor/Tensor.cs
@@ -2404,6 +2404,21 @@ public Tensor threshold(Scalar threshold, Scalar value)
     return new Tensor(res);
 }

+ // FIXME: Consider cases where threshold and value are not the same type.
+ public Tensor threshold(byte threshold, byte value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+ public Tensor threshold(sbyte threshold, sbyte value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+ public Tensor threshold(short threshold, short value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+ public Tensor threshold(int threshold, int value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+ public Tensor threshold(long threshold, long value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor threshold(Half threshold, Half value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+#endif
+ public Tensor threshold(float threshold, float value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+ public Tensor threshold(double threshold, double value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+ public Tensor threshold(bool threshold, bool value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); } // FIXME: Well defined?
+ public Tensor threshold((float, float) threshold, (float, float) value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); } // FIXME: Well defined?
+ public Tensor threshold(System.Numerics.Complex threshold, System.Numerics.Complex value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); } // FIXME: Well defined?
+
 public Tensor threshold_(Scalar threshold, Scalar value)
 {
     NativeMethods.THSTensor_threshold_(Handle, threshold.Handle, value.Handle);
@@ -2411,6 +2426,21 @@ public Tensor threshold_(Scalar threshold, Scalar value)
     return this;
 }

+ // FIXME: Consider cases where threshold and value are not the same type.
+ public Tensor threshold_(byte threshold, byte value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+ public Tensor threshold_(sbyte threshold, sbyte value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+ public Tensor threshold_(short threshold, short value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+ public Tensor threshold_(int threshold, int value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+ public Tensor threshold_(long threshold, long value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor threshold_(Half threshold, Half value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+#endif
+ public Tensor threshold_(float threshold, float value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+ public Tensor threshold_(double threshold, double value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+ public Tensor threshold_(bool threshold, bool value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); } // FIXME: Well defined?
+ public Tensor threshold_((float, float) threshold, (float, float) value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); } // FIXME: Well defined?
+ public Tensor threshold_(System.Numerics.Complex threshold, System.Numerics.Complex value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); } // FIXME: Well defined?
+
 ///
 /// Returns a view of the tensor conjugated and with the last two dimensions transposed.
 ///
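A quick sketch of the new surface, assuming the usual PyTorch threshold semantics (elements less than or equal to threshold are replaced by value); the input values are made up for the example:

    using static TorchSharp.torch;

    var t = tensor(new float[] { -1.0f, 0.5f, 2.0f });
    var y = t.threshold(1.0f, 0.0f);   // float overload; both Scalars are created and disposed internally
    t.threshold_(1.0f, 0.0f);          // in-place variant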
From 48281ed489cc8948e3cb4626680deca901b1362c Mon Sep 17 00:00:00 2001
From: Masaru Kimura
Date: Thu, 18 Sep 2025 11:26:05 +0900
Subject: [PATCH 093/101] Update torch.nn.functional.threshold.

* Update src/TorchSharp/NN/Activation/Threshold.cs.
  + Update torch.nn.functional.threshold.
    - Use torch.Tensor.threshold{,_} overloads.
---
 src/TorchSharp/NN/Activation/Threshold.cs | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/src/TorchSharp/NN/Activation/Threshold.cs b/src/TorchSharp/NN/Activation/Threshold.cs
index 9e8181d9a..6ebd606be 100644
--- a/src/TorchSharp/NN/Activation/Threshold.cs
+++ b/src/TorchSharp/NN/Activation/Threshold.cs
@@ -61,9 +61,7 @@ public static partial class functional
 /// Do the operation in-place
 public static Tensor threshold(Tensor x, double threshold, double value, bool inplace = false)
 {
-     using var threshold_scalar = threshold.ToScalar();
-     using var value_scalar = value.ToScalar();
-     return inplace ? x.threshold_(threshold_scalar, value_scalar).alias() : x.threshold(threshold_scalar, value_scalar);
+     return inplace ? x.threshold_(threshold, value).alias() : x.threshold(threshold, value);
 }

 ///

From e90139f7436f1c2f9c0349596e2fcb401ba969a8 Mon Sep 17 00:00:00 2001
From: Masaru Kimura
Date: Thu, 18 Sep 2025 12:01:48 +0900
Subject: [PATCH 094/101] Introduce torch.Tensor.softplus overloads.

* Update src/TorchSharp/Tensor/Tensor.cs.
  + Introduce torch.Tensor.softplus overloads.
---
 src/TorchSharp/Tensor/Tensor.cs | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs
index 11a8a6647..6ce8b5f69 100644
--- a/src/TorchSharp/Tensor/Tensor.cs
+++ b/src/TorchSharp/Tensor/Tensor.cs
@@ -2969,15 +2969,7 @@ public Tensor positive()
 public Tensor softmax(long dim, ScalarType? dtype = null) => torch.special.softmax(this, dim, dtype);
-
- public Tensor softplus(double beta = 1, double threshold = 20)
- {
-     using var beta_scalar = beta.ToScalar();
-     using var threshold_scalar = threshold.ToScalar();
-     return softplus1(beta_scalar, threshold_scalar);
- }
-
- private Tensor softplus1(Scalar beta, Scalar threshold)
+ public Tensor softplus(Scalar beta, Scalar threshold) // FIXME: No default beta and threshold?
 {
     var res = NativeMethods.THSTensor_softplus(Handle, beta.Handle, threshold.Handle);
     if (res == IntPtr.Zero)
@@ -2985,6 +2977,22 @@ private Tensor softplus1(Scalar beta, Scalar threshold)
     return new Tensor(res);
 }

+ // FIXME: Consider cases where beta and threshold are not the same type.
+ public Tensor softplus(byte beta = 1, byte threshold = 20) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined?
+ public Tensor softplus(sbyte beta = 1, sbyte threshold = 20) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined?
+ public Tensor softplus(short beta = 1, short threshold = 20) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined?
+ public Tensor softplus(int beta = 1, int threshold = 20) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined?
+ public Tensor softplus(long beta = 1, long threshold = 20) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined?
+#if NET6_0_OR_GREATER
+ public Tensor softplus(Half beta, Half threshold) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: No default beta and threshold?
+#endif
+ public Tensor softplus(float beta = 1, float threshold = 20) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); }
+ public Tensor softplus(double beta = 1, double threshold = 20) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); }
+ public Tensor softplus(bool beta, bool threshold) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined? No default beta and threshold?
+ public Tensor softplus((float, float) beta, (float, float) threshold) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined? No default beta and threshold?
+ public Tensor softplus(System.Numerics.Complex beta, System.Numerics.Complex threshold) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined? No default beta and threshold?
+ public Tensor softplus() => softplus(1.0, 20.0);
+
 public Tensor ravel()
 {
     var res = NativeMethods.THSTensor_ravel(Handle);
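One detail worth noting in this patch: because every numeric overload of softplus now carries default arguments, a bare x.softplus() call could be ambiguous to C# overload resolution, which is presumably why the explicit parameterless softplus() delegating to softplus(1.0, 20.0) is added last. A usage sketch (the input tensor is illustrative):

    using static TorchSharp.torch;

    var t = randn(4);
    var a = t.softplus();            // parameterless overload, beta = 1.0, threshold = 20.0
    var b = t.softplus(2.0, 10.0);   // double overload; Scalars disposed after the native call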
From 1287b2dcf2fe9cc7a3bcc282727de0c8ac72a9e8 Mon Sep 17 00:00:00 2001
From: Masaru Kimura
Date: Thu, 18 Sep 2025 13:46:22 +0900
Subject: [PATCH 095/101] Add more torch.Tensor.celu{,_} overloads.

* Update src/TorchSharp/Tensor/Tensor.cs.
  + Add more torch.Tensor.celu{,_} overloads.
---
 src/TorchSharp/Tensor/Tensor.cs | 42 +++++++++++++++++++++++----------
 1 file changed, 30 insertions(+), 12 deletions(-)

diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs
index 6ce8b5f69..4843479d3 100644
--- a/src/TorchSharp/Tensor/Tensor.cs
+++ b/src/TorchSharp/Tensor/Tensor.cs
@@ -3051,18 +3051,6 @@ public Tensor rrelu_(double lower = one_eighth, double upper = one_third)
     return this;
 }

- public Tensor celu()
- {
-     using var one_scalar = 1.0.ToScalar();
-     return this.celu(one_scalar);
- }
-
- public Tensor celu_()
- {
-     using var one_scalar = 1.0.ToScalar();
-     return this.celu_(one_scalar);
- }
-
 public Tensor celu(Scalar alpha)
 {
     var res = NativeMethods.THSTensor_celu(Handle, alpha.Handle);
@@ -3071,6 +3059,21 @@ public Tensor celu(Scalar alpha)
     return new Tensor(res);
 }

+ public Tensor celu(byte alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); }
+ public Tensor celu(sbyte alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); }
+ public Tensor celu(short alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); }
+ public Tensor celu(int alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); }
+ public Tensor celu(long alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor celu(Half alpha) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); } // FIXME: No default alpha?
+#endif
+ public Tensor celu(float alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); }
+ public Tensor celu(double alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); }
+ public Tensor celu(bool alpha) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); } // FIXME: Well defined? No default alpha?
+ public Tensor celu((float, float) alpha) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); } // FIXME: Well defined? No default alpha?
+ public Tensor celu(System.Numerics.Complex alpha) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); } // FIXME: Well defined? No default alpha?
+ public Tensor celu() => celu(1.0);
+
 public Tensor celu_(Scalar alpha)
 {
     NativeMethods.THSTensor_celu_(Handle, alpha.Handle);
@@ -3078,6 +3081,21 @@ public Tensor celu_(Scalar alpha)
     return this;
 }

+ public Tensor celu_(byte alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); }
+ public Tensor celu_(sbyte alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); }
+ public Tensor celu_(short alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); }
+ public Tensor celu_(int alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); }
+ public Tensor celu_(long alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor celu_(Half alpha) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); } // FIXME: No default alpha?
+#endif
+ public Tensor celu_(float alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); }
+ public Tensor celu_(double alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); }
+ public Tensor celu_(bool alpha) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); } // FIXME: Well defined? No default alpha?
+ public Tensor celu_((float, float) alpha) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); } // FIXME: Well defined? No default alpha?
+ public Tensor celu_(System.Numerics.Complex alpha) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); } // FIXME: Well defined? No default alpha?
+ public Tensor celu_() => celu_(1.0);
+
 public Tensor elu(double alpha = 1)
 {
     using var alpha_scalar = alpha.ToScalar();

From 3da984911a12beb7ff58d179d44ba5c7e1fc5aff Mon Sep 17 00:00:00 2001
From: Masaru Kimura
Date: Thu, 18 Sep 2025 13:47:16 +0900
Subject: [PATCH 096/101] Update torch.nn.functional.celu.

* Update src/TorchSharp/NN/Activation/CELU.cs.
  + Update torch.nn.functional.celu.
    - Use torch.Tensor.celu{,_} overloads.
---
 src/TorchSharp/NN/Activation/CELU.cs | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/TorchSharp/NN/Activation/CELU.cs b/src/TorchSharp/NN/Activation/CELU.cs
index 9241255af..ecb85dd47 100644
--- a/src/TorchSharp/NN/Activation/CELU.cs
+++ b/src/TorchSharp/NN/Activation/CELU.cs
@@ -56,8 +56,7 @@ public static partial class functional
 ///
 public static Tensor celu(Tensor x, double alpha, bool inplace = false)
 {
-     using var alpha_scalar = alpha.ToScalar();
-     return inplace ? x.celu_(alpha_scalar).alias() : x.celu(alpha_scalar);
+     return inplace ? x.celu_(alpha).alias() : x.celu(alpha);
 }
 }
 }

From 2a2b66e3b920cb83755972cd28a9f4eef36aa233 Mon Sep 17 00:00:00 2001
From: Masaru Kimura
Date: Thu, 18 Sep 2025 14:09:37 +0900
Subject: [PATCH 097/101] Add more torch.Tensor.elu{,_} overloads.

* Update src/TorchSharp/Tensor/Tensor.cs.
  + Add more torch.Tensor.elu{,_} overloads.
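The celu patch follows the same pattern as softplus: typed overloads that wrap their argument in a short-lived Scalar, plus an explicit parameterless form to keep a bare x.celu() call unambiguous. A sketch (values illustrative):

    using static TorchSharp.torch;

    var t = randn(8);
    var y1 = t.celu();       // parameterless overload, alpha = 1.0
    var y2 = t.celu(0.5);    // double overload
    t.celu_(2);              // int overload, in place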
---
 src/TorchSharp/Tensor/Tensor.cs | 46 +++++++++++++++++++++++----------
 1 file changed, 32 insertions(+), 14 deletions(-)

diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs
index 4843479d3..140322618 100644
--- a/src/TorchSharp/Tensor/Tensor.cs
+++ b/src/TorchSharp/Tensor/Tensor.cs
@@ -3096,20 +3096,6 @@ public Tensor celu_(Scalar alpha)
 public Tensor celu_(System.Numerics.Complex alpha) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); } // FIXME: Well defined? No default alpha?
 public Tensor celu_() => celu_(1.0);

- public Tensor elu(double alpha = 1)
- {
-     using var alpha_scalar = alpha.ToScalar();
-     using var one_scalar = 1.0.ToScalar();
-     return this.elu(alpha_scalar, one_scalar, one_scalar);
- }
-
- public Tensor elu_(double alpha = 1)
- {
-     using var alpha_scalar = alpha.ToScalar();
-     using var one_scalar = 1.0.ToScalar();
-     return this.elu_(alpha_scalar, one_scalar, one_scalar);
- }
-
 public Tensor elu(Scalar alpha, Scalar scale, Scalar input_scale)
 {
     var res = NativeMethods.THSTensor_elu(Handle, alpha.Handle, scale.Handle, input_scale.Handle);
     if (res == IntPtr.Zero)
@@ -3118,6 +3104,22 @@ public Tensor elu(Scalar alpha, Scalar scale, Scalar input_scale)
     return new Tensor(res);
 }

+ // FIXME: Consider cases where alpha, scale and input_scale are not the same type.
+ public Tensor elu(byte alpha = 1, byte scale = 1, byte input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu(sbyte alpha = 1, sbyte scale = 1, sbyte input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu(short alpha = 1, short scale = 1, short input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu(int alpha = 1, int scale = 1, int input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu(long alpha = 1, long scale = 1, long input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor elu(Half alpha, Half scale, Half input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: No default alpha, scale and input_scale?
+#endif
+ public Tensor elu(float alpha = 1, float scale = 1, float input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu(double alpha = 1, double scale = 1, double input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu(bool alpha, bool scale, bool input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: Well defined? No default alpha, scale and input_scale?
+ public Tensor elu((float, float) alpha, (float, float) scale, (float, float) input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: Well defined? No default alpha, scale and input_scale?
+ public Tensor elu(System.Numerics.Complex alpha, System.Numerics.Complex scale, System.Numerics.Complex input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: Well defined? No default alpha, scale and input_scale?
+ public Tensor elu() => elu(1.0, 1.0, 1.0);
+
 public Tensor elu_(Scalar alpha, Scalar scale, Scalar input_scale)
 {
     NativeMethods.THSTensor_elu_(Handle, alpha.Handle, scale.Handle, input_scale.Handle);
@@ -3125,6 +3127,22 @@ public Tensor elu_(Scalar alpha, Scalar scale, Scalar input_scale)
     return this;
 }

+ // FIXME: Consider cases where alpha, scale and input_scale are not the same type.
+ public Tensor elu_(byte alpha = 1, byte scale = 1, byte input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu_(sbyte alpha = 1, sbyte scale = 1, sbyte input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu_(short alpha = 1, short scale = 1, short input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu_(int alpha = 1, int scale = 1, int input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu_(long alpha = 1, long scale = 1, long input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor elu_(Half alpha, Half scale, Half input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: No default alpha, scale and input_scale?
+#endif
+ public Tensor elu_(float alpha = 1, float scale = 1, float input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu_(double alpha = 1, double scale = 1, double input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu_(bool alpha, bool scale, bool input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: Well defined? No default alpha, scale and input_scale?
+ public Tensor elu_((float, float) alpha, (float, float) scale, (float, float) input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: Well defined? No default alpha, scale and input_scale?
+ public Tensor elu_(System.Numerics.Complex alpha, System.Numerics.Complex scale, System.Numerics.Complex input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: Well defined? No default alpha, scale and input_scale?
+ public Tensor elu_() => elu_(1.0, 1.0, 1.0);
+
 public Tensor gelu()
 {
     var res = NativeMethods.THSTensor_gelu(Handle);
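For elu the wrappers take three parameters, all formerly hard-coded to 1.0 in the convenience overload they replace. A sketch of the call sites this enables (shapes and values illustrative):

    using static TorchSharp.torch;

    var t = randn(8);
    var y = t.elu();                 // alpha = scale = input_scale = 1.0
    var z = t.elu(0.9, 1.0, 1.0);    // double overload; three Scalars created and disposed internally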
From 41a0db8c27faa88c8ed36e62e4cd11cabf69c823 Mon Sep 17 00:00:00 2001
From: Masaru Kimura
Date: Thu, 18 Sep 2025 14:21:54 +0900
Subject: [PATCH 098/101] Introduce torch.Tensor.hardtanh{,_} overloads.

* Update src/TorchSharp/Tensor/Tensor.cs.
  + Introduce torch.Tensor.hardtanh{,_} overloads.
---
 src/TorchSharp/Tensor/Tensor.cs | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs
index 140322618..8d1d13302 100644
--- a/src/TorchSharp/Tensor/Tensor.cs
+++ b/src/TorchSharp/Tensor/Tensor.cs
@@ -3204,6 +3204,20 @@ public Tensor hardtanh(Scalar min, Scalar max)
     return this;
 }

+ public Tensor hardtanh(byte min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+ public Tensor hardtanh(sbyte min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+ public Tensor hardtanh(short min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+ public Tensor hardtanh(int min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+ public Tensor hardtanh(long min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor hardtanh(Half min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+#endif
+ public Tensor hardtanh(float min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+ public Tensor hardtanh(double min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+ public Tensor hardtanh(bool min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); } // FIXME: Well defined?
+ public Tensor hardtanh((float, float) min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); } // FIXME: Well defined?
+ public Tensor hardtanh(System.Numerics.Complex min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); } // FIXME: Well defined?
+
 public Tensor hardtanh_(Scalar min, Scalar max)
 {
     NativeMethods.THSTensor_hardtanh_(Handle, min.Handle, max.Handle);
@@ -3211,6 +3225,20 @@ public Tensor hardtanh_(Scalar min, Scalar max)
     return this;
 }

+ public Tensor hardtanh_(byte min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+ public Tensor hardtanh_(sbyte min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+ public Tensor hardtanh_(short min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+ public Tensor hardtanh_(int min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+ public Tensor hardtanh_(long min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor hardtanh_(Half min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+#endif
+ public Tensor hardtanh_(float min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+ public Tensor hardtanh_(double min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+ public Tensor hardtanh_(bool min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); } // FIXME: Well defined?
+ public Tensor hardtanh_((float, float) min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); } // FIXME: Well defined?
+ public Tensor hardtanh_(System.Numerics.Complex min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); } // FIXME: Well defined?
+
 public Tensor heaviside(Tensor other)
 {
     var res = NativeMethods.THSTensor_heaviside(Handle, other.Handle);

From b2304280ced91bfdc3484955e858a75e795fa64c Mon Sep 17 00:00:00 2001
From: Masaru Kimura
Date: Thu, 18 Sep 2025 14:23:01 +0900
Subject: [PATCH 099/101] Update torch.nn.functional.hardtanh.

* Update src/TorchSharp/NN/Activation/Hardtanh.cs.
  + Update torch.nn.functional.hardtanh.
    - Use torch.Tensor.hardtanh{,_} overloads.
---
 src/TorchSharp/NN/Activation/Hardtanh.cs | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/src/TorchSharp/NN/Activation/Hardtanh.cs b/src/TorchSharp/NN/Activation/Hardtanh.cs
index cb7613800..fc5683986 100644
--- a/src/TorchSharp/NN/Activation/Hardtanh.cs
+++ b/src/TorchSharp/NN/Activation/Hardtanh.cs
@@ -65,9 +65,7 @@ public static partial class functional
 ///
 public static Tensor hardtanh(Tensor x, double min_val = -1.0, double max_val = 1.0, bool inplace = false)
 {
-     using var min_val_scalar = min_val.ToScalar();
-     using var max_val_scalar = max_val.ToScalar();
-     return inplace ? x.hardtanh_(min_val_scalar, max_val_scalar).alias() : x.hardtanh(min_val_scalar, max_val_scalar);
+     return inplace ? x.hardtanh_(min_val, max_val).alias() : x.hardtanh(min_val, max_val);
 }

 ///
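Note that these hardtanh signatures fix max as double while varying only the type of min; a sketch of how they are called (values illustrative):

    using static TorchSharp.torch;

    var t = randn(8);
    var y = t.hardtanh(-2.0, 2.0);   // (double, double) overload
    t.hardtanh_(-1, 1.0);            // (int, double) overload, in place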
From 789555b50b828441112bfc55c90c96116735ca27 Mon Sep 17 00:00:00 2001
From: Masaru Kimura
Date: Thu, 18 Sep 2025 15:10:55 +0900
Subject: [PATCH 100/101] Introduce torch.Tensor.leaky_relu{,_} overloads.

* Update src/TorchSharp/Tensor/Tensor.cs.
  + Introduce torch.Tensor.leaky_relu{,_} overloads.
---
 src/TorchSharp/Tensor/Tensor.cs | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs
index 8d1d13302..a7fc52f61 100644
--- a/src/TorchSharp/Tensor/Tensor.cs
+++ b/src/TorchSharp/Tensor/Tensor.cs
@@ -3377,6 +3377,20 @@ public Tensor leaky_relu(Scalar negative_slope)
     return new Tensor(res);
 }

+ public Tensor leaky_relu(byte negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); } // FIXME: Well defined?
+ public Tensor leaky_relu(sbyte negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); }
+ public Tensor leaky_relu(short negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); }
+ public Tensor leaky_relu(int negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); }
+ public Tensor leaky_relu(long negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor leaky_relu(Half negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); }
+#endif
+ public Tensor leaky_relu(float negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); }
+ public Tensor leaky_relu(double negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); }
+ public Tensor leaky_relu(bool negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); } // FIXME: Well defined?
+ public Tensor leaky_relu((float, float) negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); } // FIXME: Well defined?
+ public Tensor leaky_relu(System.Numerics.Complex negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); } // FIXME: Well defined?
+
 public Tensor leaky_relu_(Scalar negative_slope)
 {
     NativeMethods.THSTensor_leaky_relu_(Handle, negative_slope.Handle);
@@ -3384,6 +3398,20 @@ public Tensor leaky_relu_(Scalar negative_slope)
     return this;
 }

+ public Tensor leaky_relu_(byte negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); } // FIXME: Well defined?
+ public Tensor leaky_relu_(sbyte negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); }
+ public Tensor leaky_relu_(short negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); }
+ public Tensor leaky_relu_(int negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); }
+ public Tensor leaky_relu_(long negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor leaky_relu_(Half negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); }
+#endif
+ public Tensor leaky_relu_(float negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); }
+ public Tensor leaky_relu_(double negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); }
+ public Tensor leaky_relu_(bool negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); } // FIXME: Well defined?
+ public Tensor leaky_relu_((float, float) negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); } // FIXME: Well defined?
+ public Tensor leaky_relu_(System.Numerics.Complex negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); } // FIXME: Well defined?
+
 public Tensor selu()
 {
     var res = NativeMethods.THSTensor_selu(Handle);

From 48e0ca85f1fa9d34b038097448a27d2b4291d3f6 Mon Sep 17 00:00:00 2001
From: Masaru Kimura
Date: Thu, 18 Sep 2025 15:12:00 +0900
Subject: [PATCH 101/101] Update torch.nn.functional.leaky_relu.

* Update src/TorchSharp/NN/Activation/LeakyReLU.cs.
  + Update torch.nn.functional.leaky_relu.
    - Use torch.Tensor.leaky_relu{,_} overloads.
---
 src/TorchSharp/NN/Activation/LeakyReLU.cs | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/TorchSharp/NN/Activation/LeakyReLU.cs b/src/TorchSharp/NN/Activation/LeakyReLU.cs
index 8bf1c15a9..8851c0da7 100644
--- a/src/TorchSharp/NN/Activation/LeakyReLU.cs
+++ b/src/TorchSharp/NN/Activation/LeakyReLU.cs
@@ -56,8 +56,7 @@ public static partial class functional
 ///
 public static Tensor leaky_relu(Tensor input, double negative_slope = 0.01, bool inplace = false)
 {
-     using var negative_slope_scalar = negative_slope.ToScalar();
-     return inplace ? input.leaky_relu_(negative_slope_scalar).alias() : input.leaky_relu(negative_slope_scalar);
+     return inplace ? input.leaky_relu_(negative_slope).alias() : input.leaky_relu(negative_slope);
 }
 }
 }
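With the functional wrapper now forwarding to the typed overloads, neither call site needs to touch Scalar at all; a closing sketch covering both forms (the input tensor is illustrative):

    using static TorchSharp.torch;
    using static TorchSharp.torch.nn.functional;

    var t = randn(8);
    var y1 = t.leaky_relu(0.01);                    // double overload on Tensor
    var y2 = leaky_relu(t, 0.01, inplace: false);   // functional form from this patch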