diff --git a/spec/Tensor.Comp.TensorDot.Outer.Spec.savi b/spec/Tensor.Comp.TensorDot.Outer.Spec.savi index 30ce530..03ef6f9 100644 --- a/spec/Tensor.Comp.TensorDot.Outer.Spec.savi +++ b/spec/Tensor.Comp.TensorDot.Outer.Spec.savi @@ -5,12 +5,12 @@ :it "is equivalent to matrix multiplication for rank-2 tensors" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.tensordot_outer!("example" - g.const!("A", Tensor(F64).from_array([ + g.tensordot_outer("example" + g.const("A", Tensor(F64).from_array([ 1.0, 2.0 3.0, 4.0 ]).try_reshape(Tensor.Shape.new([2, 2]))) - g.const!("B", Tensor(F64).from_array([ + g.const("B", Tensor(F64).from_array([ 5.0, 6.0 7.0, 8.0 ]).try_reshape(Tensor.Shape.new([2, 2]))) @@ -26,15 +26,15 @@ :it "handles larger-rank tensors by applying to the outer axes" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.tensordot_outer!("example" - g.const!("A", Tensor(F64).from_array([ + g.tensordot_outer("example" + g.const("A", Tensor(F64).from_array([ 1, 2, 3 4, 5, 6 7, 8, 9 10, 11, 12 ]).try_reshape(Tensor.Shape.new([2, 2, 3]))) - g.const!("B", Tensor(F64).from_array([ + g.const("B", Tensor(F64).from_array([ 13, 14 15, 16 diff --git a/spec/Tensor.Gen.Random.Spec.savi b/spec/Tensor.Gen.Random.Spec.savi index 489de9c..ae45d51 100644 --- a/spec/Tensor.Gen.Random.Spec.savi +++ b/spec/Tensor.Gen.Random.Spec.savi @@ -3,16 +3,16 @@ :const describes: "Tensor.Gen.Random" :it "raises its internal counter with each graph node that uses it" - _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( - random = g.gen_random!("random" - g.const!("seed", Tensor(U32).from_array([2, 3])) + _WithGraphHelper.run(@env) -> (g, session | + random = g.gen_random("random" + g.const("seed", Tensor(U32).from_array([2, 3])) ) shape = Tensor.Shape.scalar - example1 = g.random_uniform!("example1", random, Tensor(F64), shape) - example2 = g.random_uniform!("example2", random, Tensor(F64), shape) - example3 = g.random_uniform!("example3", random, Tensor(F64), shape) - example4 = g.random_uniform!("example4", random, Tensor(F64), shape) - example5 = g.random_uniform!("example5", random, Tensor(F64), shape) + example1 = g.random_uniform("example1", random, Tensor(F64), shape) + example2 = g.random_uniform("example2", random, Tensor(F64), shape) + example3 = g.random_uniform("example3", random, Tensor(F64), shape) + example4 = g.random_uniform("example4", random, Tensor(F64), shape) + example5 = g.random_uniform("example5", random, Tensor(F64), shape) assert: [ session.compute!(example1).as!(Tensor(F64)).into_array.first! @@ -27,4 +27,4 @@ 0.913356627721398900 0.007108289495397546 ] - )) + ) diff --git a/spec/Tensor.Graph.Spec.savi b/spec/Tensor.Graph.Spec.savi index 8299f30..05e6bdb 100644 --- a/spec/Tensor.Graph.Spec.savi +++ b/spec/Tensor.Graph.Spec.savi @@ -8,38 +8,38 @@ graph = Tensor.Graph.new session = Tensor.Graph.Session.new(graph) - try ( - a = graph.new_operation("Const", "a") -> (builder | - builder - .set_attr_type("dtype", Tensor(F64).element_type_code) - .set_attr_tensor!("value", a_value) - .finish! 
- ) - assert: a.output(0).shape.rank == 2 - assert: a.output(0).shape.into_array == [2, 2] + a = graph.new_operation("Const", "a") -> (builder | + builder + .set_attr_type("dtype", Tensor(F64).element_type_code) + .set_attr_tensor("value", a_value) + .finish + ) - b = graph.new_operation("Const", "b") -> (builder | - builder - .set_attr_type("dtype", Tensor(F64).element_type_code) - .set_attr_tensor!("value", b_value) - .finish! - ) - product1 = graph.new_operation("MatMul", "product1") -> (builder | - builder - .add_input(a.output(0)) - .add_input(b.output(0)) - .finish! - ) - product2 = graph.new_operation("MatMul", "product2") -> (builder | - builder - .add_input(a.output(0)) - .add_input(b.output(0)) - .set_attr_bool("transpose_a", True) - .finish! - ) + assert: a.output(0).shape.rank == 2 + assert: a.output(0).shape.into_array == [2, 2] + b = graph.new_operation("Const", "b") -> (builder | + builder + .set_attr_type("dtype", Tensor(F64).element_type_code) + .set_attr_tensor("value", b_value) + .finish + ) + product1 = graph.new_operation("MatMul", "product1") -> (builder | + builder + .add_input(a.output(0)) + .add_input(b.output(0)) + .finish + ) + product2 = graph.new_operation("MatMul", "product2") -> (builder | + builder + .add_input(a.output(0)) + .add_input(b.output(0)) + .set_attr_bool("transpose_a", True) + .finish + ) + try ( result = session.compute!(product1.output(0)) assert: result.as!(Tensor(F64)).into_array == [ 1.0 * 5.0 + 2.0 * 7.0, 1.0 * 6.0 + 2.0 * 8.0 // row1⋅col1, row1⋅col2 @@ -61,25 +61,27 @@ assert no_error: error! ) - :it "complains when creating an operation with an invalid type" + :it "complains when evaluating an operation with an invalid type" g = Tensor.Graph.new - assert error: ( + session = Tensor.Graph.Session.new(g) + + assert error: session.compute!( g.new_operation("Bogus", "example") -> (builder | - builder.finish! - ) + builder.finish + ).output(0) ) assert: g.errors.first!.code == Tensor.Graph.Error.Code.InvalidArgument assert: g.errors.first!.message.includes("Op type not registered 'Bogus'") :it "optimizes to minimize a loss function with gradient descent" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( - learning_rate = g.const!("learning_rate", Tensor(F64).scalar(0.25)) + learning_rate = g.const("learning_rate", Tensor(F64).scalar(0.25)) - x = g.variable!("x", Tensor(F64), []) - loss = g.square!("square", x) + x = g.variable("x", Tensor(F64), []) + loss = g.square("square", x) grad = g.graph.add_gradients!([loss], [x]).first! 
- x2 = g.apply_gradient_descent!("apply_grad", grad, x, learning_rate) + x2 = g.apply_gradient_descent("apply_grad", grad, x, learning_rate) result Tensor.Any = Tensor(F64).scalar(5) [ diff --git a/spec/Tensor.Op.Add.Spec.savi b/spec/Tensor.Op.Add.Spec.savi index f20dcec..d601aed 100644 --- a/spec/Tensor.Op.Add.Spec.savi +++ b/spec/Tensor.Op.Add.Spec.savi @@ -5,9 +5,9 @@ :it "computes arithmetic addition" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.add!("example" - g.const!("x", Tensor(I32).from_array([1, 2, 3, 4])) - g.const!("y", Tensor(I32).from_array([5, 6, 7, 8])) + g.add("example" + g.const("x", Tensor(I32).from_array([1, 2, 3, 4])) + g.const("y", Tensor(I32).from_array([5, 6, 7, 8])) ) ) @@ -17,9 +17,9 @@ :it "can broadcast smaller sizes/shapes across larger sizes/shapes" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.add!("example" - g.const!("x", Tensor(I32).from_array([-1, -3, -5])) - g.const!("y", Tensor(I32).from_array([ + g.add("example" + g.const("x", Tensor(I32).from_array([-1, -3, -5])) + g.const("y", Tensor(I32).from_array([ 1, 2, 3 4, 5, 6 7, 8, 9 diff --git a/spec/Tensor.Op.Bitcast.Spec.savi b/spec/Tensor.Op.Bitcast.Spec.savi index bcf27a2..b81a749 100644 --- a/spec/Tensor.Op.Bitcast.Spec.savi +++ b/spec/Tensor.Op.Bitcast.Spec.savi @@ -5,8 +5,8 @@ :it "distributes bits into a larger number of narrower elements" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.bitcast!("example" - g.const!("input", Tensor(U16).from_array([0x0246, 0x8ace])) + g.bitcast("example" + g.const("input", Tensor(U16).from_array([0x0246, 0x8ace])) Tensor(U8) ) ) @@ -18,8 +18,8 @@ :it "consolidates bits into a smaller number of wider elements" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.bitcast!("example" - g.const!("input", Tensor(U8).from_array([0x46, 0x02, 0xce, 0x8a]) + g.bitcast("example" + g.const("input", Tensor(U8).from_array([0x46, 0x02, 0xce, 0x8a]) .try_reshape(Tensor.Shape.new([2, 2])) ) Tensor(U16) @@ -31,10 +31,12 @@ :it "complains on narrow to wide with more than one wide result per row" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.bitcast!("example" - g.const!("input", Tensor(U8).from_array([0x46, 0x02, 0xce, 0x8a]) - // this would work if we did a reshape like: [2, 2] + assert error: session.compute!( + g.bitcast("example" + g.const("input", Tensor(U8).from_array([0x46, 0x02, 0xce, 0x8a]) + // this would work if we did a reshape like: [2, 2] + ) + Tensor(U16) ) - Tensor(U16) ) ) diff --git a/spec/Tensor.Op.Cast.Spec.savi b/spec/Tensor.Op.Cast.Spec.savi index d9b03b1..a24f92c 100644 --- a/spec/Tensor.Op.Cast.Spec.savi +++ b/spec/Tensor.Op.Cast.Spec.savi @@ -5,8 +5,8 @@ :it "does bounds-wrapping when converting to a narrower integer type" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.cast!("example" - g.const!("input", Tensor(I16).from_array([0, 1, 0xffff, 0x7890])) + g.cast("example" + g.const("input", Tensor(I16).from_array([0, 1, 0xffff, 0x7890])) Tensor(I8) ) ) @@ -17,8 +17,8 @@ :it "rounds a floating-point value to its nearest integer value" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.cast!("example" - g.const!("input", Tensor(F64).from_array([2.4, 2.5, 2.6, -2.5])) + g.cast("example" + g.const("input", Tensor(F64).from_array([2.4, 2.5, 2.6, -2.5])) Tensor(I32) ) ) @@ 
-29,8 +29,8 @@ :it "rounds to the nearest representable less-precise floating-point value" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.cast!("example" - g.const!("input", Tensor(F64).from_array([-1e26])) + g.cast("example" + g.const("input", Tensor(F64).from_array([-1e26])) Tensor(F32) ) ) @@ -41,8 +41,8 @@ :it "can be set to round with floating-point truncation (toward zero)" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.cast_with_floating_point_truncation!("example" - g.const!("input", Tensor(F64).from_array([-1e26])) + g.cast_with_floating_point_truncation("example" + g.const("input", Tensor(F64).from_array([-1e26])) Tensor(F32) ) ) diff --git a/spec/Tensor.Op.Concat.Spec.savi b/spec/Tensor.Op.Concat.Spec.savi index 431ba1d..d9e6846 100644 --- a/spec/Tensor.Op.Concat.Spec.savi +++ b/spec/Tensor.Op.Concat.Spec.savi @@ -8,10 +8,10 @@ :it "combines the list of tensors into one new tensor" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.concat!("example" + g.concat("example" [ - g.const!("input_a", @f64_2x2(1, 2, 3, 4)) - g.const!("input_b", @f64_2x2(5, 6, 7, 8)) + g.const("input_a", @f64_2x2(1, 2, 3, 4)) + g.const("input_b", @f64_2x2(5, 6, 7, 8)) ] ) ) @@ -28,10 +28,10 @@ :it "can combine along a different axis" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.concat!("example" + g.concat("example" [ - g.const!("input_a", @f64_2x2(1, 2, 3, 4)) - g.const!("input_b", @f64_2x2(5, 6, 7, 8)) + g.const("input_a", @f64_2x2(1, 2, 3, 4)) + g.const("input_b", @f64_2x2(5, 6, 7, 8)) ] 1 // axis ) @@ -46,31 +46,37 @@ :it "complains when the inputs are of different types" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.concat!("example" - [ - g.const!("input_a", Tensor(F64).from_array([1, 2, 3, 4])) - g.const!("input_b", Tensor(F32).from_array([5, 6, 7, 8])) - ] + assert error: session.compute!( + g.concat("example" + [ + g.const("input_a", Tensor(F64).from_array([1, 2, 3, 4])) + g.const("input_b", Tensor(F32).from_array([5, 6, 7, 8])) + ] + ) ) ) :it "complains when the inputs are of different shapes" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.concat!("example" - [ - g.const!("input_a", @f64_2x2(1, 2, 3, 4)) - g.const!("input_b", @f64_2x2(5, 6, 7, 8).try_reshape(Tensor.Shape.new([1, 4]))) - ] + assert error: session.compute!( + g.concat("example" + [ + g.const("input_a", @f64_2x2(1, 2, 3, 4)) + g.const("input_b", @f64_2x2(5, 6, 7, 8).try_reshape(Tensor.Shape.new([1, 4]))) + ] + ) ) ) :it "complains when the given axis is greater-or-equal to the inputs' rank" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.concat!("example" - [ - g.const!("input_a", @f64_2x2(1, 2, 3, 4)) - g.const!("input_b", @f64_2x2(5, 6, 7, 8)) - ] - 2 + assert error: session.compute!( + g.concat("example" + [ + g.const("input_a", @f64_2x2(1, 2, 3, 4)) + g.const("input_b", @f64_2x2(5, 6, 7, 8)) + ] + 2 + ) ) ) diff --git a/spec/Tensor.Op.Const.Spec.savi b/spec/Tensor.Op.Const.Spec.savi index 0591756..8105c7b 100644 --- a/spec/Tensor.Op.Const.Spec.savi +++ b/spec/Tensor.Op.Const.Spec.savi @@ -5,7 +5,7 @@ :it "emits a constant tensor value" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.const!("example" + g.const("example" Tensor(F64).from_array([1, 2, 3, 4]) ) ) diff --git a/spec/Tensor.Op.Greater.Spec.savi 
b/spec/Tensor.Op.Greater.Spec.savi index 6d1205a..cd11fe5 100644 --- a/spec/Tensor.Op.Greater.Spec.savi +++ b/spec/Tensor.Op.Greater.Spec.savi @@ -5,9 +5,9 @@ :it "checks if the 1st operand's values are greater than those of the 2nd" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.greater!("example" - g.const!("x", Tensor(I32).from_array([1, 2, 3])) - g.const!("y", Tensor(I32).from_array([3, 2, 1])) + g.greater("example" + g.const("x", Tensor(I32).from_array([1, 2, 3])) + g.const("y", Tensor(I32).from_array([3, 2, 1])) ) ) @@ -17,9 +17,9 @@ :it "may optionally include equal values as being true" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.greater_or_equal!("example" - g.const!("x", Tensor(I32).from_array([1, 2, 3])) - g.const!("y", Tensor(I32).from_array([3, 2, 1])) + g.greater_or_equal("example" + g.const("x", Tensor(I32).from_array([1, 2, 3])) + g.const("y", Tensor(I32).from_array([3, 2, 1])) ) ) @@ -28,24 +28,30 @@ :it "complains if the operands are of different types" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.greater!("example" - g.const!("x", Tensor(I32).from_array([1, 2, 3])) - g.const!("y", Tensor(I8).from_array([3, 2, 1])) + assert error: session.compute!( + g.greater("example" + g.const("x", Tensor(I32).from_array([1, 2, 3])) + g.const("y", Tensor(I8).from_array([3, 2, 1])) + ) ) ) :it "complains if the operands are of different sizes" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.greater!("example" - g.const!("x", Tensor(I32).from_array([1, 2, 3])) - g.const!("y", Tensor(I32).from_array([3, 2, 1, 0])) + assert error: session.compute!( + g.greater("example" + g.const("x", Tensor(I32).from_array([1, 2, 3])) + g.const("y", Tensor(I32).from_array([3, 2, 1, 0])) + ) ) ) :it "complains if the operands are of different shapes" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.greater!("example" - g.const!("x", Tensor(I32).from_array([0, 1, 2, 3])) - g.const!("y", Tensor(I32).from_array([3, 2, 1, 0]).try_reshape(Tensor.Shape.new([2, 2]))) + assert error: session.compute!( + g.greater("example" + g.const("x", Tensor(I32).from_array([0, 1, 2, 3])) + g.const("y", Tensor(I32).from_array([3, 2, 1, 0]).try_reshape(Tensor.Shape.new([2, 2]))) + ) ) ) diff --git a/spec/Tensor.Op.Lesser.Spec.savi b/spec/Tensor.Op.Lesser.Spec.savi index 6473900..433c82a 100644 --- a/spec/Tensor.Op.Lesser.Spec.savi +++ b/spec/Tensor.Op.Lesser.Spec.savi @@ -5,9 +5,9 @@ :it "checks if the 1st operand's values are lesser than those of the 2nd" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.lesser!("example" - g.const!("x", Tensor(I32).from_array([1, 2, 3])) - g.const!("y", Tensor(I32).from_array([3, 2, 1])) + g.lesser("example" + g.const("x", Tensor(I32).from_array([1, 2, 3])) + g.const("y", Tensor(I32).from_array([3, 2, 1])) ) ) @@ -17,9 +17,9 @@ :it "may optionally include equal values as being true" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.lesser_or_equal!("example" - g.const!("x", Tensor(I32).from_array([1, 2, 3])) - g.const!("y", Tensor(I32).from_array([3, 2, 1])) + g.lesser_or_equal("example" + g.const("x", Tensor(I32).from_array([1, 2, 3])) + g.const("y", Tensor(I32).from_array([3, 2, 1])) ) ) @@ -28,24 +28,30 @@ :it "complains if the operands are of different types" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.lesser!("example" 
- g.const!("x", Tensor(I32).from_array([1, 2, 3])) - g.const!("y", Tensor(I8).from_array([3, 2, 1])) + assert error: session.compute!( + g.lesser("example" + g.const("x", Tensor(I32).from_array([1, 2, 3])) + g.const("y", Tensor(I8).from_array([3, 2, 1])) + ) ) ) :it "complains if the operands are of different sizes" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.lesser!("example" - g.const!("x", Tensor(I32).from_array([1, 2, 3])) - g.const!("y", Tensor(I32).from_array([3, 2, 1, 0])) + assert error: session.compute!( + g.lesser("example" + g.const("x", Tensor(I32).from_array([1, 2, 3])) + g.const("y", Tensor(I32).from_array([3, 2, 1, 0])) + ) ) ) :it "complains if the operands are of different shapes" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.lesser!("example" - g.const!("x", Tensor(I32).from_array([0, 1, 2, 3])) - g.const!("y", Tensor(I32).from_array([3, 2, 1, 0]).try_reshape(Tensor.Shape.new([2, 2]))) + assert error: session.compute!( + g.lesser("example" + g.const("x", Tensor(I32).from_array([0, 1, 2, 3])) + g.const("y", Tensor(I32).from_array([3, 2, 1, 0]).try_reshape(Tensor.Shape.new([2, 2]))) + ) ) ) diff --git a/spec/Tensor.Op.Logical.Spec.savi b/spec/Tensor.Op.Logical.Spec.savi index 0731808..510de8c 100644 --- a/spec/Tensor.Op.Logical.Spec.savi +++ b/spec/Tensor.Op.Logical.Spec.savi @@ -5,8 +5,8 @@ :it "computes boolean 'NOT' operations" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.logical_not!("example" - g.const!("input", Tensor(Bool).from_array([True, False])) + g.logical_not("example" + g.const("input", Tensor(Bool).from_array([True, False])) ) ) @@ -15,17 +15,19 @@ :it "can't do logical 'NOT' operations on non-booleans" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.logical_not!("example" - g.const!("input", Tensor(F64).from_array([1, 2, 3, 4])) + assert error: session.compute!( + g.logical_not("example" + g.const("input", Tensor(F64).from_array([1, 2, 3, 4])) + ) ) ) :it "computes boolean 'AND' operations" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.logical_and!("example" - g.const!("x", Tensor(Bool).from_array([True, True, False, False])) - g.const!("y", Tensor(Bool).from_array([True, False, True, False])) + g.logical_and("example" + g.const("x", Tensor(Bool).from_array([True, True, False, False])) + g.const("y", Tensor(Bool).from_array([True, False, True, False])) ) ) @@ -34,26 +36,32 @@ :it "can't do logical 'AND' operations on non-booleans" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.logical_and!("example" - g.const!("x", Tensor(F64).from_array([1, 2, 3, 4])) - g.const!("y", Tensor(F64).from_array([5, 6, 7, 8])) + assert error: session.compute!( + g.logical_and("example" + g.const("x", Tensor(F64).from_array([1, 2, 3, 4])) + g.const("y", Tensor(F64).from_array([5, 6, 7, 8])) + ) ) ) :it "can't do logical 'AND' operations with operands of different sizes" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.logical_and!("example" - g.const!("x", Tensor(Bool).from_array([True, True, False, False])) - g.const!("y", Tensor(Bool).from_array([True, False, True])) + assert error: session.compute!( + g.logical_and("example" + g.const("x", Tensor(Bool).from_array([True, True, False, False])) + g.const("y", Tensor(Bool).from_array([True, False, True])) + ) ) ) :it "can't do logical 'AND' operations with operands of different shapes" _WithGraphHelper.run(@env, False) -> (g, 
session | - assert error: g.logical_and!("example" - g.const!("x", Tensor(Bool).from_array([True, True, False, False])) - g.const!("y", Tensor(Bool).from_array([True, False, True, False]) - .try_reshape(Tensor.Shape.new([2, 2])) + assert error: session.compute!( + g.logical_and("example" + g.const("x", Tensor(Bool).from_array([True, True, False, False])) + g.const("y", Tensor(Bool).from_array([True, False, True, False]) + .try_reshape(Tensor.Shape.new([2, 2])) + ) ) ) ) @@ -61,9 +69,9 @@ :it "computes boolean 'OR' operations" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.logical_or!("example" - g.const!("x", Tensor(Bool).from_array([True, True, False, False])) - g.const!("y", Tensor(Bool).from_array([True, False, True, False])) + g.logical_or("example" + g.const("x", Tensor(Bool).from_array([True, True, False, False])) + g.const("y", Tensor(Bool).from_array([True, False, True, False])) ) ) @@ -72,26 +80,32 @@ :it "can't do logical 'OR' operations on non-booleans" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.logical_or!("example" - g.const!("x", Tensor(F64).from_array([1, 2, 3, 4])) - g.const!("y", Tensor(F64).from_array([5, 6, 7, 8])) + assert error: session.compute!( + g.logical_or("example" + g.const("x", Tensor(F64).from_array([1, 2, 3, 4])) + g.const("y", Tensor(F64).from_array([5, 6, 7, 8])) + ) ) ) :it "can't do logical 'OR' operations with operands of different sizes" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.logical_or!("example" - g.const!("x", Tensor(Bool).from_array([True, True, False, False])) - g.const!("y", Tensor(Bool).from_array([True, False, True])) + assert error: session.compute!( + g.logical_or("example" + g.const("x", Tensor(Bool).from_array([True, True, False, False])) + g.const("y", Tensor(Bool).from_array([True, False, True])) + ) ) ) :it "can't do logical 'OR' operations with operands of different shapes" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.logical_or!("example" - g.const!("x", Tensor(Bool).from_array([True, True, False, False])) - g.const!("y", Tensor(Bool).from_array([True, False, True, False]) - .try_reshape(Tensor.Shape.new([2, 2])) + assert error: session.compute!( + g.logical_or("example" + g.const("x", Tensor(Bool).from_array([True, True, False, False])) + g.const("y", Tensor(Bool).from_array([True, False, True, False]) + .try_reshape(Tensor.Shape.new([2, 2])) + ) ) ) ) diff --git a/spec/Tensor.Op.MatMul.Spec.savi b/spec/Tensor.Op.MatMul.Spec.savi index c2a205f..2a26f41 100644 --- a/spec/Tensor.Op.MatMul.Spec.savi +++ b/spec/Tensor.Op.MatMul.Spec.savi @@ -8,9 +8,9 @@ :it "computes matrix multiplication" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.matmul!("example" - g.const!("A", @f64_2x2(1.0, 2.0, 3.0, 4.0)) - g.const!("B", @f64_2x2(5.0, 6.0, 7.0, 8.0)) + g.matmul("example" + g.const("A", @f64_2x2(1.0, 2.0, 3.0, 4.0)) + g.const("B", @f64_2x2(5.0, 6.0, 7.0, 8.0)) ) ) @@ -23,9 +23,9 @@ :it "computes matrix multiplication with the first matrix transposed" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.matmul_with_a_transposed!("example" - g.const!("A", @f64_2x2(1.0, 2.0, 3.0, 4.0)) - g.const!("B", @f64_2x2(5.0, 6.0, 7.0, 8.0)) + g.matmul_with_a_transposed("example" + g.const("A", @f64_2x2(1.0, 2.0, 3.0, 4.0)) + g.const("B", @f64_2x2(5.0, 6.0, 7.0, 8.0)) ) ) @@ -38,9 +38,9 @@ :it "computes matrix multiplication with the second matrix 
transposed" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.matmul_with_b_transposed!("example" - g.const!("A", @f64_2x2(1.0, 2.0, 3.0, 4.0)) - g.const!("B", @f64_2x2(5.0, 6.0, 7.0, 8.0)) + g.matmul_with_b_transposed("example" + g.const("A", @f64_2x2(1.0, 2.0, 3.0, 4.0)) + g.const("B", @f64_2x2(5.0, 6.0, 7.0, 8.0)) ) ) @@ -53,9 +53,9 @@ :it "computes matrix multiplication with both matrices transposed" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.matmul_with_both_transposed!("example" - g.const!("A", @f64_2x2(1.0, 2.0, 3.0, 4.0)) - g.const!("B", @f64_2x2(5.0, 6.0, 7.0, 8.0)) + g.matmul_with_both_transposed("example" + g.const("A", @f64_2x2(1.0, 2.0, 3.0, 4.0)) + g.const("B", @f64_2x2(5.0, 6.0, 7.0, 8.0)) ) ) @@ -67,24 +67,30 @@ :it "complains when one of the inputs is a scalar (rank 0 tensor)" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.matmul!("example" - g.const!("A", Tensor(F64).scalar(99)) - g.const!("B", @f64_2x2(5.0, 6.0, 7.0, 8.0)) + assert error: session.compute!( + g.matmul("example" + g.const("A", Tensor(F64).scalar(99)) + g.const("B", @f64_2x2(5.0, 6.0, 7.0, 8.0)) + ) ) ) :it "complains when one of the inputs is a vector (rank 1 tensor)" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.matmul!("example" - g.const!("A", Tensor(F64).from_array([1, 2, 3, 4])) - g.const!("B", @f64_2x2(5.0, 6.0, 7.0, 8.0)) + assert error: session.compute!( + g.matmul("example" + g.const("A", Tensor(F64).from_array([1, 2, 3, 4])) + g.const("B", @f64_2x2(5.0, 6.0, 7.0, 8.0)) + ) ) ) :it "complains when one of the inputs has a rank higher 2" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.matmul!("example" - g.const!("A", @f64_2x2(1.0, 2.0, 3.0, 4.0).try_reshape(Tensor.Shape.new([2, 1, 2]))) - g.const!("B", @f64_2x2(5.0, 6.0, 7.0, 8.0)) + assert error: session.compute!( + g.matmul("example" + g.const("A", @f64_2x2(1.0, 2.0, 3.0, 4.0).try_reshape(Tensor.Shape.new([2, 1, 2]))) + g.const("B", @f64_2x2(5.0, 6.0, 7.0, 8.0)) + ) ) ) diff --git a/spec/Tensor.Op.Multiply.Spec.savi b/spec/Tensor.Op.Multiply.Spec.savi index da285f9..ab8f9c4 100644 --- a/spec/Tensor.Op.Multiply.Spec.savi +++ b/spec/Tensor.Op.Multiply.Spec.savi @@ -5,9 +5,9 @@ :it "computes arithmetic multiplication" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.multiply!("example" - g.const!("x", Tensor(I32).from_array([1, 2, 3, 4])) - g.const!("y", Tensor(I32).from_array([5, 6, 7, 8])) + g.multiply("example" + g.const("x", Tensor(I32).from_array([1, 2, 3, 4])) + g.const("y", Tensor(I32).from_array([5, 6, 7, 8])) ) ) @@ -17,9 +17,9 @@ :it "can broadcast smaller sizes/shapes across larger sizes/shapes" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.multiply!("example" - g.const!("x", Tensor(I32).from_array([1, -3, 5])) - g.const!("y", Tensor(I32).from_array([ + g.multiply("example" + g.const("x", Tensor(I32).from_array([1, -3, 5])) + g.const("y", Tensor(I32).from_array([ 1, 2, 3 4, 5, 6 7, 8, 9 diff --git a/spec/Tensor.Op.Pack.Spec.savi b/spec/Tensor.Op.Pack.Spec.savi index 830f268..a12d600 100644 --- a/spec/Tensor.Op.Pack.Spec.savi +++ b/spec/Tensor.Op.Pack.Spec.savi @@ -8,10 +8,10 @@ :it "combines the list of tensors into one new tensor" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.pack!("example" + g.pack("example" [ - g.const!("input_a", 
@f64_2x2(1, 2, 3, 4)) - g.const!("input_b", @f64_2x2(5, 6, 7, 8)) + g.const("input_a", @f64_2x2(1, 2, 3, 4)) + g.const("input_b", @f64_2x2(5, 6, 7, 8)) ] ) ) @@ -29,10 +29,10 @@ :it "can combine along a different axis" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.pack!("example" + g.pack("example" [ - g.const!("input_a", @f64_2x2(1, 2, 3, 4)) - g.const!("input_b", @f64_2x2(5, 6, 7, 8)) + g.const("input_a", @f64_2x2(1, 2, 3, 4)) + g.const("input_b", @f64_2x2(5, 6, 7, 8)) ] 1 // axis ) @@ -50,31 +50,37 @@ :it "complains when the inputs are of different types" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.pack!("example" - [ - g.const!("input_a", Tensor(F64).from_array([1, 2, 3, 4])) - g.const!("input_b", Tensor(F32).from_array([5, 6, 7, 8])) - ] + assert error: session.compute!( + g.pack("example" + [ + g.const("input_a", Tensor(F64).from_array([1, 2, 3, 4])) + g.const("input_b", Tensor(F32).from_array([5, 6, 7, 8])) + ] + ) ) ) :it "complains when the inputs are of different shapes" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.pack!("example" - [ - g.const!("input_a", @f64_2x2(1, 2, 3, 4)) - g.const!("input_b", @f64_2x2(5, 6, 7, 8).try_reshape(Tensor.Shape.new([1, 4]))) - ] + assert error: session.compute!( + g.pack("example" + [ + g.const("input_a", @f64_2x2(1, 2, 3, 4)) + g.const("input_b", @f64_2x2(5, 6, 7, 8).try_reshape(Tensor.Shape.new([1, 4]))) + ] + ) ) ) :it "complains when the requested axis is greater than the inputs' rank" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.pack!("example" - [ - g.const!("input_a", @f64_2x2(1, 2, 3, 4)) - g.const!("input_b", @f64_2x2(5, 6, 7, 8)) - ] - 3 // axis + assert error: session.compute!( + g.pack("example" + [ + g.const("input_a", @f64_2x2(1, 2, 3, 4)) + g.const("input_b", @f64_2x2(5, 6, 7, 8)) + ] + 3 // axis + ) ) ) diff --git a/spec/Tensor.Op.Random.Spec.savi b/spec/Tensor.Op.Random.Spec.savi index 3a7156e..9f1c8c8 100644 --- a/spec/Tensor.Op.Random.Spec.savi +++ b/spec/Tensor.Op.Random.Spec.savi @@ -3,14 +3,14 @@ :const describes: "Tensor.Op.Random" :fun random!(g Tensor.Graph.Helper.Methods) - g.gen_random!("random" - g.const!("seed", Tensor(U32).from_array([2, 3])) + g.gen_random("random" + g.const("seed", Tensor(U32).from_array([2, 3])) ) :it "generates random floating-point values between zero and one" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.random_uniform!("example", @random!(g) + g.random_uniform("example", @random!(g) Tensor(F64) Tensor.Shape.new([5, 3]) ) @@ -29,7 +29,7 @@ :it "generates random integer values" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.random_uniform_integers!("example", @random!(g) + g.random_uniform_integers("example", @random!(g) Tensor(U32) Tensor.Shape.new([5, 3]) ) @@ -48,7 +48,7 @@ :it "generates random integer values within a given bounds" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.random_uniform_bounded_integers!("example", @random!(g) + g.random_uniform_bounded_integers("example", @random!(g) Tensor(I32) Tensor.Shape.new([5, 12]) Tensor(I32).scalar(0) @@ -68,8 +68,8 @@ :it "can't generate uniform floating-point values with an integer type" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: ( - g.random_uniform!("example", @random!(g) + assert error: session.compute!( + g.random_uniform("example", @random!(g) Tensor(I32) 
Tensor.Shape.new([5, 3]) ) @@ -78,8 +78,8 @@ :it "can't generate uniform integer values with a floating-point type" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: ( - g.random_uniform_integers!("example", @random!(g) + assert error: session.compute!( + g.random_uniform_integers("example", @random!(g) Tensor(F64) Tensor.Shape.new([5, 3]) ) @@ -88,8 +88,8 @@ :it "can't generate uniform bounded integer values with a floating-point type" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: ( - g.random_uniform_bounded_integers!("example", @random!(g) + assert error: session.compute!( + g.random_uniform_bounded_integers("example", @random!(g) Tensor(F64) Tensor.Shape.new([5, 3]) Tensor(F64).scalar(0) diff --git a/spec/Tensor.Op.Reshape.Spec.savi b/spec/Tensor.Op.Reshape.Spec.savi index d27e7ee..b48b751 100644 --- a/spec/Tensor.Op.Reshape.Spec.savi +++ b/spec/Tensor.Op.Reshape.Spec.savi @@ -5,8 +5,8 @@ :it "emits a variation on the tensor which has a changed shape" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.reshape!("example" - g.const!("input", Tensor(F64).from_array([1, 2, 3, 4, 5, 6])) + g.reshape("example" + g.const("input", Tensor(F64).from_array([1, 2, 3, 4, 5, 6])) Tensor.Shape.new([2, 3]) ) ) @@ -18,8 +18,8 @@ :it "can use -1 to indicate 'all remaining size' in the output shape" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.reshape!("example" - g.const!("input", Tensor(F64).from_array([1, 2, 3, 4, 5, 6])) + g.reshape("example" + g.const("input", Tensor(F64).from_array([1, 2, 3, 4, 5, 6])) Tensor.Shape.new([-1, 3]) ) ) @@ -30,8 +30,10 @@ :it "complains when the requested shape doesn't align with the current size" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.reshape!("example" - g.const!("input", Tensor(F64).from_array([1, 2, 3, 4, 5, 6, 7, 8])) - Tensor.Shape.new([2, 3]) + assert error: session.compute!( + g.reshape("example" + g.const("input", Tensor(F64).from_array([1, 2, 3, 4, 5, 6, 7, 8])) + Tensor.Shape.new([2, 3]) + ) ) ) diff --git a/spec/Tensor.Op.Select.Spec.savi b/spec/Tensor.Op.Select.Spec.savi index 6c75ff8..b6a8874 100644 --- a/spec/Tensor.Op.Select.Spec.savi +++ b/spec/Tensor.Op.Select.Spec.savi @@ -5,10 +5,10 @@ :it "uses the condition input to select between one value operand and another" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.select!("example" - g.const!("cond", Tensor(Bool).from_array([True, False, True, False])) - g.const!("t", Tensor(F64).from_array([1, 2, 3, 4])) - g.const!("f", Tensor(F64).from_array([5, 6, 7, 8])) + g.select("example" + g.const("cond", Tensor(Bool).from_array([True, False, True, False])) + g.const("t", Tensor(F64).from_array([1, 2, 3, 4])) + g.const("f", Tensor(F64).from_array([5, 6, 7, 8])) ) ) @@ -17,33 +17,37 @@ :it "complains if the condition input is a non-boolean type" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.select!("example" - g.const!("cond", Tensor(F64).from_array([1, 0, 1, 0])) - g.const!("t", Tensor(F64).from_array([1, 2, 3, 4])) - g.const!("f", Tensor(F64).from_array([5, 6, 7, 8])) + assert error: session.compute!( + g.select("example" + g.const("cond", Tensor(F64).from_array([1, 0, 1, 0])) + g.const("t", Tensor(F64).from_array([1, 2, 3, 4])) + g.const("f", Tensor(F64).from_array([5, 6, 7, 8])) + ) ) ) :it "complains if the two value operands are not the same type" _WithGraphHelper.run(@env, False) -> 
(g, session | - assert error: g.select!("example" - g.const!("cond", Tensor(Bool).from_array([True, False, True, False])) - g.const!("t", Tensor(F64).from_array([1, 2, 3, 4])) - g.const!("f", Tensor(F32).from_array([5, 6, 7, 8])) + assert error: session.compute!( + g.select("example" + g.const("cond", Tensor(Bool).from_array([True, False, True, False])) + g.const("t", Tensor(F64).from_array([1, 2, 3, 4])) + g.const("f", Tensor(F32).from_array([5, 6, 7, 8])) + ) ) ) :it "can broadcast smaller sizes/shapes across larger sizes/shapes" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.select!("example" - g.const!("cond", Tensor(Bool).from_array([True, False, True])) - g.const!("t", Tensor(F64).from_array([ + g.select("example" + g.const("cond", Tensor(Bool).from_array([True, False, True])) + g.const("t", Tensor(F64).from_array([ 1, 2, 3 4, 5, 6 7, 8, 9 ]).try_reshape(Tensor.Shape.new([3, 3]))) - g.const!("f", Tensor(F64).from_array([ + g.const("f", Tensor(F64).from_array([ 100 200 300 @@ -60,9 +64,11 @@ :it "can't broadcast across incompatible sizes/shapes" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.select!("example" - g.const!("cond", Tensor(Bool).from_array([True, False, True, False])) - g.const!("t", Tensor(F64).from_array([1, 2, 3, 4])) - g.const!("f", Tensor(F32).from_array([5, 6, 7])) + assert error: session.compute!( + g.select("example" + g.const("cond", Tensor(Bool).from_array([True, False, True, False])) + g.const("t", Tensor(F64).from_array([1, 2, 3, 4])) + g.const("f", Tensor(F32).from_array([5, 6, 7])) + ) ) ) \ No newline at end of file diff --git a/spec/Tensor.Op.Shape.Spec.savi b/spec/Tensor.Op.Shape.Spec.savi index 7b513fa..e7719e9 100644 --- a/spec/Tensor.Op.Shape.Spec.savi +++ b/spec/Tensor.Op.Shape.Spec.savi @@ -5,8 +5,8 @@ :it "emits the shape of the input tensor" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.shape!("shape" - g.const!("input", Tensor(F64).from_array([1, 2, 3, 4, 5, 6]).reshape!(Tensor.Shape.new([2, 3]))) + g.shape("shape" + g.const("input", Tensor(F64).from_array([1, 2, 3, 4, 5, 6]).try_reshape(Tensor.Shape.new([2, 3]))) ) ) diff --git a/spec/Tensor.Op.Slice.Spec.savi b/spec/Tensor.Op.Slice.Spec.savi index 4ead56e..0fe410b 100644 --- a/spec/Tensor.Op.Slice.Spec.savi +++ b/spec/Tensor.Op.Slice.Spec.savi @@ -5,8 +5,8 @@ :it "slices a contiguous portion of the input tensor" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.slice!("example" - g.const!("input" + g.slice("example" + g.const("input" Tensor(F64).from_array([ 10, 11, 12, 13, 14 15, 16, 17, 18, 19 @@ -39,11 +39,11 @@ ] )) - :it "complains on session compute if the output shape is out of bounds" + :it "complains if the output shape is out of bounds" _WithGraphHelper.run(@env, False) -> (g, session | assert error: session.compute!( - g.slice!("example" - g.const!("input" + g.slice("example" + g.const("input" Tensor(F64).from_array([ 10, 11, 12, 13, 14 15, 16, 17, 18, 19 diff --git a/spec/Tensor.Op.Softmax.Spec.savi b/spec/Tensor.Op.Softmax.Spec.savi index e233f63..3ce69c8 100644 --- a/spec/Tensor.Op.Softmax.Spec.savi +++ b/spec/Tensor.Op.Softmax.Spec.savi @@ -5,8 +5,8 @@ :it "computes the softmax function of a vector (tensor rank 1)" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.softmax!("example" - g.const!("input", Tensor(F64).from_array([1, 2, 3, 4, 5])) + g.softmax("example" + 
g.const("input", Tensor(F64).from_array([1, 2, 3, 4, 5])) ) ) @@ -21,9 +21,9 @@ :it "complains when applied to a scalar (rank 0 tensor)" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: ( - g.softmax!("example" - g.const!("input", Tensor(F64).scalar(99)) + assert error: session.compute!( + g.softmax("example" + g.const("input", Tensor(F64).scalar(99)) ) ) ) @@ -31,8 +31,8 @@ :it "when applied to a higher rank, computes each inner row separately" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.softmax!("example" - g.const!("input" + g.softmax("example" + g.const("input" Tensor(F64).from_array([ 1, 2, 3 1, 2, 0 // with implicit bias, this is equivalent to 2, 3, 1 diff --git a/spec/Tensor.Op.SplitV.Spec.savi b/spec/Tensor.Op.SplitV.Spec.savi index ec643b4..14758c4 100644 --- a/spec/Tensor.Op.SplitV.Spec.savi +++ b/spec/Tensor.Op.SplitV.Spec.savi @@ -4,8 +4,8 @@ :it "splits a tensor into varying sized slices along the given axis" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( - op = g.split_varying!("example" - g.const!("input" + op = g.split_varying("example" + g.const("input" Tensor(F64).from_array([ 10, 11, 12, 13, 14 15, 16, 17, 18, 19 @@ -85,78 +85,84 @@ :it "complains if the axis parameter is out of bounds" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.split_varying!("example" - g.const!("input" - Tensor(F64).from_array([ - 10, 11, 12, 13, 14 - 15, 16, 17, 18, 19 - 20, 21, 22, 23, 24 - 25, 26, 27, 28, 29 - - 30, 31, 32, 33, 34 - 35, 36, 37, 38, 39 - 40, 41, 42, 43, 44 - 45, 46, 47, 48, 49 - - 50, 51, 52, 53, 54 - 55, 56, 57, 58, 59 - 60, 61, 62, 63, 64 - 65, 66, 67, 68, 69 - ]).try_reshape(Tensor.Shape.new([3, 4, 5])) + assert error: session.compute!( + g.split_varying("example" + g.const("input" + Tensor(F64).from_array([ + 10, 11, 12, 13, 14 + 15, 16, 17, 18, 19 + 20, 21, 22, 23, 24 + 25, 26, 27, 28, 29 + + 30, 31, 32, 33, 34 + 35, 36, 37, 38, 39 + 40, 41, 42, 43, 44 + 45, 46, 47, 48, 49 + + 50, 51, 52, 53, 54 + 55, 56, 57, 58, 59 + 60, 61, 62, 63, 64 + 65, 66, 67, 68, 69 + ]).try_reshape(Tensor.Shape.new([3, 4, 5])) + ) + 3 // out of bounds + [-1] ) - 3 // out of bounds - [-1] ) ) :it "complains if the sum of slice sizes is too small" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.split_varying!("example" - g.const!("input" - Tensor(F64).from_array([ - 10, 11, 12, 13, 14 - 15, 16, 17, 18, 19 - 20, 21, 22, 23, 24 - 25, 26, 27, 28, 29 - - 30, 31, 32, 33, 34 - 35, 36, 37, 38, 39 - 40, 41, 42, 43, 44 - 45, 46, 47, 48, 49 - - 50, 51, 52, 53, 54 - 55, 56, 57, 58, 59 - 60, 61, 62, 63, 64 - 65, 66, 67, 68, 69 - ]).try_reshape(Tensor.Shape.new([3, 4, 5])) + assert error: session.compute!( + g.split_varying("example" + g.const("input" + Tensor(F64).from_array([ + 10, 11, 12, 13, 14 + 15, 16, 17, 18, 19 + 20, 21, 22, 23, 24 + 25, 26, 27, 28, 29 + + 30, 31, 32, 33, 34 + 35, 36, 37, 38, 39 + 40, 41, 42, 43, 44 + 45, 46, 47, 48, 49 + + 50, 51, 52, 53, 54 + 55, 56, 57, 58, 59 + 60, 61, 62, 63, 64 + 65, 66, 67, 68, 69 + ]).try_reshape(Tensor.Shape.new([3, 4, 5])) + ) + 2 + [1, 2, 1] // sum is 4 but sum of 5 is expected ) - 2 - [1, 2, 1] // sum is 4 but sum of 5 is expected ) ) :it "complains if the sum of slice sizes is too large" _WithGraphHelper.run(@env, False) -> (g, session | - assert error: g.split_varying!("example" - g.const!("input" - Tensor(F64).from_array([ - 10, 11, 12, 13, 14 - 15, 16, 17, 18, 19 - 20, 21, 22, 23, 24 - 25, 26, 27, 28, 29 - - 30, 31, 32, 33, 34 - 
35, 36, 37, 38, 39 - 40, 41, 42, 43, 44 - 45, 46, 47, 48, 49 - - 50, 51, 52, 53, 54 - 55, 56, 57, 58, 59 - 60, 61, 62, 63, 64 - 65, 66, 67, 68, 69 - ]).try_reshape(Tensor.Shape.new([3, 4, 5])) + assert error: session.compute!( + g.split_varying("example" + g.const("input" + Tensor(F64).from_array([ + 10, 11, 12, 13, 14 + 15, 16, 17, 18, 19 + 20, 21, 22, 23, 24 + 25, 26, 27, 28, 29 + + 30, 31, 32, 33, 34 + 35, 36, 37, 38, 39 + 40, 41, 42, 43, 44 + 45, 46, 47, 48, 49 + + 50, 51, 52, 53, 54 + 55, 56, 57, 58, 59 + 60, 61, 62, 63, 64 + 65, 66, 67, 68, 69 + ]).try_reshape(Tensor.Shape.new([3, 4, 5])) + ) + 2 + [2, 2, 2] // sum is 6 but sum of 5 is expected ) - 2 - [2, 2, 2] // sum is 6 but sum of 5 is expected ) ) \ No newline at end of file diff --git a/spec/Tensor.Op.Square.Spec.savi b/spec/Tensor.Op.Square.Spec.savi index 272ff75..bc1043d 100644 --- a/spec/Tensor.Op.Square.Spec.savi +++ b/spec/Tensor.Op.Square.Spec.savi @@ -5,8 +5,8 @@ :it "computes arithmetic squares" _WithGraphHelper.run(@env) -> (g, session | assert no_error: ( result = session.compute!( - g.square!("example" - g.const!("input", Tensor(I32).from_array([1, -2, 3, -4])) + g.square("example" + g.const("input", Tensor(I32).from_array([1, -2, 3, -4])) ) ) diff --git a/src/Tensor.Comp.Tensordot.Outer.savi b/src/Tensor.Comp.Tensordot.Outer.savi index 83201a5..c8e32bc 100644 --- a/src/Tensor.Comp.Tensordot.Outer.savi +++ b/src/Tensor.Comp.Tensordot.Outer.savi @@ -6,29 +6,29 @@ :: It is effectively a matrix multiplication, with the first dimension of the :: first tensor and the last dimension of the second tensor as the matrix axes. :module Tensor.Comp.TensorDot.Outer - :fun build!(g Tensor.Graph.Helper.Methods, name String, a, b) - a_shape = g.shape!("\(name).a_shape", a) - b_shape = g.shape!("\(name).b_shape", b) + :fun build(g Tensor.Graph.Helper.Methods, name String, a, b) + a_shape = g.shape("\(name).a_shape", a) + b_shape = g.shape("\(name).b_shape", b) - a_shape_split = g.split_varying!("\(name).a_shape_split", a_shape, 0, [-1, 1]) - b_shape_split = g.split_varying!("\(name).b_shape_split", b_shape, 0, [1, -1]) + a_shape_split = g.split_varying("\(name).a_shape_split", a_shape, 0, [-1, 1]) + b_shape_split = g.split_varying("\(name).b_shape_split", b_shape, 0, [1, -1]) a_free_dims = a_shape_split.output_slice(0) a_target_dim = a_shape_split.output_slice(1) b_target_dim = b_shape_split.output_slice(0) b_free_dims = b_shape_split.output_slice(1) - neg_one = g.const!("\(name).neg_one", Tensor(I32).from_array([-1])) - zero_axis = g.const!("\(name).zero_axis", Tensor(I32).scalar(0)) + neg_one = g.const("\(name).neg_one", Tensor(I32).from_array([-1])) + zero_axis = g.const("\(name).zero_axis", Tensor(I32).scalar(0)) - g.reshape_dynamic!("\(name).result" - g.matmul!("\(name).matmul" - g.reshape_dynamic!("\(name).a_reshape", a - g.concat_dynamic!("\(name).a_new_shape", [neg_one, a_target_dim], zero_axis) + g.reshape_dynamic("\(name).result" + g.matmul("\(name).matmul" + g.reshape_dynamic("\(name).a_reshape", a + g.concat_dynamic("\(name).a_new_shape", [neg_one, a_target_dim], zero_axis) ) - g.reshape_dynamic!("\(name).b_reshape", b - g.concat_dynamic!("\(name).b_new_shape", [b_target_dim, neg_one], zero_axis) + g.reshape_dynamic("\(name).b_reshape", b + g.concat_dynamic("\(name).b_new_shape", [b_target_dim, neg_one], zero_axis) ) ) - g.concat_dynamic!("\(name).c_new_shape", [a_free_dims, b_free_dims], zero_axis) + g.concat_dynamic("\(name).c_new_shape", [a_free_dims, b_free_dims], zero_axis) ) diff --git a/src/Tensor.Gen.Random.savi 
b/src/Tensor.Gen.Random.savi index 2a224b7..c717de5 100644 --- a/src/Tensor.Gen.Random.savi +++ b/src/Tensor.Gen.Random.savi @@ -20,7 +20,7 @@ :: Seed a new pseudo-random source in the given graph, with the given seed. :: :: The seed must be a graph node output of type `Tensor(U32)` and shape `[2]`. - :fun non new!( + :fun non new( graph Tensor.Graph name String seed Tensor.Graph.CanOutput @@ -30,29 +30,29 @@ case algorithm == ( | Tensor.Gen.Random.Algorithm.Philox | // TODO: Try to simplify this once I've got a test case up and running with it. - philox_scrambled = Tensor.Op.Random.Uniform.Integers.new!( + philox_scrambled = Tensor.Op.Random.Uniform.Integers.new( g.graph, "\(name).seed_scramble" algorithm - g.const!("\(name).seed_scramble.const", Tensor(U64).from_array([ + g.const("\(name).seed_scramble.const", Tensor(U64).from_array([ 0x02461e293ec8f720 ])) - g.cast!("\(name).seed_scramble.cast", seed, Tensor(U64)) + g.cast("\(name).seed_scramble.cast", seed, Tensor(U64)) Tensor(U32) - g.const!("\(name).seed_scramble.shape", Tensor(I32).from_array([4])) + g.const("\(name).seed_scramble.shape", Tensor(I32).from_array([4])) ) - philox_key = g.reshape!("\(name).key.reshape" - g.bitcast!("\(name).key.bitcast" - g.slice!("\(name).key.slice", philox_scrambled, [0], Tensor.Shape.new([2])) + philox_key = g.reshape("\(name).key.reshape" + g.bitcast("\(name).key.bitcast" + g.slice("\(name).key.slice", philox_scrambled, [0], Tensor.Shape.new([2])) Tensor(U64) ) Tensor.Shape.new([1]) ) - philox_base_counter = g.pack!("\(name).counter", [ - g.const!("\(name).counter.zero" + philox_base_counter = g.pack("\(name).counter", [ + g.const("\(name).counter.zero" Tensor(U64).scalar(0) ) - g.bitcast!("\(name).counter.bitcast" - g.slice!("\(name).counter.slice", philox_scrambled, [2], Tensor.Shape.new([2])) + g.bitcast("\(name).counter.bitcast" + g.slice("\(name).counter.slice", philox_scrambled, [2], Tensor.Shape.new([2])) Tensor(U64) ) ]) @@ -71,24 +71,23 @@ graph name algorithm - Tensor.Op.Bitcast.new!(g.graph, "\(name).key" - Tensor.Op.Cast.new!(g.graph, "\(name).seed.cast" + Tensor.Op.Bitcast.new(g.graph, "\(name).key" + Tensor.Op.Cast.new(g.graph, "\(name).seed.cast" seed Tensor(U32) ) Tensor(U64) ) - Tensor.Op.Const.new!(g.graph, "\(name).counter" + Tensor.Op.Const.new(g.graph, "\(name).counter" Tensor(U64).from_array([0]) ) ) | - g.graph.errors << Tensor.Graph.Error.new( + invalid = Tensor.Graph.Operation._new_invalid(graph, name, Tensor.Graph.Error.new( Tensor.Graph.Error.Code.InvalidArgument "PRNG algorithm \(algorithm) has no seeding system implemented for it" - ) - - error! + )).output(0) + @_new(graph, name, algorithm, invalid, invalid) ) :: Get a counter graph output suitable for use in pseudo-random generation, @@ -100,7 +99,7 @@ :: This is what ensures that different graph nodes that use the same random :: source will get distinct values when generating (but those distinct :: values will still be deterministic based on the seed and graph layout). - :fun ref use_counter!(reserve_count U64) + :fun ref use_counter(reserve_count U64) g = Tensor.Graph.Helper.new(@_graph) // TODO: Why does TensorFlow use 256? Can we justify it explicitly? @@ -117,9 +116,9 @@ // taking `add([0, offset])` or `add([1, offset])` based on whether the // former has a result whose second index is greater than that index // of the original tensor value prior to adding. 
- g.add!(name + g.add(name @_base_counter - g.const!("\(name).const" + g.const("\(name).const" if (@algorithm == Tensor.Gen.Random.Algorithm.Philox) ( Tensor(U64).from_array([0, offset]) | diff --git a/src/Tensor.Graph.Helper.savi b/src/Tensor.Graph.Helper.savi index 644e774..d7ff664 100644 --- a/src/Tensor.Graph.Helper.savi +++ b/src/Tensor.Graph.Helper.savi @@ -9,22 +9,22 @@ /// // Error-related convenience methods - :fun ref error_invalid_argument!(message) None + :fun ref error_invalid_argument(message) None @graph.errors << Tensor.Graph.Error.new( Tensor.Graph.Error.Code.InvalidArgument, message ) - :fun ref error_out_of_range!(message) None + :fun ref error_out_of_range(message) None @graph.errors << Tensor.Graph.Error.new( Tensor.Graph.Error.Code.OutOfRange, message ) - :fun ref error_not_found!(message) None + :fun ref error_not_found(message) None @graph.errors << Tensor.Graph.Error.new( Tensor.Graph.Error.Code.NotFound, message ) - :fun ref error_unimplemented!(message) None + :fun ref error_unimplemented(message) None @graph.errors << Tensor.Graph.Error.new( Tensor.Graph.Error.Code.Unimplemented, message ) @@ -32,59 +32,59 @@ /// // Value Sources - :fun ref const!(name, value) - Tensor.Op.Const.new!(@graph, name, value) + :fun ref const(name, value) + Tensor.Op.Const.new(@graph, name, value) - :fun ref placeholder!(name, output_type, output_shape) - Tensor.Op.Placeholder.new!(@graph, name, output_type, output_shape) + :fun ref placeholder(name, output_type, output_shape) + Tensor.Op.Placeholder.new(@graph, name, output_type, output_shape) - :fun ref variable!(name, output_type, output_shape) - Tensor.Op.Variable.new!(@graph, name, output_type, output_shape) + :fun ref variable(name, output_type, output_shape) + Tensor.Op.Variable.new(@graph, name, output_type, output_shape) /// // Variable Mutations - :fun ref apply_gradient_descent!(name, gradient, var, learning_rate) - Tensor.Op.Optimize.GradientDescent.new!(@graph, name, gradient, var, learning_rate) + :fun ref apply_gradient_descent(name, gradient, var, learning_rate) + Tensor.Op.Optimize.GradientDescent.new(@graph, name, gradient, var, learning_rate) /// // Pseudo-Random Sources - :fun ref gen_random!(name + :fun ref gen_random(name seed algorithm Tensor.Gen.Random.Algorithm = Tensor.Gen.Random.Algorithm.Philox ) - Tensor.Gen.Random.new!(@graph, name, seed, algorithm) + Tensor.Gen.Random.new(@graph, name, seed, algorithm) - :fun ref random_uniform!(name + :fun ref random_uniform(name gen_random Tensor.Gen.Random output_type output_shape Tensor.Shape'box ) output_shape_tensor = output_shape.to_tensor - Tensor.Op.Random.Uniform.new!(@graph, name + Tensor.Op.Random.Uniform.new(@graph, name gen_random.algorithm gen_random.key - gen_random.use_counter!(output_shape_tensor.element_count.u64) + gen_random.use_counter(output_shape_tensor.element_count.u64) output_type - @const!("\(name).output_shape", output_shape_tensor) + @const("\(name).output_shape", output_shape_tensor) ) - :fun ref random_uniform_integers!(name + :fun ref random_uniform_integers(name gen_random Tensor.Gen.Random output_type output_shape Tensor.Shape'box ) output_shape_tensor = output_shape.to_tensor - Tensor.Op.Random.Uniform.Integers.new!(@graph, name + Tensor.Op.Random.Uniform.Integers.new(@graph, name gen_random.algorithm gen_random.key - gen_random.use_counter!(output_shape_tensor.element_count.u64) + gen_random.use_counter(output_shape_tensor.element_count.u64) output_type - @const!("\(name).output_shape", output_shape_tensor) + 
@const("\(name).output_shape", output_shape_tensor) ) - :fun ref random_uniform_bounded_integers!(name + :fun ref random_uniform_bounded_integers(name gen_random Tensor.Gen.Random output_type output_shape Tensor.Shape'box @@ -92,156 +92,156 @@ max_val Tensor.Any ) output_shape_tensor = output_shape.to_tensor - Tensor.Op.Random.Uniform.BoundedIntegers.new!(@graph, name + Tensor.Op.Random.Uniform.BoundedIntegers.new(@graph, name gen_random.algorithm gen_random.key - gen_random.use_counter!(output_shape_tensor.element_count.u64) + gen_random.use_counter(output_shape_tensor.element_count.u64) output_type - @const!("\(name).output_shape", output_shape_tensor) - @const!("\(name).min", min_val) - @const!("\(name).max", max_val) + @const("\(name).output_shape", output_shape_tensor) + @const("\(name).min", min_val) + @const("\(name).max", max_val) ) /// // Logical Operations - :fun ref logical_not!(name, input) - Tensor.Op.Logical.Not.new!(@graph, name, input) + :fun ref logical_not(name, input) + Tensor.Op.Logical.Not.new(@graph, name, input) - :fun ref logical_and!(name, x, y) - Tensor.Op.Logical.And.new!(@graph, name, x, y) + :fun ref logical_and(name, x, y) + Tensor.Op.Logical.And.new(@graph, name, x, y) - :fun ref logical_or!(name, x, y) - Tensor.Op.Logical.Or.new!(@graph, name, x, y) + :fun ref logical_or(name, x, y) + Tensor.Op.Logical.Or.new(@graph, name, x, y) /// // Arithmetic Unary Operations - :fun ref square!(name, input) - Tensor.Op.Square.new!(@graph, name, input) + :fun ref square(name, input) + Tensor.Op.Square.new(@graph, name, input) /// // Arithmetic Binary Operations - :fun ref add!(name, x, y) - Tensor.Op.Add.new!(@graph, name, x, y) + :fun ref add(name, x, y) + Tensor.Op.Add.new(@graph, name, x, y) - :fun ref multiply!(name, x, y) - Tensor.Op.Multiply.new!(@graph, name, x, y) + :fun ref multiply(name, x, y) + Tensor.Op.Multiply.new(@graph, name, x, y) /// // Comparative Operations - :fun ref greater!(name, x, y) - Tensor.Op.Greater.new!(@graph, name, x, y, False) + :fun ref greater(name, x, y) + Tensor.Op.Greater.new(@graph, name, x, y, False) - :fun ref greater_or_equal!(name, x, y) - Tensor.Op.Greater.new!(@graph, name, x, y, True) + :fun ref greater_or_equal(name, x, y) + Tensor.Op.Greater.new(@graph, name, x, y, True) - :fun ref lesser!(name, x, y) - Tensor.Op.Lesser.new!(@graph, name, x, y, False) + :fun ref lesser(name, x, y) + Tensor.Op.Lesser.new(@graph, name, x, y, False) - :fun ref lesser_or_equal!(name, x, y) - Tensor.Op.Lesser.new!(@graph, name, x, y, True) + :fun ref lesser_or_equal(name, x, y) + Tensor.Op.Lesser.new(@graph, name, x, y, True) /// // Type/Shape Conversions - :fun ref bitcast!(name, input, output_type) - Tensor.Op.Bitcast.new!(@graph, name, input, output_type) + :fun ref bitcast(name, input, output_type) + Tensor.Op.Bitcast.new(@graph, name, input, output_type) - :fun ref cast!(name, input, output_type) - Tensor.Op.Cast.new!(@graph, name, input, output_type, False) + :fun ref cast(name, input, output_type) + Tensor.Op.Cast.new(@graph, name, input, output_type, False) - :fun ref cast_with_floating_point_truncation!(name, input, output_type) - Tensor.Op.Cast.new!(@graph, name, input, output_type, True) + :fun ref cast_with_floating_point_truncation(name, input, output_type) + Tensor.Op.Cast.new(@graph, name, input, output_type, True) - :fun ref shape!(name, input) - Tensor.Op.Shape.new!(@graph, name, input) + :fun ref shape(name, input) + Tensor.Op.Shape.new(@graph, name, input) - :fun ref reshape!(name, input, output_shape Tensor.Shape'box) - 
Tensor.Op.Reshape.new!(@graph, name, input - @const!("\(name).new_shape", output_shape.to_tensor) + :fun ref reshape(name, input, output_shape Tensor.Shape'box) + Tensor.Op.Reshape.new(@graph, name, input + @const("\(name).new_shape", output_shape.to_tensor) ) - :fun ref reshape_dynamic!(name, input, output_shape) - Tensor.Op.Reshape.new!(@graph, name, input, output_shape) + :fun ref reshape_dynamic(name, input, output_shape) + Tensor.Op.Reshape.new(@graph, name, input, output_shape) /// // Fan-out/Fan-in Operations - :fun ref pack!(name, inputs, axis USize = 0) - Tensor.Op.Pack.new!(@graph, name, inputs, axis) + :fun ref pack(name, inputs, axis USize = 0) + Tensor.Op.Pack.new(@graph, name, inputs, axis) - :fun ref concat!(name, inputs, axis USize = 0) - Tensor.Op.Concat.new!(@graph, name, inputs - @const!("\(name).axis" + :fun ref concat(name, inputs, axis USize = 0) + Tensor.Op.Concat.new(@graph, name, inputs + @const("\(name).axis" Tensor(I32).scalar(axis.i32) ) ) - :fun ref concat_dynamic!(name, inputs, axis) - Tensor.Op.Concat.new!(@graph, name, inputs, axis) + :fun ref concat_dynamic(name, inputs, axis) + Tensor.Op.Concat.new(@graph, name, inputs, axis) - :fun ref slice!( + :fun ref slice( name input begin_indices Array(USize) output_shape Tensor.Shape'box ) - @slice_dynamic!(name, input - @const!("\(name).begin_indices" - Tensor(I64).generate(begin_indices.size) -> (i | begin_indices[i]!.i64) + @slice_dynamic(name, input + @const("\(name).begin_indices" + Tensor(I64).generate(begin_indices.size) -> (i | try (begin_indices[i]!.i64 | 0)) ) - @const!("\(name).output_shape", output_shape.to_tensor) + @const("\(name).output_shape", output_shape.to_tensor) ) - :fun ref slice_dynamic!(name, input, begin_indices, output_shape) - Tensor.Op.Slice.new!(@graph, name, input, begin_indices, output_shape) + :fun ref slice_dynamic(name, input, begin_indices, output_shape) + Tensor.Op.Slice.new(@graph, name, input, begin_indices, output_shape) - :fun ref split_varying!(name, input, axis USize, split_sizes Array(USize)) - Tensor.Op.SplitV.new!(@graph, name, input - @const!("\(name).axis" + :fun ref split_varying(name, input, axis USize, split_sizes Array(USize)) + Tensor.Op.SplitV.new(@graph, name, input + @const("\(name).axis" Tensor(I32).scalar(axis.i32) ) - @const!("\(name).split_sizes" - Tensor(I64).generate(split_sizes.size) -> (i | split_sizes[i]!.i64) + @const("\(name).split_sizes" + Tensor(I64).generate(split_sizes.size) -> (i | try (split_sizes[i]!.i64 | 0)) ) split_sizes.size ) - :fun ref split_varying_dynamic!(name, input, axis, split_sizes, split_sizes_count) - Tensor.Op.SplitV.new!(@graph, name, input, axis, split_sizes, split_sizes_count) + :fun ref split_varying_dynamic(name, input, axis, split_sizes, split_sizes_count) + Tensor.Op.SplitV.new(@graph, name, input, axis, split_sizes, split_sizes_count) /// // Other Unary Operations - :fun ref softmax!(name, input) - Tensor.Op.Softmax.new!(@graph, name, input) + :fun ref softmax(name, input) + Tensor.Op.Softmax.new(@graph, name, input) /// // Other Binary Operations - :fun ref matmul!(name, a, b) - Tensor.Op.MatMul.new!(@graph, name, a, b, False, False) + :fun ref matmul(name, a, b) + Tensor.Op.MatMul.new(@graph, name, a, b, False, False) - :fun ref matmul_with_a_transposed!(name, a, b) - Tensor.Op.MatMul.new!(@graph, name, a, b, True, False) + :fun ref matmul_with_a_transposed(name, a, b) + Tensor.Op.MatMul.new(@graph, name, a, b, True, False) - :fun ref matmul_with_b_transposed!(name, a, b) - Tensor.Op.MatMul.new!(@graph, name, a, b, 
False, True) + :fun ref matmul_with_b_transposed(name, a, b) + Tensor.Op.MatMul.new(@graph, name, a, b, False, True) - :fun ref matmul_with_both_transposed!(name, a, b) - Tensor.Op.MatMul.new!(@graph, name, a, b, True, True) + :fun ref matmul_with_both_transposed(name, a, b) + Tensor.Op.MatMul.new(@graph, name, a, b, True, True) /// // Other Ternary Operations - :fun ref select!(name, condition, true_case, false_case) - Tensor.Op.Select.new!(@graph, name, condition, true_case, false_case) + :fun ref select(name, condition, true_case, false_case) + Tensor.Op.Select.new(@graph, name, condition, true_case, false_case) /// // Composite Operations - :fun ref tensordot_outer!(name, a, b) - Tensor.Comp.TensorDot.Outer.build!(@, name, a, b) + :fun ref tensordot_outer(name, a, b) + Tensor.Comp.TensorDot.Outer.build(@, name, a, b) diff --git a/src/Tensor.Graph.Input.savi b/src/Tensor.Graph.Input.savi index 4033366..672f256 100644 --- a/src/Tensor.Graph.Input.savi +++ b/src/Tensor.Graph.Input.savi @@ -3,14 +3,18 @@ :let index USize :new box (@op, @index) - :fun _to_ffi + :fun _to_ffi! _FFI.Input._new(@op._ptr, @index.i32) :fun source - Tensor.Graph.Output._from_ffi_in_graph( - // TODO: Remove this cap-hacking cast: - _FFI.Cast(Tensor.Graph'box, Tensor.Graph).pointer(@op.graph) - @_to_ffi.source + try ( + Tensor.Graph.Output._from_ffi_in_graph( + // TODO: Remove this cap-hacking cast: + _FFI.Cast(Tensor.Graph'box, Tensor.Graph).pointer(@op.graph) + @_to_ffi!.source + ) + | + Tensor.Graph.Output.new(@op, USize.max_value) // (invalid output) ) :fun with_value(value): Tensor.Graph.Input.WithValue.new(@, value) diff --git a/src/Tensor.Graph.Operation.savi b/src/Tensor.Graph.Operation.savi index 776185d..46bac7d 100644 --- a/src/Tensor.Graph.Operation.savi +++ b/src/Tensor.Graph.Operation.savi @@ -6,9 +6,12 @@ // No need for a finalizer - memory for this `_ptr` is owned by `graph._ptr` + :fun is_valid: @_ptr.is_not_null + :fun input(index): Tensor.Graph.Input.new(@, index) // TODO: partial? :fun output(index): Tensor.Graph.Output.new(@, index) // TODO: partial? - :fun output_count: @_ffi.get_num_outputs(@_ptr).usize + :fun output_count + if @_ptr.is_null (USize.max_value | @_ffi.get_num_outputs(@_ptr).usize) :new box _new(@graph, @name, @_ptr) @@ -18,6 +21,13 @@ name = String.val_from_cpointer(name_ptr, name_len, name_len) @_new(graph, name, ptr) + :fun non _new_invalid(graph Tensor.Graph, name, error Tensor.Graph.Error) + graph.errors << error + @_new(graph, name, CPointer(_FFI.Operation).null) + + :fun non _new_invalid_already_captured_error(graph, name) + @_new(graph, name, CPointer(_FFI.Operation).null) + :class Tensor.Graph.Operation.Builder :fun non _ffi: _FFI.Operation.Builder :var _ptr: CPointer(_FFI.Operation.Builder).null @@ -33,13 +43,17 @@ :fun ref add_input(can_output Tensor.Graph.CanOutput) return @ if @_ptr.is_null - @_ffi.add_input(@_ptr, can_output.output._to_ffi) + try ( + @_ffi.add_input(@_ptr, can_output.output._to_ffi!) + | + @_ptr = @_ptr.null + ) @ :fun ref add_input_list(can_outputs Array(Tensor.Graph.CanOutput)'box) return @ if @_ptr.is_null outputs = Array(_FFI.Output).new(can_outputs.size) - can_outputs.each -> (can_output | outputs << can_output.output._to_ffi) + can_outputs.each -> (can_output | try (outputs << can_output.output._to_ffi!)) @_ffi.add_input_list(@_ptr, outputs.cpointer, outputs.size.i32) @ @@ -90,7 +104,7 @@ ) @ - :fun ref set_attr_tensor!(attr_name String, value Tensor.Any) // TODO: what is the right cap for Tensor? 
+  :fun ref set_attr_tensor(attr_name String, value Tensor.Any) // TODO: what is the right cap for Tensor?
     return @ if @_ptr.is_null
     @_ffi.set_attr_tensor(
       @_ptr
@@ -98,14 +112,24 @@
       value._ptr
       @graph._status_ptr
     )
-    @graph._check_status!
+    try (
+      @graph._check_status!
+    |
+      @_ptr = @_ptr.null
+    )
     @
 
-  :fun ref finish!
-    operation_ptr = @_ffi.finish(@_ptr, @graph._status_ptr)
-    @_ptr = @_ptr.null
-    @graph._check_status!
-    Tensor.Graph.Operation._new(@graph, @_name, operation_ptr)
+  :fun ref finish
+    try (
+      if @_ptr.is_null error!
+      operation_ptr = @_ffi.finish(@_ptr, @graph._status_ptr)
+      @graph._check_status!
+      @_ptr = @_ptr.null
+      Tensor.Graph.Operation._new(@graph, @_name, operation_ptr)
+    |
+      @_ptr = @_ptr.null
+      Tensor.Graph.Operation._new_invalid_already_captured_error(@graph, @_name)
+    )
 
 :module _FFI.Operation
diff --git a/src/Tensor.Graph.Output.savi b/src/Tensor.Graph.Output.savi
index 6a28d1f..8b80144 100644
--- a/src/Tensor.Graph.Output.savi
+++ b/src/Tensor.Graph.Output.savi
@@ -12,15 +12,17 @@
   :fun output: @
   :fun name: "\(@op.name).outputs[\(@index)]"
 
-  :fun type: @_to_ffi.type
+  // :fun type: try (@_to_ffi!.type | 0) // TODO: is this okay?
 
   :fun shape
-    status_ptr = CPointer(_FFI.Status).null // TODO: is this okay?
+    ffi = try (@_to_ffi! | return Tensor.Shape.unknown)
+
+    // We don't bother checking the status here because we know that the
+    // operation comes from the given graph, and thus it cannot error here.
+    status_ptr = CPointer(_FFI.Status).null
     rank = try (
-      _FFI.Output._get_rank(
-        @op.graph._ptr, @_to_ffi, status_ptr
-      ).usize!
+      _FFI.Output._get_rank(@op.graph._ptr, ffi, status_ptr).usize!
     |
       return Tensor.Shape.unknown
     )
@@ -32,7 +34,7 @@
     // for inspiration, but generalize to `Array`.
 
     _FFI.Output._get_shape(
-      @op.graph._ptr, @_to_ffi, shape.cpointer, rank.i32, status_ptr
+      @op.graph._ptr, ffi, shape.cpointer, rank.i32, status_ptr
     )
 
     Tensor.Shape.new(shape)
@@ -42,7 +44,8 @@
     @op._ptr.address == that.op._ptr.address
     && @index == that.index
 
-  :fun _to_ffi
+  :fun _to_ffi!
+    error! if @op._ptr.is_null
     _FFI.Output._new(@op._ptr, @index.i32)
 
   :fun non _from_ffi_in_graph(graph, ffi _FFI.Output)
diff --git a/src/Tensor.Graph.Session.savi b/src/Tensor.Graph.Session.savi
index d7dac74..da8b71b 100644
--- a/src/Tensor.Graph.Session.savi
+++ b/src/Tensor.Graph.Session.savi
@@ -50,12 +50,12 @@
     input_ffis = Array(_FFI.Input).new(inputs.size)
     input_tensor_ptrs = Array(CPointer(_FFI.Tensor)).new(inputs.size)
     inputs.each -> (input |
-      input_ffis << input.input._to_ffi
+      input_ffis << input.input._to_ffi!
       input_tensor_ptrs << input.value._ptr
     )
 
     output = can_output.output
-    output_ffi = output._to_ffi
+    output_ffi = output._to_ffi!
     target_op_ptr = output.op._ptr
     output_tensor_ptr = CPointer(_FFI.Tensor).null
 
@@ -93,7 +93,7 @@
     input_ffis = Array(_FFI.Input).new(inputs.size)
     input_tensor_ptrs = Array(CPointer(_FFI.Tensor)).new(inputs.size)
     inputs.each -> (input |
-      input_ffis << input.input._to_ffi
+      input_ffis << input.input._to_ffi!
       input_tensor_ptrs << input.value._ptr
     )
 
@@ -101,7 +101,7 @@
     output_tensor_ptrs = Array(CPointer(_FFI.Tensor)).new(outputs.size)
     target_op_ptrs = Array(CPointer(_FFI.Operation)).new(outputs.size)
     outputs.each -> (output |
-      output_ffis << output._to_ffi
+      output_ffis << output._to_ffi!
       output_tensor_ptrs << CPointer(_FFI.Tensor).null
       target_op_ptrs << output.op._ptr
     )
@@ -113,12 +113,12 @@
       // Inputs
       input_ffis.cpointer
       input_tensor_ptrs.cpointer
-      inputs.size.i32
+      input_ffis.size.i32
 
       // Outputs
       output_ffis.cpointer
       output_tensor_ptrs.cpointer
-      outputs.size.i32
+      output_ffis.size.i32
 
       // Target Operations
       target_op_ptrs.cpointer
diff --git a/src/Tensor.Graph.savi b/src/Tensor.Graph.savi
index dd83105..a1cb690 100644
--- a/src/Tensor.Graph.savi
+++ b/src/Tensor.Graph.savi
@@ -25,7 +25,7 @@
     oper_name String
   ) Tensor.Graph.Operation
     // Yielding the builder instead of returning it lets us guarantee that
-    // the caller will call `finish!` on the builder, which is critical
+    // the caller will call `finish` on the builder, which is critical
    // for preserving memory safety of the C API we are wrapping.
    :yields Tensor.Graph.Operation.Builder for Tensor.Graph.Operation
@@ -39,12 +39,12 @@
     y_ffis = Array(_FFI.Output).new(y_list.size)
     dy_ffis = Array(_FFI.Output).new(y_list.size)
     y_list.each -> (y |
-      y_ffis << y.output._to_ffi
+      try (y_ffis << y.output._to_ffi!)
       dy_ffis << _FFI.Output._new_temporarily_null
     )
 
     x_ffis = Array(_FFI.Output).new(x_list.size)
-    x_list.each -> (x | x_ffis << x.output._to_ffi)
+    x_list.each -> (x | try (x_ffis << x.output._to_ffi!))
 
     @_ffi.add_gradients(
       @_ptr
diff --git a/src/Tensor.Op.Add.savi b/src/Tensor.Op.Add.savi
index f6f71c2..91ce533 100644
--- a/src/Tensor.Op.Add.savi
+++ b/src/Tensor.Op.Add.savi
@@ -14,10 +14,10 @@
 :struct box Tensor.Op.Add
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name, x, y)
+  :fun non new(graph Tensor.Graph, name, x, y)
     @_new(graph.new_operation("AddV2", name) -> (builder |
       builder
         .add_input(x)
         .add_input(y)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.Bitcast.savi b/src/Tensor.Op.Bitcast.savi
index f536b79..3d2882c 100644
--- a/src/Tensor.Op.Bitcast.savi
+++ b/src/Tensor.Op.Bitcast.savi
@@ -4,7 +4,7 @@
 :: For a similar-value-preserving conversion, use `Tensor.Op.Cast` instead.
 :struct box Tensor.Op.Bitcast
   :is Tensor.Op
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     input
     output_type Tensor.Any'non
   )
@@ -13,5 +13,5 @@
         .add_input(input)
         // TODO: also T?
         .set_attr_type("type", output_type.element_type_code)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.Cast.savi b/src/Tensor.Op.Cast.savi
index 5aa8a07..a8cfe50 100644
--- a/src/Tensor.Op.Cast.savi
+++ b/src/Tensor.Op.Cast.savi
@@ -20,7 +20,7 @@
 ::
 :struct box Tensor.Op.Cast
   :is Tensor.Op
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     input
     output_type Tensor.Any'non
     truncate = False
@@ -31,5 +31,5 @@
         // TODO: SrcT?
         .set_attr_type("DstT", output_type.element_type_code)
         .set_attr_bool("Truncate", truncate)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.Concat.savi b/src/Tensor.Op.Concat.savi
index ed7041e..bb22a25 100644
--- a/src/Tensor.Op.Concat.savi
+++ b/src/Tensor.Op.Concat.savi
@@ -7,7 +7,7 @@
 :: The given axis must be a rank-0 tensor (i.e. a scalar).
 :struct box Tensor.Op.Concat
   :is Tensor.Op
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     inputs Array(Tensor.Graph.CanOutput)'box
     axis
   )
@@ -16,5 +16,5 @@
         .add_input_list(inputs)
         .add_input(axis)
         .set_attr_i64("N", inputs.size.i64)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.Const.savi b/src/Tensor.Op.Const.savi
index 502bcd1..9f5a1e8 100644
--- a/src/Tensor.Op.Const.savi
+++ b/src/Tensor.Op.Const.savi
@@ -4,12 +4,12 @@
 :: If you want to supply a value at runtime, use `Tensor.Op.Placeholder`.
 :struct box Tensor.Op.Const
   :is Tensor.Op
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     value
   )
     @_new(graph.new_operation("Const", name) -> (builder |
       builder
-        .set_attr_tensor!("value", value)
+        .set_attr_tensor("value", value)
         .set_attr_type("dtype", value.element_type_code)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.Greater.savi b/src/Tensor.Op.Greater.savi
index c04f148..4ae463c 100644
--- a/src/Tensor.Op.Greater.savi
+++ b/src/Tensor.Op.Greater.savi
@@ -8,8 +8,8 @@
 :struct box Tensor.Op.Greater
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name, x, y, or_equal = False)
+  :fun non new(graph Tensor.Graph, name, x, y, or_equal = False)
     op_name = if or_equal ("GreaterEqual" | "Greater")
     @_new(graph.new_operation(op_name, name) -> (builder |
-      builder.add_input(x).add_input(y).finish!
+      builder.add_input(x).add_input(y).finish
     ))
diff --git a/src/Tensor.Op.Lesser.savi b/src/Tensor.Op.Lesser.savi
index bbd9c74..912c1d9 100644
--- a/src/Tensor.Op.Lesser.savi
+++ b/src/Tensor.Op.Lesser.savi
@@ -8,8 +8,8 @@
 :struct box Tensor.Op.Lesser
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name, x, y, or_equal = False)
+  :fun non new(graph Tensor.Graph, name, x, y, or_equal = False)
     op_name = if or_equal ("LessEqual" | "Less")
     @_new(graph.new_operation(op_name, name) -> (builder |
-      builder.add_input(x).add_input(y).finish!
+      builder.add_input(x).add_input(y).finish
     ))
diff --git a/src/Tensor.Op.Logical.savi b/src/Tensor.Op.Logical.savi
index f2e9727..7c07b2e 100644
--- a/src/Tensor.Op.Logical.savi
+++ b/src/Tensor.Op.Logical.savi
@@ -3,9 +3,9 @@
 :struct box Tensor.Op.Logical.Not
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name, input)
+  :fun non new(graph Tensor.Graph, name, input)
     @_new(graph.new_operation("LogicalNot", name) -> (builder |
-      builder.add_input(input).finish!
+      builder.add_input(input).finish
     ))
 
 :: Computes the logical "AND" operation in boolean logic.
@@ -14,9 +14,9 @@
 :struct box Tensor.Op.Logical.And
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name, x, y)
+  :fun non new(graph Tensor.Graph, name, x, y)
     @_new(graph.new_operation("LogicalAnd", name) -> (builder |
-      builder.add_input(x).add_input(y).finish!
+      builder.add_input(x).add_input(y).finish
     ))
 
 :: Computes the logical "OR" operation in boolean logic.
@@ -25,7 +25,7 @@
 :struct box Tensor.Op.Logical.Or
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name, x, y)
+  :fun non new(graph Tensor.Graph, name, x, y)
     @_new(graph.new_operation("LogicalOr", name) -> (builder |
-      builder.add_input(x).add_input(y).finish!
+      builder.add_input(x).add_input(y).finish
     ))
diff --git a/src/Tensor.Op.MatMul.savi b/src/Tensor.Op.MatMul.savi
index ad2aebc..3a5f091 100644
--- a/src/Tensor.Op.MatMul.savi
+++ b/src/Tensor.Op.MatMul.savi
@@ -46,7 +46,7 @@
 :struct box Tensor.Op.MatMul
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     a
     b
     transpose_a Bool = False
@@ -58,5 +58,5 @@
         .add_input(b)
         .set_attr_bool("transpose_a", transpose_a)
         .set_attr_bool("transpose_b", transpose_b)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.Multiply.savi b/src/Tensor.Op.Multiply.savi
index 2fc4673..9edb796 100644
--- a/src/Tensor.Op.Multiply.savi
+++ b/src/Tensor.Op.Multiply.savi
@@ -14,10 +14,10 @@
 :struct box Tensor.Op.Multiply
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name, x, y)
+  :fun non new(graph Tensor.Graph, name, x, y)
     @_new(graph.new_operation("Mul", name) -> (builder |
       builder
         .add_input(x)
         .add_input(y)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.Optimize.savi b/src/Tensor.Op.Optimize.savi
index 701b486..9e277b3 100644
--- a/src/Tensor.Op.Optimize.savi
+++ b/src/Tensor.Op.Optimize.savi
@@ -6,7 +6,7 @@
 :struct box Tensor.Op.Optimize.GradientDescent
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     gradient Tensor.Graph.CanOutput
     var Tensor.Op.Variable
     learning_rate Tensor.Graph.CanOutput
@@ -16,7 +16,7 @@
         .add_input(var.reference)
         .add_input(learning_rate)
         .add_input(gradient)
-        .finish!
+        .finish
     ))
 
 // TODO: Tensor.Op.Optimize.AdaMax => ApplyAdaMax
diff --git a/src/Tensor.Op.Pack.savi b/src/Tensor.Op.Pack.savi
index 6064d0f..f8b5389 100644
--- a/src/Tensor.Op.Pack.savi
+++ b/src/Tensor.Op.Pack.savi
@@ -8,7 +8,7 @@
 :: the new shape. Raises an error if `axis` is greater than the inputs' rank.
 :struct box Tensor.Op.Pack
   :is Tensor.Op
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     inputs Array(Tensor.Graph.CanOutput)'box
     axis USize = 0
   )
@@ -16,5 +16,5 @@
       builder
         .add_input_list(inputs)
         .set_attr_i64("axis", axis.i64)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.Placeholder.savi b/src/Tensor.Op.Placeholder.savi
index 4c03ec0..97b499c 100644
--- a/src/Tensor.Op.Placeholder.savi
+++ b/src/Tensor.Op.Placeholder.savi
@@ -2,7 +2,7 @@
 :struct box Tensor.Op.Placeholder
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     output_type Tensor.Any'non
     output_shape
   )
@@ -10,7 +10,7 @@
       builder
         .set_attr_type("dtype", output_type.element_type_code)
         .set_attr_shape("shape", output_shape)
-        .finish!
+        .finish
     ))
 
   :fun with_value(value): @op.input(0).with_value(value)
diff --git a/src/Tensor.Op.Random.savi b/src/Tensor.Op.Random.savi
index 6de77f5..64e9ef6 100644
--- a/src/Tensor.Op.Random.savi
+++ b/src/Tensor.Op.Random.savi
@@ -7,7 +7,7 @@
 :struct box Tensor.Op.Random.Uniform
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     algorithm Tensor.Gen.Random.Algorithm
     key
     counter
@@ -19,11 +19,11 @@
         .add_input(output_shape)
         .add_input(key)
         .add_input(counter)
-        .add_input(Tensor.Op.Const.new!(graph, "\(name).algorithm"
+        .add_input(Tensor.Op.Const.new(graph, "\(name).algorithm"
           Tensor(I32).scalar(algorithm.i32)
         ))
         .set_attr_type("dtype", output_type.element_type_code)
-        .finish!
+        .finish
     ))
 
 :: Generate pseudo-random integers in a uniform distribution, with values
@@ -35,7 +35,7 @@
 :struct box Tensor.Op.Random.Uniform.Integers
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     algorithm Tensor.Gen.Random.Algorithm
     key
     counter
@@ -47,11 +47,11 @@
         .add_input(output_shape)
         .add_input(key)
         .add_input(counter)
-        .add_input(Tensor.Op.Const.new!(graph, "\(name).algorithm"
+        .add_input(Tensor.Op.Const.new(graph, "\(name).algorithm"
           Tensor(I32).scalar(algorithm.i32)
         ))
         .set_attr_type("dtype", output_type.element_type_code)
-        .finish!
+        .finish
     ))
 
 :: Generate pseudo-random integers in a uniform distribution, bounded to a
@@ -63,7 +63,7 @@
 :struct box Tensor.Op.Random.Uniform.BoundedIntegers
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     algorithm Tensor.Gen.Random.Algorithm
     key
     counter
@@ -77,11 +77,11 @@
         .add_input(output_shape)
         .add_input(key)
         .add_input(counter)
-        .add_input(Tensor.Op.Const.new!(graph, "\(name).algorithm"
+        .add_input(Tensor.Op.Const.new(graph, "\(name).algorithm"
           Tensor(I32).scalar(algorithm.i32)
         ))
         .add_input(min_value)
         .add_input(max_value)
         .set_attr_type("dtype", output_type.element_type_code)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.Reshape.savi b/src/Tensor.Op.Reshape.savi
index d1b9a6e..af785ec 100644
--- a/src/Tensor.Op.Reshape.savi
+++ b/src/Tensor.Op.Reshape.savi
@@ -5,7 +5,7 @@
 :: values inside the graph rather than tensors held locally (not in a graph).
 :struct box Tensor.Op.Reshape
   :is Tensor.Op
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     input
     output_shape Tensor.Graph.CanOutput
   )
@@ -13,5 +13,5 @@
       builder
         .add_input(input)
         .add_input(output_shape)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.Select.savi b/src/Tensor.Op.Select.savi
index b9746f9..1cb67c1 100644
--- a/src/Tensor.Op.Select.savi
+++ b/src/Tensor.Op.Select.savi
@@ -18,7 +18,7 @@
 :struct box Tensor.Op.Select
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     condition_tensor
     true_case_tensor
     false_case_tensor
@@ -28,5 +28,5 @@
         .add_input(condition_tensor)
         .add_input(true_case_tensor)
         .add_input(false_case_tensor)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.Shape.savi b/src/Tensor.Op.Shape.savi
index aaa610c..e1d24df 100644
--- a/src/Tensor.Op.Shape.savi
+++ b/src/Tensor.Op.Shape.savi
@@ -1,9 +1,9 @@
 :: Get the shape of the input tensor, returned as a tensor.
 :struct box Tensor.Op.Shape
   :is Tensor.Op
-  :fun non new!(graph Tensor.Graph, name, input)
+  :fun non new(graph Tensor.Graph, name, input)
     @_new(graph.new_operation("Shape", name) -> (builder |
       builder
         .add_input(input)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.Slice.savi b/src/Tensor.Op.Slice.savi
index b603ec1..beb2c77 100644
--- a/src/Tensor.Op.Slice.savi
+++ b/src/Tensor.Op.Slice.savi
@@ -7,7 +7,7 @@
 :: with the same number of elements as the total dimension count of the input.
 :struct box Tensor.Op.Slice
   :is Tensor.Op
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     input
     begin_indices
     output_shape
@@ -17,5 +17,5 @@
         .add_input(input)
         .add_input(begin_indices)
         .add_input(output_shape)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.Softmax.savi b/src/Tensor.Op.Softmax.savi
index d19668c..dccc83c 100644
--- a/src/Tensor.Op.Softmax.savi
+++ b/src/Tensor.Op.Softmax.savi
@@ -51,11 +51,11 @@
 :struct box Tensor.Op.Softmax
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     logits
   )
     @_new(graph.new_operation("Softmax", name) -> (builder |
       builder
         .add_input(logits)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.SplitV.savi b/src/Tensor.Op.SplitV.savi
index 3dd51d6..f498d26 100644
--- a/src/Tensor.Op.SplitV.savi
+++ b/src/Tensor.Op.SplitV.savi
@@ -18,7 +18,7 @@
   :: Get (by index) one of the slices emitted by this operation.
   :fun output_slice(index): @op.output(index)
 
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     input
     split_axis
     slice_sizes
@@ -30,5 +30,5 @@
         .add_input(slice_sizes)
         .add_input(split_axis)
         .set_attr_i64("num_split", slice_sizes_count.i64)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.Square.savi b/src/Tensor.Op.Square.savi
index 9a71f5e..14788e1 100644
--- a/src/Tensor.Op.Square.savi
+++ b/src/Tensor.Op.Square.savi
@@ -2,9 +2,9 @@
 :struct box Tensor.Op.Square
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name, input)
+  :fun non new(graph Tensor.Graph, name, input)
     @_new(graph.new_operation("Square", name) -> (builder |
       builder
         .add_input(input)
-        .finish!
+        .finish
     ))
diff --git a/src/Tensor.Op.Variable.savi b/src/Tensor.Op.Variable.savi
index b240c45..df2c68c 100644
--- a/src/Tensor.Op.Variable.savi
+++ b/src/Tensor.Op.Variable.savi
@@ -2,7 +2,7 @@
 :struct box Tensor.Op.Variable
   :is Tensor.Op
 
-  :fun non new!(graph Tensor.Graph, name
+  :fun non new(graph Tensor.Graph, name
     output_type Tensor.Any'non
     output_shape
   )
@@ -10,7 +10,7 @@
       builder
         .set_attr_type("dtype", output_type.element_type_code)
         .set_attr_shape("shape", output_shape)
-        .finish!
+        .finish
     )
     reference = graph.new_operation("VariableV2", name) -> (builder |
       builder
@@ -18,18 +18,18 @@
         .set_attr_shape("shape", output_shape)
         // TODO: add support for non-default `container` attr?
         // TODO: add support for non-default `shared_name` attr?
-        .finish!
+        .finish
     )
     initial_assign = graph.new_operation("Assign", "\(name).assign_initial_value") -> (builder |
       builder
         .add_input(reference.output(0))
         .add_input(placeholder.output(0))
-        .finish!
+        .finish
     )
     initial_snapshot = graph.new_operation("Identity", "\(name).initial_snapshot") -> (builder |
       builder
         .add_input(initial_assign.output(0))
-        .finish!
+        .finish
     )
     @_new(initial_snapshot)
 
@@ -59,7 +59,7 @@
 // :struct box Tensor.Op.Variable.Assign
 //   :is Tensor.Op
-//   :fun non new!(graph Tensor.Graph, name
+//   :fun non new(graph Tensor.Graph, name
 //     var Tensor.Op.Variable
 //     from Tensor.Graph.CanOutput
 //   )
@@ -69,18 +69,18 @@
 //         .add_input(from)
 //         // TODO: add support for disabling `validate_shape` attr?
 //         // TODO: add support for disabling `use_locking` attr?
-//         .finish!
+//         .finish
 //     ))
 
 // // TODO: Documentation
 // :struct box Tensor.Op.Variable.Read
 //   :is Tensor.Op
-//   :fun non new!(graph Tensor.Graph, name, var Tensor.Op.Variable)
+//   :fun non new(graph Tensor.Graph, name, var Tensor.Op.Variable)
 //     @_new(graph.new_operation("Identity", name) -> (builder |
 //       builder
 //         .add_input(var)
 //         // TODO: add support for disabling `validate_shape` attr?
 //         // TODO: add support for disabling `use_locking` attr?
-//         .finish!
+//         .finish
 //     ))
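
Note on the resulting calling convention (commentary, not part of the patch): with `finish` and the op constructors no longer partial, graph-building code needs no `try` blocks at all. A builder failure nulls the underlying pointer, the error is recorded on the graph, and everything downstream degrades gracefully until `session.compute!` raises. A minimal spec-style sketch of the intended usage, assuming the `_WithGraphHelper` runner and the `g.const`/`g.add` helpers renamed in this patch:

  :it "defers builder errors until compute time"
    _WithGraphHelper.run(@env) -> (g, session | assert no_error: (
      // No `try` is needed while building - these calls can no longer raise.
      sum = g.add("sum"
        g.const("one", Tensor(F64).scalar(1.0))
        g.const("two", Tensor(F64).scalar(2.0))
      )
      // Any builder error would have been captured on the graph instead,
      // and would surface here when the session computes the result.
      result = session.compute!(sum)
      assert: result.as!(Tensor(F64)).into_array == [3.0]
    ))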
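When a build does fail, the failure travels as data rather than control flow: the returned operation wraps a null pointer, `is_valid` reports it, `output_count` answers with the `USize.max_value` sentinel, and shape queries degrade to `Tensor.Shape.unknown` because `_to_ffi!` raises on a null pointer. `Session` likewise now passes `input_ffis.size`/`output_ffis.size` so the FFI counts always match the arrays that were actually populated. A hedged sketch of inspecting that state directly (the assertions are illustrative rather than taken from the spec suite, and assume the "Bogus" error is captured by `_check_status!` during `finish`):

  graph = Tensor.Graph.new
  bogus = graph.new_operation("Bogus", "example") -> (builder | builder.finish)

  // The unregistered op type leaves a null operation pointer behind,
  // with the error recorded on the graph rather than raised.
  assert: bogus.is_valid == False
  assert: bogus.output_count == USize.max_value // sentinel for "invalid"
  assert: graph.errors.first!.code == Tensor.Graph.Error.Code.InvalidArgument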