diff --git a/build/Dependencies.props b/build/Dependencies.props
index 196d7b9bad..3b56411602 100644
--- a/build/Dependencies.props
+++ b/build/Dependencies.props
@@ -22,9 +22,9 @@
     4.5.0
     4.5.0
     4.5.0
-    1.14.0
-    1
-    0.11.8.1
+    2.3.1
+    2
+    0.20.1
diff --git a/src/Microsoft.ML.TensorFlow/TensorTypeExtensions.cs b/src/Microsoft.ML.TensorFlow/TensorTypeExtensions.cs
index 2004d631d8..63983d248b 100644
--- a/src/Microsoft.ML.TensorFlow/TensorTypeExtensions.cs
+++ b/src/Microsoft.ML.TensorFlow/TensorTypeExtensions.cs
@@ -5,6 +5,7 @@
 using System;
 using Microsoft.ML.Internal.Utilities;
 using Tensorflow;
+using Utils = Microsoft.ML.Internal.Utilities.Utils;
 
 namespace Microsoft.ML.TensorFlow
 {
diff --git a/src/Microsoft.ML.TensorFlow/TensorflowTransform.cs b/src/Microsoft.ML.TensorFlow/TensorflowTransform.cs
index 6c46f59b2d..998ad30418 100644
--- a/src/Microsoft.ML.TensorFlow/TensorflowTransform.cs
+++ b/src/Microsoft.ML.TensorFlow/TensorflowTransform.cs
@@ -20,6 +20,7 @@
 using Tensorflow;
 using static Microsoft.ML.TensorFlow.TensorFlowUtils;
 using static Tensorflow.Binding;
+using Utils = Microsoft.ML.Internal.Utilities.Utils;
 
 [assembly: LoadableClass(TensorFlowTransformer.Summary, typeof(IDataTransform), typeof(TensorFlowTransformer),
     typeof(TensorFlowEstimator.Options), typeof(SignatureDataTransform), TensorFlowTransformer.UserName, TensorFlowTransformer.ShortName)]
@@ -280,6 +281,7 @@ internal TensorFlowTransformer(IHostEnvironment env, Session session, string[] o
             _addBatchDimensionInput = addBatchDimensionInput;
             Inputs = inputColumnNames;
             Outputs = outputColumnNames;
+            tf.compat.v1.disable_eager_execution();
 
             (TFOutputTypes, OutputTypes, TFOutputOperations) = GetOutputInfo(Host, Session, Outputs);
             (TFInputTypes, TFInputShapes, TFInputOperations) = GetInputInfo(Host, Session, Inputs, batchSize);
@@ -344,7 +346,7 @@ internal static TensorShape GetTensorShape(TF_Output output, Graph graph, Status
                 new ObjectDisposedException(nameof(graph));
 
             var cstatus = status == null ? new Status() : status;
-            var n = c_api.TF_GraphGetTensorNumDims(graph, output, cstatus);
+            var n = c_api.TF_GraphGetTensorNumDims(graph, output, cstatus.Handle);
 
             cstatus.Check();
@@ -352,7 +354,7 @@ internal static TensorShape GetTensorShape(TF_Output output, Graph graph, Status
                 return new TensorShape(new int[0]);
 
             var dims = new long[n];
-            c_api.TF_GraphGetTensorShape(graph, output, dims, dims.Length, cstatus);
+            c_api.TF_GraphGetTensorShape(graph, output, dims, dims.Length, cstatus.Handle);
             cstatus.Check();
             return new TensorShape(dims.Select(x => (int)x).ToArray());
         }
@@ -426,12 +428,14 @@ private protected override void SaveModel(ModelSaveContext ctx)
             ctx.Writer.WriteBoolByte(_addBatchDimensionInput);
             if (isFrozen)
             {
-                Status status = new Status();
-                var buffer = Session.graph.ToGraphDef(status);
-                ctx.SaveBinaryStream("TFModel", w =>
+                using (var status = new Status())
+                using (var buffer = Session.graph.ToGraphDef(status))
                 {
-                    w.WriteByteArray(buffer.MemoryBlock.ToArray());
-                });
+                    ctx.SaveBinaryStream("TFModel", w =>
+                    {
+                        w.WriteByteArray(buffer.DangerousMemoryBlock.ToArray());
+                    });
+                }
             }
 
             Host.AssertNonEmpty(Inputs);
@@ -801,48 +805,10 @@ public Tensor GetTensor()
                 // This is done to reduce memory allocation every time tensor is created.
                 _denseData = new T[_vBuffer.Length];
                 _vBuffer.CopyTo(_denseData);
-                var tensor = CastDataAndReturnAsTensor(_denseData);
+                var tensor = TensorFlowUtils.CastDataAndReturnAsTensor(_denseData, _tfShape);
                 return tensor;
             }
 
-            private Tensor CastDataAndReturnAsTensor(T[] data)
-            {
-                if (typeof(T) == typeof(sbyte))
-                    return new Tensor((sbyte[])(object)data, _dims, TF_DataType.TF_INT8);
-                else if (typeof(T) == typeof(long))
-                    return new Tensor((long[])(object)data, _dims, TF_DataType.TF_INT64);
-                else if (typeof(T) == typeof(Int32))
-                    return new Tensor((Int32[])(object)data, _dims, TF_DataType.TF_INT32);
-                else if (typeof(T) == typeof(Int16))
-                    return new Tensor((Int16[])(object)data, _dims, TF_DataType.TF_INT16);
-                else if (typeof(T) == typeof(byte))
-                    return new Tensor((byte[])(object)data, _dims, TF_DataType.TF_UINT8);
-                else if (typeof(T) == typeof(ulong))
-                    return new Tensor((ulong[])(object)data, _dims, TF_DataType.TF_UINT64);
-                else if (typeof(T) == typeof(UInt32))
-                    return new Tensor((UInt32[])(object)data, _dims, TF_DataType.TF_UINT32);
-                else if (typeof(T) == typeof(UInt16))
-                    return new Tensor((UInt16[])(object)data, _dims, TF_DataType.TF_UINT16);
-                else if (typeof(T) == typeof(bool))
-                    return new Tensor((bool[])(object)data, _dims, TF_DataType.TF_BOOL);
-                else if (typeof(T) == typeof(float))
-                    return new Tensor((float[])(object)data, _dims, TF_DataType.TF_FLOAT);
-                else if (typeof(T) == typeof(double))
-                    return new Tensor((double[])(object)data, _dims, TF_DataType.TF_DOUBLE);
-                else if (typeof(T) == typeof(ReadOnlyMemory<char>))
-                {
-                    byte[][] bytes = new byte[_vBuffer.Length][];
-                    for (int i = 0; i < bytes.Length; i++)
-                    {
-                        bytes[i] = Encoding.UTF8.GetBytes(((ReadOnlyMemory<char>)(object)data[i]).ToArray());
-                    }
-
-                    return new Tensor(bytes, _tfShape.dims.Select(x => (long)x).ToArray());
-                }
-
-                return new Tensor(new NDArray(data, _tfShape));
-            }
-
             public void BufferTrainingData()
             {
                 _srcgetter(ref _vBuffer);
@@ -853,7 +819,7 @@ public void BufferTrainingData()
             public Tensor GetBufferedBatchTensor()
             {
                 _position = 0;
-                var tensor = CastDataAndReturnAsTensor(_bufferedData);
+                var tensor = TensorFlowUtils.CastDataAndReturnAsTensor(_bufferedData, _tfShape);
 
                 _bufferedData = new T[_bufferedDataSize];
                 return tensor;
diff --git a/src/Microsoft.ML.TensorFlow/TensorflowUtils.cs b/src/Microsoft.ML.TensorFlow/TensorflowUtils.cs
index b1c8a78a5b..dd25bb104b 100644
--- a/src/Microsoft.ML.TensorFlow/TensorflowUtils.cs
+++ b/src/Microsoft.ML.TensorFlow/TensorflowUtils.cs
@@ -12,8 +12,10 @@
 using Microsoft.ML.Runtime;
 using Microsoft.ML.TensorFlow;
 using Microsoft.ML.Transforms;
+using NumSharp;
 using Tensorflow;
 using static Tensorflow.Binding;
+using Utils = Microsoft.ML.Internal.Utilities.Utils;
 
 namespace Microsoft.ML.TensorFlow
 {
@@ -410,6 +412,46 @@ internal static bool IsTypeSupported(TF_DataType tfoutput)
             }
         }
 
+        internal static Tensor CastDataAndReturnAsTensor<T>(T[] data, TensorShape tfShape)
+        {
+            var dims = tfShape.dims.Select(x => (long)x).ToArray();
+
+            if (typeof(T) == typeof(sbyte))
+                return new Tensor((sbyte[])(object)data, dims, TF_DataType.TF_INT8);
+            else if (typeof(T) == typeof(long))
+                return new Tensor((long[])(object)data, dims, TF_DataType.TF_INT64);
+            else if (typeof(T) == typeof(Int32))
+                return new Tensor((Int32[])(object)data, dims, TF_DataType.TF_INT32);
+            else if (typeof(T) == typeof(Int16))
+                return new Tensor((Int16[])(object)data, dims, TF_DataType.TF_INT16);
+            else if (typeof(T) == typeof(byte))
+                return new Tensor((byte[])(object)data, dims, TF_DataType.TF_UINT8);
+            else if (typeof(T) == typeof(ulong))
+                return new Tensor((ulong[])(object)data, dims, TF_DataType.TF_UINT64);
+            else if (typeof(T) == typeof(UInt32))
+                return new Tensor((UInt32[])(object)data, dims, TF_DataType.TF_UINT32);
+            else if (typeof(T) == typeof(UInt16))
+                return new Tensor((UInt16[])(object)data, dims, TF_DataType.TF_UINT16);
+            else if (typeof(T) == typeof(bool))
+                return new Tensor((bool[])(object)data, dims, TF_DataType.TF_BOOL);
+            else if (typeof(T) == typeof(float))
+                return new Tensor((float[])(object)data, dims, TF_DataType.TF_FLOAT);
+            else if (typeof(T) == typeof(double))
+                return new Tensor((double[])(object)data, dims, TF_DataType.TF_DOUBLE);
+            else if (typeof(T) == typeof(ReadOnlyMemory<char>))
+            {
+                string[] strings = new string[data.Length];
+                for (int i = 0; i < strings.Length; i++)
+                {
+                    strings[i] = data[i].ToString();
+                }
+
+                return new Tensor(strings);
+            }
+
+            return new Tensor(new NDArray(data, tfShape));
+        }
+
         /// <summary>
         /// Use the runner class to easily configure inputs, outputs and targets to be passed to the session runner.
         /// </summary>
@@ -491,7 +533,7 @@ public Tensor[] Run()
                 {
                     c_api.TF_SessionRun(_session, null, _inputs, _inputValues, _inputs.Length,
                         _outputs, _outputValues, _outputValues.Length, _operations,
-                        _operations.Length, IntPtr.Zero, _status);
+                        _operations.Length, IntPtr.Zero, _status.Handle);
                 }
                 catch (Exception ex)
                 {
diff --git a/src/Microsoft.ML.Vision/DnnRetrainTransform.cs b/src/Microsoft.ML.Vision/DnnRetrainTransform.cs
index 2388ba8e19..240b2773f7 100644
--- a/src/Microsoft.ML.Vision/DnnRetrainTransform.cs
+++ b/src/Microsoft.ML.Vision/DnnRetrainTransform.cs
@@ -19,6 +19,7 @@
 using Tensorflow;
 using static Microsoft.ML.TensorFlow.TensorFlowUtils;
 using static Tensorflow.Binding;
+using Utils = Microsoft.ML.Internal.Utilities.Utils;
 
 [assembly: LoadableClass(DnnRetrainTransformer.Summary, typeof(IDataTransform), typeof(DnnRetrainTransformer),
     typeof(DnnRetrainEstimator.Options), typeof(SignatureDataTransform), DnnRetrainTransformer.UserName, DnnRetrainTransformer.ShortName)]
@@ -607,7 +608,7 @@ internal static TensorShape GetTensorShape(TF_Output output, Graph graph, Status
                 new ObjectDisposedException(nameof(graph));
 
             var cstatus = status == null ? new Status() : status;
-            var n = c_api.TF_GraphGetTensorNumDims(graph, output, cstatus);
+            var n = c_api.TF_GraphGetTensorNumDims(graph, output, cstatus.Handle);
 
             cstatus.Check();
@@ -615,7 +616,7 @@ internal static TensorShape GetTensorShape(TF_Output output, Graph graph, Status
                 return new TensorShape(new int[0]);
 
             var dims = new long[n];
-            c_api.TF_GraphGetTensorShape(graph, output, dims, dims.Length, cstatus);
+            c_api.TF_GraphGetTensorShape(graph, output, dims, dims.Length, cstatus.Handle);
             cstatus.Check();
             return new TensorShape(dims.Select(x => (int)x).ToArray());
         }
@@ -1040,49 +1041,11 @@ public Tensor GetBufferedBatchTensor()
                 }
                 else
                 {
-                    var tensor = CastDataAndReturnAsTensor(_bufferedData);
+                    var tensor = TensorFlowUtils.CastDataAndReturnAsTensor(_bufferedData, _tfShape);
                     _position = 0;
                     return tensor;
                 }
             }
-
-            private Tensor CastDataAndReturnAsTensor(T[] data)
-            {
-                if (typeof(T) == typeof(sbyte))
-                    return new Tensor((sbyte[])(object)data, _dims, TF_DataType.TF_INT8);
-                else if (typeof(T) == typeof(long))
-                    return new Tensor((long[])(object)data, _dims, TF_DataType.TF_INT64);
-                else if (typeof(T) == typeof(Int32))
-                    return new Tensor((Int32[])(object)data, _dims, TF_DataType.TF_INT32);
-                else if (typeof(T) == typeof(Int16))
-                    return new Tensor((Int16[])(object)data, _dims, TF_DataType.TF_INT16);
-                else if (typeof(T) == typeof(byte))
-                    return new Tensor((byte[])(object)data, _dims, TF_DataType.TF_UINT8);
-                else if (typeof(T) == typeof(ulong))
-                    return new Tensor((ulong[])(object)data, _dims, TF_DataType.TF_UINT64);
-                else if (typeof(T) == typeof(UInt32))
-                    return new Tensor((UInt32[])(object)data, _dims, TF_DataType.TF_UINT32);
-                else if (typeof(T) == typeof(UInt16))
-                    return new Tensor((UInt16[])(object)data, _dims, TF_DataType.TF_UINT16);
-                else if (typeof(T) == typeof(bool))
-                    return new Tensor((bool[])(object)data, _dims, TF_DataType.TF_BOOL);
-                else if (typeof(T) == typeof(float))
-                    return new Tensor((float[])(object)data, _dims, TF_DataType.TF_FLOAT);
-                else if (typeof(T) == typeof(float))
-                    return new Tensor((double[])(object)data, _dims, TF_DataType.TF_DOUBLE);
-                else if (typeof(T) == typeof(ReadOnlyMemory<char>))
-                {
-                    byte[][] bytes = new byte[_bufferedData.Length][];
-                    for (int i = 0; i < bytes.Length; i++)
-                    {
-                        bytes[i] = Encoding.UTF8.GetBytes(((ReadOnlyMemory<char>)(object)data[i]).ToArray());
-                    }
-
-                    return new Tensor(bytes, _tfShape.dims.Select(x => (long)x).ToArray());
-                }
-
-                return new Tensor(new NDArray(data, _tfShape));
-            }
         }
 
         private class TensorValueGetterVec<T> : ITensorValueGetter
@@ -1126,45 +1089,7 @@ public Tensor GetTensor()
                 // This is done to reduce memory allocation every time tensor is created.
                 _denseData = new T[_vBuffer.Length];
                 _vBuffer.CopyTo(_denseData);
-                return CastDataAndReturnAsTensor(_denseData);
-            }
-
-            private Tensor CastDataAndReturnAsTensor(T[] data)
-            {
-                if (typeof(T) == typeof(sbyte))
-                    return new Tensor((sbyte[])(object)data, _dims, TF_DataType.TF_INT8);
-                else if (typeof(T) == typeof(long))
-                    return new Tensor((long[])(object)data, _dims, TF_DataType.TF_INT64);
-                else if (typeof(T) == typeof(Int32))
-                    return new Tensor((Int32[])(object)data, _dims, TF_DataType.TF_INT32);
-                else if (typeof(T) == typeof(Int16))
-                    return new Tensor((Int16[])(object)data, _dims, TF_DataType.TF_INT16);
-                else if (typeof(T) == typeof(byte))
-                    return new Tensor((byte[])(object)data, _dims, TF_DataType.TF_UINT8);
-                else if (typeof(T) == typeof(ulong))
-                    return new Tensor((ulong[])(object)data, _dims, TF_DataType.TF_UINT64);
-                else if (typeof(T) == typeof(UInt32))
-                    return new Tensor((UInt32[])(object)data, _dims, TF_DataType.TF_UINT32);
-                else if (typeof(T) == typeof(UInt16))
-                    return new Tensor((UInt16[])(object)data, _dims, TF_DataType.TF_UINT16);
-                else if (typeof(T) == typeof(bool))
-                    return new Tensor((bool[])(object)data, _dims, TF_DataType.TF_BOOL);
-                else if (typeof(T) == typeof(float))
-                    return new Tensor((float[])(object)data, _dims, TF_DataType.TF_FLOAT);
-                else if (typeof(T) == typeof(double))
-                    return new Tensor((double[])(object)data, _dims, TF_DataType.TF_DOUBLE);
-                else if (typeof(T) == typeof(ReadOnlyMemory<char>))
-                {
-                    byte[][] bytes = new byte[_vBuffer.Length][];
-                    for (int i = 0; i < bytes.Length; i++)
-                    {
-                        bytes[i] = Encoding.UTF8.GetBytes(((ReadOnlyMemory<char>)(object)data[i]).ToArray());
-                    }
-
-                    return new Tensor(bytes, _tfShape.dims.Select(x => (long)x).ToArray());
-                }
-
-                return new Tensor(new NDArray(data, _tfShape));
+                return TensorFlowUtils.CastDataAndReturnAsTensor(_denseData, _tfShape);
             }
 
             public void BufferTrainingData()
@@ -1177,7 +1102,7 @@ public void BufferTrainingData()
             public Tensor GetBufferedBatchTensor()
             {
                 _position = 0;
-                var tensor = CastDataAndReturnAsTensor(_bufferedData);
+                var tensor = TensorFlowUtils.CastDataAndReturnAsTensor(_bufferedData, _tfShape);
                 _bufferedData = new T[_bufferedDataSize];
                 return tensor;
             }
diff --git a/src/Microsoft.ML.Vision/ImageClassificationTrainer.cs b/src/Microsoft.ML.Vision/ImageClassificationTrainer.cs
index 5f94ff282b..e5fe486ae5 100644
--- a/src/Microsoft.ML.Vision/ImageClassificationTrainer.cs
+++ b/src/Microsoft.ML.Vision/ImageClassificationTrainer.cs
@@ -530,6 +530,7 @@ internal ImageClassificationTrainer(IHostEnvironment env, Options options)
             Host.CheckNonEmpty(options.LabelColumnName, nameof(options.LabelColumnName));
             Host.CheckNonEmpty(options.ScoreColumnName, nameof(options.ScoreColumnName));
             Host.CheckNonEmpty(options.PredictedLabelColumnName, nameof(options.PredictedLabelColumnName));
+            tf.compat.v1.disable_eager_execution();
 
             if (string.IsNullOrEmpty(options.WorkspacePath))
             {
@@ -752,15 +753,15 @@ private void CheckTrainingParameters(Options options)
             var decodedImage4d = tf.expand_dims(decodedImageAsFloat, 0);
             var resizeShape = tf.stack(new int[] { inputDim.Item1, inputDim.Item2 });
             var resizeShapeAsInt = tf.cast(resizeShape, dtype: tf.int32);
-            var resizedImage = tf.image.resize_bilinear(decodedImage4d, resizeShapeAsInt, false, "ResizeTensor");
+            var resizedImage = tf.image.resize_bilinear(decodedImage4d, resizeShapeAsInt, false, name: "ResizeTensor");
             return (jpegData, resizedImage);
         }
 
         private static Tensor EncodeByteAsString(VBuffer<byte> buffer)
         {
             int length = buffer.Length;
-            var size = c_api.TF_StringEncodedSize((UIntPtr)length);
-            var handle = c_api.TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8));
+            var size = c_api.TF_StringEncodedSize((ulong)length);
+            var handle = c_api.TF_AllocateTensor(TF_DataType.TF_STRING, Array.Empty<long>(), 0, ((ulong)size + 8));
 
             IntPtr tensor = c_api.TF_TensorData(handle);
             Marshal.WriteInt64(tensor, 0);
@@ -769,7 +770,7 @@ private static Tensor EncodeByteAsString(VBuffer<byte> buffer)
             unsafe
             {
                 fixed (byte* src = buffer.GetValues())
-                    c_api.TF_StringEncode(src, (UIntPtr)length, (sbyte*)(tensor + sizeof(Int64)), size, status);
+                    c_api.TF_StringEncode(src, (ulong)length, (byte*)(tensor + sizeof(Int64)), size, status.Handle);
             }
 
             status.Check(true);
@@ -1213,7 +1214,7 @@ private void UpdateTransferLearningModelOnDisk(int classCount)
             sess.Dispose();
         }
 
-        private void VariableSummaries(RefVariable var)
+        private void VariableSummaries(ResourceVariable var)
         {
             tf_with(tf.name_scope("summaries"), delegate
             {
@@ -1256,7 +1257,7 @@ private void VariableSummaries(RefVariable var)
             Tensor logits = null;
             tf_with(tf.name_scope(layerName), scope =>
             {
-                RefVariable layerWeights = null;
+                ResourceVariable layerWeights = null;
                 tf_with(tf.name_scope("weights"), delegate
                 {
                     var initialValue = tf.truncated_normal(new int[] { bottleneck_tensor_size, classCount },
@@ -1266,7 +1267,7 @@ private void VariableSummaries(RefVariable var)
                     VariableSummaries(layerWeights);
                 });
 
-                RefVariable layerBiases = null;
+                ResourceVariable layerBiases = null;
                 tf_with(tf.name_scope("biases"), delegate
                 {
                     TensorShape shape = new TensorShape(classCount);
@@ -1445,14 +1446,15 @@ private protected override void SaveCore(ModelSaveContext ctx)
             ctx.Writer.Write(_imagePreprocessorTensorOutput);
             ctx.Writer.Write(_graphInputTensor);
             ctx.Writer.Write(_graphOutputTensor);
-
-            Status status = new Status();
-            var buffer = _session.graph.ToGraphDef(status);
-            ctx.SaveBinaryStream("TFModel", w =>
+            using(var status = new Status())
+            using(var buffer = _session.graph.ToGraphDef(status))
             {
-                w.WriteByteArray(buffer.MemoryBlock.ToArray());
-            });
-            status.Check(true);
+                ctx.SaveBinaryStream("TFModel", w =>
+                {
+                    w.WriteByteArray(buffer.DangerousMemoryBlock.ToArray());
+                });
+                status.Check(true);
+            }
         }
 
         private class Classifier
diff --git a/test/Microsoft.ML.TestFramework/Attributes/TensorflowFactAttribute.cs b/test/Microsoft.ML.TestFramework/Attributes/TensorflowFactAttribute.cs
index f7ae0ecf91..d2376ef3ac 100644
--- a/test/Microsoft.ML.TestFramework/Attributes/TensorflowFactAttribute.cs
+++ b/test/Microsoft.ML.TestFramework/Attributes/TensorflowFactAttribute.cs
@@ -3,6 +3,7 @@
 // See the LICENSE file in the project root for more information.
 
 using System;
+using System.Runtime.InteropServices;
 using Microsoft.ML.TestFrameworkCommon.Attributes;
 
 namespace Microsoft.ML.TestFramework.Attributes
@@ -12,14 +13,17 @@ namespace Microsoft.ML.TestFramework.Attributes
     /// </summary>
     public sealed class TensorFlowFactAttribute : EnvironmentSpecificFactAttribute
     {
-        public TensorFlowFactAttribute() : base("TensorFlow is 64-bit only")
+        public TensorFlowFactAttribute() : base("TensorFlow is 64-bit only and is not supported on Linux with libc < v2.23")
         {
         }
 
         /// <inheritdoc />
         protected override bool IsEnvironmentSupported()
         {
-            return Environment.Is64BitProcess;
+            return Environment.Is64BitProcess &&
+                ( RuntimeInformation.IsOSPlatform(OSPlatform.Windows) ||
+                AttributeHelpers.CheckLibcVersionGreaterThanMinimum(new Version(2, 23)));
+
         }
     }
 }
diff --git a/test/Microsoft.ML.TestFramework/Attributes/TensorflowTheoryAttribute.cs b/test/Microsoft.ML.TestFramework/Attributes/TensorflowTheoryAttribute.cs
index 24b73a56f5..feed8047d7 100644
--- a/test/Microsoft.ML.TestFramework/Attributes/TensorflowTheoryAttribute.cs
+++ b/test/Microsoft.ML.TestFramework/Attributes/TensorflowTheoryAttribute.cs
@@ -3,6 +3,9 @@
 // See the LICENSE file in the project root for more information.
 
 using System;
+using System.Runtime.InteropServices;
+using Microsoft.ML.TestFrameworkCommon.Attributes;
+
 namespace Microsoft.ML.TestFramework.Attributes
 {
     /// <summary>
@@ -10,14 +13,16 @@ namespace Microsoft.ML.TestFramework.Attributes
     /// </summary>
     public sealed class TensorFlowTheoryAttribute : EnvironmentSpecificTheoryAttribute
     {
-        public TensorFlowTheoryAttribute() : base("TensorFlow is 64-bit only")
+        public TensorFlowTheoryAttribute() : base("TensorFlow is 64-bit only and is not supported on Linux with libc < v2.23")
         {
         }
 
         /// <inheritdoc />
         protected override bool IsEnvironmentSupported()
         {
-            return Environment.Is64BitProcess;
+            return Environment.Is64BitProcess &&
+                (RuntimeInformation.IsOSPlatform(OSPlatform.Windows) ||
+                AttributeHelpers.CheckLibcVersionGreaterThanMinimum(new Version(2, 23)));
         }
     }
 }
\ No newline at end of file
diff --git a/test/Microsoft.ML.Tests/ScenariosWithDirectInstantiation/TensorflowTests.cs b/test/Microsoft.ML.Tests/ScenariosWithDirectInstantiation/TensorflowTests.cs
index 60517fcd97..c5b9e839fc 100644
--- a/test/Microsoft.ML.Tests/ScenariosWithDirectInstantiation/TensorflowTests.cs
+++ b/test/Microsoft.ML.Tests/ScenariosWithDirectInstantiation/TensorflowTests.cs
@@ -278,7 +278,7 @@ private List GetShapeData()
             });
         }
 
-        [ConditionalFact(typeof(Environment), nameof(Environment.Is64BitProcess))] // TensorFlow is 64-bit only
+        [TensorFlowFact] // TensorFlow is 64-bit only
         public void TensorFlowTransformInputShapeTest()
         {
             var modelLocation = "model_shape_test";