update tensorflow.net to 0.20.0 #5404

Merged
28 commits
c073e6b
upgrade to 3.1
LittleLittleCloud Apr 28, 2020
8f0fc1a
write inline data using invariantCulture
LittleLittleCloud Apr 28, 2020
e96d716
Merge branch 'master' of https://github.com/dotnet/machinelearning
LittleLittleCloud Apr 29, 2020
8c17bbe
Merge branch 'master' of https://github.com/dotnet/machinelearning
LittleLittleCloud May 14, 2020
b2947f5
Merge branch 'master' of https://github.com/dotnet/machinelearning
LittleLittleCloud May 18, 2020
ff1c909
Merge branch 'master' of https://github.com/dotnet/machinelearning
LittleLittleCloud May 20, 2020
dad9055
Merge branch 'master' of https://github.com/dotnet/machinelearning
LittleLittleCloud Jul 20, 2020
741d77a
Merge branch 'master' of https://github.com/dotnet/machinelearning
LittleLittleCloud Jul 21, 2020
594c828
Merge branch 'master' of https://github.com/dotnet/machinelearning
LittleLittleCloud Aug 26, 2020
cc4def8
Merge branch 'master' of https://github.com/dotnet/machinelearning
LittleLittleCloud Sep 14, 2020
7cd6d8a
upodate tensorflow
Sep 21, 2020
fdf7b73
update Microsoft.ML.Vision
Sep 21, 2020
d0c261e
fix test && comment
Sep 28, 2020
48064a1
udpate tensorflow.net to 0.20.1
Sep 29, 2020
35d4e25
update tf major version
Sep 29, 2020
2aafe7b
downgrade tf runtime to 1.14.1
Sep 29, 2020
e7e657c
Update Dependencies.props
LittleLittleCloud Sep 30, 2020
b92bc45
Update Dependencies.props
LittleLittleCloud Oct 8, 2020
b314ea5
update tffact to stop running test on linux with glibc < 2.3)
Oct 8, 2020
80f3c16
fix TensorFlowTransformInputShapeTest
Oct 8, 2020
65271c3
use tf.v1 api
Oct 8, 2020
ec4a5ee
fix comment:
LittleLittleCloud Oct 9, 2020
df6cbcb
fix building error
LittleLittleCloud Oct 9, 2020
0610a9b
Merge branch 'master' of https://github.com/dotnet/machinelearning
LittleLittleCloud Oct 12, 2020
159d17c
Merge branch 'master' into u/xiaoyun/updateTensorflow.Net
LittleLittleCloud Oct 12, 2020
3d00c41
fix test
LittleLittleCloud Oct 12, 2020
fae6864
fix nit
LittleLittleCloud Oct 12, 2020
5f3e34b
remove linq
LittleLittleCloud Oct 13, 2020
6 changes: 3 additions & 3 deletions build/Dependencies.props
@@ -22,9 +22,9 @@
<SystemDrawingCommonPackageVersion>4.5.0</SystemDrawingCommonPackageVersion>
<SystemIOFileSystemAccessControl>4.5.0</SystemIOFileSystemAccessControl>
<SystemSecurityPrincipalWindows>4.5.0</SystemSecurityPrincipalWindows>
<TensorFlowVersion>1.14.0</TensorFlowVersion>
<TensorFlowMajorVersion>1</TensorFlowMajorVersion>
<TensorflowDotNETVersion>0.11.8.1</TensorflowDotNETVersion>
<TensorFlowVersion>2.3.1</TensorFlowVersion>
<TensorFlowMajorVersion>2</TensorFlowMajorVersion>
<TensorflowDotNETVersion>0.20.1</TensorflowDotNETVersion>
</PropertyGroup>

<!-- Model Builder Dependencies -->
1 change: 1 addition & 0 deletions src/Microsoft.ML.TensorFlow/TensorTypeExtensions.cs
@@ -5,6 +5,7 @@
using System;
using Microsoft.ML.Internal.Utilities;
using Tensorflow;
using Utils = Microsoft.ML.Internal.Utilities.Utils;

namespace Microsoft.ML.TensorFlow
{
60 changes: 13 additions & 47 deletions src/Microsoft.ML.TensorFlow/TensorflowTransform.cs
@@ -20,6 +20,7 @@
using Tensorflow;
using static Microsoft.ML.TensorFlow.TensorFlowUtils;
using static Tensorflow.Binding;
using Utils = Microsoft.ML.Internal.Utilities.Utils;

[assembly: LoadableClass(TensorFlowTransformer.Summary, typeof(IDataTransform), typeof(TensorFlowTransformer),
typeof(TensorFlowEstimator.Options), typeof(SignatureDataTransform), TensorFlowTransformer.UserName, TensorFlowTransformer.ShortName)]
@@ -280,6 +281,7 @@ internal TensorFlowTransformer(IHostEnvironment env, Session session, string[] o
_addBatchDimensionInput = addBatchDimensionInput;
Inputs = inputColumnNames;
Outputs = outputColumnNames;
tf.compat.v1.disable_eager_execution();

(TFOutputTypes, OutputTypes, TFOutputOperations) = GetOutputInfo(Host, Session, Outputs);
(TFInputTypes, TFInputShapes, TFInputOperations) = GetInputInfo(Host, Session, Inputs, batchSize);
@@ -344,15 +346,15 @@ internal static TensorShape GetTensorShape(TF_Output output, Graph graph, Status
new ObjectDisposedException(nameof(graph));

var cstatus = status == null ? new Status() : status;
var n = c_api.TF_GraphGetTensorNumDims(graph, output, cstatus);
var n = c_api.TF_GraphGetTensorNumDims(graph, output, cstatus.Handle);

cstatus.Check();

if (n == -1)
return new TensorShape(new int[0]);

var dims = new long[n];
c_api.TF_GraphGetTensorShape(graph, output, dims, dims.Length, cstatus);
c_api.TF_GraphGetTensorShape(graph, output, dims, dims.Length, cstatus.Handle);
cstatus.Check();
return new TensorShape(dims.Select(x => (int)x).ToArray());
}
@@ -426,12 +428,14 @@ private protected override void SaveModel(ModelSaveContext ctx)
ctx.Writer.WriteBoolByte(_addBatchDimensionInput);
if (isFrozen)
{
Status status = new Status();
var buffer = Session.graph.ToGraphDef(status);
ctx.SaveBinaryStream("TFModel", w =>
using (var status = new Status())
using (var buffer = Session.graph.ToGraphDef(status))
{
w.WriteByteArray(buffer.MemoryBlock.ToArray());
});
ctx.SaveBinaryStream("TFModel", w =>
{
w.WriteByteArray(buffer.DangerousMemoryBlock.ToArray());
});
}
}

Host.AssertNonEmpty(Inputs);
@@ -801,48 +805,10 @@ public Tensor GetTensor()
// This is done to reduce memory allocation every time tensor is created.
_denseData = new T[_vBuffer.Length];
_vBuffer.CopyTo(_denseData);
var tensor = CastDataAndReturnAsTensor(_denseData);
var tensor = TensorFlowUtils.CastDataAndReturnAsTensor(_denseData, _tfShape);
return tensor;
}

private Tensor CastDataAndReturnAsTensor(T[] data)
{
if (typeof(T) == typeof(sbyte))
return new Tensor((sbyte[])(object)data, _dims, TF_DataType.TF_INT8);
else if (typeof(T) == typeof(long))
return new Tensor((long[])(object)data, _dims, TF_DataType.TF_INT64);
else if (typeof(T) == typeof(Int32))
return new Tensor((Int32[])(object)data, _dims, TF_DataType.TF_INT32);
else if (typeof(T) == typeof(Int16))
return new Tensor((Int16[])(object)data, _dims, TF_DataType.TF_INT16);
else if (typeof(T) == typeof(byte))
return new Tensor((byte[])(object)data, _dims, TF_DataType.TF_UINT8);
else if (typeof(T) == typeof(ulong))
return new Tensor((ulong[])(object)data, _dims, TF_DataType.TF_UINT64);
else if (typeof(T) == typeof(UInt32))
return new Tensor((UInt32[])(object)data, _dims, TF_DataType.TF_UINT32);
else if (typeof(T) == typeof(UInt16))
return new Tensor((UInt16[])(object)data, _dims, TF_DataType.TF_UINT16);
else if (typeof(T) == typeof(bool))
return new Tensor((bool[])(object)data, _dims, TF_DataType.TF_BOOL);
else if (typeof(T) == typeof(float))
return new Tensor((float[])(object)data, _dims, TF_DataType.TF_FLOAT);
else if (typeof(T) == typeof(double))
return new Tensor((double[])(object)data, _dims, TF_DataType.TF_DOUBLE);
else if (typeof(T) == typeof(ReadOnlyMemory<char>))
{
byte[][] bytes = new byte[_vBuffer.Length][];
for (int i = 0; i < bytes.Length; i++)
{
bytes[i] = Encoding.UTF8.GetBytes(((ReadOnlyMemory<char>)(object)data[i]).ToArray());
}

return new Tensor(bytes, _tfShape.dims.Select(x => (long)x).ToArray());
}

return new Tensor(new NDArray(data, _tfShape));
}

public void BufferTrainingData()
{
_srcgetter(ref _vBuffer);
@@ -853,7 +819,7 @@ public void BufferTrainingData()
public Tensor GetBufferedBatchTensor()
{
_position = 0;
var tensor = CastDataAndReturnAsTensor(_bufferedData);
var tensor = TensorFlowUtils.CastDataAndReturnAsTensor(_denseData, _tfShape);

_bufferedData = new T[_bufferedDataSize];
return tensor;
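For reference, a minimal sketch of the graph-serialization pattern introduced in the SaveModel hunk above: TensorFlow.NET 0.20.x exposes the serialized GraphDef bytes through DangerousMemoryBlock, and the Status and Buffer objects are now disposed with using blocks. The SaveGraph wrapper and the plain BinaryWriter are illustrative assumptions, not code from this PR.

    using System.IO;
    using Tensorflow;

    internal static class GraphSaveSketch
    {
        // Writes the session's graph as a serialized GraphDef, disposing the
        // native Status and Buffer handles as soon as the bytes are copied out.
        internal static void SaveGraph(Session session, BinaryWriter writer)
        {
            using (var status = new Status())
            using (var buffer = session.graph.ToGraphDef(status))
            {
                writer.Write(buffer.DangerousMemoryBlock.ToArray());
            }
        }
    }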
44 changes: 43 additions & 1 deletion src/Microsoft.ML.TensorFlow/TensorflowUtils.cs
@@ -12,8 +12,10 @@
using Microsoft.ML.Runtime;
using Microsoft.ML.TensorFlow;
using Microsoft.ML.Transforms;
using NumSharp;
using Tensorflow;
using static Tensorflow.Binding;
using Utils = Microsoft.ML.Internal.Utilities.Utils;

namespace Microsoft.ML.TensorFlow
{
@@ -410,6 +412,46 @@ internal static bool IsTypeSupported(TF_DataType tfoutput)
}
}

internal static Tensor CastDataAndReturnAsTensor<T>(T[] data, TensorShape tfShape)
{
var dims = tfShape.dims.Select(x => (long)x).ToArray();

if (typeof(T) == typeof(sbyte))
return new Tensor((sbyte[])(object)data, dims, TF_DataType.TF_INT8);
else if (typeof(T) == typeof(long))
return new Tensor((long[])(object)data, dims, TF_DataType.TF_INT64);
else if (typeof(T) == typeof(Int32))
return new Tensor((Int32[])(object)data, dims, TF_DataType.TF_INT32);
else if (typeof(T) == typeof(Int16))
return new Tensor((Int16[])(object)data, dims, TF_DataType.TF_INT16);
else if (typeof(T) == typeof(byte))
return new Tensor((byte[])(object)data, dims, TF_DataType.TF_UINT8);
else if (typeof(T) == typeof(ulong))
return new Tensor((ulong[])(object)data, dims, TF_DataType.TF_UINT64);
else if (typeof(T) == typeof(UInt32))
return new Tensor((UInt32[])(object)data, dims, TF_DataType.TF_UINT32);
else if (typeof(T) == typeof(UInt16))
return new Tensor((UInt16[])(object)data, dims, TF_DataType.TF_UINT16);
else if (typeof(T) == typeof(bool))
return new Tensor((bool[])(object)data, dims, TF_DataType.TF_BOOL);
else if (typeof(T) == typeof(float))
return new Tensor((float[])(object)data, dims, TF_DataType.TF_FLOAT);
else if (typeof(T) == typeof(double))
return new Tensor((double[])(object)data, dims, TF_DataType.TF_DOUBLE);
else if (typeof(T) == typeof(ReadOnlyMemory<char>))
{
string[] strings = new string[data.Length];
for (int i = 0; i < strings.Length; i++)
{
strings[i] = data[i].ToString();
}

return new Tensor(strings);
}

return new Tensor(new NDArray(data, tfShape));
}

/// <summary>
/// Use the runner class to easily configure inputs, outputs and targets to be passed to the session runner.
/// </summary>
@@ -491,7 +533,7 @@ public Tensor[] Run()
{
c_api.TF_SessionRun(_session, null, _inputs, _inputValues,
_inputs.Length, _outputs, _outputValues, _outputValues.Length, _operations,
_operations.Length, IntPtr.Zero, _status);
_operations.Length, IntPtr.Zero, _status.Handle);
}
catch (Exception ex)
{
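The new shared helper above replaces the near-identical CastDataAndReturnAsTensor copies that TensorflowTransform.cs and DnnRetrainTransform.cs previously carried. A minimal usage sketch, assuming TensorFlow.NET 0.20.x and a caller inside the Microsoft.ML.TensorFlow assembly (the helper is internal); the sample data and shapes are made up for illustration:

    using System;
    using Microsoft.ML.TensorFlow;
    using Tensorflow;

    internal static class CastHelperUsageSketch
    {
        // A dense float buffer becomes a TF_FLOAT tensor with the requested shape.
        internal static Tensor FloatExample()
        {
            float[] data = { 1f, 2f, 3f, 4f, 5f, 6f };
            return TensorFlowUtils.CastDataAndReturnAsTensor(data, new TensorShape(2, 3));
        }

        // ReadOnlyMemory<char> elements are converted to strings and passed to
        // TensorFlow.NET as a string tensor, replacing the hand-built UTF-8 byte
        // arrays used with the old 0.11.x binding.
        internal static Tensor StringExample()
        {
            ReadOnlyMemory<char>[] data = { "cat".AsMemory(), "dog".AsMemory() };
            return TensorFlowUtils.CastDataAndReturnAsTensor(data, new TensorShape(2));
        }
    }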
87 changes: 6 additions & 81 deletions src/Microsoft.ML.Vision/DnnRetrainTransform.cs
@@ -19,6 +19,7 @@
using Tensorflow;
using static Microsoft.ML.TensorFlow.TensorFlowUtils;
using static Tensorflow.Binding;
using Utils = Microsoft.ML.Internal.Utilities.Utils;

[assembly: LoadableClass(DnnRetrainTransformer.Summary, typeof(IDataTransform), typeof(DnnRetrainTransformer),
typeof(DnnRetrainEstimator.Options), typeof(SignatureDataTransform), DnnRetrainTransformer.UserName, DnnRetrainTransformer.ShortName)]
@@ -607,15 +608,15 @@ internal static TensorShape GetTensorShape(TF_Output output, Graph graph, Status
new ObjectDisposedException(nameof(graph));

var cstatus = status == null ? new Status() : status;
var n = c_api.TF_GraphGetTensorNumDims(graph, output, cstatus);
var n = c_api.TF_GraphGetTensorNumDims(graph, output, cstatus.Handle);

cstatus.Check();

if (n == -1)
return new TensorShape(new int[0]);

var dims = new long[n];
c_api.TF_GraphGetTensorShape(graph, output, dims, dims.Length, cstatus);
c_api.TF_GraphGetTensorShape(graph, output, dims, dims.Length, cstatus.Handle);
cstatus.Check();
return new TensorShape(dims.Select(x => (int)x).ToArray());
}
@@ -1040,49 +1041,11 @@ public Tensor GetBufferedBatchTensor()
}
else
{
var tensor = CastDataAndReturnAsTensor(_bufferedData);
var tensor = TensorFlowUtils.CastDataAndReturnAsTensor(_bufferedData, _tfShape);
_position = 0;
return tensor;
}
}

private Tensor CastDataAndReturnAsTensor(T[] data)
{
if (typeof(T) == typeof(sbyte))
return new Tensor((sbyte[])(object)data, _dims, TF_DataType.TF_INT8);
else if (typeof(T) == typeof(long))
return new Tensor((long[])(object)data, _dims, TF_DataType.TF_INT64);
else if (typeof(T) == typeof(Int32))
return new Tensor((Int32[])(object)data, _dims, TF_DataType.TF_INT32);
else if (typeof(T) == typeof(Int16))
return new Tensor((Int16[])(object)data, _dims, TF_DataType.TF_INT16);
else if (typeof(T) == typeof(byte))
return new Tensor((byte[])(object)data, _dims, TF_DataType.TF_UINT8);
else if (typeof(T) == typeof(ulong))
return new Tensor((ulong[])(object)data, _dims, TF_DataType.TF_UINT64);
else if (typeof(T) == typeof(UInt32))
return new Tensor((UInt32[])(object)data, _dims, TF_DataType.TF_UINT32);
else if (typeof(T) == typeof(UInt16))
return new Tensor((UInt16[])(object)data, _dims, TF_DataType.TF_UINT16);
else if (typeof(T) == typeof(bool))
return new Tensor((bool[])(object)data, _dims, TF_DataType.TF_BOOL);
else if (typeof(T) == typeof(float))
return new Tensor((float[])(object)data, _dims, TF_DataType.TF_FLOAT);
else if (typeof(T) == typeof(float))
return new Tensor((double[])(object)data, _dims, TF_DataType.TF_DOUBLE);
else if (typeof(T) == typeof(ReadOnlyMemory<char>))
{
byte[][] bytes = new byte[_bufferedData.Length][];
for (int i = 0; i < bytes.Length; i++)
{
bytes[i] = Encoding.UTF8.GetBytes(((ReadOnlyMemory<char>)(object)data[i]).ToArray());
}

return new Tensor(bytes, _tfShape.dims.Select(x => (long)x).ToArray());
}

return new Tensor(new NDArray(data, _tfShape));
}
}

private class TensorValueGetterVec<T> : ITensorValueGetter
@@ -1126,45 +1089,7 @@ public Tensor GetTensor()
// This is done to reduce memory allocation every time tensor is created.
_denseData = new T[_vBuffer.Length];
_vBuffer.CopyTo(_denseData);
return CastDataAndReturnAsTensor(_denseData);
}

private Tensor CastDataAndReturnAsTensor(T[] data)
{
if (typeof(T) == typeof(sbyte))
return new Tensor((sbyte[])(object)data, _dims, TF_DataType.TF_INT8);
else if (typeof(T) == typeof(long))
return new Tensor((long[])(object)data, _dims, TF_DataType.TF_INT64);
else if (typeof(T) == typeof(Int32))
return new Tensor((Int32[])(object)data, _dims, TF_DataType.TF_INT32);
else if (typeof(T) == typeof(Int16))
return new Tensor((Int16[])(object)data, _dims, TF_DataType.TF_INT16);
else if (typeof(T) == typeof(byte))
return new Tensor((byte[])(object)data, _dims, TF_DataType.TF_UINT8);
else if (typeof(T) == typeof(ulong))
return new Tensor((ulong[])(object)data, _dims, TF_DataType.TF_UINT64);
else if (typeof(T) == typeof(UInt32))
return new Tensor((UInt32[])(object)data, _dims, TF_DataType.TF_UINT32);
else if (typeof(T) == typeof(UInt16))
return new Tensor((UInt16[])(object)data, _dims, TF_DataType.TF_UINT16);
else if (typeof(T) == typeof(bool))
return new Tensor((bool[])(object)data, _dims, TF_DataType.TF_BOOL);
else if (typeof(T) == typeof(float))
return new Tensor((float[])(object)data, _dims, TF_DataType.TF_FLOAT);
else if (typeof(T) == typeof(double))
return new Tensor((double[])(object)data, _dims, TF_DataType.TF_DOUBLE);
else if (typeof(T) == typeof(ReadOnlyMemory<char>))
{
byte[][] bytes = new byte[_vBuffer.Length][];
for (int i = 0; i < bytes.Length; i++)
{
bytes[i] = Encoding.UTF8.GetBytes(((ReadOnlyMemory<char>)(object)data[i]).ToArray());
}

return new Tensor(bytes, _tfShape.dims.Select(x => (long)x).ToArray());
}

return new Tensor(new NDArray(data, _tfShape));
return TensorFlowUtils.CastDataAndReturnAsTensor(_denseData, _tfShape);
}

public void BufferTrainingData()
@@ -1177,7 +1102,7 @@ public void BufferTrainingData()
public Tensor GetBufferedBatchTensor()
{
_position = 0;
var tensor = CastDataAndReturnAsTensor(_bufferedData);
var tensor = TensorFlowUtils.CastDataAndReturnAsTensor(_bufferedData, _tfShape);
_bufferedData = new T[_bufferedDataSize];
return tensor;
}
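For context on the call sites above: the value getters buffer rows into a fixed-size array and convert the whole batch with the shared helper. A condensed, hypothetical version of that pattern, assuming the same TensorFlow.NET types (class and member names are illustrative; only the helper call mirrors the diff):

    using System;
    using Microsoft.ML.TensorFlow;
    using Tensorflow;

    internal sealed class BufferedBatchSketch<T>
    {
        private readonly TensorShape _tfShape;
        private readonly int _bufferedDataSize;
        private T[] _bufferedData;
        private int _position;

        internal BufferedBatchSketch(TensorShape tfShape, int batchSize, int rowLength)
        {
            _tfShape = tfShape;
            _bufferedDataSize = batchSize * rowLength;
            _bufferedData = new T[_bufferedDataSize];
        }

        // Copies one row into the batch buffer.
        internal void Buffer(T[] row)
        {
            Array.Copy(row, 0, _bufferedData, _position, row.Length);
            _position += row.Length;
        }

        // Converts the accumulated batch to a Tensor, then resets the buffer.
        internal Tensor GetBufferedBatchTensor()
        {
            _position = 0;
            var tensor = TensorFlowUtils.CastDataAndReturnAsTensor(_bufferedData, _tfShape);
            _bufferedData = new T[_bufferedDataSize];
            return tensor;
        }
    }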