From ee640b89f23d2adc44c1265acf91beac2976d717 Mon Sep 17 00:00:00 2001
From: Alexandre Gattiker
Date: Fri, 24 Jan 2020 21:27:59 +0100
Subject: [PATCH 1/3] .

---
 code/scoring/conda_dependencies.yml |  2 +-
 code/scoring/score.py               | 35 ++++++++++++++++++++++-------
 2 files changed, 28 insertions(+), 9 deletions(-)

diff --git a/code/scoring/conda_dependencies.yml b/code/scoring/conda_dependencies.yml
index 9c5505e2..c97a2722 100644
--- a/code/scoring/conda_dependencies.yml
+++ b/code/scoring/conda_dependencies.yml
@@ -34,4 +34,4 @@ dependencies:
   - joblib==0.14.0
   - gunicorn==19.9.0
   - flask==1.1.1
-
+  - inference-schema[numpy-support]
diff --git a/code/scoring/score.py b/code/scoring/score.py
index b78a435c..ad819661 100644
--- a/code/scoring/score.py
+++ b/code/scoring/score.py
@@ -23,24 +23,43 @@ ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE
 POSSIBILITY OF SUCH DAMAGE.
 """
-import json
 import numpy
-from azureml.core.model import Model
 import joblib
+import os
+from inference_schema.schema_decorators \
+    import input_schema, output_schema
+from inference_schema.parameter_types.numpy_parameter_type \
+    import NumpyParameterType


 def init():
+    # load the model from file into a global object
     global model
-    # load the model from file into a global object
-    model_path = Model.get_model_path(
-        model_name="sklearn_regression_model.pkl")
+    # AZUREML_MODEL_DIR is an environment variable created during service
+    # deployment. It contains the path to the folder containing the model.
+    path = os.environ['AZUREML_MODEL_DIR']
+    model_path = None
+    for root, dirs, files in os.walk(path):
+        for file in files:
+            if '.pkl' in file:
+                model_path = os.path.join(path, file)
+    if model_path is None:
+        raise ValueError(".pkl model not found")
     model = joblib.load(model_path)


-def run(raw_data, request_headers):
-    data = json.loads(raw_data)["data"]
-    data = numpy.array(data)
+input_sample = numpy.array([
+    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+    [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]])
+output_sample = numpy.array([10, 20])
+
+
+# Inference_schema generates a schema for your web service
+# It then creates an OpenAPI (Swagger) specification for the web service
+@input_schema('data', NumpyParameterType(input_sample))
+@output_schema(NumpyParameterType(output_sample))
+def run(data, request_headers):
     result = model.predict(data)

     # Demonstrate how we can log custom data into the Application Insights

From 44032208b1842bd4e259d4c8cc44b71b406bd095 Mon Sep 17 00:00:00 2001
From: Alexandre Gattiker
Date: Fri, 24 Jan 2020 23:11:37 +0100
Subject: [PATCH 2/3] Update score.py

---
 code/scoring/score.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/code/scoring/score.py b/code/scoring/score.py
index ad819661..4e36e3e8 100644
--- a/code/scoring/score.py
+++ b/code/scoring/score.py
@@ -52,11 +52,15 @@ def init():
 input_sample = numpy.array([
     [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
     [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]])
-output_sample = numpy.array([10, 20])
+output_sample = numpy.array([
+    5021.509689995557,
+    3693.645386402646])
+


 # Inference_schema generates a schema for your web service
 # It then creates an OpenAPI (Swagger) specification for the web service
+# at http:///swagger.json
 @input_schema('data', NumpyParameterType(input_sample))
 @output_schema(NumpyParameterType(output_sample))
 def run(data, request_headers):

From f1b9c6ff420ce8e2f79c4667541dacadb9c2f66e Mon Sep 17 00:00:00 2001
From: Alexandre Gattiker
Date: Fri, 24 Jan 2020 23:15:03 +0100
Subject: [PATCH 3/3] Update score.py

---
 code/scoring/score.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/code/scoring/score.py b/code/scoring/score.py
index 4e36e3e8..10227fcc 100644
--- a/code/scoring/score.py
+++ b/code/scoring/score.py
@@ -57,7 +57,6 @@ def init():
     3693.645386402646])


-
 # Inference_schema generates a schema for your web service
 # It then creates an OpenAPI (Swagger) specification for the web service
 # at http:///swagger.json
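
For reference, below is a minimal local smoke test of the patched score.py. It is not part of the patch series, and it rests on several assumptions: score.py is importable from the current directory, numpy, joblib and inference-schema are installed, and a trained scikit-learn .pkl model sits in a local folder named "model_dir" (that folder name is invented here purely for illustration).

# Hypothetical local smoke test for the patched score.py (a sketch, not part
# of the patches). Assumes a .pkl model in ./model_dir and score.py on the
# import path.
import os

import numpy

# init() reads AZUREML_MODEL_DIR; an Azure ML deployment normally sets it.
os.environ["AZUREML_MODEL_DIR"] = "model_dir"  # assumed local folder

import score  # the module patched above

score.init()

# A deployed service would receive JSON like
# {"data": [[1, 2, ..., 10], [10, 9, ..., 1]]}, and inference-schema would
# deserialize it into a numpy array before calling run(). Locally we build
# the array ourselves and pass empty request headers.
sample = numpy.array([
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]])
print(score.run(sample, {}))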