# API Code snippets

## Converting between MLModel and Spec

```python
import coremltools

# Load MLModel
mlmodel = coremltools.models.MLModel('path/to/the/model.mlmodel')

# use model for prediction
mlmodel.predict(...)

# save the model
mlmodel.save('path/to/the/saved/model.mlmodel')

# Get spec from the model
spec = mlmodel.get_spec()

# print input/output description for the model
print(spec.description)

# get the type of Model (NeuralNetwork, SupportVectorRegressor, Pipeline etc)
print(spec.WhichOneof('Type'))

# save out the model directly from the spec
coremltools.models.utils.save_spec(spec, 'path/to/the/saved/model.mlmodel')

# convert spec to MLModel, this step compiles the model as well
mlmodel = coremltools.models.MLModel(spec)

# Load the spec from the saved .mlmodel file directly
spec = coremltools.models.utils.load_spec('path/to/the/model.mlmodel')
```
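A common reason to drop down to the spec is to edit model metadata before converting back to an MLModel. The following is a minimal sketch, assuming the metadata field names from Core ML's ModelDescription protobuf (`shortDescription`, `author`, `versionString`); the values are placeholders.

```python
import coremltools

# Load the spec, edit its metadata, and re-create the MLModel (placeholder values)
spec = coremltools.models.utils.load_spec('path/to/the/model.mlmodel')

spec.description.metadata.shortDescription = 'A short description of the model'
spec.description.metadata.author = 'Author name'
spec.description.metadata.versionString = '1.0'

# Re-create the MLModel from the edited spec and save it
mlmodel = coremltools.models.MLModel(spec)
mlmodel.save('path/to/the/saved/model.mlmodel')
```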

## Visualizing Neural Network Core ML models

```python
import coremltools

nn_mlmodel = coremltools.models.MLModel('path/to/the/model.mlmodel')
nn_mlmodel.visualize_spec()

# To print a succinct description of the neural network
spec = nn_mlmodel.get_spec()
from coremltools.models.neural_network.printer import print_network_spec
print_network_spec(spec)
```

Another useful tool for visualizing Core ML models, as well as models from other frameworks, is [Netron](https://github.com/lutzroeder/netron).
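For a quick textual overview without a visualizer, you can also walk the layers of the spec directly. A minimal sketch, assuming the model type is a plain `neuralNetwork` (for classifiers or regressors, pick the corresponding field as shown in the next section):

```python
import coremltools

spec = coremltools.models.utils.load_spec('path/to/the/model.mlmodel')

# Print, for each layer: its index, name, layer type, and input/output blob names
for i, layer in enumerate(spec.neuralNetwork.layers):
    print(i, layer.name, layer.WhichOneof('layer'), list(layer.input), list(layer.output))
```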

## Printing the pre-processing parameters

This is useful for image-based neural network models.

```python
import coremltools

spec = coremltools.models.utils.load_spec('path/to/the/saved/model.mlmodel')

# Get the neural network portion of the spec
if spec.WhichOneof('Type') == 'neuralNetworkClassifier':
    nn = spec.neuralNetworkClassifier
elif spec.WhichOneof('Type') == 'neuralNetwork':
    nn = spec.neuralNetwork
elif spec.WhichOneof('Type') == 'neuralNetworkRegressor':
    nn = spec.neuralNetworkRegressor
else:
    raise ValueError('MLModel must have a neural network')

print(nn.preprocessing)
```
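The pre-processing parameters can also be edited in place. The sketch below is a rough example, assuming the model has at least one preprocessing entry and that it uses the image scaler (the `channelScale` and per-channel bias fields come from the NeuralNetworkImageScaler message); the values are placeholders.

```python
# Continuing from above: `nn` is the neural network portion of the spec
preprocessing = nn.preprocessing[0]

# Overwrite the image scaler parameters (placeholder values)
preprocessing.scaler.channelScale = 1.0 / 255.0
preprocessing.scaler.redBias = -0.5
preprocessing.scaler.greenBias = -0.5
preprocessing.scaler.blueBias = -0.5

# Save the edited spec back out
coremltools.models.utils.save_spec(spec, 'path/to/the/saved/model.mlmodel')
```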

## Changing MLMultiArray input/output datatypes

The supported datatypes are listed in the ArrayFeatureType message of FeatureTypes.proto. For instance, to change the datatype from 'double' to 'float32':

```python
import coremltools
from coremltools.proto import FeatureTypes_pb2 as ft

model = coremltools.models.MLModel('path/to/the/saved/model.mlmodel')
spec = model.get_spec()

def _set_type_as_float32(feature):
    if feature.type.HasField('multiArrayType'):
        feature.type.multiArrayType.dataType = ft.ArrayFeatureType.FLOAT32

# iterate over the inputs
for input_ in spec.description.input:
    _set_type_as_float32(input_)

# iterate over the outputs
for output_ in spec.description.output:
    _set_type_as_float32(output_)

model = coremltools.models.MLModel(spec)
model.save('path/to/the/saved/model.mlmodel')
```
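To verify the change, you can print the multi-array datatype of each input and output. The sketch below uses the generated protobuf enum's `Name()` lookup to turn the enum value back into a readable string:

```python
# Print the resulting datatypes to confirm the inputs/outputs are now FLOAT32
for feature in list(spec.description.input) + list(spec.description.output):
    if feature.type.HasField('multiArrayType'):
        dtype = feature.type.multiArrayType.dataType
        print(feature.name, ft.ArrayFeatureType.ArrayDataType.Name(dtype))
```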

## Prediction with an image input

An mlmodel that takes an input of type image requires a PIL image during the prediction call.

```python
import coremltools
import numpy as np
import PIL.Image

model = coremltools.models.MLModel('path/to/the/saved/model.mlmodel')

Height = 20  # use the correct input image height
Width = 60   # use the correct input image width

# Scenario 1: load an image from disk
def load_image(path, resize_to=None):
    # resize_to: (Width, Height)
    img = PIL.Image.open(path)
    if resize_to is not None:
        img = img.resize(resize_to, PIL.Image.ANTIALIAS)
    img_np = np.array(img).astype(np.float32)
    return img_np, img

# load the image and resize using PIL utilities
_, img = load_image('/path/to/image.jpg', resize_to=(Width, Height))
out_dict = model.predict({'image': img})

# Scenario 2: load an image from a numpy array
shape = (Height, Width, 3)  # height x width x RGB
data = np.zeros(shape, dtype=np.uint8)
# manipulate numpy data
pil_img = PIL.Image.fromarray(data)
out_dict = model.predict({'image': pil_img})
```
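Rather than hard-coding `Height` and `Width`, the expected input image size can usually be read from the model's spec. A minimal sketch, assuming the image input is the first input and has a fixed (non-flexible) size; the `width` and `height` fields come from the ImageFeatureType message:

```python
# Read the expected image size from the model's spec instead of hard-coding it
spec = model.get_spec()
image_input = spec.description.input[0]  # assumes the first input is the image
if image_input.type.HasField('imageType'):
    Width = image_input.type.imageType.width
    Height = image_input.type.imageType.height
    print('expected input image size:', Width, 'x', Height)
```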