Commit 5765495

7.0b2 Release (#1945)

* 7.0b2 Release
* Fix flake8 errors
* Delete comment copied from trees documentation
* Skip unit test

1 parent d52d536 commit 5765495

File tree

128 files changed: +14460 −5978 lines changed


coremlpython/CoreMLPythonUtils.mm

Lines changed: 26 additions & 21 deletions
@@ -444,35 +444,40 @@ static size_t sizeOfArrayElement(MLMultiArrayDataType type) {
         return py::none();
     }
     MLMultiArrayDataType type = value.dataType;
+    if (type == MLMultiArrayDataTypeFloat16) {
+        // Cast to fp32 because py:array doesn't support fp16.
+        // TODO: rdar://92239209 : return np.float16 instead of np.float32 when multiarray type is Float16
+        value = [MLMultiArray multiArrayByConcatenatingMultiArrays:@[value] alongAxis:0 dataType:MLMultiArrayDataTypeFloat32];
+        type = value.dataType;
+    }
     std::vector<size_t> shape = Utils::convertNSArrayToCpp(value.shape);
     std::vector<size_t> strides = Utils::convertNSArrayToCpp(value.strides);

     // convert strides to numpy (bytes) instead of mlkit (elements)
     for (size_t& stride : strides) {
         stride *= sizeOfArrayElement(type);
     }
-
-    switch (type) {
-        case MLMultiArrayDataTypeInt32:
-            return py::array(shape, strides, static_cast<const int32_t*>(value.dataPointer));
-        case MLMultiArrayDataTypeFloat32:
-            return py::array(shape, strides, static_cast<const float*>(value.dataPointer));
-        case MLMultiArrayDataTypeFloat16:
-        {
-            // create a float32 array, cast float16 values and copy into it
-            // TODO: rdar://92239209 : return np.float16 instead of np.float32 when multiarray type is Float16
-            std::vector<float> value_fp32(value.count, 0.0);
-            for (size_t i=0; i<value.count; i++) {
-                value_fp32[i] = [value[i] floatValue];
-            }
-            return py::array(shape, strides, value_fp32.data());
+
+    __block py::object array;
+    [value getBytesWithHandler:^(const void *bytes, NSInteger size) {
+        switch (type) {
+            case MLMultiArrayDataTypeInt32:
+                array = py::array(shape, strides, reinterpret_cast<const int32_t *>(bytes));
+                break;
+            case MLMultiArrayDataTypeFloat32:
+                array = py::array(shape, strides, reinterpret_cast<const float *>(bytes));
+                break;
+            case MLMultiArrayDataTypeFloat64:
+                array = py::array(shape, strides, reinterpret_cast<const double *>(bytes));
+                break;
+            default:
+                assert(false);
+                array = py::object();
         }
-        case MLMultiArrayDataTypeDouble:
-            return py::array(shape, strides, static_cast<const double*>(value.dataPointer));
-        default:
-            assert(false);
-            return py::object();
-    }
+    }];
+
+    return array;
 }

 py::object Utils::convertDictionaryValueToPython(NSDictionary<NSObject *,NSNumber *> * dict) {
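
The practical effect of this hunk, seen from Python: Float16 multiarrays are widened to fp32 before crossing into ``py::array``, and all copies now go through ``getBytesWithHandler`` rather than ``value.dataPointer``. A minimal sketch of what a caller observes, assuming a compiled model with a Float16 multiarray output (the package path and feature names are hypothetical):

```python
import numpy as np
import coremltools as ct

# Hypothetical package and feature names, purely for illustration.
model = ct.models.MLModel("model_with_fp16_output.mlpackage")
out = model.predict({"x": np.zeros((1, 3), dtype=np.float32)})

# Per the TODO above (rdar://92239209), fp16 multiarrays are cast to fp32
# before being wrapped, so NumPy still sees float32 for now.
print(out["y"].dtype)  # expected: float32
```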

coremltools/converters/_converters_entry.py

Lines changed: 45 additions & 14 deletions
@@ -14,6 +14,7 @@
 )
 from coremltools import ComputeUnit as _ComputeUnit
 from coremltools import __version__ as _ct_version
+from coremltools import _logger as logger
 from coremltools._deps import _HAS_TF_1, _HAS_TF_2, _HAS_TORCH
 from coremltools.converters._profile_utils import _profile
 from coremltools.converters.mil._deployment_compatibility import (
@@ -156,6 +157,8 @@ def convert(
         ``ct.utils.rename_feature`` API.
       - If ``dtype`` is not specified, it defaults to the ``dtype`` of the
         inputs in the TF model.
+      - For ``minimum_deployment_target >= ct.target.macOS13``, with ``compute_precision`` in float 16 precision:
+        when ``inputs`` is not provided or ``dtype`` is not specified, float 32 inputs default to float 16.

     * PyTorch:
       - The ``inputs`` parameter is required.
166169
- If the ``name`` argument is specified with ``TensorType`` or
167170
``ImageType``, the converted Core ML model will have inputs with
168171
the same name.
169-
- If ``dtype`` is missing, it defaults to float 32.
172+
- If ``dtype`` is missing:
173+
* For ``minimum_deployment_target <= ct.target.macOS12``, it defaults to float 32.
174+
* For ``minimum_deployment_target >= ct.target.macOS13``, and with ``compute_precision`` in float 16 precision.
175+
It defaults to float 16.
170176
171177
outputs : list of ``TensorType`` or ``ImageType`` (optional)
172178
@@ -206,13 +212,20 @@ def convert(
206212
- If specified, the ``name`` with ``TensorType`` or ``ImageType``
207213
must correspond to a node in the TF graph. In this case, the model
208214
will be converted up to that node.
215+
- For ``minimum_deployment_target >= ct.target.macOS13``, and with ``compute_precision`` in float 16 precision.
216+
If ``dtype`` not specified, the outputs inferred of type float 32
217+
defaults to float 16.
209218
210219
* PyTorch:
211220
212221
- If specified, the length of the list must match the number of
213222
outputs returned by the PyTorch model.
214223
- If ``name`` is specified, it is applied to the output names of the
215224
converted Core ML model.
225+
- For ``minimum_deployment_target >= ct.target.macOS13``, and with ``compute_precision`` in float 16 precision.
226+
If ``dtype`` not specified, the outputs inferred of type float 32
227+
defaults to float 16.
228+
216229
217230
classifier_config : ClassifierConfig class (optional)
218231
The configuration if the MLModel is intended to be a classifier.
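
A sketch of the fp16 I/O defaults documented above, assuming a small traced PyTorch model (the layer and shapes are placeholders):

```python
import coremltools as ct
import torch

torch_model = torch.jit.trace(torch.nn.Linear(4, 2).eval(), torch.rand(1, 4))

# macOS13 target plus the default fp16 compute precision: with no dtype
# given, float 32 inputs and outputs of the mlprogram default to float 16.
mlmodel = ct.convert(
    torch_model,
    inputs=[ct.TensorType(shape=(1, 4))],
    minimum_deployment_target=ct.target.macOS13,
)

desc = mlmodel.get_spec().description
print(desc.input[0].type.multiArrayType.dataType)   # expected: FLOAT16
print(desc.output[0].type.multiArrayType.dataType)  # expected: FLOAT16
```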
@@ -221,7 +234,7 @@ def convert(
     A member of the ``coremltools.target`` enum.
     The value of this parameter determines the type of the model
     representation produced by the converter. To learn about the differences
-    between neural networks and ML programs, see
+    between ML programs and neural networks, see
     `ML Programs <https://coremltools.readme.io/docs/ml-programs>`_.

     - The converter produces a neural network (``neuralnetwork``) if:
@@ -239,33 +252,34 @@ def convert(
       coremltools.target.tvOS15:

     - If neither the ``minimum_deployment_target`` nor the ``convert_to``
-      parameter is specified, the converter produces the neural network
+      parameter is specified, the converter produces the ML program
       model type with as minimum of a deployment target as possible.
     - If this parameter is specified and ``convert_to`` is also specified,
       they must be compatible. The following are examples of invalid values:
       ::
-          # Invalid:
-          convert_to="neuralnetwork", minimum_deployment_target=coremltools.target.iOS15
           # Invalid:
           convert_to="mlprogram", minimum_deployment_target=coremltools.target.iOS14

+          # Invalid:
+          convert_to="neuralnetwork", minimum_deployment_target=coremltools.target.iOS15
+
 convert_to : str (optional)
-    Must be one of [``'neuralnetwork'``, ``'mlprogram'``, ``'milinternal'``].
+    Must be one of [``'mlprogram'``, ``'neuralnetwork'``, ``'milinternal'``].
     The value of this parameter determines the type of the model
     representation produced by the converter. To learn about the
-    differences between neural networks and ML programs, see
+    differences between ML programs and neural networks, see
     `ML Programs <https://coremltools.readme.io/docs/ml-programs>`_.

+    - ``'mlprogram'``: Returns an MLModel (``coremltools.models.MLModel``)
+      containing a MILSpec.Program proto, which is the Core ML program format.
+      The model saved from this returned object is executable on iOS15,
+      macOS12, watchOS8, and tvOS15.
     - ``'neuralnetwork'``: Returns an MLModel (``coremltools.models.MLModel``)
       containing a NeuralNetwork proto, which is the original Core ML format.
       The model saved from this returned object is executable either on
       iOS13/macOS10.15/watchOS6/tvOS13 and newer, or on
       iOS14/macOS11/watchOS7/tvOS14 and newer, depending on the layers used
       in the model.
-    - ``'mlprogram'`` : Returns an MLModel (``coremltools.models.MLModel``)
-      containing a MILSpec.Program proto, which is the Core ML program format.
-      The model saved from this returned object is executable on iOS15,
-      macOS12, watchOS8, and tvOS15.
     - ``'milinternal'``: Returns an MIL program object
       (``coremltools.converters.mil.Program``). An MIL program is primarily
       used for debugging and inspection. It can be converted to an MLModel for
@@ -275,7 +289,7 @@ def convert(
       ct.convert(mil_program, convert_to="mlprogram")

     - If neither the ``minimum_deployment_target`` nor the ``convert_to``
-      parameter is specified, the converter produces the neural network
+      parameter is specified, the converter produces the ML program
       model type with as minimum of a deployment target as possible.

 compute_precision : coremltools.precision enumeration or ct.transform.FP16ComputePrecision() (optional)
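
Conversely, pinning ``compute_precision`` to float 32 disables the fp16 cast pass and keeps float 32 I/O even on newer targets; a minimal sketch under the same placeholder model:

```python
import coremltools as ct
import torch

torch_model = torch.jit.trace(torch.nn.Linear(4, 2).eval(), torch.rand(1, 4))

# Explicit fp32 precision: the common::add_fp16_cast pass is removed, so
# the fp16 I/O defaults described above do not kick in.
mlmodel = ct.convert(
    torch_model,
    inputs=[ct.TensorType(shape=(1, 4))],
    minimum_deployment_target=ct.target.macOS13,
    compute_precision=ct.precision.FLOAT32,
)
```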
@@ -504,10 +518,11 @@ def skip_real_div_ops(op):
         exact_target,
         minimum_deployment_target,
     )
+    need_fp16_cast_pass = _need_fp16_cast_pass(compute_precision, exact_target)

     if pass_pipeline is None:
         pass_pipeline = PassPipeline()
-    if not _need_fp16_cast_pass(compute_precision, exact_target):
+    if not need_fp16_cast_pass:
         pass_pipeline.remove_passes({"common::add_fp16_cast"})
     if isinstance(compute_precision, FP16ComputePrecision):
         # For backward compatibility with the `op_selector` param in FP16ComputePrecision.
@@ -527,6 +542,12 @@ def skip_real_div_ops(op):
     if specification_version is None:
         specification_version = _set_default_specification_version(exact_target)

+    use_default_fp16_io = (
+        specification_version is not None
+        and specification_version >= AvailableTarget.iOS16
+        and need_fp16_cast_pass
+    )
+
     mlmodel = mil_convert(
         model,
         convert_from=exact_source,
@@ -540,6 +561,7 @@ def skip_real_div_ops(op):
         debug=debug,
         specification_version=specification_version,
         main_pipeline=pass_pipeline,
+        use_default_fp16_io=use_default_fp16_io,
     )

     if exact_target == "mlprogram" and mlmodel._input_has_infinite_upper_bound():
@@ -890,6 +912,15 @@ def _determine_target(convert_to, minimum_deployment_target):
     """
     Infer the precise backend target, which could be one of ``milinternal``, ``neuralnetwork`` or ``mlprogram``
     """
+    if minimum_deployment_target is None and convert_to is None:
+        logger.warning(
+            "When neither 'convert_to' nor 'minimum_deployment_target' is specified, "
+            "'convert_to' is set to \"mlprogram\" and 'minimum_deployment_target' is set to "
+            "ct.target.iOS15 (which is the same as ct.target.macOS12). "
+            "Note: the model will not run on systems older than iOS15/macOS12/watchOS8/tvOS15. "
+            "To make your model run on older systems, please set 'minimum_deployment_target' to iOS14/iOS13. "
+            "For details, see: https://coremltools.readme.io/docs/unified-conversion-api#target-conversion-formats"
+        )
     if minimum_deployment_target is not None:
         if convert_to == "mlprogram" and minimum_deployment_target < AvailableTarget.iOS15:
             raise ValueError(
@@ -908,7 +939,7 @@ def _determine_target(convert_to, minimum_deployment_target):
         return convert_to
     else:
         if minimum_deployment_target is None:
-            return "neuralnetwork"
+            return "mlprogram"
         elif minimum_deployment_target <= AvailableTarget.iOS14:
             return "neuralnetwork"
         else:
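
The change to ``_determine_target`` is the user-visible headline of this release: with neither ``convert_to`` nor ``minimum_deployment_target`` given, conversion now warns and produces an ML program instead of a neural network. A sketch (the model is a placeholder):

```python
import coremltools as ct
import torch

torch_model = torch.jit.trace(torch.nn.Linear(4, 2).eval(), torch.rand(1, 4))

# Neither argument given: logs the warning above and returns an mlprogram
# with ct.target.iOS15 as the deployment floor.
mlmodel = ct.convert(torch_model, inputs=[ct.TensorType(shape=(1, 4))])

# To keep the pre-7.0 behavior, request a neural network explicitly.
nn_model = ct.convert(
    torch_model,
    inputs=[ct.TensorType(shape=(1, 4))],
    convert_to="neuralnetwork",
)
```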

coremltools/converters/mil/backend/mil/helper.py

Lines changed: 24 additions & 18 deletions
@@ -10,10 +10,12 @@
 import coremltools.proto.FeatureTypes_pb2 as ft
 import coremltools.proto.MIL_pb2 as pm
 from coremltools.converters.mil.mil import types
-from coremltools.converters.mil.mil.types import (builtin_to_proto_types,
-                                                  builtin_to_string,
-                                                  numpy_type_to_builtin_type,
-                                                  type_to_builtin_type)
+from coremltools.converters.mil.mil.types import (
+    BUILTIN_TO_PROTO_TYPES,
+    builtin_to_string,
+    numpy_type_to_builtin_type,
+    type_to_builtin_type,
+)
 from coremltools.converters.mil.mil.types.type_mapping import np_val_to_py_type
 from coremltools.models.utils import _WEIGHTS_DIR_NAME, _WEIGHTS_FILE_NAME

@@ -91,20 +93,30 @@ def update_tensortype(t_type, shape, data_type):
         set_proto_dim(t_dim, s)

 def _tensor_field_by_type(tensor_val, builtin_type):
+    """
+    Pick the field based on the builtin_type.
+
+    The field is defined in TensorValue in ``mlmodel/format/MIL.proto``.
+    The picked field needs to be consistent with how it will be read by MIL.
+    For example, int8 is serialized to the ``bytes`` field while int16 is serialized to the ``ints`` field.
+    """
     if builtin_type == types.bool:
         return tensor_val.bools.values
     elif types.is_int(builtin_type):
-        if (builtin_type == types.int64 or builtin_type == types.uint64):
+        if builtin_type == types.int64 or builtin_type == types.uint64:
             return tensor_val.longInts.values
         if builtin_type in (types.int8, types.uint8, types.uint32):
             return tensor_val.bytes.values
+        if builtin_type == types.int16 or builtin_type == types.uint16:
+            # TODO (rdar://111797203): Serialize to bytes after MIL changes to read from the bytes field.
+            return tensor_val.ints.values
         return tensor_val.ints.values
     elif types.is_float(builtin_type):
-        if (builtin_type == types.fp64):
+        if builtin_type == types.fp64:
             return tensor_val.doubles.values
-        elif (builtin_type == types.fp32):
+        elif builtin_type == types.fp32:
             return tensor_val.floats.values
-        elif (builtin_type == types.fp16):
+        elif builtin_type == types.fp16:
             return tensor_val.bytes.values
         else:
             raise TypeError(
@@ -177,14 +189,8 @@ def create_scalar_value(py_scalar):

     # Set the tensor value
     t_field = _tensor_field_by_type(t_val, builtin_type)
-    if builtin_type in (
-        types.fp16,
-        types.int8,
-        types.uint8,
-        types.int16,
-        types.uint16,
-        types.uint32,
-    ):
+    if builtin_type in (types.fp16, types.int8, types.uint8, types.uint32):
+        # Serialize to bytes because MIL reads these from the "bytes" field in TensorValue.
         val.immediateValue.tensor.bytes.values = np_val_to_py_type(py_scalar)
     else:
         if builtin_type == types.str:
@@ -243,7 +249,7 @@ def create_file_value_tensor(file_name, offset, dim, data_type):


 def types_to_proto_primitive(valuetype):
-    if valuetype not in builtin_to_proto_types:
+    if valuetype not in BUILTIN_TO_PROTO_TYPES:
         additional_error_msg = ""
         if valuetype in (types.complex64, types.complex128):
             additional_error_msg = (
@@ -253,7 +259,7 @@ def types_to_proto_primitive(valuetype):
         raise ValueError(
             f"Unknown map from SSA type {valuetype} to Proto type. {additional_error_msg}"
         )
-    return builtin_to_proto_types[valuetype]
+    return BUILTIN_TO_PROTO_TYPES[valuetype]


 def types_to_proto(valuetype):
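
A sketch of how the field selection plays out for scalars, assuming these internal helpers stay importable as shown (private API, subject to change):

```python
import numpy as np

from coremltools.converters.mil.backend.mil.helper import create_scalar_value

# int16 still lands in the "ints" field (see the TODO above)...
val = create_scalar_value(np.int16(7))
print(val.immediateValue.tensor.ints.values)   # expected: [7]

# ...while fp16 is serialized into the raw "bytes" field.
val = create_scalar_value(np.float16(0.5))
print(val.immediateValue.tensor.bytes.values)  # expected: b'\x008' (0x3800, little-endian)
```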

coremltools/converters/mil/backend/mil/load.py

Lines changed: 6 additions & 1 deletion
@@ -429,7 +429,12 @@ def load(prog, weights_dir, resume_on_errors=False, specification_version=_SPECI
             # Classifier outputs are set up separately, so default to fp32 for now.
             dataType = ft.ArrayFeatureType.ArrayDataType.FLOAT32

-        array_type = ft.ArrayFeatureType(shape=None, dataType=dataType)
+        output_shape = (
+            None
+            if any_symbolic(var.shape) or types.is_primitive(var.sym_type)
+            else var.shape
+        )
+        array_type = ft.ArrayFeatureType(shape=output_shape, dataType=dataType)
         output_feature_type.multiArrayType.CopyFrom(array_type)
         output_features.append(ml.FeatureDescription(name=var.name, type=output_feature_type))
     elif (types.is_dict(var.sym_type)):
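
With this hunk, a converted model whose output shape is fully static should carry that shape in its output feature description rather than an empty one; a sketch under the same placeholder model as above:

```python
import coremltools as ct
import torch

torch_model = torch.jit.trace(torch.nn.Linear(4, 2).eval(), torch.rand(1, 4))
mlmodel = ct.convert(
    torch_model,
    inputs=[ct.TensorType(shape=(1, 4))],
    minimum_deployment_target=ct.target.macOS13,
)

# Static output shapes are now serialized; symbolic ones still come out empty.
print(list(mlmodel.get_spec().description.output[0].type.multiArrayType.shape))
# expected: [1, 2]
```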

coremltools/converters/mil/backend/mil/passes/adjust_io_to_supported_types.py

Lines changed: 2 additions & 2 deletions
@@ -79,7 +79,7 @@ def _adjust_var_dtype_helper(var, dtype):
 def _get_io_supported_types(opset_version: target) -> Set[type]:
     """Get Core ML I/O supported data types based on opset version."""
     supported_types = {types.fp32, types.int32}
-    if opset_version >= target.iOS16:
+    if opset_version is not None and opset_version >= target.iOS16:
         supported_types.add(types.fp16)
     return supported_types

@@ -88,7 +88,7 @@ def _get_runtime_supported_types(opset_version: target) -> Set[type]:
     """Get Core ML Runtime supported data types based on opset version."""
     supported_types = {types.fp16, types.fp32, types.int32, types.str, types.bool}
     if opset_version >= target.iOS17:
-        supported_types.update({types.int16, types.uint16})
+        supported_types.update({types.int8, types.uint8, types.int16, types.uint16})
     return supported_types