 )
 from coremltools import ComputeUnit as _ComputeUnit
 from coremltools import __version__ as _ct_version
+from coremltools import _logger as logger
 from coremltools._deps import _HAS_TF_1, _HAS_TF_2, _HAS_TORCH
 from coremltools.converters._profile_utils import _profile
 from coremltools.converters.mil._deployment_compatibility import (
@@ -156,6 +157,8 @@ def convert(
               ``ct.utils.rename_feature`` API.
             - If ``dtype`` is not specified, it defaults to the ``dtype`` of the
               inputs in the TF model.
+            - For ``minimum_deployment_target >= ct.target.macOS13``, with ``compute_precision`` in float 16
+              precision: if ``inputs`` is not provided or ``dtype`` is not specified, float 32 inputs default to float 16.

         * PyTorch:
             - The ``inputs`` parameter is required.
@@ -166,7 +169,10 @@ def convert(
             - If the ``name`` argument is specified with ``TensorType`` or
               ``ImageType``, the converted Core ML model will have inputs with
               the same name.
-            - If ``dtype`` is missing, it defaults to float 32.
+            - If ``dtype`` is missing:
+                * For ``minimum_deployment_target <= ct.target.macOS12``, it defaults to float 32.
+                * For ``minimum_deployment_target >= ct.target.macOS13``, with ``compute_precision`` in
+                  float 16 precision, it defaults to float 16.

     outputs : list of ``TensorType`` or ``ImageType`` (optional)

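A minimal usage sketch of the float 16 input/output default documented in the hunk above. The tiny module, tensor name, and shapes here are illustrative assumptions, not part of this change::

    import coremltools as ct
    import numpy as np
    import torch

    class TinyNet(torch.nn.Module):
        def forward(self, x):
            return torch.relu(x)

    traced = torch.jit.trace(TinyNet().eval(), torch.rand(1, 3, 8, 8))

    # macOS13+ target with the default float 16 compute precision:
    # unspecified input/output dtypes default to float 16.
    model_fp16_io = ct.convert(
        traced,
        inputs=[ct.TensorType(name="x", shape=(1, 3, 8, 8))],
        minimum_deployment_target=ct.target.macOS13,
    )

    # Passing dtype explicitly keeps float 32 at the model interface.
    model_fp32_io = ct.convert(
        traced,
        inputs=[ct.TensorType(name="x", shape=(1, 3, 8, 8), dtype=np.float32)],
        outputs=[ct.TensorType(dtype=np.float32)],
        minimum_deployment_target=ct.target.macOS13,
    )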
@@ -206,13 +212,20 @@ def convert(
             - If specified, the ``name`` with ``TensorType`` or ``ImageType``
               must correspond to a node in the TF graph. In this case, the model
               will be converted up to that node.
+            - For ``minimum_deployment_target >= ct.target.macOS13``, with ``compute_precision`` in float 16 precision:
+              if ``dtype`` is not specified, outputs inferred as float 32
+              default to float 16.

         * PyTorch:

             - If specified, the length of the list must match the number of
               outputs returned by the PyTorch model.
             - If ``name`` is specified, it is applied to the output names of the
               converted Core ML model.
+            - For ``minimum_deployment_target >= ct.target.macOS13``, with ``compute_precision`` in float 16 precision:
+              if ``dtype`` is not specified, outputs inferred as float 32
+              default to float 16.
+

     classifier_config : ClassifierConfig class (optional)
         The configuration if the MLModel is intended to be a classifier.
@@ -221,7 +234,7 @@ def convert(
         A member of the ``coremltools.target`` enum.
         The value of this parameter determines the type of the model
         representation produced by the converter. To learn about the differences
-        between neural networks and ML programs, see
+        between ML programs and neural networks, see
         `ML Programs <https://coremltools.readme.io/docs/ml-programs>`_.

         - The converter produces a neural network (``neuralnetwork``) if:
@@ -239,33 +252,34 @@ def convert(
           coremltools.target.tvOS15:

         - If neither the ``minimum_deployment_target`` nor the ``convert_to``
-          parameter is specified, the converter produces the neural network
+          parameter is specified, the converter produces the ML program
           model type with as minimum of a deployment target as possible.
         - If this parameter is specified and ``convert_to`` is also specified,
           they must be compatible. The following are examples of invalid values:
           ::
-              # Invalid:
-              convert_to="neuralnetwork", minimum_deployment_target=coremltools.target.iOS15
               # Invalid:
               convert_to="mlprogram", minimum_deployment_target=coremltools.target.iOS14

+              # Invalid:
+              convert_to="neuralnetwork", minimum_deployment_target=coremltools.target.iOS15
+
     convert_to : str (optional)
-        Must be one of [``'neuralnetwork'``, ``'mlprogram'``, ``'milinternal'``].
+        Must be one of [``'mlprogram'``, ``'neuralnetwork'``, ``'milinternal'``].
         The value of this parameter determines the type of the model
         representation produced by the converter. To learn about the
-        differences between neural networks and ML programs, see
+        differences between ML programs and neural networks, see
         `ML Programs <https://coremltools.readme.io/docs/ml-programs>`_.

+        - ``'mlprogram'``: Returns an MLModel (``coremltools.models.MLModel``)
+          containing a MILSpec.Program proto, which is the Core ML program format.
+          The model saved from this returned object is executable on iOS15,
+          macOS12, watchOS8, and tvOS15.
         - ``'neuralnetwork'``: Returns an MLModel (``coremltools.models.MLModel``)
           containing a NeuralNetwork proto, which is the original Core ML format.
           The model saved from this returned object is executable either on
           iOS13/macOS10.15/watchOS6/tvOS13 and newer, or on
           iOS14/macOS11/watchOS7/tvOS14 and newer, depending on the layers used
           in the model.
-        - ``'mlprogram'``: Returns an MLModel (``coremltools.models.MLModel``)
-          containing a MILSpec.Program proto, which is the Core ML program format.
-          The model saved from this returned object is executable on iOS15,
-          macOS12, watchOS8, and tvOS15.
         - ``'milinternal'``: Returns an MIL program object
           (``coremltools.converters.mil.Program``). An MIL program is primarily
           used for debugging and inspection. It can be converted to an MLModel for
@@ -275,7 +289,7 @@ def convert(
              ct.convert(mil_program, convert_to="mlprogram")

         - If neither the ``minimum_deployment_target`` nor the ``convert_to``
-          parameter is specified, the converter produces the neural network
+          parameter is specified, the converter produces the ML program
           model type with as minimum of a deployment target as possible.

     compute_precision : coremltools.precision enumeration or ct.transform.FP16ComputePrecision() (optional)
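A short sketch of the new default described in the hunk above, reusing the hypothetical ``traced`` model from the earlier sketch. When neither ``convert_to`` nor ``minimum_deployment_target`` is given, the converter now warns and produces an ML program; requesting ``"neuralnetwork"`` explicitly keeps the older format::

    import coremltools as ct

    # Neither convert_to nor minimum_deployment_target is given: with this change
    # the converter logs a warning and produces an ML program, which requires
    # iOS15/macOS12/watchOS8/tvOS15 or newer at runtime.
    mlprog = ct.convert(traced, inputs=[ct.TensorType(shape=(1, 3, 8, 8))])

    # To target older OS versions, request the neural network format (or set an
    # older minimum_deployment_target) explicitly.
    nn_model = ct.convert(
        traced,
        inputs=[ct.TensorType(shape=(1, 3, 8, 8))],
        convert_to="neuralnetwork",
    )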
@@ -504,10 +518,11 @@ def skip_real_div_ops(op):
         exact_target,
         minimum_deployment_target,
     )
+    need_fp16_cast_pass = _need_fp16_cast_pass(compute_precision, exact_target)

     if pass_pipeline is None:
         pass_pipeline = PassPipeline()
-    if not _need_fp16_cast_pass(compute_precision, exact_target):
+    if not need_fp16_cast_pass:
         pass_pipeline.remove_passes({"common::add_fp16_cast"})
     if isinstance(compute_precision, FP16ComputePrecision):
         # For backward compatibility with the `op_selector` param in FP16ComputePrecision.
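The fp16 cast pass (and therefore the new float 16 I/O default) only applies when the compute precision is float 16. A hedged sketch, again reusing the hypothetical ``traced`` model, of opting out by requesting float 32 compute precision::

    import coremltools as ct

    # With float 32 compute precision the common::add_fp16_cast pass is removed,
    # so inputs and outputs stay float 32 even for newer deployment targets.
    model_fp32 = ct.convert(
        traced,
        inputs=[ct.TensorType(shape=(1, 3, 8, 8))],
        compute_precision=ct.precision.FLOAT32,
        minimum_deployment_target=ct.target.macOS13,
    )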
@@ -527,6 +542,12 @@ def skip_real_div_ops(op):
     if specification_version is None:
         specification_version = _set_default_specification_version(exact_target)

+    use_default_fp16_io = (
+        specification_version is not None
+        and specification_version >= AvailableTarget.iOS16
+        and need_fp16_cast_pass
+    )
+
     mlmodel = mil_convert(
         model,
         convert_from=exact_source,
@@ -540,6 +561,7 @@ def skip_real_div_ops(op):
         debug=debug,
         specification_version=specification_version,
         main_pipeline=pass_pipeline,
+        use_default_fp16_io=use_default_fp16_io,
     )

     if exact_target == "mlprogram" and mlmodel._input_has_infinite_upper_bound():
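When ``use_default_fp16_io`` ends up True (spec version iOS16/macOS13 or newer plus an active fp16 cast pass), the converted model's interface is expected to carry float 16 feature types. A small way to check this, assuming the ``model_fp16_io`` object from the first sketch::

    # Inspect the converted model's I/O types.
    spec = model_fp16_io.get_spec()
    for inp in spec.description.input:
        # dataType is an ArrayFeatureType enum value; FLOAT16 is expected here
        # when the float 16 default applied.
        print(inp.name, inp.type.multiArrayType.dataType)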
@@ -890,6 +912,15 @@ def _determine_target(convert_to, minimum_deployment_target):
     """
     Infer the precise backend target, which could be one of ``milinternal``, ``neuralnetwork`` or ``mlprogram``
     """
+    if minimum_deployment_target is None and convert_to is None:
+        logger.warning(
+            "When neither 'convert_to' nor 'minimum_deployment_target' is specified, "
+            "'convert_to' is set to \"mlprogram\" and 'minimum_deployment_target' is set to "
+            "ct.target.iOS15 (which is the same as ct.target.macOS12). "
+            "Note: the model will not run on systems older than iOS15/macOS12/watchOS8/tvOS15. "
+            "In order to make your model run on older systems, please set 'minimum_deployment_target' to iOS14/iOS13. "
+            "For details, see https://coremltools.readme.io/docs/unified-conversion-api#target-conversion-formats"
+        )
     if minimum_deployment_target is not None:
         if convert_to == "mlprogram" and minimum_deployment_target < AvailableTarget.iOS15:
             raise ValueError(
@@ -908,7 +939,7 @@ def _determine_target(convert_to, minimum_deployment_target):
         return convert_to
     else:
         if minimum_deployment_target is None:
-            return "neuralnetwork"
+            return "mlprogram"
         elif minimum_deployment_target <= AvailableTarget.iOS14:
             return "neuralnetwork"
         else:
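For readers skimming the diff, a compact paraphrase of the updated default resolution (illustrative only: ``resolve_backend`` is not a real coremltools helper, and the actual ``_determine_target`` additionally validates conflicting argument combinations)::

    import coremltools as ct

    def resolve_backend(convert_to, minimum_deployment_target):
        """Illustrative paraphrase of the new defaults; not library code."""
        if convert_to is not None:
            return convert_to
        if minimum_deployment_target is None:
            return "mlprogram"  # new default; previously "neuralnetwork"
        if minimum_deployment_target <= ct.target.iOS14:
            return "neuralnetwork"
        return "mlprogram"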