
Commit 07ffd86

Gitesh Dawer and Gitesh Dawer authored
8.3 release (#2492)
* 8.3 release

* Disable flake8 + Fix external CI

---------

Co-authored-by: Gitesh Dawer <gdawer@apple.com>
1 parent 27139e2 commit 07ffd86


125 files changed: +18972 additions, -1256 deletions

.gitlab-ci.yml

Lines changed: 10 additions & 8 deletions
@@ -9,14 +9,16 @@ stages:
 ##
 #########################################################################

-check_python_flake8:
-  tags:
-    - macOS_M1
-  stage: check
-  script:
-    - python -m pip install --upgrade pip
-    - pip install flake8
-    - flake8 ./coremltools --count --select=E9,F5,F63,F7,F82 --show-source --statistics
+
+# TODO: Enable Flake8 once it's integrated in the regular dev workflow
+# check_python_flake8:
+#   tags:
+#     - macOS_M1
+#   stage: check
+#   script:
+#     - python -m pip install --upgrade pip
+#     - pip install flake8
+#     - flake8 ./coremltools --count --select=E9,F5,F63,F7,F82 --show-source --statistics


 #########################################################################

NOTICE.txt

Lines changed: 20 additions & 1 deletion
@@ -47,7 +47,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.

-This project contains content in the files coremltools/optimize/torch/quantization/modules/conv_transpose.py and coremltools/optimize/torch/quantization/modules/conv_transpose_fused.py which are adapted from pytorch (https://github.com/pytorch/). The license for these follows:
+This project contains content in the files coremltools/optimize/torch/quantization/modules/conv_transpose.py, coremltools/optimize/torch/quantization/modules/conv_transpose_fused.py, coremltools/optimize/torch/quantization/modules/learnable_fake_quantize.py,
+coremltools/optimize/torch/modules/observers.py, coremltools/optimize/torch/_utils/torch_utils.py which are adapted from pytorch (https://github.com/pytorch/). The license for these follows:

 Copyright (c) 2016 Facebook, Inc (Adam Paszke)

@@ -79,3 +80,21 @@ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 POSSIBILITY OF SUCH DAMAGE.
+
+This project contains content in the files coremltools/optimize/torch/modules/learnable_fake_quantize.py and coremltools/optimize/torch/modules/observers.py which are adapted from MQBench (https://github.com/ModelTC/MQBench)
+
+Apache License 2.0
+
+Copyright 2025 MQBench
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

coremltools/converters/_converters_entry.py

Lines changed: 5 additions & 5 deletions
@@ -743,7 +743,7 @@ def _validate_outputs_argument(outputs):
 raise ValueError('"outputs" must be of type list')
 if len(outputs) == 0:
 return None, None
-if not all(map(lambda t: isinstance(t, (ImageType, str, TensorType)), outputs)):
+if not all(map(lambda t: ((isinstance(t, InputType) and t.can_be_output()) or isinstance(t, str)), outputs)):
 raise ValueError('Elements in "outputs" must be ct.TensorType or ct.ImageType or str')

 msg_inconsistent_types = 'all elements of "outputs" must either be of type str ' \
@@ -754,10 +754,10 @@ def _validate_outputs_argument(outputs):
 raise ValueError(msg_inconsistent_types)
 return outputs, [TensorType(name=name) for name in outputs]

-if isinstance(outputs[0], InputType):
-if not all([isinstance(t, TensorType) or isinstance(t, ImageType) for t in outputs]):
+if isinstance(outputs[0], InputType) and outputs[0].can_be_output():
+if not all([(isinstance(t, InputType) and t.can_be_output()) for t in outputs]):
 raise ValueError(msg_inconsistent_types)
-if any([t.shape is not None for t in outputs]):
+if any([(isinstance(t, TensorType) or isinstance(t, ImageType)) and t.shape is not None for t in outputs]):
 msg = "The 'shape' argument must not be specified for the outputs, since it is " \
 "automatically inferred from the input shapes and the ops in the model"
 raise ValueError(msg)
@@ -1011,7 +1011,7 @@ def _determine_source(model, source,
 # validate that the outputs passed by the user are of type ImageType/TensorType
 if output_argument_as_specified_by_user is not None and not all(
 [
-isinstance(t, TensorType) or isinstance(t, ImageType)
+(isinstance(t, InputType) and t.can_be_output())
 for t in output_argument_as_specified_by_user
 ]
 ):

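The change in _validate_outputs_argument replaces hard-coded (ImageType, str, TensorType) checks with a capability query, so any InputType subclass that reports can_be_output() is accepted as an output. A minimal, self-contained sketch of that pattern; InputType, TensorType, and StateType below are hypothetical stand-ins, not the real classes from coremltools.converters.mil.input_types, and the real can_be_output() semantics may differ:

# Hypothetical stand-ins illustrating capability-based output validation.
class InputType:
    def can_be_output(self) -> bool:
        return True

class TensorType(InputType):
    pass

class StateType(InputType):
    # Assumed example of an input-only type that should be rejected as an output.
    def can_be_output(self) -> bool:
        return False

def validate_outputs(outputs):
    # Mirrors the updated check in _validate_outputs_argument().
    if not all(
        (isinstance(t, InputType) and t.can_be_output()) or isinstance(t, str)
        for t in outputs
    ):
        raise ValueError('Elements in "outputs" must be ct.TensorType or ct.ImageType or str')

validate_outputs([TensorType(), "var_1"])   # passes
# validate_outputs([StateType()])           # would raise ValueError
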
coremltools/converters/mil/backend/backend_helper.py

Lines changed: 15 additions & 9 deletions
@@ -4,7 +4,7 @@
 # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause

 from coremltools import proto
-from coremltools.converters.mil.input_types import ColorLayout
+from coremltools.converters.mil import input_types
 from coremltools.converters.mil.mil.passes.defs.preprocess import NameSanitizer


@@ -46,11 +46,11 @@ def _get_probability_var_for_classifier(prog, classifier_config):


 def _get_colorspace_enum(color_layout):
-if color_layout == ColorLayout.GRAYSCALE:
+if color_layout == input_types.ColorLayout.GRAYSCALE:
 return proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.GRAYSCALE
-elif color_layout == ColorLayout.GRAYSCALE_FLOAT16:
+elif color_layout == input_types.ColorLayout.GRAYSCALE_FLOAT16:
 return proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.GRAYSCALE_FLOAT16
-elif color_layout == ColorLayout.BGR:
+elif color_layout == input_types.ColorLayout.BGR:
 return proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.BGR
 else:
 return proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.RGB
@@ -60,12 +60,18 @@ def _validate_image_input_output_shapes(color_layout, shape, name, is_input=True
 if len(shape) != 4:
 raise ValueError("Image {}, '{}', must have rank 4. Instead it has rank {}".
 format(io_str, name, len(shape)))
-if color_layout in (ColorLayout.BGR, ColorLayout.RGB):
+if color_layout in (input_types.ColorLayout.BGR, input_types.ColorLayout.RGB):
 if shape[1] != 3 or shape[0] != 1:
-raise ValueError("Shape of the RGB/BGR image {}, '{}', must be of kind (1, 3, H, W), "
-"i.e., first two dimensions must be (1, 3), instead they are: {}".
-format(io_str, name, shape[:2]))
-elif color_layout in (ColorLayout.GRAYSCALE, ColorLayout.GRAYSCALE_FLOAT16):
+raise ValueError(
+"Shape of the RGB/BGR image {}, '{}', must be of kind (1, 3, H, W), "
+"i.e., first two dimensions must be (1, 3), instead they are: {}".format(
+io_str, name, shape[:2]
+)
+)
+elif color_layout in (
+input_types.ColorLayout.GRAYSCALE,
+input_types.ColorLayout.GRAYSCALE_FLOAT16,
+):
 if shape[1] != 1 or shape[0] != 1:
 raise ValueError("Shape of the Grayscale image {}, '{}', must be of kind (1, 1, H, W), "
 "i.e., first two dimensions must be (1, 1), instead they are: {}".

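For reference, a condensed sketch of the shape rules _validate_image_input_output_shapes enforces, with plain strings standing in for the input_types.ColorLayout enum; this is a simplified illustration, not the coremltools implementation:

# Simplified illustration of the image shape checks; color layouts are plain
# strings here instead of ColorLayout members.
def validate_image_shape(color_layout: str, shape: tuple) -> None:
    if len(shape) != 4:
        raise ValueError(f"Image must have rank 4, instead it has rank {len(shape)}")
    if color_layout in ("RGB", "BGR"):
        if shape[0] != 1 or shape[1] != 3:
            raise ValueError(f"RGB/BGR image must be of kind (1, 3, H, W), got {shape[:2]}")
    elif color_layout in ("GRAYSCALE", "GRAYSCALE_FLOAT16"):
        if shape[0] != 1 or shape[1] != 1:
            raise ValueError(f"Grayscale image must be of kind (1, 1, H, W), got {shape[:2]}")

validate_image_shape("RGB", (1, 3, 224, 224))          # ok
# validate_image_shape("GRAYSCALE", (1, 3, 224, 224))  # would raise
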
coremltools/converters/mil/backend/mil/load.py

Lines changed: 16 additions & 20 deletions
@@ -18,6 +18,7 @@
 )
 from coremltools import _logger as logger
 from coremltools import proto
+from coremltools.converters.mil import input_types as mil_input_types
 from coremltools.converters.mil import mil
 from coremltools.converters.mil.backend.backend_helper import _get_probability_var_for_classifier
 from coremltools.converters.mil.backend.mil import helper
@@ -33,13 +34,6 @@
 types_to_proto_primitive,
 )
 from coremltools.converters.mil.backend.nn.load import _set_optional_inputs
-from coremltools.converters.mil.input_types import (
-ClassifierConfig,
-EnumeratedShapes,
-ImageType,
-RangeDim,
-TensorType,
-)
 from coremltools.converters.mil.mil import Block
 from coremltools.converters.mil.mil import Builder as mb
 from coremltools.converters.mil.mil import Function, Operation, Program, Var, mil_list, types
@@ -604,7 +598,7 @@ def __init__(
 mil_proto: proto.MIL_pb2.Program,
 predicted_feature_name: str,
 predicted_probabilities_name: str,
-classifier_config: ClassifierConfig,
+classifier_config: "mil_input_types.ClassifierConfig",
 convert_to: str,
 convert_from: str,
 ):
@@ -649,7 +643,7 @@ def get_func_input(self, func: mil.Function) -> List["proto.Model_pb2.FeatureDes
 input_shape_map = {}

 for input_type in input_types:
-if isinstance(input_type, ImageType):
+if isinstance(input_type, mil_input_types.ImageType):
 image_input_names[input_type.name] = input_type
 # error checking for input(s) marked as images
 if input_type.name not in list(func.inputs.keys()):
@@ -766,8 +760,8 @@ def get_func_input(self, func: mil.Function) -> List["proto.Model_pb2.FeatureDes
 default_bound_used = False
 input_type = input_shape_map.get(name, None)

-if isinstance(input_type, ImageType):
-if isinstance(input_type.shape, EnumeratedShapes):
+if isinstance(input_type, mil_input_types.ImageType):
+if isinstance(input_type.shape, mil_input_types.EnumeratedShapes):
 enumerated_shapes = []
 for s in input_type.shape.shapes:
 enumerated_shapes.append(
@@ -781,14 +775,14 @@ def get_func_input(self, func: mil.Function) -> List["proto.Model_pb2.FeatureDes
 H = input_type.shape.shape[-2]
 W = input_type.shape.shape[-1]

-if isinstance(H, RangeDim):
+if isinstance(H, mil_input_types.RangeDim):
 img_range.add_height_range((H.lower_bound, H.upper_bound))
 elif is_symbolic(H):
 img_range.add_height_range((default_lower_bound, default_upper_bound))
 default_bound_used = True
 else:
 img_range.add_height_range((H, H))
-if isinstance(W, RangeDim):
+if isinstance(W, mil_input_types.RangeDim):
 img_range.add_width_range((W.lower_bound, W.upper_bound))
 elif is_symbolic(W):
 img_range.add_width_range((default_lower_bound, default_upper_bound))
@@ -799,16 +793,16 @@ def get_func_input(self, func: mil.Function) -> List["proto.Model_pb2.FeatureDes
 flexible_shape_utils._update_image_size_range_for_feature(
 input_features[-1], img_range
 )
-elif isinstance(input_type, TensorType):
-if isinstance(input_type.shape, EnumeratedShapes):
+elif isinstance(input_type, mil_input_types.TensorType):
+if isinstance(input_type.shape, mil_input_types.EnumeratedShapes):
 flexible_shape_utils._add_multiarray_ndshape_enumeration_for_feature(
 input_features[-1], [tuple(s.shape) for s in input_type.shape.shapes]
 )
 else:
 lb = []
 ub = []
 for s in input_type.shape.shape:
-if isinstance(s, RangeDim):
+if isinstance(s, mil_input_types.RangeDim):
 lb.append(s.lower_bound)
 ub.append(s.upper_bound)
 elif is_symbolic(s):
@@ -842,7 +836,7 @@ def get_func_input(self, func: mil.Function) -> List["proto.Model_pb2.FeatureDes
 "Some dimensions in the input shape are unknown, hence they are set to flexible ranges "
 f"with lower bound and default value = {default_lower_bound}, and upper bound = "
 f"{default_upper_bound}. To set different values for the default shape and upper bound, "
-"please use the ct.RangeDim() method as described here: "
+"please use the ct.mil_input_types.RangeDim() method as described here: "
 "https://coremltools.readme.io/docs/flexible-inputs#set-the-range-for-each-dimension.",
 UserWarning,
 )
@@ -870,7 +864,9 @@ def get_func_output(self, func: mil.Function) -> List["proto.Model_pb2.FeatureDe
 for i, var in enumerate(func.outputs):
 output_feature_type = proto.FeatureTypes_pb2.FeatureType()
 if types.is_tensor(var.sym_type) or types.is_primitive(var.sym_type):
-if output_types is not None and isinstance(output_types[i], ImageType):
+if output_types is not None and isinstance(
+output_types[i], mil_input_types.ImageType
+):
 if not types.is_tensor(var.sym_type):
 raise ValueError(
 "Image output, '{}', is a scalar, but it should be a tensor of rank 4".format(
@@ -883,7 +879,7 @@ def get_func_output(self, func: mil.Function) -> List["proto.Model_pb2.FeatureDe
 shape = var.sym_type.get_shape()
 if any_variadic(shape):
 raise ValueError(
-"Variable rank model outputs, that are ImageTypes, are not supported"
+"Variable rank model outputs, that are mil_input_types.ImageTypes, are not supported"
 )
 if any_symbolic(shape):
 # For flexible shape output, we set the imageSizeRange to [1, -1],
@@ -1054,7 +1050,7 @@ def load(
 if prog.default_function_name not in prog.functions:
 raise ValueError(f"Default function {prog.default_function_name} not found in program")

-# if user has specified "ClassifierConfig", then add the "classify" op to the prog
+# if user has specified "mil_input_types.ClassifierConfig", then add the "classify" op to the prog
 classifier_config = kwargs.get("classifier_config", None)
 predicted_feature_name, predicted_probabilities_name = None, None
 if classifier_config is not None:

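Most of the edits above only re-qualify imports, but the flexible-shape branches show what get_func_input does with RangeDim dimensions. A short usage sketch of the public API that produces such inputs; the bounds and input name are illustrative only, and traced_model is assumed to be an already traced source model:

import coremltools as ct

# Flexible height/width via RangeDim; these bounds are made up for illustration.
flexible_input = ct.TensorType(
    name="x",
    shape=(1, 3,
           ct.RangeDim(lower_bound=64, upper_bound=512),
           ct.RangeDim(lower_bound=64, upper_bound=512)),
)

# When passed to the converter, this is what get_func_input() later turns into
# a flexible shape range on the generated model description:
# mlmodel = ct.convert(traced_model, inputs=[flexible_input])
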
coremltools/converters/mil/backend/mil/passes/adjust_io_to_supported_types.py

Lines changed: 20 additions & 7 deletions
@@ -3,7 +3,7 @@
 # Use of this source code is governed by a BSD-3-clause license that can be
 # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause

-from typing import Set
+from typing import Set, Tuple

 from coremltools import _logger as logger
 from coremltools.converters.mil._deployment_compatibility import AvailableTarget as target
@@ -67,7 +67,7 @@ class adjust_io_to_supported_types(AbstractGraphPass):

 def apply(self, prog):
 for name, func in prog.functions.items():
-is_main_funtion = name == "main"
+is_main_funtion = (name == "main")
 _adjust_io_to_supported_types(func, is_main_funtion)


@@ -78,11 +78,18 @@ def _adjust_var_dtype_helper(var, dtype):
 var._sym_type = types.tensor(dtype, var.sym_type.get_shape())


-def _get_io_supported_types(opset_version: target) -> Set[type]:
+def _get_io_supported_types(opset_version: target, input_types: Tuple["InputType"]) -> Set[type]:
 """Get Core ML I/O supported data types based on opset version."""
+# We need to do a lazy import in order to avoid errors during import
+from coremltools.converters.mil.input_types import ImageType
+
 supported_types = {types.fp32, types.int32}
 if opset_version is not None and opset_version >= target.iOS16:
 supported_types.add(types.fp16)
+if opset_version is not None and opset_version >= target.iOS17:
+if any(map(lambda t: isinstance(t, ImageType) and t.grayscale_use_uint8,
+input_types)):
+supported_types.add(types.uint8)
 return supported_types


@@ -104,7 +111,7 @@ def _adjust_main_inputs(func):
 2. If the original dtype is supported in Core ML Runtime, we insert a cast op to cast the
 input from the changed dtype to the original dtype.
 """
-_IO_SUPPORTED_TYPES = _get_io_supported_types(func.opset_version)
+_IO_SUPPORTED_TYPES = _get_io_supported_types(func.opset_version, func.input_types)
 _RUNTIME_SUPPORTED_TYPES = _get_runtime_supported_types(func.opset_version)

 for input_name, input_var in func.inputs.items():
@@ -116,17 +123,23 @@
 convert_to_dtype_str = types.builtin_to_string(convert_to_dtype)
 should_insert_cast = input_var.dtype in _RUNTIME_SUPPORTED_TYPES
 _adjust_var_dtype_helper(input_var, convert_to_dtype)
+human_readable_supported_types = list(
+map(types.builtin_to_string, _IO_SUPPORTED_TYPES)
+)
 logger.warning(
 f"\nInput '{input_var.name}' is of dtype {input_dtype_str}. The Core ML I/O does "
-f"not support this dtype (supported dtypes are: {_IO_SUPPORTED_TYPES}). Consider "
+f"not support this dtype (supported dtypes are: {human_readable_supported_types}). Consider "
 f"setting `minimum_deployment_target` to a higher IOS version for more supported "
 f"dtypes. This input is changed to {convert_to_dtype_str}.\n"
 )

 if not should_insert_cast:
+human_readable_supported_types = list(
+map(types.builtin_to_string, _RUNTIME_SUPPORTED_TYPES)
+)
 logger.warning(
 f"The original input dtype {input_dtype_str} is not supported in "
-f"Core ML Runtime (supported dtypes are: {_RUNTIME_SUPPORTED_TYPES}). Consider "
+f"Core ML Runtime (supported dtypes are: {human_readable_supported_types}). Consider "
 f"setting `minimum_deployment_target` to a higher IOS version for more "
 f"supported dtypes. We just changed the dtype and won't insert any cast op."
 )
@@ -160,7 +173,7 @@ def _adjust_main_inputs(func):
 @block_context_manager
 def _adjust_main_outputs(func):
 """Adjust the outputs in the main func to make sure they have Core ML I/O supported types."""
-_IO_SUPPORTED_TYPES = _get_io_supported_types(func.opset_version)
+_IO_SUPPORTED_TYPES = _get_io_supported_types(func.opset_version, func.input_types)

 new_outputs = []
 for output_var in func.outputs:

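A condensed, standalone sketch of the dtype-set logic _get_io_supported_types now implements, using plain strings in place of the MIL builtin types and a stub for ImageType; the grayscale_use_uint8 flag comes from the diff above, everything else is simplified:

from typing import Optional, Sequence, Set

class ImageTypeStub:
    """Stand-in for ImageType; only the flag this pass inspects."""
    def __init__(self, grayscale_use_uint8: bool = False):
        self.grayscale_use_uint8 = grayscale_use_uint8

def io_supported_dtypes(opset_version: Optional[int],
                        input_types: Sequence[object]) -> Set[str]:
    supported = {"fp32", "int32"}                            # always allowed at model I/O
    if opset_version is not None and opset_version >= 16:    # iOS16 and later
        supported.add("fp16")
    if opset_version is not None and opset_version >= 17:    # iOS17 and later
        if any(isinstance(t, ImageTypeStub) and t.grayscale_use_uint8
               for t in input_types):
            supported.add("uint8")
    return supported

print(io_supported_dtypes(17, [ImageTypeStub(grayscale_use_uint8=True)]))
# e.g. {'fp32', 'int32', 'fp16', 'uint8'}
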