Commit 27139e2

Revert "perf: Prefer generator expressions over list comprehensions (#2486)" (#2491)
This reverts commit 37b9aeb.
1 parent 37b9aeb · commit 27139e2

34 files changed: +66 -65 lines changed
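For context, every hunk below swaps a generator expression back to a list comprehension inside all()/any(). The two forms are functionally equivalent and differ only in evaluation strategy. A minimal sketch of the pattern being reverted (the helper names here are hypothetical, not taken from the coremltools codebase):

    # Generator expression: evaluated lazily, short-circuits as soon as
    # all() can decide, and allocates no intermediate list.
    def all_strings_lazy(items):
        return all(isinstance(item, str) for item in items)

    # List comprehension: builds the whole list first, then checks it.
    # This is the form the revert restores; on CPython, building a small
    # list can be cheaper than driving a generator, though it gives up
    # short-circuiting and uses more memory on large inputs.
    def all_strings_eager(items):
        return all([isinstance(item, str) for item in items])

    # Both forms return the same result for any finite iterable.
    assert all_strings_lazy(["a", "b"]) == all_strings_eager(["a", "b"])
    assert all_strings_lazy(["a", 1]) == all_strings_eager(["a", 1])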

coremltools/converters/_converters_entry.py

Lines changed: 6 additions & 6 deletions
@@ -750,14 +750,14 @@ def _validate_outputs_argument(outputs):
  'or of types ct.ImageType/ct.TensorType'
  if isinstance(outputs[0], str):
  # if one of the elements is a string, all elements must be strings
- if not all(isinstance(t, str) for t in outputs):
+ if not all([isinstance(t, str) for t in outputs]):
  raise ValueError(msg_inconsistent_types)
  return outputs, [TensorType(name=name) for name in outputs]

  if isinstance(outputs[0], InputType):
- if not all(isinstance(t, TensorType) or isinstance(t, ImageType) for t in outputs):
+ if not all([isinstance(t, TensorType) or isinstance(t, ImageType) for t in outputs]):
  raise ValueError(msg_inconsistent_types)
- if any(t.shape is not None for t in outputs):
+ if any([t.shape is not None for t in outputs]):
  msg = "The 'shape' argument must not be specified for the outputs, since it is " \
  "automatically inferred from the input shapes and the ops in the model"
  raise ValueError(msg)

@@ -777,9 +777,9 @@ def _validate_outputs_argument(outputs):
  output_names = [t.name for t in outputs]
  # verify that either all of the entries in output_names is "None" or none of them is "None"
  msg_consistent_names = 'Either none or all the outputs must have the "name" argument specified'
- if output_names[0] is None and not all(name is None for name in output_names):
+ if output_names[0] is None and not all([name is None for name in output_names]):
  raise ValueError(msg_consistent_names)
- if output_names[0] is not None and not all(name is not None for name in output_names):
+ if output_names[0] is not None and not all([name is not None for name in output_names]):
  raise ValueError(msg_consistent_names)
  if output_names[0] is not None:
  if len(set(output_names)) != len(output_names):

@@ -914,7 +914,7 @@ def _flatten_list(_inputs):
  if inputs is not None:
  raise_if_duplicated(inputs)

- if inputs is not None and not all(isinstance(_input, InputType) for _input in inputs):
+ if inputs is not None and not all([isinstance(_input, InputType) for _input in inputs]):
  raise ValueError("Input should be a list of TensorType or ImageType")

  elif exact_source == "pytorch":

coremltools/converters/mil/backend/mil/load.py

Lines changed: 1 addition & 1 deletion
@@ -551,7 +551,7 @@ def remove_output(block, prob_var):
  classes = classes.splitlines()
  elif isinstance(classes_in, list): # list[int or str]
  classes = classes_in
- assert all(isinstance(x, (int, str)) for x in classes), message
+ assert all([isinstance(x, (int, str)) for x in classes]), message
  else:
  raise ValueError(message)

coremltools/converters/mil/backend/nn/load.py

Lines changed: 2 additions & 2 deletions
@@ -266,7 +266,7 @@ def load(prog, **kwargs):

  proto = builder.spec
  # image input
- has_image_input = any(isinstance(s, ImageType) for s in input_types)
+ has_image_input = any([isinstance(s, ImageType) for s in input_types])
  if has_image_input:
  proto = _convert_to_image_input(proto, input_types,
  skip_model_load=kwargs.get("skip_model_load", False))

@@ -284,7 +284,7 @@ def load(prog, **kwargs):
  shape = var.sym_type.get_shape()
  if any_variadic(shape):
  raise ValueError("Variable rank model outputs, that are ImageTypes, are not supported")
- if any(is_symbolic(d) for d in shape):
+ if any([is_symbolic(d) for d in shape]):
  raise NotImplementedError("Image output '{}' has symbolic dimensions in its shape".
  format(var.name))
  _validate_image_input_output_shapes(output_types[i].color_layout, shape, var.name, is_input=False)

coremltools/converters/mil/backend/nn/op_mapping.py

Lines changed: 5 additions & 5 deletions
@@ -196,7 +196,7 @@ def _try_convert_global_pool(const_context, builder, op, mode):

  if tuple(op.outputs[0].shape[:-2]) != tuple(op.inputs["x"].shape[:-2]):
  return False
- if not all(s == 1 for s in op.outputs[0].shape[-2:]):
+ if not all([s == 1 for s in op.outputs[0].shape[-2:]]):
  return False

  builder.add_pooling(

@@ -795,11 +795,11 @@ def _add_elementwise_binary(
  # INTERNAL_MUL_XYKN not implemented
  continue
  if all(shape_x[indices] == shape_y[indices]):
- if all(True if i in indices else s == 1 for i, s in enumerate(shape_x)):
+ if all([True if i in indices else s == 1 for i, s in enumerate(shape_x)]):
  internal_y = op.x
  internal_x = op.y
  break
- if all(True if i in indices else s == 1 for i, s in enumerate(shape_y)):
+ if all([True if i in indices else s == 1 for i, s in enumerate(shape_y)]):
  internal_x = op.x
  internal_y = op.y
  break

@@ -3323,7 +3323,7 @@ def stack(const_context, builder, op):
  def split(const_context, builder, op):
  split = op.sizes
  split = [size for size in split if size != 0]
- has_equal_splits = all(size == split[0] for size in split)
+ has_equal_splits = all([size == split[0] for size in split])
  num_splits = len(split)
  output_names = [op.outputs[i].name for i in range(len(op.sizes)) if op.sizes[i] != 0]

@@ -3545,7 +3545,7 @@ def _realloc_list(const_context, builder, ls_var, index_var, value_var, mode):

  # check if elem_shape is runtime-determined
  elem_shape = tuple(value_var.shape)
- has_dynamic_shape = any(is_symbolic(i) for i in elem_shape)
+ has_dynamic_shape = any([is_symbolic(i) for i in elem_shape])

  # get the fill shape of the tensor array
  # [length, elem_dim1, elem_dim2, ...]

coremltools/converters/mil/converter.py

Lines changed: 1 addition & 1 deletion
@@ -58,7 +58,7 @@ def __call__(self, model, *args, **kwargs):
  type(inputs)
  )
  )
- if not all(isinstance(i, input_types.InputType) for i in inputs):
+ if not all([isinstance(i, input_types.InputType) for i in inputs]):
  raise ValueError(
  "Type of inputs should be list or tuple of TensorType or ImageType, got {} instead.".format(
  [type(i) for i in inputs]

coremltools/converters/mil/debugging_utils.py

Lines changed: 1 addition & 1 deletion
@@ -84,7 +84,7 @@ def validate_inputs(func, input_vars):
  else:
  input_values.append(v)

- if all(x in reachable_vars for x in input_values):
+ if all([x in reachable_vars for x in input_values]):
  reachable_vars.update(op.outputs)

  for out in func.outputs:

coremltools/converters/mil/frontend/_utils.py

Lines changed: 3 additions & 3 deletions
@@ -293,7 +293,7 @@ def _swap(a, b):
  return b, a

  a_var, b_var = vars
- is_dynamic = any(any_symbolic(var.shape) for var in vars)
+ is_dynamic = any([any_symbolic(var.shape) for var in vars])
  # list of equations supported for explicit mil translations
  vec_bnqd_bnkd_bnqk = (
  [0, 1, 2, 3],

@@ -436,10 +436,10 @@ def get_output_names(outputs) -> Optional[List[str]]:

  output_names = None
  if outputs is not None:
- assert all(isinstance(t, InputType) for t in outputs), \
+ assert all([isinstance(t, InputType) for t in outputs]), \
  "outputs must be a list of ct.ImageType or ct.TensorType"
  output_names = [t.name for t in outputs]
- if all(name is None for name in output_names):
+ if all([name is None for name in output_names]):
  output_names = None
  return output_names

coremltools/converters/mil/frontend/tensorflow/converter.py

Lines changed: 2 additions & 2 deletions
@@ -179,7 +179,7 @@ def __init__(
  type(inputs)
  )
  )
- if not all(isinstance(i, InputType) for i in inputs):
+ if not all([isinstance(i, InputType) for i in inputs]):
  raise ValueError(
  "Type of inputs should be list or tuple of TensorType or ImageType, got {} instead.".format(
  [type(i) for i in inputs]

@@ -238,7 +238,7 @@ def __init__(
  for inputtype in self.inputs:
  if not isinstance(inputtype.shape, InputShape):
  continue
- if any(isinstance(s, RangeDim) for s in inputtype.shape.shape):
+ if any([isinstance(s, RangeDim) for s in inputtype.shape.shape]):
  continue
  if inputtype.name not in graph:
  raise ValueError(

coremltools/converters/mil/frontend/tensorflow/ops.py

Lines changed: 2 additions & 2 deletions
@@ -633,7 +633,7 @@ def ExtractImagePatches(context, node):
  padding = node.attr.get("padding")
  if x.rank != 4:
  raise ValueError("input for ExtractImagePatches should be a 4D tensor.")
- if not all(rate == 1 for rate in rates):
+ if not all([rate == 1 for rate in rates]):
  raise NotImplementedError(
  "only rates with all 1s is implemented for ExtractImagePatches."
  )

@@ -3022,7 +3022,7 @@ def Pack(context, node):
  else:
  x = mb.expand_dims(x=values[0], axes=[axis], name=node.name)
  else:
- if all(_is_scalar(input.sym_type) for input in values):
+ if all([_is_scalar(input.sym_type) for input in values]):
  x = mb.concat(values=values, axis=axis, name=node.name)
  else:
  x = mb.stack(values=values, axis=axis, name=node.name)

coremltools/converters/mil/frontend/tensorflow/ssa_passes/tf_lstm_to_core_lstm.py

Lines changed: 1 addition & 1 deletion
@@ -106,7 +106,7 @@ def _try_get_last_cell_state_in_tf_lstm_block(op: Operation) -> Var:
  return cs
  if len(cs.consuming_blocks) > 1:
  return None
- if not all(child_op.op_type == "slice_by_index" for child_op in cs.child_ops):
+ if not all([child_op.op_type == "slice_by_index" for child_op in cs.child_ops]):
  return None
  child_ops = cs.child_ops[:]
  block = op.enclosing_block

coremltools/converters/mil/frontend/tensorflow/test/test_ops.py

Lines changed: 1 addition & 1 deletion
@@ -1767,7 +1767,7 @@ def build_model_static_weights(x):
  )

  test_static_W()
- if not any(True if d > 1 else False for d in dilations):
+ if not any([True if d > 1 else False for d in dilations]):
  if backend[0] == "neuralnetwork":
  pytest.skip("dynamic conv with groups > 1 is not supported on the neuralnetwork backend")
  test_dynamic_W()

coremltools/converters/mil/frontend/tensorflow/test/testing_utils.py

Lines changed: 1 addition & 1 deletion
@@ -164,7 +164,7 @@ def tf_graph_to_mlmodel(
  input_types.append(
  ct.TensorType(name=input_placeholder.name.split(":")[0], shape=input_shape)
  )
- if any(dim.value is None for dim in input_placeholder.shape):
+ if any([dim.value is None for dim in input_placeholder.shape]):
  has_dynamic_shape = True
  if has_dynamic_shape:
  inputs_for_conversion = input_types

coremltools/converters/mil/frontend/torch/ops.py

Lines changed: 3 additions & 3 deletions
@@ -6217,7 +6217,7 @@ def _check_args(tensor_inputs, indexing) -> None:
  assert isinstance(tensor_inputs, (list, tuple))
  if len(tensor_inputs) < 2:
  raise ValueError("Requires >= 2 tensor inputs.")
- if any(tensor_input.rank > 1 for tensor_input in tensor_inputs):
+ if any([tensor_input.rank > 1 for tensor_input in tensor_inputs]):
  raise ValueError("meshgrid received non-1d tensor.")

  if indexing not in ("ij", "xy"):

@@ -7200,7 +7200,7 @@ def where(context, node):
  if not types.is_bool(cond.dtype):
  # cond must be bool type
  cond = mb.cast(x=cond, dtype="bool")
- if not any(any_symbolic(x.shape) for x in (cond, a, b)):
+ if not any([any_symbolic(x.shape) for x in (cond, a, b)]):
  # broadcast all tensors to the same shape
  cond, a, b = _utils.pymil_broadcast_tensors([cond, a, b])
  result = mb.select(cond=cond, a=a, b=b, name=node.name)

@@ -7692,7 +7692,7 @@ def _pad_packed_sequence(context, node):

  # we only support pack and unpack translation for static tensor shape,
  # i.e., the three dimensions are all known during compile time.
- if any(is_symbolic(x) for x in input_tensor.shape):
+ if any([is_symbolic(x) for x in input_tensor.shape]):
  raise NotImplementedError("Only static shape of PackedSequence object is supported.")

  # the input always has batch first layout.

coremltools/converters/mil/mil/block.py

Lines changed: 2 additions & 2 deletions
@@ -1126,7 +1126,7 @@ def set_output_types(self, outputs: Optional[List["_input_types.InputType"]] = N
  if outputs is not None:
  if not (
  isinstance(outputs, list)
- and all(isinstance(out, _input_types.InputType) for out in outputs)
+ and all([isinstance(out, _input_types.InputType) for out in outputs])
  ):
  raise TypeError(
  "main outputs should be a list of type ct.TensorType or ct.ImageType"

@@ -1136,6 +1136,6 @@ def set_output_types(self, outputs: Optional[List["_input_types.InputType"]] = N
  def set_input_types(self, input_types: List["_input_types.InputType"]):
  if not isinstance(input_types, tuple):
  raise ValueError("main inputs should be tuple of TensorType or ImageType")
- elif not all(isinstance(inp, _input_types.InputType) for inp in input_types):
+ elif not all([isinstance(inp, _input_types.InputType) for inp in input_types]):
  raise ValueError("main inputs should be tuple of InputSpec")
  self.input_types = input_types

coremltools/converters/mil/mil/ops/defs/iOS15/conv.py

Lines changed: 1 addition & 1 deletion
@@ -182,7 +182,7 @@ def type_inference(self):
  custom_pad = None if self.pad_type.val != 'custom' else self.pad.val

  is_weight_dynamic = not self.weight.is_descendant_of_const
- if is_weight_dynamic and any(True if d > 1 else False for d in dilations):
+ if is_weight_dynamic and any([True if d > 1 else False for d in dilations]):
  raise ValueError("Convolution with dynamic weights does not support dilations!")

  N = inshape[0]

coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_binary.py

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ def _cast_check_value_inferene(self, a, b):
  """
  If one of the input is tensor, cast the result to tensor.
  """
- to_cast = any(isinstance(x, np.ndarray) for x in (a, b))
+ to_cast = any([isinstance(x, np.ndarray) for x in [a, b]])
  result = self.get_operator()(a, b)
  return result if not to_cast else np.array(result)

coremltools/converters/mil/mil/ops/defs/iOS15/normalization.py

Lines changed: 1 addition & 1 deletion
@@ -283,7 +283,7 @@ def type_inference(self):

  # check valid axes
  positive_axes = [axis + rank if axis < 0 else axis for axis in self.axes.val]
- if not all(axis >= 0 and axis < rank for axis in positive_axes):
+ if not all([axis >= 0 and axis < rank for axis in positive_axes]):
  raise ValueError("axes must in the range of [-x.rank, x.rank-1].")

  # check shape of gamma and beta

coremltools/converters/mil/mil/ops/defs/iOS15/tensor_operation.py

Lines changed: 3 additions & 3 deletions
@@ -1091,7 +1091,7 @@ def value_inference(self):
  symbolic_tensor = np.reshape(np.array(symbolic_tensor), shape)
  values.append(symbolic_tensor)

- if any(val is None for val in values):
+ if any([val is None for val in values]):
  return None

  if not isinstance(values[0], np.ndarray) or values[0].shape == ():

@@ -1301,13 +1301,13 @@ def type_inference(self):
  @precondition(allow=VALUE | SYMBOL | NONE)
  def value_inference(self):

- is_all_rank_zero = all(v.rank == 0 for v in self.values)
+ is_all_rank_zero = all([v.rank == 0 for v in self.values])
  values = [
  v.sym_val if v.sym_val is not None else get_new_symbol()
  for v in self.values
  ]

- if any(is_symbolic(v) for v in values) and not is_all_rank_zero:
+ if any([is_symbolic(v) for v in values]) and not is_all_rank_zero:
  return None

  return np.stack(values, self.axis.val)

coremltools/converters/mil/mil/ops/tests/iOS14/test_scatter_gather.py

Lines changed: 2 additions & 2 deletions
@@ -478,7 +478,7 @@ def prog(x):
  res = mb.gather(x=params, indices=indices, axis=-1)
  return res

- if any(idx > 2 for idx in indices_val):
+ if any([idx > 2 for idx in indices_val]):
  with pytest.raises(IndexError, match="index 3 is out of bounds for axis 1 with size 3"):
  mb.program(
  input_specs=[mb.TensorSpec(shape=(1,), dtype=types.fp32)],

@@ -672,7 +672,7 @@ def prog(x):
  res = mb.gather_along_axis(x=params, indices=indices, axis=0)
  return res

- if any(idx > 1 for sub_indices in indices_val for idx in sub_indices):
+ if any([idx > 1 for sub_indices in indices_val for idx in sub_indices]):
  with pytest.raises(IndexError, match="index 2 is out of bounds for axis 0 with size 2"):
  mb.program(
  input_specs=[mb.TensorSpec(shape=(1,), dtype=types.fp32)],

coremltools/converters/mil/mil/ops/tests/iOS17/test_scatter_gather.py

Lines changed: 5 additions & 5 deletions
@@ -131,7 +131,7 @@ def build_dynamic(data, indices, updates):

  if not isinstance(expected_error_msg, tuple):
  expected_error_msg = expected_error_msg
- assert any(err in str(excinfo.value) for err in expected_error_msg)
+ assert any([err in str(excinfo.value) for err in expected_error_msg])

  class TestScatterAlongAxis:
  @pytest.mark.parametrize(

@@ -224,7 +224,7 @@ def build_dynamic(data, indices, updates):

  if not isinstance(expected_error_msg, tuple):
  expected_error_msg = expected_error_msg
- assert any(err in str(excinfo.value) for err in expected_error_msg)
+ assert any([err in str(excinfo.value) for err in expected_error_msg])

  class TestScatterNd:

@@ -295,7 +295,7 @@ def build_dynamic(data, indices, updates):
  )
  if not isinstance(expected_error_msg, tuple):
  expected_error_msg = expected_error_msg
- assert any(err in str(excinfo.value) for err in expected_error_msg)
+ assert any([err in str(excinfo.value) for err in expected_error_msg])

  class TestGather(_TestGatherIOS16):

@@ -333,7 +333,7 @@ def prog(x):
  input_specs=[mb.TensorSpec(shape=(1,), dtype=types.fp32)],
  opset_version=backend.opset_version,
  )(prog)
- elif any(idx > 2 for idx in indices_val):
+ elif any([idx > 2 for idx in indices_val]):
  # If the indices are not validated during type inference for IOS17, the `gather` op's
  # value inference will raise error for out-of-bound index.
  with pytest.raises(IndexError, match="index 3 is out of bounds for axis 1 with size 3"):

@@ -410,7 +410,7 @@ def prog(x):
  input_specs=[mb.TensorSpec(shape=(1,), dtype=types.fp32)],
  opset_version=backend.opset_version,
  )(prog)
- elif any(idx > 1 for sub_indices in indices_val for idx in sub_indices):
+ elif any([idx > 1 for sub_indices in indices_val for idx in sub_indices]):
  # If the indices are not validated during type inference for IOS17, the `gather` op's
  # value inference will raise error for out-of-bound index.
  with pytest.raises(IndexError, match="index 2 is out of bounds for axis 0 with size 2"):

coremltools/converters/mil/mil/passes/defs/cleanup/noop_elimination.py

Lines changed: 1 addition & 1 deletion
@@ -127,7 +127,7 @@ def remove_slice_by_index(op):

  if op.stride is not None and op.stride.val is not None:
  stride = op.stride.val.flatten().tolist()
- if any(x < 0 for x in stride):
+ if any([x < 0 for x in stride]):
  return False

  if op.enclosing_block.try_replace_uses_of_var_after_op(

coremltools/converters/mil/mil/passes/defs/optimize_activation_quantization.py

Lines changed: 1 addition & 1 deletion
@@ -161,7 +161,7 @@ def _try_match_and_transform_pattern(

  # Reject if 1st operation is not `conv`/`add`/`pool`.
  SUPPORTED_OP_TYPES = ["conv", "add", "avg_pool", "max_pool"]
- if any(_check_child_op_type(dequantize_op, val) for val in SUPPORTED_OP_TYPES):
+ if any([_check_child_op_type(dequantize_op, val) for val in SUPPORTED_OP_TYPES]):
  pass
  else:
  return False

coremltools/converters/mil/mil/passes/defs/optimize_linear.py

Lines changed: 1 addition & 1 deletion
@@ -239,7 +239,7 @@ def _try_to_transform(self, matmul_op, add_op, block):
  d_out = weight.shape[1] if not transpose_weight else weight.shape[0]
  bias = add_op.x.val if add_op.x.val is not None else add_op.y.val
  if len(bias.shape) > 1:
- if any(d != 1 for d in bias.shape[:-1]):
+ if any([d != 1 for d in bias.shape[:-1]]):
  return # cannot transform

  # squeeze leading dims of size 1
