Skip to content

Commit 1a0d051

Browse files
YifanShenSZ
and
yifan_shen3
authored
8.2 Release (#2433)
* 8.2 Release
* bump version to 8.2
* fix flake8 error https://gitlab.com/coremltools1/coremltools/-/jobs/8889872126
* fix CI flakiness: avoid duplicate elements in input tensor so output indices are unique

Co-authored-by: yifan_shen3 <yifan_shen3@apple.com>
1 parent babbb03 commit 1a0d051

File tree

109 files changed

+3198
-2487
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

109 files changed

+3198
-2487
lines changed

.gitlab-ci.yml

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -90,7 +90,7 @@ test_py39_pytorch_intel:
9090
PYTHON: "3.9"
9191
TEST_PACKAGE: coremltools.converters.mil.frontend.torch
9292
WHEEL_PATH: build/dist/*cp39*10_15*
93-
REQUIREMENTS: reqs/test.pip
93+
REQUIREMENTS: reqs/test_torch.pip
9494

9595
test_py37_tf1_intel:
9696
<<: *test_macos_pkg
@@ -200,7 +200,7 @@ test_py310_pytorch_script:
200200
PYTHON: "3.10"
201201
TEST_PACKAGE: coremltools.converters.mil.frontend.torch
202202
WHEEL_PATH: build/dist/*cp310*11*
203-
REQUIREMENTS: reqs/test.pip
203+
REQUIREMENTS: reqs/test_torch.pip
204204
TORCH_FRONTENDS: TORCHSCRIPT
205205

206206
test_py310_pytorch_export:
@@ -213,7 +213,7 @@ test_py310_pytorch_export:
213213
PYTHON: "3.10"
214214
TEST_PACKAGE: coremltools.converters.mil.frontend.torch
215215
WHEEL_PATH: build/dist/*cp310*11*
216-
REQUIREMENTS: reqs/test.pip
216+
REQUIREMENTS: reqs/test_torch.pip
217217
TORCH_FRONTENDS: TORCHEXPORT
218218

219219
test_py310_pytorch_executorch:
@@ -226,7 +226,7 @@ test_py310_pytorch_executorch:
226226
PYTHON: "3.10"
227227
TEST_PACKAGE: coremltools.converters.mil.frontend.torch
228228
WHEEL_PATH: build/dist/*cp310*11*
229-
REQUIREMENTS: reqs/test.pip
229+
REQUIREMENTS: reqs/test_executorch.pip
230230
TORCH_FRONTENDS: EXECUTORCH
231231

232232
test_py310_tf2-1:

coremlpython/CoreMLPython.h

Lines changed: 24 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -17,6 +17,7 @@
1717
#import <Availability.h>
1818

1919
#import <vector>
20+
#import <optional>
2021

2122
#ifndef BUILT_WITH_MACOS15_SDK
2223
#define BUILT_WITH_MACOS15_SDK \
@@ -73,6 +74,9 @@ namespace CoreML {
7374
};
7475

7576
struct CPUComputeDevice {
77+
inline CPUComputeDevice():
78+
m_impl(nil) {}
79+
7680
// MLCPUComputeDevice must be wrapped in a C++ class for PyBind.
7781
inline CPUComputeDevice(id impl):
7882
m_impl(impl) {}
@@ -90,6 +94,9 @@ namespace CoreML {
9094
};
9195

9296
struct GPUComputeDevice {
97+
inline GPUComputeDevice():
98+
m_impl(nil) {}
99+
93100
// MLGPUComputeDevice must be wrapped in a C++ class for PyBind.
94101
inline GPUComputeDevice(id impl):
95102
m_impl(impl) {}
@@ -107,6 +114,9 @@ namespace CoreML {
107114
};
108115

109116
struct NeuralEngineComputeDevice {
117+
inline NeuralEngineComputeDevice():
118+
m_impl(nil) {}
119+
110120
// MLNeuralEngineComputeDevice must be wrapped in a C++ class for PyBind.
111121
inline NeuralEngineComputeDevice(id impl):
112122
m_impl(impl) {}
@@ -160,6 +170,9 @@ namespace CoreML {
160170
};
161171

162172
struct ComputePlan {
173+
inline ComputePlan():
174+
m_impl(nil), m_modelStructure(py::none()) {}
175+
163176
// MLComputePlan must be wrapped in a C++ class for PyBind.
164177
inline ComputePlan(id impl, py::object modelStructure):
165178
m_impl(impl),
@@ -191,10 +204,12 @@ namespace CoreML {
191204
m_impl(impl),
192205
m_datas(std::move(datas)) {}
193206

207+
#if ML_MODEL_ASSET_IS_AVAILABLE
194208
API_AVAILABLE(macos(13.0))
195209
inline MLModelAsset *getImpl() const {
196210
return (MLModelAsset *)m_impl;
197211
}
212+
#endif
198213

199214
id m_impl = nil;
200215
std::vector<py::bytes> m_datas;
@@ -205,6 +220,8 @@ namespace CoreML {
205220
MLModel *m_model = nil;
206221
NSURL *compiledUrl = nil;
207222
bool m_deleteCompiledModelOnExit = false;
223+
std::optional<uint64_t> m_loadDurationInNanoSeconds;
224+
std::optional<uint64_t> m_lastPredictDurationInNanoSeconds;
208225

209226
public:
210227
static py::bytes autoSetSpecificationVersion(const py::bytes& modelBytes);
@@ -229,11 +246,16 @@ namespace CoreML {
229246

230247
explicit Model(MLModel* m_model, NSURL* compiledUrl, bool deleteCompiledModelOnExit);
231248

232-
py::list batchPredict(const py::list& batch) const;
249+
py::list batchPredict(const py::list& batch);
233250

234251
py::str getCompiledModelPath() const;
235252

236-
py::dict predict(const py::dict& input, State* state=NULL) const;
253+
py::dict predict(const py::dict& input, State* state=NULL);
254+
255+
py::object getLoadDurationInNanoSeconds() const;
256+
257+
py::object getLastPredictDurationInNanoSeconds() const;
258+
237259

238260
#if BUILT_WITH_MACOS15_SDK
239261
static void setOptimizationHints(MLModelConfiguration *configuration, const py::dict& optimizationHints);

coremlpython/CoreMLPython.mm

Lines changed: 61 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -4,6 +4,7 @@
44
// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
55

66
#import <CoreML/CoreML.h>
7+
#include <mach/mach_time.h>
78
#import "CoreMLPythonArray.h"
89
#import "CoreMLPython.h"
910
#import "CoreMLPythonUtils.h"
@@ -289,20 +290,34 @@
289290
}
290291
#endif
291292

293+
uint64_t convertMachTimeToNanoSeconds(uint64_t time) {
294+
static dispatch_once_t once;
295+
static mach_timebase_info_data_t timebase;
296+
dispatch_once(&once, ^{
297+
mach_timebase_info(&timebase);
298+
});
299+
uint64_t result = (time * timebase.numer) / timebase.denom;
300+
return result;
301+
}
292302

293303
#if ML_MODEL_ASSET_IS_AVAILABLE
294304
API_AVAILABLE(macos(13.0))
295-
MLModel * createModelFromModelAsset(MLModelAsset *modelAsset,
296-
MLModelConfiguration *configuration,
297-
NSError * __autoreleasing *error) {
305+
std::pair<MLModel *, uint64_t> createModelFromModelAsset(
306+
MLModelAsset *modelAsset,
307+
MLModelConfiguration *configuration,
308+
NSError * __autoreleasing *error
309+
) {
298310
dispatch_semaphore_t sem = dispatch_semaphore_create(0);
299311
__block MLModel *result = nil;
300312
__block NSError *lError = nil;
313+
uint64_t loadStartTime = mach_absolute_time();
314+
__block uint64_t loadEndTime = loadStartTime;
301315
[MLModel loadModelAsset:modelAsset
302316
configuration:configuration
303317
completionHandler:^(MLModel * _Nullable model, NSError * _Nullable loadError){
304318
result = model;
305319
lError = loadError;
320+
loadEndTime = mach_absolute_time();
306321
dispatch_semaphore_signal(sem);
307322
}];
308323

@@ -312,9 +327,9 @@
312327
*error = lError;
313328
}
314329

315-
return result;
330+
uint64_t loadDurationInNanoSeconds = convertMachTimeToNanoSeconds(loadEndTime - loadStartTime);
331+
return {result, loadDurationInNanoSeconds};
316332
}
317-
318333
#endif
319334
}
320335

@@ -380,18 +395,25 @@ bool usingMacOS13OrHigher() {
380395
configuration.functionName = [NSString stringWithUTF8String:functionName.c_str()];
381396
}
382397
#endif
398+
uint64_t loadDurationInNanoSeconds = 0;
383399
// Create MLModel
384400
if (asset.is_none()) {
401+
uint64_t loadStartTime = mach_absolute_time();
385402
m_model = [MLModel modelWithContentsOfURL:compiledUrl configuration:configuration error:&error];
403+
uint64_t loadEndTime = mach_absolute_time();
404+
loadDurationInNanoSeconds = convertMachTimeToNanoSeconds(loadEndTime - loadStartTime);
386405
} else {
387406
#if ML_MODEL_ASSET_IS_AVAILABLE
388-
m_model = createModelFromModelAsset(py::cast<ModelAsset>(asset).getImpl(), configuration, &error);
407+
auto pair = createModelFromModelAsset(py::cast<ModelAsset>(asset).getImpl(), configuration, &error);
408+
m_model = pair.first;
409+
loadDurationInNanoSeconds = pair.second;
389410
#else
390411
throw std::runtime_error("MLModelAsset is only available on macOS >= 13.0");
391412
#endif
392413
}
393414

394415
Utils::handleError(error);
416+
m_loadDurationInNanoSeconds = loadDurationInNanoSeconds;
395417
}
396418
}
397419

@@ -410,13 +432,14 @@ bool usingMacOS13OrHigher() {
410432
}
411433

412434

413-
py::dict Model::predict(const py::dict& input, State* state) const {
435+
py::dict Model::predict(const py::dict& input, State* state) {
414436
@autoreleasepool {
415437
NSError *error = nil;
416438
MLDictionaryFeatureProvider *inFeatures = Utils::dictToFeatures(input, &error);
417439
Utils::handleError(error);
418440

419441
id<MLFeatureProvider> outFeatures;
442+
uint64_t predictStartTime = mach_absolute_time();
420443
#if BUILT_WITH_MACOS15_SDK
421444
if (state == NULL) {
422445
outFeatures = [m_model predictionFromFeatures:static_cast<MLDictionaryFeatureProvider * _Nonnull>(inFeatures)
@@ -430,8 +453,10 @@ bool usingMacOS13OrHigher() {
430453
outFeatures = [m_model predictionFromFeatures:static_cast<MLDictionaryFeatureProvider * _Nonnull>(inFeatures)
431454
error:&error];
432455
#endif
433-
456+
uint64_t predictEndTime = mach_absolute_time();
434457
Utils::handleError(error);
458+
459+
m_lastPredictDurationInNanoSeconds = convertMachTimeToNanoSeconds(predictEndTime - predictStartTime);
435460
return Utils::featuresToDict(outFeatures);
436461
}
437462
}
@@ -485,7 +510,7 @@ bool usingMacOS13OrHigher() {
485510
}
486511
#endif
487512

488-
py::list Model::batchPredict(const py::list& batch) const {
513+
py::list Model::batchPredict(const py::list& batch) {
489514
@autoreleasepool {
490515
NSError* error = nil;
491516

@@ -498,11 +523,14 @@ bool usingMacOS13OrHigher() {
498523
}
499524
MLArrayBatchProvider* batchProvider = [[MLArrayBatchProvider alloc] initWithFeatureProviderArray: array];
500525

526+
uint64_t predictStartTime = mach_absolute_time();
501527
// Get predictions
502528
MLArrayBatchProvider* predictions = (MLArrayBatchProvider*)[m_model predictionsFromBatch:batchProvider
503529
error:&error];
530+
uint64_t predictEndTime = mach_absolute_time();
504531
Utils::handleError(error);
505532

533+
m_lastPredictDurationInNanoSeconds = convertMachTimeToNanoSeconds(predictEndTime - predictStartTime);
506534
// Convert predictions to output
507535
py::list ret;
508536
for (int i = 0; i < predictions.array.count; i++) {
@@ -773,6 +801,22 @@ bool usingMacOS13OrHigher() {
773801
return CoreML::MLMODEL_SPECIFICATION_VERSION_NEWEST;
774802
}
775803

804+
py::object Model::getLoadDurationInNanoSeconds() const {
805+
if (m_loadDurationInNanoSeconds) {
806+
return py::cast(m_loadDurationInNanoSeconds.value());
807+
}
808+
809+
return py::none();
810+
}
811+
812+
py::object Model::getLastPredictDurationInNanoSeconds() const {
813+
if (m_lastPredictDurationInNanoSeconds) {
814+
return py::cast(m_lastPredictDurationInNanoSeconds.value());
815+
}
816+
817+
return py::none();
818+
}
819+
776820
/*
777821
*
778822
* bindings
@@ -788,6 +832,8 @@ bool usingMacOS13OrHigher() {
788832
.def("predict", &Model::predict)
789833
.def("batchPredict", &Model::batchPredict)
790834
.def("get_compiled_model_path", &Model::getCompiledModelPath)
835+
.def("get_load_duration_in_nano_seconds", &Model::getLoadDurationInNanoSeconds)
836+
.def("get_last_predict_duration_in_nano_seconds", &Model::getLastPredictDurationInNanoSeconds)
791837
.def_static("auto_set_specification_version", &Model::autoSetSpecificationVersion)
792838
.def_static("maximum_supported_specification_version", &Model::maximumSupportedSpecificationVersion)
793839
#if BUILT_WITH_MACOS15_SDK
@@ -804,14 +850,18 @@ bool usingMacOS13OrHigher() {
804850
py::class_<State>(m, "_State", py::module_local());
805851

806852
#if ML_COMPUTE_DEVICE_IS_AVAILABLE
807-
py::class_<CPUComputeDevice>(m, "_MLCPUComputeDeviceProxy", py::module_local());
808-
py::class_<GPUComputeDevice>(m, "_MLGPUComputeDeviceProxy", py::module_local());
853+
py::class_<CPUComputeDevice>(m, "_MLCPUComputeDeviceProxy", py::module_local())
854+
.def(py::init());
855+
py::class_<GPUComputeDevice>(m, "_MLGPUComputeDeviceProxy", py::module_local())
856+
.def(py::init());
809857
py::class_<NeuralEngineComputeDevice>(m, "_MLNeuralEngineComputeDeviceProxy", py::module_local())
858+
.def(py::init())
810859
.def("get_total_core_count", &NeuralEngineComputeDevice::getTotalCoreCount);
811860
#endif
812861

813862
#if ML_COMPUTE_PLAN_IS_AVAILABLE
814863
py::class_<ComputePlan>(m, "_MLComputePlanProxy", py::module_local())
864+
.def(py::init())
815865
.def_property_readonly("model_structure", &ComputePlan::getModelStructure)
816866
.def("get_compute_device_usage_for_mlprogram_operation", &ComputePlan::getComputeDeviceUsageForMLProgramOperation)
817867
.def("get_compute_device_usage_for_neuralnetwork_layer", &ComputePlan::getComputeDeviceUsageForNeuralNetworkLayer)

coremltools/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -116,9 +116,9 @@ class SpecializationStrategy(_Enum):
116116
_LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_NEURALNETWORK = _SPECIFICATION_VERSION_IOS_13
117117
_LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_MILPROGRAM = _SPECIFICATION_VERSION_IOS_15
118118

119-
120119
# expose sub packages as directories
121120
from . import converters, models, optimize, proto
121+
122122
# expose unified converter in coremltools package level
123123
from .converters import ClassifierConfig
124124
from .converters import ColorLayout as colorlayout

coremltools/_deps/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -153,7 +153,7 @@ def __get_sklearn_version(version):
153153

154154
# ---------------------------------------------------------------------------------------
155155
_HAS_TORCH = True
156-
_TORCH_MAX_VERSION = "2.4.0"
156+
_TORCH_MAX_VERSION = "2.5.0"
157157
_HAS_TORCH_EXPORT_API = False
158158
_CT_OPTIMIZE_TORCH_MIN_VERSION = "2.1.0"
159159
_IMPORT_CT_OPTIMIZE_TORCH = False

coremltools/converters/libsvm/_libsvm_converter.py

Lines changed: 6 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -3,8 +3,9 @@
33
# Use of this source code is governed by a BSD-3-clause license that can be
44
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
55

6+
import coremltools as ct
67
from coremltools import __version__ as ct_version
7-
from coremltools.models import _METADATA_SOURCE, _METADATA_VERSION
8+
from coremltools import proto
89

910
from ... import SPECIFICATION_VERSION
1011
from ..._deps import _HAS_LIBSVM
@@ -58,12 +59,11 @@ def convert(libsvm_model, feature_names, target, input_length, probability):
5859
from libsvm import svm as _svm
5960

6061
from ...models import MLModel
61-
from ...proto import Model_pb2
6262

6363
svm_type_enum = libsvm_model.param.svm_type
6464

6565
# Create the spec
66-
export_spec = Model_pb2.Model()
66+
export_spec = proto.Model_pb2.Model()
6767
export_spec.specificationVersion = SPECIFICATION_VERSION
6868

6969
if svm_type_enum == _svm.EPSILON_SVR or svm_type_enum == _svm.NU_SVR:
@@ -90,7 +90,7 @@ def convert(libsvm_model, feature_names, target, input_length, probability):
9090
input = export_spec.description.input.add()
9191
input.name = feature_names
9292
input.type.multiArrayType.shape.append(input_length)
93-
input.type.multiArrayType.dataType = Model_pb2.ArrayFeatureType.DOUBLE
93+
input.type.multiArrayType.dataType = proto.Model_pb2.ArrayFeatureType.DOUBLE
9494

9595
else:
9696
# input will be a series of doubles
@@ -193,7 +193,7 @@ def convert(libsvm_model, feature_names, target, input_length, probability):
193193
from libsvm import __version__ as libsvm_version
194194

195195
libsvm_version = "libsvm=={0}".format(libsvm_version)
196-
model.user_defined_metadata[_METADATA_VERSION] = ct_version
197-
model.user_defined_metadata[_METADATA_SOURCE] = libsvm_version
196+
model.user_defined_metadata[ct.models._METADATA_VERSION] = ct_version
197+
model.user_defined_metadata[ct.models._METADATA_SOURCE] = libsvm_version
198198

199199
return model

0 commit comments

Comments (0)