Skip to content

Commit 47a6fcc

Browse files
AHRecces, alirezazolanvari, sadrasabouri
authored
init/paper joss (#211)
* init and add current references * init `paper.md` * add `Summary` section * add `Statement of Need` section * add comparison table (`sepand` feedback) * `CHANGELOG.md` updated * add my ORCID id, add TODO for placeholder for the others * finalize `orcid`s * update summary * `CHANGELOG.md` updated * rewrite `pickle` and `joblib` sections * rewrite `PMML and ONNX` sections * rewrite and summarize `SKOPS, Tensorflow.js` sections * make the wrap up part concise * drop duplicated ref * update ref * summarize the last paragraph introducing PyMilo * multi-lining the text * applying feedback * drop the python tag * apply some textual feedback * referencing Table according to JOSS documentation * update table defining and referring based on `https://github.com/RECeSS-EU-Project/stanscofi` * increase the cohesiveness * apply feedback * add overall pymilo image * add overall figure - following the way it is done in `https://joss.theoj.org/papers/10.21105/joss.07951` * rename file to `pymilo_outlook.png` * update image caption * add more refs * `refs` final update by Amir * sync and enhance paper content with the refs changes (+updates) * Update paper.md Add `@`, refactor sentence beginning * add : Sadra's 2nd affiliation added. * add doi for references * add 2nd affiliation * add skops cite, drop cite of blog investigating skops * update cite for skops * update `onnx` and `pmml` to `@software` citing --------- Co-authored-by: alirezazolanvari <alireza.zolanvari93@gmail.com> Co-authored-by: Sadra Sabouri <sabouri.sadra@gmail.com>
1 parent 0906091 commit 47a6fcc

File tree

3 files changed

+256
-0
lines changed

3 files changed

+256
-0
lines changed

paper/paper.bib

Lines changed: 158 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,158 @@
1+
@article{Raschka2020,
  author  = {Raschka, Sebastian and Patterson, Joshua and Nolet, Corey},
  title   = {Machine Learning in {Python}: Main Developments and Technology Trends in Data Science, Machine Learning, and Artificial Intelligence},
  journal = {Information},
  volume  = {11},
  number  = {4},
  pages   = {193},
  year    = {2020},
  doi     = {10.3390/info11040193},
}
11+
12+
@inproceedings{parida2025model,
  author    = {Parida, Shreyas Kumar and Gerostathopoulos, Ilias and Bogner, Justus},
  title     = {How Do Model Export Formats Impact the Development of {ML}-Enabled Systems? {A} Case Study on Model Integration},
  booktitle = {2025 {IEEE/ACM} 4th International Conference on {AI} Engineering -- Software Engineering for {AI} ({CAIN})},
  pages     = {48--59},
  year      = {2025},
  doi       = {10.1109/CAIN66642.2025.00014},
}
22+
23+
@inproceedings{davis2023reusing,
  author       = {Davis, James C and Jajal, Purvish and Jiang, Wenxin and Schorlemmer, Taylor R and Synovic, Nicholas and Thiruvathukal, George K},
  title        = {Reusing deep learning models: Challenges and directions in software engineering},
  booktitle    = {2023 IEEE John Vincent Atanasoff International Symposium on Modern Computing (JVA)},
  pages        = {17--30},
  year         = {2023},
  organization = {IEEE},
  doi          = {10.1109/JVA60410.2023.00015},
}
32+
33+
@article{Garbin2022,
  author  = {Garbin, Christian and Marques, Oge},
  title   = {Assessing Methods and Tools to Improve Reporting, Increase Transparency, and Reduce Failures in Machine Learning Applications in Health Care},
  journal = {Radiology: Artificial Intelligence},
  volume  = {4},
  number  = {2},
  pages   = {e210127},
  year    = {2022},
  doi     = {10.1148/ryai.210127},
}
43+
44+
@article{bodimani2024assessing,
  author  = {Bodimani, Meghasai},
  title   = {Assessing the Impact of Transparent {AI} Systems in Enhancing User Trust and Privacy},
  journal = {Journal of Science \& Technology},
  volume  = {5},
  number  = {1},
  pages   = {50--67},
  year    = {2024},
  doi     = {10.55662/JST.2024.5102},
}
54+
55+
@misc{Brownlee2018,
  author       = {Brownlee, Jason},
  title        = {Save and Load Machine Learning Models in {Python} with {scikit-learn}},
  howpublished = {\url{https://machinelearningmastery.com/save-load-machine-learning-models-python-scikit-learn/}},
  year         = {2018},
  note         = {Accessed: 2024-05-22},
}
62+
63+
@misc{PythonPickleDocs,
  author       = {{Python Software Foundation}},
  title        = {{pickle} --- {Python} Object Serialization},
  year         = {2024},
  howpublished = {\url{https://docs.python.org/3/library/pickle.html#security}},
}
69+
70+
@software{onnx,
  author  = {Bai, Junjie and Lu, Fang and Zhang, Ke and others},
  title   = {{ONNX}: Open Neural Network Exchange},
  url     = {https://github.com/onnx/onnx},
  version = {1.18.0},
  date    = {2025-05-12},
}
77+
78+
@article{pmml,
  author  = {Guazzelli, Alex and Zeller, Michael and Lin, Wen-Ching and Williams, Graham},
  title   = {{PMML}: An Open Standard for Sharing Models},
  journal = {The R Journal},
  volume  = {1},
  number  = {1},
  pages   = {60--65},
  year    = {2009},
  doi     = {10.32614/RJ-2009-010},
}
84+
85+
@article{jajal2023analysis,
  author        = {Jajal, Purvish and Jiang, Wenxin and Tewari, Arav and Kocinare, Erik and Woo, Joseph and Sarraf, Anusha and Lu, Yung-Hsiang and Thiruvathukal, George K and Davis, James C},
  title         = {Analysis of Failures and Risks in Deep Learning Model Converters: A Case Study in the {ONNX} Ecosystem},
  journal       = {arXiv preprint arXiv:2303.17708},
  eprint        = {2303.17708},
  archiveprefix = {arXiv},
  primaryclass  = {cs.SE},
  year          = {2023},
  doi           = {10.48550/arXiv.2303.17708},
}
92+
93+
@inproceedings{cody2024extending,
  author       = {Cody, Tyler and Li, Bingtong and Beling, Peter},
  title        = {On Extending the Automatic Test Markup Language ({ATML}) for Machine Learning},
  booktitle    = {2024 {IEEE} International Systems Conference ({SysCon})},
  pages        = {1--8},
  year         = {2024},
  organization = {IEEE},
  doi          = {10.1109/SysCon61195.2024.10553464},
}
102+
103+
@software{skops,
  author  = {{skops-dev}},
  title   = {{SKOPS}},
  url     = {https://github.com/skops-dev/skops},
  version = {0.11.0},
  date    = {2024-12-10},
}
110+
111+
@article{tfjs2019,
  author  = {Smilkov, Daniel and Thorat, Nikhil and Assogba, Yannick and Nicholson, Charles and Kreeger, Nick and Yu, Ping and Cai, Shanqing and Nielsen, Eric and Soegel, David and Bileschi, Stan and others},
  title   = {{TensorFlow.js}: Machine Learning for the Web and Beyond},
  journal = {Proceedings of Machine Learning and Systems},
  volume  = {1},
  pages   = {309--321},
  year    = {2019},
  doi     = {10.48550/arXiv.1901.05350},
}
120+
121+
@inproceedings{quan2022towards,
  author    = {Quan, Lili and Guo, Qianyu and Xie, Xiaofei and Chen, Sen and Li, Xiaohong and Liu, Yang},
  title     = {Towards Understanding the Faults of {JavaScript}-Based Deep Learning Systems},
  booktitle = {Proceedings of the 37th {IEEE/ACM} International Conference on Automated Software Engineering},
  pages     = {1--13},
  year      = {2022},
  doi       = {10.1145/3551349.3560427},
}
129+
130+
@misc{NerdCorner2025,
  author       = {{Nerd Corner}},
  title        = {{TensorFlow.js} vs {TensorFlow} ({Python}) -- Pros and Cons},
  year         = {2025},
  month        = mar,
  howpublished = {\url{https://nerd-corner.com/tensorflow-js-vs-tensorflow-python/}},
}
137+
138+
@inproceedings{rauker2023toward,
  author       = {R{\"a}uker, Tilman and Ho, Anson and Casper, Stephen and Hadfield-Menell, Dylan},
  title        = {Toward Transparent {AI}: A Survey on Interpreting the Inner Structures of Deep Neural Networks},
  booktitle    = {2023 {IEEE} Conference on Secure and Trustworthy Machine Learning ({SaTML})},
  pages        = {464--483},
  year         = {2023},
  organization = {IEEE},
  doi          = {10.1109/SaTML54575.2023.00039},
}
147+
148+
@article{macrae2019governing,
  author    = {Macrae, Carl},
  title     = {Governing the Safety of Artificial Intelligence in Healthcare},
  journal   = {{BMJ} Quality \& Safety},
  volume    = {28},
  number    = {6},
  pages     = {495--498},
  year      = {2019},
  publisher = {BMJ Publishing Group Ltd},
  doi       = {10.1136/bmjqs-2019-009484},
}

paper/paper.md

Lines changed: 98 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,98 @@
1+
---
2+
title: 'PyMilo: A Python Library for ML I/O'
3+
tags:
4+
- Machine Learning
5+
- Model Deployment
6+
- Model Serialization
7+
- Transparency
8+
- MLOps
9+
authors:
10+
- name: AmirHosein Rostami
11+
orcid: 0009-0000-0638-2263
12+
corresponding: true
13+
affiliation: "1, 2"
14+
- name: Sepand Haghighi
15+
orcid: 0000-0001-9450-2375
16+
corresponding: false
17+
affiliation: 1
18+
- name: Sadra Sabouri
19+
orcid: 0000-0003-1047-2346
20+
corresponding: false
21+
affiliation: "1, 3"
22+
- name: Alireza Zolanvari
23+
orcid: 0000-0003-2367-8343
24+
corresponding: false
25+
affiliation: 1
26+
affiliations:
27+
- index: 1
28+
name: Open Science Lab
29+
- index: 2
30+
name: University of Toronto, Toronto, Canada
31+
ror: 03dbr7087
32+
- index: 3
33+
name: University of Southern California, Los Angeles, United States
34+
ror: 03taz7m60
35+
date: 24 June 2025
36+
bibliography: paper.bib
37+
---
38+
39+
# Summary
40+
PyMilo is an open-source Python package that addresses the limitations of existing machine learning (ML) model storage formats by providing a transparent, reliable, end-to-end, and safe method for exporting and deploying trained models.
41+
Current tools rely on black-box or executable formats that obscure internal model structures, making them difficult to audit, verify, or safely share.
42+
Others apply structural transformations during export that may degrade predictive performance and reduce the model to a limited inference-only interface.
43+
In contrast, PyMilo serializes models in a transparent human-readable format that preserves end-to-end model fidelity and enables reliable, safe, and interpretable exchange.
44+
This package is designed to make the preservation and reuse of trained ML models safer, more interpretable, and easier to manage across different stages of the ML workflow (\autoref{fig:overall}).
45+
46+
![PyMilo is an end-to-end, transparent, and safe solution for transporting models from machine learning frameworks to the target devices. PyMilo preserves the original model's structure while transferring, allowing it to be imported back as the exact same object in its native framework.\label{fig:overall}](pymilo_outlook.png)
47+
48+
\newpage
49+
50+
# Statement of Need
51+
Modern machine learning development is largely centered around the Python ecosystem, which has become a dominant platform for building and training models due to its rich libraries and community support [@Raschka2020].
52+
However, once a model is trained, sharing or deploying it securely and transparently remains a significant challenge [@parida2025model; @davis2023reusing]. This issue is especially important in high-stake domains such as healthcare, where ensuring model accountability and integrity is critical [@Garbin2022].
53+
In such settings, any lack of clarity about a model’s internal logic or origin can reduce trust in its predictions. Researchers have increasingly emphasized that greater transparency in AI systems is critical for maintaining user trust and protecting privacy in machine learning applications [@bodimani2024assessing].
54+
55+
Despite ongoing concerns around transparency and safety, the dominant approach for exchanging pretrained models remains ad hoc binary serialization, most commonly through Python’s `pickle` module or its variant `joblib`.
56+
These formats allow developers to store complex model objects with minimal effort, but they were never designed with security or human interpretability in mind [@parida2025model]. In fact, loading a pickle file may execute arbitrary code contained within it, a known vulnerability that can be exploited if the file is maliciously crafted [@Brownlee2018; @PythonPickleDocs].
57+
While these methods preserve full model fidelity within the Python ecosystem, they pose serious security risks and lack transparency, as the serialized files are opaque binary blobs that cannot be inspected without loading.
58+
Furthermore, compatibility is fragile because pickled models often depend on specific library versions, which may hinder long-term reproducibility [@Brownlee2018].
59+
60+
To improve portability across environments, several standardized model interchange formats have been developed alongside `pickle`.
61+
Most notably, Open Neural Network Exchange (ONNX) and Predictive Model Markup Language (PMML) convert trained models into framework-agnostic representations [@onnx; @pmml], enabling deployment in diverse systems without relying on the original training code.
62+
ONNX uses a graph-based structure built from primitive operators (e.g., linear transforms, activations), while PMML provides an XML-based specification for traditional models like decision trees and regressions.
63+
64+
Although these formats enhance security by avoiding executable serialization, they introduce compatibility and fidelity challenges.
65+
Exporting complex pipelines to ONNX or PMML often leads to structural approximations, missing metadata, or unsupported components, especially for customized models [@pmml].
66+
As a result, the exported model may differ in behavior, resulting in performance degradation or loss of accuracy [@jajal2023analysis].
67+
Jajal et al. found that models exported to ONNX can produce incorrect predictions despite successful conversion, indicating semantic inconsistencies between the original and exported versions [@jajal2023analysis]. This reflects predictive performance degradation and highlights the risks of silent behavioral drift in deployed systems.
68+
69+
Beyond concerns about end-to-end model preservation, ONNX and PMML also present limitations in transparency, scope, and reversibility. ONNX uses a binary protocol buffer format that is not human-readable, which limits interpretability and makes auditing difficult.
70+
PMML, although XML-based and readable, is verbose and narrowly scoped, supporting only a limited subset of scikit-learn models. As noted by Cody et al., both ONNX and PMML focus on static model specification rather than operational testing or lifecycle validation workflows [@cody2024extending]. Moreover, PMML does not provide a mechanism to restore exported models into Python, making it a one-way format that limits reproducibility across ML workflows.
71+
72+
Other tools have been developed to address specific use cases, though they remain limited in scope. For example, SKOPS improves the safety of scikit-learn model storage by enabling limited inspection of model internals without requiring code execution [@skops].
73+
However, it supports only scikit-learn models, lacks compatibility with other frameworks, and does not provide a fully transparent or human-readable structure.
74+
TensorFlow.js targets JavaScript environments by converting TensorFlow or Keras models into a JSON configuration file and binary weight files for execution in the browser or Node.js [@tfjs2019].
75+
However, this process has been shown to introduce compatibility issues, performance degradation, and inconsistencies in inference behavior due to backend limitations and environment-specific faults [@quan2022towards].
76+
Models from other frameworks, such as scikit-learn or PyTorch, must be re-implemented or retrained in TensorFlow to be exported.
77+
Additionally, running complex models in JavaScript runtimes introduces memory and performance limitations, often making the deployment of large neural networks prohibitively slow or even infeasible in browser environments [@NerdCorner2025].
78+
79+
In summary, current solutions force practitioners into a trade-off between security, transparency, end-to-end fidelity, and performance preservation (see Table 1).
80+
The machine learning community still lacks a safe and transparent end-to-end model serialization framework through which users can securely share models, inspect them easily, and accurately reconstruct them for use across diverse frameworks and environments.
81+
82+
**Table 1**: Comparison of PyMilo with existing model serialization tools.
83+
84+
| Package | Transparent | Multi-Framework | End-to-End Preservation | Secure |
85+
|------------------|-------------|------------------|--------------------------|--------|
86+
| **Pickle** | No | Yes | Yes | No |
87+
| **Joblib** | No | Yes | Yes | No |
88+
| **ONNX** | No | Yes | No | Yes |
89+
| **PMML** | Yes | No | No | Yes |
90+
| **SKOPS** | No | No | Yes | Yes |
91+
| **TensorFlow.js** | Yes | No | No | Yes |
92+
| **PyMilo** | Yes | Yes | Yes | Yes |
93+
94+
PyMilo is proposed to address the above gaps. It is an open-source Python library that provides an end-to-end solution for exporting and importing machine learning models in a safe, non-executable, and human-readable format such as JSON. PyMilo serializes trained models into a transparent format and fully reconstructs them without structural changes, preserving their original functionality and behavior.
95+
This process does not affect inference time or performance and imports models on any target device without additional dependencies, enabling seamless execution in inference mode.
96+
PyMilo benefits a wide range of stakeholders, including machine learning engineers, data scientists, and AI practitioners, by facilitating the development of more transparent and accountable AI systems. Furthermore, researchers working on transparent AI [@rauker2023toward], user privacy in ML [@bodimani2024assessing], and safe AI [@macrae2019governing] can use PyMilo as a framework that provides transparency and safety in the machine learning environment.
97+
98+
# References

paper/pymilo_outlook.png

294 KB
Loading

0 commit comments

Comments
 (0)