From ebd13d6d0ae81e81667bf619c4041d6ffb893517 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Thu, 4 Sep 2025 14:50:31 -0400 Subject: [PATCH 01/42] add paper example --- paper/paper.bib | 450 ++++++++++++++++++++++++++++++++++++++++++++++++ paper/paper.md | 97 ++++++++++- 2 files changed, 544 insertions(+), 3 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index e69de29b..7eada02d 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -0,0 +1,450 @@ + +@Article{ andrei-2019, + Author = {Neculai Andrei}, + Title = {A diagonal quasi-{N}ewton updating method for unconstrained optimization}, + Journal = numalg, + Year = 2019, + Volume = 81, + Pages = {575-–590}, + doi = {10.1007/s11075-018-0562-7}, + abstract = {A diagonal quasi-Newton updating algorithm is presented. The elements of the diagonal matrix approximating the Hessian are determined by minimizing both the size of the change from the previous estimate and the trace of the update, subject to the weak secant equation. Under mild classical assumptions, the convergence of the algorithm is proved to be linear. The diagonal quasi-Newton update satisfies the bounded deterioration property. Numerical experiments with 80 unconstrained optimization test problems of different structures and complexities prove that the suggested algorithm is more efficient and more robust than the steepest descent, Cauchy with Oren and Luenberger scaling algorithm in its complementary form and classical Broyden-Fletcher-Goldfarb-Shanno algorithm.}, +} + +@Article{ aravkin-baraldi-orban-2022, + Author = {A. Y. Aravkin and R. Baraldi and D. Orban}, + Title = {A Proximal Quasi-{N}ewton Trust-Region Method for Nonsmooth Regularized Optimization}, + Journal = siopt, + Year = 2022, + Volume = 32, + Number = 2, + Pages = {900--929}, + doi = {10.1137/21M1409536}, + abstract = { We develop a trust-region method for minimizing the sum of a smooth term (f) and a nonsmooth term (h), both of which can be nonconvex. Each iteration of our method minimizes a possibly nonconvex model of (f + h) in a trust region. The model coincides with (f + h) in value and subdifferential at the center. We establish global convergence to a first-order stationary point when (f) satisfies a smoothness condition that holds, in particular, when it has a Lipschitz-continuous gradient, and (h) is proper and lower semicontinuous. The model of (h) is required to be proper, lower semi-continuous and prox-bounded. Under these weak assumptions, we establish a worst-case (O(1/\epsilon^2)) iteration complexity bound that matches the best known complexity bound of standard trust-region methods for smooth optimization. We detail a special instance, named TR-PG, in which we use a limited-memory quasi-Newton model of (f) and compute a step with the proximal gradient method, + resulting in a practical proximal quasi-Newton method. We establish similar convergence properties and complexity bound for a quadratic regularization variant, named R2, and provide an interpretation as a proximal gradient method with adaptive step size for nonconvex problems. R2 may also be used to compute steps inside the trust-region method, resulting in an implementation named TR-R2. We describe our Julia implementations and report numerical results on inverse problems from sparse optimization and signal processing. Both TR-PG and TR-R2 exhibit promising performance and compare favorably with two linesearch proximal quasi-Newton methods based on convex models. 
}, +} + +@Article{ aravkin-baraldi-orban-2024, + Author = {Aravkin, Aleksandr Y. and Baraldi, Robert and Orban, Dominique}, + Title = {A {L}evenberg–{M}arquardt Method for Nonsmooth Regularized Least Squares}, + Journal = sisc, + Year = 2024, + Volume = 46, + Number = 4, + Pages = {A2557--A2581}, + doi = {10.1137/22M1538971}, + preprint = {https://www.gerad.ca/en/papers/G-2022-58/view}, + grant = nserc, + abstract = { Abstract. We develop a Levenberg–Marquardt method for minimizing the sum of a smooth nonlinear least-squares term \(f(x) = \frac{1}{2} \|F(x)\|\_2^2\) and a nonsmooth term \(h\). Both \(f\) and \(h\) may be nonconvex. Steps are computed by minimizing the sum of a regularized linear least-squares model and a model of \(h\) using a first-order method such as the proximal gradient method. We establish global convergence to a first-order stationary point under the assu mptions that \(F\) and its Jacobian are Lipschitz continuous and \(h\) is proper and lower semicontinuous. In the worst case, our method performs \(O(\epsilon^{-2})\) iterations to bring a measure of stationarity below \(\epsilon \in (0, 1)\) . We also derive a trust-region variant that enjoys similar asymptotic worst-case iteration complexity as a special case of the trust-region algorithm of Aravkin, Baraldi, and Orban [SIAM J. Optim., 32 (2022), pp. 900–929]. We report numerica l results on three + examples: a group-lasso basis-pursuit denoise example, a nonlinear support vector machine, and parameter estimation in a neuroscience application. To implement those examples, we describe in detail how to evaluate proximal operators for separable \(h\) and for the group lasso with trust-region constraint. In all cases, the Levenberg–Marquardt methods perform fewer outer iterations than either a proximal gradient method with adaptive step length or a quasi-Newto n trust-region method, neither of which exploit the least-squares structure of the problem. Our results also highlight the need for more sophisticated subproblem solvers than simple first-order methods. }, +} + +@Software{ baraldi-leconte-orban-regularized-optimization-2024, + Author = {R. Baraldi and G. Leconte and D. Orban}, + Title = {{RegularizedOptimization.jl}: Algorithms for Regularized Optimization}, + Year = 2024, + license = {MPL-2.0}, + url = {https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl}, + doi = {10.5281/zenodo.6940313}, +} + +@Software{ leconte_linearoperators_jl_linear_operators_2023, + Author = {Leconte, Geoffroy and Orban, Dominique and Soares Siqueira, Abel and contributors}, + license = {MPL-2.0}, + Title = {{LinearOperators.jl: Linear Operators for Julia}}, + url = {https://github.com/JuliaSmoothOptimizers/LinearOperators.jl}, + version = {2.6.0}, + Year = 2023, +} + +@Article{ birgin-martinez-raydan-2014, + Author = {Birgin, Ernesto G. and Martínez, Jose Mario and Raydan, Marcos}, + Title = {Spectral Projected Gradient Methods: Review and Perspectives}, + Journal = jssoft, + Year = 2014, + Volume = 60, + Number = 3, + Pages = {1--21}, + doi = {10.18637/jss.v060.i03}, + abstract = {Over the last two decades, it has been observed that using the gradient vector as a search direction in large-scale optimization may lead to efficient algorithms. The effectiveness relies on choosing the step lengths according to novel ideas that are related to the spectrum of the underlying local Hessian rather than related to the standard decrease in the objective function. 
A review of these so-called spectral projected gradient methods for convex constrained optimization is presented. To illustrate the performance of these low-cost schemes, an optimization problem on the set of positive definite matrices is described.}, +} + +@Article{ bolte-sabach-teboulle-2014, + Author = {Bolte, J. and Sabach, S. and Teboulle, M.}, + Title = {Proximal alternating linearized minimization for nonconvex and nonsmooth problems}, + Journal = mp, + Year = 2014, + Number = 146, + Pages = {459–-494}, + doi = {10.1007/s10107-013-0701-9}, +} + +@Article{ dennis-wolkowicz-1993, + Author = {Dennis, Jr., J. E. and Wolkowicz, H.}, + Title = {Sizing and Least-Change Secant Methods}, + Journal = sinum, + Year = 1993, + Volume = 30, + Number = 5, + Pages = {1291--1314}, + doi = {10.1137/0730067}, + abstract = { Oren and Luenberger introduced in 1974 a strategy for replacing Hessian approximations by their scalar multiples and then performing quasi-Newton updates, generally least-change secant updates such as the BFGS or DFP updates [Oren and Luenberger, Management Sci., 20 (1974), pp. 845–862]. In this paper, the function \[\omega (A) = \left( {\frac{{{{{\operatorname{trace}}(A)} / n}}}{{{\operatorname{det}}(A)^{{1 / n}} }}} \right)\] is shown to be a measure of change with a direct connection to the Oren–Luenberger strategy. This measure is interesting because it is related to the \$\ell\_2\$ condition number, but it takes all the eigenvalues of A into account rather than just the extremes. If the class of possible updates is restricted to the Broyden class, i.e., scalar premultiples are not allowed, then the optimal update depends on the dimension of the problem. It may, or may not, be in the convex class, but it becomes the BFGS update as the dimension increases. + This seems to be yet another explanation for why the optimally conditioned updates are not significantly better than the BFGS update. The theory results in several new interesting updates including a self-scaling, hereditarily positive definite, update in the Broyden class which is not necessarily in the convex class. This update, in conjunction with the Oren–Luenberger scaling strategy at the first iteration only, was consistently the best in numerical tests. }, +} + +@Article{ gilbert-lemarechal-1989, + Author = {Gilbert, J.-C. and Lemaréchal, C.}, + Title = {Some numerical experiments with variable-storage quasi-{N}ewton algorithms}, + Journal = mp, + Year = 1989, + Volume = 45, + Pages = {407--435}, + doi = {10.1007/BF01589113}, +} + +@TechReport{ aravkin-baraldi-leconte-orban-2021, + Author = {Aravkin, Aleksandr and Baraldi, Robert and Leconte, Geoffroy and Orban, Dominique}, + Title = {Corrigendum: A proximal quasi-{N}ewton trust-region method for nonsmooth regularized optimization}, + Institution = gerad, + Year = 2024, + Type = {Cahier}, + Number = {G-2021-12-SM}, + Address = gerad-address, + Pages = {1--3}, + doi = {10.13140/RG.2.2.36250.45768}, +} + +@Article{ bot-csetnek-laszlo-2016, + Author = {Boţ, R. I. and Csetnek, E. R. and László, S.C.}, + Title = {An inertial forward–backward algorithm for the minimization of the sum of two nonconvex functions}, + Journal = euro, + Year = 2016, + Number = 4, + Pages = {3--25}, + doi = {10.1007/s13675-015-0045-8}, +} + +@Article{ cartis-gould-toint-2011, + Author = {Cartis, Coralia and Gould, Nicholas I. M. and Toint, {\relax Ph}. 
L.}, + Title = {On the Evaluation Complexity of Composite Function Minimization with Applications to Nonconvex Nonlinear Programming}, + Journal = siopt, + Year = 2011, + Volume = 21, + Number = 4, + Pages = {1721--1739}, + doi = {10.1137/11082381X}, +} + +@Article{ fukushima-mine-1981, + Author = {Masao Fukushima and Hisashi Mine}, + Title = {A generalized proximal point algorithm for certain non-convex minimization problems}, + Journal = ijss, + Year = 1981, + Volume = 12, + Number = 8, + Pages = {989--1000}, + Publisher = {Taylor & Francis}, + doi = {10.1080/00207728108963798}, +} + +@Article{ lee-sun-saunders-2014, + Author = {Lee, Jason D. and Sun, Yuekai and Saunders, Michael A.}, + Title = {Proximal {N}ewton-Type Methods for Minimizing Composite Functions}, + Journal = siopt, + Year = 2014, + Volume = 24, + Number = 3, + Pages = {1420--1443}, + doi = {10.1137/130921428}, +} + +@TechReport{ leconte-orban-2023, + Author = {G. Leconte and D. Orban}, + Title = {The Indefinite Proximal Gradient Method}, + Institution = gerad, + Year = 2023, + Type = {Cahier}, + Number = {G-2023-37}, + Address = gerad-address, + doi = {10.13140/RG.2.2.11836.41606}, +} + +@TechReport{ leconte-orban-2024, + Author = {Leconte, Geoffroy and Orban, Dominique}, + Title = {An interior-point trust-region method for nonsmooth regularized bound-constrained optimization}, + Institution = gerad, + Year = 2024, + Type = {Cahier}, + Number = {G-2024-17}, + Address = gerad-address, + doi = {10.13140/RG.2.2.18132.99201}, +} + +@InProceedings{ li-lin-2015, + Author = {Li, Huan and Lin, Zhouchen}, + Title = {Accelerated Proximal Gradient Methods for Nonconvex Programming}, + Booktitle = {Proceedings of the 28th International Conference on Neural Information Processing Systems - Volume 1}, + Year = 2015, + Series = {NIPS'15}, + Pages = {379--387}, + Address = {Cambridge, MA, USA}, + Publisher = {MIT Press}, + abstract = {Nonconvex and nonsmooth problems have recently received considerable attention in signal/image processing, statistics and machine learning. However, solving the nonconvex and nonsmooth optimization problems remains a big challenge. Accelerated proximal gradient (APG) is an excellent method for convex programming. However, it is still unknown whether the usual APG can ensure the convergence to a critical point in nonconvex programming. In this paper, we extend APG for general nonconvex and nonsmooth programs by introducing a monitor that satisfies the sufficient descent property. Accordingly, we propose a monotone APG and a nonmonotone APG. The latter waives the requirement on monotonic reduction of the objective function and needs less computation in each iteration. To the best of our knowledge, we are the first to provide APG-type algorithms for general nonconvex and nonsmooth problems ensuring that every accumulation point is a critical point, and the convergence + rates remain O(1/k2) when the problems are convex, in which k is the number of iterations. Numerical results testify to the advantage of our algorithms in speed.}, + numpages = 9, + location = {Montreal, Canada}, + url = {http://irc.cs.sdu.edu.cn/973project/result/download/2015/28.AcceleratedProximal.pdf}, +} + +@Article{ lions-mercier-1979, + Author = {P.-L. Lions and B. 
Mercier}, + Title = {Splitting algorithms for the sum of two nonlinear operators}, + Journal = sinum, + Year = 1979, + Volume = 16, + Number = 6, + Pages = {964--979}, + doi = {10.1137/0716071}, +} + +@Article{ kanzow-lechner-2021, + Author = {Kanzow, C and Lechner, T}, + Title = {Globalized inexact proximal {N}ewton-type methods for nonconvex composite functions}, + Journal = coap, + Year = 2021, + Volume = 78, + Number = 2, + Pages = {377--410}, + doi = {10.1007/s10589-020-00243-6}, + abstract = {Optimization problems with composite functions consist of an objective function which is the sum of a smooth and a (convex) nonsmooth term. This particular structure is exploited by the class of proximal gradient methods and some of their generalizations like proximal Newton and quasi-Newton methods. The current literature on these classes of methods almost exclusively considers the case where also the smooth term is convex. Here we present a globalized proximal Newton-type method which allows the smooth term to be nonconvex. The method is shown to have nice global and local convergence properties, and some numerical results indicate that this method is very promising also from a practical point of view.}, +} + +@Article{ zhu-nazareth-wolkowicz-1999, + Author = {Zhu, M and Nazareth, J L and Wolkowicz, H}, + Title = {The Quasi-{C}auchy Relation and Diagonal Updating}, + Journal = siopt, + Year = 1999, + Volume = 9, + Number = 4, + Pages = {1192--1204}, + doi = {10.1137/S1052623498331793}, + abstract = { The quasi-Cauchy (QC) relation is the weak quasi-Newton relation of Dennis and Wolkowicz [SIAM J. Numer. Anal., 30 (1993), pp. 1291--1314] with the added restriction that full matrices are replaced by diagonal matrices. This relation is justified and explored and, in particular, two basic variational techniques for updating diagonal matrices that satisfy it are formulated.For purposes of illustration, a numerical experiment is described where a diagonal updated matrix with hereditary positive definiteness is used to precondition Cauchy's steepest-descent direction. The resulting QC algorithm is shown to be significantly accelerated.In the concluding section, the following topics are briefly discussed: additional variational principles, use of diagonal updates within other optimization algorithms together with some further numerical experience (summarized in an appendix), and an interesting connection between QC-diagonal updating and trust-region techniques. }, +} + +@Book{ conn-gould-toint-2000, + Author = {A. R. Conn and N. I. M. Gould and {\relax Ph}. L. Toint}, + Title = {Trust-region methods}, + Publisher = siam, + Year = 2000, + Series = {MOS-SIAM Series on Optimization}, + Address = siam-address, + Number = 1, + doi = {10.1137/1.9780898719857}, +} + +@Book{ rockafellar-wets-1998, + Author = {{R. Tyrrell} Rockafellar and Roger J.-B. 
Wets}, + Title = {Variational Analysis}, + Publisher = {Springer Verlag}, + Year = 1998, + Address = {Heidelberg, Berlin, New York}, + doi = {10.1007/978-3-642-02431-3}, +} + +@TechReport{ leconte-orban-2023-2, + Author = {Leconte, Geoffroy and Orban, Dominique}, + Title = {Complexity of trust-region methods with unbounded {H}essian approximations for smooth and nonsmooth optimization}, + Institution = gerad, + Year = 2023, + Type = {Cahier}, + Number = {G-2023-65}, + Address = gerad-address, + url = {https://www.gerad.ca/fr/papers/G-2023-65}, +} + +@Article{ kanzow-mehlitz-2022, + Author = {Kanzow, Christian and Mehlitz, Patrick}, + Title = {Convergence properties of monotone and nonmonotone proximal gradient methods revisited}, + Journal = jota, + Year = 2022, + Volume = 195, + Number = 2, + Pages = {624--646}, + doi = {10.1007/s10957-022-02101-3}, + Publisher = {Springer}, +} + +@Article{ chouzenoux-pesquet-repetti-2014, + Author = {Chouzenoux, Emilie and Pesquet, Jean-Christophe and Repetti, Audrey}, + Title = {Variable metric forward--backward algorithm for minimizing the sum of a differentiable function and a convex function}, + Journal = jota, + Year = 2014, + Volume = 162, + Number = 1, + Pages = {107--132}, + Publisher = {Springer}, + doi = {10.1007/s10957-013-0465-7}, +} + +@TechReport{ diouane-habiboullah-orban-2024, + Author = {Youssef Diouane and Mohamed Laghdaf Habiboullah and Dominique Orban}, + Title = {Complexity of trust-region methods in the presence of unbounded {H}essian approximations}, + Institution = {GERAD}, + Year = 2024, + Type = {Cahier}, + Number = {G-2024-43}, + Address = {Montr\'eal, Canada}, + doi = {10.48550/arXiv.2408.06243}, + url = {https://www.gerad.ca/fr/papers/G-2024-43}, +} + +@Article{ powell-2010, + Author = {Powell, M. J. D.}, + Title = {On the convergence of a wide range of trust region methods for unconstrained optimization}, + Journal = imajna, + Year = 2010, + Volume = 30, + Number = 1, + Pages = {289--301}, + doi = {10.1093/imanum/drp021}, +} + +@Article{ nazareth-1995, + Author = {J. L. Nazareth}, + Title = {If quasi-{N}ewton then why not quasi-{C}auchy?}, + Journal = {SIAG/OPT Views-and-News}, + Year = 1995, + Volume = 6, + Pages = {11--14}, +} + +@InProceedings{ stella-themelis-sopasakis-patrinos-2017, + Author = {L. {Stella} and A. {Themelis} and P. {Sopasakis} and P. 
{Patrinos}}, + Title = {A simple and efficient algorithm for nonlinear model predictive control}, + Booktitle = {2017 IEEE 56th Annual Conference on Decision and Control (CDC)}, + Year = 2017, + Pages = {1939--1944}, + doi = {10.1109/CDC.2017.8263933}, +} + +@Article{ themelis-stella-patrinos-2017, + Author = {Themelis, Andreas and Stella, Lorenzo and Patrinos, Panagiotis}, + Title = {Forward-Backward Envelope for the Sum of Two Nonconvex Functions: Further Properties and Nonmonotone line seach Algorithms}, + Journal = siopt, + Year = 2018, + Volume = 28, + Number = 3, + Pages = {2274--2303}, + doi = {10.1137/16M1080240}, +} + +@Article{ yu-zhang-2022, + Author = {Yu, Quan and Zhang, Xinzhen}, + Title = {A smoothing proximal gradient algorithm for matrix rank minimization problem}, + Journal = coap, + Year = 2022, + Pages = {1--20}, + doi = {10.1007/s10589-021-00337-9}, + Publisher = {Springer}, +} + +@Article{ chouzenoux-martin-pesquet-2023, + Author = {Chouzenoux, Emilie and Martin, S{\'e}gol{\`e}ne and Pesquet, Jean-Christophe}, + Title = {A local {MM} subspace method for solving constrained variational problems in image recovery}, + Journal = jmiv, + Year = 2023, + Volume = 65, + Number = 2, + Pages = {253--276}, + doi = {10.1007/s10851-022-01112-z}, + Publisher = {Springer}, +} + +@Article{ stella-themelis-patrinos-2017, + Author = {Stella, Lorenzo and Themelis, Andreas and Patrinos, Panagiotis}, + Title = {Forward--backward quasi-{N}ewton methods for nonsmooth optimization problems}, + Journal = coap, + Year = 2017, + Volume = 67, + Number = 3, + Pages = {443--487}, + doi = {10.1007/s10589-017-9912-y}, + Publisher = {Springer}, +} + +@TechReport{ diouane-gollier-orban-2024, + Author = {Youssef Diouane and Maxence Gollier and Dominique Orban}, + Title = {A nonsmooth exact penalty method for equality-constrained optimization: complexity and implementation}, + Institution = {GERAD}, + Year = 2024, + Type = {Cahier}, + Number = {G-2024-65}, + Address = {Montr\'eal, Canada}, + doi = {10.13140/RG.2.2.16095.47527}, +} + +@InProceedings{ becker-fadili-2012, + Author = {Becker, Stephen and Fadili, Jalal}, + Title = {A quasi-{N}ewton proximal splitting method}, + Editor = {F. Pereira and C.J. Burges and L. Bottou and K.Q. 
Weinberger},
+ Booktitle = {Advances in Neural Information Processing Systems},
+ Year = 2012,
+ Volume = 25,
+ Publisher = {Curran Associates, Inc.},
+ url = {https://proceedings.neurips.cc/paper_files/paper/2012/file/e034fb6b66aacc1d48f445ddfb08da98-Paper.pdf},
+}
+
+@Article{ liu-pan-wu-yang-2024,
+ Author = {Liu, Ruyu and Pan, Shaohua and Wu, Yuqia and Yang, Xiaoqi},
+ Title = {An inexact regularized proximal {N}ewton method for nonconvex and nonsmooth optimization},
+ Journal = coap,
+ Year = 2024,
+ Volume = 88,
+ Number = 2,
+ Pages = {603--641},
+ Publisher = springer,
+ doi = {10.1007/s10589-024-00560-0},
+}
+
+@Article{ de-marchi-2023,
+ Author = {De Marchi, Alberto},
+ Title = {Proximal gradient methods beyond monotony},
+ Journal = {Journal of Nonsmooth Analysis and Optimization},
+ Year = 2023,
+ Volume = 4,
+ Number = {Original research articles},
+ Publisher = {Episciences.org},
+ doi = {10.46298/jnsao-2023-10290},
+}
+
+@Article{ kanzow-lechner-2024,
+ Author = {Kanzow, Christian and Lechner, Theresa},
+ Title = {Efficient Regularized Proximal Quasi-{N}ewton Methods for Large-Scale Nonconvex Composite Optimization Problems},
+ Journal = {Pacific Journal of Optimization},
+ Year = 2024,
+ Volume = 20,
+ Number = 3,
+ Pages = {537--568},
+ doi = {10.61208/pjo-2023-036},
+}
+
+@Article{ becker-fadili-ochs-2019,
+ Author = {Becker, Stephen and Fadili, Jalal and Ochs, Peter},
+ Title = {On Quasi-{N}ewton Forward-Backward Splitting: Proximal Calculus and Convergence},
+ Journal = siopt,
+ Year = 2019,
+ Volume = 29,
+ Number = 4,
+ Pages = {2445--2481},
+ doi = {10.1137/18M1167152},
+}
+
+@Article{ jia-kanzow-mehlitz-2023,
+ Author = {Jia, Xiaoxi and Kanzow, Christian and Mehlitz, Patrick},
+ Title = {Convergence Analysis of the Proximal Gradient Method in the Presence of the {K}urdyka–{\L{}}ojasiewicz Property Without Global {L}ipschitz Assumptions},
+ Journal = siopt,
+ Year = 2023,
+ Volume = 33,
+ Number = 4,
+ Pages = {3038--3056},
+ doi = {10.1137/23M1548293},
+}
diff --git a/paper/paper.md b/paper/paper.md
index 03c2d9a0..d59f0a9a 100644
--- a/paper/paper.md
+++ b/paper/paper.md
@@ -11,10 +11,10 @@ authors:
    orcid: 0000-0002-6609-7330
    affiliation: 1
  - name: Maxence Gollier^[corresponding author]
-    orcid: 0009-0008-3158-7912
+    orcid: 0000-0002-8017-7687
    affiliation: 1
  - name: Mohamed Laghdaf Habiboullah^[corresponding author]
-    orcid: 0009-0005-3631-2799
+    orcid: 0000-0003-3385-9379
    affiliation: 1
  - name: Dominique Orban
    orcid: 0000-0002-8017-7687
    affiliation: 1
@@ -30,4 +30,95 @@ header-includes: |
  \setmonofont[Path = ./, Scale=0.68]{JuliaMono-Regular.ttf}
 ---
-# References
\ No newline at end of file
+# Summary
+
+[RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) is a Julia package that implements a family of regularization and trust-region type algorithms for solving unconstrained or composite nonsmooth optimization problems of the form
+
+\[
+\min_{x \in \mathbb{R}^n} \; f(x) + h(x),
+\]
+
+where $f$ is typically smooth (possibly nonconvex) and $h$ is convex but possibly nonsmooth.
+The library provides a modular and extensible framework for experimenting with regularization-based methods such as:
+
+- **Trust-region methods (TR, TRDH)**,
+- **Quadratic regularization methods (R2, R2N)**,
+- **Levenberg-Marquardt methods (LM, LMTR)**.
+
+These methods rely solely on gradient and Hessian(-vector) information and can handle cases where Hessian approximations are unbounded, making the package particularly suited for large-scale, ill-conditioned, or nonsmooth problems.
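To make these ingredients concrete, the following minimal sketch performs a single proximal-gradient step, the elementary building block such solvers use to compute trial steps. It assumes only ProximalOperators.jl; the toy quadratic $f$, its gradient `g`, and the step length `ν` are illustrative placeholders rather than part of the package API.

```julia
# One proximal-gradient step (minimal sketch; assumes ProximalOperators.jl).
# The quadratic f(x) = ½‖x‖², its gradient g, and the step length ν are toy choices.
using ProximalOperators

n = 100
x = randn(n)
h = NormL1(1.0)   # nonsmooth term h(x) = ‖x‖₁

g = copy(x)       # ∇f(x) for the toy choice f(x) = ½‖x‖²
ν = 0.5           # step length

# trial point: argmin over u of ½‖u - (x - νg)‖² + νh(u)
xtrial, hval = prox(h, x .- ν .* g, ν)
```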
+
+# Statement of need
+
+## Unified framework for regularization methods
+
+RegularizedOptimization.jl provides a consistent API to formulate optimization problems and apply a range of regularization methods.
+It allows researchers to:
+
+- Test and compare different regularization algorithms within a common environment.
+- Switch between exact Hessians, quasi-Newton updates, and diagonal Hessian approximations via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl).
+- Incorporate nonsmooth terms $h$ via proximal mappings.
+
+The package is particularly motivated by recent advances in the complexity analysis of regularization and trust-region methods.
+
+## Compatibility with JuliaSmoothOptimizers ecosystem
+
+RegularizedOptimization.jl integrates seamlessly with other [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) packages:
+
+- **Problem definition** via [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl).
+- **Linear algebra operations** via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl).
+- **Prox-definition** via [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl).
+
+This modularity makes it easy to prototype, benchmark, and extend regularization-based methods.
+
+## Support for inexact subproblem solves
+
+Solvers in RegularizedOptimization.jl allow inexact resolution of trust-region and cubic-regularized subproblems using first-order nonsmooth optimization methods such as R2.
+
+This is crucial for large-scale problems where exact subproblem solutions are prohibitive.
+
+## Research and teaching tool
+
+The package is designed both as a research platform for developing new optimization methods and as a pedagogical tool for teaching modern nonsmooth nonconvex optimization algorithms.
+It provides reference implementations that are transparent and mathematically faithful, while being efficient enough for large-scale experiments.
+
+# Examples
+
+A simple example: solving a regularized quadratic problem with an $\ell_1$ penalty.
+
+```julia
+using LinearAlgebra, Random
+using ProximalOperators
+using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization, SolverCore
+
+# Set random seed for reproducibility
+Random.seed!(123)
+
+# Define a basis pursuit denoising problem
+compound = 10
+nz = 10 * compound
+bpdn, bpdn_nls, sol = bpdn_model(compound)
+
+# Define the Hessian approximation
+f = LSR1Model(bpdn)
+
+# Define the nonsmooth regularizer (L1 norm)
+λ = 1.0
+h = NormL1(λ)
+
+# Define the regularized NLP model
+reg_nlp = RegularizedNLPModel(f, h)
+
+# Choose a solver (R2N) and execution statistics tracker
+solver = R2NSolver(reg_nlp)
+stats = RegularizedExecutionStats(reg_nlp)
+
+# Solve the problem
+solve!(solver, reg_nlp, stats, x = f.meta.x0, σk = 1.0, atol = 1e-8, rtol = 1e-8, verbose = 1)
+```
+
+# Acknowledgements
+
+Development of RegularizedOptimization.jl has been supported by the Natural Sciences and Engineering Research Council of Canada (NSERC) and the Fonds de Recherche du Québec – Nature et Technologies (FRQNT).
+The authors thank the JuliaSmoothOptimizers community for valuable feedback and contributions.
+
+
+ +# References From 158fc0ac7a249273cc16b335ef6c05e7fef35096 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Thu, 4 Sep 2025 15:05:22 -0400 Subject: [PATCH 02/42] minor error --- paper/paper.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index d59f0a9a..5e1217e5 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -34,9 +34,9 @@ header-includes: | [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) is a Julia package that implements a family of regularization and trust-region type algorithms for solving unconstrained or composite nonsmooth optimization problems of the form -\[ -\min_{x \in \mathbb{R}^n} \; f(x) + h(x), -\] +$$ +\min_{x \in \mathbb{R}^n} f(x) + h(x), +$$ where $f$ is typically smooth (possibly nonconvex) and $h$ is convex but possibly nonsmooth. The library provides a modular and extensible framework for experimenting with regularization-based methods such as: From 8c75806d24ef43a4980aea18ff3107e0e7060520 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Thu, 4 Sep 2025 15:18:55 -0400 Subject: [PATCH 03/42] add bib --- paper/paper.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index 5e1217e5..b7aa0a03 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -32,7 +32,7 @@ header-includes: | # Summary -[RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) is a Julia package that implements a family of regularization and trust-region type algorithms for solving unconstrained or composite nonsmooth optimization problems of the form +[RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) is a Julia package that implements a family of regularization and trust-region type algorithms for solving unconstrained or composite nonsmooth optimization problems of the form [@aravkin-baraldi-orban-2022]: $$ \min_{x \in \mathbb{R}^n} f(x) + h(x), From 595c3729235e84874ed671a557788e1762da9465 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Fri, 5 Sep 2025 18:21:59 -0400 Subject: [PATCH 04/42] correct the typos and bib --- paper/paper.bib | 385 ++---------------------------------------------- paper/paper.md | 96 +++++++----- 2 files changed, 75 insertions(+), 406 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index 7eada02d..e1669da1 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -1,15 +1,3 @@ - -@Article{ andrei-2019, - Author = {Neculai Andrei}, - Title = {A diagonal quasi-{N}ewton updating method for unconstrained optimization}, - Journal = numalg, - Year = 2019, - Volume = 81, - Pages = {575-–590}, - doi = {10.1007/s11075-018-0562-7}, - abstract = {A diagonal quasi-Newton updating algorithm is presented. The elements of the diagonal matrix approximating the Hessian are determined by minimizing both the size of the change from the previous estimate and the trace of the update, subject to the weak secant equation. Under mild classical assumptions, the convergence of the algorithm is proved to be linear. The diagonal quasi-Newton update satisfies the bounded deterioration property. 
Numerical experiments with 80 unconstrained optimization test problems of different structures and complexities prove that the suggested algorithm is more efficient and more robust than the steepest descent, Cauchy with Oren and Luenberger scaling algorithm in its complementary form and classical Broyden-Fletcher-Goldfarb-Shanno algorithm.}, -} - @Article{ aravkin-baraldi-orban-2022, Author = {A. Y. Aravkin and R. Baraldi and D. Orban}, Title = {A Proximal Quasi-{N}ewton Trust-Region Method for Nonsmooth Regularized Optimization}, @@ -38,15 +26,6 @@ @Article{ aravkin-baraldi-orban-2024 examples: a group-lasso basis-pursuit denoise example, a nonlinear support vector machine, and parameter estimation in a neuroscience application. To implement those examples, we describe in detail how to evaluate proximal operators for separable \(h\) and for the group lasso with trust-region constraint. In all cases, the Levenberg–Marquardt methods perform fewer outer iterations than either a proximal gradient method with adaptive step length or a quasi-Newto n trust-region method, neither of which exploit the least-squares structure of the problem. Our results also highlight the need for more sophisticated subproblem solvers than simple first-order methods. }, } -@Software{ baraldi-leconte-orban-regularized-optimization-2024, - Author = {R. Baraldi and G. Leconte and D. Orban}, - Title = {{RegularizedOptimization.jl}: Algorithms for Regularized Optimization}, - Year = 2024, - license = {MPL-2.0}, - url = {https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl}, - doi = {10.5281/zenodo.6940313}, -} - @Software{ leconte_linearoperators_jl_linear_operators_2023, Author = {Leconte, Geoffroy and Orban, Dominique and Soares Siqueira, Abel and contributors}, license = {MPL-2.0}, @@ -56,107 +35,6 @@ @Software{ leconte_linearoperators_jl_linear_operators_2023 Year = 2023, } -@Article{ birgin-martinez-raydan-2014, - Author = {Birgin, Ernesto G. and Martínez, Jose Mario and Raydan, Marcos}, - Title = {Spectral Projected Gradient Methods: Review and Perspectives}, - Journal = jssoft, - Year = 2014, - Volume = 60, - Number = 3, - Pages = {1--21}, - doi = {10.18637/jss.v060.i03}, - abstract = {Over the last two decades, it has been observed that using the gradient vector as a search direction in large-scale optimization may lead to efficient algorithms. The effectiveness relies on choosing the step lengths according to novel ideas that are related to the spectrum of the underlying local Hessian rather than related to the standard decrease in the objective function. A review of these so-called spectral projected gradient methods for convex constrained optimization is presented. To illustrate the performance of these low-cost schemes, an optimization problem on the set of positive definite matrices is described.}, -} - -@Article{ bolte-sabach-teboulle-2014, - Author = {Bolte, J. and Sabach, S. and Teboulle, M.}, - Title = {Proximal alternating linearized minimization for nonconvex and nonsmooth problems}, - Journal = mp, - Year = 2014, - Number = 146, - Pages = {459–-494}, - doi = {10.1007/s10107-013-0701-9}, -} - -@Article{ dennis-wolkowicz-1993, - Author = {Dennis, Jr., J. E. 
and Wolkowicz, H.}, - Title = {Sizing and Least-Change Secant Methods}, - Journal = sinum, - Year = 1993, - Volume = 30, - Number = 5, - Pages = {1291--1314}, - doi = {10.1137/0730067}, - abstract = { Oren and Luenberger introduced in 1974 a strategy for replacing Hessian approximations by their scalar multiples and then performing quasi-Newton updates, generally least-change secant updates such as the BFGS or DFP updates [Oren and Luenberger, Management Sci., 20 (1974), pp. 845–862]. In this paper, the function \[\omega (A) = \left( {\frac{{{{{\operatorname{trace}}(A)} / n}}}{{{\operatorname{det}}(A)^{{1 / n}} }}} \right)\] is shown to be a measure of change with a direct connection to the Oren–Luenberger strategy. This measure is interesting because it is related to the \$\ell\_2\$ condition number, but it takes all the eigenvalues of A into account rather than just the extremes. If the class of possible updates is restricted to the Broyden class, i.e., scalar premultiples are not allowed, then the optimal update depends on the dimension of the problem. It may, or may not, be in the convex class, but it becomes the BFGS update as the dimension increases. - This seems to be yet another explanation for why the optimally conditioned updates are not significantly better than the BFGS update. The theory results in several new interesting updates including a self-scaling, hereditarily positive definite, update in the Broyden class which is not necessarily in the convex class. This update, in conjunction with the Oren–Luenberger scaling strategy at the first iteration only, was consistently the best in numerical tests. }, -} - -@Article{ gilbert-lemarechal-1989, - Author = {Gilbert, J.-C. and Lemaréchal, C.}, - Title = {Some numerical experiments with variable-storage quasi-{N}ewton algorithms}, - Journal = mp, - Year = 1989, - Volume = 45, - Pages = {407--435}, - doi = {10.1007/BF01589113}, -} - -@TechReport{ aravkin-baraldi-leconte-orban-2021, - Author = {Aravkin, Aleksandr and Baraldi, Robert and Leconte, Geoffroy and Orban, Dominique}, - Title = {Corrigendum: A proximal quasi-{N}ewton trust-region method for nonsmooth regularized optimization}, - Institution = gerad, - Year = 2024, - Type = {Cahier}, - Number = {G-2021-12-SM}, - Address = gerad-address, - Pages = {1--3}, - doi = {10.13140/RG.2.2.36250.45768}, -} - -@Article{ bot-csetnek-laszlo-2016, - Author = {Boţ, R. I. and Csetnek, E. R. and László, S.C.}, - Title = {An inertial forward–backward algorithm for the minimization of the sum of two nonconvex functions}, - Journal = euro, - Year = 2016, - Number = 4, - Pages = {3--25}, - doi = {10.1007/s13675-015-0045-8}, -} - -@Article{ cartis-gould-toint-2011, - Author = {Cartis, Coralia and Gould, Nicholas I. M. and Toint, {\relax Ph}. L.}, - Title = {On the Evaluation Complexity of Composite Function Minimization with Applications to Nonconvex Nonlinear Programming}, - Journal = siopt, - Year = 2011, - Volume = 21, - Number = 4, - Pages = {1721--1739}, - doi = {10.1137/11082381X}, -} - -@Article{ fukushima-mine-1981, - Author = {Masao Fukushima and Hisashi Mine}, - Title = {A generalized proximal point algorithm for certain non-convex minimization problems}, - Journal = ijss, - Year = 1981, - Volume = 12, - Number = 8, - Pages = {989--1000}, - Publisher = {Taylor & Francis}, - doi = {10.1080/00207728108963798}, -} - -@Article{ lee-sun-saunders-2014, - Author = {Lee, Jason D. 
and Sun, Yuekai and Saunders, Michael A.}, - Title = {Proximal {N}ewton-Type Methods for Minimizing Composite Functions}, - Journal = siopt, - Year = 2014, - Volume = 24, - Number = 3, - Pages = {1420--1443}, - doi = {10.1137/130921428}, -} - @TechReport{ leconte-orban-2023, Author = {G. Leconte and D. Orban}, Title = {The Indefinite Proximal Gradient Method}, @@ -168,88 +46,6 @@ @TechReport{ leconte-orban-2023 doi = {10.13140/RG.2.2.11836.41606}, } -@TechReport{ leconte-orban-2024, - Author = {Leconte, Geoffroy and Orban, Dominique}, - Title = {An interior-point trust-region method for nonsmooth regularized bound-constrained optimization}, - Institution = gerad, - Year = 2024, - Type = {Cahier}, - Number = {G-2024-17}, - Address = gerad-address, - doi = {10.13140/RG.2.2.18132.99201}, -} - -@InProceedings{ li-lin-2015, - Author = {Li, Huan and Lin, Zhouchen}, - Title = {Accelerated Proximal Gradient Methods for Nonconvex Programming}, - Booktitle = {Proceedings of the 28th International Conference on Neural Information Processing Systems - Volume 1}, - Year = 2015, - Series = {NIPS'15}, - Pages = {379--387}, - Address = {Cambridge, MA, USA}, - Publisher = {MIT Press}, - abstract = {Nonconvex and nonsmooth problems have recently received considerable attention in signal/image processing, statistics and machine learning. However, solving the nonconvex and nonsmooth optimization problems remains a big challenge. Accelerated proximal gradient (APG) is an excellent method for convex programming. However, it is still unknown whether the usual APG can ensure the convergence to a critical point in nonconvex programming. In this paper, we extend APG for general nonconvex and nonsmooth programs by introducing a monitor that satisfies the sufficient descent property. Accordingly, we propose a monotone APG and a nonmonotone APG. The latter waives the requirement on monotonic reduction of the objective function and needs less computation in each iteration. To the best of our knowledge, we are the first to provide APG-type algorithms for general nonconvex and nonsmooth problems ensuring that every accumulation point is a critical point, and the convergence - rates remain O(1/k2) when the problems are convex, in which k is the number of iterations. Numerical results testify to the advantage of our algorithms in speed.}, - numpages = 9, - location = {Montreal, Canada}, - url = {http://irc.cs.sdu.edu.cn/973project/result/download/2015/28.AcceleratedProximal.pdf}, -} - -@Article{ lions-mercier-1979, - Author = {P.-L. Lions and B. Mercier}, - Title = {Splitting algorithms for the sum of two nonlinear operators}, - Journal = sinum, - Year = 1979, - Volume = 16, - Number = 6, - Pages = {964--979}, - doi = {10.1137/0716071}, -} - -@Article{ kanzow-lechner-2021, - Author = {Kanzow, C and Lechner, T}, - Title = {Globalized inexact proximal {N}ewton-type methods for nonconvex composite functions}, - Journal = coap, - Year = 2021, - Volume = 78, - Number = 2, - Pages = {377--410}, - doi = {10.1007/s10589-020-00243-6}, - abstract = {Optimization problems with composite functions consist of an objective function which is the sum of a smooth and a (convex) nonsmooth term. This particular structure is exploited by the class of proximal gradient methods and some of their generalizations like proximal Newton and quasi-Newton methods. The current literature on these classes of methods almost exclusively considers the case where also the smooth term is convex. 
Here we present a globalized proximal Newton-type method which allows the smooth term to be nonconvex. The method is shown to have nice global and local convergence properties, and some numerical results indicate that this method is very promising also from a practical point of view.}, -} - -@Article{ zhu-nazareth-wolkowicz-1999, - Author = {Zhu, M and Nazareth, J L and Wolkowicz, H}, - Title = {The Quasi-{C}auchy Relation and Diagonal Updating}, - Journal = siopt, - Year = 1999, - Volume = 9, - Number = 4, - Pages = {1192--1204}, - doi = {10.1137/S1052623498331793}, - abstract = { The quasi-Cauchy (QC) relation is the weak quasi-Newton relation of Dennis and Wolkowicz [SIAM J. Numer. Anal., 30 (1993), pp. 1291--1314] with the added restriction that full matrices are replaced by diagonal matrices. This relation is justified and explored and, in particular, two basic variational techniques for updating diagonal matrices that satisfy it are formulated.For purposes of illustration, a numerical experiment is described where a diagonal updated matrix with hereditary positive definiteness is used to precondition Cauchy's steepest-descent direction. The resulting QC algorithm is shown to be significantly accelerated.In the concluding section, the following topics are briefly discussed: additional variational principles, use of diagonal updates within other optimization algorithms together with some further numerical experience (summarized in an appendix), and an interesting connection between QC-diagonal updating and trust-region techniques. }, -} - -@Book{ conn-gould-toint-2000, - Author = {A. R. Conn and N. I. M. Gould and {\relax Ph}. L. Toint}, - Title = {Trust-region methods}, - Publisher = siam, - Year = 2000, - Series = {MOS-SIAM Series on Optimization}, - Address = siam-address, - Number = 1, - doi = {10.1137/1.9780898719857}, -} - -@Book{ rockafellar-wets-1998, - Author = {{R. Tyrrell} Rockafellar and Roger J.-B. 
Wets}, - Title = {Variational Analysis}, - Publisher = {Springer Verlag}, - Year = 1998, - Address = {Heidelberg, Berlin, New York}, - doi = {10.1007/978-3-642-02431-3}, -} - @TechReport{ leconte-orban-2023-2, Author = {Leconte, Geoffroy and Orban, Dominique}, Title = {Complexity of trust-region methods with unbounded {H}essian approximations for smooth and nonsmooth optimization}, @@ -261,114 +57,16 @@ @TechReport{ leconte-orban-2023-2 url = {https://www.gerad.ca/fr/papers/G-2023-65}, } -@Article{ kanzow-mehlitz-2022, - Author = {Kanzow, Christian and Mehlitz, Patrick}, - Title = {Convergence properties of monotone and nonmonotone proximal gradient methods revisited}, - Journal = jota, - Year = 2022, - Volume = 195, - Number = 2, - Pages = {624--646}, - doi = {10.1007/s10957-022-02101-3}, - Publisher = {Springer}, -} - -@Article{ chouzenoux-pesquet-repetti-2014, - Author = {Chouzenoux, Emilie and Pesquet, Jean-Christophe and Repetti, Audrey}, - Title = {Variable metric forward--backward algorithm for minimizing the sum of a differentiable function and a convex function}, - Journal = jota, - Year = 2014, - Volume = 162, - Number = 1, - Pages = {107--132}, - Publisher = {Springer}, - doi = {10.1007/s10957-013-0465-7}, -} - @TechReport{ diouane-habiboullah-orban-2024, Author = {Youssef Diouane and Mohamed Laghdaf Habiboullah and Dominique Orban}, - Title = {Complexity of trust-region methods in the presence of unbounded {H}essian approximations}, + Title = {A proximal modified quasi-Newton method for nonsmooth regularized optimization}, Institution = {GERAD}, Year = 2024, Type = {Cahier}, - Number = {G-2024-43}, + Number = {G-2024-64}, Address = {Montr\'eal, Canada}, - doi = {10.48550/arXiv.2408.06243}, - url = {https://www.gerad.ca/fr/papers/G-2024-43}, -} - -@Article{ powell-2010, - Author = {Powell, M. J. D.}, - Title = {On the convergence of a wide range of trust region methods for unconstrained optimization}, - Journal = imajna, - Year = 2010, - Volume = 30, - Number = 1, - Pages = {289--301}, - doi = {10.1093/imanum/drp021}, -} - -@Article{ nazareth-1995, - Author = {J. L. Nazareth}, - Title = {If quasi-{N}ewton then why not quasi-{C}auchy?}, - Journal = {SIAG/OPT Views-and-News}, - Year = 1995, - Volume = 6, - Pages = {11--14}, -} - -@InProceedings{ stella-themelis-sopasakis-patrinos-2017, - Author = {L. {Stella} and A. {Themelis} and P. {Sopasakis} and P. 
{Patrinos}}, - Title = {A simple and efficient algorithm for nonlinear model predictive control}, - Booktitle = {2017 IEEE 56th Annual Conference on Decision and Control (CDC)}, - Year = 2017, - Pages = {1939--1944}, - doi = {10.1109/CDC.2017.8263933}, -} - -@Article{ themelis-stella-patrinos-2017, - Author = {Themelis, Andreas and Stella, Lorenzo and Patrinos, Panagiotis}, - Title = {Forward-Backward Envelope for the Sum of Two Nonconvex Functions: Further Properties and Nonmonotone line seach Algorithms}, - Journal = siopt, - Year = 2018, - Volume = 28, - Number = 3, - Pages = {2274--2303}, - doi = {10.1137/16M1080240}, -} - -@Article{ yu-zhang-2022, - Author = {Yu, Quan and Zhang, Xinzhen}, - Title = {A smoothing proximal gradient algorithm for matrix rank minimization problem}, - Journal = coap, - Year = 2022, - Pages = {1--20}, - doi = {10.1007/s10589-021-00337-9}, - Publisher = {Springer}, -} - -@Article{ chouzenoux-martin-pesquet-2023, - Author = {Chouzenoux, Emilie and Martin, S{\'e}gol{\`e}ne and Pesquet, Jean-Christophe}, - Title = {A local {MM} subspace method for solving constrained variational problems in image recovery}, - Journal = jmiv, - Year = 2023, - Volume = 65, - Number = 2, - Pages = {253--276}, - doi = {10.1007/s10851-022-01112-z}, - Publisher = {Springer}, -} - -@Article{ stella-themelis-patrinos-2017, - Author = {Stella, Lorenzo and Themelis, Andreas and Patrinos, Panagiotis}, - Title = {Forward--backward quasi-{N}ewton methods for nonsmooth optimization problems}, - Journal = coap, - Year = 2017, - Volume = 67, - Number = 3, - Pages = {443--487}, - doi = {10.1007/s10589-017-9912-y}, - Publisher = {Springer}, + doi = {10.48550/arxiv.2409.19428}, + url = {https://www.gerad.ca/fr/papers/G-2024-64}, } @TechReport{ diouane-gollier-orban-2024, @@ -382,69 +80,14 @@ @TechReport{ diouane-gollier-orban-2024 doi = {10.13140/RG.2.2.16095.47527}, } -@InProceedings{ becker-fadili-2012, - Author = {Becker, Stephen and Fadili, Jalal}, - Title = {A quasi-{N}ewton proximal splitting method}, - Editor = {F. Pereira and C.J. Burges and L. Bottou and K.Q. 
Weinberger}, - Booktitle = {Advances in Neural Information Processing Systems}, - Year = 2012, - Volume = 25, - Publisher = {Curran Associates, Inc.}, - url = {https://proceedings.neurips.cc/paper_files/paper/2012/file/e034fb6b66aacc1d48f445ddfb08da98-Paper.pdf}, -} - -@Article{ liu-pan-wu-yang-2024, - Author = {Liu, Ruyu and Pan, Shaohua and Wu, Yuqia and Yang, Xiaoqi}, - Title = {An inexact regularized proximal {N}ewton method for nonconvex and nonsmooth optimization}, - Journal = coap, - Year = 2024, - Volume = 88, - Number = 2, - Pages = {603--641}, - Publisher = springer, - doi = {10.1007/s10589-024-00560-0}, -} - -@Article{ de-marchi-2023, - Author = {De Marchi, Alberto}, - Title = {Proximal gradient methods beyond monotony}, - Journal = {J Nonsmooth Anal Optim}, - Year = 2023, - Volume = 4, - Number = {Original research articles}, - Publisher = {Episciences.org}, - doi = {10.46298/jnsao-2023-10290}, -} - -@Article{ kanzow-lechner-2024, - Author = {Kanzow, Christian and Lechner, Theresa}, - Title = {Efficient Regularized Proximal Quasi-{N}ewton Methods for Large-Scale Nonconvex Composite Optimization Problems}, - Journal = {PAC J OPTIM}, - Year = 2024, - Volume = 20, - Number = 3, - Pages = {537--568}, - doi = {10.61208/pjo-2023-036}, -} - -@Article{ becker-fadili-ochs-2019, - Author = {Becker, Stephen and Fadili, Jalal and Ochs, Peter}, - Title = {On Quasi-{N}ewton Forward-Backward Splitting: Proximal Calculus and Convergence}, - Journal = siopt, - Year = 2019, - Volume = 29, - Number = 4, - Pages = {2445--2481}, - doi = {10.1137/18M1167152}, -} - -@Article{ jia-kanzow-mehlitz-2023, - Author = {Jia, Xiaoxi and Kanzow, Christian and Mehlitz, Patrick}, - Title = {Convergence Analysis of the Proximal Gradient Method in the Presence of the {K}urdyka–{\L{}}ojasiewicz Property Without Global {L}ipschitz Assumptions}, - Journal = siopt, - Year = 2023, - Volume = 33, - Number = 4, - Pages = {3038--3056}, - doi = {10.1137/23M1548293}, +@article{bezanson-edelman-karpinski-shah-2017, + author = {Bezanson, Jeff and Edelman, Alan and Karpinski, Stefan and Shah, Viral B.}, + title = {Julia: A Fresh Approach to Numerical Computing}, + journal = {SIAM Review}, + volume = {59}, + number = {1}, + pages = {65--98}, + year = {2017}, + doi = {10.1137/141000671}, + publisher = {SIAM}, } diff --git a/paper/paper.md b/paper/paper.md index b7aa0a03..f00830b1 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -11,7 +11,7 @@ authors: orcid: 0000-0002-6609-7330 affiliation: 1 - name: Maxence Gollier^[corresponding author] - orcid: 0000-0002-8017-7687 + orcid: 0009-0008-3158-7912 affiliation: 1 - name: Mohamed Laghdaf Habiboullah^[corresponding author] orcid: 0000-0003-3385-9379 @@ -32,58 +32,78 @@ header-includes: | # Summary -[RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) is a Julia package that implements a family of regularization and trust-region type algorithms for solving unconstrained or composite nonsmooth optimization problems of the form [@aravkin-baraldi-orban-2022]: - -$$ -\min_{x \in \mathbb{R}^n} f(x) + h(x), -$$ +[RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) is a Julia [@bezanson-edelman-karpinski-shah-2017] package that implements a family of regularization and trust-region type algorithms for solving nonsmooth optimization problems of the form: +\begin{equation}\label{eq:nlp} + \underset{x \in \mathbb{R}^n}{\text{minimize}} \quad f(x) + h(x), +\end{equation} +where $f: \mathbb{R}^n \to 
\mathbb{R}$ is continuously differentiable on $\mathbb{R}^n$, and $h: \mathbb{R}^n \to \mathbb{R} \cup \{+\infty\}$ is lower semi-continuous.
+Both $f$ and $h$ may be nonconvex.
+
+The library provides a modular and extensible framework for experimenting with nonsmooth nonconvex optimization algorithms, including:
+
+- **Trust-region methods (TR, TRDH)** [@aravkin-baraldi-orban-2022,@leconte-orban-2023],
+- **Quadratic regularization methods (R2, R2N)** [@diouane-habiboullah-orban-2024,@aravkin-baraldi-orban-2022],
+- **Levenberg-Marquardt methods (LM, LMTR)** [@aravkin-baraldi-orban-2024].
+
+These methods rely solely on the gradient and Hessian(-vector) information of the smooth part $f$ and the proximal mapping of the nonsmooth part $h$ in order to compute steps.
+Then, the objective function $f + h$ is used only to accept or reject trial points.
+Moreover, they can handle cases where Hessian approximations are unbounded [@diouane-habiboullah-orban-2024] and [@leconte-orban-2023-2], making the package particularly suited for large-scale, ill-conditioned, and nonsmooth problems.
+
+# Statement of need
+
+## Unified framework for nonsmooth methods
+
+Problem \eqref{eq:nlp} can already be solved in Julia with [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), which implements several proximal algorithms for nonsmooth optimization.
+However, the available examples only consider convex instances of $h$, namely the $\ell_1$ norm, and there are no tests for memory allocations.
+Moreover, it implements only one quasi-Newton method (L-BFGS) and does not support Hessian approximations via linear operators.
+In contrast, **RegularizedOptimization.jl** leverages [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl) [@leconte_linearoperators_jl_linear_operators_2023] to represent a variety of Hessian approximations, such as L-SR1, L-BFGS, and diagonal approximations.
+
+**RegularizedOptimization.jl** implements a broad class of regularization-based algorithms for solving problems of the form $f(x) + h(x)$, where $f$ is smooth and $h$ is nonsmooth.
+The package offers a consistent API to formulate optimization problems and apply different regularization methods.
+It enables researchers to:
+
+- Test and compare algorithms within a unified framework.
+- Switch between exact Hessians, quasi-Newton updates, and diagonal Hessian approximations via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl) (see the sketch below).
+- Incorporate nonsmooth terms $h$ through proximal mappings.
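As an illustration of the second bullet above, the following minimal sketch wraps the same smooth model in two different limited-memory quasi-Newton Hessian approximations. It assumes NLPModelsModifiers.jl and the `bpdn_model` test problem from RegularizedProblems.jl that appears in the Examples section; the choice `compound = 10` is arbitrary.

```julia
# Sketch: one problem under two Hessian approximations (assumes
# NLPModelsModifiers.jl and RegularizedProblems.jl; compound = 10 is arbitrary).
using NLPModelsModifiers, RegularizedProblems

bpdn, _, _ = bpdn_model(10)

f_lbfgs = LBFGSModel(bpdn)  # limited-memory BFGS (positive definite) approximation
f_lsr1  = LSR1Model(bpdn)   # limited-memory SR1 (possibly indefinite) approximation

# Either wrapped model can be passed to RegularizedNLPModel(f, h) unchanged.
```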
+The design of the package is motivated by recent advances in the complexity analysis of regularization and trust-region methods.
+
+## Compatibility with JuliaSmoothOptimizers ecosystem
+
+**RegularizedOptimization.jl** integrates seamlessly with other [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) packages:
+
+- **Definition of $f$** via [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl), which provides efficient implementations of smooth problems $f$ together with their gradients.
+- **Model Hessians (quasi-Newton, diagonal approximations)** via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl), which represents Hessians as linear operators and implements efficient Hessian–vector products.
+- **Definition of $h$** via [ProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ProximalOperators.jl), which offers a large collection of nonsmooth terms $h$, and [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl), which provides shifted proximal mappings.
+
+This modularity makes it easy to prototype, benchmark, and extend regularization-based methods [@diouane-habiboullah-orban-2024; @aravkin-baraldi-orban-2022; @aravkin-baraldi-orban-2024; @leconte-orban-2023-2; @diouane-gollier-orban-2024].
+
+## Support for inexact subproblem solves
+
+Solvers in **RegularizedOptimization.jl** allow inexact resolution of trust-region and cubic-regularized subproblems using first-order methods implemented in the package itself, such as the quadratic regularization methods R2 [@aravkin-baraldi-orban-2022] and R2DH [@diouane-habiboullah-orban-2024], and the trust-region variant TRDH [@leconte-orban-2023-2].
+
+This is crucial for large-scale problems where exact subproblem solutions are prohibitive.
+
+## Support for Hessians as linear operators
+
+The second-order methods in **RegularizedOptimization.jl** can use Hessian approximations represented as linear operators via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl).
+Explicitly forming Hessians as dense or sparse matrices is often prohibitively expensive, both computationally and in terms of memory, especially in high-dimensional settings.
+In contrast, many problems admit efficient implementations of Hessian–vector or Jacobian–vector products, either through automatic differentiation tools or limited-memory quasi-Newton updates, making the linear-operator approach more scalable and practical.
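A minimal sketch of this operator-based approach, assuming LinearOperators.jl; the dimension, memory parameter, and correction pair below are arbitrary placeholders.

```julia
# Matrix-free Hessian approximation (sketch; assumes LinearOperators.jl).
using LinearOperators

n = 10_000
B = LSR1Operator(n; mem = 5)   # limited-memory SR1 operator, never formed as a matrix

s = randn(n); y = randn(n)
push!(B, s, y)                 # update the approximation with a correction pair (s, y)

v  = randn(n)
Bv = B * v                     # Hessian-vector product, no n×n storage required
```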
+In contrast, many problems admit efficient implementations of Hessian–vector or Jacobian–vector products, either through automatic differentiation tools or limited-memory quasi-Newton updates, making the linear-operator approach more scalable and practical. -The package is designed both as a research platform for developing new optimization methods and as a pedagogical tool for teaching modern non-smooth nonconvex optimization algorithms. -It provides reference implementations that are transparent and mathematically faithful, while being efficient enough for large-scale experiments. +## In-place methods + +All solvers in **RegularizedOptimization.jl** are implemented in an in-place fashion, minimizing memory allocations and improving performance. +This is particularly important for large-scale problems where memory usage can be a bottleneck. # Examples -A simple example: solving a regularized quadratic problem with an $\ell_1$ penalty. +A simple example is the solution of a regularized quadratic problem with an $\ell_1$ penalty, as described in @[aravkin-baraldi-orban-2022]. +Such problems are common in statistical learning and compressed sensing applications.The formulation is +$$ + \min_{x \in \mathbb{R}^n} \ \tfrac{1}{2}\|Ax-b\|_2^2+\lambda\|x\|_1, +$$ +where $A \in \mathbb{R}^{m \times n}$, $b \in \mathbb{R}^m$, and $\lambda>0$ is a regularization parameter. ```julia using LinearAlgebra, Random @@ -91,11 +111,10 @@ using ProximalOperators using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization, SolverCore # Set random seed for reproducibility -Random.seed!(123) +Random.seed!(1234) # Define a basis pursuit denoising problem compound = 10 -nz = 10 * compound bpdn, bpdn_nls, sol = bpdn_model(compound) # Define the Hessian approximation @@ -109,16 +128,23 @@ h = NormL1(λ) reg_nlp = RegularizedNLPModel(f, h) # Choose a solver (R2N) and execution statistics tracker -solver = R2NSolver(reg_nlp) +solver_r2N = R2NSolver(reg_nlp) stats = RegularizedExecutionStats(reg_nlp) # Solve the problem -solve!(solver, reg_nlp, stats, x = f.meta.x0, σk = 1.0, atol = 1e-8, rtol = 1e-8, verbose = 1) +solve!(solver_r2N, reg_nlp, stats, x = f.meta.x0, σk = 1.0, atol = 1e-8, rtol = 1e-8, verbose = 1) + +# Choose another solver (TR) and execution statistics tracker +solver_tr = TRSolver(reg_nlp) +stats_tr = RegularizedExecutionStats(reg_nlp) + +# Solve the problem +solve!(solver_tr, reg_nlp, stats_tr, x = f.meta.x0, Δk = 1.0, atol = 1e-8, rtol = 1e-8, verbose = 1) ``` # Acknowledgements -Development of RegularizedOptimization.jl has been supported by the Natural Sciences and Engineering Research Council of Canada (NSERC), the Fonds de Recherche du Québec – Nature et Technologies (FRQNT). -The authors thank the JuliaSmoothOptimizers community for valuable feedback and contributions. +Mohamed Laghdaf Habiboullah is supported by an excellence FRQNT grant, +and Youssef Diouane and Dominique Orban are partially supported by an NSERC Discovery Grant. 
# References From 49cf76b53beeccb4a5ce6f550086b33165bfaf86 Mon Sep 17 00:00:00 2001 From: Mohamed Laghdaf <81633807+MohamedLaghdafHABIBOULLAH@users.noreply.github.com> Date: Fri, 5 Sep 2025 18:42:34 -0400 Subject: [PATCH 05/42] Update GitHub Actions workflow for PDF generation --- .github/workflows/draft-pdf.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/draft-pdf.yml b/.github/workflows/draft-pdf.yml index c0e82906..44fa02d1 100644 --- a/.github/workflows/draft-pdf.yml +++ b/.github/workflows/draft-pdf.yml @@ -4,7 +4,6 @@ on: branches: - paper - paper-draft - - paper-workflow pull_request: types: [opened, synchronize, reopened] From 3722a046722ded129f4b284aa882c1967c01e89f Mon Sep 17 00:00:00 2001 From: Mohamed Laghdaf <81633807+MohamedLaghdafHABIBOULLAH@users.noreply.github.com> Date: Fri, 5 Sep 2025 18:47:25 -0400 Subject: [PATCH 06/42] Update draft-pdf.yml --- paper/paper.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index f00830b1..6d9e6ff0 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -14,7 +14,7 @@ authors: orcid: 0009-0008-3158-7912 affiliation: 1 - name: Mohamed Laghdaf Habiboullah^[corresponding author] - orcid: 0000-0003-3385-9379 + orcid: 0009-0005-3631-2799 affiliation: 1 - name: Dominique Orban orcid: 0000-0002-8017-7687 @@ -41,13 +41,13 @@ Both $f$ and $h$ may be nonconvex. The library provides a modular and extensible framework for experimenting some nonsmooth nonconvex optimization algorithms, including: -- **Trust-region methods (TR, TRDH)** [@aravkin-baraldi-orban-2022,@leconte-orban-2023], -- **Quadratic regularization methods (R2, R2N)** [@diouane-habiboullah-orban-2024,@aravkin-baraldi-orban-2022], +- **Trust-region methods (TR, TRDH)** [@aravkin-baraldi-orban-2022] and [@leconte-orban-2023], +- **Quadratic regularization methods (R2, R2N)** [@diouane-habiboullah-orban-2024] and [@aravkin-baraldi-orban-2022], - **Levenbergh-Marquardt methods (LM, LMTR)** [@aravkin-baraldi-orban-2024]. These methods rely solely on the gradient and Hessian(-vector) information of the smooth part $f$ and the proximal mapping of the nonsmooth part $h$ in order to compute steps. Then, the objective function $f + h$ is used only to accept or reject trial points. -Moreover, they can handle cases where Hessian approximations are unbounded[@diouane-habiboullah-orban-2024,@leconte-orban-2023-2], making the package particularly suited for large-scale, ill-conditioned, and nonsmooth problems. +Moreover, they can handle cases where Hessian approximations are unbounded[@diouane-habiboullah-orban-2024] and [@leconte-orban-2023-2], making the package particularly suited for large-scale, ill-conditioned, and nonsmooth problems. # Statement of need @@ -77,7 +77,7 @@ The design of the package is motivated by recent advances in the complexity anal - **Model Hessians (quasi-Newton, diagonal approximations)** via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl), which represents Hessians as linear operators and implements efficient Hessian–vector products. - **Definition of $h$** via [ProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ProximalOperators.jl), which offers a large collection of nonsmooth terms $h$, and [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl), which provides shifted proximal mappings. 
-This modularity makes it easy to prototype, benchmark, and extend regularization-based methods [@diouane-habiboullah-orban-2024,@aravkin-baraldi-orban-2022,@aravkin-baraldi-orban-2024,@leconte-orban-2023-2,@diouane-gollier-orban-2024]. +This modularity makes it easy to prototype, benchmark, and extend regularization-based methods [@diouane-habiboullah-orban-2024],[@aravkin-baraldi-orban-2022],[@aravkin-baraldi-orban-2024],[@leconte-orban-2023-2] and [@diouane-gollier-orban-2024]. ## Support for inexact subproblem solves From 2bb4f05b854b333afade7f6d4217e8b7a45460ac Mon Sep 17 00:00:00 2001 From: Mohamed Laghdaf <81633807+MohamedLaghdafHABIBOULLAH@users.noreply.github.com> Date: Sat, 6 Sep 2025 15:46:51 -0400 Subject: [PATCH 07/42] Apply suggestions from code review Co-authored-by: Maxence Gollier <134112149+MaxenceGollier@users.noreply.github.com> --- paper/paper.bib | 20 +++++++------------- paper/paper.md | 4 ++-- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index e1669da1..9ff90576 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -7,8 +7,6 @@ @Article{ aravkin-baraldi-orban-2022 Number = 2, Pages = {900--929}, doi = {10.1137/21M1409536}, - abstract = { We develop a trust-region method for minimizing the sum of a smooth term (f) and a nonsmooth term (h), both of which can be nonconvex. Each iteration of our method minimizes a possibly nonconvex model of (f + h) in a trust region. The model coincides with (f + h) in value and subdifferential at the center. We establish global convergence to a first-order stationary point when (f) satisfies a smoothness condition that holds, in particular, when it has a Lipschitz-continuous gradient, and (h) is proper and lower semicontinuous. The model of (h) is required to be proper, lower semi-continuous and prox-bounded. Under these weak assumptions, we establish a worst-case (O(1/\epsilon^2)) iteration complexity bound that matches the best known complexity bound of standard trust-region methods for smooth optimization. We detail a special instance, named TR-PG, in which we use a limited-memory quasi-Newton model of (f) and compute a step with the proximal gradient method, - resulting in a practical proximal quasi-Newton method. We establish similar convergence properties and complexity bound for a quadratic regularization variant, named R2, and provide an interpretation as a proximal gradient method with adaptive step size for nonconvex problems. R2 may also be used to compute steps inside the trust-region method, resulting in an implementation named TR-R2. We describe our Julia implementations and report numerical results on inverse problems from sparse optimization and signal processing. Both TR-PG and TR-R2 exhibit promising performance and compare favorably with two linesearch proximal quasi-Newton methods based on convex models. }, } @Article{ aravkin-baraldi-orban-2024, @@ -20,10 +18,6 @@ @Article{ aravkin-baraldi-orban-2024 Number = 4, Pages = {A2557--A2581}, doi = {10.1137/22M1538971}, - preprint = {https://www.gerad.ca/en/papers/G-2022-58/view}, - grant = nserc, - abstract = { Abstract. We develop a Levenberg–Marquardt method for minimizing the sum of a smooth nonlinear least-squares term \(f(x) = \frac{1}{2} \|F(x)\|\_2^2\) and a nonsmooth term \(h\). Both \(f\) and \(h\) may be nonconvex. Steps are computed by minimizing the sum of a regularized linear least-squares model and a model of \(h\) using a first-order method such as the proximal gradient method. 
We establish global convergence to a first-order stationary point under the assu mptions that \(F\) and its Jacobian are Lipschitz continuous and \(h\) is proper and lower semicontinuous. In the worst case, our method performs \(O(\epsilon^{-2})\) iterations to bring a measure of stationarity below \(\epsilon \in (0, 1)\) . We also derive a trust-region variant that enjoys similar asymptotic worst-case iteration complexity as a special case of the trust-region algorithm of Aravkin, Baraldi, and Orban [SIAM J. Optim., 32 (2022), pp. 900–929]. We report numerica l results on three - examples: a group-lasso basis-pursuit denoise example, a nonlinear support vector machine, and parameter estimation in a neuroscience application. To implement those examples, we describe in detail how to evaluate proximal operators for separable \(h\) and for the group lasso with trust-region constraint. In all cases, the Levenberg–Marquardt methods perform fewer outer iterations than either a proximal gradient method with adaptive step length or a quasi-Newto n trust-region method, neither of which exploit the least-squares structure of the problem. Our results also highlight the need for more sophisticated subproblem solvers than simple first-order methods. }, } @Software{ leconte_linearoperators_jl_linear_operators_2023, @@ -35,15 +29,15 @@ @Software{ leconte_linearoperators_jl_linear_operators_2023 Year = 2023, } -@TechReport{ leconte-orban-2023, +@Article{ leconte-orban-2023, Author = {G. Leconte and D. Orban}, Title = {The Indefinite Proximal Gradient Method}, - Institution = gerad, - Year = 2023, - Type = {Cahier}, - Number = {G-2023-37}, - Address = gerad-address, - doi = {10.13140/RG.2.2.11836.41606}, + Journal = coap, + Year = 2025, + Volume = 91, + Number = 2, + Pages = 861--903, + doi = {10.1007/s10589-024-00604-5}, } @TechReport{ leconte-orban-2023-2, diff --git a/paper/paper.md b/paper/paper.md index 6d9e6ff0..1add8bdb 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -55,7 +55,7 @@ Moreover, they can handle cases where Hessian approximations are unbounded[@diou There exists a way to solve \eqref{eq:nlp} in Julia via [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl). It implements several proximal algorithms for nonsmooth optimization. -However, the available examples only consider convex instances of $h$, nmaely the $\ell_1$ norm and there are no tests for memory allocations. +However, the available examples only consider convex instances of $h$, namely the $\ell_1$ norm and there are no tests for memory allocations. Moreover, it implements only one quasi-Newton method (L-BFGS) and does not support Hessian approximations via linear operators. In contrast, **RegularizedOptimization.jl** leverages [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl)[@leconte_linearoperators_jl_linear_operators_2023] to represent a variety of Hessian approximations, such as L-SR1, L-BFGS, and diagonal approximations. @@ -77,7 +77,7 @@ The design of the package is motivated by recent advances in the complexity anal - **Model Hessians (quasi-Newton, diagonal approximations)** via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl), which represents Hessians as linear operators and implements efficient Hessian–vector products. 
- **Definition of $h$** via [ProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ProximalOperators.jl), which offers a large collection of nonsmooth terms $h$, and [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl), which provides shifted proximal mappings. -This modularity makes it easy to prototype, benchmark, and extend regularization-based methods [@diouane-habiboullah-orban-2024],[@aravkin-baraldi-orban-2022],[@aravkin-baraldi-orban-2024],[@leconte-orban-2023-2] and [@diouane-gollier-orban-2024]. +This modularity makes it easy to prototype, benchmark, and extend regularization-based methods [@diouane-habiboullah-orban-2024],[@aravkin-baraldi-orban-2022],[@aravkin-baraldi-orban-2024] and[@leconte-orban-2023-2]. ## Support for inexact subproblem solves From 33510b33c70dd6720d509625fa52ccb2628803c8 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Sun, 7 Sep 2025 08:59:36 -0400 Subject: [PATCH 08/42] incorporate suggestions --- paper/paper.bib | 26 +++++++++++++++++ paper/paper.md | 76 ++++++++++++++++++++++++++++++------------------- 2 files changed, 72 insertions(+), 30 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index 9ff90576..73fb136d 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -85,3 +85,29 @@ @article{bezanson-edelman-karpinski-shah-2017 doi = {10.1137/141000671}, publisher = {SIAM}, } + +@Misc{orban-siqueira-cutest-2020, + author = {D. Orban and A. S. Siqueira and {contributors}}, + title = {{CUTEst.jl}: {J}ulia's {CUTEst} interface}, + month = {October}, + url = {https://github.com/JuliaSmoothOptimizers/CUTEst.jl}, + year = {2020}, + DOI = {10.5281/zenodo.1188851}, +} + +@Misc{orban-siqueira-nlpmodels-2020, + author = {D. Orban and A. S. Siqueira and {contributors}}, + title = {{NLPModels.jl}: Data Structures for Optimization Models}, + month = {July}, + url = {https://github.com/JuliaSmoothOptimizers/NLPModels.jl}, + year = {2020}, + DOI = {10.5281/zenodo.2558627}, +} + +@Misc{jso, + author = {T. Migot and D. Orban and A. S. Siqueira}, + title = {The {JuliaSmoothOptimizers} Ecosystem for Linear and Nonlinear Optimization}, + year = {2021}, + url = {https://juliasmoothoptimizers.github.io/}, + doi = {10.5281/zenodo.2655082}, +} diff --git a/paper/paper.md b/paper/paper.md index 1add8bdb..ffb3cf49 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -56,28 +56,20 @@ Moreover, they can handle cases where Hessian approximations are unbounded[@diou There exists a way to solve \eqref{eq:nlp} in Julia via [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl). It implements several proximal algorithms for nonsmooth optimization. However, the available examples only consider convex instances of $h$, namely the $\ell_1$ norm and there are no tests for memory allocations. -Moreover, it implements only one quasi-Newton method (L-BFGS) and does not support Hessian approximations via linear operators. -In contrast, **RegularizedOptimization.jl** leverages [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl)[@leconte_linearoperators_jl_linear_operators_2023] to represent a variety of Hessian approximations, such as L-SR1, L-BFGS, and diagonal approximations. +Moreover, it implements only one quasi-Newton method (L-BFGS) and does not support other Hessian approximations. 
-**RegularizedOptimization.jl** implements a broad class of regularization-based algorithms for solving problems of the form $f(x) + h(x)$, where $f$ is smooth and $h$ is nonsmooth. +**RegularizedOptimization.jl**, in contrast, implements a broad class of regularization-based algorithms for solving problems of the form $f(x) + h(x)$, where $f$ is smooth and $h$ is nonsmooth. The package offers a consistent API to formulate optimization problems and apply different regularization methods. -It enables researchers to: +It integrates seamlessly with the [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) ecosystem, an academic organization for nonlinear optimization software development, testing, and benchmarking. +Specifically, **RegularizedOptimization.jl** interoperates with: -- Test and compare algorithms within a unified framework. -- Switch between exact Hessians, quasi-Newton updates, and diagonal Hessian approximations via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl). -- Incorporate nonsmooth terms $h$ through proximal mappings. +- **Definition of smooth problems $f$** via [NLPModels.jl](https://github.com/JuliaSmoothOptimizers/NLPModels.jl) @[orban-siqueira-nlpmodels-2020] which provides a standardized Julia API for representing nonlinear programming (NLP) problems. +Large collections of such problems are available in [Cutest.jl](https://github.com/JuliaSmoothOptimizers/CUTEst.jl) @[orban-siqueira-cutest-2020] and [OptimizationProblems.jl](https://github.com/JuliaSmoothOptimizers/OptimizationProblems.jl). +Another option is to use [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl), which provides instances commonly used in the nonsmooth optimization literature. +- **Hessian approximations (quasi-Newton, diagonal approximations)** via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl), which represents Hessians as linear operators and implements efficient Hessian–vector products. +- **Definition of nonsmooth terms $h$** via [ProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ProximalOperators.jl), which offers a large collection of nonsmooth functions, and [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl), which provides shifted proximal mappings for nonsmooth functions. -The design of the package is motivated by recent advances in the complexity analysis of regularization and trust-region methods. - -## Compatibility with JuliaSmoothOptimizers ecosystem - -**RegularizedOptimization.jl** integrates seamlessly with other [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) packages: - -- **Definition of $f$** via [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl), which provides efficient implementations of smooth problems $f$ together with their gradients. -- **Model Hessians (quasi-Newton, diagonal approximations)** via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl), which represents Hessians as linear operators and implements efficient Hessian–vector products. -- **Definition of $h$** via [ProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ProximalOperators.jl), which offers a large collection of nonsmooth terms $h$, and [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl), which provides shifted proximal mappings. 
- -This modularity makes it easy to prototype, benchmark, and extend regularization-based methods [@diouane-habiboullah-orban-2024],[@aravkin-baraldi-orban-2022],[@aravkin-baraldi-orban-2024] and[@leconte-orban-2023-2]. +This modularity makes it easy to benchmark existing solvers available in the repository [@diouane-habiboullah-orban-2024], [@aravkin-baraldi-orban-2022], [@aravkin-baraldi-orban-2024], and [@leconte-orban-2023-2]. ## Support for inexact subproblem solves @@ -94,14 +86,15 @@ In contrast, many problems admit efficient implementations of Hessian–vector o ## In-place methods All solvers in **RegularizedOptimization.jl** are implemented in an in-place fashion, minimizing memory allocations and improving performance. -This is particularly important for large-scale problems where memory usage can be a bottleneck. +This is particularly important for large-scale problems, where memory usage can become a bottleneck. +Even in low-dimensional settings, Julia may exhibit significantly slower performance due to extra allocations, making the in-place design a key feature of the package. # Examples A simple example is the solution of a regularized quadratic problem with an $\ell_1$ penalty, as described in @[aravkin-baraldi-orban-2022]. Such problems are common in statistical learning and compressed sensing applications.The formulation is $$ - \min_{x \in \mathbb{R}^n} \ \tfrac{1}{2}\|Ax-b\|_2^2+\lambda\|x\|_1, + \min_{x \in \mathbb{R}^n} \ \tfrac{1}{2}\|Ax-b\|_2^2+\lambda\|x\|_0, $$ where $A \in \mathbb{R}^{m \times n}$, $b \in \mathbb{R}^m$, and $\lambda>0$ is a regularization parameter. @@ -115,31 +108,54 @@ Random.seed!(1234) # Define a basis pursuit denoising problem compound = 10 -bpdn, bpdn_nls, sol = bpdn_model(compound) +bpdn_model, _, _ = bpdn_model(compound) # Define the Hessian approximation -f = LSR1Model(bpdn) +f = SpectralGradientModel(bpdn) # Define the nonsmooth regularizer (L1 norm) -λ = 1.0 -h = NormL1(λ) +λ = norm(grad(bpdn_model, zeros(bpdn_model.meta.nvar)), Inf) / 10 +h = NormL0(λ) # Define the regularized NLP model reg_nlp = RegularizedNLPModel(f, h) -# Choose a solver (R2N) and execution statistics tracker -solver_r2N = R2NSolver(reg_nlp) +# Choose a solver (R2DH) and execution statistics tracker +solver_r2dh= R2DHSolver(reg_nlp) stats = RegularizedExecutionStats(reg_nlp) # Solve the problem -solve!(solver_r2N, reg_nlp, stats, x = f.meta.x0, σk = 1.0, atol = 1e-8, rtol = 1e-8, verbose = 1) +solve!(solver_r2dh, reg_nlp, stats, x = f.meta.x0, σk = 1.0, atol = 1e-8, rtol = 1e-8, verbose = 1) + +``` + +Another example is the FitzHugh-Nagumo inverse problem with an $\ell_1$ penalty, as described in @[aravkin-baraldi-orban-2022] and @[aravkin-baraldi-orban-2024]. 
+ +```julia +using LinearAlgebra +using DifferentialEquations, ProximalOperators +using ADNLPModels, NLPModels, NLPModelsModifiers, RegularizedOptimization, RegularizedProblems -# Choose another solver (TR) and execution statistics tracker +# Define the Fitzagerald Higgs problem +data, _, _, _, _ = RegularizedProblems.FH_smooth_term() +fh_model = ADNLPModel(misfit, ones(5)) + +# Define the Hessian approximation +f = LBFGSModel(fh_model) + +# Define the nonsmooth regularizer (L1 norm) +λ = 0.1 +h = NormL1(λ) + +# Define the regularized NLP model +reg_nlp = RegularizedNLPModel(f, h) + +# Choose a solver (TR) and execution statistics tracker solver_tr = TRSolver(reg_nlp) -stats_tr = RegularizedExecutionStats(reg_nlp) +stats = RegularizedExecutionStats(reg_nlp) # Solve the problem -solve!(solver_tr, reg_nlp, stats_tr, x = f.meta.x0, Δk = 1.0, atol = 1e-8, rtol = 1e-8, verbose = 1) +solve!(solver_tr, reg_nlp, stats, x = f.meta.x0, atol = 1e-3, rtol = 1e-4, verbose = 10, ν = 1.0e+2) ``` # Acknowledgements From 3b67bb7fbc66483b8fd2eb3454a516b9183384a4 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Sun, 7 Sep 2025 09:05:47 -0400 Subject: [PATCH 09/42] add opt bib --- paper/paper.bib | 8 ++++++++ paper/paper.md | 8 ++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index 73fb136d..c962afe7 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -111,3 +111,11 @@ @Misc{jso url = {https://juliasmoothoptimizers.github.io/}, doi = {10.5281/zenodo.2655082}, } + +@Misc{migot-orban-siqueira-optimizationproblems-2023, + author = {T. Migot and D. Orban and A. S. Siqueira}, + title = {OptimizationProblems.jl: A collection of optimization problems in Julia}, + year = {2023}, + doi = {10.5281/zenodo.3672094}, + url = {https://github.com/JuliaSmoothOptimizers/OptimizationProblems.jl}, +} diff --git a/paper/paper.md b/paper/paper.md index ffb3cf49..a5a5330d 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -63,8 +63,8 @@ The package offers a consistent API to formulate optimization problems and apply It integrates seamlessly with the [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) ecosystem, an academic organization for nonlinear optimization software development, testing, and benchmarking. Specifically, **RegularizedOptimization.jl** interoperates with: -- **Definition of smooth problems $f$** via [NLPModels.jl](https://github.com/JuliaSmoothOptimizers/NLPModels.jl) @[orban-siqueira-nlpmodels-2020] which provides a standardized Julia API for representing nonlinear programming (NLP) problems. -Large collections of such problems are available in [Cutest.jl](https://github.com/JuliaSmoothOptimizers/CUTEst.jl) @[orban-siqueira-cutest-2020] and [OptimizationProblems.jl](https://github.com/JuliaSmoothOptimizers/OptimizationProblems.jl). +- **Definition of smooth problems $f$** via [NLPModels.jl](https://github.com/JuliaSmoothOptimizers/NLPModels.jl) [@orban-siqueira-nlpmodels-2020] which provides a standardized Julia API for representing nonlinear programming (NLP) problems. +Large collections of such problems are available in [Cutest.jl](https://github.com/JuliaSmoothOptimizers/CUTEst.jl) [@orban-siqueira-cutest-2020] and [OptimizationProblems.jl](https://github.com/JuliaSmoothOptimizers/OptimizationProblems.jl) [@migot-orban-siqueira-optimizationproblems-2023]. 
Another option is to use [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl), which provides instances commonly used in the nonsmooth optimization literature. - **Hessian approximations (quasi-Newton, diagonal approximations)** via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl), which represents Hessians as linear operators and implements efficient Hessian–vector products. - **Definition of nonsmooth terms $h$** via [ProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ProximalOperators.jl), which offers a large collection of nonsmooth functions, and [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl), which provides shifted proximal mappings for nonsmooth functions. @@ -91,7 +91,7 @@ Even in low-dimensional settings, Julia may exhibit significantly slower perform # Examples -A simple example is the solution of a regularized quadratic problem with an $\ell_1$ penalty, as described in @[aravkin-baraldi-orban-2022]. +A simple example is the solution of a regularized quadratic problem with an $\ell_1$ penalty, as described in [@aravkin-baraldi-orban-2022]. Such problems are common in statistical learning and compressed sensing applications.The formulation is $$ \min_{x \in \mathbb{R}^n} \ \tfrac{1}{2}\|Ax-b\|_2^2+\lambda\|x\|_0, @@ -129,7 +129,7 @@ solve!(solver_r2dh, reg_nlp, stats, x = f.meta.x0, σk = 1.0, atol = 1e-8, rtol ``` -Another example is the FitzHugh-Nagumo inverse problem with an $\ell_1$ penalty, as described in @[aravkin-baraldi-orban-2022] and @[aravkin-baraldi-orban-2024]. +Another example is the FitzHugh-Nagumo inverse problem with an $\ell_1$ penalty, as described in [@aravkin-baraldi-orban-2022] and [@aravkin-baraldi-orban-2024]. 
```julia using LinearAlgebra From afd7753c14d5546d4ad88a0b86eb660a37decd76 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Sun, 7 Sep 2025 09:11:23 -0400 Subject: [PATCH 10/42] correct the title --- paper/paper.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index a5a5330d..e9b77c86 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -1,5 +1,5 @@ --- -title: 'RegularizedOptimization.jl: A Julia framework for regularization-based nonlinear optimization' +title: 'RegularizedOptimization.jl: A Julia framework for regularized and nonsmooth optimization' tags: - Julia - nonsmooth optimization From 269bad86bd59619bc3b1c1be98614fb409ada3cc Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Mon, 8 Sep 2025 00:50:13 -0400 Subject: [PATCH 11/42] incorporate Dominique comments --- .cirrus.yml | 26 ---------------------- paper/paper.md | 60 ++++++++++++++++++++++++++------------------------ 2 files changed, 31 insertions(+), 55 deletions(-) delete mode 100644 .cirrus.yml diff --git a/.cirrus.yml b/.cirrus.yml deleted file mode 100644 index ad5fb1d9..00000000 --- a/.cirrus.yml +++ /dev/null @@ -1,26 +0,0 @@ -task: - matrix: - - name: FreeBSD - freebsd_instance: - image_family: freebsd-14-2 - env: - matrix: - - JULIA_VERSION: 1 - install_script: | - URL="https://raw.githubusercontent.com/ararslan/CirrusCI.jl/master/bin/install.sh" - set -x - if [ "$(uname -s)" = "Linux" ] && command -v apt; then - apt update - apt install -y curl - fi - if command -v curl; then - sh -c "$(curl ${URL})" - elif command -v wget; then - sh -c "$(wget ${URL} -q -O-)" - elif command -v fetch; then - sh -c "$(fetch ${URL} -o -)" - fi - build_script: - - cirrusjl build - test_script: - - cirrusjl test diff --git a/paper/paper.md b/paper/paper.md index e9b77c86..29e70329 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -51,29 +51,31 @@ Moreover, they can handle cases where Hessian approximations are unbounded[@diou # Statement of need -## Unified framework for nonsmooth methods +## Model-based framework for nonsmooth methods -There exists a way to solve \eqref{eq:nlp} in Julia via [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl). -It implements several proximal algorithms for nonsmooth optimization. -However, the available examples only consider convex instances of $h$, namely the $\ell_1$ norm and there are no tests for memory allocations. -Moreover, it implements only one quasi-Newton method (L-BFGS) and does not support other Hessian approximations. +There exists a way to solve \eqref{eq:nlp} in Julia using [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), which implements first-order line search–based methods for composite optimization. +These methods are generally splitting schemes that alternate between gradient steps on the smooth part $f$, or directions derived from it, and proximal steps on the nonsmooth part $h$. +By contrast, **RegularizedOptimization.jl** focuses on model-based approaches such as trust-region and regularization algorithms. +As shown in [@aravkin-baraldi-orban-2022], model-based methods typically require fewer evaluations of the objective and its gradient than first-order line search methods, at the expense of solving more involved subproblems. +Although these subproblems may require many proximal iterations, each proximal computation is inexpensive, making the overall approach efficient for large-scale problems. 
-**RegularizedOptimization.jl**, in contrast, implements a broad class of regularization-based algorithms for solving problems of the form $f(x) + h(x)$, where $f$ is smooth and $h$ is nonsmooth. -The package offers a consistent API to formulate optimization problems and apply different regularization methods. +Building on this perspective, **RegularizedOptimization.jl** implements a broad class of regularization-based algorithms for solving problems of the form $f(x) + h(x)$, where $f$ is smooth and $h$ is nonsmooth. +The package provides a consistent API to formulate optimization problems and apply different regularization methods. It integrates seamlessly with the [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) ecosystem, an academic organization for nonlinear optimization software development, testing, and benchmarking. -Specifically, **RegularizedOptimization.jl** interoperates with: -- **Definition of smooth problems $f$** via [NLPModels.jl](https://github.com/JuliaSmoothOptimizers/NLPModels.jl) [@orban-siqueira-nlpmodels-2020] which provides a standardized Julia API for representing nonlinear programming (NLP) problems. +On the one hand, smooth problems $f$ can be defined via [NLPModels.jl](https://github.com/JuliaSmoothOptimizers/NLPModels.jl) [@orban-siqueira-nlpmodels-2020], which provides a standardized Julia API for representing nonlinear programming (NLP) problems. Large collections of such problems are available in [Cutest.jl](https://github.com/JuliaSmoothOptimizers/CUTEst.jl) [@orban-siqueira-cutest-2020] and [OptimizationProblems.jl](https://github.com/JuliaSmoothOptimizers/OptimizationProblems.jl) [@migot-orban-siqueira-optimizationproblems-2023]. -Another option is to use [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl), which provides instances commonly used in the nonsmooth optimization literature. -- **Hessian approximations (quasi-Newton, diagonal approximations)** via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl), which represents Hessians as linear operators and implements efficient Hessian–vector products. -- **Definition of nonsmooth terms $h$** via [ProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ProximalOperators.jl), which offers a large collection of nonsmooth functions, and [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl), which provides shifted proximal mappings for nonsmooth functions. +Another option is to use [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl), which provides problem instances commonly used in the nonsmooth optimization literature. + +On the other hand, Hessian approximations of these functions, including quasi-Newton and diagonal schemes, can be specified through [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl), which represents Hessians as linear operators and implements efficient Hessian–vector products. + +Finally, nonsmooth terms $h$ can be modeled using [ProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ProximalOperators.jl), which provides a broad collection of nonsmooth functions, together with [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl), which provides shifted proximal mappings for nonsmooth functions. 
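+
+As a minimal sketch of how these pieces fit together (the objective below is a hypothetical test function; the constructors are the ones used throughout this paper), a regularized problem can be assembled as follows:
+
+```julia
+using ADNLPModels, NLPModelsModifiers, ProximalOperators, RegularizedOptimization
+
+# Smooth part f: a hypothetical nonconvex function modeled with ADNLPModels.jl
+f = ADNLPModel(x -> (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2, [-1.2, 1.0])
+
+# Limited-memory quasi-Newton (L-BFGS) approximation of the Hessian of f
+f_qn = LBFGSModel(f)
+
+# Nonsmooth part h: an ℓ₁ regularizer from ProximalOperators.jl
+h = NormL1(1.0)
+
+# Regularized problem representing f + h
+reg_nlp = RegularizedNLPModel(f_qn, h)
+```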
This modularity makes it easy to benchmark existing solvers available in the repository [@diouane-habiboullah-orban-2024], [@aravkin-baraldi-orban-2022], [@aravkin-baraldi-orban-2024], and [@leconte-orban-2023-2]. ## Support for inexact subproblem solves -Solvers in **RegularizedOptimization.jl** allow inexact resolution of trust-region and cubic-regularized subproblems using first-order that are implemented in the package itself such as the quadratic regularization method R2[@aravkin-baraldi-orban-2022] and R2DH[@diouane-habiboullah-orban-2024] with trust-region variants TRDH[@leconte-orban-2023-2] +Solvers in **RegularizedOptimization.jl** allow inexact resolution of trust-region and quadratic-regularized subproblems using first-order that are implemented in the package itself such as the quadratic regularization method R2[@aravkin-baraldi-orban-2022] and R2DH[@diouane-habiboullah-orban-2024] with trust-region variants TRDH[@leconte-orban-2023-2]. This is crucial for large-scale problems where exact subproblem solutions are prohibitive. @@ -86,35 +88,35 @@ In contrast, many problems admit efficient implementations of Hessian–vector o ## In-place methods All solvers in **RegularizedOptimization.jl** are implemented in an in-place fashion, minimizing memory allocations and improving performance. -This is particularly important for large-scale problems, where memory usage can become a bottleneck. -Even in low-dimensional settings, Julia may exhibit significantly slower performance due to extra allocations, making the in-place design a key feature of the package. # Examples -A simple example is the solution of a regularized quadratic problem with an $\ell_1$ penalty, as described in [@aravkin-baraldi-orban-2022]. -Such problems are common in statistical learning and compressed sensing applications.The formulation is -$$ - \min_{x \in \mathbb{R}^n} \ \tfrac{1}{2}\|Ax-b\|_2^2+\lambda\|x\|_0, +We consider two examples where the smooth part $f$ is nonconvex and the nonsmooth part $h$ is either the $\ell_0$ or $\ell_1$ norm. + +A first example addresses an image recognition task using a support vector machine (SVM) similar to those in [@aravkin-baraldi-orban-2022] and [@diouane-habiboullah-orban-2024]. +The formulation is $$ -where $A \in \mathbb{R}^{m \times n}$, $b \in \mathbb{R}^m$, and $\lambda>0$ is a regularization parameter. +\min_{x \in \mathbb{R}^n} \ \tfrac{1}{2} \|\mathbf{1} - \tanh(b \odot \langle A, x \rangle)\|^2 + \lambda \|x\|_0, +$$ +where $\lambda = 10^{-1}$ and $A \in \mathbb{R}^{m \times n}$, with $n = 784$ representing the vectorized size of each image and $m = 13{,}007$ is the number of images in the training dataset. 
```julia using LinearAlgebra, Random using ProximalOperators using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization, SolverCore +using MLDatasets -# Set random seed for reproducibility -Random.seed!(1234) +random_seed = 1234 +Random.seed!(random_seed) -# Define a basis pursuit denoising problem -compound = 10 -bpdn_model, _, _ = bpdn_model(compound) +# Load the MNIST dataset +model, _, _ = RegularizedProblems.svm_train_model() # Define the Hessian approximation -f = SpectralGradientModel(bpdn) +f = LBFGSModel(model) -# Define the nonsmooth regularizer (L1 norm) -λ = norm(grad(bpdn_model, zeros(bpdn_model.meta.nvar)), Inf) / 10 +# Define the nonsmooth regularizer (L0 norm) +λ = 1.0e-1 h = NormL0(λ) # Define the regularized NLP model @@ -125,7 +127,7 @@ solver_r2dh= R2DHSolver(reg_nlp) stats = RegularizedExecutionStats(reg_nlp) # Solve the problem -solve!(solver_r2dh, reg_nlp, stats, x = f.meta.x0, σk = 1.0, atol = 1e-8, rtol = 1e-8, verbose = 1) +solve!(solver_r2dh, reg_nlp, stats, x = f.meta.x0, σk = 1e-6, atol = 2e-5, rtol = 2e-5, verbose = 1) ``` From bc756ee3cfbd3f94de8131da95a16a4555fa40ad Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Mon, 8 Sep 2025 12:43:53 -0400 Subject: [PATCH 12/42] Add the examples --- .gitignore | 1 + paper/examples/Project.toml | 18 ++++++++++++++++++ paper/examples/example1.jl | 35 +++++++++++++++++++++++++++++++++++ paper/examples/example2.jl | 27 +++++++++++++++++++++++++++ paper/paper.md | 24 +++++++++++++++--------- 5 files changed, 96 insertions(+), 9 deletions(-) create mode 100644 .gitignore create mode 100644 paper/examples/Project.toml create mode 100644 paper/examples/example1.jl create mode 100644 paper/examples/example2.jl diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..3a421994 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +paper/examples/Manifest.toml diff --git a/paper/examples/Project.toml b/paper/examples/Project.toml new file mode 100644 index 00000000..43bcfd97 --- /dev/null +++ b/paper/examples/Project.toml @@ -0,0 +1,18 @@ +[deps] +ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a" +DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" +MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458" +NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6" +NLPModelsModifiers = "e01155f1-5c6f-4375-a9d8-616dd036575f" +ProximalOperators = "a725b495-10eb-56fe-b38b-717eba820537" +RegularizedOptimization = "196f2941-2d58-45ba-9f13-43a2532b2fa8" +RegularizedProblems = "ea076b23-609f-44d2-bb12-a4ae45328278" +ShiftedProximalOperators = "d4fd37fa-580c-4e43-9b30-361c21aae263" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[compat] +NLPModels = "0.19, 0.20, 0.21" +NLPModelsModifiers = "0.7" +ProximalOperators = "0.15" +RegularizedProblems = "0.1" +ShiftedProximalOperators = "0.2" diff --git a/paper/examples/example1.jl b/paper/examples/example1.jl new file mode 100644 index 00000000..b292d6c7 --- /dev/null +++ b/paper/examples/example1.jl @@ -0,0 +1,35 @@ +using LinearAlgebra, Random +using ProximalOperators +using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization +using MLDatasets + +random_seed = 1234 +Random.seed!(random_seed) + +# Load MNIST from MLDatasets +imgs, labels = MLDatasets.MNIST.traindata() + +# Use RegularizedProblems' preprocessing +A, b = RegularizedProblems.generate_data(imgs, labels, (1, 7), false) + +# Build the models +model, _, _ = RegularizedProblems.svm_model(A, b) + +# Define the Hessian approximation +f = LBFGSModel(model) + +# Define 
the nonsmooth regularizer (L0 norm) +λ = 1.0e-1 +h = NormL0(λ) + +# Define the regularized NLP model +reg_nlp = RegularizedNLPModel(f, h) + +# Choose a solver (R2DH) and execution statistics tracker +solver_r2dh= R2DHSolver(reg_nlp) +stats = RegularizedExecutionStats(reg_nlp) + +# Solve the problem +solve!(solver_r2dh, reg_nlp, stats, x = f.meta.x0, σk = 1e-6, atol = 2e-5, rtol = 2e-5, verbose = 1) + +@test stats.status == :first_order diff --git a/paper/examples/example2.jl b/paper/examples/example2.jl new file mode 100644 index 00000000..e8590f26 --- /dev/null +++ b/paper/examples/example2.jl @@ -0,0 +1,27 @@ +## After merging the PRs on TR + +using LinearAlgebra +using DifferentialEquations, ProximalOperators +using ADNLPModels, NLPModels, NLPModelsModifiers, RegularizedOptimization, RegularizedProblems + +# Define the Fitzhugh-Nagumo problem +model, _, _ = RegularizedProblems.fh_model() + +# Define the Hessian approximation +f = LBFGSModel(fh_model) + +# Define the nonsmooth regularizer (L1 norm) +λ = 0.1 +h = NormL1(λ) + +# Define the regularized NLP model +reg_nlp = RegularizedNLPModel(f, h) + +# Choose a solver (TR) and execution statistics tracker +solver_tr = TRSolver(reg_nlp) +stats = RegularizedExecutionStats(reg_nlp) + +# Solve the problem +solve!(solver_tr, reg_nlp, stats, x = f.meta.x0, atol = 1e-3, rtol = 1e-4, verbose = 10, ν = 1.0e+2) + +@test stats.status == :first_order diff --git a/paper/paper.md b/paper/paper.md index 29e70329..ab4f5b4d 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -87,7 +87,7 @@ In contrast, many problems admit efficient implementations of Hessian–vector o ## In-place methods -All solvers in **RegularizedOptimization.jl** are implemented in an in-place fashion, minimizing memory allocations and improving performance. +All solvers in **RegularizedOptimization.jl** are implemented in an in-place fashion, minimizing memory allocations during the resolution process. 
# Examples @@ -103,14 +103,20 @@ where $\lambda = 10^{-1}$ and $A \in \mathbb{R}^{m \times n}$, with $n = 784$ re ```julia using LinearAlgebra, Random using ProximalOperators -using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization, SolverCore +using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization using MLDatasets random_seed = 1234 Random.seed!(random_seed) -# Load the MNIST dataset -model, _, _ = RegularizedProblems.svm_train_model() +# Load MNIST from MLDatasets +imgs, labels = MLDatasets.MNIST.traindata() + +# Use RegularizedProblems' preprocessing +A, b = RegularizedProblems.generate_data(imgs, labels, (1, 7), false) + +# Build the models +model, _, _ = RegularizedProblems.svm_model(A, b) # Define the Hessian approximation f = LBFGSModel(model) @@ -135,12 +141,12 @@ Another example is the FitzHugh-Nagumo inverse problem with an $\ell_1$ penalty, ```julia using LinearAlgebra -using DifferentialEquations, ProximalOperators -using ADNLPModels, NLPModels, NLPModelsModifiers, RegularizedOptimization, RegularizedProblems +using ProximalOperators +using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization +using DifferentialEquations, ADNLPModels -# Define the Fitzagerald Higgs problem -data, _, _, _, _ = RegularizedProblems.FH_smooth_term() -fh_model = ADNLPModel(misfit, ones(5)) +# Define the Fitzhugh-Nagumo problem +model, _, _ = RegularizedProblems.fh_model() # Define the Hessian approximation f = LBFGSModel(fh_model) From 3c68da0018b00684b7e7eb0765dd0acff8ebc1f1 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Mon, 8 Sep 2025 13:20:36 -0400 Subject: [PATCH 13/42] clarify the comparison --- paper/paper.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index ab4f5b4d..290676a6 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -53,13 +53,14 @@ Moreover, they can handle cases where Hessian approximations are unbounded[@diou ## Model-based framework for nonsmooth methods -There exists a way to solve \eqref{eq:nlp} in Julia using [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), which implements first-order line search–based methods for composite optimization. -These methods are generally splitting schemes that alternate between gradient steps on the smooth part $f$, or directions derived from it, and proximal steps on the nonsmooth part $h$. +There exists a way to solve \eqref{eq:nlp} in Julia using [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), which implements in-place first-order line search–based methods for composite optimization. +Most of these methods are generally splitting schemes that alternate between taking steps along the gradient of the smooth part $f$ (or quasi-Newton directions) and applying proximal steps on the nonsmooth part $h$. +Currently, **ProximalAlgorithms.jl** provides only L-BFGS as a quasi-Newton option. By contrast, **RegularizedOptimization.jl** focuses on model-based approaches such as trust-region and regularization algorithms. As shown in [@aravkin-baraldi-orban-2022], model-based methods typically require fewer evaluations of the objective and its gradient than first-order line search methods, at the expense of solving more involved subproblems. Although these subproblems may require many proximal iterations, each proximal computation is inexpensive, making the overall approach efficient for large-scale problems. 
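+
+Schematically, following the framework of [@aravkin-baraldi-orban-2022] (the precise models vary from solver to solver), each iteration builds a model of the objective about the iterate $x_k$ using a Hessian approximation $B_k \approx \nabla^2 f(x_k)$, computes a step $s_k$ that approximately minimizes it, and accepts or rejects the trial point based on the ratio of actual to predicted decrease:
+$$
+m_k(s) = f(x_k) + \nabla f(x_k)^T s + \tfrac{1}{2} s^T B_k s + h(x_k + s),
+\qquad
+\rho_k = \frac{(f + h)(x_k) - (f + h)(x_k + s_k)}{m_k(0) - m_k(s_k)}.
+$$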
-Building on this perspective, **RegularizedOptimization.jl** implements a broad class of regularization-based algorithms for solving problems of the form $f(x) + h(x)$, where $f$ is smooth and $h$ is nonsmooth.
+Building on this perspective, **RegularizedOptimization.jl** implements state-of-the-art regularization-based algorithms for solving problems of the form $f(x) + h(x)$, where $f$ is smooth and $h$ is nonsmooth.
 The package provides a consistent API to formulate optimization problems and apply different regularization methods.
 It integrates seamlessly with the [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) ecosystem, an academic organization for nonlinear optimization software development, testing, and benchmarking.

From b06581b8f840420f5c7eee7c1534dc22066b2154 Mon Sep 17 00:00:00 2001
From: MohamedLaghdafHABIBOULLAH
Date: Mon, 15 Sep 2025 14:10:23 -0400
Subject: [PATCH 14/42] add first results on fh

---
 paper/examples/example2.jl |   2 +-
 paper/paper.md             | 102 +++++++++++++++++++++----------------
 2 files changed, 59 insertions(+), 45 deletions(-)

diff --git a/paper/examples/example2.jl b/paper/examples/example2.jl
index e8590f26..bb6302d3 100644
--- a/paper/examples/example2.jl
+++ b/paper/examples/example2.jl
@@ -8,7 +8,7 @@ model, _, _ = RegularizedProblems.fh_model()
 
 # Define the Hessian approximation
-f = LBFGSModel(fh_model)
+f = LBFGSModel(model)
 
 # Define the nonsmooth regularizer (L1 norm)
 λ = 0.1

diff --git a/paper/paper.md b/paper/paper.md
index 290676a6..3e54f21e 100644
--- a/paper/paper.md
+++ b/paper/paper.md
@@ -74,71 +74,69 @@ This modularity makes it easy to benchmark existing solvers available in the rep
 
-## Support for inexact subproblem solves
+## Support for Hessians
 
-Solvers in **RegularizedOptimization.jl** allow inexact resolution of trust-region and quadratic-regularized subproblems using first-order that are implemented in the package itself such as the quadratic regularization method R2[@aravkin-baraldi-orban-2022] and R2DH[@diouane-habiboullah-orban-2024] with trust-region variants TRDH[@leconte-orban-2023-2].
+In contrast to packages of first-order methods such as [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), **RegularizedOptimization.jl** enables the use of second-order information, which can significantly improve convergence rates, especially for ill-conditioned problems.
+A way to use Hessians is via automatic differentiation tools such as [ADNLPModels.jl](https://github.com/JuliaSmoothOptimizers/ADNLPModels.jl).
 
-This is crucial for large-scale problems where exact subproblem solutions are prohibitive.
+## Requirements of the ShiftedProximalOperators.jl package
+
+The nonsmooth part $h$ must have a computable proximal mapping, defined as
+$$\text{prox}_{h}(v) = \underset{x \in \mathbb{R}^n}{\arg\min} \left( h(x) + \frac{1}{2} \|x - v\|^2 \right).$$
+This requirement is satisfied by a wide range of nonsmooth functions commonly used in practice, such as the $\ell_1$ norm, the $\ell_0$ "norm", indicator functions of convex sets, and group sparsity-inducing norms.
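+
+As a minimal illustration of this requirement (with an assumed unit proximal step), the proximal mapping of the $\ell_1$ norm can be evaluated with [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl) as follows:
+
+```julia
+using ProximalOperators
+
+h = NormL1(1.0)          # nonsmooth term h(x) = λ‖x‖₁ with λ = 1
+v = [1.5, -0.2, 0.7]     # point at which the proximal mapping is evaluated
+x, hx = prox(h, v, 1.0)  # x minimizes h(x) + ‖x - v‖²/2; hx = h(x)
+```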
+The package [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl) provides a comprehensive collection of such functions, along with their proximal mappings.
+The main difference with the proximal operators implemented in
+[ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl)
+is that those implemented in ShiftedProximalOperators.jl involve a translation of the nonsmooth term.
+Specifically, this package considers proximal operators defined as
+$$
+  \underset{t \in \mathbb{R}^n}{\arg\min} \, \left\{ \tfrac{1}{2} \|t - q\|_2^2 + \nu h(x + s + t) + \chi(s + t; \Delta \mathbb{B}) \right\},
+$$
+where $q$ is given, $x$ and $s$ are fixed shifts, $h$ is the nonsmooth term with respect
+to which we are computing the proximal operator, and $\chi(\cdot\,; \Delta \mathbb{B})$ is the indicator of
+a ball of radius $\Delta$ defined by a certain norm.
+
+## Testing and documentation
+
+The package includes a comprehensive suite of unit tests that cover all functionalities, ensuring reliability and correctness.
+Extensive documentation is provided, including a user guide, API reference, and examples to help users get started quickly.
+Aqua.jl is used to run quality-assurance checks, including checks of the package dependencies.
+Documentation is built using Documenter.jl.
+
+## Hyperparameter tuning
+
+The solvers in **RegularizedOptimization.jl** do not require extensive hyperparameter tuning.
+
+## Non-monotone strategies
+
+The solvers in **RegularizedOptimization.jl** implement non-monotone strategies to accept trial points, which can enhance convergence properties.
+
+## Application studies
+
+The package can be applied to the exact penality work by [@diouane-gollier-orban-2024] that addresses a problem where the model of the nonsmooth part is different from the function $h$.
+This is not covered in the current version of the competitive package [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl).

-# Load MNIST from MLDatasets -imgs, labels = MLDatasets.MNIST.traindata() +## Support for inexact subproblem solves -# Use RegularizedProblems' preprocessing -A, b = RegularizedProblems.generate_data(imgs, labels, (1, 7), false) +Solvers in **RegularizedOptimization.jl** allow inexact resolution of trust-region and quadratic-regularized subproblems using first-order that are implemented in the package itself such as the quadratic regularization method R2[@aravkin-baraldi-orban-2022] and R2DH[@diouane-habiboullah-orban-2024] with trust-region variants TRDH[@leconte-orban-2023-2]. -# Build the models -model, _, _ = RegularizedProblems.svm_model(A, b) +This is crucial for large-scale problems where exact subproblem solutions are prohibitive. -# Define the Hessian approximation -f = LBFGSModel(model) +## Support for Hessians as Linear Operators -# Define the nonsmooth regularizer (L0 norm) -λ = 1.0e-1 -h = NormL0(λ) +The second-order methods in **RegularizedOptimization.jl** can use Hessian approximations represented as linear operators via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl). +Explicitly forming Hessians as dense or sparse matrices is often prohibitively expensive, both computationally and in terms of memory, especially in high-dimensional settings. +In contrast, many problems admit efficient implementations of Hessian–vector or Jacobian–vector products, either through automatic differentiation tools or limited-memory quasi-Newton updates, making the linear-operator approach more scalable and practical. -# Define the regularized NLP model -reg_nlp = RegularizedNLPModel(f, h) +## In-place methods -# Choose a solver (R2DH) and execution statistics tracker -solver_r2dh= R2DHSolver(reg_nlp) -stats = RegularizedExecutionStats(reg_nlp) +All solvers in **RegularizedOptimization.jl** are implemented in an in-place fashion, minimizing memory allocations during the resolution process. -# Solve the problem -solve!(solver_r2dh, reg_nlp, stats, x = f.meta.x0, σk = 1e-6, atol = 2e-5, rtol = 2e-5, verbose = 1) +# Examples -``` +We consider two examples where the smooth part $f$ is nonconvex and the nonsmooth part $h$ is either the $\ell_0$ or $\ell_1$ norm. -Another example is the FitzHugh-Nagumo inverse problem with an $\ell_1$ penalty, as described in [@aravkin-baraldi-orban-2022] and [@aravkin-baraldi-orban-2024]. +A first example is the FitzHugh-Nagumo inverse problem with an $\ell_1$ penalty, as described in [@aravkin-baraldi-orban-2022] and [@aravkin-baraldi-orban-2024]. 
```julia using LinearAlgebra @@ -167,6 +165,22 @@ stats = RegularizedExecutionStats(reg_nlp) solve!(solver_tr, reg_nlp, stats, x = f.meta.x0, atol = 1e-3, rtol = 1e-4, verbose = 10, ν = 1.0e+2) ``` +```` +=== Comparaison PANOC vs TR (FH_smooth_term) === +PANOC : + itérations = 81 + # f évaluations = 188 + # ∇f évaluations = 188 + # prox appels (g) = 107 + solution (≈) = [-0.0, 0.19071674721048656, 1.037084478194805, -0.0, -0.0] + +TR : + statut = first_order + # f évaluations = 65 + # ∇f évaluations = 52 + solution (≈) = [0.0, 0.1910326406395867, 1.0357773976471938, 0.0, 0.0] + ```` + # Acknowledgements Mohamed Laghdaf Habiboullah is supported by an excellence FRQNT grant, From 37540189e479e2c2a6c83c457142eb354f596bf0 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Tue, 16 Sep 2025 11:11:00 -0400 Subject: [PATCH 15/42] Test if the workflow works --- .github/workflows/draft-pdf.yml | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/.github/workflows/draft-pdf.yml b/.github/workflows/draft-pdf.yml index 44fa02d1..1f6fc01c 100644 --- a/.github/workflows/draft-pdf.yml +++ b/.github/workflows/draft-pdf.yml @@ -4,6 +4,7 @@ on: branches: - paper - paper-draft + - paper-workflow pull_request: types: [opened, synchronize, reopened] @@ -20,7 +21,7 @@ jobs: journal: joss # This should be the path to the paper within your repo. paper-path: paper/paper.md - - name: Upload + - name: Upload pdf artifact uses: actions/upload-artifact@v4 with: name: paper @@ -28,3 +29,24 @@ jobs: # PDF. Note, this should be the same directory as the input # paper.md path: paper/paper.pdf + - name: Create release + if: github.event_name == 'push' + uses: rymndhng/release-on-push-action@master + id: release + with: + bump_version_scheme: patch + tag_prefix: v + release_body: "" + use_github_release_notes: true + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Upload PDF to release + if: github.event_name == 'push' + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: paper/paper.pdf + asset_name: joss-draft.pdf + tag: ${{ steps.release.outputs.tag_name }} + overwrite: true + body: "" From 3ac9d452c8f2347835466da4d9077c7cd28b2203 Mon Sep 17 00:00:00 2001 From: Mohamed Laghdaf <81633807+MohamedLaghdafHABIBOULLAH@users.noreply.github.com> Date: Mon, 22 Sep 2025 00:31:15 -0400 Subject: [PATCH 16/42] Apply suggestions from code review Co-authored-by: Maxence Gollier <134112149+MaxenceGollier@users.noreply.github.com> Co-authored-by: YD1 <77547975+d1Lab@users.noreply.github.com> --- paper/paper.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index 3e54f21e..1eeca12a 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -32,7 +32,7 @@ header-includes: | # Summary -[RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) is a Julia [@bezanson-edelman-karpinski-shah-2017] package that implements a family of regularization and trust-region type algorithms for solving nonsmooth optimization problems of the form: +[RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) is a Julia [@bezanson-edelman-karpinski-shah-2017] package that implements a family of quadratic regularization and trust-region type algorithms for solving nonsmooth optimization problems of the form: \begin{equation}\label{eq:nlp} \underset{x \in \mathbb{R}^n}{\text{minimize}} \quad f(x) + h(x), \end{equation} @@ 
@@ -43,11 +43,11 @@ The library provides a modular and extensible framework for experimenting some n
 
 - **Trust-region methods (TR, TRDH)** [@aravkin-baraldi-orban-2022] and [@leconte-orban-2023],
 - **Quadratic regularization methods (R2, R2N)** [@diouane-habiboullah-orban-2024] and [@aravkin-baraldi-orban-2022],
-- **Levenbergh-Marquardt methods (LM, LMTR)** [@aravkin-baraldi-orban-2024].
+- **Levenberg-Marquardt methods (LM, LMTR)** [@aravkin-baraldi-orban-2024].
 
 These methods rely solely on the gradient and Hessian(-vector) information of the smooth part $f$ and the proximal mapping of the nonsmooth part $h$ in order to compute steps.
 Then, the objective function $f + h$ is used only to accept or reject trial points.
-Moreover, they can handle cases where Hessian approximations are unbounded[@diouane-habiboullah-orban-2024] and [@leconte-orban-2023-2], making the package particularly suited for large-scale, ill-conditioned, and nonsmooth problems.
+Moreover, they can handle cases where Hessian approximations are unbounded [@diouane-habiboullah-orban-2024] and [@leconte-orban-2023-2], making the package particularly suited for large-scale, ill-conditioned, and nonsmooth problems.
 
 # Statement of need
 
@@ -113,7 +113,7 @@ The solvers in **RegularizedOptimization.jl** implement non-monotone strategies
 
 ## Application studies
 
-The package can be applied to the exact penality work by [@diouane-gollier-orban-2024] that addresses a problem where the model of the nonsmooth part is different from the function $h$.
+The package is used in the exact penalty work of [@diouane-gollier-orban-2024] to solve a problem where the model of the nonsmooth part differs from the function $h$.
 This is not covered in the current version of the competitive package [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl).
 
 ## Support for inexact subproblem solves
 
@@ -183,7 +183,7 @@ TR:
 
 # Acknowledgements
 
-Mohamed Laghdaf Habiboullah is supported by an excellence FRQNT grant,
-and Youssef Diouane and Dominique Orban are partially supported by an NSERC Discovery Grant.
+Mohamed Laghdaf Habiboullah is supported by an excellence FRQNT grant.
+Youssef Diouane and Dominique Orban are partially supported by an NSERC Discovery Grant.
 
 # References

From 7c1856a3625fc9133966ff091743e35c74c12968 Mon Sep 17 00:00:00 2001
From: MohamedLaghdafHABIBOULLAH
Date: Mon, 22 Sep 2025 00:53:33 -0400
Subject: [PATCH 17/42] include new changes

---
 .gitignore     |  4 ++++
 paper/paper.md | 18 ++++++++++++------
 2 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/.gitignore b/.gitignore
index 3a421994..b94e2c2f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,5 @@
 paper/examples/Manifest.toml
+paper/jats/paper.jats
+paper/jats/jso-packages.pdf
+paper/paper.pdf
+paper/jso-packages.pdf

diff --git a/paper/paper.md b/paper/paper.md
index 1eeca12a..45a170e2 100644
--- a/paper/paper.md
+++ b/paper/paper.md
@@ -39,11 +39,14 @@ header-includes: |
 where $f: \mathbb{R}^n \to \mathbb{R}$ is continuously differentiable on $\mathbb{R}^n$, and $h: \mathbb{R}^n \to \mathbb{R} \cup \{+\infty\}$ is lower semi-continuous.
 Both $f$ and $h$ may be
 nonconvex.
 
-The library provides a modular and extensible framework for experimenting some nonsmooth nonconvex optimization algorithms, including:
+The library provides a modular and extensible framework for experimenting with nonsmooth and nonconvex optimization algorithms, including:
 - **Trust-region methods (TR, TRDH)** [@aravkin-baraldi-orban-2022] and [@leconte-orban-2023],
 - **Quadratic regularization methods (R2, R2N)** [@diouane-habiboullah-orban-2024] and [@aravkin-baraldi-orban-2022],
 - **Levenberg-Marquardt methods (LM, LMTR)** [@aravkin-baraldi-orban-2024].
+- **Trust-region methods (TR, TRDH)** [@aravkin-baraldi-orban-2022;@leconte-orban-2023],
+- **Quadratic regularization methods (R2, R2N)** [@diouane-habiboullah-orban-2024;@aravkin-baraldi-orban-2022],
+- **Levenberg-Marquardt methods (LM, LMTR)** [@aravkin-baraldi-orban-2024].
 
 These methods rely solely on the gradient and Hessian(-vector) information of the smooth part $f$ and the proximal mapping of the nonsmooth part $h$ in order to compute steps.
 Then, the objective function $f + h$ is used only to accept or reject trial points.
@@ -53,7 +56,7 @@ Moreover, they can handle cases where Hessian approximations are unbounded [@dio
 
 ## Model-based framework for nonsmooth methods
 
-There exists a way to solve \eqref{eq:nlp} in Julia using [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), which implements in-place first-order line search–based methods for composite optimization.
+There exists a way to solve \eqref{eq:nlp} in Julia using [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), which implements in-place first-order line search–based methods for \eqref{eq:nlp}.
 Most of these methods are generally splitting schemes that alternate between taking steps along the gradient of the smooth part $f$ (or quasi-Newton directions) and applying proximal steps on the nonsmooth part $h$.
 Currently, **ProximalAlgorithms.jl** provides only L-BFGS as a quasi-Newton option.
 By contrast, **RegularizedOptimization.jl** focuses on model-based approaches such as trust-region and regularization algorithms.
@@ -72,7 +75,7 @@ On the other hand, Hessian approximations of these functions, including quasi-Ne
 
 Finally, nonsmooth terms $h$ can be modeled using [ProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ProximalOperators.jl), which provides a broad collection of nonsmooth functions, together with [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl), which provides shifted proximal mappings for nonsmooth functions.
 
-This modularity makes it easy to benchmark existing solvers available in the repository [@diouane-habiboullah-orban-2024], [@aravkin-baraldi-orban-2022], [@aravkin-baraldi-orban-2024], and [@leconte-orban-2023-2].
+This modularity makes it easy to benchmark existing solvers available in the repository [@diouane-habiboullah-orban-2024;@aravkin-baraldi-orban-2022;@aravkin-baraldi-orban-2024;@leconte-orban-2023-2].
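To make that modularity concrete, the following is a minimal, self-contained sketch (not taken from the paper itself) of how the pieces compose: the smooth term comes from ADNLPModels.jl, the nonsmooth term from ProximalOperators.jl, and the solver calls (`RegularizedNLPModel`, `R2DHSolver`, `RegularizedExecutionStats`, `solve!`) follow the API shown in the examples elsewhere in this series; the toy objective and tolerances are illustrative assumptions.

```julia
# Minimal composition sketch on a toy problem; names follow the API used in
# the paper's own examples, the objective and tolerances are illustrative.
using ADNLPModels, ProximalOperators, RegularizedProblems, RegularizedOptimization

# Smooth part f: a small nonconvex objective, differentiated automatically.
f = ADNLPModel(x -> sum((x .^ 2 .- 1.0) .^ 2), zeros(2))

# Nonsmooth part h: an ℓ1 penalty with weight λ = 0.1.
h = NormL1(1.0e-1)

# Compose the regularized model and solve it in place.
reg_nlp = RegularizedNLPModel(f, h)
solver = R2DHSolver(reg_nlp)
stats = RegularizedExecutionStats(reg_nlp)
solve!(solver, reg_nlp, stats, x = f.meta.x0, atol = 1.0e-5, rtol = 1.0e-5)
```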
 ## Support for Hessians
 
@@ -83,7 +86,7 @@ A way to use Hessians is via automatic differentiation tools such as [ADNLPModel
 
 The nonsmooth part $h$ must have a computable proximal mapping, defined as
 $$\text{prox}_{h}(v) = \underset{x \in \mathbb{R}^n}{\arg\min} \left( h(x) + \frac{1}{2} \|x - v\|^2 \right).$$
-This requirement is satisfied by a wide range of nonsmooth functions commonly used in practice, such as the $\ell_1$ norm, the $\ell_0$ "norm", indicator functions of convex sets, and group sparsity-inducing norms.
+This requirement is satisfied by a wide range of nonsmooth functions commonly used in practice, such as the $\ell_1$ norm, the $\ell_0$ "norm", indicator functions of convex sets, and group sparsity-inducing norms.
 The package [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl) provides a comprehensive collection of such functions, along with their proximal mappings.
 
 The main difference between the proximal operators implemented in [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl)
@@ -96,6 +99,9 @@ where q is given, x and s are fixed shifts, h is the nonsmooth term with respect
 to which we are computing the proximal operator, and χ(.; ΔB) is the indicator of
 a ball of radius Δ defined by a certain norm.
 
+![Composition of JSO packages](jso-packages.pdf){ width=70% }
+
+
 ## Testing and documentation
 
 The package includes a comprehensive suite of unit tests that cover all functionalities, ensuring reliability and correctness.
 Documentation is built using Documenter.jl.
 
 ## Hyperparameter tuning
 
 The solvers in **RegularizedOptimization.jl** do not require extensive hyperparameter tuning.
 
 ## Non-monotone strategies
 
 The solvers in **RegularizedOptimization.jl** implement non-monotone strategies to accept trial points, which can enhance convergence properties.
 
@@ -134,7 +140,7 @@ All solvers in **RegularizedOptimization.jl** are implemented in an in-place fas
 
 # Examples
 
-We consider two examples where the smooth part $f$ is nonconvex and the nonsmooth part $h$ is either the $\ell_0$ or $\ell_1$ norm.
+We consider two examples where the smooth part $f$ is nonconvex and the nonsmooth part $h$ is either the $\ell_0$ or $\ell_1$ norm.
 
 A first example is the FitzHugh-Nagumo inverse problem with an $\ell_1$ penalty, as described in [@aravkin-baraldi-orban-2022] and [@aravkin-baraldi-orban-2024].
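For context before the next hunk, here is a hedged sketch of the setup this example relies on; the name `FH_smooth_term` is taken from the PANOC/TR comparison output earlier in this series, its return values are assumed rather than confirmed by the paper, and the penalty weight is an illustrative choice.

```julia
# Hypothetical setup for the FitzHugh-Nagumo example; the signature of
# RegularizedProblems.FH_smooth_term() is an assumption, not the paper's code.
using ADNLPModels, NLPModelsModifiers, ProximalOperators
using RegularizedProblems, RegularizedOptimization

data, simulate, resid, misfit = RegularizedProblems.FH_smooth_term()  # assumed return values
f = LBFGSModel(ADNLPModel(misfit, ones(5)))  # smooth part with an L-BFGS Hessian approximation
h = NormL1(1.0e-1)                           # ℓ1 penalty; weight chosen for illustration
reg_nlp = RegularizedNLPModel(f, h)
solver_tr = TRSolver(reg_nlp)                # the solver whose call the hunk below adjusts
```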
@@ -162,7 +168,7 @@ solver_tr = TRSolver(reg_nlp)
 stats = RegularizedExecutionStats(reg_nlp)
 
 # Solve the problem
-solve!(solver_tr, reg_nlp, stats, x = f.meta.x0, atol = 1e-3, rtol = 1e-4, verbose = 10, ν = 1.0e+2)
+solve!(solver_tr, reg_nlp, stats, x = f.meta.x0, atol = 1e-3, rtol = 1e-4, verbose = 10)
 ```
 
 ````

From f7cca943d1a14bd8a2331e3dc75f9fba292adf99 Mon Sep 17 00:00:00 2001
From: MohamedLaghdafHABIBOULLAH
Date: Mon, 22 Sep 2025 08:24:14 -0400
Subject: [PATCH 18/42] Remove jso-packages.pdf from the .gitignore file

---
 .gitignore             |   1 -
 paper/jso-packages.pdf | Bin 0 -> 64578 bytes
 2 files changed, 1 deletion(-)
 create mode 100644 paper/jso-packages.pdf

diff --git a/.gitignore b/.gitignore
index b94e2c2f..c68fff95 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,3 @@ paper/examples/Manifest.toml
 paper/jats/paper.jats
 paper/jats/jso-packages.pdf
 paper/paper.pdf
-paper/jso-packages.pdf

diff --git a/paper/jso-packages.pdf b/paper/jso-packages.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..77683eb673eaa3d6ddddad1ed11b8c3e5a374712
GIT binary patch
literal 64578

[64578 bytes of base85-encoded PDF data omitted]
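The "Support for Hessians as Linear Operators" section added earlier in this series can be illustrated with a short sketch using LinearOperators.jl's limited-memory L-BFGS operator; the update vectors below are made-up data, and only calls documented by that package (`LBFGSOperator`, `push!`, `*`) are used.

```julia
# Sketch: a limited-memory L-BFGS Hessian approximation as a linear operator,
# supporting Hessian-vector products without ever forming a matrix.
using LinearOperators

n = 4
B = LBFGSOperator(n)          # identity-initialized L-BFGS approximation
s = [1.0, 0.0, 0.0, 0.0]      # step s = x₊ - x (made-up data)
y = [2.0, 0.0, 0.0, 0.0]      # gradient difference y = ∇f(x₊) - ∇f(x)
push!(B, s, y)                # quasi-Newton update from the pair (s, y)

v = ones(n)
Bv = B * v                    # product B * v without assembling B
```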
From bff58921c983aeb14ad3fe9c0ba77aad82bb5ae7 Mon Sep 17 00:00:00 2001
From: MohamedLaghdafHABIBOULLAH
Date: Thu, 25 Sep 2025 12:46:31 -0400
Subject: [PATCH 19/42] Review the paper draft

---
 paper/paper.md | 66 +++++++++++++++++++++++++++-----------------------
 1 file changed, 36 insertions(+), 30 deletions(-)

diff --git a/paper/paper.md b/paper/paper.md
index 45a170e2..6ffa4927 100644
--- a/paper/paper.md
+++ b/paper/paper.md
@@ -41,16 +41,13 @@ Both $f$ and $h$ may be
nonconvex.
The library provides a modular and extensible framework for experimenting with nonsmooth and nonconvex optimization algorithms, including:

-- **Trust-region methods (TR, TRDH)** [@aravkin-baraldi-orban-2022] and [@leconte-orban-2023],
-- **Quadratic regularization methods (R2, R2N)** [@diouane-habiboullah-orban-2024] and [@aravkin-baraldi-orban-2022],
-- **Levenberg-Marquardt methods (LM, LMTR)** [@aravkin-baraldi-orban-2024].
- **Trust-region methods (TR, TRDH)** [@aravkin-baraldi-orban-2022;@leconte-orban-2023],
- **Quadratic regularization methods (R2, R2N)** [@diouane-habiboullah-orban-2024;@aravkin-baraldi-orban-2022],
- **Levenberg-Marquardt methods (LM, LMTR)** [@aravkin-baraldi-orban-2024].

These methods rely solely on the gradient and Hessian(-vector) information of the smooth part $f$ and the proximal mapping of the nonsmooth part $h$ to compute steps.
The objective function $f + h$ is then used only to accept or reject trial points.

Moreover, they can handle cases where Hessian approximations are unbounded [@diouane-habiboullah-orban-2024;@leconte-orban-2023-2], making the package particularly suited for large-scale, ill-conditioned, and nonsmooth problems.

# Statement of need

Problem \eqref{eq:nlp} can also be solved in Julia with [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), which implements in-place first-order line-search-based methods.
Most of these methods are splitting schemes that alternate between taking steps along the gradient of the smooth part $f$ (or quasi-Newton directions) and applying proximal steps on the nonsmooth part $h$.
Currently, [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) provides only L-BFGS as a quasi-Newton option.
By contrast, [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) focuses on model-based approaches such as trust-region and regularization algorithms.
As shown in [@aravkin-baraldi-orban-2022], model-based methods typically require fewer evaluations of the objective and its gradient than first-order line-search methods, at the expense of solving more involved subproblems.
Although these subproblems may require many proximal iterations, each proximal computation is inexpensive, making the overall approach efficient for large-scale problems.

Building on this perspective, [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) implements state-of-the-art regularization-based algorithms for solving problems of the form $f(x) + h(x)$, where $f$ is smooth and $h$ is nonsmooth.
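To make the intended workflow concrete before describing the API, here is a minimal sketch; it is not taken from the package documentation, and the toy quadratic objective and tolerances are illustrative assumptions:

```julia
using ADNLPModels, NLPModelsModifiers, ProximalOperators
using RegularizedOptimization, RegularizedProblems

# Toy smooth term f wrapped in an LBFGS quasi-Newton model; any NLPModel works here.
f = LBFGSModel(ADNLPModel(x -> 0.5 * sum((x .- 1) .^ 2), zeros(10)))
h = NormL1(1.0)                      # nonsmooth regularizer
reg_nlp = RegularizedNLPModel(f, h)  # the problem f + h
solver = R2NSolver(reg_nlp)
stats = RegularizedExecutionStats(reg_nlp)
solve!(solver, reg_nlp, stats, x = f.meta.x0, atol = 1e-4, rtol = 1e-4)
```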
The package provides a consistent API to formulate optimization problems and apply different regularization methods.
It integrates seamlessly with the [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) ecosystem, an academic organization for nonlinear optimization software development, testing, and benchmarking.

@@ -79,9 +76,20 @@ This modularity makes it easy to benchmark existing solvers available in the rep

## Support for Hessians

-In contrast to first-order methods package like [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), **RegularizedOptimization.jl** enables the use of second-order information, which can significantly improve convergence rates, especially for ill-conditioned problems.
+In contrast to first-order packages like [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) enables the use of second-order information, which can significantly improve convergence rates, especially for ill-conditioned problems.
One way to obtain Hessians is via automatic differentiation tools such as [ADNLPModels.jl](https://github.com/JuliaSmoothOptimizers/ADNLPModels.jl).

+## Requirements of the RegularizedProblems.jl package
+
+To model the problem \eqref{eq:nlp}, one defines the smooth part $f$ and the nonsmooth part $h$ as discussed above.
+The package [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl) provides a straightforward way to create such instances, called *Regularized Nonlinear Programming Models*:
+
+```julia
+reg_nlp = RegularizedNLPModel(f, h)
+```
+
+This design makes it a convenient source of reproducible problem instances for testing and benchmarking algorithms in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl).
+
## Requirements of the ShiftedProximalOperators.jl package

The nonsmooth part $h$ must have a computable proximal mapping, defined as
@@ -95,9 +103,9 @@ Specifically, this package considers proximal operators defined as
$$
  argmin \, { \tfrac{1}{2} ‖t - q‖₂² + ν h(x + s + t) + χ(s + t; ΔB) | t ∈ ℝⁿ },
$$
-where q is given, x and s are fixed shifts, h is the nonsmooth term with respect
-to which we are computing the proximal operator, and χ(.; ΔB) is the indicator of
-a ball of radius Δ defined by a certain norm.
+where $q$ is given, $x$ and $s$ are fixed shifts, $h$ is the nonsmooth term with respect
+to which we are computing the proximal operator, and $\chi(\cdot; \Delta B)$ is the indicator of
+a ball of radius $\Delta$ defined by a certain norm.

![Composition of JSO packages](jso-packages.pdf){ width=70% }

Extensive documentation is provided, including a user guide, API reference, and examples.
Aqua.jl is used to test the package dependencies.
Documentation is built using Documenter.jl.

## Hyperparameter tuning

-The solvers in **RegularizedOptimization.jl** do not require extensive hyperparameter tuning.
+The solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) do not require extensive hyperparameter tuning.

## Non-monotone strategies

-The solvers in **RegularizedOptimization.jl** implement non-monotone strategies to accept trial points, which can enhance convergence properties.
+The solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) implement non-monotone strategies to accept trial points, which can enhance convergence properties.
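Returning to the shifted proximal operators described above, the following minimal sketch illustrates how such an operator might be evaluated; the `shifted` and `prox!` calls are assumed from ShiftedProximalOperators.jl, and the data are illustrative:

```julia
using ProximalOperators, ShiftedProximalOperators

h  = NormL0(1.0)      # nonsmooth term
xk = ones(5)          # fixed shift x (e.g., the current iterate)
ψ  = shifted(h, xk)   # represents t ↦ h(xk + t)
q  = randn(5)
ν  = 0.5
t  = similar(q)
prox!(t, ψ, q, ν)     # t ≈ argmin_t ½‖t - q‖² + ν h(xk + t)
```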
## Application studies

The package is used in the exact penalty work of [@diouane-gollier-orban-2024] to solve a problem where the model of the nonsmooth part differs from the function $h$.
This is not covered in the current version of the competitive package [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl).

## Support for inexact subproblem solves

-Solvers in **RegularizedOptimization.jl** allow inexact resolution of trust-region and quadratic-regularized subproblems using first-order that are implemented in the package itself such as the quadratic regularization method R2[@aravkin-baraldi-orban-2022] and R2DH[@diouane-habiboullah-orban-2024] with trust-region variants TRDH[@leconte-orban-2023-2].
+Solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) allow inexact resolution of trust-region and quadratic-regularized subproblems using first-order methods implemented in the package itself, such as the quadratic regularization methods R2 [@aravkin-baraldi-orban-2022] and R2DH [@diouane-habiboullah-orban-2024], and the trust-region variant TRDH [@leconte-orban-2023-2].

This is crucial for large-scale problems where exact subproblem solutions are prohibitive.

## Support for Hessians as Linear Operators

-The second-order methods in **RegularizedOptimization.jl** can use Hessian approximations represented as linear operators via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl).
+The second-order methods in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) can use Hessian approximations represented as linear operators via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl).

Explicitly forming Hessians as dense or sparse matrices is often prohibitively expensive, both computationally and in terms of memory, especially in high-dimensional settings.
In contrast, many problems admit efficient implementations of Hessian–vector or Jacobian–vector products, either through automatic differentiation tools or limited-memory quasi-Newton updates, making the linear-operator approach more scalable and practical.

## In-place methods

-All solvers in **RegularizedOptimization.jl** are implemented in an in-place fashion, minimizing memory allocations during the resolution process.
+All solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) are implemented in an in-place fashion, minimizing memory allocations during the resolution process.
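To illustrate the in-place design, here is a hypothetical sketch of reusing one solver workspace and one statistics object across several solves; `reg_nlp` is assumed to be a `RegularizedNLPModel` built as above, and the field names are assumptions:

```julia
# Allocate the solver workspace and statistics once...
solver = TRSolver(reg_nlp)
stats  = RegularizedExecutionStats(reg_nlp)

# ...then reuse them across repeated solves without fresh allocations.
for atol in (1e-2, 1e-3, 1e-4)
    solve!(solver, reg_nlp, stats, x = reg_nlp.model.meta.x0, atol = atol)
end
```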
# Examples

We consider two examples where the smooth part $f$ is nonconvex and the nonsmooth part $h$ is either the $\ell_0$ or the $\ell_1$ norm.
A first example is the FitzHugh-Nagumo inverse problem with an $\ell_1$ penalty, as described in [@aravkin-baraldi-orban-2022] and [@aravkin-baraldi-orban-2024].

```julia
using LinearAlgebra
using ProximalOperators
using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization
using DifferentialEquations, ADNLPModels

# Define the FitzHugh-Nagumo problem
model, _, _ = RegularizedProblems.fh_model()
+x0 = 0.1 * ones(model.meta.nvar) # initial guess

# Define the Hessian approximation
f = LBFGSModel(model)

# Define the nonsmooth regularizer (L0 norm)
λ = 0.1
-h = NormL1(λ)
+h = NormL0(λ)

# Define the regularized NLP model
reg_nlp = RegularizedNLPModel(f, h)

@@ -171,20 +180,17 @@ stats = RegularizedExecutionStats(reg_nlp)
 solve!(solver_tr, reg_nlp, stats, x = f.meta.x0, atol = 1e-3, rtol = 1e-4, verbose = 10)
 ```

+Compare the performance of different solvers on this problem:
+
 ````
-=== Comparison PANOC vs TR (FH_smooth_term) ===
-PANOC:
-  iterations       = 81
-  # f evaluations  = 188
-  # ∇f evaluations = 188
-  # prox calls (g) = 107
-  solution (≈)     = [-0.0, 0.19071674721048656, 1.037084478194805, -0.0, -0.0]
-
-TR:
-  status           = first_order
-  # f evaluations  = 65
-  # ∇f evaluations = 52
-  solution (≈)     = [0.0, 0.1910326406395867, 1.0357773976471938, 0.0, 0.0]
+
+┌──────────────────┬───────────────────┬────────────────────┬───────────┬────────────┬────────────────┐
+│ Method           │ Status            │ Time (s)           │ #f        │ #∇f        │ #prox          │
+├──────────────────┼───────────────────┼────────────────────┼───────────┼────────────┼────────────────┤
+│ PANOC            │ first_order       │ 1.2794             │ 188       │ 188        │ 107            │
+│ TR(LBFGS)        │ first_order       │ 3.0748             │ 113       │ 92         │ missing        │
+│ R2N(LBFGS)       │ first_order       │ 0.5582             │ 112       │ 65         │ missing        │
+└──────────────────┴───────────────────┴────────────────────┴───────────┴────────────┴────────────────┘
 ````

 # Acknowledgements

From f4c0c2e96cc928c1c23d2e3d18587623301eead6 Mon Sep 17 00:00:00 2001
From: Mohamed Laghdaf <81633807+MohamedLaghdafHABIBOULLAH@users.noreply.github.com>
Date: Thu, 25 Sep 2025 12:55:49 -0400
Subject: [PATCH 20/42] Update paper/paper.md

Co-authored-by: Maxence Gollier <134112149+MaxenceGollier@users.noreply.github.com>
---
 paper/paper.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paper/paper.md b/paper/paper.md
index 6ffa4927..a13a2352 100644
--- a/paper/paper.md
+++ b/paper/paper.md
@@ -101,7 +101,7 @@ The main difference between the proximal operators implemented in
 is that those implemented here involve a translation of the nonsmooth term.
 Specifically, this package considers proximal operators defined as
 $$
-  argmin \, { \tfrac{1}{2} ‖t - q‖₂² + ν h(x + s + t) + χ(s + t; ΔB) | t ∈ ℝⁿ },
+  \underset{t \in \mathbb{R}^n}{\arg\min} \, \tfrac{1}{2} \|t - q\|_2^2 + \nu h(x + s + t) + \chi(s + t; \Delta B),
 $$
 where $q$ is given, $x$ and $s$ are fixed shifts, $h$ is the nonsmooth term with respect

From f523011355e65965c44e40097e57706be3d4a0a2 Mon Sep 17 00:00:00 2001
From: MohamedLaghdafHABIBOULLAH
Date: Thu, 25 Sep 2025 13:06:49 -0400
Subject: [PATCH 21/42] Refine documentation by removing hyperparameter tuning section and enhancing non-monotone strategies description for improved clarity.

---
 paper/paper.md | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/paper/paper.md b/paper/paper.md
index a13a2352..3d18e2cf 100644
--- a/paper/paper.md
+++ b/paper/paper.md
@@ -117,13 +117,9 @@ Extensive documentation is provided, including a user guide, API reference, and
 Aqua.jl is used to test the package dependencies.
 Documentation is built using Documenter.jl.
-## Hyperparameter tuning - -The solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) do not require extensive hyperparameter tuning. - ## Non-monotone strategies -The solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) implement non-monotone strategies to accept trial points, which can enhance convergence properties. +The solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) implement non-monotone strategies to accept trial points, which can enhance algorithmic performance in practice [@leconte-orban-2023;@diouane-habiboullah-orban-2024]. ## Application studies From 4152a563838367fcddcf02252c252525bd7adb27 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Thu, 25 Sep 2025 14:01:47 -0400 Subject: [PATCH 22/42] Enhance documentation by refining the section on Hessians, clarifying support for second-order information --- paper/paper.md | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index 3d18e2cf..51c94e8f 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -72,12 +72,11 @@ On the other hand, Hessian approximations of these functions, including quasi-Ne Finally, nonsmooth terms $h$ can be modeled using [ProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ProximalOperators.jl), which provides a broad collection of nonsmooth functions, together with [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl), which provides shifted proximal mappings for nonsmooth functions. -This modularity makes it easy to benchmark existing solvers available in the repository [@diouane-habiboullah-orban-2024;@aravkin-baraldi-orban-2022;@aravkin-baraldi-orban-2024;@leconte-orban-2023-2]. +## Support for Hessians of the smooth part $f$ -## Support for Hessians - -In contrast to first-order methods package like [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) enables the use of second-order information, which can significantly improve convergence rates, especially for ill-conditioned problems. -A way to use Hessians is via automatic differentiation tools such as [ADNLPModels.jl](https://github.com/JuliaSmoothOptimizers/ADNLPModels.jl). +In contrast to [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) methods such as **R2N** and **TR** support Hessians of $f$, which can significantly improve convergence rates, especially for ill-conditioned problems. +Hessians can be obtained via automatic differentiation through [ADNLPModels.jl](https://github.com/JuliaSmoothOptimizers/ADNLPModels.jl) or supplied directly as Hessian–vector products $v \mapsto Hv$. +This enables algorithms to exploit second-order information without explicitly forming dense (or sparse) Hessians, which is often prohibitively expensive in both computation and memory, particularly in high-dimensional settings. 
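As a minimal illustration of supplying second-order information, the sketch below evaluates a Hessian-vector product through the NLPModels.jl `hprod` API on a toy objective; the objective itself is an assumption made for illustration:

```julia
using ADNLPModels, NLPModels

# Toy smooth objective; derivatives come from automatic differentiation.
f = ADNLPModel(x -> sum(100 .* (x[2:end] .- x[1:end-1] .^ 2) .^ 2), zeros(4))
x = f.meta.x0
v = ones(f.meta.nvar)
Hv = hprod(f, x, v)  # v ↦ ∇²f(x) v, computed without forming the Hessian matrix
```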
## Requirements of the RegularizedProblems.jl package

To model the problem \eqref{eq:nlp}, one defines the smooth part $f$ and the nonsmooth part $h$ as discussed above.
The package [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl) provides a straightforward way to create such instances, called *Regularized Nonlinear Programming Models*:

```julia
reg_nlp = RegularizedNLPModel(f, h)
```

-This design makes it a convenient source of reproducible problem instances for testing and benchmarking algorithms in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl).
+This design makes it a convenient source of reproducible problem instances for testing and benchmarking algorithms in the repository [@diouane-habiboullah-orban-2024;@aravkin-baraldi-orban-2022;@aravkin-baraldi-orban-2024;@leconte-orban-2023-2].

## Requirements of the ShiftedProximalOperators.jl package

@@ -132,12 +131,6 @@
 Solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) allow inexact resolution of trust-region and quadratic-regularized subproblems using first-order methods implemented in the package itself, such as the quadratic regularization methods R2 [@aravkin-baraldi-orban-2022] and R2DH [@diouane-habiboullah-orban-2024], and the trust-region variant TRDH [@leconte-orban-2023-2].

 This is crucial for large-scale problems where exact subproblem solutions are prohibitive.

-## Support for Hessians as Linear Operators
-
-The second-order methods in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) can use Hessian approximations represented as linear operators via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl).
-Explicitly forming Hessians as dense or sparse matrices is often prohibitively expensive, both computationally and in terms of memory, especially in high-dimensional settings.
-In contrast, many problems admit efficient implementations of Hessian–vector or Jacobian–vector products, either through automatic differentiation tools or limited-memory quasi-Newton updates, making the linear-operator approach more scalable and practical.
-
## In-place methods

All solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) are implemented in an in-place fashion, minimizing memory allocations during the resolution process.

From 5cf8f249e897af9b010d89f1ff731c39e7561ea4 Mon Sep 17 00:00:00 2001
From: MohamedLaghdafHABIBOULLAH
Date: Mon, 29 Sep 2025 10:02:37 -0400
Subject: [PATCH 23/42] final version

---
 paper/examples/Bench-utils.jl       |  49 ++++++
 paper/examples/benchmark-fh.jl      | 202 +++++++++++++++++++++
 paper/examples/benchmark-nnmf.jl    | 223 ++++++++++++++++++++++
 paper/examples/benchmark-svm.jl     | 215 +++++++++++++++++++++
 paper/examples/comparison-config.jl |  27 ++++
 paper/paper.bib                     |  18 +++
 paper/paper.md                      | 184 +++++++++++++++++++--
 7 files changed, 902 insertions(+), 16 deletions(-)
 create mode 100644 paper/examples/Bench-utils.jl
 create mode 100644 paper/examples/benchmark-fh.jl
 create mode 100644 paper/examples/benchmark-nnmf.jl
 create mode 100644 paper/examples/benchmark-svm.jl
 create mode 100644 paper/examples/comparison-config.jl

diff --git a/paper/examples/Bench-utils.jl b/paper/examples/Bench-utils.jl
new file mode 100644
index 00000000..486ec18c
--- /dev/null
+++ b/paper/examples/Bench-utils.jl
@@ -0,0 +1,49 @@
+module BenchUtils
+
+using ProximalAlgorithms
+using ProximalCore
+using ADNLPModels, NLPModels
+
+export Counting, reset_counters!
+
+(f::ADNLPModel)(x) = obj(f, x)
+function ProximalAlgorithms.value_and_gradient(f::ADNLPModel, x)
+    return obj(f, x), grad(f, x)
+end
+
+(f::AbstractNLPModel)(x) = obj(f, x)
+
+function ProximalAlgorithms.value_and_gradient(f::AbstractNLPModel, x)
+    return obj(f, x), grad(f, x)
+end
+
+
+"Counting wrapper for f or g (tracks #obj, #∇f, #prox)."
+mutable struct Counting{T}
+    f::T
+    eval_count::Int
+    gradient_count::Int
+    prox_count::Int
+end
+Counting(f::T) where {T} = Counting{T}(f, 0, 0, 0)
+
+# f(x)
+(f::Counting)(x) = (f.eval_count += 1; f.f(x))
+
+# (f, ∇f)
+function ProximalAlgorithms.value_and_gradient(f::Counting, x)
+    f.eval_count += 1
+    f.gradient_count += 1
+    return ProximalAlgorithms.value_and_gradient(f.f, x)
+end
+
+# prox!(y, g, x, γ)
+function ProximalCore.prox!(y, g::Counting, x, γ)
+    g.prox_count += 1
+    return ProximalCore.prox!(y, g.f, x, γ)
+end
+
+"Reset the counters of a Counting wrapper."
+reset_counters!(c::Counting) = (c.eval_count = 0; c.gradient_count = 0; c.prox_count = 0; nothing)
+
+end # module
diff --git a/paper/examples/benchmark-fh.jl b/paper/examples/benchmark-fh.jl
new file mode 100644
index 00000000..05d70ab3
--- /dev/null
+++ b/paper/examples/benchmark-fh.jl
@@ -0,0 +1,202 @@
+
+#############################
+# ======== IMPORTS ======== #
+#############################
+using Random, LinearAlgebra
+using ProximalOperators, ProximalCore, ProximalAlgorithms
+using ADNLPModels, NLPModels, NLPModelsModifiers
+using RegularizedOptimization, RegularizedProblems
+using DifferentialEquations, SciMLSensitivity
+
+include("comparison-config.jl")
+using .ComparisonConfig: CFG, CFG2
+
+include("Bench-utils.jl")
+using .BenchUtils
+
+function print_config(CFG)
+    println("Configuration:")
+    println("  SEED            = $(CFG.SEED)")
+    println("  LAMBDA_L0       = $(CFG.LAMBDA_L0)")
+    println("  TOL             = $(CFG.TOL)")
+    println("  RTOL            = $(CFG.RTOL)")
+    println("  MAXIT_PANOC     = $(CFG.MAXIT_PANOC)")
+    println("  VERBOSE_PANOC   = $(CFG.VERBOSE_PANOC)")
+    println("  VERBOSE_RO      = $(CFG.VERBOSE_RO)")
+    println("  RUN_SOLVERS     = $(CFG.RUN_SOLVERS)")
+    println("  QN_FOR_TR       = $(CFG.QN_FOR_TR)")
+    println("  QN_FOR_R2N      = $(CFG.QN_FOR_R2N)")
+    println("  SUB_KWARGS_R2N  = $(CFG.SUB_KWARGS_R2N)")
+    println("  SIGMAK_R2N      = $(CFG.SIGMAK_R2N)")
+    println("  X0_SCALAR       = $(CFG.X0_SCALAR)")
+    println("  PRINT_TABLE     = $(CFG.PRINT_TABLE)")
+    println("  OPNORM_MAXITER  = $(CFG.OPNORM_MAXITER)")
+    println("  HESSIAN_SCALE   = $(CFG.HESSIAN_SCALE)")
+end
+
+#############################
+# ===== PROBLEM (FH) ====== #
+#############################
+Random.seed!(CFG.SEED)
+
+# If a custom fh_model() wrapper returning (model, misfit?, x*) is defined, use it
+if @isdefined fh_model
+    model, _, x_true = fh_model()
+else
+    # Fallback: build the model from RegularizedProblems (the FH problem has 5 parameters)
+    _, _, _, misfit, _ = RegularizedProblems.FH_smooth_term()
+    model = ADNLPModel(misfit, CFG.X0_SCALAR .* ones(5); matrix_free = true)
+    x_true = nothing
+end
+
+x0 = CFG.X0_SCALAR .* ones(length(model.meta.x0))
+
+#############################
+# ======= PANOC run ======= #
+#############################
+function run_panoc!(model, x0; λ = 1.0, maxit = 500, tol = 1e-3, verbose = false)
+    f = BenchUtils.Counting(model)
+    g = BenchUtils.Counting(NormL0(λ))
+    algo = ProximalAlgorithms.PANOC(maxit = maxit, tol = tol, verbose = verbose)
+    t = @elapsed x̂, it = algo(x0 = x0, f = f, g = g)
+    metrics = (
+        name = "PANOC",
+        status = "first_order",
+        time = t,
+        iters = it,
+        fevals = f.eval_count,
+        gevals = f.gradient_count,
+        proxcalls = g.prox_count,
+        solution = x̂,
+    )
+    return metrics
+end
+
+#############################
+# ======== TR run ========= #
+#############################
+function ensure_qn(model, which::Symbol)
+    which === :LBFGS && return LBFGSModel(model)
+    which === :LSR1 && return LSR1Model(model)
+    error("Unknown QN: $which (expected :LBFGS or :LSR1)")
+end
+
+function run_tr!(model, x0; λ = 1.0, qn = :LSR1, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), opnorm_maxiter = 4, scaling_factor = 1.0)
+    qn_model = ensure_qn(model, qn)
+    reset!(qn_model) # reset the evaluation counters
+    qn_model.op.data.scaling_factor = scaling_factor
+    reg_nlp = RegularizedNLPModel(qn_model, NormL0(λ))
+    solver = TRSolver(reg_nlp)
+    stats = RegularizedExecutionStats(reg_nlp)
+    t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats;
+        x = x0, atol = atol, rtol = rtol, verbose = verbose, opnorm_maxiter = opnorm_maxiter, sub_kwargs = sub_kwargs)
+    metrics = (
+        name = "TR($(String(qn)))",
+        status = string(stats.status),
+        time = t,
+        iters = get(stats.solver_specific, :outer_iter, missing),
+        fevals = neval_obj(qn_model),
+        gevals = neval_grad(qn_model),
+        proxcalls = stats.solver_specific[:prox_evals],
+        solution = stats.solution,
+    )
+    return metrics
+end
+
+#############################
+# ======== R2N run ======== #
+#############################
+function run_r2n!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), σk = 1e5, opnorm_maxiter = 4)
+    qn_model = ensure_qn(model, qn)
+    reset!(qn_model)
+    reg_nlp = RegularizedNLPModel(qn_model, NormL0(λ))
+    solver = R2NSolver(reg_nlp, m_monotone = 10)
+    stats = RegularizedExecutionStats(reg_nlp)
+    t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats;
+        x = x0, atol = atol, rtol = rtol, σk = σk,
+        verbose = verbose, sub_kwargs = sub_kwargs, opnorm_maxiter = opnorm_maxiter)
+    metrics = (
+        name = "R2N($(String(qn)))",
+        status = string(stats.status),
+        time = t,
+        iters = get(stats.solver_specific, :outer_iter, missing),
+        fevals = neval_obj(qn_model),
+        gevals = neval_grad(qn_model),
+        proxcalls = stats.solver_specific[:prox_evals],
+        solution = stats.solution,
+    )
+    return metrics
+end
+
+#############################
+# ========= RUNS ========== #
+#############################
+results = NamedTuple[]
+
+if :PANOC in CFG.RUN_SOLVERS
+    push!(results, run_panoc!(model, x0; λ = CFG.LAMBDA_L0, maxit = CFG.MAXIT_PANOC, tol = CFG.TOL, verbose = CFG.VERBOSE_PANOC))
+end
+if :TR in CFG.RUN_SOLVERS
+    push!(results, run_tr!(model, x0; λ = CFG.LAMBDA_L0, qn = CFG.QN_FOR_R2N, atol = CFG.TOL, rtol = CFG.RTOL, verbose = CFG.VERBOSE_RO, sub_kwargs = CFG.SUB_KWARGS_R2N, opnorm_maxiter = CFG.OPNORM_MAXITER, scaling_factor = CFG.HESSIAN_SCALE)) # also test LBFGS
+end
+if :R2N in CFG.RUN_SOLVERS
+    push!(results, run_r2n!(model, x0; λ = CFG.LAMBDA_L0, qn = CFG.QN_FOR_R2N, atol = CFG.TOL, rtol = CFG.RTOL,
+        verbose = CFG.VERBOSE_RO, sub_kwargs = CFG.SUB_KWARGS_R2N, σk = CFG.SIGMAK_R2N, opnorm_maxiter = CFG.OPNORM_MAXITER))
+end
+
+using PrettyTables
+
+#############################
+# ======== OUTPUT ========= #
+#############################
+if x_true !== nothing
+    println("=== True solution (≈) ===")
+    println(x_true)
+end
+
+println("\n=== Solver comparison ===")
+for m in results
+    println("\n→ ", m.name)
+    println("   status       = ", m.status)
+    println("   time (s)     = ", round(m.time, digits=4))
+    if m.iters !== missing
+        println("   iterations   = ", m.iters)
+    end
+    println("   # f eval     = ", m.fevals)
+    println("   # ∇f eval    = ", m.gevals)
+    if m.proxcalls !== missing
+        println("   # prox calls = ", m.proxcalls)
+    end
+    println("   solution (≈) = ", m.solution)
+end
+
+println("\n")
+print_config(CFG)
+
+if CFG.PRINT_TABLE
+    println("\nSummary:")
+    # Build the table rows
+    data = [
+        (; name=m.name,
+           status=string(m.status),
+           time=round(m.time, digits=4),
+           fe=m.fevals,
+           ge=m.gevals,
+           prox = m.proxcalls === missing ? missing : Int(m.proxcalls))
+        for m in results
+    ]
+
+    # Headers
+    table_str = pretty_table(String,
+        data;
+        header = ["Method", "Status", "Time (s)", "#f", "#∇f", "#prox"],
+        tf = tf_unicode,
+        alignment = [:l, :c, :r, :r, :r, :r],
+        crop = :none,
+    )
+
+    open("FH-comparison.txt", "w") do io
+        write(io, table_str)
+    end
+end
\ No newline at end of file
diff --git a/paper/examples/benchmark-nnmf.jl b/paper/examples/benchmark-nnmf.jl
new file mode 100644
index 00000000..97a95cbc
--- /dev/null
+++ b/paper/examples/benchmark-nnmf.jl
@@ -0,0 +1,223 @@
+
+#############################
+# ======== IMPORTS ======== #
+#############################
+using Random, LinearAlgebra
+using ProximalOperators, ProximalCore, ProximalAlgorithms
+using ADNLPModels, NLPModels, NLPModelsModifiers
+using RegularizedOptimization, RegularizedProblems
+using ShiftedProximalOperators
+using MLDatasets
+
+include("comparison-config.jl")
+using .ComparisonConfig: CFG3
+
+include("Bench-utils.jl")
+using .BenchUtils
+
+function print_config(CFG3)
+    println("Configuration:")
+    println("  SEED            = $(CFG3.SEED)")
+    println("  LAMBDA_L0       = $(CFG3.LAMBDA_L0)")
+    println("  TOL             = $(CFG3.TOL)")
+    println("  RTOL            = $(CFG3.RTOL)")
+    println("  MAXIT_PANOC     = $(CFG3.MAXIT_PANOC)")
+    println("  VERBOSE_PANOC   = $(CFG3.VERBOSE_PANOC)")
+    println("  VERBOSE_RO      = $(CFG3.VERBOSE_RO)")
+    println("  RUN_SOLVERS     = $(CFG3.RUN_SOLVERS)")
+    println("  QN_FOR_TR       = $(CFG3.QN_FOR_TR)")
+    println("  QN_FOR_R2N      = $(CFG3.QN_FOR_R2N)")
+    println("  SUB_KWARGS_R2N  = $(CFG3.SUB_KWARGS_R2N)")
+    println("  SIGMAK_R2N      = $(CFG3.SIGMAK_R2N)")
+    println("  X0_SCALAR       = $(CFG3.X0_SCALAR)")
+    println("  PRINT_TABLE     = $(CFG3.PRINT_TABLE)")
+    println("  OPNORM_MAXITER  = $(CFG3.OPNORM_MAXITER)")
+    println("  HESSIAN_SCALE   = $(CFG3.HESSIAN_SCALE)")
+end
+
+acc = vec -> length(findall(x -> x < 1, vec)) / length(vec) * 100 # for SVM
+
+#############################
+# ===== PROBLEM (NNMF) ==== #
+#############################
+Random.seed!(CFG3.SEED)
+
+m, n, k = 100, 50, 5
+model, nls_model, A, selected = nnmf_model(m, n, k)
+
+x0 = rand(model.meta.nvar)
+
+CFG3.LAMBDA_L0 = norm(grad(model, rand(model.meta.nvar)), Inf) / 200
+#############################
+# ======= PANOC run ======= #
+#############################
+function run_panoc!(model, x0; λ = 1.0, maxit = 500, tol = 1e-3, verbose = false)
+    λ = norm(grad(model, rand(model.meta.nvar)), Inf) / 200
+    f = BenchUtils.Counting(model)
+    g = BenchUtils.Counting(NormL0(λ))
+    algo = ProximalAlgorithms.PANOC(maxit = maxit, tol = tol, verbose = verbose)
+    t = @elapsed x̂, it = algo(x0 = x0, f = f, g = g)
+    metrics = (
+        name = "PANOC",
+        status = "first_order",
+        time = t,
+        iters = it,
+        fevals = f.eval_count,
+        gevals = f.gradient_count,
+        proxcalls = g.prox_count,
+        solution = x̂,
+    )
+    return metrics
+end
+
+#############################
+# ======== TR run ========= #
+#############################
+function ensure_qn(model, which::Symbol)
+    which === :LBFGS && return LBFGSModel(model)
+    which === :LSR1 && return LSR1Model(model)
+    error("Unknown QN: $which (expected :LBFGS or :LSR1)")
+end
+
+function run_tr!(model, x0; λ = 1.0, qn = :LSR1, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), selected = selected)
+    qn_model = ensure_qn(model, qn)
+    reset!(qn_model) # reset the evaluation counters
+    reg_nlp = RegularizedNLPModel(qn_model, NormL0(λ), selected)
+    solver = TRSolver(reg_nlp)
+    stats = RegularizedExecutionStats(reg_nlp)
+    t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats;
+        x = x0, atol = atol, rtol = rtol, verbose = verbose, opnorm_maxiter = 30, sub_kwargs = sub_kwargs)
+    metrics = (
+        name = "TR($(String(qn)))",
+        status = string(stats.status),
+        time = t,
+        iters = get(stats.solver_specific, :outer_iter, missing),
+        fevals = neval_obj(qn_model),
+        gevals = neval_grad(qn_model),
+        proxcalls = stats.solver_specific[:prox_evals],
+        solution = stats.solution,
+    )
+    return metrics
+end
+
+#############################
+# ======== R2N run ======== #
+#############################
+function run_r2n!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), σk = 1e5)
+    qn_model = ensure_qn(model, qn)
+    reset!(qn_model)
+    reg_nlp = RegularizedNLPModel(qn_model, NormL0(λ))
+    solver = R2NSolver(reg_nlp, m_monotone = 10)
+    stats = RegularizedExecutionStats(reg_nlp)
+    t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats;
+        x = x0, atol = atol, rtol = rtol, σk = σk,
+        verbose = verbose, sub_kwargs = sub_kwargs, opnorm_maxiter = 30)
+    metrics = (
+        name = "R2N($(String(qn)))",
+        status = string(stats.status),
+        time = t,
+        iters = get(stats.solver_specific, :outer_iter, missing),
+        fevals = neval_obj(qn_model),
+        gevals = neval_grad(qn_model),
+        proxcalls = stats.solver_specific[:prox_evals],
+        solution = stats.solution,
+    )
+    return metrics
+end
+
+#############################
+# ======== LM run ========= #
+#############################
+function run_LM!(nls_model, x0; λ = 1.0, atol = 1e-3, rtol = 1e-3, verbose = 0, σk = 1e0)
+    reg_nlp = RegularizedNLSModel(nls_model, NormL0(λ))
+    solver = LMSolver(reg_nlp)
+    stats = RegularizedExecutionStats(reg_nlp)
+    t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats;
+        x = x0, atol = atol, rtol = rtol, σk = σk,
+        verbose = verbose)
+    metrics = (
+        name = "LM",
+        status = string(stats.status),
+        time = t,
+        iters = get(stats.solver_specific, :outer_iter, missing),
+        fevals = neval_residual(nls_model),
+        gevals = neval_jtprod_residual(nls_model) + neval_jprod_residual(nls_model),
+        proxcalls = stats.solver_specific[:prox_evals],
+        solution = stats.solution,
+    )
+    return metrics
+end
+
+#############################
+# ========= RUNS ========== #
+#############################
+results = NamedTuple[]
+
+# if :PANOC in CFG3.RUN_SOLVERS
+#     push!(results, run_panoc!(model, x0; λ = CFG3.LAMBDA_L0, maxit = CFG3.MAXIT_PANOC, tol = CFG3.TOL, verbose = CFG3.VERBOSE_PANOC))
+# end
+if :TR in CFG3.RUN_SOLVERS
+    push!(results, run_tr!(model, x0; λ = CFG3.LAMBDA_L0, qn = CFG3.QN_FOR_TR, atol = CFG3.TOL, rtol = CFG3.RTOL, verbose = CFG3.VERBOSE_RO, sub_kwargs = CFG3.SUB_KWARGS_R2N,))
+end
+if :R2N in CFG3.RUN_SOLVERS
+    push!(results, run_r2n!(model, x0; λ = CFG3.LAMBDA_L0, qn = CFG3.QN_FOR_R2N, atol = CFG3.TOL, rtol = CFG3.RTOL,
+        verbose = CFG3.VERBOSE_RO, sub_kwargs = CFG3.SUB_KWARGS_R2N, σk = CFG3.SIGMAK_R2N))
+end
+if :LM in CFG3.RUN_SOLVERS
+    push!(results, run_LM!(nls_model, x0; λ = CFG3.LAMBDA_L0, atol = CFG3.TOL, rtol = CFG3.RTOL,
+        verbose = CFG3.VERBOSE_RO, σk = CFG3.SIGMAK_R2N))
+end
+
+using PrettyTables
+
+#############################
+# ======== OUTPUT ========= #
+#############################
+
+println("\n=== Solver comparison ===")
+for m in results
+    println("\n→ ", m.name)
+    println("   status       = ", m.status)
+    println("   time (s)     = ", round(m.time, digits=4))
+    if m.iters !== missing
+        println("   iterations   = ", m.iters)
+    end
+    println("   # f eval     = ", m.fevals)
+    println("   # ∇f eval    = ", m.gevals)
+    if m.proxcalls !== missing
+        println("   # prox calls = ", Int(m.proxcalls))
+    end
+    println("   final objective = ", round(obj(model, m.solution), digits=4))
+end
+
+println("\n")
+print_config(CFG3)
+
+if CFG3.PRINT_TABLE
+    println("\nSummary:")
+    # Build the table rows
+    data = [
+        (; name=m.name,
+           status=string(m.status),
+           time=round(m.time, digits=4),
+           fe=m.fevals,
+           ge=m.gevals,
+           prox = m.proxcalls === missing ? missing : Int(m.proxcalls))
+        for m in results
+    ]
+
+    # Headers
+    table_str = pretty_table(String,
+        data;
+        header = ["Method", "Status", "Time (s)", "#f", "#∇f", "#prox"],
+        tf = tf_unicode,
+        alignment = [:l, :c, :r, :r, :r, :r],
+        crop = :none,
+    )
+
+    open("NNMF-comparison.txt", "w") do io
+        write(io, table_str)
+    end
+end
\ No newline at end of file
diff --git a/paper/examples/benchmark-svm.jl b/paper/examples/benchmark-svm.jl
new file mode 100644
index 00000000..7f6d8989
--- /dev/null
+++ b/paper/examples/benchmark-svm.jl
@@ -0,0 +1,215 @@
+
+#############################
+# ======== IMPORTS ======== #
+#############################
+using Random, LinearAlgebra
+using ProximalOperators, ProximalCore, ProximalAlgorithms
+using ADNLPModels, NLPModels, NLPModelsModifiers
+using RegularizedOptimization, RegularizedProblems
+using MLDatasets
+
+include("comparison-config.jl")
+using .ComparisonConfig: CFG2
+
+include("Bench-utils.jl")
+using .BenchUtils
+
+function print_config(CFG2)
+    println("Configuration:")
+    println("  SEED            = $(CFG2.SEED)")
+    println("  LAMBDA_L0       = $(CFG2.LAMBDA_L0)")
+    println("  TOL             = $(CFG2.TOL)")
+    println("  RTOL            = $(CFG2.RTOL)")
+    println("  MAXIT_PANOC     = $(CFG2.MAXIT_PANOC)")
+    println("  VERBOSE_PANOC   = $(CFG2.VERBOSE_PANOC)")
+    println("  VERBOSE_RO      = $(CFG2.VERBOSE_RO)")
+    println("  RUN_SOLVERS     = $(CFG2.RUN_SOLVERS)")
+    println("  QN_FOR_TR       = $(CFG2.QN_FOR_TR)")
+    println("  QN_FOR_R2N      = $(CFG2.QN_FOR_R2N)")
+    println("  SUB_KWARGS_R2N  = $(CFG2.SUB_KWARGS_R2N)")
+    println("  SIGMAK_R2N      = $(CFG2.SIGMAK_R2N)")
+    println("  X0_SCALAR       = $(CFG2.X0_SCALAR)")
+    println("  PRINT_TABLE     = $(CFG2.PRINT_TABLE)")
+end
+
+acc = vec -> length(findall(x -> x < 1, vec)) / length(vec) * 100 # for SVM
+
+#############################
+# ===== PROBLEM (SVM) ===== #
+#############################
+Random.seed!(CFG2.SEED)
+
+model, nls_train, _ = RegularizedProblems.svm_train_model()
+x0 = model.meta.x0
+
+#############################
+# ======= PANOC run ======= #
+#############################
+function run_panoc!(model, x0; λ = 1.0, maxit = 500, tol = 1e-3, verbose = false)
+    f = BenchUtils.Counting(model)
+    g = BenchUtils.Counting(RootNormLhalf(λ))
+    algo = ProximalAlgorithms.PANOC(maxit = maxit, tol = tol, verbose = verbose)
+    t = @elapsed x̂, it = algo(x0 = x0, f = f, g = g)
+    metrics = (
+        name = "PANOC",
+        status = "first_order",
+        time = t,
+        iters = it,
+        fevals = f.eval_count,
+        gevals = f.gradient_count,
+        proxcalls = g.prox_count,
+        solution = x̂,
+    )
+    return metrics
+end
+
+#############################
+# ======== TR run ========= #
+#############################
+function ensure_qn(model, which::Symbol)
+    which === :LBFGS && return LBFGSModel(model)
+    which === :LSR1 && return LSR1Model(model)
+    error("Unknown QN: $which (expected :LBFGS or :LSR1)")
+end
+
+function run_tr!(model, x0; λ = 1.0, qn = :LSR1, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;))
+    qn_model = ensure_qn(model, qn)
+    reset!(qn_model) # reset the evaluation counters
+    reg_nlp = RegularizedNLPModel(qn_model, RootNormLhalf(λ))
+    solver = TRSolver(reg_nlp)
+    stats = RegularizedExecutionStats(reg_nlp)
+    t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats;
+        x = x0, atol = atol, rtol = rtol, verbose = verbose, opnorm_maxiter = 20, sub_kwargs = sub_kwargs)#, max_iter = 400)
+    metrics = (
+        name = "TR($(String(qn)))",
+        status = string(stats.status),
+        time = t,
+        iters = get(stats.solver_specific, :outer_iter, missing),
+        fevals = neval_obj(qn_model),
+        gevals = neval_grad(qn_model),
+        proxcalls = stats.solver_specific[:prox_evals],
+        solution = stats.solution,
+    )
+    return metrics
+end
+
+#############################
+# ======== R2N run ======== #
+#############################
+function run_r2n!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), σk = 1e5)
+    qn_model = ensure_qn(model, qn)
+    reset!(qn_model)
+    reg_nlp = RegularizedNLPModel(qn_model, RootNormLhalf(λ))
+    solver = R2NSolver(reg_nlp)
+    stats = RegularizedExecutionStats(reg_nlp)
+    t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats;
+        x = x0, atol = atol, rtol = rtol, σk = σk,
+        verbose = verbose, sub_kwargs = sub_kwargs, opnorm_maxiter = 20)
+    metrics = (
+        name = "R2N($(String(qn)))",
+        status = string(stats.status),
+        time = t,
+        iters = get(stats.solver_specific, :outer_iter, missing),
+        fevals = neval_obj(qn_model),
+        gevals = neval_grad(qn_model),
+        proxcalls = stats.solver_specific[:prox_evals],
+        solution = stats.solution,
+    )
+    return metrics
+end
+
+#############################
+# ======== LM run ========= #
+#############################
+function run_LM!(nls_model, x0; λ = 1.0, atol = 1e-3, rtol = 1e-3, verbose = 0, σk = 1e0)
+    reg_nlp = RegularizedNLSModel(nls_model, RootNormLhalf(λ))
+    solver = LMSolver(reg_nlp)
+    stats = RegularizedExecutionStats(reg_nlp)
+    t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats;
+        x = x0, atol = atol, rtol = rtol, σk = σk,
+        verbose = verbose)
+    metrics = (
+        name = "LM",
+        status = string(stats.status),
+        time = t,
+        iters = get(stats.solver_specific, :outer_iter, missing),
+        fevals = neval_residual(nls_model),
+        gevals = neval_jtprod_residual(nls_model) + neval_jprod_residual(nls_model),
+        proxcalls = stats.solver_specific[:prox_evals],
+        solution = stats.solution,
+    )
+    return metrics
+end
+
+#############################
+# ========= RUNS ========== #
+#############################
+results = NamedTuple[]
+
+if :PANOC in CFG2.RUN_SOLVERS
+    push!(results, run_panoc!(model, x0; λ = CFG2.LAMBDA_L0, maxit = CFG2.MAXIT_PANOC, tol = CFG2.TOL, verbose = CFG2.VERBOSE_PANOC))
+end
+if :TR in CFG2.RUN_SOLVERS
+    push!(results, run_tr!(model, x0; λ = CFG2.LAMBDA_L0, qn = CFG2.QN_FOR_TR, atol = CFG2.TOL, rtol = CFG2.RTOL, verbose = CFG2.VERBOSE_RO, sub_kwargs = CFG2.SUB_KWARGS_R2N,))
+end
+if :R2N in CFG2.RUN_SOLVERS
+    push!(results, run_r2n!(model, x0; λ = CFG2.LAMBDA_L0, qn = CFG2.QN_FOR_R2N, atol = CFG2.TOL, rtol = CFG2.RTOL,
+        verbose = CFG2.VERBOSE_RO, sub_kwargs = CFG2.SUB_KWARGS_R2N, σk = CFG2.SIGMAK_R2N))
+end
+
+
+using PrettyTables
+
+#############################
+# ======== OUTPUT ========= #
+#############################
+
+println("\n=== Solver comparison ===")
+for m in results
+    println("\n→ ", m.name)
+    println("   status       = ", m.status)
+    println("   time (s)     = ", round(m.time, digits=4))
+    if m.iters !== missing
+        println("   iterations   = ", m.iters)
+    end
+    println("   # f eval     = ", m.fevals)
+    println("   # ∇f eval    = ", m.gevals)
+    if m.proxcalls !== missing
+        println("   # prox calls = ", Int(m.proxcalls))
+    end
+    println("   final objective = ", round(obj(model, m.solution), digits=4))
+    println("   accuracy (%) = ", round(acc(residual(nls_train, m.solution)), digits=1))
+end
+
+println("\n")
+print_config(CFG2)
+
+if CFG2.PRINT_TABLE
+    println("\nSummary:")
+    # Build the table rows
+    data = [
+        (; name=m.name,
+           status=string(m.status),
+           time=round(m.time, digits=4),
+           fe=m.fevals,
+           ge=m.gevals,
+           prox = m.proxcalls === missing ? missing : Int(m.proxcalls))
+        for m in results
+    ]
+
+    # Headers
+    table_str = pretty_table(String,
+        data;
+        header = ["Method", "Status", "Time (s)", "#f", "#∇f", "#prox"],
+        tf = tf_unicode,
+        alignment = [:l, :c, :r, :r, :r, :r],
+        crop = :none,
+    )
+
+    open("SVM-comparison.txt", "w") do io
+        write(io, table_str)
+    end
+
+end
\ No newline at end of file
diff --git a/paper/examples/comparison-config.jl b/paper/examples/comparison-config.jl
new file mode 100644
index 00000000..fac78e65
--- /dev/null
+++ b/paper/examples/comparison-config.jl
@@ -0,0 +1,27 @@
+module ComparisonConfig
+
+Base.@kwdef mutable struct Config
+    SEED::Int = 1234
+    LAMBDA_L0::Float64 = 1.0
+    TOL::Float64 = 1e-3
+    RTOL::Float64 = 1e-3
+    MAXIT_PANOC::Int = 500
+    VERBOSE_PANOC::Bool = false
+    VERBOSE_RO::Int = 0
+    RUN_SOLVERS::Vector{Symbol} = [:PANOC, :TR, :R2N] # mutable
+    QN_FOR_TR::Symbol = :LSR1
+    QN_FOR_R2N::Symbol = :LBFGS
+    SUB_KWARGS_R2N::NamedTuple = (; max_iter = 200)
+    SIGMAK_R2N::Float64 = 1e5
+    X0_SCALAR::Float64 = 0.1
+    PRINT_TABLE::Bool = true
+    OPNORM_MAXITER::Int = 4
+    HESSIAN_SCALE::Float64 = 1e-4
+end
+
+# One global, constant *binding* to a mutable object = type stable & editable
+const CFG = Config()
+const CFG2 = Config(SIGMAK_R2N=eps()^(1/3), TOL = 1e-4, RTOL = 1e-4)
+const CFG3 = Config(SIGMAK_R2N=1e3, TOL = 1e-4, RTOL = 1e-4, RUN_SOLVERS = [:LM, :TR, :R2N])
+
+end # module
\ No newline at end of file
diff --git a/paper/paper.bib b/paper/paper.bib
index c962afe7..560eb5e4 100644
--- a/paper/paper.bib
+++ b/paper/paper.bib
@@ -119,3 +119,21 @@ @Misc{migot-orban-siqueira-optimizationproblems-2023
   doi = {10.5281/zenodo.3672094},
   url = {https://github.com/JuliaSmoothOptimizers/OptimizationProblems.jl},
 }
+
+@TechReport{ kim-park-2008,
+  Author = {Jingu Kim and Haesun Park},
+  Title = {Sparse Nonnegative Matrix Factorization for Clustering},
+  Institution = {Georgia Institute of Technology},
+  Number = {GT-CSE-08-01},
+  Year = 2008,
+  url = {http://hdl.handle.net/1853/20058},
+}
+
+@InProceedings{ stella-themelis-sopasakis-patrinos-2017,
+  Author = {L. {Stella} and A. {Themelis} and P. {Sopasakis} and P. {Patrinos}},
+  Title = {A simple and efficient algorithm for nonlinear model predictive control},
+  Booktitle = {2017 IEEE 56th Annual Conference on Decision and Control (CDC)},
+  Year = 2017,
+  Pages = {1939--1944},
+  doi = {10.1109/CDC.2017.8263933},
+}
diff --git a/paper/paper.md b/paper/paper.md
index 51c94e8f..0363de87 100644
--- a/paper/paper.md
+++ b/paper/paper.md
@@ -44,6 +44,7 @@ The library provides a modular and extensible framework for experimenting with n
 - **Trust-region methods (TR, TRDH)** [@aravkin-baraldi-orban-2022;@leconte-orban-2023],
 - **Quadratic regularization methods (R2, R2N)** [@diouane-habiboullah-orban-2024;@aravkin-baraldi-orban-2022],
 - **Levenberg-Marquardt methods (LM, LMTR)** [@aravkin-baraldi-orban-2024].
+- **Augmented Lagrangian methods (ALTR)** (cite?).

 These methods rely solely on the gradient and Hessian(-vector) information of the smooth part $f$ and the proximal mapping of the nonsmooth part $h$ to compute steps.
The objective function $f + h$ is then used only to accept or reject trial points.

@@ -123,14 +123,16 @@ The solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimi

 ## Application studies

-The package is used in the exact penalty work of [@diouane-gollier-orban-2024] to solve a problem where the model of the nonsmooth part differs from the function $h$.
-This is not covered in the current version of the competitive package [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl).
+The package is used to solve equality-constrained optimization problems by means of the exact penalty approach [@diouane-gollier-orban-2024], where the model of the nonsmooth part differs from the function $h$ itself.
+This is not covered in the current version of the package [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl).

 ## Support for inexact subproblem solves

 Solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) allow inexact resolution of trust-region and quadratic-regularized subproblems using first-order methods implemented in the package itself, such as the quadratic regularization methods R2 [@aravkin-baraldi-orban-2022] and R2DH [@diouane-habiboullah-orban-2024], and the trust-region variant TRDH [@leconte-orban-2023-2].

 This is crucial for large-scale problems where exact subproblem solutions are prohibitive.
+Moreover, one way to outperform line-search-based methods is to solve the subproblems more accurately by performing many proximal iterations, which are inexpensive to compute, rather than relying on numerous function and gradient evaluations.
+We illustrate this in the examples below.

 ## In-place methods

 All solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) are implemented in an in-place fashion, minimizing memory allocations during the resolution process.

 # Examples

-We consider two examples where the smooth part $f$ is nonconvex and the nonsmooth part $h$ is either the $\ell_0$ or the $\ell_1$ norm.
-A first example is the FitzHugh-Nagumo inverse problem with an $\ell_1$ penalty, as described in [@aravkin-baraldi-orban-2022] and [@aravkin-baraldi-orban-2024].
+We consider three examples in which the smooth part $f$ is nonconvex and the nonsmooth part $h$ is either the $\ell^{1/2}$ or the $\ell_0$ norm, with or without constraints.
+
+We compare the performance of our solvers with the **PANOC** solver [@stella-themelis-sopasakis-patrinos-2017] implemented in [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl).
+
+## Support vector machine with $\ell^{1/2}$ penalty
+
+A first example addresses an image recognition task using a support vector machine (SVM) similar to the one in [@aravkin-baraldi-orban-2024].
+The formulation is
+$$
+\min_{x \in \mathbb{R}^n} \ \tfrac{1}{2} \|\mathbf{1} - \tanh(b \odot \langle A, x \rangle)\|^2 + \|x\|_{1/2}^{1/2},
+$$
+where $A \in \mathbb{R}^{m \times n}$, with $n = 784$ the vectorized size of each image and $m = 13{,}007$ the number of images in the training dataset.

```julia
using LinearAlgebra, Random
using ProximalOperators
using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization
using MLDatasets

random_seed = 1234
Random.seed!(random_seed)

# Build the models
model, nls_train, _ = RegularizedProblems.svm_train_model()

# Define the Hessian approximation
f = LSR1Model(model)

# Define the nonsmooth regularizer (L1/2 pseudo-norm)
λ = 1.0
h = RootNormLhalf(λ)

# Define the regularized NLP model
reg_nlp = RegularizedNLPModel(f, h)

# Choose a solver (R2N) and an execution statistics tracker
solver_r2n = R2NSolver(reg_nlp)
stats = RegularizedExecutionStats(reg_nlp)

# Max number of proximal iterations for the subproblem solver
sub_kwargs = (max_iter=200,)

# Solve the problem
solve!(solver_r2n, reg_nlp, stats, x = f.meta.x0, atol = 1e-4, rtol = 1e-4, verbose = 0, sub_kwargs = sub_kwargs)
```

````
┌───────────┬─────────────┬──────────┬──────┬──────┬───────┐
│ Method    │   Status    │ Time (s) │   #f │  #∇f │ #prox │
├───────────┼─────────────┼──────────┼──────┼──────┼───────┤
│ PANOC     │ first_order │  18.5413 │ 1434 │ 1434 │   934 │
│ TR(LSR1)  │ first_order │   5.8974 │  385 │  333 │ 11113 │
│ R2N(LSR1) │ first_order │   2.1251 │  175 │   95 │ 56971 │
└───────────┴─────────────┴──────────┴──────┴──────┴───────┘
````

We observe that both **TR** and **R2N** outperform **PANOC** in terms of the number of function and gradient evaluations and computational time, although they require more proximal iterations.
Since each proximal iteration is inexpensive, however, the overall performance is better.

## FitzHugh-Nagumo inverse problem with $\ell_0$ penalty

A second example is the FitzHugh-Nagumo inverse problem with an $\ell_0$ penalty, as described in [@aravkin-baraldi-orban-2022] and [@aravkin-baraldi-orban-2024].
This problem consists of recovering the parameters of a system of ordinary differential equations (ODEs) with sparsity constraints.
In general, evaluating the objective and its gradient is costly because it requires solving the ODE system, whereas the proximal operator of the $\ell_0$ norm is inexpensive.

```julia
using LinearAlgebra
using ProximalOperators
using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization
using DifferentialEquations, ADNLPModels

# Define the FitzHugh-Nagumo problem
model, _, _ = RegularizedProblems.fh_model()
x0 = 0.1 * ones(model.meta.nvar) # initial guess

# Define the Hessian approximation
f = LBFGSModel(model)

+# Initialize the starting Hessian approximation scaling factor
+f.op.data.scaling_factor = 1e4
+
# Define the nonsmooth regularizer (L0 norm)
-λ = 0.1
+λ = 1.0
h = NormL0(λ)

# Define the regularized NLP model
reg_nlp = RegularizedNLPModel(f, h)

# Choose a solver (TR) and an execution statistics tracker
solver_tr = TRSolver(reg_nlp)
stats = RegularizedExecutionStats(reg_nlp)

+# Max number of proximal iterations for the subproblem solver
+sub_kwargs = (max_iter=200,)
+
# Solve the problem
-solve!(solver_tr, reg_nlp, stats, x = x0, atol = 1e-3, rtol = 1e-4, verbose = 10)
+solve!(solver_tr, reg_nlp, stats, x = x0, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = sub_kwargs)
```

````
┌────────────┬─────────────┬──────────┬─────┬─────┬───────┐
│ Method     │   Status    │ Time (s) │  #f │ #∇f │ #prox │
├────────────┼─────────────┼──────────┼─────┼─────┼───────┤
│ PANOC      │ first_order │   1.3279 │ 188 │ 188 │   107 │
│ TR(LBFGS)  │ first_order │   0.4075 │  83 │  60 │ 20983 │
│ R2N(LBFGS) │ first_order │   0.4001 │  63 │  62 │ 17061 │
└────────────┴─────────────┴──────────┴─────┴─────┴───────┘
````

The observation is the same as in the previous example: **TR** and **R2N** with an LBFGS approximation of the Hessian of $f$ outperform **PANOC** in terms of the number of function and gradient evaluations and computational time, although they require more proximal iterations.

## Sparse nonnegative matrix factorization with $\ell_0$ penalty and constraints

The third experiment considers the sparse nonnegative matrix factorization (NNMF) problem introduced by [@kim-park-2008].
Let $A \in \mathbb{R}^{m \times n}$ be a nonnegative matrix whose columns correspond to observations drawn from a Gaussian mixture, with negative entries truncated to zero.

The goal is to obtain a factorization $A \approx WH$, where $W \in \mathbb{R}^{m \times k}$, $H \in \mathbb{R}^{k \times n}$, $k < \min(m,n)$, such that both factors are nonnegative and $H$ is sparse.

This leads to the optimization problem

$$
\min_{W, H \geq 0} \; \tfrac{1}{2} \| A - WH \|_F^2 + \lambda \| \operatorname{vec}(H) \|_0,
$$

where $\operatorname{vec}(H)$ denotes the column-stacked version of $H$.

Compared to the previous examples, we now consider a constrained problem with a nonsmooth and nonconvex term.

The library [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) provides solvers that can handle constraints by separating the objective into three parts: a smooth term, a nonsmooth term, and the indicator function of the constraints. However, this approach assumes that the nonsmooth part is convex, which is not the case here.

Another approach is to merge the nonsmooth term and the indicator function of the constraints into a single nonsmooth function, which is the strategy adopted here. However, the current library of proximal operators, [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl), on which [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) relies, does not provide the proximal mapping of the sum of the $\ell_0$ norm and the indicator function of the nonnegative orthant. In contrast, [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl) does implement this operator.

Therefore, to apply **PANOC** in this setting, one would first need to implement this combined proximal operator in [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl). For this reason, we do not include **PANOC** in this example.

Instead, we compare the performance of **TR** and **R2N** with that of **LM**.

```julia
using LinearAlgebra
using ProximalOperators
using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization

# Build the models
m, n, k = 100, 50, 5
model, nls_model, A, selected = nnmf_model(m, n, k)

# Define the nonsmooth regularizer (L0 norm)
λ = norm(grad(model, rand(model.meta.nvar)), Inf) / 200
h = NormL0(λ)

# Define the regularized NLS model
reg_nlp = RegularizedNLSModel(nls_model, h)

# Choose a solver (LM) and an execution statistics tracker
solver_lm = LMSolver(reg_nlp)
stats = RegularizedExecutionStats(reg_nlp)

# Solve the problem
solve!(solver_lm, reg_nlp, stats, x = nls_model.meta.x0, atol = 1e-4, rtol = 1e-4, verbose = 0)
```

```
┌────────────┬─────────────┬──────────┬────┬──────┬───────┐
│ Method     │   Status    │ Time (s) │ #f │  #∇f │ #prox │
├────────────┼─────────────┼──────────┼────┼──────┼───────┤
│ TR(LBFGS)  │ first_order │   0.1727 │ 78 │   73 │ 10231 │
│ R2N(LBFGS) │ first_order │   0.1244 │ 62 │   62 │  5763 │
│ LM         │ first_order │   1.2796 │ 11 │ 2035 │   481 │
└────────────┴─────────────┴──────────┴────┴──────┴───────┘
```

We observe that **R2N** and **TR** achieve similar performance, with **R2N** being slightly better.
Both methods outperform **LM** in terms of computational time and the number of gradient evaluations.
However, **LM** requires significantly fewer function evaluations, which is expected since it is specifically designed for nonlinear least-squares problems and can exploit the structure of the objective function more effectively.

## Conclusion

The experiments highlight the effectiveness of the solvers implemented in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) compared to **PANOC** from [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl).

The performance can be summarized as follows:

- **Function and gradient evaluations:** **TR** and **R2N** are the most efficient choices when aiming to minimize both.
- **Function evaluations only:** **LM** is preferable when the problem is a nonlinear least-squares problem, as it achieves the lowest number of function evaluations.
- **Proximal iterations:** **PANOC** requires the fewest proximal iterations. However, in most nonsmooth applications, proximal steps are relatively inexpensive, so this criterion is of limited practical relevance.

# Acknowledgements

Mohamed Laghdaf Habiboullah is supported by an FRQNT excellence grant.

From bb2b23b38e27ef2f35df41257d950c5acd672af0 Mon Sep 17 00:00:00 2001
From: MohamedLaghdafHABIBOULLAH
Date: Mon, 29 Sep 2025 14:34:50 -0400
Subject: [PATCH 24/42] add nonmonotone and use opnorm=20 for all solvers

---
 paper/examples/benchmark-fh.jl      |  5 ++--
 paper/examples/benchmark-nnmf.jl    | 27 ++++++++----------
 paper/examples/benchmark-svm.jl     | 44 ++++++++---------------------
 paper/examples/comparison-config.jl | 13 +++++----
 paper/paper.md                      | 38 +++++++++++++------------
 5 files changed, 54 insertions(+), 73 deletions(-)

diff --git a/paper/examples/benchmark-fh.jl b/paper/examples/benchmark-fh.jl
index 05d70ab3..6130dfa4 100644
--- a/paper/examples/benchmark-fh.jl
+++ b/paper/examples/benchmark-fh.jl
@@ -4,6 +4,7 @@
 #############################
 using Random, LinearAlgebra
 using ProximalOperators, ProximalCore, ProximalAlgorithms
+using ShiftedProximalOperators
 using ADNLPModels, NLPModels, NLPModelsModifiers
 using RegularizedOptimization, RegularizedProblems
 using DifferentialEquations, SciMLSensitivity
@@ -117,7 +118,7 @@ function run_r2n!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, ve
         x = x0, atol =
atol, rtol = rtol, σk = σk, - verbose = verbose, sub_kwargs = sub_kwargs, opnorm_maxiter = 30) + verbose = verbose, sub_kwargs = sub_kwargs, opnorm_maxiter = opnorm_maxiter) metrics = ( - name = "R2N($(String(qn)))", + name = "R2N($(String(qn))) Nonmonotone", status = string(stats.status), time = t, iters = get(stats.solver_specific, :outer_iter, missing), @@ -129,10 +129,10 @@ end # ======== LM run ======== # ############################# function run_LM!(nls_model, x0; λ = 1.0, atol = 1e-3, rtol = 1e-3, verbose = 0, σk = 1e0) - reg_nlp = RegularizedNLSModel(nls_model, NormL0(λ)) - solver = LMSolver(reg_nlp) - stats = RegularizedExecutionStats(reg_nlp) - t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats; + reg_nls = RegularizedNLSModel(nls_model, NormL0(λ)) + solver = LMSolver(reg_nls) + stats = RegularizedExecutionStats(reg_nls) + t = @elapsed RegularizedOptimization.solve!(solver, reg_nls, stats; x = x0, atol = atol, rtol = rtol, σk = σk, verbose = verbose) metrics = ( @@ -153,15 +153,12 @@ end ############################# results = NamedTuple[] -# if :PANOC in CFG3.RUN_SOLVERS -# push!(results, run_panoc!(model, x0; λ = CFG3.LAMBDA_L0, maxit = CFG3.MAXIT_PANOC, tol = CFG3.TOL, verbose = CFG3.VERBOSE_PANOC)) -# end if :TR in CFG3.RUN_SOLVERS - push!(results, run_tr!(model, x0; λ = CFG3.LAMBDA_L0, qn = CFG3.QN_FOR_TR, atol = CFG3.TOL, rtol = CFG3.RTOL, verbose = CFG3.VERBOSE_RO, sub_kwargs = CFG3.SUB_KWARGS_R2N,)) + push!(results, run_tr!(model, x0; λ = CFG3.LAMBDA_L0, qn = CFG3.QN_FOR_TR, atol = CFG3.TOL, rtol = CFG3.RTOL, verbose = CFG3.VERBOSE_RO, sub_kwargs = CFG3.SUB_KWARGS_R2N, opnorm_maxiter = CFG3.OPNORM_MAXITER)) end if :R2N in CFG3.RUN_SOLVERS push!(results, run_r2n!(model, x0; λ = CFG3.LAMBDA_L0, qn = CFG3.QN_FOR_R2N, atol = CFG3.TOL, rtol = CFG3.RTOL, - verbose = CFG3.VERBOSE_RO, sub_kwargs = CFG3.SUB_KWARGS_R2N, σk = CFG3.SIGMAK_R2N)) + verbose = CFG3.VERBOSE_RO, sub_kwargs = CFG3.SUB_KWARGS_R2N, σk = CFG3.SIGMAK_R2N, opnorm_maxiter = CFG3.OPNORM_MAXITER)) end if :LM in CFG3.RUN_SOLVERS push!(results, run_LM!(nls_model, x0; λ = CFG3.LAMBDA_L0, atol = CFG3.TOL, rtol = CFG3.RTOL, @@ -217,7 +214,7 @@ if CFG3.PRINT_TABLE ) - open("NNMF-comparison.txt", "w") do io + open("NNMF-comparison-f.txt", "w") do io write(io, table_str) end end \ No newline at end of file diff --git a/paper/examples/benchmark-svm.jl b/paper/examples/benchmark-svm.jl index 7f6d8989..d5b12553 100644 --- a/paper/examples/benchmark-svm.jl +++ b/paper/examples/benchmark-svm.jl @@ -5,6 +5,7 @@ using Random, LinearAlgebra using ProximalOperators, ProximalCore, ProximalAlgorithms using ADNLPModels, NLPModels, NLPModelsModifiers +using ShiftedProximalOperators using RegularizedOptimization, RegularizedProblems using MLDatasets @@ -30,6 +31,9 @@ function print_config(CFG2) println(" SIGMAK_R2N = $(CFG2.SIGMAK_R2N)") println(" X0_SCALAR = $(CFG2.X0_SCALAR)") println(" PRINT_TABLE = $(CFG2.PRINT_TABLE)") + println(" OPNORM_MAXITER = $(CFG2.OPNORM_MAXITER)") + println(" HESSIAN_SCALE = $(CFG2.HESSIAN_SCALE)") + println(" M_MONOTONE = $(CFG2.M_MONOTONE)") end acc = vec -> length(findall(x -> x < 1, vec)) / length(vec) * 100 # for SVM @@ -73,14 +77,14 @@ function ensure_qn(model, which::Symbol) error("QN inconnu: $which (attendu :LBFGS ou :LSR1)") end -function run_tr!(model, x0; λ = 1.0, qn = :LSR1, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;)) +function run_tr!(model, x0; λ = 1.0, qn = :LSR1, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), opnorm_maxiter = 20) qn_model = 
ensure_qn(model, qn) reset!(qn_model) # reset des compteurs reg_nlp = RegularizedNLPModel(qn_model, RootNormLhalf(λ)) solver = TRSolver(reg_nlp) stats = RegularizedExecutionStats(reg_nlp) t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats; - x = x0, atol = atol, rtol = rtol, verbose = verbose, opnorm_maxiter = 20, sub_kwargs = sub_kwargs)#, max_iter = 400) + x = x0, atol = atol, rtol = rtol, verbose = verbose, opnorm_maxiter = opnorm_maxiter, sub_kwargs = sub_kwargs) metrics = ( name = "TR($(String(qn)))", status = string(stats.status), @@ -97,7 +101,7 @@ end ############################# # ======== R2N run ======== # ############################# -function run_r2n!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), σk = 1e5) +function run_r2n!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), σk = 1e5, opnorm_maxiter = 20) qn_model = ensure_qn(model, qn) reset!(qn_model) reg_nlp = RegularizedNLPModel(qn_model, RootNormLhalf(λ)) @@ -105,7 +109,7 @@ function run_r2n!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, ve stats = RegularizedExecutionStats(reg_nlp) t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats; x = x0, atol = atol, rtol = rtol, σk = σk, - verbose = verbose, sub_kwargs = sub_kwargs, opnorm_maxiter = 20) + verbose = verbose, sub_kwargs = sub_kwargs, opnorm_maxiter = opnorm_maxiter) metrics = ( name = "R2N($(String(qn)))", status = string(stats.status), @@ -119,29 +123,6 @@ function run_r2n!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, ve return metrics end -############################# -# ======== LM run ======== # -############################# -function run_LM!(nls_model, x0; λ = 1.0, atol = 1e-3, rtol = 1e-3, verbose = 0, σk = 1e0) - reg_nlp = RegularizedNLSModel(nls_model, RootNormLhalf(λ)) - solver = LMSolver(reg_nlp) - stats = RegularizedExecutionStats(reg_nlp) - t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats; - x = x0, atol = atol, rtol = rtol, σk = σk, - verbose = verbose) - metrics = ( - name = "LM", - status = string(stats.status), - time = t, - iters = get(stats.solver_specific, :outer_iter, missing), - fevals = neval_residual(nls_model), - gevals = neval_jtprod_residual(nls_model) + neval_jprod_residual(nls_model), - proxcalls = stats.solver_specific[:prox_evals], - solution = stats.solution, - ) - return metrics -end - ############################# # ====== LANCEMENTS ======= # ############################# @@ -151,11 +132,11 @@ if :PANOC in CFG2.RUN_SOLVERS push!(results, run_panoc!(model, x0; λ = CFG2.LAMBDA_L0, maxit = CFG2.MAXIT_PANOC, tol = CFG2.TOL, verbose = CFG2.VERBOSE_PANOC)) end if :TR in CFG2.RUN_SOLVERS - push!(results, run_tr!(model, x0; λ = CFG2.LAMBDA_L0, qn = CFG2.QN_FOR_TR, atol = CFG2.TOL, rtol = CFG2.RTOL, verbose = CFG2.VERBOSE_RO, sub_kwargs = CFG2.SUB_KWARGS_R2N,)) + push!(results, run_tr!(model, x0; λ = CFG2.LAMBDA_L0, qn = CFG2.QN_FOR_TR, atol = CFG2.TOL, rtol = CFG2.RTOL, verbose = CFG2.VERBOSE_RO, sub_kwargs = CFG2.SUB_KWARGS_R2N, opnorm_maxiter = CFG2.OPNORM_MAXITER)) end if :R2N in CFG2.RUN_SOLVERS push!(results, run_r2n!(model, x0; λ = CFG2.LAMBDA_L0, qn = CFG2.QN_FOR_R2N, atol = CFG2.TOL, rtol = CFG2.RTOL, - verbose = CFG2.VERBOSE_RO, sub_kwargs = CFG2.SUB_KWARGS_R2N, σk = CFG2.SIGMAK_R2N)) + verbose = CFG2.VERBOSE_RO, sub_kwargs = CFG2.SUB_KWARGS_R2N, σk = CFG2.SIGMAK_R2N, opnorm_maxiter = CFG2.OPNORM_MAXITER)) end @@ -200,15 +181,14 @@ if CFG2.PRINT_TABLE ] # En-têtes - 
table_str = pretty_table(String, - data; + table_str = pretty_table(String, data; header = ["Method", "Status", "Time (s)", "#f", "#∇f", "#prox"], tf = tf_unicode, alignment = [:l, :c, :r, :r, :r, :r], crop = :none, ) - open("SVM-comparison.txt", "w") do io + open("SVM-comparison-f.txt", "w") do io write(io, table_str) end diff --git a/paper/examples/comparison-config.jl b/paper/examples/comparison-config.jl index fac78e65..8b87f2a5 100644 --- a/paper/examples/comparison-config.jl +++ b/paper/examples/comparison-config.jl @@ -5,7 +5,7 @@ Base.@kwdef mutable struct Config LAMBDA_L0::Float64 = 1.0 TOL::Float64 = 1e-3 RTOL::Float64 = 1e-3 - MAXIT_PANOC::Int = 500 + MAXIT_PANOC::Int = 10000 VERBOSE_PANOC::Bool = false VERBOSE_RO::Int = 0 RUN_SOLVERS::Vector{Symbol} = [:PANOC, :TR, :R2N] # mutable @@ -15,13 +15,14 @@ Base.@kwdef mutable struct Config SIGMAK_R2N::Float64 = 1e5 X0_SCALAR::Float64 = 0.1 PRINT_TABLE::Bool = true - OPNORM_MAXITER::Int = 4 - HESSIAN_SCALE::Float64 = 1e-4 + OPNORM_MAXITER::Int = 20 + HESSIAN_SCALE::Float64 = 1e-4 + M_MONOTONE::Int = 10 # for nonmonotone R2N end # One global, constant *binding* to a mutable object = type stable & editable -const CFG = Config() -const CFG2 = Config(SIGMAK_R2N=eps()^(1/3), TOL = 1e-4, RTOL = 1e-4) -const CFG3 = Config(SIGMAK_R2N=1e3, TOL = 1e-4, RTOL = 1e-4, RUN_SOLVERS = [:LM, :TR, :R2N]) +const CFG = Config(QN_FOR_TR = :LBFGS) +const CFG2 = Config(SIGMAK_R2N=eps()^(1/3), TOL = 1e-4, RTOL = 1e-4, QN_FOR_R2N=:LSR1, M_MONOTONE=1) +const CFG3 = Config(SIGMAK_R2N=1e3, TOL = 1e-4, RTOL = 1e-4, RUN_SOLVERS = [:LM, :TR, :R2N], QN_FOR_TR = :LBFGS) end # module \ No newline at end of file diff --git a/paper/paper.md b/paper/paper.md index 0363de87..d42f11b0 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -198,14 +198,14 @@ solve!(solver_r2n, reg_nlp, stats, x = f.meta.x0, atol = 1e-4, rtol = 1e-4, verb ┌───────────┬─────────────┬──────────┬──────┬──────┬───────┐ │ Method │ Status │ Time (s) │ #f │ #∇f │ #prox │ ├───────────┼─────────────┼──────────┼──────┼──────┼───────┤ -│ PANOC │ first_order │ 18.5413 │ 1434 │ 1434 │ 934 │ -│ TR(LSR1) │ first_order │ 5.8974 │ 385 │ 333 │ 11113 │ -│ R2N(LSR1) │ first_order │ 2.1251 │ 175 │ 95 │ 56971 │ +│ PANOC │ first_order │ 51.1714 │ 3713 │ 3713 │ 2269 │ +│ TR(LSR1) │ first_order │ 6.8107 │ 385 │ 333 │ 11113 │ +│ R2N(LSR1) │ first_order │ 2.4201 │ 175 │ 95 │ 56971 │ └───────────┴─────────────┴──────────┴──────┴──────┴───────┘ ```` We observe that both **TR** and **R2N** outperform **PANOC** in terms of the number of function and gradient evaluations and computational time, although they require more proximal iterations. -But since each proximal iteration is inexpensive, the overall performance is better. +But since each proximal iteration is inexpensive, the overall performance is better. In this instance, PANOC exhibits markedly slower convergence. 
## Problem of FitzHugh-Nagumo inverse with $\ell_0$ penalty @@ -248,13 +248,14 @@ solve!(solver_tr, reg_nlp, stats, x = f.meta.x0, atol = 1e-3, rtol = 1e-3, verbo ``` ```` -┌────────────┬─────────────┬──────────┬─────┬─────┬───────┐ -│ Method │ Status │ Time (s) │ #f │ #∇f │ #prox │ -├────────────┼─────────────┼──────────┼─────┼─────┼───────┤ -│ PANOC │ first_order │ 1.3279 │ 188 │ 188 │ 107 │ -│ TR(LBFGS) │ first_order │ 0.4075 │ 83 │ 60 │ 20983 │ -│ R2N(LBFGS) │ first_order │ 0.4001 │ 63 │ 62 │ 17061 │ -└────────────┴─────────────┴──────────┴─────┴─────┴───────┘ +┌────────────────────────┬─────────────┬──────────┬─────┬─────┬───────┐ +│ Method │ Status │ Time (s) │ #f │ #∇f │ #prox │ +├────────────────────────┼─────────────┼──────────┼─────┼─────┼───────┤ +│ PANOC │ first_order │ 2.0095 │ 188 │ 188 │ 107 │ +│ TR(LBFGS) │ first_order │ 0.4377 │ 75 │ 63 │ 21915 │ +│ R2N(LBFGS) Nonmonotone │ first_order │ 0.491 │ 99 │ 54 │ 28173 │ +└────────────────────────┴─────────────┴──────────┴─────┴─────┴───────┘ + ```` Same observation as in the previous example: **TR** and **R2N** with LBFGS approximation of the Hessian of $f$ outperform **PANOC** in terms of the number of function and gradient evaluations and computational time, although they require more proximal iterations. @@ -311,13 +312,14 @@ solve!(solver_lm, reg_nlp, stats, x = f.meta.x0, atol = 1e-4, rtol = 1e-4, verbo ``` ``` -┌────────────┬─────────────┬──────────┬────┬──────┬───────┐ -│ Method │ Status │ Time (s) │ #f │ #∇f │ #prox │ -├────────────┼─────────────┼──────────┼────┼──────┼───────┤ -│ TR(LBFGS) │ first_order │ 0.1727 │ 78 │ 73 │ 10231 │ -│ R2N(LBFGS) │ first_order │ 0.1244 │ 62 │ 62 │ 5763 │ -│ LM │ first_order │ 1.2796 │ 11 │ 2035 │ 481 │ -└────────────┴─────────────┴──────────┴────┴──────┴───────┘ +┌────────────────────────┬─────────────┬──────────┬────┬──────┬───────┐ +│ Method │ Status │ Time (s) │ #f │ #∇f │ #prox │ +├────────────────────────┼─────────────┼──────────┼────┼──────┼───────┤ +│ TR(LBFGS) │ first_order │ 1.0527 │ 73 │ 68 │ 10005 │ +│ R2N(LBFGS) Nonmonotone │ first_order │ 0.7296 │ 68 │ 68 │ 7825 │ +│ LM │ first_order │ 1.2668 │ 11 │ 2035 │ 481 │ +└────────────────────────┴─────────────┴──────────┴────┴──────┴───────┘ + ``` We observe that **R2N** and **TR** achieve similar performance, with **R2N** being slightly better. From 30c8c8fc46c6ccdf18cb5cde07f0967aa54da1d8 Mon Sep 17 00:00:00 2001 From: Mohamed Laghdaf <81633807+MohamedLaghdafHABIBOULLAH@users.noreply.github.com> Date: Mon, 29 Sep 2025 14:35:49 -0400 Subject: [PATCH 25/42] Update paper/paper.md Co-authored-by: Maxence Gollier <134112149+MaxenceGollier@users.noreply.github.com> --- paper/paper.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index d42f11b0..c51b93ff 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -57,7 +57,7 @@ Moreover, they can handle cases where Hessian approximations are unbounded [@dio There exists a way to solve \eqref{eq:nlp} in Julia using [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), which implements in-place first-order line search–based methods for \eqref{eq:nlp}. Most of these methods are generally splitting schemes that alternate between taking steps along the gradient of the smooth part $f$ (or quasi-Newton directions) and applying proximal steps on the nonsmooth part $h$. Currently, [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) provides only L-BFGS as a quasi-Newton option. 
-By contrast, [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) focuses on model-based approaches such as trust-region and regularization algorithms. +By contrast, [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) focuses on model-based approaches such as trust-region and quadratic regularization algorithms. As shown in [@aravkin-baraldi-orban-2022], model-based methods typically require fewer evaluations of the objective and its gradient than first-order line search methods, at the expense of solving more involved subproblems. Although these subproblems may require many proximal iterations, each proximal computation is inexpensive, making the overall approach efficient for large-scale problems. From ad42d675fc7f5a9315ebfbca779c1f61162e9477 Mon Sep 17 00:00:00 2001 From: Mohamed Laghdaf <81633807+MohamedLaghdafHABIBOULLAH@users.noreply.github.com> Date: Mon, 29 Sep 2025 14:35:59 -0400 Subject: [PATCH 26/42] Update paper/paper.md Co-authored-by: Maxence Gollier <134112149+MaxenceGollier@users.noreply.github.com> --- paper/paper.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index c51b93ff..f5ed71ab 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -61,7 +61,7 @@ By contrast, [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimize As shown in [@aravkin-baraldi-orban-2022], model-based methods typically require fewer evaluations of the objective and its gradient than first-order line search methods, at the expense of solving more involved subproblems. Although these subproblems may require many proximal iterations, each proximal computation is inexpensive, making the overall approach efficient for large-scale problems. -Building on this perspective, [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) implements state-of-the-art regularization-based algorithms for solving problems of the form $f(x) + h(x)$, where $f$ is smooth and $h$ is nonsmooth. +Building on this perspective, [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) implements state-of-the-art algorithms for solving problems of the form $f(x) + h(x)$, where $f$ is smooth and $h$ is nonsmooth. The package provides a consistent API to formulate optimization problems and apply different regularization methods. It integrates seamlessly with the [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) ecosystem, an academic organization for nonlinear optimization software development, testing, and benchmarking. 
From 157b9869d150284aeee9dbd02998d7f8ad9e4cb8 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Mon, 29 Sep 2025 14:43:15 -0400 Subject: [PATCH 27/42] minor changes --- paper/paper.bib | 12 ++++++++++++ paper/paper.md | 4 ++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index 560eb5e4..ac649cc5 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -137,3 +137,15 @@ @InProceedings{ stella-themelis-sopasakis-patrinos-2017 Pages = {1939--1944}, doi = {10.1109/CDC.2017.8263933}, } + +@article{demarchi-jia-kanzow-mehlitz-2023, + author = {De~Marchi, Alberto and Jia, Xiaoxi and Kanzow, Christian and Mehlitz, Patrick}, + title = {Constrained composite optimization and augmented {L}agrangian methods}, + journal = {Mathematical Programming}, + year = {2023}, + month = {9}, + volume = {201}, + number = {1}, + pages = {863--896}, + doi = {10.1007/s10107-022-01922-4}, +} diff --git a/paper/paper.md b/paper/paper.md index f5ed71ab..730aa056 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -44,7 +44,7 @@ The library provides a modular and extensible framework for experimenting with n - **Trust-region methods (TR, TRDH)** [@aravkin-baraldi-orban-2022;@leconte-orban-2023], - **Quadratic regularization methods (R2, R2N)** [@diouane-habiboullah-orban-2024;@aravkin-baraldi-orban-2022], - **Levenbergh-Marquardt methods (LM, LMTR)** [@aravkin-baraldi-orban-2024]. -- **Augmented Lagrangian methods (ALTR)** (cite?). +- **Augmented Lagrangian methods (AL)** [@demarchi-jia-kanzow-mehlitz-2023]. These methods rely solely on the gradient and Hessian(-vector) information of the smooth part $f$ and the proximal mapping of the nonsmooth part $h$ in order to compute steps. Then, the objective function $f + h$ is used only to accept or reject trial points. @@ -330,7 +330,7 @@ However, **LM** requires significantly fewer function evaluations, which is expe The experiments highlight the effectiveness of the solvers implemented in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) compared to **PANOC** from [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl). -The performance can be summarized as follows: +On these examples, the performance of the solvers can be summarized as follows: - **Function and gradient evaluations:** **TR** and **R2N** are the most efficient choices when aiming to minimize both. - **Function evaluations only:** **LM** is preferable when the problem is a nonlinear least squares problem, as it achieves the lowest number of function evaluations. 
From 552c2439acf1d19dbbc269b29bbefb01320b366b Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Mon, 29 Sep 2025 20:12:25 -0400 Subject: [PATCH 28/42] second final version --- paper/paper.bib | 12 --- paper/paper.md | 225 ++++++++++-------------------------------------- 2 files changed, 45 insertions(+), 192 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index ac649cc5..3e7391f2 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -74,18 +74,6 @@ @TechReport{ diouane-gollier-orban-2024 doi = {10.13140/RG.2.2.16095.47527}, } -@article{bezanson-edelman-karpinski-shah-2017, - author = {Bezanson, Jeff and Edelman, Alan and Karpinski, Stefan and Shah, Viral B.}, - title = {Julia: A Fresh Approach to Numerical Computing}, - journal = {SIAM Review}, - volume = {59}, - number = {1}, - pages = {65--98}, - year = {2017}, - doi = {10.1137/141000671}, - publisher = {SIAM}, -} - @Misc{orban-siqueira-cutest-2020, author = {D. Orban and A. S. Siqueira and {contributors}}, title = {{CUTEst.jl}: {J}ulia's {CUTEst} interface}, diff --git a/paper/paper.md b/paper/paper.md index 730aa056..abe9397c 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -32,7 +32,7 @@ header-includes: | # Summary -[RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) is a Julia [@bezanson-edelman-karpinski-shah-2017] package that implements a family of quadratic regularization and trust-region type algorithms for solving nonsmooth optimization problems of the form: +[RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) is a Julia package that implements a family of quadratic regularization and trust-region type algorithms for solving nonsmooth optimization problems of the form: \begin{equation}\label{eq:nlp} \underset{x \in \mathbb{R}^n}{\text{minimize}} \quad f(x) + h(x), \end{equation} @@ -79,7 +79,7 @@ In contrast to [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/Proxim Hessians can be obtained via automatic differentiation through [ADNLPModels.jl](https://github.com/JuliaSmoothOptimizers/ADNLPModels.jl) or supplied directly as Hessian–vector products $v \mapsto Hv$. This enables algorithms to exploit second-order information without explicitly forming dense (or sparse) Hessians, which is often prohibitively expensive in both computation and memory, particularly in high-dimensional settings. -## Requirements of the RegularizedProblems.jl package +## Requirements of the RegularizedProblems.jl To model the problem \eqref{eq:nlp}, one defines the smooth part $f$ and the nonsmooth part $h$ as discussed above. The package [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl) provides a straightforward way to create such instances, called *Regularized Nonlinear Programming Models*: @@ -90,7 +90,7 @@ reg_nlp = RegularizedNLPModel(f, h) This design makes it a convenient source of reproducible problem instances for testing and benchmarking algorithms in the repository [@diouane-habiboullah-orban-2024;@aravkin-baraldi-orban-2022;@aravkin-baraldi-orban-2024;@leconte-orban-2023-2]. 
-## Requirements of the ShiftedProximalOperators.jl package +## Requirements of the ShiftedProximalOperators.jl The nonsmooth part $h$ must have a computable proximal mapping, defined as $$\text{prox}_{h}(v) = \underset{x \in \mathbb{R}^n}{\arg\min} \left( h(x) + \frac{1}{2} \|x - v\|^2 \right).$$ @@ -101,14 +101,12 @@ The main difference between the proximal operators implemented in is that those implemented here involve a translation of the nonsmooth term. Specifically, this package considers proximal operators defined as $$ - \underset{t \in \mathbb{R}^n}{\arg\min} \, { \tfrac{1}{2} ‖t - q‖₂² + ν h(x + s + t) + χ(s + t; ΔB) | t ∈ ℝⁿ }, + \underset{t \in \mathbb{R}^n}{\arg\min} \, { \tfrac{1}{2} ‖t - q‖₂² + ν h(x + s + t) + χ(s + t|ΔB)} $$ where $q$ is given, $x$ and $s$ are fixed shifts, $h$ is the nonsmooth term with respect to which we are computing the proximal operator, and $χ(.; \Delta B)$ is the indicator of a ball of radius $\Delta$ defined by a certain norm. - -![Composition of JSO packages](jso-packages.pdf){ width=70% } - +This package enables to encode this shifted proximal operator through without adding allocations and allowing to solve problem \eqref{eq:nlp} with bound constraints. ## Testing and documentation @@ -117,9 +115,10 @@ Extensive documentation is provided, including a user guide, API reference, and Aqua.jl is used to test the package dependencies. Documentation is built using Documenter.jl. -## Non-monotone strategies +## Solvers caracteristics -The solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) implement non-monotone strategies to accept trial points, which can enhance algorithmic performance in practice [@leconte-orban-2023;@diouane-habiboullah-orban-2024]. +All solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) are implemented in an in-place fashion, minimizing memory allocations during the resolution process. +Moreover, they implement non-monotone strategies to accept trial points, which can enhance algorithmic performance in practice [@leconte-orban-2023;@diouane-habiboullah-orban-2024]. ## Application studies @@ -134,197 +133,63 @@ This is crucial for large-scale problems where exact subproblem solutions are pr Moreover, one way to outperform line-search–based methods is to solve the subproblems more accurately by performing many proximal iterations, which are inexpensive to compute, rather than relying on numerous function and gradient evaluations. We will illustrate this in the examples below. -## In-place methods - -All solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) are implemented in an in-place fashion, minimizing memory allocations during the resolution process. # Examples -We consider three examples where the smooth part $f$ is nonconvex and the nonsmooth part $h$ is either $\ell^{1/2}$ or $\ell_0$ norm with or without constraints. +We consider two examples where the smooth part $f$ is nonconvex and the nonsmooth part $h$ is either $\ell^{1/2}$ or $\ell_0$ norm with constraints. We compare the performance of our solvers with (**PANOC**) solver [@stella-themelis-sopasakis-patrinos-2017] implemented in [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl). 
-## Problem of support vector machine with $\ell^{1/2}$ penalty +We illustrate the capabilities of [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) on two nonsmooth and nonconvex problems: +- **Support Vector Machine (SVM) with $\ell^{1/2}$ penalty** for image classification [@aravkin-baraldi-orban-2024]. +- **Nonnegative Matrix Factorization (NNMF) with $\ell_0$ penalty and constraints** [@kim-park-2008]. -A first example addresses an image recognition task using a support vector machine (SVM) similar to those in [@aravkin-baraldi-orban-2024]. -The formulation is -$$ -\min_{x \in \mathbb{R}^n} \ \tfrac{1}{2} \|\mathbf{1} - \tanh(b \odot \langle A, x \rangle)\|^2 + \|x\|_{1/2}^{1/2}, -$$ -where $A \in \mathbb{R}^{m \times n}$, with $n = 784$ representing the vectorized size of each image and $m = 13{,}007$ is the number of images in the training dataset. +Both problems are of the form $\min f(x) + h(x)$ with $f$ nonconvex and $h$ nonsmooth. +The NNMF problem can be set up in a similar way to the SVM case, with $h$ given by an $\ell_0$ norm and additional nonnegativity constraints. +Below is a condensed example showing how to define and solve such problems: ```julia -using LinearAlgebra, Random -using ProximalOperators -using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization -using MLDatasets - -random_seed = 1234 -Random.seed!(random_seed) - -# Build the models -model, nls_train, _ = RegularizedProblems.svm_train_model() - -# Define the Hessian approximation -f = LSR1Model(model) - -# Define the nonsmooth regularizer (L0 norm) -λ = 1.0 -h = RootNormLhalf(λ) - -# Define the regularized NLP model -reg_nlp = RegularizedNLPModel(f, h) - -# Choose a solver (R2DH) and execution statistics tracker -solver_r2n = R2NSolver(reg_nlp) -stats = RegularizedExecutionStats(reg_nlp) - -# Max number of proximal iterations for subproblem solver -sub_kwargs = (max_iter=200,) - -# Solve the problem -solve!(solver_r2n, reg_nlp, stats, x = f.meta.x0, atol = 1e-4, rtol = 1e-4, verbose = 0, sub_kwargs = sub_kwargs) - - - - - - +using LinearAlgebra, Random, ProximalOperators +using NLPModels, RegularizedProblems, RegularizedOptimization + +Random.seed!(1234) +model, nls, _ = RegularizedProblems.svm_train_model() # Build SVM model +f = LSR1Model(model) # Hessian approximation +h = RootNormLhalf(1.0) # Nonsmooth term +reg_nlp = RegularizedNLPModel(f, h) # Regularized problem +solver = R2NSolver(reg_nlp) # Choose solver +stats = RegularizedExecutionStats(reg_nlp) +solve!(solver, reg_nlp, stats; x=f.meta.x0, atol=1e-4, rtol=1e-4, verbose=0, sub_kwargs=(max_iter=200,)) +solve!(solver, reg_nlp, stats; x=f.meta.x0, atol=1e-5, rtol=1e-5, verbose=0, sub_kwargs=(max_iter=200,)) ``` +The NNMF problem can be set up in a similar way, replacing the model by nnmf_model(...) and $h$ by an $\ell_0$ norm. -```` -┌───────────┬─────────────┬──────────┬──────┬──────┬───────┐ -│ Method │ Status │ Time (s) │ #f │ #∇f │ #prox │ -├───────────┼─────────────┼──────────┼──────┼──────┼───────┤ -│ PANOC │ first_order │ 51.1714 │ 3713 │ 3713 │ 2269 │ -│ TR(LSR1) │ first_order │ 6.8107 │ 385 │ 333 │ 11113 │ -│ R2N(LSR1) │ first_order │ 2.4201 │ 175 │ 95 │ 56971 │ -└───────────┴─────────────┴──────────┴──────┴──────┴───────┘ -```` - -We observe that both **TR** and **R2N** outperform **PANOC** in terms of the number of function and gradient evaluations and computational time, although they require more proximal iterations. 
-But since each proximal iteration is inexpensive, the overall performance is better. In this instance, PANOC exhibits markedly slower convergence. - -## Problem of FitzHugh-Nagumo inverse with $\ell_0$ penalty - -A second example is the FitzHugh-Nagumo inverse problem with an $\ell_0$ penalty, as described in [@aravkin-baraldi-orban-2022] and [@aravkin-baraldi-orban-2024]. -This problem consists of recovering the parameters of a system of ordinary differential equations (ODEs) with sparsity constraints. -In general, the evaluation of the objective function and its gradient are costly because they require solving the ODEs compared to the proximal operator of the $\ell_0$ norm, which is inexpensive. - -```julia -using LinearAlgebra -using ProximalOperators -using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization -using DifferentialEquations, ADNLPModels - -# Define the Fitzhugh-Nagumo problem -model, _, _ = RegularizedProblems.fh_model() -x0 = 0.1 * ones(model.meta.nvars) # initial guess - -# Define the Hessian approximation -f = LBFGSModel(fh_model) - -# Initialize the starting Hessian approximation scaling factor -f.op.data.scaling_factor = 1e4 +### Numerical results -# Define the nonsmooth regularizer (L1 norm) -λ = 1.0 -h = NormL0(λ) +We compare **PANOC** (from [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl)) with **TR**, **R2N**, and **LM** from our library. +The results are summarized in the combined table below: -# Define the regularized NLP model -reg_nlp = RegularizedNLPModel(f, h) - -# Choose a solver (TR) and execution statistics tracker -solver_tr = TRSolver(reg_nlp) -stats = RegularizedExecutionStats(reg_nlp) - -# Max number of proximal iterations for subproblem solver -sub_kwargs = (max_iter=200,) - -# Solve the problem -solve!(solver_tr, reg_nlp, stats, x = f.meta.x0, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = sub_kwargs) ``` - -```` -┌────────────────────────┬─────────────┬──────────┬─────┬─────┬───────┐ -│ Method │ Status │ Time (s) │ #f │ #∇f │ #prox │ -├────────────────────────┼─────────────┼──────────┼─────┼─────┼───────┤ -│ PANOC │ first_order │ 2.0095 │ 188 │ 188 │ 107 │ -│ TR(LBFGS) │ first_order │ 0.4377 │ 75 │ 63 │ 21915 │ -│ R2N(LBFGS) Nonmonotone │ first_order │ 0.491 │ 99 │ 54 │ 28173 │ -└────────────────────────┴─────────────┴──────────┴─────┴─────┴───────┘ - - ```` - -Same observation as in the previous example: **TR** and **R2N** with LBFGS approximation of the Hessian of $f$ outperform **PANOC** in terms of the number of function and gradient evaluations and computational time, although they require more proximal iterations. - -## Problem of Nonnegative least squares with $\ell_0$ penalty and constraints - -The third experiment considers the sparse nonnegative matrix factorization (NNMF) problem introduced by [@kim-park-2008]. -Let $A \in \mathbb{R}^{m \times n}$ be a nonnegative matrix whose columns correspond to observations drawn from a Gaussian mixture, with negative entries truncated to zero. - -The goal is to obtain a factorization $A \approx WH$, where $W \in \mathbb{R}^{m \times k}$, $H \in \mathbb{R}^{k \times n}$, $k < \min(m,n)$, such that both factors are nonnegative and $H$ is sparse. - -This leads to the optimization problem - -$$ -\min_{W, H \geq 0} \; \tfrac{1}{2} \| A - WH \|_F^2 + \lambda \| \operatorname{vec}(H) \|_0, -$$ - -where $\operatorname{vec}(H)$ denotes the column-stacked version of $H$. 
- -Compared to the previous examples, we now consider a constrained problem with a nonsmooth and nonconvex term. - -The library [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) provides solvers that can handle constraints by separating the objective into three parts: a smooth term, a nonsmooth term, and the indicator function of the constraints. However, this approach assumes that the nonsmooth part is convex, which is not the case here. - -Another approach is to merge the nonsmooth term with the indicator function of the constraints into a single nonsmooth function, and then apply **PANOC**, which is the strategy adopted here. However, the current library of proximal operators, [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl), on which [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) relies, does not provide the proximal mapping of the sum of the $\ell_0$ norm and the indicator function of the nonnegative orthant. In contrast, [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl) does implement this operator. - -Therefore, to apply **PANOC** in this setting, one would first need to implement this combined proximal operator in [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl). For this reason, we do not include **PANOC** in this example. - -Instead, we compare the performance of **TR** and **R2N** with that of **LM**. - -```julia -using LinearAlgebra -using ProximalOperators -using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization -using DifferentialEquations, ADNLPModels - -# Build the models -m, n, k = 100, 50, 5 -model, nls_model, A, selected = nnmf_model(m, n, k) - -# Define the nonsmooth regularizer (L1 norm) -λ = norm(grad(model, rand(model.meta.nvar)), Inf) / 200 -h = NormL0(λ) - -# Define the regularized NLS model -reg_nlp = RegularizedNLSModel(nls_model, h) - -# Choose a solver (TR) and execution statistics tracker -solver_lm = LMSolver(reg_nlp) -stats = RegularizedExecutionStats(reg_nlp) - - -# Solve the problem -solve!(solver_lm, reg_nlp, stats, x = f.meta.x0, atol = 1e-4, rtol = 1e-4, verbose = 0) +┌────────────────────────┬─────────────┬──────────┬──────┬──────┬───────┐ +│ Method │ Status │ Time (s) │ #f │ #∇f │ #prox │ +├────────────────────────┼─────────────┼──────────┼──────┼──────┼───────┤ +│ PANOC (SVM) │ first_order │ 51.17 │ 3713 │ 3713 │ 2269 │ +│ TR(LSR1, SVM) │ first_order │ 6.81 │ 385 │ 333 │ 11113 │ +│ R2N(LSR1, SVM) │ first_order │ 2.42 │ 175 │ 95 │ 56971 │ +│ TR(LBFGS, NNMF) │ first_order │ 1.05 │ 73 │ 68 │ 10005 │ +│ R2N(LBFGS, NNMF) │ first_order │ 0.73 │ 68 │ 68 │ 7825 │ +│ LM (NNMF) │ first_order │ 1.27 │ 11 │ 2035 │ 481 │ +└────────────────────────┴─────────────┴──────────┴──────┴──────┴───────┘ ``` -``` -┌────────────────────────┬─────────────┬──────────┬────┬──────┬───────┐ -│ Method │ Status │ Time (s) │ #f │ #∇f │ #prox │ -├────────────────────────┼─────────────┼──────────┼────┼──────┼───────┤ -│ TR(LBFGS) │ first_order │ 1.0527 │ 73 │ 68 │ 10005 │ -│ R2N(LBFGS) Nonmonotone │ first_order │ 0.7296 │ 68 │ 68 │ 7825 │ -│ LM │ first_order │ 1.2668 │ 11 │ 2035 │ 481 │ -└────────────────────────┴─────────────┴──────────┴────┴──────┴───────┘ +### Discussion -``` +- **SVM with $\ell^{1/2}$ penalty:** TR and R2N require far fewer function and gradient evaluations than PANOC, at the expense of more proximal iterations. 
Since each proximal step is inexpensive, TR and R2N are much faster overall. +- **NNMF with constrained $\ell_0$ penalty:** R2N slightly outperforms TR, while LM is competitive in terms of function calls but incurs many gradient evaluations. -We observe that **R2N** and **TR** achieve similar performance, with **R2N** being slightly better. -Both methods outperform **LM** in terms of computational time and the number of gradient evaluations. -However, **LM** requires significantly fewer function evaluations, which is expected since it is specifically designed for nonlinear least squares problems and can exploit the structure of the objective function more effectively. +Additional tests (e.g., other regularizers, constraint types, and scaling dimensions) have also been conducted, and a full benchmarking campaign is currently underway. ## Conclusion From 1ba11854dabdad1f4a3664d98ba2922f6cca5c54 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Tue, 30 Sep 2025 08:01:01 -0400 Subject: [PATCH 29/42] update results with master rebased --- paper/paper.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index abe9397c..1222dbfb 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -164,7 +164,7 @@ stats = RegularizedExecutionStats(reg_nlp) solve!(solver, reg_nlp, stats; x=f.meta.x0, atol=1e-4, rtol=1e-4, verbose=0, sub_kwargs=(max_iter=200,)) solve!(solver, reg_nlp, stats; x=f.meta.x0, atol=1e-5, rtol=1e-5, verbose=0, sub_kwargs=(max_iter=200,)) ``` -The NNMF problem can be set up in a similar way, replacing the model by nnmf_model(...) and $h$ by an $\ell_0$ norm. +The NNMF problem can be set up in a similar way, replacing the model by nnmf_model(...) and $h$ by an $\ell_0$ norm and we set LBFGS as Hessian approximation. 
### Numerical results @@ -172,16 +172,16 @@ We compare **PANOC** (from [ProximalAlgorithms.jl](https://github.com/JuliaFirst The results are summarized in the combined table below: ``` -┌────────────────────────┬─────────────┬──────────┬──────┬──────┬───────┐ -│ Method │ Status │ Time (s) │ #f │ #∇f │ #prox │ -├────────────────────────┼─────────────┼──────────┼──────┼──────┼───────┤ -│ PANOC (SVM) │ first_order │ 51.17 │ 3713 │ 3713 │ 2269 │ -│ TR(LSR1, SVM) │ first_order │ 6.81 │ 385 │ 333 │ 11113 │ -│ R2N(LSR1, SVM) │ first_order │ 2.42 │ 175 │ 95 │ 56971 │ -│ TR(LBFGS, NNMF) │ first_order │ 1.05 │ 73 │ 68 │ 10005 │ -│ R2N(LBFGS, NNMF) │ first_order │ 0.73 │ 68 │ 68 │ 7825 │ -│ LM (NNMF) │ first_order │ 1.27 │ 11 │ 2035 │ 481 │ -└────────────────────────┴─────────────┴──────────┴──────┴──────┴───────┘ +┌───────────────────┬─────────────┬──────────┬──────┬──────┬───────┐ +│ Method │ Status │ Time (s) │ #f │ #∇f │ #prox │ +├───────────────────┼─────────────┼──────────┼──────┼──────┼───────┤ +│ PANOC (SVM) │ first_order │ 38.226 │ 3713 │ 3713 │ 2269 │ +│ TR (LSR1, SVM) │ first_order │ 5.912 │ 347 │ 291 │ 4037 │ +│ R2N (LSR1, SVM) │ first_order │ 1.2944 │ 86 │ 76 │ 8586 │ +│ TR (LBFGS, NNMF) │ first_order │ 0.0857 │ 42 │ 40 │ 3160 │ +│ R2N (LBFGS, NNMF) │ first_order │ 0.2116 │ 79 │ 76 │ 6273 │ +│ LM (NNMF) │ first_order │ 0.1363 │ 8 │ 7540 │ 2981 │ +└───────────────────┴─────────────┴──────────┴──────┴──────┴───────┘ ``` ### Discussion From 66ed7130c0509f7bb3d8f89fbfa17d18d12f3d7f Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Tue, 30 Sep 2025 08:29:32 -0400 Subject: [PATCH 30/42] minor comments --- paper/paper.bib | 11 +++++++++++ paper/paper.md | 8 ++++---- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index 3e7391f2..518297e7 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -137,3 +137,14 @@ @article{demarchi-jia-kanzow-mehlitz-2023 pages = {863--896}, doi = {10.1007/s10107-022-01922-4}, } + +@Article{ themelis-stella-patrinos-2017, + Author = {Themelis, Andreas and Stella, Lorenzo and Patrinos, Panagiotis}, + Title = {Forward-Backward Envelope for the Sum of Two Nonconvex Functions: Further Properties and Nonmonotone line seach Algorithms}, + Journal = siopt, + Year = 2018, + Volume = 28, + Number = 3, + Pages = {2274--2303}, + doi = {10.1137/16M1080240}, +} diff --git a/paper/paper.md b/paper/paper.md index 1222dbfb..95d21a9f 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -46,7 +46,7 @@ The library provides a modular and extensible framework for experimenting with n - **Levenbergh-Marquardt methods (LM, LMTR)** [@aravkin-baraldi-orban-2024]. - **Augmented Lagrangian methods (AL)** [@demarchi-jia-kanzow-mehlitz-2023]. -These methods rely solely on the gradient and Hessian(-vector) information of the smooth part $f$ and the proximal mapping of the nonsmooth part $h$ in order to compute steps. +These methods rely on the gradient and optionnally on the Hessian(-vector) information of the smooth part $f$ and the proximal mapping of the nonsmooth part $h$ in order to compute steps. Then, the objective function $f + h$ is used only to accept or reject trial points. Moreover, they can handle cases where Hessian approximations are unbounded [@diouane-habiboullah-orban-2024;@leconte-orban-2023-2], making the package particularly suited for large-scale, ill-conditioned, and nonsmooth problems. 
@@ -54,12 +54,12 @@ Moreover, they can handle cases where Hessian approximations are unbounded [@dio ## Model-based framework for nonsmooth methods -There exists a way to solve \eqref{eq:nlp} in Julia using [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), which implements in-place first-order line search–based methods for \eqref{eq:nlp}. +In Julia, \eqref{eq:nlp} can be solved using [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), which implements in-place, first-order, line-search–based methods[@stella-themelis-sopasakis-patrinos-2017;@themelis-stella-patrinos-2017]. Most of these methods are generally splitting schemes that alternate between taking steps along the gradient of the smooth part $f$ (or quasi-Newton directions) and applying proximal steps on the nonsmooth part $h$. Currently, [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) provides only L-BFGS as a quasi-Newton option. By contrast, [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) focuses on model-based approaches such as trust-region and quadratic regularization algorithms. As shown in [@aravkin-baraldi-orban-2022], model-based methods typically require fewer evaluations of the objective and its gradient than first-order line search methods, at the expense of solving more involved subproblems. -Although these subproblems may require many proximal iterations, each proximal computation is inexpensive, making the overall approach efficient for large-scale problems. +Although these subproblems may require many proximal iterations, each proximal computation is inexpensive for several commonly used nonsmooth functions, such as separable penalties and bound constraints (see examples below), making the overall approach efficient for large-scale problems. Building on this perspective, [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) implements state-of-the-art algorithms for solving problems of the form $f(x) + h(x)$, where $f$ is smooth and $h$ is nonsmooth. The package provides a consistent API to formulate optimization problems and apply different regularization methods. @@ -164,7 +164,7 @@ stats = RegularizedExecutionStats(reg_nlp) solve!(solver, reg_nlp, stats; x=f.meta.x0, atol=1e-4, rtol=1e-4, verbose=0, sub_kwargs=(max_iter=200,)) solve!(solver, reg_nlp, stats; x=f.meta.x0, atol=1e-5, rtol=1e-5, verbose=0, sub_kwargs=(max_iter=200,)) ``` -The NNMF problem can be set up in a similar way, replacing the model by nnmf_model(...) and $h$ by an $\ell_0$ norm and we set LBFGS as Hessian approximation. +The NNMF problem can be set up in a similar way, replacing the model by nnmf_model(...) with bound constraints, $h$ by an $\ell_0$ norm and use an L-BFGS Hessian approximation. ### Numerical results From a9a498563aa104343eba92840ecb19c7313b950a Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Tue, 30 Sep 2025 09:40:42 -0400 Subject: [PATCH 31/42] Fix typos and improve clarity in paper.md --- paper/paper.md | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index 95d21a9f..8785160b 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -46,7 +46,7 @@ The library provides a modular and extensible framework for experimenting with n - **Levenbergh-Marquardt methods (LM, LMTR)** [@aravkin-baraldi-orban-2024]. 
- **Augmented Lagrangian methods (AL)** [@demarchi-jia-kanzow-mehlitz-2023]. -These methods rely on the gradient and optionnally on the Hessian(-vector) information of the smooth part $f$ and the proximal mapping of the nonsmooth part $h$ in order to compute steps. +These methods rely on the gradient and optionally on the Hessian(-vector) information of the smooth part $f$ and the proximal mapping of the nonsmooth part $h$ in order to compute steps. Then, the objective function $f + h$ is used only to accept or reject trial points. Moreover, they can handle cases where Hessian approximations are unbounded [@diouane-habiboullah-orban-2024;@leconte-orban-2023-2], making the package particularly suited for large-scale, ill-conditioned, and nonsmooth problems. @@ -54,7 +54,7 @@ Moreover, they can handle cases where Hessian approximations are unbounded [@dio ## Model-based framework for nonsmooth methods -In Julia, \eqref{eq:nlp} can be solved using [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), which implements in-place, first-order, line-search–based methods[@stella-themelis-sopasakis-patrinos-2017;@themelis-stella-patrinos-2017]. +In Julia, \eqref{eq:nlp} can be solved using [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), which implements in-place, first-order, line-search–based methods [@stella-themelis-sopasakis-patrinos-2017;@themelis-stella-patrinos-2017]. Most of these methods are generally splitting schemes that alternate between taking steps along the gradient of the smooth part $f$ (or quasi-Newton directions) and applying proximal steps on the nonsmooth part $h$. Currently, [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) provides only L-BFGS as a quasi-Newton option. By contrast, [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) focuses on model-based approaches such as trust-region and quadratic regularization algorithms. @@ -69,15 +69,17 @@ On the one hand, smooth problems $f$ can be defined via [NLPModels.jl](https://g Large collections of such problems are available in [Cutest.jl](https://github.com/JuliaSmoothOptimizers/CUTEst.jl) [@orban-siqueira-cutest-2020] and [OptimizationProblems.jl](https://github.com/JuliaSmoothOptimizers/OptimizationProblems.jl) [@migot-orban-siqueira-optimizationproblems-2023]. Another option is to use [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl), which provides problem instances commonly used in the nonsmooth optimization literature. -On the other hand, Hessian approximations of these functions, including quasi-Newton and diagonal schemes, can be specified through [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl), which represents Hessians as linear operators and implements efficient Hessian–vector products. +On the other hand, nonsmooth terms $h$ can be modeled using [ProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ProximalOperators.jl), which provides a broad collection of nonsmooth functions, together with [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl), which provides shifted proximal mappings for nonsmooth functions. 
-Finally, nonsmooth terms $h$ can be modeled using [ProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ProximalOperators.jl), which provides a broad collection of nonsmooth functions, together with [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl), which provides shifted proximal mappings for nonsmooth functions. +## Support for Hessians and Hessian approximations of the smooth part $f$ -## Support for Hessians of the smooth part $f$ +In contrast to [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) methods such as **R2N** and **TR** methods support exact Hessians as well as several Hessian approximations of $f$, which can significantly improve convergence rates—especially for ill-conditioned problems. -In contrast to [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) methods such as **R2N** and **TR** support Hessians of $f$, which can significantly improve convergence rates, especially for ill-conditioned problems. -Hessians can be obtained via automatic differentiation through [ADNLPModels.jl](https://github.com/JuliaSmoothOptimizers/ADNLPModels.jl) or supplied directly as Hessian–vector products $v \mapsto Hv$. -This enables algorithms to exploit second-order information without explicitly forming dense (or sparse) Hessians, which is often prohibitively expensive in both computation and memory, particularly in high-dimensional settings. +Exact Hessians can be obtained via automatic differentiation through [ADNLPModels.jl](https://github.com/JuliaSmoothOptimizers/ADNLPModels.jl) or supplied directly as Hessian–vector products $v \mapsto Hv$. + +Hessian approximations (e.g., quasi-Newton and diagonal schemes) can be specified via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl), which represents Hessians as linear operators and provides efficient implementations of Hessian–vector products. + +This design allows algorithms to exploit second-order information **without** explicitly forming dense (or even sparse) Hessian matrices, which is often prohibitively expensive in time and memory, particularly at large scale. ## Requirements of the RegularizedProblems.jl @@ -115,7 +117,7 @@ Extensive documentation is provided, including a user guide, API reference, and Aqua.jl is used to test the package dependencies. Documentation is built using Documenter.jl. -## Solvers caracteristics +## Solvers characteristics All solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) are implemented in an in-place fashion, minimizing memory allocations during the resolution process. Moreover, they implement non-monotone strategies to accept trial points, which can enhance algorithmic performance in practice [@leconte-orban-2023;@diouane-habiboullah-orban-2024]. @@ -144,10 +146,8 @@ We compare the performance of our solvers with (**PANOC**) solver [@stella-theme We illustrate the capabilities of [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) on two nonsmooth and nonconvex problems: - **Support Vector Machine (SVM) with $\ell^{1/2}$ penalty** for image classification [@aravkin-baraldi-orban-2024]. 
-- **Nonnegative Matrix Factorization (NNMF) with $\ell_0$ penalty and constraints** [@kim-park-2008]. +- **Nonnegative Matrix Factorization (NNMF) with $\ell_0$ penalty and bound constraints** [@kim-park-2008]. -Both problems are of the form $\min f(x) + h(x)$ with $f$ nonconvex and $h$ nonsmooth. -The NNMF problem can be set up in a similar way to the SVM case, with $h$ given by an $\ell_0$ norm and additional nonnegativity constraints. Below is a condensed example showing how to define and solve such problems: ```julia @@ -164,7 +164,7 @@ stats = RegularizedExecutionStats(reg_nlp) solve!(solver, reg_nlp, stats; x=f.meta.x0, atol=1e-4, rtol=1e-4, verbose=0, sub_kwargs=(max_iter=200,)) solve!(solver, reg_nlp, stats; x=f.meta.x0, atol=1e-5, rtol=1e-5, verbose=0, sub_kwargs=(max_iter=200,)) ``` -The NNMF problem can be set up in a similar way, replacing the model by nnmf_model(...) with bound constraints, $h$ by an $\ell_0$ norm and use an L-BFGS Hessian approximation. +The NNMF problem can be set up in a similar way, replacing the model by nnmf_model(...), $h$ by an $\ell_0$ norm and use an L-BFGS Hessian approximation. ### Numerical results @@ -186,8 +186,8 @@ The results are summarized in the combined table below: ### Discussion -- **SVM with $\ell^{1/2}$ penalty:** TR and R2N require far fewer function and gradient evaluations than PANOC, at the expense of more proximal iterations. Since each proximal step is inexpensive, TR and R2N are much faster overall. -- **NNMF with constrained $\ell_0$ penalty:** R2N slightly outperforms TR, while LM is competitive in terms of function calls but incurs many gradient evaluations. +- **SVM with $\ell^{1/2}$ penalty:** **TR** and **R2N** require far fewer function and gradient evaluations than **PANOC**, at the expense of more proximal iterations. Since each proximal step is inexpensive, **TR** and **R2N** are much faster overall. +- **NNMF with constrained $\ell_0$ penalty:** **TR** outperforms **R2N**, while **LM** is competitive in terms of function calls but incurs many gradient evaluations. Additional tests (e.g., other regularizers, constraint types, and scaling dimensions) have also been conducted, and a full benchmarking campaign is currently underway. From 872cbd05ec8704851d6866e9463ef69fd2d8d645 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Tue, 30 Sep 2025 09:55:25 -0400 Subject: [PATCH 32/42] Improve clarity in examples section of paper.md --- paper/paper.md | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index 8785160b..19d3d0d8 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -138,11 +138,6 @@ We will illustrate this in the examples below. # Examples - -We consider two examples where the smooth part $f$ is nonconvex and the nonsmooth part $h$ is either $\ell^{1/2}$ or $\ell_0$ norm with constraints. - -We compare the performance of our solvers with (**PANOC**) solver [@stella-themelis-sopasakis-patrinos-2017] implemented in [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl). - We illustrate the capabilities of [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) on two nonsmooth and nonconvex problems: - **Support Vector Machine (SVM) with $\ell^{1/2}$ penalty** for image classification [@aravkin-baraldi-orban-2024]. 
@@ -166,9 +161,9 @@ solve!(solver, reg_nlp, stats; x=f.meta.x0, atol=1e-5, rtol=1e-5, verbose=0, sub ``` The NNMF problem can be set up in a similar way, replacing the model by nnmf_model(...), $h$ by an $\ell_0$ norm and use an L-BFGS Hessian approximation. -### Numerical results +## Numerical results -We compare **PANOC** (from [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl)) with **TR**, **R2N**, and **LM** from our library. +We compare **PANOC** [@stella-themelis-sopasakis-patrinos-2017](from [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl)) with **TR**, **R2N**, and **LM** from our library. The results are summarized in the combined table below: ``` @@ -184,14 +179,14 @@ The results are summarized in the combined table below: └───────────────────┴─────────────┴──────────┴──────┴──────┴───────┘ ``` -### Discussion +## Discussion - **SVM with $\ell^{1/2}$ penalty:** **TR** and **R2N** require far fewer function and gradient evaluations than **PANOC**, at the expense of more proximal iterations. Since each proximal step is inexpensive, **TR** and **R2N** are much faster overall. - **NNMF with constrained $\ell_0$ penalty:** **TR** outperforms **R2N**, while **LM** is competitive in terms of function calls but incurs many gradient evaluations. Additional tests (e.g., other regularizers, constraint types, and scaling dimensions) have also been conducted, and a full benchmarking campaign is currently underway. -## Conclusion +# Conclusion The experiments highlight the effectiveness of the solvers implemented in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) compared to **PANOC** from [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl). From fc1f5a6013f5f0ed0655540da14368bd5c65eec7 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Tue, 30 Sep 2025 10:21:24 -0400 Subject: [PATCH 33/42] Refine shifted proximal operator definitions and enhance clarity in requirements section of paper.md --- paper/paper.md | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index 19d3d0d8..c363459b 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -95,20 +95,19 @@ This design makes it a convenient source of reproducible problem instances for t ## Requirements of the ShiftedProximalOperators.jl The nonsmooth part $h$ must have a computable proximal mapping, defined as -$$\text{prox}_{h}(v) = \underset{x \in \mathbb{R}^n}{\arg\min} \left( h(x) + \frac{1}{2} \|x - v\|^2 \right).$$ -This requirement is satisfied by a wide range of nonsmooth functions commonly used in practice, such as $\ell_1$ norm, $\ell_0$ "norm", indicator functions of convex sets, and group sparsity-inducing norms. -The package [ProximalOperators.jl](https://www.github.com/FirstOrder/ProximalOperators.jl) provides a comprehensive collection of such functions, along with their proximal mappings. -The main difference between the proximal operators implemented in -[ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl) -is that those implemented here involve a translation of the nonsmooth term. 
-Specifically, this package considers proximal operators defined as +$$\text{prox}_{\nu h}(v) = \underset{x \in \mathbb{R}^n}{\arg\min} \frac{1}{2} \|t - v\|^2 + \nu h(t).$$ + +This computation is performed efficiently in [ShiftedProximalOperators.jl](https://www.github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl). +While [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl) provides many standard proximal mappings, [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl) also supplies **shifted** variants of these mappings. +Specifically, the package supports shifted proximal operators of the form $$ - \underset{t \in \mathbb{R}^n}{\arg\min} \, { \tfrac{1}{2} ‖t - q‖₂² + ν h(x + s + t) + χ(s + t|ΔB)} + \underset{t \in \mathbb{R}^n}{\arg\min} \, { \tfrac{1}{2} ‖t - q‖₂² + ν h(t + x + s) + χ(s + t|ΔB)} $$ where $q$ is given, $x$ and $s$ are fixed shifts, $h$ is the nonsmooth term with respect -to which we are computing the proximal operator, and $χ(.; \Delta B)$ is the indicator of +to which we are computing the proximal operator, and $χ(.| \Delta B)$ is the indicator of a ball of radius $\Delta$ defined by a certain norm. -This package enables to encode this shifted proximal operator through without adding allocations and allowing to solve problem \eqref{eq:nlp} with bound constraints. + +These shifted operators allow us to (i) incorporate bound or trust-region constraints via the indicator term which is required for **TR** algorithm and (ii) evaluate the prox **in place**, without additional allocations, which integrates efficiently with our subproblem solvers and enables solving \eqref{eq:nlp} with bound constraints. ## Testing and documentation From 8dc9fdc168ee04fa3cd49ab3a5bd929e732e29f5 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Tue, 30 Sep 2025 15:12:40 -0400 Subject: [PATCH 34/42] Incorporate Dominique comments --- paper/Benchmark.tex | 10 +++ paper/paper.bib | 4 +- paper/paper.md | 165 +++++++++++++++++++++++--------------------- 3 files changed, 97 insertions(+), 82 deletions(-) create mode 100644 paper/Benchmark.tex diff --git a/paper/Benchmark.tex b/paper/Benchmark.tex new file mode 100644 index 00000000..be20157a --- /dev/null +++ b/paper/Benchmark.tex @@ -0,0 +1,10 @@ +\begin{tabular}{lcrrrrr} + \hline + \textbf{Method} & \textbf{Status} & \textbf{$t$($s$)} & \textbf{$\#f$} & \textbf{$\#\nabla f$} & \textbf{$\#prox$} & \textbf{Objective} \\\hline + PANOC (SVM) & first\_order & 38.0537 & 3713 & 3713 & 2269 & 188.924 \\ + TR (LSR1, SVM) & first\_order & 3.9597 & 347 & 291 & 4037 & 179.837 \\ + R2N (LSR1, SVM) & first\_order & 0.9791 & 86 & 76 & 8586 & 185.769 \\ + TR (LBFGS, NNMF) & first\_order & 0.0865 & 42 & 40 & 3160 & 976.06 \\ + R2N (LBFGS, NNMF) & first\_order & 0.2125 & 79 & 76 & 6273 & 408.599 \\ + LM (NNMF) & first\_order & 0.1346 & 8 & 7540 & 2981 & 131.183 \\\hline +\end{tabular} diff --git a/paper/paper.bib b/paper/paper.bib index 518297e7..01e69776 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -10,7 +10,7 @@ @Article{ aravkin-baraldi-orban-2022 } @Article{ aravkin-baraldi-orban-2024, - Author = {Aravkin, Aleksandr Y. and Baraldi, Robert and Orban, Dominique}, + Author = {A. Y. Aravkin and R. Baraldi and D. 
Orban},
  Title	= {A {L}evenberg–{M}arquardt Method for Nonsmooth Regularized Least Squares},
  Journal	= sisc,
  Year	= 2024,
@@ -41,7 +41,7 @@ @Article{ leconte-orban-2023 }

 @TechReport{	leconte-orban-2023-2,
-  Author	= {Leconte, Geoffroy and Orban, Dominique},
+  Author	= {G. Leconte and D. Orban},
  Title	= {Complexity of trust-region methods with unbounded {H}essian approximations for smooth and nonsmooth optimization},
  Institution	= gerad,
  Year	= 2023,
diff --git a/paper/paper.md b/paper/paper.md
index c363459b..e30f99dc 100644
--- a/paper/paper.md
+++ b/paper/paper.md
@@ -32,82 +32,91 @@ header-includes: |

 # Summary

-[RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) is a Julia package that implements a family of quadratic regularization and trust-region type algorithms for solving nonsmooth optimization problems of the form:
+[RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) is a Julia package that implements a family of quadratic regularization and trust-region type algorithms for solving the nonsmooth optimization problem
 \begin{equation}\label{eq:nlp}
-  \underset{x \in \mathbb{R}^n}{\text{minimize}} \quad f(x) + h(x),
+  \underset{x \in \mathbb{R}^n}{\text{minimize}} \quad f(x) + h(x), \quad \text{s.t.} \quad c(x) = 0,
 \end{equation}
-where $f: \mathbb{R}^n \to \mathbb{R}$ is continuously differentiable on $\mathbb{R}^n$, and $h: \mathbb{R}^n \to \mathbb{R} \cup \{+\infty\}$ is lower semi-continuous.
-Both $f$ and $h$ may be nonconvex.
-
+where $f: \mathbb{R}^n \to \mathbb{R}$ is continuously differentiable, $h: \mathbb{R}^n \to \mathbb{R} \cup \{+\infty\}$ is a lower semi-continuous regularizer, such as a sparsity-inducing penalty, the indicator of bound constraints, or a combination of both, and $c: \mathbb{R}^n \to \mathbb{R}^m$ is continuously differentiable and defines equality constraints.
+All of $f$, $h$, and $c$ may be nonconvex.

 The library provides a modular and extensible framework for experimenting with nonsmooth and nonconvex optimization algorithms, including:

-- **Trust-region methods (TR, TRDH)** [@aravkin-baraldi-orban-2022;@leconte-orban-2023],
-- **Quadratic regularization methods (R2, R2N)** [@diouane-habiboullah-orban-2024;@aravkin-baraldi-orban-2022],
-- **Levenbergh-Marquardt methods (LM, LMTR)** [@aravkin-baraldi-orban-2024].
-- **Augmented Lagrangian methods (AL)** [@demarchi-jia-kanzow-mehlitz-2023].
+- **Trust-region solvers (TR, TRDH)** [@aravkin-baraldi-orban-2022;@leconte-orban-2023],
+- **Quadratic regularization solvers (R2, R2N)** [@diouane-habiboullah-orban-2024;@aravkin-baraldi-orban-2022],
+- **Levenberg-Marquardt solvers (LM, LMTR)** [@aravkin-baraldi-orban-2024],
+- **Augmented Lagrangian solver (AL)** [@demarchi-jia-kanzow-mehlitz-2023].

-These methods rely on the gradient and optionally on the Hessian(-vector) information of the smooth part $f$ and the proximal mapping of the nonsmooth part $h$ in order to compute steps.
+Except for the **AL** solver, these methods rely on the gradient and optionally on the Hessian(-vector) information of the smooth part $f$ and the proximal mapping of the nonsmooth part $h$ in order to compute steps.
 Then, the objective function $f + h$ is used only to accept or reject trial points.
-Moreover, they can handle cases where Hessian approximations are unbounded [@diouane-habiboullah-orban-2024;@leconte-orban-2023-2], making the package particularly suited for large-scale, ill-conditioned, and nonsmooth problems.
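+
+For instance, at an iterate $x_k$, the trust-region solvers compute a trial step $s_k$ by approximately minimizing a model of $f + h$ around $x_k$; a sketch of the subproblem, whose precise statement is given in [@aravkin-baraldi-orban-2022], is
+\begin{equation*}
+  \underset{s \in \mathbb{R}^n}{\text{minimize}} \quad \nabla f(x_k)^T s + \tfrac{1}{2} s^T B_k s + h(x_k + s) \quad \text{s.t.} \quad \|s\| \leq \Delta_k,
+\end{equation*}
+where $B_k \approx \nabla^2 f(x_k)$ and $\Delta_k > 0$ is the trust-region radius; the quadratic regularization solvers drop the trust-region constraint and instead add a penalty $\tfrac{1}{2} \sigma_k \|s\|^2$ to the model.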
+
+Solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) allow inexact resolution of trust-region and quadratic-regularized subproblems using first-order methods that are implemented in the package itself, such as the quadratic regularization methods R2 [@aravkin-baraldi-orban-2022] and R2DH [@diouane-habiboullah-orban-2024], and the trust-region variant TRDH [@leconte-orban-2023-2].
+
+All solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) are implemented in an in-place fashion, minimizing memory allocations during the solution process.
+Moreover, they implement non-monotone strategies to accept trial points, which can enhance algorithmic performance in practice [@leconte-orban-2023;@diouane-habiboullah-orban-2024].
+
+## Requirements of the ShiftedProximalOperators.jl
+
+The nonsmooth part $h$ must have a computable proximal mapping, defined as
+$$\text{prox}_{\nu h}(v) = \underset{x \in \mathbb{R}^n}{\arg\min} \frac{1}{2} \|t - v\|^2 + \nu h(t).$$
+
+While [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl) provides many standard proximal mappings, [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl) also supplies **shifted** variants of these mappings, which are not available in [ProximalOperators.jl](https://www.github.com/JuliaFirstOrder/ProximalOperators.jl).

 # Statement of need

 ## Model-based framework for nonsmooth methods

 In Julia, \eqref{eq:nlp} can be solved using [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), which implements in-place, first-order, line-search–based methods [@stella-themelis-sopasakis-patrinos-2017;@themelis-stella-patrinos-2017].
-Most of these methods are generally splitting schemes that alternate between taking steps along the gradient of the smooth part $f$ (or quasi-Newton directions) and applying proximal steps on the nonsmooth part $h$.
+Most of these methods are generally splitting schemes that alternate between taking steps along some direction $d$ that depends on the gradient of $f$ and applying proximal steps on the nonsmooth part $h$ followed for some of them such as **PANOC** solver by a line-search mechanism along $d$.
+```
+(Maybe remove the former sentence if it means that we should detail each algorithms ?)
+```
 Currently, [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) provides only L-BFGS as a quasi-Newton option.
 By contrast, [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) focuses on model-based approaches such as trust-region and quadratic regularization algorithms.
-As shown in [@aravkin-baraldi-orban-2022], model-based methods typically require fewer evaluations of the objective and its gradient than first-order line search methods, at the expense of solving more involved subproblems.
+As shown in [@aravkin-baraldi-orban-2022], model-based methods typically require fewer evaluations of the objective and its gradient than first-order line search methods, at the expense of requiring many proximal iterations to solve the subproblems.
+Although these subproblems may require many proximal iterations, each proximal computation is inexpensive for numerous commonly used nonsmooth functions, such as separable penalties and bound constraints (see examples below), making the overall approach efficient for large-scale problems.

-Building on this perspective, [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) implements state-of-the-art algorithms for solving problems of the form $f(x) + h(x)$, where $f$ is smooth and $h$ is nonsmooth.
-The package provides a consistent API to formulate optimization problems and apply different regularization methods.
-It integrates seamlessly with the [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) ecosystem, an academic organization for nonlinear optimization software development, testing, and benchmarking.
+The package provides a consistent API to formulate optimization problems and apply different solvers.
+It integrates seamlessly with the [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) [@jso] ecosystem, an academic organization for nonlinear optimization software development, testing, and benchmarking.

-On the one hand, smooth problems $f$ can be defined via [NLPModels.jl](https://github.com/JuliaSmoothOptimizers/NLPModels.jl) [@orban-siqueira-nlpmodels-2020], which provides a standardized Julia API for representing nonlinear programming (NLP) problems.
-Large collections of such problems are available in [Cutest.jl](https://github.com/JuliaSmoothOptimizers/CUTEst.jl) [@orban-siqueira-cutest-2020] and [OptimizationProblems.jl](https://github.com/JuliaSmoothOptimizers/OptimizationProblems.jl) [@migot-orban-siqueira-optimizationproblems-2023].
-Another option is to use [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl), which provides problem instances commonly used in the nonsmooth optimization literature.
+On the one hand, the smooth objective $f$ can be defined via [NLPModels.jl](https://github.com/JuliaSmoothOptimizers/NLPModels.jl) [@orban-siqueira-nlpmodels-2020], which provides a standardized Julia API for representing nonlinear programming (NLP) problems.
+Large collections of such problems are available in [CUTEst.jl](https://github.com/JuliaSmoothOptimizers/CUTEst.jl) [@orban-siqueira-cutest-2020] and [OptimizationProblems.jl](https://github.com/JuliaSmoothOptimizers/OptimizationProblems.jl) [@migot-orban-siqueira-optimizationproblems-2023].
+Another option is to use [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl), which provides problem instances commonly used in the nonsmooth optimization literature, where $f$ can be paired with various nonsmooth terms $h$.

 On the other hand, nonsmooth terms $h$ can be modeled using [ProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ProximalOperators.jl), which provides a broad collection of nonsmooth functions, together with [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl), which provides shifted proximal mappings for nonsmooth functions.
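+
+As an illustration, a plain proximal mapping can be evaluated with the `prox` function of [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl); the snippet below is a minimal sketch with illustrative data:
+
+```julia
+using ProximalOperators
+h = NormL1(0.1)          # nonsmooth term with λ = 0.1
+v = [1.0, -0.05, 0.3]
+y, hy = prox(h, v, 2.0)  # y = prox_{2h}(v), i.e., soft-thresholding at λ·2 = 0.2; hy = h(y)
+```
+
+[ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl) follows the same pattern while additionally handling the shifts required by our solvers.
+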
Specifically, the package supports shifted proximal operators of the form
+$$
+  \underset{t \in \mathbb{R}^n}{\arg\min} \, \tfrac{1}{2} \|t - q\|_2^2 + \nu \psi(t + s; x) + \chi(s + t \mid \Delta\mathbb{B}),
+$$
+where $\psi(\cdot; x)$ is a nonsmooth function that models $h$ (in general, we set $\psi(t; x) = h(x + t)$), $q$ is given, $x$ and $s$ are fixed shifts, $h$ is the nonsmooth term with respect
+to which we are computing the proximal operator, and $\chi(\cdot \mid \Delta\mathbb{B})$ is the indicator of
+a ball of radius $\Delta > 0$ defined by a certain norm.
+
+These shifted operators allow us to (i) incorporate bound or trust-region constraints via the indicator term, which is required for the **TR** and **TRDH** algorithms, and (ii) evaluate the prox **in place**, without additional allocations, which integrates efficiently with our subproblem solvers.

 ## Support for Hessians and Hessian approximations of the smooth part $f$

-In contrast to [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) methods such as **R2N** and **TR** methods support exact Hessians as well as several Hessian approximations of $f$, which can significantly improve convergence rates—especially for ill-conditioned problems.
+In contrast to [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) methods such as **R2N** and **TR** support exact Hessians as well as several Hessian approximations of $f$.

-Exact Hessians can be obtained via automatic differentiation through [ADNLPModels.jl](https://github.com/JuliaSmoothOptimizers/ADNLPModels.jl) or supplied directly as Hessian–vector products $v \mapsto Hv$.
+Hessian–vector products $v \mapsto Hv$ can be obtained via automatic differentiation through [ADNLPModels.jl](https://github.com/JuliaSmoothOptimizers/ADNLPModels.jl) or supplied manually.

-Hessian approximations (e.g., quasi-Newton and diagonal schemes) can be specified via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl), which represents Hessians as linear operators and provides efficient implementations of Hessian–vector products.
+Hessian approximations (e.g., quasi-Newton and diagonal schemes) can be selected via [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl).

-This design allows algorithms to exploit second-order information **without** explicitly forming dense (or even sparse) Hessian matrices, which is often prohibitively expensive in time and memory, particularly at large scale.
+This design allows algorithms to exploit second-order information **without** explicitly forming dense or sparse Hessian matrices, which is often expensive in time and memory, particularly at large scale.

 ## Requirements of the RegularizedProblems.jl

-To model the problem \eqref{eq:nlp}, one defines the smooth part $f$ and the nonsmooth part $h$ as discussed above.
-The package [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl) provides a straightforward way to create such instances, called *Regularized Nonlinear Programming Models*:
+With $f$ and $h$ modeled as discussed above, the package [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl) provides a straightforward way to pair them into a *Regularized Nonlinear Programming Model*:

 ```julia
 reg_nlp = RegularizedNLPModel(f, h)
 ```

-This design makes it a convenient source of reproducible problem instances for testing and benchmarking algorithms in the repository [@diouane-habiboullah-orban-2024;@aravkin-baraldi-orban-2022;@aravkin-baraldi-orban-2024;@leconte-orban-2023-2].
-
-## Requirements of the ShiftedProximalOperators.jl
-
-The nonsmooth part $h$ must have a computable proximal mapping, defined as
-$$\text{prox}_{\nu h}(v) = \underset{x \in \mathbb{R}^n}{\arg\min} \frac{1}{2} \|t - v\|^2 + \nu h(t).$$
+They can also be paired into a *Regularized Nonlinear Least Squares Model* if $f(x) = \tfrac{1}{2} \|F(x)\|_2^2$ for some residual function $F: \mathbb{R}^n \to \mathbb{R}^m$, which is required for the **LM** and **LMTR** solvers:

-This computation is performed efficiently in [ShiftedProximalOperators.jl](https://www.github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl).
-While [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl) provides many standard proximal mappings, [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl) also supplies **shifted** variants of these mappings.
-Specifically, the package supports shifted proximal operators of the form
-$$
-  \underset{t \in \mathbb{R}^n}{\arg\min} \, { \tfrac{1}{2} ‖t - q‖₂² + ν h(t + x + s) + χ(s + t|ΔB)}
-$$
-where $q$ is given, $x$ and $s$ are fixed shifts, $h$ is the nonsmooth term with respect
-to which we are computing the proximal operator, and $χ(.| \Delta B)$ is the indicator of
-a ball of radius $\Delta$ defined by a certain norm.
+```julia
+reg_nls = RegularizedNLSModel(f, h)
+```

-These shifted operators allow us to (i) incorporate bound or trust-region constraints via the indicator term which is required for **TR** algorithm and (ii) evaluate the prox **in place**, without additional allocations, which integrates efficiently with our subproblem solvers and enables solving \eqref{eq:nlp} with bound constraints.
+This design makes for a convenient source of reproducible problem instances for testing and benchmarking the solvers in [RegularizedOptimization.jl](https://www.github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl).

 ## Testing and documentation

@@ -116,72 +125,65 @@ Extensive documentation is provided, including a user guide, API reference, and
 The package includes a comprehensive suite of unit tests that cover all functionalities, ensuring reliability and correctness.
-
 ## Application studies

 The package is used to solve equality-constrained optimization problems by means of the exact penalty approach [@diouane-gollier-orban-2024] where the model of the nonsmooth part differs from the function $h$ itself.
 This is not covered in the current version of the package [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl).

-## Support for inexact subproblem solves
-
-Solvers in [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) allow inexact resolution of trust-region and quadratic-regularized subproblems using first-order that are implemented in the package itself such as the quadratic regularization method R2 [@aravkin-baraldi-orban-2022] and R2DH [@diouane-habiboullah-orban-2024] with trust-region variants TRDH [@leconte-orban-2023-2].
-
-This is crucial for large-scale problems where exact subproblem solutions are prohibitive.
-Moreover, one way to outperform line-search–based methods is to solve the subproblems more accurately by performing many proximal iterations, which are inexpensive to compute, rather than relying on numerous function and gradient evaluations.
-We will illustrate this in the examples below.
-
-
 # Examples

 We illustrate the capabilities of [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) on two nonsmooth and nonconvex problems:

-- **Support Vector Machine (SVM) with $\ell^{1/2}$ penalty** for image classification [@aravkin-baraldi-orban-2024].
+- **Support Vector Machine (SVM) with $\ell_{1/2}^{1/2}$ penalty** for image classification [@aravkin-baraldi-orban-2024].
 - **Nonnegative Matrix Factorization (NNMF) with $\ell_0$ penalty and bound constraints** [@kim-park-2008].

-Below is a condensed example showing how to define and solve such problems:
+Below is a condensed example showing how to define and solve the SVM problem:

 ```julia
 using LinearAlgebra, Random, ProximalOperators
 using NLPModels, RegularizedProblems, RegularizedOptimization
+using MLDatasets

 Random.seed!(1234)
-model, nls, _ = RegularizedProblems.svm_train_model() # Build SVM model
-f = LSR1Model(model) # Hessian approximation
-h = RootNormLhalf(1.0) # Nonsmooth term
-reg_nlp = RegularizedNLPModel(f, h) # Regularized problem
-solver = R2NSolver(reg_nlp) # Choose solver
+model, nls_model, _ = RegularizedProblems.svm_train_model() # Build SVM model
+f = LSR1Model(model)                # L-SR1 Hessian approximation
+λ = 1.0                             # Regularization parameter
+h = RootNormLhalf(λ)                # Nonsmooth term
+reg_nlp = RegularizedNLPModel(f, h) # Regularized problem
+solver = R2NSolver(reg_nlp)         # Choose solver
 stats = RegularizedExecutionStats(reg_nlp)
-solve!(solver, reg_nlp, stats; x=f.meta.x0, atol=1e-4, rtol=1e-4, verbose=0, sub_kwargs=(max_iter=200,))
-solve!(solver, reg_nlp, stats; x=f.meta.x0, atol=1e-5, rtol=1e-5, verbose=0, sub_kwargs=(max_iter=200,))
+solve!(solver, reg_nlp, stats; atol=1e-4, rtol=1e-4, verbose=0, sub_kwargs=(max_iter=200,))
+solve!(solver, reg_nlp, stats; atol=1e-5, rtol=1e-5, verbose=0, sub_kwargs=(max_iter=200,))  # Solve again with tighter tolerances
 ```
+
+The NNMF problem can be set up in a similar fashion:
+
+```julia
+Random.seed!(1234)
+m, n, k = 100, 50, 5
+model, nls_model, _, selected = nnmf_model(m, n, k)      # Build NNMF model
+x0 = rand(model.meta.nvar)                               # Initial point
+λ = norm(grad(model, rand(model.meta.nvar)), Inf) / 200  # Regularization parameter
+h = NormL0(λ)                                            # Nonsmooth term
+reg_nls = RegularizedNLSModel(nls_model, h)              # Regularized problem for LM
+solver = LMSolver(reg_nls)                               # Choose solver
+stats = RegularizedExecutionStats(reg_nls)               # Execution statistics tracker
+solve!(solver, reg_nls, stats; atol=1e-4, rtol=1e-4, verbose=0)
+```

-The NNMF problem can be set up in a similar way, replacing the model by nnmf_model(...), $h$ by an $\ell_0$ norm and use an L-BFGS Hessian approximation.

 ## Numerical results

-We compare **PANOC** [@stella-themelis-sopasakis-patrinos-2017](from [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl)) with **TR**, **R2N**, and **LM** from our library.
+We compare **PANOC** [@stella-themelis-sopasakis-patrinos-2017](from [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl)) against **TR**, **R2N**, and **LM** from our library.
+In order to do so, we implemented a wrapper for **PANOC** to make it compatible with our problem definition.
 The results are summarized in the combined table below:

-```
-┌───────────────────┬─────────────┬──────────┬──────┬──────┬───────┐
-│ Method            │ Status      │ Time (s) │ #f   │ #∇f  │ #prox │
-├───────────────────┼─────────────┼──────────┼──────┼──────┼───────┤
-│ PANOC (SVM)       │ first_order │ 38.226   │ 3713 │ 3713 │ 2269  │
-│ TR (LSR1, SVM)    │ first_order │ 5.912    │ 347  │ 291  │ 4037  │
-│ R2N (LSR1, SVM)   │ first_order │ 1.2944   │ 86   │ 76   │ 8586  │
-│ TR (LBFGS, NNMF)  │ first_order │ 0.0857   │ 42   │ 40   │ 3160  │
-│ R2N (LBFGS, NNMF) │ first_order │ 0.2116   │ 79   │ 76   │ 6273  │
-│ LM (NNMF)         │ first_order │ 0.1363   │ 8    │ 7540 │ 2981  │
-└───────────────────┴─────────────┴──────────┴──────┴──────┴───────┘
-```
+\input{Benchmark.tex}

 ## Discussion

+According to the **status** column, all methods reduced the optimality measure below the specified tolerance of $10^{-4}$, and thus converged to a **first-order** stationary point.
+However, the final objective values differ due to the nonconvexity of the problems.
+
 - **SVM with $\ell^{1/2}$ penalty:** **TR** and **R2N** require far fewer function and gradient evaluations than **PANOC**, at the expense of more proximal iterations. Since each proximal step is inexpensive, **TR** and **R2N** are much faster overall.
 - **NNMF with constrained $\ell_0$ penalty:** **TR** outperforms **R2N**, while **LM** is competitive in terms of function calls but incurs many gradient evaluations.

 Additional tests (e.g., other regularizers, constraint types, and scaling dimensions) have also been conducted, and a full benchmarking campaign is currently underway.

@@ -195,8 +197,11 @@ On these examples, the performance of the solvers can be summarized as follows:
 - **Function evaluations only:** **LM** is preferable when the problem is a nonlinear least squares problem, as it achieves the lowest number of function evaluations.
 - **Proximal iterations:** **PANOC** requires the fewest proximal iterations. However, in most nonsmooth applications, proximal steps are relatively inexpensive, so this criterion is of limited practical relevance.

+In the future, the package will be extended with additional algorithms that reduce the number of proximal evaluations, especially when the proximal mapping of $h$ is expensive to compute.
+
 # Acknowledgements

+The authors would like to thank Alberto De Marchi for his implementation of the Augmented Lagrangian solver.
 Mohamed Laghdaf Habiboullah is supported by an excellence FRQNT grant.
 Youssef Diouane and Dominique Orban are partially supported by an NSERC Discovery Grant.
From d10eb413a7de76c034ee7d425de9779ae9a90f54 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Tue, 30 Sep 2025 16:47:28 -0400 Subject: [PATCH 35/42] Update benchmark results in Benchmark.tex after debugging with constraints --- paper/Benchmark.tex | 12 +- paper/examples/Generate_results.jl | 16 +++ paper/examples/Project.toml | 2 - paper/examples/benchmark-fh.jl | 203 ---------------------------- paper/examples/benchmark-nnmf.jl | 71 ++++++---- paper/examples/benchmark-svm.jl | 53 ++++---- paper/examples/comparison-config.jl | 8 +- paper/examples/example1.jl | 44 ++---- paper/examples/example2.jl | 27 ---- 9 files changed, 105 insertions(+), 331 deletions(-) create mode 100644 paper/examples/Generate_results.jl delete mode 100644 paper/examples/benchmark-fh.jl delete mode 100644 paper/examples/example2.jl diff --git a/paper/Benchmark.tex b/paper/Benchmark.tex index be20157a..ecc37d3b 100644 --- a/paper/Benchmark.tex +++ b/paper/Benchmark.tex @@ -1,10 +1,10 @@ \begin{tabular}{lcrrrrr} \hline \textbf{Method} & \textbf{Status} & \textbf{$t$($s$)} & \textbf{$\#f$} & \textbf{$\#\nabla f$} & \textbf{$\#prox$} & \textbf{Objective} \\\hline - PANOC (SVM) & first\_order & 38.0537 & 3713 & 3713 & 2269 & 188.924 \\ - TR (LSR1, SVM) & first\_order & 3.9597 & 347 & 291 & 4037 & 179.837 \\ - R2N (LSR1, SVM) & first\_order & 0.9791 & 86 & 76 & 8586 & 185.769 \\ - TR (LBFGS, NNMF) & first\_order & 0.0865 & 42 & 40 & 3160 & 976.06 \\ - R2N (LBFGS, NNMF) & first\_order & 0.2125 & 79 & 76 & 6273 & 408.599 \\ - LM (NNMF) & first\_order & 0.1346 & 8 & 7540 & 2981 & 131.183 \\\hline + PANOC (SVM) & first\_order & 48.2465 & 3713 & 3713 & 2269 & 188.924 \\ + TR (LSR1, SVM) & first\_order & 4.4479 & 347 & 291 & 4037 & 179.837 \\ + R2N (LSR1, SVM) & first\_order & 2.8338 & 185 & 101 & 27932 & 192.493 \\ + TR (LBFGS, NNMF) & first\_order & 0.086 & 42 & 40 & 3160 & 976.06 \\ + R2N (LBFGS, NNMF) & first\_order & 1.0187 & 198 & 196 & 32845 & 1511.76 \\ + LM (NNMF) & first\_order & 0.6718 & 10 & 37496 & 17407 & 131.183 \\\hline \end{tabular} diff --git a/paper/examples/Generate_results.jl b/paper/examples/Generate_results.jl new file mode 100644 index 00000000..d4251e91 --- /dev/null +++ b/paper/examples/Generate_results.jl @@ -0,0 +1,16 @@ +include("benchmark-nnmf.jl") + +include("benchmark-svm.jl") + +using LaTeXStrings +all_data = vcat(data_svm, data_nnmf) + +table_str = pretty_table(String, all_data; + header = ["Method", "Status", L"$t$($s$)", L"$\#f$", L"$\#\nabla f$", L"$\#prox$", "Objective"], + backend = Val(:latex), + alignment = [:l, :c, :r, :r, :r, :r, :r], + ) + +open("Benchmark.tex", "w") do io + write(io, table_str) +end \ No newline at end of file diff --git a/paper/examples/Project.toml b/paper/examples/Project.toml index 43bcfd97..74aafd68 100644 --- a/paper/examples/Project.toml +++ b/paper/examples/Project.toml @@ -1,6 +1,4 @@ [deps] -ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a" -DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458" NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6" NLPModelsModifiers = "e01155f1-5c6f-4375-a9d8-616dd036575f" diff --git a/paper/examples/benchmark-fh.jl b/paper/examples/benchmark-fh.jl deleted file mode 100644 index 6130dfa4..00000000 --- a/paper/examples/benchmark-fh.jl +++ /dev/null @@ -1,203 +0,0 @@ - -############################# -# ======== IMPORTS ======== # -############################# -using Random, LinearAlgebra -using ProximalOperators, ProximalCore, ProximalAlgorithms 
-using ShiftedProximalOperators -using ADNLPModels, NLPModels, NLPModelsModifiers -using RegularizedOptimization, RegularizedProblems -using DifferentialEquations, SciMLSensitivity - -include("comparison-config.jl") -using .ComparisonConfig: CFG, CFG2 - -include("Bench-utils.jl") -using .BenchUtils - -function print_config(CFG) - println("Configuration:") - println(" SEED = $(CFG.SEED)") - println(" LAMBDA_L0 = $(CFG.LAMBDA_L0)") - println(" TOL = $(CFG.TOL)") - println(" RTOL = $(CFG.RTOL)") - println(" MAXIT_PANOC = $(CFG.MAXIT_PANOC)") - println(" VERBOSE_PANOC = $(CFG.VERBOSE_PANOC)") - println(" VERBOSE_RO = $(CFG.VERBOSE_RO)") - println(" RUN_SOLVERS = $(CFG.RUN_SOLVERS)") - println(" QN_FOR_TR = $(CFG.QN_FOR_TR)") - println(" QN_FOR_R2N = $(CFG.QN_FOR_R2N)") - println(" SUB_KWARGS_R2N = $(CFG.SUB_KWARGS_R2N)") - println(" SIGMAK_R2N = $(CFG.SIGMAK_R2N)") - println(" X0_SCALAR = $(CFG.X0_SCALAR)") - println(" PRINT_TABLE = $(CFG.PRINT_TABLE)") - println(" OPNORM_MAXITER = $(CFG.OPNORM_MAXITER)") - println(" HESSIAN_SCALE = $(CFG.HESSIAN_SCALE)") -end - -############################# -# ===== PROBLÈME (FH) ===== # -############################# -Random.seed!(CFG.SEED) - -# Si tu as fh_model() (wrapper perso) qui renvoie (model, misfit?, x*) -if @isdefined fh_model - model, _, x_true = fh_model() -else - # Fallback: construit le modèle depuis RegularizedProblems - _, _, _, misfit, _ = RegularizedProblems.FH_smooth_term() - model = ADNLPModel(misfit, x0; matrix_free = true) - x_true = nothing -end - -x0 = CFG.X0_SCALAR .* ones(length(model.meta.x0)) - -############################# -# ======= PANOC run ======= # -############################# -function run_panoc!(model, x0; λ = 1.0, maxit = 500, tol = 1e-3, verbose = false) - # BenchUtils.make_adnlp_compatible!() - f = BenchUtils.Counting(model) - g = BenchUtils.Counting(NormL0(λ)) - algo = ProximalAlgorithms.PANOC(maxit = maxit, tol = tol, verbose = verbose) - t = @elapsed x̂, it = algo(x0 = x0, f = f, g = g) - metrics = ( - name = "PANOC", - status = "first_order", - time = t, - iters = it, - fevals = f.eval_count, - gevals = f.gradient_count, - proxcalls = g.prox_count, - solution = x̂, - ) - return metrics -end - -############################# -# ======== TR run ========= # -############################# -function ensure_qn(model, which::Symbol) - which === :LBFGS && return LBFGSModel(model) - which === :LSR1 && return LSR1Model(model) - error("QN inconnu: $which (attendu :LBFGS ou :LSR1)") -end - -function run_tr!(model, x0; λ = 1.0, qn = :LSR1, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), opnorm_maxiter = 4, scaling_factor = 1.0) - qn_model = ensure_qn(model, qn) - reset!(qn_model) # reset des compteurs - qn_model.op.data.scaling_factor = scaling_factor - reg_nlp = RegularizedNLPModel(qn_model, NormL0(λ)) - solver = TRSolver(reg_nlp) - stats = RegularizedExecutionStats(reg_nlp) - t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats; - x = x0, atol = atol, rtol = rtol, verbose = verbose, opnorm_maxiter = opnorm_maxiter, sub_kwargs = sub_kwargs) - metrics = ( - name = "TR($(String(qn)))", - status = string(stats.status), - time = t, - iters = get(stats.solver_specific, :outer_iter, missing), - fevals = neval_obj(qn_model), - gevals = neval_grad(qn_model), - proxcalls = stats.solver_specific[:prox_evals], - solution = stats.solution, - ) - return metrics -end - -############################# -# ======== R2N run ======== # -############################# -function run_r2n!(model, x0; λ = 1.0, qn = :LBFGS, atol 
= 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), σk = 1e5, opnorm_maxiter = 4) - qn_model = ensure_qn(model, qn) - reset!(qn_model) - reg_nlp = RegularizedNLPModel(qn_model, NormL0(λ)) - solver = R2NSolver(reg_nlp, m_monotone = 10) - stats = RegularizedExecutionStats(reg_nlp) - t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats; - x = x0, atol = atol, rtol = rtol, σk = σk, - verbose = verbose, sub_kwargs = sub_kwargs, opnorm_maxiter = opnorm_maxiter) - metrics = ( - name = "R2N($(String(qn))) Nonmonotone", - status = string(stats.status), - time = t, - iters = get(stats.solver_specific, :outer_iter, missing), - fevals = neval_obj(qn_model), - gevals = neval_grad(qn_model), - proxcalls = stats.solver_specific[:prox_evals], - solution = stats.solution, - ) - return metrics -end - -############################# -# ====== LANCEMENTS ======= # -############################# -results = NamedTuple[] - -if :PANOC in CFG.RUN_SOLVERS - push!(results, run_panoc!(model, x0; λ = CFG.LAMBDA_L0, maxit = CFG.MAXIT_PANOC, tol = CFG.TOL, verbose = CFG.VERBOSE_PANOC)) -end -if :TR in CFG.RUN_SOLVERS - push!(results, run_tr!(model, x0; λ = CFG.LAMBDA_L0, qn = CFG.QN_FOR_R2N, atol = CFG.TOL, rtol = CFG.RTOL, verbose = CFG.VERBOSE_RO, sub_kwargs = CFG.SUB_KWARGS_R2N, opnorm_maxiter = CFG.OPNORM_MAXITER, scaling_factor = CFG.HESSIAN_SCALE)) # test LBFGS aussi -end -if :R2N in CFG.RUN_SOLVERS - push!(results, run_r2n!(model, x0; λ = CFG.LAMBDA_L0, qn = CFG.QN_FOR_R2N, atol = CFG.TOL, rtol = CFG.RTOL, - verbose = CFG.VERBOSE_RO, sub_kwargs = CFG.SUB_KWARGS_R2N, σk = CFG.SIGMAK_R2N, opnorm_maxiter = CFG.OPNORM_MAXITER)) -end - -using PrettyTables - -############################# -# ===== AFFICHAGE I/O ===== # -############################# -if x_true !== nothing - println("=== True solution (≈) ===") - println(x_true) -end - -println("\n=== Comparaison solveurs ===") -for m in results - println("\n→ ", m.name) - println(" statut = ", m.status) - println(" temps (s) = ", round(m.time, digits=4)) - if m.iters !== missing - println(" itérations = ", m.iters) - end - println(" # f eval = ", m.fevals) - println(" # ∇f eval = ", m.gevals) - if m.proxcalls !== missing - println(" # prox appels = ", m.proxcalls) - end - println(" solution (≈) = ", m.solution) -end - -println("\n") -print_config(CFG) - -if CFG.PRINT_TABLE - println("\nSummary :") - # Construire les données pour la table - data = [ - (; name=m.name, - status=string(m.status), - time=round(m.time, digits=4), - fe=m.fevals, - ge=m.gevals, - prox = m.proxcalls === missing ? 
missing : Int(m.proxcalls)) - for m in results -] - - # En-têtes - table_str = pretty_table(String, - data; - header = ["Method", "Status", "Time (s)", "#f", "#∇f", "#prox"], - tf = tf_unicode, - alignment = [:l, :c, :r, :r, :r, :r], - crop = :none, - ) - - open("FH-comparison-f.txt", "w") do io - write(io, table_str) - end -end \ No newline at end of file diff --git a/paper/examples/benchmark-nnmf.jl b/paper/examples/benchmark-nnmf.jl index a3d59d3a..c273dfbf 100644 --- a/paper/examples/benchmark-nnmf.jl +++ b/paper/examples/benchmark-nnmf.jl @@ -33,10 +33,9 @@ function print_config(CFG3) println(" PRINT_TABLE = $(CFG3.PRINT_TABLE)") println(" OPNORM_MAXITER = $(CFG3.OPNORM_MAXITER)") println(" HESSIAN_SCALE = $(CFG3.HESSIAN_SCALE)") + println(" M_MONOTONE = $(CFG3.M_MONOTONE)") end -acc = vec -> length(findall(x -> x < 1, vec)) / length(vec) * 100 # for SVM - ############################# # ===== PROBLÈME (NNMF) ===== # ############################# @@ -46,6 +45,14 @@ m, n, k = 100, 50, 5 model, nls_model, A, selected = nnmf_model(m, n, k) x0 = rand(model.meta.nvar) +#println("Initial objective value: ", obj(model, x0)) + +## project this point on the positive orthant +for i in 1:length(x0) + x0[i] < 0.0 && (x0[i] = 0.0) +end + +#println("Initial objective value (after projection): ", obj(model, x0)) CFG3.LAMBDA_L0 = norm(grad(model, rand(model.meta.nvar)), Inf) / 200 ############################# @@ -66,6 +73,7 @@ function run_panoc!(model, x0; λ = 1.0, maxit = 500, tol = 1e-3, verbose = fals gevals = f.gradient_count, proxcalls = g.prox_count, solution = x̂, + final_obj = obj(model, x̂) ) return metrics end @@ -88,7 +96,7 @@ function run_tr!(model, x0; λ = 1.0, qn = :LSR1, atol = 1e-3, rtol = 1e-3, verb t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats; x = x0, atol = atol, rtol = rtol, verbose = verbose, opnorm_maxiter = opnorm_maxiter, sub_kwargs = sub_kwargs) metrics = ( - name = "TR($(String(qn)))", + name = "TR ($(String(qn)), NNMF)", status = string(stats.status), time = t, iters = get(stats.solver_specific, :outer_iter, missing), @@ -96,6 +104,7 @@ function run_tr!(model, x0; λ = 1.0, qn = :LSR1, atol = 1e-3, rtol = 1e-3, verb gevals = neval_grad(qn_model), proxcalls = stats.solver_specific[:prox_evals], solution = stats.solution, + final_obj = obj(model, stats.solution) ) return metrics end @@ -113,7 +122,7 @@ function run_r2n!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, ve x = x0, atol = atol, rtol = rtol, σk = σk, verbose = verbose, sub_kwargs = sub_kwargs, opnorm_maxiter = opnorm_maxiter) metrics = ( - name = "R2N($(String(qn))) Nonmonotone", + name = "R2N ($(String(qn)), NNMF)", status = string(stats.status), time = t, iters = get(stats.solver_specific, :outer_iter, missing), @@ -121,6 +130,7 @@ function run_r2n!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, ve gevals = neval_grad(qn_model), proxcalls = stats.solver_specific[:prox_evals], solution = stats.solution, + final_obj = obj(model, stats.solution) ) return metrics end @@ -136,7 +146,7 @@ function run_LM!(nls_model, x0; λ = 1.0, atol = 1e-3, rtol = 1e-3, verbose = 0, x = x0, atol = atol, rtol = rtol, σk = σk, verbose = verbose) metrics = ( - name = "LM", + name = "LM (NNMF)", status = string(stats.status), time = t, iters = get(stats.solver_specific, :outer_iter, missing), @@ -144,6 +154,7 @@ function run_LM!(nls_model, x0; λ = 1.0, atol = 1e-3, rtol = 1e-3, verbose = 0, gevals = neval_jtprod_residual(nls_model) + neval_jprod_residual(nls_model), proxcalls = 
stats.solver_specific[:prox_evals],
        solution = stats.solution,
+       final_obj = obj(nls_model, stats.solution)
    )
    return metrics
end
@@ -191,30 +202,30 @@ end

println("\n")
print_config(CFG3)

-if CFG3.PRINT_TABLE
-    println("\nSummary :")
-    # Construire les données pour la table
-    data = [
-    (; name=m.name,
-        status=string(m.status),
-        time=round(m.time, digits=4),
-        fe=m.fevals,
-        ge=m.gevals,
-        prox = m.proxcalls === missing ? missing : Int(m.proxcalls))
-    for m in results
+
+println("\nSummary :")
+# Build the rows of the summary table
+data_nnmf = [
+(; name=m.name,
+   status=string(m.status),
+   time=round(m.time, digits=4),
+   fe=m.fevals,
+   ge=m.gevals,
+   prox = m.proxcalls === missing ? missing : Int(m.proxcalls),
+   obj = round(m.final_obj, digits=4))
+for m in results
 ]

-    # En-têtes
-    table_str = pretty_table(String,
-    data;
-    header = ["Method", "Status", "Time (s)", "#f", "#∇f", "#prox"],
-    tf = tf_unicode,
-    alignment = [:l, :c, :r, :r, :r, :r],
-    crop = :none,
-    )
-
-
-    open("NNMF-comparison-f.txt", "w") do io
-    write(io, table_str)
-    end
-end
\ No newline at end of file
+# Table headers
+table_str = pretty_table(String,
+    data_nnmf;
+    header = ["Method", "Status", "Time (s)", "#f", "#∇f", "#prox", "#obj"],
+    tf = tf_unicode,
+    alignment = [:l, :c, :r, :r, :r, :r, :r],
+    crop = :none,
+    )
+
+
+open("Benchmarks/NNMF-comparison-f.txt", "w") do io
+write(io, table_str)
+end
diff --git a/paper/examples/benchmark-svm.jl b/paper/examples/benchmark-svm.jl
index d5b12553..f2a8500e 100644
--- a/paper/examples/benchmark-svm.jl
+++ b/paper/examples/benchmark-svm.jl
@@ -56,7 +56,7 @@ function run_panoc!(model, x0; λ = 1.0, maxit = 500, tol = 1e-3, verbose = fals
    algo = ProximalAlgorithms.PANOC(maxit = maxit, tol = tol, verbose = verbose)
    t = @elapsed x̂, it = algo(x0 = x0, f = f, g = g)
    metrics = (
-       name = "PANOC",
+       name = "PANOC (SVM)",
        status = "first_order",
        time = t,
        iters = it,
@@ -64,6 +64,7 @@ function run_panoc!(model, x0; λ = 1.0, maxit = 500, tol = 1e-3, verbose = fals
        gevals = f.gradient_count,
        proxcalls = g.prox_count,
        solution = x̂,
+       final_obj = obj(model, x̂)
    )
    return metrics
end
@@ -86,7 +87,7 @@ function run_tr!(model, x0; λ = 1.0, qn = :LSR1, atol = 1e-3, rtol = 1e-3, verb
    t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats;
        x = x0, atol = atol, rtol = rtol, verbose = verbose, opnorm_maxiter = opnorm_maxiter, sub_kwargs = sub_kwargs)
    metrics = (
-       name = "TR($(String(qn)))",
+       name = "TR ($(String(qn)), SVM)",
        status = string(stats.status),
        time = t,
        iters = get(stats.solver_specific, :outer_iter, missing),
@@ -94,6 +95,7 @@ function run_tr!(model, x0; λ = 1.0, qn = :LSR1, atol = 1e-3, rtol = 1e-3, verb
        gevals = neval_grad(qn_model),
        proxcalls = stats.solver_specific[:prox_evals],
        solution = stats.solution,
+       final_obj = obj(model, stats.solution)
    )
    return metrics
end
@@ -111,7 +113,7 @@ function run_r2n!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, ve
        x = x0, atol = atol, rtol = rtol, σk = σk,
        verbose = verbose, sub_kwargs = sub_kwargs, opnorm_maxiter = opnorm_maxiter)
    metrics = (
-       name = "R2N($(String(qn)))",
+       name = "R2N ($(String(qn)), SVM)",
        status = string(stats.status),
        time = t,
        iters = get(stats.solver_specific, :outer_iter, missing),
@@ -119,6 +121,7 @@ function run_r2n!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, ve
        gevals = neval_grad(qn_model),
        proxcalls = stats.solver_specific[:prox_evals],
        solution = stats.solution,
+       final_obj = obj(model, stats.solution)
    )
    return metrics
end
@@ -167,29 +170,27 @@ end

println("\n")
print_config(CFG2)

-if CFG2.PRINT_TABLE
-    println("\nSummary :")
-    # Construire les données pour la table
-    data = [
-    (; name=m.name,
-        status=string(m.status),
-        time=round(m.time, digits=4),
-        fe=m.fevals,
-        ge=m.gevals,
-        prox = m.proxcalls === missing ? missing : Int(m.proxcalls))
-    for m in results
+println("\nSummary :")
+# Build the rows of the summary table
+data_svm = [
+(; name=m.name,
+   status=string(m.status),
+   time=round(m.time, digits=4),
+   fe=m.fevals,
+   ge=m.gevals,
+   prox = m.proxcalls === missing ? missing : Int(m.proxcalls),
+   obj = round(obj(model, m.solution), digits=4))
+for m in results
 ]

-    # En-têtes
-    table_str = pretty_table(String, data;
-    header = ["Method", "Status", "Time (s)", "#f", "#∇f", "#prox"],
-    tf = tf_unicode,
-    alignment = [:l, :c, :r, :r, :r, :r],
-    crop = :none,
-    )
-
-    open("SVM-comparison-f.txt", "w") do io
-    write(io, table_str)
-    end
+# Table headers
+table_str = pretty_table(String, data_svm;
+    header = ["Method", "Status", "Time (s)", "#f", "#∇f", "#prox", "#obj"],
+    tf = tf_unicode,
+    alignment = [:l, :c, :r, :r, :r, :r, :r],
+    crop = :none,
+    )

-end
\ No newline at end of file
+open("Benchmarks/SVM-comparison-f.txt", "w") do io
+    write(io, table_str)
+end
diff --git a/paper/examples/comparison-config.jl b/paper/examples/comparison-config.jl
index 8b87f2a5..56f3d075 100644
--- a/paper/examples/comparison-config.jl
+++ b/paper/examples/comparison-config.jl
@@ -15,14 +15,14 @@ Base.@kwdef mutable struct Config
    SIGMAK_R2N::Float64 = 1e5
    X0_SCALAR::Float64 = 0.1
    PRINT_TABLE::Bool = true
-   OPNORM_MAXITER::Int = 20
+   OPNORM_MAXITER::Int = 5
    HESSIAN_SCALE::Float64 = 1e-4
-   M_MONOTONE::Int = 10 # for nonmonotone R2N
+   M_MONOTONE::Int = 1 # for nonmonotone R2N
end

# One global, constant *binding* to a mutable object = type stable & editable
const CFG = Config(QN_FOR_TR = :LBFGS)
-const CFG2 = Config(SIGMAK_R2N=eps()^(1/3), TOL = 1e-4, RTOL = 1e-4, QN_FOR_R2N=:LSR1, M_MONOTONE=1)
-const CFG3 = Config(SIGMAK_R2N=1e3, TOL = 1e-4, RTOL = 1e-4, RUN_SOLVERS = [:LM, :TR, :R2N], QN_FOR_TR = :LBFGS)
+const CFG2 = Config(SIGMAK_R2N=eps()^(1 / 5), TOL = 1e-4, RTOL = 1e-4, QN_FOR_R2N=:LSR1)
+const CFG3 = Config(SIGMAK_R2N=eps()^(1 / 5), TOL = 1e-4, RTOL = 1e-4, RUN_SOLVERS = [:LM, :TR, :R2N], QN_FOR_TR = :LBFGS)

end # module
\ No newline at end of file
diff --git a/paper/examples/example1.jl b/paper/examples/example1.jl
index b292d6c7..27db1ae5 100644
--- a/paper/examples/example1.jl
+++ b/paper/examples/example1.jl
@@ -1,35 +1,13 @@
-using LinearAlgebra, Random
-using ProximalOperators
-using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization
+using LinearAlgebra, Random, ProximalOperators
+using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization  # NLPModelsModifiers provides LSR1Model
 using MLDatasets

-random_seed = 1234
-Random.seed!(random_seed)
-
-# Load MNIST from MLDatasets
-imgs, labels = MLDatasets.MNIST.traindata()
-
-# Use RegularizedProblems' preprocessing
-A, b = RegularizedProblems.generate_data(imgs, labels, (1, 7), false)
-
-# Build the models
-model, _, _ = RegularizedProblems.svm_model(A, b)
-
-# Define the Hessian approximation
-f = LBFGSModel(model)
-
-# Define the nonsmooth regularizer (L0 norm)
-λ = 1.0e-1
-h = NormL0(λ)
-
-# Define the regularized NLP model
-reg_nlp = RegularizedNLPModel(f, h)
-
-# Choose a solver (R2DH) and execution statistics tracker
-solver_r2dh= R2DHSolver(reg_nlp)
-stats = RegularizedExecutionStats(reg_nlp)
-
-# Solve the problem
-solve!(solver_r2dh, reg_nlp, stats, x = f.meta.x0, σk = 1e-6, atol = 2e-5, rtol = 2e-5, verbose = 1)
-
-@test stats.status == :first_order
+Random.seed!(1234)
+model, nls_model, _ = RegularizedProblems.svm_train_model() # Build SVM model
+f = LSR1Model(model)                # L-SR1 Hessian approximation
+λ = 1.0                             # Regularization parameter
+h = RootNormLhalf(λ)                # Nonsmooth term
+reg_nlp = RegularizedNLPModel(f, h) # Regularized problem
+solver = R2NSolver(reg_nlp)         # Choose solver
+stats = RegularizedExecutionStats(reg_nlp)
+solve!(solver, reg_nlp, stats; atol=1e-4, rtol=1e-4, verbose=0, sub_kwargs=(max_iter=200,))
diff --git a/paper/examples/example2.jl b/paper/examples/example2.jl
deleted file mode 100644
index bb6302d3..00000000
--- a/paper/examples/example2.jl
+++ /dev/null
@@ -1,27 +0,0 @@
-## After merging the PRs on TR
-
-using LinearAlgebra
-using DifferentialEquations, ProximalOperators
-using ADNLPModels, NLPModels, NLPModelsModifiers, RegularizedOptimization, RegularizedProblems
-
-# Define the Fitzhugh-Nagumo problem
-model, _, _ = RegularizedProblems.fh_model()
-
-# Define the Hessian approximation
-f = LBFGSModel(model)
-
-# Define the nonsmooth regularizer (L1 norm)
-λ = 0.1
-h = NormL1(λ)
-
-# Define the regularized NLP model
-reg_nlp = RegularizedNLPModel(f, h)
-
-# Choose a solver (TR) and execution statistics tracker
-solver_tr = TRSolver(reg_nlp)
-stats = RegularizedExecutionStats(reg_nlp)
-
-# Solve the problem
-solve!(solver_tr, reg_nlp, stats, x = f.meta.x0, atol = 1e-3, rtol = 1e-4, verbose = 10, ν = 1.0e+2)
-
-@test stats.status == :first_order

From 8d5d675faa1442b202835d7425542cb8a8a9e1f0 Mon Sep 17 00:00:00 2001
From: MohamedLaghdafHABIBOULLAH
Date: Tue, 30 Sep 2025 17:08:42 -0400
Subject: [PATCH 36/42] Update benchmark results in Benchmark.tex to reflect
 accurate performance metrics

---
 paper/Benchmark.tex | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/paper/Benchmark.tex b/paper/Benchmark.tex
index ecc37d3b..00bc28c3 100644
--- a/paper/Benchmark.tex
+++ b/paper/Benchmark.tex
@@ -1,10 +1,10 @@
 \begin{tabular}{lcrrrrr}
   \hline
   \textbf{Method} & \textbf{Status} & \textbf{$t$($s$)} & \textbf{$\#f$} & \textbf{$\#\nabla f$} & \textbf{$\#prox$} & \textbf{Objective} \\\hline
-  PANOC (SVM) & first\_order & 48.2465 & 3713 & 3713 & 2269 & 188.924 \\
-  TR (LSR1, SVM) & first\_order & 4.4479 & 347 & 291 & 4037 & 179.837 \\
-  R2N (LSR1, SVM) & first\_order & 2.8338 & 185 & 101 & 27932 & 192.493 \\
-  TR (LBFGS, NNMF) & first\_order & 0.086 & 42 & 40 & 3160 & 976.06 \\
-  R2N (LBFGS, NNMF) & first\_order & 1.0187 & 198 & 196 & 32845 & 1511.76 \\
-  LM (NNMF) & first\_order & 0.6718 & 10 & 37496 & 17407 & 131.183 \\\hline
+  PANOC (SVM) & first\_order & 52.6788 & 3713 & 3713 & 2269 & 188.924 \\
+  TR (LSR1, SVM) & first\_order & 4.7909 & 347 & 291 & 4037 & 179.837 \\
+  R2N (LSR1, SVM) & first\_order & 2.5851 & 185 & 101 & 27932 & 192.493 \\
+  TR (LBFGS, NNMF) & first\_order & 0.0853 & 42 & 40 & 3160 & 976.06 \\
+  R2N (LBFGS, NNMF) & first\_order & 0.4935 & 169 & 107 & 17789 & 411.727 \\
+  LM (NNMF) & first\_order & 0.451 & 15 & 27636 & 12261 & 131.183 \\\hline
 \end{tabular}

From 037ca76f8872444f3e0ebb691f674a02aeba9420 Mon Sep 17 00:00:00 2001
From: MohamedLaghdafHABIBOULLAH
Date: Tue, 30 Sep 2025 17:51:58 -0400
Subject: [PATCH 37/42] Improve clarity in the description of splitting schemes
 in the model-based framework section of paper.md

---
 paper/paper.bib | 11 +++++++++++
 paper/paper.md  |  7 ++-----
 2 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/paper/paper.bib b/paper/paper.bib
index 01e69776..37da40fd 100644
--- a/paper/paper.bib
+++ b/paper/paper.bib
@@ -148,3 +148,14 @@ @Article{ themelis-stella-patrinos-2017
   Pages	= {2274--2303},
   doi	= {10.1137/16M1080240},
 }
+
+@Article{ eckstein1992douglas,
+  Author	= {J. Eckstein and D. P. Bertsekas},
+  Title	= {On the {D}ouglas--{R}achford splitting method and the proximal point algorithm for maximal monotone operators},
+  Journal	= {Mathematical Programming},
+  Year	= 1992,
+  Volume	= 55,
+  Number	= 1,
+  Pages	= {293--318},
+  Publisher	= {Springer},
+}
diff --git a/paper/paper.md b/paper/paper.md
index e30f99dc..8c2f74ec 100644
--- a/paper/paper.md
+++ b/paper/paper.md
@@ -65,11 +65,8 @@ While [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperator
 
 ## Model-based framework for nonsmooth methods
 
 In Julia, \eqref{eq:nlp} can be solved using [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), which implements in-place, first-order, line-search–based methods [@stella-themelis-sopasakis-patrinos-2017;@themelis-stella-patrinos-2017].
-Most of these methods are generally splitting schemes that alternate between taking steps along some direction $d$ that depends on the gradient of $f$ and applying proximal steps on the nonsmooth part $h$ followed for some of them such as **PANOC** solver by a line-search mechanism along $d$.
-```
-(Maybe remove the former sentence if it means that we should detail each algorithms ?)
-```
-Currently, [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) provides only L-BFGS as a quasi-Newton option.
+Most of these methods are splitting schemes that either alternate between the proximal operators of $f$ and $h$, as in the **Douglas–Rachford** solver [@eckstein1992douglas], or take a step along a direction $d$, which depends on the gradient of $f$, possibly modified by a Quasi-Newton approximation (e.g., L-BFGS)—followed by proximal steps on the nonsmooth part $h$. In some cases, such as with the **PANOC** [@stella-themelis-sopasakis-patrinos-2017] solver, this process is augmented with a line-search mechanism along $d$.
 
 By contrast, [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) focuses on model-based approaches such as trust-region and quadratic regularization algorithms.
 As shown in [@aravkin-baraldi-orban-2022], model-based methods typically require fewer evaluations of the objective and its gradient than first-order line search methods, at the expense of requiring many proximal iterations to solve the subproblems.
 Although these subproblems may require many proximal iterations, each proximal computation is inexpensive for numerous commonly used nonsmooth functions, such as separable penalties and bound constraints (see examples below), making the overall approach efficient for large-scale problems.
From c0b010498603ad88984c7a357738282028f9f74f Mon Sep 17 00:00:00 2001
From: MohamedLaghdafHABIBOULLAH
Date: Tue, 30 Sep 2025 17:58:26 -0400
Subject: [PATCH 38/42] Refine descriptions in the model-based framework
 section of paper.md for clarity and consistency

---
 paper/paper.md | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/paper/paper.md b/paper/paper.md
index 8c2f74ec..51a07a20 100644
--- a/paper/paper.md
+++ b/paper/paper.md
@@ -56,7 +56,7 @@ Moreover, they implement non-monotone strategies to accept trial points, which c
 ## Requirements of the ShiftedProximalOperators.jl
 
 The nonsmooth part $h$ must have a computable proximal mapping, defined as
-$$\text{prox}_{\nu h}(v) = \underset{x \in \mathbb{R}^n}{\arg\min} \frac{1}{2} \|t - v\|^2 + \nu h(t).$$
+$$\text{prox}_{\nu h}(v) = \underset{x \in \mathbb{R}^n}{\arg\min} \frac{1}{2} \|x - v\|^2 + \nu h(x).$$
 
 While [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl) provides many standard proximal mappings, [ShiftedProximalOperators.jl](https://github.com/JuliaSmoothOptimizers/ShiftedProximalOperators.jl) also supplies **shifted** variants of these mappings, which are not available in [ProximalOperators.jl](https://www.github.com/JuliaFirstOrder/ProximalOperators.jl).
@@ -65,7 +65,7 @@ While [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperator
 ## Model-based framework for nonsmooth methods
 
 In Julia, \eqref{eq:nlp} can be solved using [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl), which implements in-place, first-order, line-search–based methods [@stella-themelis-sopasakis-patrinos-2017;@themelis-stella-patrinos-2017].
-Most of these methods are splitting schemes that either alternate between the proximal operators of $f$ and $h$, as in the **Douglas–Rachford** solver [@eckstein1992douglas], or take a step along a direction $d$, which depends on the gradient of $f$, possibly modified by a Quasi-Newton approximation (e.g., L-BFGS)—followed by proximal steps on the nonsmooth part $h$. In some cases, such as with the **PANOC** [@stella-themelis-sopasakis-patrinos-2017] solver, this process is augmented with a line-search mechanism along $d$.
+Most of these methods are splitting schemes that either alternate between the proximal operators of $f$ and $h$, as in the **Douglas–Rachford** solver [@eckstein1992douglas], or take a step along a direction $d$, which depends on the gradient of $f$, possibly modified by an L-BFGS quasi-Newton approximation, followed by proximal steps on the nonsmooth part $h$. In some cases, such as with the **PANOC** [@stella-themelis-sopasakis-patrinos-2017] solver, this process is augmented with a line-search mechanism along $d$.
 
 By contrast, [RegularizedOptimization.jl](https://github.com/JuliaSmoothOptimizers/RegularizedOptimization.jl) focuses on model-based approaches such as trust-region and quadratic regularization algorithms.
 As shown in [@aravkin-baraldi-orban-2022], model-based methods typically require fewer evaluations of the objective and its gradient than first-order line search methods, at the expense of requiring many proximal iterations to solve the subproblems.
@@ -119,7 +119,6 @@ This design makes for a convenient source of reproducible problem instances for
 The package includes a comprehensive suite of unit tests that cover all functionalities, ensuring reliability and correctness.
Extensive documentation is provided, including a user guide, API reference, and examples to help users get started quickly. -Aqua.jl is used to test the package dependencies. Documentation is built using Documenter.jl. ## Application studies @@ -168,7 +167,7 @@ solver = LMSolver(reg_nls) # Choose solver ## Numerical results -We compare **PANOC** [@stella-themelis-sopasakis-patrinos-2017](from [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl)) against **TR**, **R2N**, and **LM** from our library. +We compare **PANOC** [@stella-themelis-sopasakis-patrinos-2017] (from [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl)) against **TR**, **R2N**, and **LM** from our library. In order to do so, we implemented a wrapper for **PANOC** to make it compatible with our problem definition. The results are summarized in the combined table below: From f92e92e4148ffc95300fa32079133ddc477366e7 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Tue, 30 Sep 2025 19:46:06 -0400 Subject: [PATCH 39/42] Refactor benchmarking scripts and update results table - Removed old Benchmark.tex file and replaced it with a new version containing updated results. - Consolidated benchmarking logic into a single Benchmark.jl file, removing separate benchmark-nnmf.jl and benchmark-svm.jl files. - Updated comparison-config.jl to modify tolerance values and adjust configurations for benchmarking. - Adjusted imports in Bench-utils.jl to remove unused dependencies and streamline the code. - Updated Project.toml to include new dependencies for PrettyTables and ProximalAlgorithms. - Modified the paper.md to correctly reference the new Benchmark.tex file location. --- paper/Benchmark.tex | 10 - paper/examples/Bench-utils.jl | 8 +- paper/examples/Benchmark.jl | 291 ++++++++++++++++++++++++++++ paper/examples/Benchmark.tex | 10 + paper/examples/Generate_results.jl | 16 -- paper/examples/Project.toml | 11 +- paper/examples/benchmark-nnmf.jl | 231 ---------------------- paper/examples/benchmark-svm.jl | 196 ------------------- paper/examples/comparison-config.jl | 14 +- paper/paper.md | 2 +- 10 files changed, 310 insertions(+), 479 deletions(-) delete mode 100644 paper/Benchmark.tex create mode 100644 paper/examples/Benchmark.jl create mode 100644 paper/examples/Benchmark.tex delete mode 100644 paper/examples/Generate_results.jl delete mode 100644 paper/examples/benchmark-nnmf.jl delete mode 100644 paper/examples/benchmark-svm.jl diff --git a/paper/Benchmark.tex b/paper/Benchmark.tex deleted file mode 100644 index 00bc28c3..00000000 --- a/paper/Benchmark.tex +++ /dev/null @@ -1,10 +0,0 @@ -\begin{tabular}{lcrrrrr} - \hline - \textbf{Method} & \textbf{Status} & \textbf{$t$($s$)} & \textbf{$\#f$} & \textbf{$\#\nabla f$} & \textbf{$\#prox$} & \textbf{Objective} \\\hline - PANOC (SVM) & first\_order & 52.6788 & 3713 & 3713 & 2269 & 188.924 \\ - TR (LSR1, SVM) & first\_order & 4.7909 & 347 & 291 & 4037 & 179.837 \\ - R2N (LSR1, SVM) & first\_order & 2.5851 & 185 & 101 & 27932 & 192.493 \\ - TR (LBFGS, NNMF) & first\_order & 0.0853 & 42 & 40 & 3160 & 976.06 \\ - R2N (LBFGS, NNMF) & first\_order & 0.4935 & 169 & 107 & 17789 & 411.727 \\ - LM (NNMF) & first\_order & 0.451 & 15 & 27636 & 12261 & 131.183 \\\hline -\end{tabular} diff --git a/paper/examples/Bench-utils.jl b/paper/examples/Bench-utils.jl index 486ec18c..74b29f7d 100644 --- a/paper/examples/Bench-utils.jl +++ b/paper/examples/Bench-utils.jl @@ -2,22 +2,16 @@ module BenchUtils using ProximalAlgorithms using 
ProximalCore -using ADNLPModels, NLPModels +using NLPModels  export Counting, reset_counters!, make_adnlp_compatible!  -(f::ADNLPModel)(x) = obj(f, x) -function ProximalAlgorithms.value_and_gradient(f::ADNLPModel, x) -    return obj(f, x), grad(f, x) -end -  (f::AbstractNLPModel)(x) = obj(f,x)  function ProximalAlgorithms.value_and_gradient(f::AbstractNLPModel, x)      return obj(f,x), grad(f, x)  end  -  "Counting wrapper for f or g (counts #obj, #∇f, #prox)."  mutable struct Counting{T}     f::T diff --git a/paper/examples/Benchmark.jl b/paper/examples/Benchmark.jl new file mode 100644 index 00000000..590af9d8 --- /dev/null +++ b/paper/examples/Benchmark.jl @@ -0,0 +1,291 @@ + +############################# +# ======== IMPORTS ======== # +############################# +using Random, LinearAlgebra +using ProximalOperators, ProximalCore, ProximalAlgorithms +using ShiftedProximalOperators +using NLPModels, NLPModelsModifiers +using RegularizedOptimization, RegularizedProblems +using MLDatasets +using PrettyTables +using LaTeXStrings + + +# Local includes +include("comparison-config.jl") +using .ComparisonConfig: CFG, CFG2 + +include("Bench-utils.jl") +using .BenchUtils + +############################# +# ===== Helper utils ====== # +############################# + +# Generic config printer (works for both CFG / CFG2) +function print_config(cfg) +    println("Configuration:") +    for fld in fieldnames(typeof(cfg)) +        val = getfield(cfg, fld) +        println(rpad("  $(String(fld))", 16), " = ", val) +    end +end + +# Common QN selector +function ensure_qn(model, which::Symbol) +    which === :LBFGS && return LBFGSModel(model) +    which === :LSR1  && return LSR1Model(model) +    error("Unknown QN: $which (expected :LBFGS or :LSR1)") +end + +# Close a PrettyTables Markdown string by adding a bottom rule +function close_markdown_table(table_str::AbstractString) +    lines = split(String(table_str), '\n') +    isempty(lines) && return table_str +    sep = length(lines) >= 2 ? 
lines[2] : repeat("-", 10) +    push!(lines, sep) +    return join(lines, '\n') +end + +############################# +# ======= SVM bench ======= # +############################# + +# Accuracy for SVM (as in original script) +acc = vec -> length(findall(x -> x < 1, vec)) / length(vec) * 100 + +# PANOC (SVM) with RootNormLhalf +function run_panoc_svm!(model, x0; λ = 1.0, maxit = 500, tol = 1e-3, verbose = false) +    f = BenchUtils.Counting(model) +    g = BenchUtils.Counting(RootNormLhalf(λ)) +    algo = ProximalAlgorithms.PANOC(maxit = maxit, tol = tol, verbose = verbose) +    t = @elapsed x̂, it = algo(x0 = x0, f = f, g = g) +    return ( +        name = "PANOC (SVM)", +        status = "first_order", +        time = t, +        iters = it, +        fevals = f.eval_count, +        gevals = f.gradient_count, +        proxcalls = g.prox_count, +        solution = x̂, +        final_obj = obj(model, x̂) +    ) +end + +function run_tr_svm!(model, x0; λ = 1.0, qn = :LSR1, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;)) +    qn_model = ensure_qn(model, qn) +    reset!(qn_model) +    reg_nlp = RegularizedNLPModel(qn_model, RootNormLhalf(λ)) +    solver = TRSolver(reg_nlp) +    stats = RegularizedExecutionStats(reg_nlp) +    t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats; +        x = x0, atol = atol, rtol = rtol, verbose = verbose, sub_kwargs = sub_kwargs) +    return ( +        name = "TR ($(String(qn)), SVM)", +        status = string(stats.status), +        time = t, +        iters = get(stats.solver_specific, :outer_iter, missing), +        fevals = neval_obj(qn_model), +        gevals = neval_grad(qn_model), +        proxcalls = get(stats.solver_specific, :prox_evals, missing), +        solution = stats.solution, +        final_obj = obj(model, stats.solution) +    ) +end + +function run_r2n_svm!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;)) +    qn_model = ensure_qn(model, qn) +    reset!(qn_model) +    reg_nlp = RegularizedNLPModel(qn_model, RootNormLhalf(λ)) +    solver = R2NSolver(reg_nlp) +    stats = RegularizedExecutionStats(reg_nlp) +    t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats; +        x = x0, atol = atol, rtol = rtol, verbose = verbose, sub_kwargs = sub_kwargs) +    return ( +        name = "R2N ($(String(qn)), SVM)", +        status = string(stats.status), +        time = t, +        iters = get(stats.solver_specific, :outer_iter, missing), +        fevals = neval_obj(qn_model), +        gevals = neval_grad(qn_model), +        proxcalls = get(stats.solver_specific, :prox_evals, missing), +        solution = stats.solution, +        final_obj = obj(model, stats.solution) +    ) +end + +function bench_svm!(cfg = CFG) +    Random.seed!(cfg.SEED) +    model, nls_train, _ = RegularizedProblems.svm_train_model() +    x0 = model.meta.x0 + +    results = NamedTuple[] +    (:PANOC in cfg.RUN_SOLVERS) && push!(results, run_panoc_svm!(model, x0; λ = cfg.LAMBDA_L0, maxit = cfg.MAXIT_PANOC, tol = cfg.TOL, verbose = cfg.VERBOSE_PANOC)) +    (:TR in cfg.RUN_SOLVERS) && push!(results, run_tr_svm!(model, x0; λ = cfg.LAMBDA_L0, qn = cfg.QN_FOR_TR, atol = cfg.TOL, rtol = cfg.RTOL, verbose = cfg.VERBOSE_RO, sub_kwargs = cfg.SUB_KWARGS_R2N)) +    (:R2N in cfg.RUN_SOLVERS) && push!(results, run_r2n_svm!(model, x0; λ = cfg.LAMBDA_L0, qn = cfg.QN_FOR_R2N, atol = cfg.TOL, rtol = cfg.RTOL, verbose = cfg.VERBOSE_RO, sub_kwargs = cfg.SUB_KWARGS_R2N)) + +    # Print quick summary +    println("\n=== SVM: solver comparison ===") +    for m in results +        println("\n→ ", m.name) +        println("   status         = ", m.status) +        println("   time (s)       = ", round(m.time, digits = 4)) +        m.iters !== missing && println("   outer iters    = ", m.iters) +        println("   # f eval       = ", m.fevals) +        println("   # ∇f eval      = ", m.gevals) +        m.proxcalls 
!== missing && println("   # prox calls   = ", Int(m.proxcalls)) +        println("   final objective= ", round(obj(model, m.solution), digits = 4)) +        println("   accuracy (%)   = ", round(acc(residual(nls_train, m.solution)), digits = 1)) +    end + +    println("\nSVM Config:"); print_config(cfg) + +    data_svm = [ +        (; name=m.name, +           status=string(m.status), +           time=round(m.time, digits=4), +           fe=m.fevals, +           ge=m.gevals, +           prox = m.proxcalls === missing ? missing : Int(m.proxcalls), +           obj = round(obj(model, m.solution), digits=4)) +        for m in results +    ] + +    return data_svm +end + +############################# +# ======= NNMF bench ====== # +############################# + + +function run_tr_nnmf!(model, x0; λ = 1.0, qn = :LSR1, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), selected = nothing) +    qn_model = ensure_qn(model, qn) +    reset!(qn_model) +    reg_nlp = RegularizedNLPModel(qn_model, NormL0(λ), selected) +    solver = TRSolver(reg_nlp) +    stats = RegularizedExecutionStats(reg_nlp) +    t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats; +        x = x0, atol = atol, rtol = rtol, verbose = verbose, sub_kwargs = sub_kwargs) +    return ( +        name = "TR ($(String(qn)), NNMF)", +        status = string(stats.status), +        time = t, +        iters = get(stats.solver_specific, :outer_iter, missing), +        fevals = neval_obj(qn_model), +        gevals = neval_grad(qn_model), +        proxcalls = get(stats.solver_specific, :prox_evals, missing), +        solution = stats.solution, +        final_obj = obj(model, stats.solution) +    ) +end + +function run_r2n_nnmf!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), selected = nothing) +    qn_model = ensure_qn(model, qn) +    reset!(qn_model) +    reg_nlp = RegularizedNLPModel(qn_model, NormL0(λ), selected) +    solver = R2NSolver(reg_nlp) +    stats = RegularizedExecutionStats(reg_nlp) +    t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats; +        x = x0, atol = atol, rtol = rtol, verbose = verbose, +        sub_kwargs = sub_kwargs) +    return ( +        name = "R2N ($(String(qn)), NNMF)", +        status = string(stats.status), +        time = t, +        iters = get(stats.solver_specific, :outer_iter, missing), +        fevals = neval_obj(qn_model), +        gevals = neval_grad(qn_model), +        proxcalls = get(stats.solver_specific, :prox_evals, missing), +        solution = stats.solution, +        final_obj = obj(model, stats.solution) +    ) +end + +function run_LM_nnmf!(nls_model, x0; λ = 1.0, atol = 1e-3, rtol = 1e-3, verbose = 0, selected = nothing) +    reg_nls = RegularizedNLSModel(nls_model, NormL0(λ), selected) +    solver = LMSolver(reg_nls) +    stats = RegularizedExecutionStats(reg_nls) +    t = @elapsed RegularizedOptimization.solve!(solver, reg_nls, stats; +        x = x0, atol = atol, rtol = rtol, verbose = verbose) +    return ( +        name = "LM (NNMF)", +        status = string(stats.status), +        time = t, +        iters = get(stats.solver_specific, :outer_iter, missing), +        fevals = neval_residual(nls_model), +        gevals = neval_jtprod_residual(nls_model) + neval_jprod_residual(nls_model), +        proxcalls = get(stats.solver_specific, :prox_evals, missing), +        solution = stats.solution, +        final_obj = obj(nls_model, stats.solution) +    ) +end + +function bench_nnmf!(cfg = CFG2; m = 100, n = 50, k = 5) +    Random.seed!(cfg.SEED) + +    model, nls_model, _, selected = nnmf_model(m, n, k) + +    # build x0 on the positive orthant, as in the original script +    x0 = max.(rand(model.meta.nvar), 0.0) + +    # heuristic lambda, scaled from the gradient at a random point +    cfg.LAMBDA_L0 = norm(grad(model, rand(model.meta.nvar)), Inf) / 200 + +    results = NamedTuple[] +    (:TR in cfg.RUN_SOLVERS) && push!(results, run_tr_nnmf!(model, x0; λ = 
cfg.LAMBDA_L0, qn = cfg.QN_FOR_TR, atol = cfg.TOL, rtol = cfg.RTOL, verbose = cfg.VERBOSE_RO, sub_kwargs = cfg.SUB_KWARGS_R2N, selected = selected)) + (:R2N in cfg.RUN_SOLVERS) && push!(results, run_r2n_nnmf!(model, x0; λ = cfg.LAMBDA_L0, qn = cfg.QN_FOR_R2N, atol = cfg.TOL, rtol = cfg.RTOL, verbose = cfg.VERBOSE_RO, sub_kwargs = cfg.SUB_KWARGS_R2N, selected = selected)) + (:LM in cfg.RUN_SOLVERS) && push!(results, run_LM_nnmf!(nls_model, x0; λ = cfg.LAMBDA_L0, atol = cfg.TOL, rtol = cfg.RTOL, verbose = cfg.VERBOSE_RO, selected = selected)) + + println("\n=== NNMF: solver comparison ===") + for m in results + println("\n→ ", m.name) + println(" status = ", m.status) + println(" time (s) = ", round(m.time, digits = 4)) + m.iters !== missing && println(" outer iters = ", m.iters) + println(" # f eval = ", m.fevals) + println(" # ∇f eval = ", m.gevals) + m.proxcalls !== missing && println(" # prox calls = ", Int(m.proxcalls)) + println(" final objective= ", round(obj(model, m.solution), digits = 4)) + end + + println("\nNNMF Config:"); print_config(cfg) + + data_nnmf = [ + (; name=m.name, + status=string(m.status), + time=round(m.time, digits=4), + fe=m.fevals, + ge=m.gevals, + prox = m.proxcalls === missing ? missing : Int(m.proxcalls), + obj = round(m.final_obj, digits=4)) + for m in results + ] + + return data_nnmf +end + +# ############################# +# # ========= Main ========== # +# ############################# + +function main() + data_svm = bench_svm!(CFG) + data_nnmf = bench_nnmf!(CFG2) + + # concat both datasets + all_data = vcat(data_svm, data_nnmf) + + table_str = pretty_table(String, all_data; + header = ["Method", "Status", L"$t$($s$)", L"$\#f$", L"$\#\nabla f$", L"$\#prox$", "Objective"], + backend = Val(:latex), + alignment = [:l, :c, :r, :r, :r, :r, :r], + ) + + open("Benchmark.tex", "w") do io + write(io, table_str) + end +end + diff --git a/paper/examples/Benchmark.tex b/paper/examples/Benchmark.tex new file mode 100644 index 00000000..0f38c3ae --- /dev/null +++ b/paper/examples/Benchmark.tex @@ -0,0 +1,10 @@ +\begin{tabular}{lcrrrrr} + \hline + \textbf{Method} & \textbf{Status} & \textbf{$t$($s$)} & \textbf{$\#f$} & \textbf{$\#\nabla f$} & \textbf{$\#prox$} & \textbf{Objective} \\\hline + PANOC (SVM) & first\_order & 14.642 & 3713 & 3713 & 2269 & 188.924 \\ + TR (LSR1, SVM) & first\_order & 1.8405 & 347 & 291 & 4037 & 179.837 \\ + R2N (LSR1, SVM) & first\_order & 0.9269 & 185 & 101 & 27932 & 192.493 \\ + TR (LBFGS, NNMF) & first\_order & 0.0552 & 42 & 40 & 3160 & 976.06 \\ + R2N (LBFGS, NNMF) & first\_order & 0.3024 & 169 & 107 & 17789 & 411.727 \\ + LM (NNMF) & first\_order & 0.2622 & 15 & 27763 & 12320 & 131.183 \\\hline +\end{tabular} diff --git a/paper/examples/Generate_results.jl b/paper/examples/Generate_results.jl deleted file mode 100644 index d4251e91..00000000 --- a/paper/examples/Generate_results.jl +++ /dev/null @@ -1,16 +0,0 @@ -include("benchmark-nnmf.jl") - -include("benchmark-svm.jl") - -using LaTeXStrings -all_data = vcat(data_svm, data_nnmf) - -table_str = pretty_table(String, all_data; - header = ["Method", "Status", L"$t$($s$)", L"$\#f$", L"$\#\nabla f$", L"$\#prox$", "Objective"], - backend = Val(:latex), - alignment = [:l, :c, :r, :r, :r, :r, :r], - ) - -open("Benchmark.tex", "w") do io - write(io, table_str) -end \ No newline at end of file diff --git a/paper/examples/Project.toml b/paper/examples/Project.toml index 74aafd68..05b0675f 100644 --- a/paper/examples/Project.toml +++ b/paper/examples/Project.toml @@ -2,15 +2,10 @@ MLDatasets = 
"eb30cadb-4394-5ae3-aed4-317e484a6458" NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6" NLPModelsModifiers = "e01155f1-5c6f-4375-a9d8-616dd036575f" +PrettyTables = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d" +ProximalAlgorithms = "140ffc9f-1907-541a-a177-7475e0a401e9" +ProximalCore = "dc4f5ac2-75d1-4f31-931e-60435d74994b" ProximalOperators = "a725b495-10eb-56fe-b38b-717eba820537" RegularizedOptimization = "196f2941-2d58-45ba-9f13-43a2532b2fa8" RegularizedProblems = "ea076b23-609f-44d2-bb12-a4ae45328278" ShiftedProximalOperators = "d4fd37fa-580c-4e43-9b30-361c21aae263" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" - -[compat] -NLPModels = "0.19, 0.20, 0.21" -NLPModelsModifiers = "0.7" -ProximalOperators = "0.15" -RegularizedProblems = "0.1" -ShiftedProximalOperators = "0.2" diff --git a/paper/examples/benchmark-nnmf.jl b/paper/examples/benchmark-nnmf.jl deleted file mode 100644 index c273dfbf..00000000 --- a/paper/examples/benchmark-nnmf.jl +++ /dev/null @@ -1,231 +0,0 @@ - -############################# -# ======== IMPORTS ======== # -############################# -using Random, LinearAlgebra -using ProximalOperators, ProximalCore, ProximalAlgorithms -using ADNLPModels, NLPModels, NLPModelsModifiers -using RegularizedOptimization, RegularizedProblems -using ShiftedProximalOperators -using MLDatasets - -include("comparison-config.jl") -using .ComparisonConfig: CFG3 - -include("Bench-utils.jl") -using .BenchUtils - -function print_config(CFG3) - println("Configuration:") - println(" SEED = $(CFG3.SEED)") - println(" LAMBDA_L0 = $(CFG3.LAMBDA_L0)") - println(" TOL = $(CFG3.TOL)") - println(" RTOL = $(CFG3.RTOL)") - println(" MAXIT_PANOC = $(CFG3.MAXIT_PANOC)") - println(" VERBOSE_PANOC = $(CFG3.VERBOSE_PANOC)") - println(" VERBOSE_RO = $(CFG3.VERBOSE_RO)") - println(" RUN_SOLVERS = $(CFG3.RUN_SOLVERS)") - println(" QN_FOR_TR = $(CFG3.QN_FOR_TR)") - println(" QN_FOR_R2N = $(CFG3.QN_FOR_R2N)") - println(" SUB_KWARGS_R2N = $(CFG3.SUB_KWARGS_R2N)") - println(" SIGMAK_R2N = $(CFG3.SIGMAK_R2N)") - println(" X0_SCALAR = $(CFG3.X0_SCALAR)") - println(" PRINT_TABLE = $(CFG3.PRINT_TABLE)") - println(" OPNORM_MAXITER = $(CFG3.OPNORM_MAXITER)") - println(" HESSIAN_SCALE = $(CFG3.HESSIAN_SCALE)") - println(" M_MONOTONE = $(CFG3.M_MONOTONE)") -end - -############################# -# ===== PROBLÈME (NNMF) ===== # -############################# -Random.seed!(CFG3.SEED) - -m, n, k = 100, 50, 5 -model, nls_model, A, selected = nnmf_model(m, n, k) - -x0 = rand(model.meta.nvar) -#println("Initial objective value: ", obj(model, x0)) - -## project this point on the positive orthant -for i in 1:length(x0) - x0[i] < 0.0 && (x0[i] = 0.0) -end - -#println("Initial objective value (after projection): ", obj(model, x0)) - -CFG3.LAMBDA_L0 = norm(grad(model, rand(model.meta.nvar)), Inf) / 200 -############################# -# ======= PANOC run ======= # -############################# -function run_panoc!(model, x0; λ = 1.0, maxit = 500, tol = 1e-3, verbose = false) - λ = norm(grad(model, rand(model.meta.nvar)), Inf) / 200 - f = BenchUtils.Counting(model) - g = BenchUtils.Counting(NormL0(λ)) - algo = ProximalAlgorithms.PANOC(maxit = maxit, tol = tol, verbose = verbose) - t = @elapsed x̂, it = algo(x0 = x0, f = f, g = g) - metrics = ( - name = "PANOC", - status = "first_order", - time = t, - iters = it, - fevals = f.eval_count, - gevals = f.gradient_count, - proxcalls = g.prox_count, - solution = x̂, - final_obj = obj(model, x̂) - ) - return metrics -end - -############################# -# ======== TR run ========= # 
-############################# -function ensure_qn(model, which::Symbol) - which === :LBFGS && return LBFGSModel(model) - which === :LSR1 && return LSR1Model(model) - error("QN inconnu: $which (attendu :LBFGS ou :LSR1)") -end - -function run_tr!(model, x0; λ = 1.0, qn = :LSR1, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), selected = selected, opnorm_maxiter = 20) - qn_model = ensure_qn(model, qn) - reset!(qn_model) # reset des compteurs - reg_nlp = RegularizedNLPModel(qn_model, NormL0(λ), selected) - solver = TRSolver(reg_nlp) - stats = RegularizedExecutionStats(reg_nlp) - t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats; - x = x0, atol = atol, rtol = rtol, verbose = verbose, opnorm_maxiter = opnorm_maxiter, sub_kwargs = sub_kwargs) - metrics = ( - name = "TR ($(String(qn)), NNMF)", - status = string(stats.status), - time = t, - iters = get(stats.solver_specific, :outer_iter, missing), - fevals = neval_obj(qn_model), - gevals = neval_grad(qn_model), - proxcalls = stats.solver_specific[:prox_evals], - solution = stats.solution, - final_obj = obj(model, stats.solution) - ) - return metrics -end - -############################# -# ======== R2N run ======== # -############################# -function run_r2n!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), σk = 1e5, opnorm_maxiter = 20) - qn_model = ensure_qn(model, qn) - reset!(qn_model) - reg_nlp = RegularizedNLPModel(qn_model, NormL0(λ)) - solver = R2NSolver(reg_nlp, m_monotone = 10) - stats = RegularizedExecutionStats(reg_nlp) - t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats; - x = x0, atol = atol, rtol = rtol, σk = σk, - verbose = verbose, sub_kwargs = sub_kwargs, opnorm_maxiter = opnorm_maxiter) - metrics = ( - name = "R2N ($(String(qn)), NNMF)", - status = string(stats.status), - time = t, - iters = get(stats.solver_specific, :outer_iter, missing), - fevals = neval_obj(qn_model), - gevals = neval_grad(qn_model), - proxcalls = stats.solver_specific[:prox_evals], - solution = stats.solution, - final_obj = obj(model, stats.solution) - ) - return metrics -end - -############################# -# ======== LM run ======== # -############################# -function run_LM!(nls_model, x0; λ = 1.0, atol = 1e-3, rtol = 1e-3, verbose = 0, σk = 1e0) - reg_nls = RegularizedNLSModel(nls_model, NormL0(λ)) - solver = LMSolver(reg_nls) - stats = RegularizedExecutionStats(reg_nls) - t = @elapsed RegularizedOptimization.solve!(solver, reg_nls, stats; - x = x0, atol = atol, rtol = rtol, σk = σk, - verbose = verbose) - metrics = ( - name = "LM (NNMF)", - status = string(stats.status), - time = t, - iters = get(stats.solver_specific, :outer_iter, missing), - fevals = neval_residual(nls_model), - gevals = neval_jtprod_residual(nls_model) + neval_jprod_residual(nls_model), - proxcalls = stats.solver_specific[:prox_evals], - solution = stats.solution, - final_obj = obj(nls_model, stats.solution) - ) - return metrics -end - -############################# -# ====== LANCEMENTS ======= # -############################# -results = NamedTuple[] - -if :TR in CFG3.RUN_SOLVERS - push!(results, run_tr!(model, x0; λ = CFG3.LAMBDA_L0, qn = CFG3.QN_FOR_TR, atol = CFG3.TOL, rtol = CFG3.RTOL, verbose = CFG3.VERBOSE_RO, sub_kwargs = CFG3.SUB_KWARGS_R2N, opnorm_maxiter = CFG3.OPNORM_MAXITER)) -end -if :R2N in CFG3.RUN_SOLVERS - push!(results, run_r2n!(model, x0; λ = CFG3.LAMBDA_L0, qn = CFG3.QN_FOR_R2N, atol = CFG3.TOL, rtol = CFG3.RTOL, - verbose = CFG3.VERBOSE_RO, sub_kwargs = 
CFG3.SUB_KWARGS_R2N, σk = CFG3.SIGMAK_R2N, opnorm_maxiter = CFG3.OPNORM_MAXITER)) -end -if :LM in CFG3.RUN_SOLVERS - push!(results, run_LM!(nls_model, x0; λ = CFG3.LAMBDA_L0, atol = CFG3.TOL, rtol = CFG3.RTOL, - verbose = CFG3.VERBOSE_RO, σk = CFG3.SIGMAK_R2N)) -end - -using PrettyTables - -############################# -# ===== AFFICHAGE I/O ===== # -############################# - - -println("\n=== Comparaison solveurs ===") -for m in results - println("\n→ ", m.name) - println(" statut = ", m.status) - println(" temps (s) = ", round(m.time, digits=4)) - if m.iters !== missing - println(" itérations = ", m.iters) - end - println(" # f eval = ", m.fevals) - println(" # ∇f eval = ", m.gevals) - if m.proxcalls !== missing - println(" # prox appels = ", Int(m.proxcalls)) - end - println(" objective final", " = ", round(obj(model, m.solution), digits=4)) -end - -println("\n") -print_config(CFG3) - - -println("\nSummary :") -# Construire les données pour la table -data_nnmf = [ -(; name=m.name, - status=string(m.status), - time=round(m.time, digits=4), - fe=m.fevals, - ge=m.gevals, - prox = m.proxcalls === missing ? missing : Int(m.proxcalls), - obj = round(m.final_obj, digits=4)) -for m in results -] - -# En-têtes -table_str = pretty_table(String, - data_nnmf; - header = ["Method", "Status", "Time (s)", "#f", "#∇f", "#prox", "#obj"], - tf = tf_unicode, - alignment = [:l, :c, :r, :r, :r, :r, :r], - crop = :none, - ) - - -open("Benchmarks/NNMF-comparison-f.txt", "w") do io -write(io, table_str) -end diff --git a/paper/examples/benchmark-svm.jl b/paper/examples/benchmark-svm.jl deleted file mode 100644 index f2a8500e..00000000 --- a/paper/examples/benchmark-svm.jl +++ /dev/null @@ -1,196 +0,0 @@ - -############################# -# ======== IMPORTS ======== # -############################# -using Random, LinearAlgebra -using ProximalOperators, ProximalCore, ProximalAlgorithms -using ADNLPModels, NLPModels, NLPModelsModifiers -using ShiftedProximalOperators -using RegularizedOptimization, RegularizedProblems -using MLDatasets - -include("comparison-config.jl") -using .ComparisonConfig: CFG2 - -include("Bench-utils.jl") -using .BenchUtils - -function print_config(CFG2) - println("Configuration:") - println(" SEED = $(CFG2.SEED)") - println(" LAMBDA_L0 = $(CFG2.LAMBDA_L0)") - println(" TOL = $(CFG2.TOL)") - println(" RTOL = $(CFG2.RTOL)") - println(" MAXIT_PANOC = $(CFG2.MAXIT_PANOC)") - println(" VERBOSE_PANOC = $(CFG2.VERBOSE_PANOC)") - println(" VERBOSE_RO = $(CFG2.VERBOSE_RO)") - println(" RUN_SOLVERS = $(CFG2.RUN_SOLVERS)") - println(" QN_FOR_TR = $(CFG2.QN_FOR_TR)") - println(" QN_FOR_R2N = $(CFG2.QN_FOR_R2N)") - println(" SUB_KWARGS_R2N = $(CFG2.SUB_KWARGS_R2N)") - println(" SIGMAK_R2N = $(CFG2.SIGMAK_R2N)") - println(" X0_SCALAR = $(CFG2.X0_SCALAR)") - println(" PRINT_TABLE = $(CFG2.PRINT_TABLE)") - println(" OPNORM_MAXITER = $(CFG2.OPNORM_MAXITER)") - println(" HESSIAN_SCALE = $(CFG2.HESSIAN_SCALE)") - println(" M_MONOTONE = $(CFG2.M_MONOTONE)") -end - -acc = vec -> length(findall(x -> x < 1, vec)) / length(vec) * 100 # for SVM - -############################# -# ===== PROBLÈME (SVM) ===== # -############################# -Random.seed!(CFG2.SEED) - -model, nls_train, _ = RegularizedProblems.svm_train_model() -x0 = model.meta.x0 - -############################# -# ======= PANOC run ======= # -############################# -function run_panoc!(model, x0; λ = 1.0, maxit = 500, tol = 1e-3, verbose = false) - # BenchUtils.make_adnlp_compatible!() - f = BenchUtils.Counting(model) - g = 
BenchUtils.Counting(RootNormLhalf(λ)) - algo = ProximalAlgorithms.PANOC(maxit = maxit, tol = tol, verbose = verbose) - t = @elapsed x̂, it = algo(x0 = x0, f = f, g = g) - metrics = ( - name = "PANOC (SVM)", - status = "first_order", - time = t, - iters = it, - fevals = f.eval_count, - gevals = f.gradient_count, - proxcalls = g.prox_count, - solution = x̂, - final_obj = obj(model, x̂) - ) - return metrics -end - -############################# -# ======== TR run ========= # -############################# -function ensure_qn(model, which::Symbol) - which === :LBFGS && return LBFGSModel(model) - which === :LSR1 && return LSR1Model(model) - error("QN inconnu: $which (attendu :LBFGS ou :LSR1)") -end - -function run_tr!(model, x0; λ = 1.0, qn = :LSR1, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), opnorm_maxiter = 20) - qn_model = ensure_qn(model, qn) - reset!(qn_model) # reset des compteurs - reg_nlp = RegularizedNLPModel(qn_model, RootNormLhalf(λ)) - solver = TRSolver(reg_nlp) - stats = RegularizedExecutionStats(reg_nlp) - t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats; - x = x0, atol = atol, rtol = rtol, verbose = verbose, opnorm_maxiter = opnorm_maxiter, sub_kwargs = sub_kwargs) - metrics = ( - name = "TR ($(String(qn)), SVM)", - status = string(stats.status), - time = t, - iters = get(stats.solver_specific, :outer_iter, missing), - fevals = neval_obj(qn_model), - gevals = neval_grad(qn_model), - proxcalls = stats.solver_specific[:prox_evals], - solution = stats.solution, - final_obj = obj(model, stats.solution) - ) - return metrics -end - -############################# -# ======== R2N run ======== # -############################# -function run_r2n!(model, x0; λ = 1.0, qn = :LBFGS, atol = 1e-3, rtol = 1e-3, verbose = 0, sub_kwargs = (;), σk = 1e5, opnorm_maxiter = 20) - qn_model = ensure_qn(model, qn) - reset!(qn_model) - reg_nlp = RegularizedNLPModel(qn_model, RootNormLhalf(λ)) - solver = R2NSolver(reg_nlp) - stats = RegularizedExecutionStats(reg_nlp) - t = @elapsed RegularizedOptimization.solve!(solver, reg_nlp, stats; - x = x0, atol = atol, rtol = rtol, σk = σk, - verbose = verbose, sub_kwargs = sub_kwargs, opnorm_maxiter = opnorm_maxiter) - metrics = ( - name = "R2N ($(String(qn)), SVM)", - status = string(stats.status), - time = t, - iters = get(stats.solver_specific, :outer_iter, missing), - fevals = neval_obj(qn_model), - gevals = neval_grad(qn_model), - proxcalls = stats.solver_specific[:prox_evals], - solution = stats.solution, - final_obj = obj(model, stats.solution) - ) - return metrics -end - -############################# -# ====== LANCEMENTS ======= # -############################# -results = NamedTuple[] - -if :PANOC in CFG2.RUN_SOLVERS - push!(results, run_panoc!(model, x0; λ = CFG2.LAMBDA_L0, maxit = CFG2.MAXIT_PANOC, tol = CFG2.TOL, verbose = CFG2.VERBOSE_PANOC)) -end -if :TR in CFG2.RUN_SOLVERS - push!(results, run_tr!(model, x0; λ = CFG2.LAMBDA_L0, qn = CFG2.QN_FOR_TR, atol = CFG2.TOL, rtol = CFG2.RTOL, verbose = CFG2.VERBOSE_RO, sub_kwargs = CFG2.SUB_KWARGS_R2N, opnorm_maxiter = CFG2.OPNORM_MAXITER)) -end -if :R2N in CFG2.RUN_SOLVERS - push!(results, run_r2n!(model, x0; λ = CFG2.LAMBDA_L0, qn = CFG2.QN_FOR_R2N, atol = CFG2.TOL, rtol = CFG2.RTOL, - verbose = CFG2.VERBOSE_RO, sub_kwargs = CFG2.SUB_KWARGS_R2N, σk = CFG2.SIGMAK_R2N, opnorm_maxiter = CFG2.OPNORM_MAXITER)) -end - - -using PrettyTables - -############################# -# ===== AFFICHAGE I/O ===== # -############################# - - -println("\n=== Comparaison solveurs ===") -for m 
in results - println("\n→ ", m.name) - println(" statut = ", m.status) - println(" temps (s) = ", round(m.time, digits=4)) - if m.iters !== missing - println(" itérations = ", m.iters) - end - println(" # f eval = ", m.fevals) - println(" # ∇f eval = ", m.gevals) - if m.proxcalls !== missing - println(" # prox appels = ", Int(m.proxcalls)) - end - println(" objective final", " = ", round(obj(model, m.solution), digits=4)) - println("accuracy (%) = ", round(acc(residual(nls_train, m.solution)), digits=1)) -end - -println("\n") -print_config(CFG2) - -println("\nSummary :") -# Construire les données pour la table -data_svm = [ -(; name=m.name, - status=string(m.status), - time=round(m.time, digits=4), - fe=m.fevals, - ge=m.gevals, - prox = m.proxcalls === missing ? missing : Int(m.proxcalls), - obj = round(obj(model, m.solution), digits=4)) -for m in results -] - -# En-têtes -table_str = pretty_table(String, data_svm; - header = ["Method", "Status", "Time (s)", "#f", "#∇f", "#prox", "#obj"], - tf = tf_unicode, - alignment = [:l, :c, :r, :r, :r, :r, :r], - crop = :none, - ) - -open("Benchmarks/SVM-comparison-f.txt", "w") do io - write(io, table_str) -end diff --git a/paper/examples/comparison-config.jl b/paper/examples/comparison-config.jl index 56f3d075..7d36caac 100644 --- a/paper/examples/comparison-config.jl +++ b/paper/examples/comparison-config.jl @@ -3,8 +3,8 @@ module ComparisonConfig Base.@kwdef mutable struct Config SEED::Int = 1234 LAMBDA_L0::Float64 = 1.0 - TOL::Float64 = 1e-3 - RTOL::Float64 = 1e-3 + TOL::Float64 = 1e-4 + RTOL::Float64 = 1e-4 MAXIT_PANOC::Int = 10000 VERBOSE_PANOC::Bool = false VERBOSE_RO::Int = 0 @@ -12,17 +12,11 @@ Base.@kwdef mutable struct Config QN_FOR_TR::Symbol = :LSR1 QN_FOR_R2N::Symbol = :LBFGS SUB_KWARGS_R2N::NamedTuple = (; max_iter = 200) - SIGMAK_R2N::Float64 = 1e5 - X0_SCALAR::Float64 = 0.1 PRINT_TABLE::Bool = true - OPNORM_MAXITER::Int = 5 - HESSIAN_SCALE::Float64 = 1e-4 - M_MONOTONE::Int = 1 # for nonmonotone R2N end # One global, constant *binding* to a mutable object = type stable & editable -const CFG = Config(QN_FOR_TR = :LBFGS) -const CFG2 = Config(SIGMAK_R2N=eps()^(1 / 5), TOL = 1e-4, RTOL = 1e-4, QN_FOR_R2N=:LSR1) -const CFG3 = Config(SIGMAK_R2N=eps()^(1 / 5), TOL = 1e-4, RTOL = 1e-4, RUN_SOLVERS = [:LM, :TR, :R2N], QN_FOR_TR = :LBFGS) +const CFG = Config(QN_FOR_R2N=:LSR1) +const CFG2 = Config(RUN_SOLVERS = [:LM, :TR, :R2N], QN_FOR_TR = :LBFGS) end # module \ No newline at end of file diff --git a/paper/paper.md b/paper/paper.md index 51a07a20..b47445fb 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -171,7 +171,7 @@ We compare **PANOC** [@stella-themelis-sopasakis-patrinos-2017] (from [ProximalA In order to do so, we implemented a wrapper for **PANOC** to make it compatible with our problem definition. The results are summarized in the combined table below: -\input{Benchmark.tex} +\input{examples/Benchmark.tex} ## Discussion From 3f1c93916489574b712f6299cbe72c1d26601730 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Tue, 30 Sep 2025 20:16:29 -0400 Subject: [PATCH 40/42] Adding a note on gradient evaluations for the LM solver. --- paper/paper.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index b47445fb..bc7f2cbb 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -74,7 +74,7 @@ Although these subproblems may require many proximal iterations, each proximal c The package provides a consistent API to formulate optimization problems and apply different solvers. 
It integrates seamlessly with the [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) [@jso] ecosystem, an academic organization for nonlinear optimization software development, testing, and benchmarking. -On the one hand, the smooth probobjectivelems $f$ can be defined via [NLPModels.jl](https://github.com/JuliaSmoothOptimizers/NLPModels.jl) [@orban-siqueira-nlpmodels-2020], which provides a standardized Julia API for representing nonlinear programming (NLP) problems. +On the one hand, the smooth objective $f$ can be defined via [NLPModels.jl](https://github.com/JuliaSmoothOptimizers/NLPModels.jl) [@orban-siqueira-nlpmodels-2020], which provides a standardized Julia API for representing nonlinear programming (NLP) problems. Large collections of such problems are available in [CUTEst.jl](https://github.com/JuliaSmoothOptimizers/CUTEst.jl) [@orban-siqueira-cutest-2020] and [OptimizationProblems.jl](https://github.com/JuliaSmoothOptimizers/OptimizationProblems.jl) [@migot-orban-siqueira-optimizationproblems-2023]. Another option is to use [RegularizedProblems.jl](https://github.com/JuliaSmoothOptimizers/RegularizedProblems.jl), which provides problem instances commonly used in the nonsmooth optimization literature, where $f$ can be paired with various nonsmooth terms $h$. @@ -173,6 +173,8 @@ The results are summarized in the combined table below: \input{examples/Benchmark.tex} + +* For the LM solver, the gradient evaluation count equals the number of Jacobian–vector and adjoint-Jacobian–vector products. + ## Discussion According to **status**, all methods successfully reduced the optimality measure below the specified tolerance of $10^{-4}$ and thus converged to a **first-order** stationary point. From a2e0b9eb61e172d8e21d823c451f8fa61a1f0c28 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Tue, 30 Sep 2025 20:16:50 -0400 Subject: [PATCH 41/42] Adding a note on gradient evaluations for the LM solver. --- paper/paper.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index bc7f2cbb..71168acd 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -181,7 +181,7 @@ According to **status**, all methods successfully reduced the optimality measure However, the final objective values differ due to the nonconvexity of the problems. - **SVM with $\ell^{1/2}$ penalty:** **TR** and **R2N** require far fewer function and gradient evaluations than **PANOC**, at the expense of more proximal iterations. Since each proximal step is inexpensive, **TR** and **R2N** are much faster overall. -- **NNMF with constrained $\ell_0$ penalty:** **TR** outperforms **R2N**, while **LM** is competitive in terms of function calls but incurs many gradient evaluations. +- **NNMF with constrained $\ell_0$ penalty:** **TR** outperforms **R2N**, while **LM** is competitive in terms of function calls but incurs many Jacobian products. Additional tests (e.g., other regularizers, constraint types, and scaling dimensions) have also been conducted, and a full benchmarking campaign is currently underway. 
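The LM counting convention stated in the note above mirrors the bookkeeping in Benchmark.jl: for a least-squares model, NLPModels.jl maintains separate counters for residual evaluations and for Jacobian products, and LM never forms the gradient $\nabla f = J^T F$ explicitly. A short sketch of that bookkeeping, using only counter functions that already appear in Benchmark.jl (the solved `nls_model` is assumed to come from `nnmf_model` as above):

```julia
using NLPModels

# "#f" for the LM row counts residual evaluations; "#∇f" aggregates the
# Jv and Jᵀv products recorded by the model's counters.
lm_fevals(nls) = neval_residual(nls)
lm_gevals(nls) = neval_jprod_residual(nls) + neval_jtprod_residual(nls)
```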
From ab8ef54a808f6cada8ce86298b58c2883af70536 Mon Sep 17 00:00:00 2001 From: MohamedLaghdafHABIBOULLAH Date: Tue, 30 Sep 2025 20:24:54 -0400 Subject: [PATCH 42/42] Clarify solver statistics and gradient evaluation details for the LM solver in paper.md --- paper/examples/Project.toml | 1 - paper/paper.md | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/paper/examples/Project.toml b/paper/examples/Project.toml index 05b0675f..dfe38a1f 100644 --- a/paper/examples/Project.toml +++ b/paper/examples/Project.toml @@ -6,6 +6,5 @@ PrettyTables = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d" ProximalAlgorithms = "140ffc9f-1907-541a-a177-7475e0a401e9" ProximalCore = "dc4f5ac2-75d1-4f31-931e-60435d74994b" ProximalOperators = "a725b495-10eb-56fe-b38b-717eba820537" -RegularizedOptimization = "196f2941-2d58-45ba-9f13-43a2532b2fa8" RegularizedProblems = "ea076b23-609f-44d2-bb12-a4ae45328278" ShiftedProximalOperators = "d4fd37fa-580c-4e43-9b30-361c21aae263" diff --git a/paper/paper.md b/paper/paper.md index 71168acd..e695e53b 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -169,11 +169,12 @@ solver = LMSolver(reg_nls) # Choose solver We compare **PANOC** [@stella-themelis-sopasakis-patrinos-2017] (from [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl)) against **TR**, **R2N**, and **LM** from our library. In order to do so, we implemented a wrapper for **PANOC** to make it compatible with our problem definition. -The results are summarized in the combined table below: + +We report the following solver statistics in the table below: the convergence status of each solver (**Status**); the elapsed time $t$ in seconds; the number of evaluations of the smooth objective ($\# f$); the number of gradient evaluations ($\# \nabla f$); the number of proximal operator evaluations ($\# \text{prox}$); and the final objective value $(f + h)(x^*)$ (**Objective**). \input{examples/Benchmark.tex} -* For the LM solver, the gradient evaluation count equals the number of Jacobian–vector and adjoint-Jacobian–vector products. +* For the LM solver, the gradient evaluation count $\#\nabla f$ equals the number of Jacobian–vector and adjoint-Jacobian–vector products. ## Discussion
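For completeness, here is a plausible way to regenerate the table consumed by `\input{examples/Benchmark.tex}` after applying this series. These steps are an assumption based on the scripts above, not documented tooling; note that Benchmark.jl also does `using RegularizedOptimization`, so the examples environment must resolve that package (e.g., via a local `Pkg.develop`):

```julia
# Hypothetical reproduction steps, run from the repository root.
using Pkg
Pkg.activate("paper/examples")
Pkg.instantiate()

cd("paper/examples") do
    include("Benchmark.jl")  # defines bench_svm!, bench_nnmf!, and main()
    main()                   # runs both benchmarks and writes Benchmark.tex
end
```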