Commit 97c5347

Move optimization API to "dsp_aware_pruning" module (new optimization tools coming)
1 parent: c4af46a

26 files changed: +160 -157 lines
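
For downstream code this is purely an import-path move: everything that previously lived directly under `hls4ml.optimization` now sits one level deeper, under `hls4ml.optimization.dsp_aware_pruning`. A minimal migration sketch (illustrative, not part of the diff):

```python
# Old import paths (before this commit):
# from hls4ml.optimization.keras import optimize_model
# from hls4ml.optimization.objectives import ParameterEstimator

# New import paths (after this commit):
from hls4ml.optimization.dsp_aware_pruning.keras import optimize_model
from hls4ml.optimization.dsp_aware_pruning.objectives import ParameterEstimator
```

The top-level entry points remain importable through re-exports in `hls4ml/optimization/__init__.py` (see below), so `from hls4ml.optimization import optimize_model` still works.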

docs/advanced/model_optimization.rst

Lines changed: 7 additions & 7 deletions
```diff
@@ -13,11 +13,11 @@ The code block below showcases three use cases of the hls4ml Optimization API -
     from tensorflow.keras.optimizers import Adam
     from tensorflow.keras.metrics import CategoricalAccuracy
     from tensorflow.keras.losses import CategoricalCrossentropy
-    from hls4ml.optimization.keras import optimize_model
-    from hls4ml.optimization.keras.utils import get_model_sparsity
-    from hls4ml.optimization.attributes import get_attributes_from_keras_model
-    from hls4ml.optimization.objectives import ParameterEstimator
-    from hls4ml.optimization.scheduler import PolynomialScheduler
+    from hls4ml.optimization.dsp_aware_pruning.keras import optimize_model
+    from hls4ml.optimization.dsp_aware_pruning.keras.utils import get_model_sparsity
+    from hls4ml.optimization.dsp_aware_pruning.attributes import get_attributes_from_keras_model
+    from hls4ml.optimization.dsp_aware_pruning.objectives import ParameterEstimator
+    from hls4ml.optimization.dsp_aware_pruning.scheduler import PolynomialScheduler
     # Define baseline model and load data
     # X_train, y_train = ...
     # X_val, y_val = ...
@@ -75,7 +75,7 @@ To optimize GPU FLOPs, the code is similar to above:

 .. code-block:: Python

-    from hls4ml.optimization.objectives.gpu_objectives import GPUFLOPEstimator
+    from hls4ml.optimization.dsp_aware_pruning.objectives.gpu_objectives import GPUFLOPEstimator

     # Optimize model
     # Note the change from ParameterEstimator to GPUFLOPEstimator
@@ -98,7 +98,7 @@ Finally, optimizing Vivado DSPs is possible, given a hls4ml config:
 .. code-block:: Python

     from hls4ml.utils.config import config_from_keras_model
-    from hls4ml.optimization.objectives.vivado_objectives import VivadoDSPEstimator
+    from hls4ml.optimization.dsp_aware_pruning.objectives.vivado_objectives import VivadoDSPEstimator

     # Note the change from optimize_model to optimize_keras_model_for_hls4ml
     # The function optimize_keras_model_for_hls4ml acts as a wrapper for the function, parsing hls4ml config to model attributes
```
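
The documentation examples themselves are unchanged; only the module paths move. As a quick sanity check that the relocated utilities resolve, here is a small sketch using `get_model_sparsity` (the toy model is a placeholder, not from the docs; the function is assumed to return an overall sparsity plus a per-layer breakdown, which is how the docs use it):

```python
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential

from hls4ml.optimization.dsp_aware_pruning.keras.utils import get_model_sparsity

# Toy stand-in for a real baseline model
model = Sequential([Dense(16, activation='relu', input_shape=(8,)), Dense(4)])

# Fraction of zero-valued weights, overall and per layer (assumed return format)
sparsity, per_layer = get_model_sparsity(model)
print(f'Overall sparsity: {sparsity:.3f}')
```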

hls4ml/optimization/__init__.py

Lines changed: 3 additions & 108 deletions
```diff
@@ -1,108 +1,3 @@
-import numpy as np
-
-from hls4ml.optimization.attributes import get_attributes_from_keras_model_and_hls4ml_config
-from hls4ml.optimization.keras import optimize_model
-
-default_regularization_range = np.logspace(-6, -2, num=16).tolist()
-
-
-def optimize_keras_model_for_hls4ml(
-    keras_model,
-    hls_config,
-    objective,
-    scheduler,
-    X_train,
-    y_train,
-    X_val,
-    y_val,
-    batch_size,
-    epochs,
-    optimizer,
-    loss_fn,
-    validation_metric,
-    increasing,
-    rtol,
-    callbacks=None,
-    ranking_metric='l1',
-    local=False,
-    verbose=False,
-    rewinding_epochs=1,
-    cutoff_bad_trials=3,
-    directory='hls4ml-optimization',
-    tuner='Bayesian',
-    knapsack_solver='CBC_MIP',
-    regularization_range=default_regularization_range,
-):
-    '''
-    Top-level function for optimizing a Keras model, given hls4ml config and a hardware objective(s)
-
-    Args:
-        keras_model (keras.Model): Model to be optimized
-        hls_config (dict): hls4ml configuration, obtained from hls4ml.utils.config.config_from_keras_model(...)
-        objective (hls4ml.optimization.objectives.ObjectiveEstimator):
-            Parameter, hardware or user-defined objective of optimization
-        scheduler (hls4ml.optimization.scheduler.OptimizationScheduler):
-            Sparsity scheduler, choose between constant, polynomial and binary
-        X_train (np.array): Training inputs
-        y_train (np.array): Training labels
-        X_val (np.array): Validation inputs
-        y_val (np.array): Validation labels
-        batch_size (int): Batch size during training
-        epochs (int): Maximum number of epochs to fine-tune model, in one iteration of pruning
-        optimizer (keras.optimizers.Optimizer or equivalent-string description): Optimizer used during training
-        loss_fn (keras.losses.Loss or equivalent loss description): Loss function used during training
-        validation_metric (keras.metrics.Metric or equivalent loss description): Validation metric, used as a baseline
-        increasing (boolean): If the metric improves with increased values;
-            e.g. accuracy -> increasing = True, MSE -> increasing = False
-        rtol (float): Relative tolerance;
-            pruning stops when pruned_validation_metric < (or >) rtol * baseline_validation_metric
-        callbacks (list of keras.callbacks.Callback) Currently not supported, developed in future versions
-        ranking_metric (string): Metric used for ranking weights and structures;
-            currently supported l1, l2, saliency and Oracle
-        local (boolean): Layer-wise or global pruning
-        verbose (boolean): Display debug logs during model optimization
-        rewinding_epochs (int): Number of epochs to retrain model without weight freezing,
-            allows regrowth of previously pruned weights
-        cutoff_bad_trials (int): After how many bad trials (performance below threshold),
-            should model pruning / weight sharing stop
-        directory (string): Directory to store temporary results
-        tuner (str): Tuning algorithm, choose between Bayesian, Hyperband and None
-        knapsack_solver (str): Algorithm to solve Knapsack problem when optimizing;
-            default usually works well; for very large networks, greedy algorithm might be more suitable
-        regularization_range (list): List of suitable hyperparameters for weight decay
-
-    Returns:
-        keras.Model: Optimized model
-    '''
-
-    # Extract model attributes
-    model_attributes = get_attributes_from_keras_model_and_hls4ml_config(keras_model, hls_config)
-
-    # Optimize model
-    return optimize_model(
-        keras_model,
-        model_attributes,
-        objective,
-        scheduler,
-        X_train,
-        y_train,
-        X_val,
-        y_val,
-        batch_size,
-        epochs,
-        optimizer,
-        loss_fn,
-        validation_metric,
-        increasing,
-        rtol,
-        callbacks=callbacks,
-        ranking_metric=ranking_metric,
-        local=local,
-        verbose=verbose,
-        rewinding_epochs=rewinding_epochs,
-        cutoff_bad_trials=cutoff_bad_trials,
-        directory=directory,
-        tuner=tuner,
-        knapsack_solver=knapsack_solver,
-        regularization_range=regularization_range,
-    )
+from .dsp_aware_pruning import optimize_keras_model_for_hls4ml  # noqa: F401
+from .dsp_aware_pruning.attributes import get_attributes_from_keras_model_and_hls4ml_config  # noqa: F401
+from .dsp_aware_pruning.keras import optimize_model  # noqa: F401
```
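
The three `# noqa: F401` re-exports are what keep the public API stable after the move: code written against the old top-level module should continue to run unchanged. For example (illustrative):

```python
# Both imports resolve exactly as before the move, via the re-exports above
from hls4ml.optimization import optimize_keras_model_for_hls4ml, optimize_model
```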
hls4ml/optimization/dsp_aware_pruning/__init__.py

Lines changed: 108 additions & 0 deletions

```diff
@@ -0,0 +1,108 @@
+import numpy as np
+
+from hls4ml.optimization.dsp_aware_pruning.attributes import get_attributes_from_keras_model_and_hls4ml_config
+from hls4ml.optimization.dsp_aware_pruning.keras import optimize_model
+
+default_regularization_range = np.logspace(-6, -2, num=16).tolist()
+
+
+def optimize_keras_model_for_hls4ml(
+    keras_model,
+    hls_config,
+    objective,
+    scheduler,
+    X_train,
+    y_train,
+    X_val,
+    y_val,
+    batch_size,
+    epochs,
+    optimizer,
+    loss_fn,
+    validation_metric,
+    increasing,
+    rtol,
+    callbacks=None,
+    ranking_metric='l1',
+    local=False,
+    verbose=False,
+    rewinding_epochs=1,
+    cutoff_bad_trials=3,
+    directory='hls4ml-optimization',
+    tuner='Bayesian',
+    knapsack_solver='CBC_MIP',
+    regularization_range=default_regularization_range,
+):
+    '''
+    Top-level function for optimizing a Keras model, given hls4ml config and a hardware objective(s)
+
+    Args:
+        keras_model (keras.Model): Model to be optimized
+        hls_config (dict): hls4ml configuration, obtained from hls4ml.utils.config.config_from_keras_model(...)
+        objective (hls4ml.optimization.objectives.ObjectiveEstimator):
+            Parameter, hardware or user-defined objective of optimization
+        scheduler (hls4ml.optimization.scheduler.OptimizationScheduler):
+            Sparsity scheduler, choose between constant, polynomial and binary
+        X_train (np.array): Training inputs
+        y_train (np.array): Training labels
+        X_val (np.array): Validation inputs
+        y_val (np.array): Validation labels
+        batch_size (int): Batch size during training
+        epochs (int): Maximum number of epochs to fine-tune model, in one iteration of pruning
+        optimizer (keras.optimizers.Optimizer or equivalent-string description): Optimizer used during training
+        loss_fn (keras.losses.Loss or equivalent loss description): Loss function used during training
+        validation_metric (keras.metrics.Metric or equivalent loss description): Validation metric, used as a baseline
+        increasing (boolean): If the metric improves with increased values;
+            e.g. accuracy -> increasing = True, MSE -> increasing = False
+        rtol (float): Relative tolerance;
+            pruning stops when pruned_validation_metric < (or >) rtol * baseline_validation_metric
+        callbacks (list of keras.callbacks.Callback) Currently not supported, developed in future versions
+        ranking_metric (string): Metric used for ranking weights and structures;
+            currently supported l1, l2, saliency and Oracle
+        local (boolean): Layer-wise or global pruning
+        verbose (boolean): Display debug logs during model optimization
+        rewinding_epochs (int): Number of epochs to retrain model without weight freezing,
+            allows regrowth of previously pruned weights
+        cutoff_bad_trials (int): After how many bad trials (performance below threshold),
+            should model pruning / weight sharing stop
+        directory (string): Directory to store temporary results
+        tuner (str): Tuning algorithm, choose between Bayesian, Hyperband and None
+        knapsack_solver (str): Algorithm to solve Knapsack problem when optimizing;
+            default usually works well; for very large networks, greedy algorithm might be more suitable
+        regularization_range (list): List of suitable hyperparameters for weight decay
+
+    Returns:
+        keras.Model: Optimized model
+    '''
+
+    # Extract model attributes
+    model_attributes = get_attributes_from_keras_model_and_hls4ml_config(keras_model, hls_config)
+
+    # Optimize model
+    return optimize_model(
+        keras_model,
+        model_attributes,
+        objective,
+        scheduler,
+        X_train,
+        y_train,
+        X_val,
+        y_val,
+        batch_size,
+        epochs,
+        optimizer,
+        loss_fn,
+        validation_metric,
+        increasing,
+        rtol,
+        callbacks=callbacks,
+        ranking_metric=ranking_metric,
+        local=local,
+        verbose=verbose,
+        rewinding_epochs=rewinding_epochs,
+        cutoff_bad_trials=cutoff_bad_trials,
+        directory=directory,
+        tuner=tuner,
+        knapsack_solver=knapsack_solver,
+        regularization_range=regularization_range,
+    )
```
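
Since the wrapper's full signature is spelled out in the docstring above, a usage sketch may help; the model, data, and hyperparameter values below are placeholders in the style of the docs, not part of the commit:

```python
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import CategoricalAccuracy
from tensorflow.keras.optimizers import Adam

from hls4ml.optimization import optimize_keras_model_for_hls4ml
from hls4ml.optimization.dsp_aware_pruning.objectives import ParameterEstimator
from hls4ml.optimization.dsp_aware_pruning.scheduler import PolynomialScheduler
from hls4ml.utils.config import config_from_keras_model

# keras_model = ...   (user-supplied, trained Keras model)
# X_train, y_train = ...
# X_val, y_val = ...

hls_config = config_from_keras_model(keras_model)
optimized_model = optimize_keras_model_for_hls4ml(
    keras_model,
    hls_config,
    ParameterEstimator,                           # objective: minimize parameter count
    PolynomialScheduler(5, final_sparsity=0.75),  # sparsity schedule (placeholder values)
    X_train, y_train, X_val, y_val,
    batch_size=256,
    epochs=10,
    optimizer=Adam(),
    loss_fn=CategoricalCrossentropy(from_logits=True),
    validation_metric=CategoricalAccuracy(),
    increasing=True,   # accuracy improves as it increases
    rtol=0.98,         # stop pruning if accuracy falls below 98% of baseline
)
```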

hls4ml/optimization/attributes.py renamed to hls4ml/optimization/dsp_aware_pruning/attributes.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -2,8 +2,8 @@

 import hls4ml
 from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.keras.config import SUPPORTED_LAYERS
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.keras.config import SUPPORTED_LAYERS


 class hls4mlAttributes:
```

hls4ml/optimization/keras/__init__.py renamed to hls4ml/optimization/dsp_aware_pruning/keras/__init__.py

Lines changed: 7 additions & 7 deletions
```diff
@@ -7,13 +7,13 @@
 # Enables printing of loss tensors during custom training loop
 from tensorflow.python.ops.numpy_ops import np_config

-import hls4ml.optimization.keras.utils as utils
-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.keras.builder import build_optimizable_model, remove_custom_regularizers
-from hls4ml.optimization.keras.config import SUPPORTED_LAYERS, SUPPORTED_METRICS, TMP_DIRECTORY
-from hls4ml.optimization.keras.masking import get_model_masks
-from hls4ml.optimization.keras.reduction import reduce_model
-from hls4ml.optimization.scheduler import OptimizationScheduler
+import hls4ml.optimization.dsp_aware_pruning.keras.utils as utils
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.keras.builder import build_optimizable_model, remove_custom_regularizers
+from hls4ml.optimization.dsp_aware_pruning.keras.config import SUPPORTED_LAYERS, SUPPORTED_METRICS, TMP_DIRECTORY
+from hls4ml.optimization.dsp_aware_pruning.keras.masking import get_model_masks
+from hls4ml.optimization.dsp_aware_pruning.keras.reduction import reduce_model
+from hls4ml.optimization.dsp_aware_pruning.scheduler import OptimizationScheduler

 np_config.enable_numpy_behavior()
 default_regularization_range = np.logspace(-6, -2, num=16).tolist()
```

hls4ml/optimization/keras/builder.py renamed to hls4ml/optimization/dsp_aware_pruning/keras/builder.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -8,8 +8,8 @@
 from tensorflow.keras.callbacks import EarlyStopping
 from tensorflow.keras.layers import Conv2D, Dense

-from hls4ml.optimization.keras.config import SUPPORTED_LAYERS, TMP_DIRECTORY
-from hls4ml.optimization.keras.regularizers import Conv2DRegularizer, DenseRegularizer
+from hls4ml.optimization.dsp_aware_pruning.keras.config import SUPPORTED_LAYERS, TMP_DIRECTORY
+from hls4ml.optimization.dsp_aware_pruning.keras.regularizers import Conv2DRegularizer, DenseRegularizer

 co = {}
 _add_supported_quantized_objects(co)
```

hls4ml/optimization/keras/masking.py renamed to hls4ml/optimization/dsp_aware_pruning/keras/masking.py

Lines changed: 3 additions & 3 deletions
```diff
@@ -6,9 +6,9 @@
 from qkeras import QConv2D, QDense
 from tensorflow.keras.layers import Conv2D, Dense

-from hls4ml.optimization.config import SUPPORTED_STRUCTURES
-from hls4ml.optimization.keras.config import SUPPORTED_LAYERS, SUPPORTED_METRICS
-from hls4ml.optimization.knapsack import solve_knapsack
+from hls4ml.optimization.dsp_aware_pruning.config import SUPPORTED_STRUCTURES
+from hls4ml.optimization.dsp_aware_pruning.keras.config import SUPPORTED_LAYERS, SUPPORTED_METRICS
+from hls4ml.optimization.dsp_aware_pruning.knapsack import solve_knapsack


 def get_model_masks(
```

hls4ml/optimization/keras/reduction.py renamed to hls4ml/optimization/dsp_aware_pruning/keras/reduction.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -2,7 +2,7 @@
 from tensorflow.keras.layers import Conv2D, Dense
 from tensorflow.keras.models import Sequential

-from hls4ml.optimization.keras.utils import get_last_layer_with_weights
+from hls4ml.optimization.dsp_aware_pruning.keras.utils import get_last_layer_with_weights


 def reduce_model(model):
```
