
ERROR: Unsupported layer type: Conv1DTranspose #860

Closed
@Choihyeonhwa

Description


**I am trying to convert my model using the hls4ml library. My model is a U-Net with the following layers:**


| Layer (type) | Output Shape | Param # | Connected to |
|---|---|---|---|
| input_3 (InputLayer) | [(None, 1280, 1)] | 0 | [] |
| conv1d_6 (Conv1D) | (None, 640, 64) | 320 | ['input_3[0][0]'] |
| leaky_re_lu_5 (LeakyReLU) | (None, 640, 64) | 0 | ['conv1d_6[0][0]'] |
| conv1d_7 (Conv1D) | (None, 320, 128) | 32896 | ['leaky_re_lu_5[0][0]'] |
| batch_normalization_4 (BatchNormalization) | (None, 320, 128) | 512 | ['conv1d_7[0][0]'] |
| leaky_re_lu_6 (LeakyReLU) | (None, 320, 128) | 0 | ['batch_normalization_4[0][0]'] |
| conv1d_8 (Conv1D) | (None, 160, 256) | 131328 | ['leaky_re_lu_6[0][0]'] |
| batch_normalization_5 (BatchNormalization) | (None, 160, 256) | 1024 | ['conv1d_8[0][0]'] |
| leaky_re_lu_7 (LeakyReLU) | (None, 160, 256) | 0 | ['batch_normalization_5[0][0]'] |
| conv1d_9 (Conv1D) | (None, 80, 512) | 524800 | ['leaky_re_lu_7[0][0]'] |
| batch_normalization_6 (BatchNormalization) | (None, 80, 512) | 2048 | ['conv1d_9[0][0]'] |
| leaky_re_lu_8 (LeakyReLU) | (None, 80, 512) | 0 | ['batch_normalization_6[0][0]'] |
| conv1d_10 (Conv1D) | (None, 40, 512) | 1049088 | ['leaky_re_lu_8[0][0]'] |
| batch_normalization_7 (BatchNormalization) | (None, 40, 512) | 2048 | ['conv1d_10[0][0]'] |
| leaky_re_lu_9 (LeakyReLU) | (None, 40, 512) | 0 | ['batch_normalization_7[0][0]'] |
| conv1d_11 (Conv1D) | (None, 20, 512) | 1049088 | ['leaky_re_lu_9[0][0]'] |
| batch_normalization_8 (BatchNormalization) | (None, 20, 512) | 2048 | ['conv1d_11[0][0]'] |
| leaky_re_lu_10 (LeakyReLU) | (None, 20, 512) | 0 | ['batch_normalization_8[0][0]'] |
| conv1d_12 (Conv1D) | (None, 10, 512) | 1049088 | ['leaky_re_lu_10[0][0]'] |
| batch_normalization_9 (BatchNormalization) | (None, 10, 512) | 2048 | ['conv1d_12[0][0]'] |
| leaky_re_lu_11 (LeakyReLU) | (None, 10, 512) | 0 | ['batch_normalization_9[0][0]'] |
| conv1d_13 (Conv1D) | (None, 5, 512) | 1049088 | ['leaky_re_lu_11[0][0]'] |
| activation_1 (Activation) | (None, 5, 512) | 0 | ['conv1d_13[0][0]'] |
| conv1d_transpose (Conv1DTranspose) | (None, 10, 512) | 1049088 | ['activation_1[0][0]'] |
| batch_normalization_10 (BatchNormalization) | (None, 10, 512) | 2048 | ['conv1d_transpose[0][0]'] |
| dropout (Dropout) | (None, 10, 512) | 0 | ['batch_normalization_10[0][0]'] |
| concatenate_1 (Concatenate) | (None, 10, 1024) | 0 | ['dropout[0][0]', 'leaky_re_lu_11[0][0]'] |
| activation_2 (Activation) | (None, 10, 1024) | 0 | ['concatenate_1[0][0]'] |
| conv1d_transpose_1 (Conv1DTranspose) | (None, 20, 512) | 2097664 | ['activation_2[0][0]'] |
| batch_normalization_11 (BatchNormalization) | (None, 20, 512) | 2048 | ['conv1d_transpose_1[0][0]'] |
| dropout_1 (Dropout) | (None, 20, 512) | 0 | ['batch_normalization_11[0][0]'] |
| concatenate_2 (Concatenate) | (None, 20, 1024) | 0 | ['dropout_1[0][0]', 'leaky_re_lu_10[0][0]'] |
| activation_3 (Activation) | (None, 20, 1024) | 0 | ['concatenate_2[0][0]'] |
| conv1d_transpose_2 (Conv1DTranspose) | (None, 40, 512) | 2097664 | ['activation_3[0][0]'] |
| batch_normalization_12 (BatchNormalization) | (None, 40, 512) | 2048 | ['conv1d_transpose_2[0][0]'] |
| dropout_2 (Dropout) | (None, 40, 512) | 0 | ['batch_normalization_12[0][0]'] |
| concatenate_3 (Concatenate) | (None, 40, 1024) | 0 | ['dropout_2[0][0]', 'leaky_re_lu_9[0][0]'] |
| activation_4 (Activation) | (None, 40, 1024) | 0 | ['concatenate_3[0][0]'] |
| conv1d_transpose_3 (Conv1DTranspose) | (None, 80, 512) | 2097664 | ['activation_4[0][0]'] |
| batch_normalization_13 (BatchNormalization) | (None, 80, 512) | 2048 | ['conv1d_transpose_3[0][0]'] |
| concatenate_4 (Concatenate) | (None, 80, 1024) | 0 | ['batch_normalization_13[0][0]', 'leaky_re_lu_8[0][0]'] |
| activation_5 (Activation) | (None, 80, 1024) | 0 | ['concatenate_4[0][0]'] |
| conv1d_transpose_4 (Conv1DTranspose) | (None, 160, 256) | 1048832 | ['activation_5[0][0]'] |
| batch_normalization_14 (BatchNormalization) | (None, 160, 256) | 1024 | ['conv1d_transpose_4[0][0]'] |
| concatenate_5 (Concatenate) | (None, 160, 512) | 0 | ['batch_normalization_14[0][0]', 'leaky_re_lu_7[0][0]'] |
| activation_6 (Activation) | (None, 160, 512) | 0 | ['concatenate_5[0][0]'] |
| conv1d_transpose_5 (Conv1DTranspose) | (None, 320, 128) | 262272 | ['activation_6[0][0]'] |
| batch_normalization_15 (BatchNormalization) | (None, 320, 128) | 512 | ['conv1d_transpose_5[0][0]'] |
| concatenate_6 (Concatenate) | (None, 320, 256) | 0 | ['batch_normalization_15[0][0]', 'leaky_re_lu_6[0][0]'] |
| activation_7 (Activation) | (None, 320, 256) | 0 | ['concatenate_6[0][0]'] |
| conv1d_transpose_6 (Conv1DTranspose) | (None, 640, 64) | 65600 | ['activation_7[0][0]'] |
| batch_normalization_16 (BatchNormalization) | (None, 640, 64) | 256 | ['conv1d_transpose_6[0][0]'] |
| concatenate_7 (Concatenate) | (None, 640, 128) | 0 | ['batch_normalization_16[0][0]', 'leaky_re_lu_5[0][0]'] |
| activation_8 (Activation) | (None, 640, 128) | 0 | ['concatenate_7[0][0]'] |
| conv1d_transpose_7 (Conv1DTranspose) | (None, 1280, 1) | 513 | ['activation_8[0][0]'] |
| leaky_re_lu_12 (LeakyReLU) | (None, 1280, 1) | 0 | ['conv1d_transpose_7[0][0]'] |

Total params: 13,624,705
Trainable params: 13,614,849
Non-trainable params: 9,856
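For clarity, each downsampling step is a strided Conv1D and each upsampling step is a Conv1DTranspose followed by a skip connection from the encoder. A rough sketch of the two building blocks is below (kernel size 4, stride 2, 'same' padding, consistent with the parameter counts above; dropout and minor ordering details are omitted), just to show where Conv1DTranspose enters the model:

import tensorflow as tf
from tensorflow.keras import layers

def down_block(x, filters):
    # Encoder step: strided Conv1D halves the sequence length (1280 -> 640 -> ...)
    x = layers.Conv1D(filters, kernel_size=4, strides=2, padding='same')(x)
    x = layers.BatchNormalization()(x)
    return layers.LeakyReLU()(x)

def up_block(x, skip, filters):
    # Decoder step: Conv1DTranspose doubles the sequence length, then the
    # matching encoder feature map is concatenated (U-Net skip connection)
    x = layers.Conv1DTranspose(filters, kernel_size=4, strides=2, padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Concatenate()([x, skip])
    return layers.Activation('relu')(x)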

**However, during the hls4ml conversion it reports that Conv1DTranspose is not supported. The code I use to convert the model is as follows:**

import hls4ml
import tensorflow as tf

# Load the trained Keras U-Net
keras_model = tf.keras.models.load_model('/home/hhc/GAN/models/01model_L2.h5')

# Per-layer hls4ml configuration
config = hls4ml.utils.config_from_keras_model(keras_model, granularity='name')

# Convert the Keras model to an hls4ml project
hls_model = hls4ml.converters.convert_from_keras_model(keras_model, output_dir='/home/hhc/GAN/FPGA/', hls_config=config)
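The problem does not seem to be specific to the full U-Net. A minimal sketch with a single Conv1DTranspose layer (sizes taken from the first decoder layer, output directory arbitrary) should hit the same error:

import hls4ml
import tensorflow as tf

# Minimal model containing only a Conv1DTranspose layer, to isolate the issue
inputs = tf.keras.layers.Input(shape=(5, 512))
outputs = tf.keras.layers.Conv1DTranspose(512, kernel_size=4, strides=2, padding='same')(inputs)
mini_model = tf.keras.Model(inputs, outputs)

# Expected to abort with "ERROR: Unsupported layer type: Conv1DTranspose"
config = hls4ml.utils.config_from_keras_model(mini_model, granularity='name')
hls_model = hls4ml.converters.convert_from_keras_model(mini_model, output_dir='/tmp/conv1d_transpose_test', hls_config=config)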

Is there any way to solve this problem?
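If Conv1DTranspose is simply not implemented yet, would replacing each Conv1DTranspose with an UpSampling1D followed by a regular Conv1D be a reasonable workaround (assuming UpSampling1D is supported in my hls4ml version, which I have not verified)? Roughly this substitution:

from tensorflow.keras import layers

def upsample_conv(x, filters):
    # Possible stand-in for Conv1DTranspose(filters, kernel_size=4, strides=2,
    # padding='same'): upsampling followed by a stride-1 Conv1D. This is not
    # numerically identical to a transposed convolution, so the model would
    # need to be retrained with these blocks.
    x = layers.UpSampling1D(size=2)(x)
    return layers.Conv1D(filters, kernel_size=4, strides=1, padding='same')(x)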
