Skip to content

【PaddleNLP No.6】Fix text_classification PIR #10497

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
May 6, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,10 @@

from paddlenlp.data import Pad, Tuple
from paddlenlp.transformers import AutoTokenizer
from paddlenlp.utils.env import (
PADDLE_INFERENCE_MODEL_SUFFIX,
PADDLE_INFERENCE_WEIGHTS_SUFFIX,
)

sys.path.append(".")

Expand Down Expand Up @@ -114,8 +118,8 @@ def __init__(
self.max_seq_length = max_seq_length
self.batch_size = batch_size

model_file = model_dir + "/inference.get_pooled_embedding.pdmodel"
params_file = model_dir + "/inference.get_pooled_embedding.pdiparams"
model_file = model_dir + f"/inference.get_pooled_embedding{PADDLE_INFERENCE_MODEL_SUFFIX}"
params_file = model_dir + f"/inference.get_pooled_embedding{PADDLE_INFERENCE_WEIGHTS_SUFFIX}"
if not os.path.exists(model_file):
raise ValueError("not find model file path {}".format(model_file))
if not os.path.exists(params_file):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,10 @@

import paddlenlp as ppnlp
from paddlenlp.data import Pad, Tuple
from paddlenlp.utils.env import (
PADDLE_INFERENCE_MODEL_SUFFIX,
PADDLE_INFERENCE_WEIGHTS_SUFFIX,
)

# fmt: off
parser = argparse.ArgumentParser()
Expand Down Expand Up @@ -82,8 +86,8 @@ def __init__(
self.max_seq_length = max_seq_length
self.batch_size = batch_size

model_file = model_dir + "/inference.get_pooled_embedding.pdmodel"
params_file = model_dir + "/inference.get_pooled_embedding.pdiparams"
model_file = model_dir + f"/inference.get_pooled_embedding{PADDLE_INFERENCE_MODEL_SUFFIX}"
params_file = model_dir + f"/inference.get_pooled_embedding{PADDLE_INFERENCE_WEIGHTS_SUFFIX}"
if not os.path.exists(model_file):
raise ValueError("not find model file path {}".format(model_file))
if not os.path.exists(params_file):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,10 @@

from paddlenlp.data import Pad, Tuple
from paddlenlp.transformers import AutoTokenizer
from paddlenlp.utils.env import (
PADDLE_INFERENCE_MODEL_SUFFIX,
PADDLE_INFERENCE_WEIGHTS_SUFFIX,
)

sys.path.append(".")

Expand Down Expand Up @@ -114,8 +118,8 @@ def __init__(
self.max_seq_length = max_seq_length
self.batch_size = batch_size

model_file = model_dir + "/inference.get_pooled_embedding.pdmodel"
params_file = model_dir + "/inference.get_pooled_embedding.pdiparams"
model_file = model_dir + f"/inference.get_pooled_embedding{PADDLE_INFERENCE_MODEL_SUFFIX}"
params_file = model_dir + f"/inference.get_pooled_embedding{PADDLE_INFERENCE_WEIGHTS_SUFFIX}"
if not os.path.exists(model_file):
raise ValueError("not find model file path {}".format(model_file))
if not os.path.exists(params_file):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,10 @@

import paddlenlp as ppnlp
from paddlenlp.data import Pad, Tuple
from paddlenlp.utils.env import (
PADDLE_INFERENCE_MODEL_SUFFIX,
PADDLE_INFERENCE_WEIGHTS_SUFFIX,
)

# fmt: off
parser = argparse.ArgumentParser()
Expand Down Expand Up @@ -83,8 +87,8 @@ def __init__(
self.max_seq_length = max_seq_length
self.batch_size = batch_size

model_file = model_dir + "/inference.get_pooled_embedding.pdmodel"
params_file = model_dir + "/inference.get_pooled_embedding.pdiparams"
model_file = model_dir + f"/inference.get_pooled_embedding{PADDLE_INFERENCE_MODEL_SUFFIX}"
params_file = model_dir + f"/inference.get_pooled_embedding{PADDLE_INFERENCE_WEIGHTS_SUFFIX}"
if not os.path.exists(model_file):
raise ValueError("not find model file path {}".format(model_file))
if not os.path.exists(params_file):
Expand Down
2 changes: 1 addition & 1 deletion slm/applications/text_classification/multi_label/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -355,7 +355,7 @@ python export_model.py --params_path ./checkpoint/ --output_path ./export --mult
export/
├── float32.pdiparams
├── float32.pdiparams.info
└── float32.pdmodel
└── float32.json (PIR enabled) / float32.pdmodel (PIR disabled)
```
导出模型之后用于部署,项目提供了基于 ONNXRuntime 的 [离线部署方案](./deploy/predictor/README.md) 和基于 Paddle Serving 的 [在线服务化部署方案](./deploy/paddle_serving/README.md)。

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,10 @@

from paddlenlp.data import Pad, Tuple
from paddlenlp.transformers import AutoTokenizer
from paddlenlp.utils.env import (
PADDLE_INFERENCE_MODEL_SUFFIX,
PADDLE_INFERENCE_WEIGHTS_SUFFIX,
)

sys.path.append(".")

Expand Down Expand Up @@ -114,8 +118,8 @@ def __init__(
self.max_seq_length = max_seq_length
self.batch_size = batch_size

model_file = model_dir + "/inference.get_pooled_embedding.pdmodel"
params_file = model_dir + "/inference.get_pooled_embedding.pdiparams"
model_file = model_dir + f"/inference.get_pooled_embedding{PADDLE_INFERENCE_MODEL_SUFFIX}"
params_file = model_dir + f"/inference.get_pooled_embedding{PADDLE_INFERENCE_WEIGHTS_SUFFIX}"
if not os.path.exists(model_file):
raise ValueError("not find model file path {}".format(model_file))
if not os.path.exists(params_file):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,10 @@

import paddlenlp as ppnlp
from paddlenlp.data import Pad, Tuple
from paddlenlp.utils.env import (
PADDLE_INFERENCE_MODEL_SUFFIX,
PADDLE_INFERENCE_WEIGHTS_SUFFIX,
)

# fmt: off
parser = argparse.ArgumentParser()
Expand Down Expand Up @@ -84,8 +88,8 @@ def __init__(
self.max_seq_length = max_seq_length
self.batch_size = batch_size

model_file = model_dir + "/inference.get_pooled_embedding.pdmodel"
params_file = model_dir + "/inference.get_pooled_embedding.pdiparams"
model_file = model_dir + f"/inference.get_pooled_embedding{PADDLE_INFERENCE_MODEL_SUFFIX}"
params_file = model_dir + f"/inference.get_pooled_embedding{PADDLE_INFERENCE_WEIGHTS_SUFFIX}"
if not os.path.exists(model_file):
raise ValueError("not find model file path {}".format(model_file))
if not os.path.exists(params_file):
Expand Down