diff --git a/.gitignore b/.gitignore
index 23a89fc2..7bbdc367 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,6 +8,7 @@ build/
 *.bin
 *.model
 *.pte
+*.ptd
 
 # Xcode
 xcuserdata/
diff --git a/.gitmodules b/.gitmodules
index 24dcd6b7..605eab61 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -2,3 +2,8 @@
 	path = mv2/cpp/executorch
 	url = https://github.com/pytorch/executorch.git
 	branch = release/0.6
+
+[submodule "program-data-separation/cpp/executorch"]
+	path = program-data-separation/cpp/executorch
+	url = https://github.com/pytorch/executorch.git
+	branch = main
diff --git a/mv2/cpp/CMakeLists.txt b/mv2/cpp/CMakeLists.txt
index fb1ef99a..fe62f858 100644
--- a/mv2/cpp/CMakeLists.txt
+++ b/mv2/cpp/CMakeLists.txt
@@ -12,7 +12,7 @@ option(EXECUTORCH_BUILD_EXTENSION_TENSOR "" ON)
 option(EXECUTORCH_BUILD_KERNELS_OPTIMIZED "" ON)
 option(EXECUTORCH_BUILD_XNNPACK "" ON)
 
-# Add ExecutorTorch subdirectory
+# Add ExecuTorch subdirectory
 add_subdirectory("executorch")
 
 set(DEMO_SOURCES main.cpp)
diff --git a/program-data-separation/README.md b/program-data-separation/README.md
new file mode 100644
index 00000000..c3e7cedd
--- /dev/null
+++ b/program-data-separation/README.md
@@ -0,0 +1,76 @@
+# Program Data Separation Examples
+
+This directory provides an example of the Program Data Separation APIs in ExecuTorch.
+
+## Virtual environment setup
+Create and activate a Python virtual environment:
+```bash
+python3 -m venv .venv && source .venv/bin/activate && pip install --upgrade pip
+```
+Alternatively, [install conda on your machine](https://conda.io/projects/conda/en/latest/user-guide/install/index.html) and create a conda environment:
+```bash
+conda create -yn executorch-examples-mv2 python=3.10.0 && conda activate executorch-examples-mv2
+```
+
+Install dependencies:
+
+Until `executorch==0.7.0` is released, please [install the ExecuTorch pip package from source](https://docs.pytorch.org/executorch/stable/using-executorch-building-from-source.html#install-executorch-pip-package-from-source). Once it is available on PyPI, you can install it directly:
+
+```
+pip install executorch==0.7.0
+```
+
+## Export a model with program-data separation
+To export a non-delegated linear model into the current directory:
+```bash
+python export.py --outdir .
+```
+Expect the files `linear.pte` and `linear.ptd`.
+
+To export a linear model delegated to XNNPACK into the current directory:
+```bash
+python export.py --outdir . --xnnpack
+```
+Expect the files `linear_xnnpack.pte` and `linear_xnnpack.ptd`.
+
+Note:
+- PTE: contains the program execution logic.
+- PTD: contains the constant tensors used by the PTE.
+
+For more information on the PTD data format, please see the [flat_tensor](https://github.com/pytorch/executorch/blob/main/extension/flat_tensor/README.md) directory.
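+
+For reference, the core of the non-delegated export flow is only a few lines. This is a condensed sketch (the model below is a stand-in for `ModuleLinear` in `export.py`; see that script for the complete, tested version):
+
+```python
+import torch
+from executorch.exir import ExecutorchBackendConfig, to_edge
+
+model = torch.nn.Linear(3, 3)  # stand-in for ModuleLinear in export.py
+exported = torch.export.export(model, (torch.randn(3),))
+
+# external_constants=True moves the constant tensors out of the PTE
+# and into a separate PTD file.
+executorch_program = to_edge(exported).to_executorch(
+    ExecutorchBackendConfig(external_constants=True)
+)
+
+with open("linear.pte", "wb") as f:
+    executorch_program.write_to_file(f)
+executorch_program.write_tensor_data_to_file(".")  # writes the .ptd file
+```
+
+`export.py` additionally renames the default external-constant key so that the data file comes out as `linear.ptd` (or `linear_xnnpack.ptd`) rather than the default name.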
+
+## Runtime (cpp)
+The `cpp/` directory contains the ExecuTorch submodule along with a `main.cpp` file that demonstrates how to load the PTE and PTD files and execute the program.
+
+First, export your PTE and PTD files using the instructions above.
+
+**Build instructions**
+
+Change to the cpp directory.
+```
+cd cpp
+```
+
+Create the build directory if it doesn't exist.
+```
+mkdir -p build
+cd build
+```
+
+Configure CMake.
+```
+cmake -DCMAKE_BUILD_TYPE=Release ..
+```
+
+Build the project.
+```
+cmake --build . -j$(nproc)
+echo "Build complete! Executable located at: ./bin/executorch_program_data_separation"
+```
+
+Run the executable.
+```
+./bin/executorch_program_data_separation --model-path ../../linear.pte --data-path ../../linear.ptd
+
+./bin/executorch_program_data_separation --model-path ../../linear_xnnpack.pte --data-path ../../linear_xnnpack.ptd
+```
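+
+Under the hood, the runner simply constructs a `Module` from both file paths and calls `forward`. A minimal sketch (header paths assume the standard ExecuTorch source layout; see `cpp/main.cpp` for the complete, flag-driven version):
+
+```cpp
+#include <executorch/extension/module/module.h>
+#include <executorch/extension/tensor/tensor.h>
+
+using namespace ::executorch::extension;
+
+int main() {
+  // Passing the .ptd path as the second argument lets the Module resolve
+  // the externally stored constant tensors at load time.
+  Module module("linear.pte", "linear.ptd");
+
+  float input[3] = {1.0f, 2.0f, 3.0f};
+  auto tensor = from_blob(input, {3});
+
+  const auto result = module.forward(tensor);
+  return result.ok() ? 0 : 1;
+}
+```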
diff --git a/program-data-separation/cpp/CMakeLists.txt b/program-data-separation/cpp/CMakeLists.txt
new file mode 100644
index 00000000..f1f670ac
--- /dev/null
+++ b/program-data-separation/cpp/CMakeLists.txt
@@ -0,0 +1,45 @@
+cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
+project(executorch_program_data_separation_demo CXX)
+
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+# Set options for the executorch build.
+option(EXECUTORCH_ENABLE_LOGGING "" ON)
+option(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER "" ON)
+option(EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR "" ON)
+option(EXECUTORCH_BUILD_EXTENSION_MODULE "" ON)
+option(EXECUTORCH_BUILD_EXTENSION_TENSOR "" ON)
+option(EXECUTORCH_BUILD_KERNELS_OPTIMIZED "" ON)
+option(EXECUTORCH_BUILD_XNNPACK "" ON)
+
+# Add ExecuTorch subdirectory
+add_subdirectory("executorch")
+
+set(DEMO_SOURCES main.cpp)
+
+# Create executable
+add_executable(executorch_program_data_separation ${DEMO_SOURCES})
+
+# Include directories
+target_include_directories(executorch_program_data_separation PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
+
+# Link libraries
+target_link_libraries(
+  executorch_program_data_separation
+  PRIVATE executorch
+          extension_module_static
+          extension_flat_tensor
+          extension_tensor
+          xnnpack_backend
+          portable_ops_lib
+          portable_kernels
+          gflags
+)
+
+# Set output directory
+set_target_properties(executorch_program_data_separation
+  PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin"
+)
diff --git a/program-data-separation/cpp/executorch b/program-data-separation/cpp/executorch
new file mode 160000
index 00000000..44564073
--- /dev/null
+++ b/program-data-separation/cpp/executorch
@@ -0,0 +1 @@
+Subproject commit 445640739fbc761a10e61430724cafb8a410198b
diff --git a/program-data-separation/cpp/main.cpp b/program-data-separation/cpp/main.cpp
new file mode 100644
index 00000000..460f85fc
--- /dev/null
+++ b/program-data-separation/cpp/main.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#include <executorch/extension/module/module.h>
+#include <executorch/extension/tensor/tensor.h>
+#include <gflags/gflags.h>
+
+#include <iostream>
+
+DEFINE_string(model_path, "linear.pte",
+              "Model serialized in flatbuffer format.");
+DEFINE_string(data_path, "linear.ptd", "Data serialized in flatbuffer format.");
+
+using namespace ::executorch::extension;
+
+int main(int argc, char *argv[]) {
+  std::cout << "Running program-data separation example" << std::endl;
+  gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+  const char *model_path = FLAGS_model_path.c_str();
+  const char *data_path = FLAGS_data_path.c_str();
+
+  // Load the model; the .ptd file supplies the external constant tensors.
+  Module module(model_path, data_path);
+
+  // Prepare a small input tensor backed by stack memory.
+  float input[3] = {1.0f, 2.0f, 3.0f};
+  auto tensor = from_blob(input, {3});
+
+  // Perform an inference.
+  const auto result = module.forward(tensor);
+
+  if (result.ok()) {
+    // Print the three output values of the linear layer.
+    const auto output = result->at(0).toTensor().const_data_ptr<float>();
+    for (int i = 0; i < 3; i++) {
+      std::cout << output[i] << std::endl;
+    }
+    std::cout << "Success" << std::endl;
+  }
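+  else {
+    // Editorial addition (not in the upstream example): report inference
+    // failures instead of silently returning success.
+    std::cerr << "Inference failed with error code "
+              << static_cast<int>(result.error()) << std::endl;
+    return 1;
+  }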
+
+  return 0;
+}
diff --git a/program-data-separation/export.py b/program-data-separation/export.py
new file mode 100644
index 00000000..a30fe29b
--- /dev/null
+++ b/program-data-separation/export.py
@@ -0,0 +1,115 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import argparse
+import os
+from functools import partial
+
+import torch
+
+from executorch.exir import (
+    EdgeCompileConfig,
+    ExecutorchBackendConfig,
+    to_edge,
+    to_edge_transform_and_lower,
+)
+from executorch.exir.passes.external_constants_pass import (
+    delegate_external_constants_pass,
+)
+
+
+class ModuleLinear(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.linear = torch.nn.Linear(3, 3)
+
+    def forward(self, x: torch.Tensor):
+        return self.linear(x)
+
+    def get_random_inputs(self):
+        return (torch.randn(3),)
+
+
+def main() -> None:
+    parser = argparse.ArgumentParser(
+        prog="export_program",
+        description="Exports nn.Module models to ExecuTorch .pte and .ptd files",
+    )
+    parser.add_argument(
+        "--outdir",
+        type=str,
+        required=True,
+        help="Path to the directory to write .pte and .ptd files to",
+    )
+    parser.add_argument(
+        "--xnnpack",
+        action="store_true",
+        help="Export the model lowered to XNNPACK",
+    )
+    args = parser.parse_args()
+
+    if args.xnnpack:
+        print("Exporting to ExecuTorch with XNNPACK")
+    else:
+        print("Exporting to ExecuTorch")
+
+    # Construct the eager model.
+    model = ModuleLinear()
+    # Export the model.
+    exported_program = torch.export.export(model, model.get_random_inputs())
+    model_name = "linear_xnnpack" if args.xnnpack else "linear"
+
+    # Lower to XNNPACK.
+    if args.xnnpack:
+        print("Lowering to XNNPACK...")
+        from executorch.backends.xnnpack.partition.xnnpack_partitioner import (
+            XnnpackPartitioner,
+        )
+
+        # Tag the delegated constants as external so they are written to
+        # <model_name>.ptd instead of being embedded in the PTE.
+        partial_function = partial(
+            delegate_external_constants_pass,
+            ep=exported_program,
+            gen_tag_fn=lambda x: model_name,
+        )
+        executorch_program = to_edge_transform_and_lower(
+            exported_program,
+            transform_passes=[partial_function],
+            compile_config=EdgeCompileConfig(_check_ir_validity=False),
+            partitioner=[XnnpackPartitioner()],
+        ).to_executorch(config=ExecutorchBackendConfig())
+
+    # No backends; plain ExecuTorch lowering with external constants.
+    else:
+        print("Lowering to ExecuTorch...")
+        edge_program = to_edge(exported_program)
+        executorch_program = edge_program.to_executorch(
+            ExecutorchBackendConfig(external_constants=True)
+        )
+
+    print("Saving PTE and PTD files.")
+    os.makedirs(args.outdir, exist_ok=True)
+    pte_file = os.path.join(args.outdir, f"{model_name}.pte")
+    with open(pte_file, "wb") as fp:
+        executorch_program.write_to_file(fp)
+
+    # Rename the default external-constant key so the data file is written as
+    # <model_name>.ptd rather than _default_external_constant.ptd.
+    if executorch_program._tensor_data.get("_default_external_constant"):
+        executorch_program._tensor_data[model_name] = executorch_program._tensor_data.pop(
+            "_default_external_constant"
+        )
+    executorch_program.write_tensor_data_to_file(args.outdir)
+
+    print(f"Successfully exported {model_name}.pte and {model_name}.ptd")
+
+
+if __name__ == "__main__":
+    main()