From a1c38b2f7afc0fe4cb733a423acf5ccca872086a Mon Sep 17 00:00:00 2001
From: RJ Ascani
Date: Thu, 7 Sep 2023 22:07:52 +0000
Subject: [PATCH 1/4] Create codegen_preprocessor

This PR creates the initial scaffolding for the codegen preprocessor.
The preprocessor is a target-specific binary that will take a model,
load it into a TFLM Interpreter, perform the Init & Prepare stages, and
then serialize the resulting data structures to an output file.

Currently, all this binary does is load the model file and write an
output file that simply contains the source model path. This will be
expanded as we expose more of the data.

BUG=b/295076056
---
 codegen/BUILD                             |  1 +
 codegen/code_generator.py                 | 26 ++++++-
 codegen/examples/hello_world/README.md    | 37 +++++++++-
 codegen/preprocessor/Makefile.inc         | 17 +++++
 codegen/preprocessor/main.cc              | 87 +++++++++++++++++++++++
 tensorflow/lite/micro/tools/make/Makefile | 15 ++++
 6 files changed, 180 insertions(+), 3 deletions(-)
 create mode 100644 codegen/preprocessor/Makefile.inc
 create mode 100644 codegen/preprocessor/main.cc

diff --git a/codegen/BUILD b/codegen/BUILD
index ae62c04c759..5daea9d9df4 100644
--- a/codegen/BUILD
+++ b/codegen/BUILD
@@ -64,6 +64,7 @@ py_binary(
     deps = [
         ":graph",
         ":inference_generator",
+        "//codegen/preprocessor:preprocessor_schema_py",
         "//tensorflow/lite/tools:flatbuffer_utils",
         "@absl_py//absl:app",
         "@absl_py//absl/flags",
diff --git a/codegen/code_generator.py b/codegen/code_generator.py
index 95d05952322..a1dbd6e614a 100644
--- a/codegen/code_generator.py
+++ b/codegen/code_generator.py
@@ -22,11 +22,14 @@
 
 from tflite_micro.codegen import inference_generator
 from tflite_micro.codegen import graph
+from tflite_micro.codegen.preprocessor import preprocessor_schema_py_generated as preprocessor_fb
 from tflite_micro.tensorflow.lite.tools import flatbuffer_utils
 
 # Usage information:
 # Default:
-#   `bazel run codegen:code_generator -- --model=</path/to/my_model.tflite>`
+#   `bazel run codegen:code_generator -- \
+#       --model=</path/to/my_model.tflite> \
+#       --preprocessed_data=</path/to/my_model.ppd>`
 # Output will be located at: /path/to/my_model.h|cc
 
 _MODEL_PATH = flags.DEFINE_string(name="model",
@@ -34,6 +37,12 @@
                                   help="Path to the TFLite model file.",
                                   required=True)
 
+_PREPROCESSED_DATA_PATH = flags.DEFINE_string(
+    name="preprocessed_data",
+    default=None,
+    help="Path to output of codegen_preprocessor.",
+    required=True)
+
 _OUTPUT_DIR = flags.DEFINE_string(
     name="output_dir",
     default=None,
@@ -48,12 +57,27 @@
     required=False)
 
 
+def _read_preprocessed_data(
+    preprocessed_data_file: str) -> preprocessor_fb.DataT:
+  with open(preprocessed_data_file, 'rb') as file:
+    data_byte_array = bytearray(file.read())
+  return preprocessor_fb.DataT.InitFromObj(
+      preprocessor_fb.Data.GetRootAs(data_byte_array, 0))
+
+
 def main(argv: Sequence[str]) -> None:
   output_dir = _OUTPUT_DIR.value or os.path.dirname(_MODEL_PATH.value)
   output_name = _OUTPUT_NAME.value or os.path.splitext(
       os.path.basename(_MODEL_PATH.value))[0]
 
   model = flatbuffer_utils.read_model(_MODEL_PATH.value)
+  preprocessed_data = _read_preprocessed_data(_PREPROCESSED_DATA_PATH.value)
+
+  print("Generating inference code for model:\n"
+        "  model: {}\n"
+        "  preprocessed_model: {}\n".format(
+            _MODEL_PATH.value,
+            preprocessed_data.inputModelPath.decode('utf-8')))
 
   inference_generator.generate(output_dir, output_name,
                                graph.OpCodeTable([model]), graph.Graph(model))
diff --git a/codegen/examples/hello_world/README.md b/codegen/examples/hello_world/README.md
index 82c9f764170..c5c0945e185 100644
--- a/codegen/examples/hello_world/README.md
+++ b/codegen/examples/hello_world/README.md
@@ -1,18 +1,51 @@
 # Codegen Hello World Example
 
-This is a code-generated example of the hello world model.
+This is a code-generated example of the hello world model. The process is
+currently somewhat involved:
+
+## Build the preprocessor for your target
+
+This creates a target-specific preprocessor binary capable of performing the
+init and prepare stages of the Interpreter and serializing the output. This
+binary can be re-used for multiple models.
+
+### x86
+```
+make -f tensorflow/lite/micro/tools/make/Makefile codegen_preprocessor
+```
+
+## Run the preprocessor
+
+The preprocessor will take the provided model, create a TFLM Interpreter, and
+allocate tensors. It will then capture and serialize the resulting data
+structures needed for inference. For embedded targets, this should be run under
+simulation.
+
+### x86
+```
+./gen/linux_x86_64_default/bin/codegen_preprocessor \
+  $(pwd)/tensorflow/lite/micro/examples/hello_world/models/hello_world_int8.tflite \
+  $(pwd)/gen/linux_x86_64_default/genfiles/hello_world_int8.ppd
+```
+
+## Generate the inference code
 
 To generate the inference code at `codegen/example/hello_world_model.h/.cc`:
 
+### x86
 ```
 bazel run codegen:code_generator -- \
   --model $(pwd)/tensorflow/lite/micro/examples/hello_world/models/hello_world_int8.tflite \
+  --preprocessed_data $(pwd)/gen/linux_x86_64_default/genfiles/hello_world_int8.ppd \
   --output_dir $(pwd)/codegen/examples/hello_world \
   --output_name hello_world_model
 ```
 
-To compile the generated source, you can use the Makefile:
+## Compile the generated inference code
+
+To compile the generated source, you can use the Makefile:
 
+### x86
 ```
 make -f tensorflow/lite/micro/tools/make/Makefile codegen_hello_world
 ```
diff --git a/codegen/preprocessor/Makefile.inc b/codegen/preprocessor/Makefile.inc
new file mode 100644
index 00000000000..b38d5edf45f
--- /dev/null
+++ b/codegen/preprocessor/Makefile.inc
@@ -0,0 +1,17 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+CODEGEN_PREPROCESSOR_SRCS := \
+$(TENSORFLOW_ROOT)codegen/preprocessor/main.cc
diff --git a/codegen/preprocessor/main.cc b/codegen/preprocessor/main.cc
new file mode 100644
index 00000000000..23f0264a57d
--- /dev/null
+++ b/codegen/preprocessor/main.cc
@@ -0,0 +1,87 @@
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <cstdlib>
+#include <fstream>
+#include <iostream>
+#include <memory>
+
+#include "codegen/preprocessor/preprocessor_schema_generated.h"
+#include "flatbuffers/flatbuffers.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+
+namespace {
+
+std::unique_ptr<char[]> ReadModelFile(const char* model_file_name) {
+  std::ifstream model_file(model_file_name, std::ios::binary);
+  if (!model_file.is_open()) {
+    std::cerr << "codegen_preprocessor: could not open model file: "
+              << model_file_name << std::endl;
+    return nullptr;
+  }
+
+  model_file.seekg(0, std::ios::end);
+  size_t num_bytes = model_file.tellg();
+  std::unique_ptr<char[]> model_data(new char[num_bytes]);
+  model_file.read(model_data.get(), num_bytes);
+
+  return model_data;
+}
+
+int WriteOutputFile(const char* output_file_name,
+                    flatbuffers::span<uint8_t> output) {
+  std::ofstream output_file(output_file_name, std::ios::trunc);
+  if (!output_file.is_open()) {
+    std::cerr << "codegen_preprocessor: could not open output file: "
+              << output_file_name << std::endl;
+    return EXIT_FAILURE;
+  }
+
+  output_file.write(reinterpret_cast<const char*>(output.data()),
+                    output.size());
+  return 0;
+}
+
+}  // namespace
+
+int main(int argc, char* argv[]) {
+  if (argc < 3) {
+    std::cerr << "codegen_preprocessor: invalid usage!" << std::endl;
+    std::cerr << "usage: codegen_preprocessor <model_file> <output_file>"
+              << std::endl;
+    return EXIT_FAILURE;
+  }
+
+  const char* model_file_name = argv[1];
+  const char* output_file_name = argv[2];
+
+  const auto model_data = ReadModelFile(model_file_name);
+  if (!model_data) {
+    return EXIT_FAILURE;
+  }
+
+  // We have to create our own allocator, as the typical TFLM runtime disables
+  // its use (to avoid dynamic allocation).
+  flatbuffers::DefaultAllocator allocator;
+  flatbuffers::FlatBufferBuilder builder{2048, &allocator};
+  const auto input_model_path = builder.CreateString(model_file_name);
+
+  // Do the preprocess work.
+
+  tflm::codegen::preprocessor::DataBuilder data_builder(builder);
+  data_builder.add_input_model_path(input_model_path);
+  builder.Finish(data_builder.Finish());
+
+  return WriteOutputFile(output_file_name, builder.GetBufferSpan());
+}
diff --git a/tensorflow/lite/micro/tools/make/Makefile b/tensorflow/lite/micro/tools/make/Makefile
index dc846edeeb6..c059792ccde 100644
--- a/tensorflow/lite/micro/tools/make/Makefile
+++ b/tensorflow/lite/micro/tools/make/Makefile
@@ -294,6 +294,8 @@ MICRO_LITE_BENCHMARKS := $(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/tool
 MICROLITE_BENCHMARK_SRCS := \
 $(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/tools/benchmarking/*benchmark.cc)
 
+MICRO_LITE_CODEGEN_PREPROCESSOR := $(TENSORFLOW_ROOT)codegen/preprocessor/Makefile.inc
+
 MICRO_LITE_CODEGEN_EXAMPLES := $(shell find $(TENSORFLOW_ROOT)codegen/examples/ -name Makefile.inc)
 
 MICROLITE_TEST_SRCS := \
@@ -704,6 +706,9 @@ include $(MICRO_LITE_BENCHMARKS)
 # Load custom kernel tests.
 include $(MAKEFILE_DIR)/additional_tests.inc
 
+# Load codegen preprocessor rules
+include $(MICRO_LITE_CODEGEN_PREPROCESSOR)
+
 # Create rules for downloading third-party dependencies.
 THIRD_PARTY_TARGETS := $(foreach DOWNLOAD,$(THIRD_PARTY_DOWNLOADS),$(eval $(call create_download_rule,$(DOWNLOAD))))
@@ -863,6 +868,16 @@ integration_tests: $(MICROLITE_INTEGRATION_TEST_TARGETS)
 generated_micro_mutable_op_resolver: $(MICROLITE_GEN_OP_RESOLVER_TEST_TARGETS)
 endif
 
+CODEGEN_PREPROCESSOR_PATH := $(BINDIR)codegen_preprocessor
+
+codegen_preprocessor: $(CODEGEN_PREPROCESSOR_PATH)
+
+$(CODEGEN_PREPROCESSOR_PATH): $(CODEGEN_PREPROCESSOR_SRCS) $(MICROLITE_LIB_PATH)
+	@mkdir -p $(dir $@)
+	$(CXX) $(CXXFLAGS) $(INCLUDES) \
+	-o $@ $< \
+	$(MICROLITE_LIB_PATH) $(LDFLAGS) $(MICROLITE_LIBS)
+
 # Just build the test targets
 build: $(MICROLITE_BUILD_TARGETS)

From 27655815555b388306dfccb970fe5ca42a5b2e6d Mon Sep 17 00:00:00 2001
From: RJ Ascani
Date: Fri, 8 Sep 2023 21:05:37 +0000
Subject: [PATCH 2/4] Create Make helpers for running codegen

The codegen workflow is a multi-step process that requires compiling,
executing code under simulation, and running Python scripts. To
simplify this workflow, this commit adds Make helper functions for
generating inference source code from a model and creating a binary
with it. It also updates the hello world example to use these helpers
and adds an update script for keeping the checked-in generated source
in sync.
---
 codegen/examples/hello_world/Makefile.inc     | 14 ++---
 codegen/examples/hello_world/README.md        | 47 +++++-----------
 .../hello_world/update_example_source.sh      | 29 ++++++++++
 tensorflow/lite/micro/tools/make/Makefile     |  4 +-
 .../micro/tools/make/helper_functions.inc     | 53 +++++++++++++++++++
 5 files changed, 103 insertions(+), 44 deletions(-)
 create mode 100755 codegen/examples/hello_world/update_example_source.sh

diff --git a/codegen/examples/hello_world/Makefile.inc b/codegen/examples/hello_world/Makefile.inc
index 56e2da712f6..6dfcf18d4e5 100644
--- a/codegen/examples/hello_world/Makefile.inc
+++ b/codegen/examples/hello_world/Makefile.inc
@@ -1,10 +1,10 @@
-CODEGEN_HELLO_WORLD_SRCS := \
-$(TENSORFLOW_ROOT)codegen/examples/hello_world/hello_world.cc \
-$(TENSORFLOW_ROOT)codegen/examples/hello_world/hello_world_model.cc
+CODEGEN_HELLO_WORLD_MODEL := \
+$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/hello_world/models/hello_world_int8.tflite
 
-CODEGEN_HELLO_WORLD_HDRS := \
-$(TENSORFLOW_ROOT)codegen/examples/hello_world/hello_world_model.h
+CODEGEN_HELLO_WORLD_SRCS := \
+$(TENSORFLOW_ROOT)codegen/examples/hello_world/hello_world.cc
 
 # Builds a standalone binary.
-$(eval $(call microlite_test,codegen_hello_world,\
-$(CODEGEN_HELLO_WORLD_SRCS),,))
+$(eval $(call codegen_model_binary,codegen_hello_world,hello_world_model,\
+$(CODEGEN_HELLO_WORLD_MODEL),$(CODEGEN_HELLO_WORLD_SRCS),,))
+
diff --git a/codegen/examples/hello_world/README.md b/codegen/examples/hello_world/README.md
index c5c0945e185..d3c805e8ff5 100644
--- a/codegen/examples/hello_world/README.md
+++ b/codegen/examples/hello_world/README.md
@@ -1,51 +1,28 @@
 # Codegen Hello World Example
 
-This is a code-generated example of the hello world model. The process is
-currently somewhat involved:
+This is a code-generated example of the hello world model. The generated source
+is checked in for now so that it can be reviewed during the prototyping stage.
 
-## Build the preprocessor for your target
+## Building the example executable
+Please note that this will execute Bazel from Make as part of the process.
 
-This creates a target-specific preprocessor binary capable of performing the
-init and prepare stages of the Interpreter and serializing the output. This
-binary can be re-used for multiple models.
-
-### x86
-```
-make -f tensorflow/lite/micro/tools/make/Makefile codegen_preprocessor
+make -f tensorflow/lite/micro/tools/make/Makefile codegen_hello_world
 ```
 
-## Run the preprocessor
+## Running the example
 
-The preprocessor will take the provided model, create a TFLM Interpreter, and
-allocate tensors. It will then capture and serialize the resulting data
-structures needed for inference. For embedded targets, this should be run under
-simulation.
+TODO(rjascani): The command works, but it'll just crash as we don't have all of
+the data structures fully populated yet.
 
-### x86
 ```
-./gen/linux_x86_64_default/bin/codegen_preprocessor \
-  $(pwd)/tensorflow/lite/micro/examples/hello_world/models/hello_world_int8.tflite \
-  $(pwd)/gen/linux_x86_64_default/genfiles/hello_world_int8.ppd
+make -f tensorflow/lite/micro/tools/make/Makefile run_codegen_hello_world
 ```
 
-## Generate the inference code
+## Updating the generated sources
+To update the generated source, you can execute this make target:
 
-To generate the inference code at `codegen/example/hello_world_model.h/.cc`:
-
-### x86
 ```
-bazel run codegen:code_generator -- \
-  --model $(pwd)/tensorflow/lite/micro/examples/hello_world/models/hello_world_int8.tflite \
-  --preprocessed_data $(pwd)/gen/linux_x86_64_default/genfiles/hello_world_int8.ppd \
-  --output_dir $(pwd)/codegen/examples/hello_world \
-  --output_name hello_world_model
+./codegen/examples/hello_world/update_example_source.sh
 ```
 
-## Compile the generated inference code
-
-To compile the generated source, you can use the Makefile:
-
-### x86
-```
-make -f tensorflow/lite/micro/tools/make/Makefile codegen_hello_world
-```
diff --git a/codegen/examples/hello_world/update_example_source.sh b/codegen/examples/hello_world/update_example_source.sh
new file mode 100755
index 00000000000..df5e2ace365
--- /dev/null
+++ b/codegen/examples/hello_world/update_example_source.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+#
+# Syncs the generated example source code in the repository.
+#
+
+set -e
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+ROOT_DIR=${SCRIPT_DIR}/../../..
+cd "${ROOT_DIR}"
+
+make -j8 -f tensorflow/lite/micro/tools/make/Makefile codegen_hello_world
+cp ./gen/linux_x86_64_default/genfiles/hello_world_model.h ${SCRIPT_DIR}
+cp ./gen/linux_x86_64_default/genfiles/hello_world_model.cc ${SCRIPT_DIR}
diff --git a/tensorflow/lite/micro/tools/make/Makefile b/tensorflow/lite/micro/tools/make/Makefile
index c059792ccde..cbcd85fc83e 100644
--- a/tensorflow/lite/micro/tools/make/Makefile
+++ b/tensorflow/lite/micro/tools/make/Makefile
@@ -294,6 +294,8 @@ MICRO_LITE_BENCHMARKS := $(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/tool
 MICROLITE_BENCHMARK_SRCS := \
 $(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/tools/benchmarking/*benchmark.cc)
 
+CODEGEN_PREPROCESSOR_PATH := $(BINDIR)codegen_preprocessor
+
 MICRO_LITE_CODEGEN_PREPROCESSOR := $(TENSORFLOW_ROOT)codegen/preprocessor/Makefile.inc
 
 MICRO_LITE_CODEGEN_EXAMPLES := $(shell find $(TENSORFLOW_ROOT)codegen/examples/ -name Makefile.inc)
@@ -868,8 +870,6 @@ integration_tests: $(MICROLITE_INTEGRATION_TEST_TARGETS)
 generated_micro_mutable_op_resolver: $(MICROLITE_GEN_OP_RESOLVER_TEST_TARGETS)
 endif
 
-CODEGEN_PREPROCESSOR_PATH := $(BINDIR)codegen_preprocessor
-
 codegen_preprocessor: $(CODEGEN_PREPROCESSOR_PATH)
 
 $(CODEGEN_PREPROCESSOR_PATH): $(CODEGEN_PREPROCESSOR_SRCS) $(MICROLITE_LIB_PATH)
diff --git a/tensorflow/lite/micro/tools/make/helper_functions.inc b/tensorflow/lite/micro/tools/make/helper_functions.inc
index ad3d44c45e5..ca268d9204a 100644
--- a/tensorflow/lite/micro/tools/make/helper_functions.inc
+++ b/tensorflow/lite/micro/tools/make/helper_functions.inc
@@ -117,3 +117,56 @@ endef
 # 2 - File pattern, e.g: *.h
 recursive_find = $(wildcard $(1)$(2)) $(foreach dir,$(wildcard $(1)*),$(call recursive_find,$(dir)/,$(2)))
 
+# Generates code capable of performing inference without an interpreter. It
+# runs the codegen preprocessor and the code generator.
+#
+# Arguments are:
+# 1 - Name of target
+# 2 - Generated source basename
+# 3 - Model
+# Calling eval on the output will create the targets that you need.
+define codegen_model
+
+$(1)_MODEL := $(abspath $(3))
+$(1)_PREPROCESSOR_OUTPUT := $(abspath $(GENERATED_SRCS_DIR)/$(2).ppd)
+$(1)_GENERATED_SRC_DIR := $(abspath $(GENERATED_SRCS_DIR))
+
+$(1)_GENERATED_SRCS := $$($(1)_GENERATED_SRC_DIR)/$(2).cc
+$(1)_GENERATED_HDRS := $$($(1)_GENERATED_SRC_DIR)/$(2).h
+
+$$($(1)_PREPROCESSOR_OUTPUT): $(CODEGEN_PREPROCESSOR_PATH) $$($(1)_MODEL)
+	@mkdir -p $$(dir $$@)
+	$$(TEST_SCRIPT) $(CODEGEN_PREPROCESSOR_PATH) $$($(1)_MODEL) $$($(1)_PREPROCESSOR_OUTPUT)
+
+$$($(1)_GENERATED_SRCS) $$($(1)_GENERATED_HDRS): $$($(1)_MODEL) $$($(1)_PREPROCESSOR_OUTPUT)
+	bazel run //codegen:code_generator -- \
+	--model $$($(1)_MODEL) --preprocessed_data $$($(1)_PREPROCESSOR_OUTPUT) \
+	--output_dir $$($(1)_GENERATED_SRC_DIR) --output_name $(2)
+
+$(1): $$($(1)_GENERATED_SRCS) $$($(1)_GENERATED_HDRS)
+
+endef # codegen_model
+
+# Generates and compiles code capable of performing inference without an
+# interpreter.
+#
+# Arguments are:
+# 1 - Name of target
+# 2 - Generated source basename
+# 3 - Model
+# 4 - C/C++ source files
+# 5 - C/C++ header files
+# Calling eval on the output will create the targets that you need.
+define codegen_model_binary
+
+$(1)_LOCAL_SRCS := $(4)
+$(1)_LOCAL_HDRS := $(5)
+
+$(call codegen_model,$(1)_codegen,$(2),$(3))
+
+$(1)_LOCAL_SRCS += $$($(1)_codegen_GENERATED_SRCS)
+$(1)_LOCAL_HDRS += $$($(1)_codegen_GENERATED_HDRS)
+
+$(call microlite_test,$(1),$$($(1)_LOCAL_SRCS),$$($(1)_LOCAL_HDRS),,)
+
+endef # codegen_model_binary

From 80c9a38544baeca54bd3348e4ccc812ff05f4c17 Mon Sep 17 00:00:00 2001
From: RJ Ascani
Date: Fri, 8 Sep 2023 23:00:17 +0000
Subject: [PATCH 3/4] Reset file position back to the beginning before read

---
 codegen/preprocessor/main.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/codegen/preprocessor/main.cc b/codegen/preprocessor/main.cc
index 23f0264a57d..96056e79d31 100644
--- a/codegen/preprocessor/main.cc
+++ b/codegen/preprocessor/main.cc
@@ -34,6 +34,7 @@ std::unique_ptr<char[]> ReadModelFile(const char* model_file_name) {
 
   model_file.seekg(0, std::ios::end);
   size_t num_bytes = model_file.tellg();
+  model_file.seekg(0,std::ios::beg);
   std::unique_ptr<char[]> model_data(new char[num_bytes]);
   model_file.read(model_data.get(), num_bytes);
 

From a3a7b7feecb464e5af30a44416f11f14e85c587e Mon Sep 17 00:00:00 2001
From: RJ Ascani
Date: Mon, 11 Sep 2023 23:03:53 +0000
Subject: [PATCH 4/4] Fixes for CI

---
 codegen/preprocessor/main.cc                  |  2 +-
 .../micro/tools/make/helper_functions.inc     | 29 ++++++++++---------
 .../make/targets/cortex_m_qemu_makefile.inc   |  1 +
 .../tools/make/targets/hexagon_makefile.inc   |  1 +
 .../make/targets/riscv32_generic_makefile.inc |  1 +
 .../tools/make/targets/xtensa_makefile.inc    |  1 +
 6 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/codegen/preprocessor/main.cc b/codegen/preprocessor/main.cc
index 96056e79d31..d6b5638a9f1 100644
--- a/codegen/preprocessor/main.cc
+++ b/codegen/preprocessor/main.cc
@@ -34,7 +34,7 @@ std::unique_ptr<char[]> ReadModelFile(const char* model_file_name) {
 
   model_file.seekg(0, std::ios::end);
   size_t num_bytes = model_file.tellg();
-  model_file.seekg(0,std::ios::beg);
+  model_file.seekg(0, std::ios::beg);
   std::unique_ptr<char[]> model_data(new char[num_bytes]);
   model_file.read(model_data.get(), num_bytes);
 
diff --git a/tensorflow/lite/micro/tools/make/helper_functions.inc b/tensorflow/lite/micro/tools/make/helper_functions.inc
index ca268d9204a..272e82c0eb6 100644
--- a/tensorflow/lite/micro/tools/make/helper_functions.inc
+++ b/tensorflow/lite/micro/tools/make/helper_functions.inc
@@ -127,21 +127,22 @@ recursive_find = $(wildcard $(1)$(2)) $(foreach dir,$(wildcard $(1)*),$(call rec
 # Calling eval on the output will create the targets that you need.
define codegen_model -$(1)_MODEL := $(abspath $(3)) -$(1)_PREPROCESSOR_OUTPUT := $(abspath $(GENERATED_SRCS_DIR)/$(2).ppd) -$(1)_GENERATED_SRC_DIR := $(abspath $(GENERATED_SRCS_DIR)) +$(1)_MODEL := $(3) +$(1)_PREPROCESSOR_OUTPUT := $(GENERATED_SRCS_DIR)/$(2).ppd -$(1)_GENERATED_SRCS := $$($(1)_GENERATED_SRC_DIR)/$(2).cc -$(1)_GENERATED_HDRS := $$($(1)_GENERATED_SRC_DIR)/$(2).h +$(1)_GENERATED_SRCS := $(GENERATED_SRCS_DIR)$(2).cc +$(1)_GENERATED_HDRS := $(GENERATED_SRCS_DIR)$(2).h $$($(1)_PREPROCESSOR_OUTPUT): $(CODEGEN_PREPROCESSOR_PATH) $$($(1)_MODEL) @mkdir -p $$(dir $$@) - $$(TEST_SCRIPT) $(CODEGEN_PREPROCESSOR_PATH) $$($(1)_MODEL) $$($(1)_PREPROCESSOR_OUTPUT) + $$(RUN_COMMAND) $(CODEGEN_PREPROCESSOR_PATH) \ + $(abspath $$($(1)_MODEL)) $(abspath $$($(1)_PREPROCESSOR_OUTPUT)) $$($(1)_GENERATED_SRCS) $$($(1)_GENERATED_HDRS): $$($(1)_MODEL) $$($(1)_PREPROCESSOR_OUTPUT) - bazel run //codegen:code_generator -- \ - --model $$($(1)_MODEL) --preprocessed_data $$($(1)_PREPROCESSOR_OUTPUT) \ - --output_dir $$($(1)_GENERATED_SRC_DIR) --output_name $(2) + cd $(TENSORFLOW_ROOT) && bazel run //codegen:code_generator -- \ + --model $(abspath $$($(1)_MODEL)) \ + --preprocessed_data $(abspath $$($(1)_PREPROCESSOR_OUTPUT)) \ + --output_dir $(abspath $(GENERATED_SRCS_DIR)) --output_name $(2) $(1): $$($(1)_GENERATED_SRCS) $$($(1)_GENERATED_HDRS) @@ -159,14 +160,14 @@ endef # codegen_model # Calling eval on the output will create the targets that you need. define codegen_model_binary -$(1)_LOCAL_SRCS := $(4) -$(1)_LOCAL_HDRS := $(5) +$(1)_CODEGEN_SRCS := $(4) +$(1)_CODEGEN_HDRS := $(5) $(call codegen_model,$(1)_codegen,$(2),$(3)) -$(1)_LOCAL_SRCS += $$($(1)_codegen_GENERATED_SRCS) -$(1)_LOCAL_HDRS += $$($(1)_codegen_GENERATED_HDRS) +$(1)_CODEGEN_SRCS += $$($(1)_codegen_GENERATED_SRCS) +$(1)_CODEGEN_HDRS += $$($(1)_codegen_GENERATED_HDRS) -$(call microlite_test,$(1),$$($(1)_LOCAL_SRCS),$$($(1)_LOCAL_HDRS),,) +$(call microlite_test,$(1),$$($(1)_CODEGEN_SRCS),$$($(1)_CODEGEN_HDRS),,) endef # codegen_model_binary diff --git a/tensorflow/lite/micro/tools/make/targets/cortex_m_qemu_makefile.inc b/tensorflow/lite/micro/tools/make/targets/cortex_m_qemu_makefile.inc index a83fc186a4f..8a0edfedfb6 100644 --- a/tensorflow/lite/micro/tools/make/targets/cortex_m_qemu_makefile.inc +++ b/tensorflow/lite/micro/tools/make/targets/cortex_m_qemu_makefile.inc @@ -44,3 +44,4 @@ EXCLUDED_TESTS := \ MICROLITE_TEST_SRCS := $(filter-out $(EXCLUDED_TESTS), $(MICROLITE_TEST_SRCS)) TEST_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/test_with_qemu.sh arm $(TARGET_ARCH) +RUN_COMMAND := qemu-arm -cpu $(TARGET_ARCH) diff --git a/tensorflow/lite/micro/tools/make/targets/hexagon_makefile.inc b/tensorflow/lite/micro/tools/make/targets/hexagon_makefile.inc index 2e7d18ca1cd..841117c6120 100644 --- a/tensorflow/lite/micro/tools/make/targets/hexagon_makefile.inc +++ b/tensorflow/lite/micro/tools/make/targets/hexagon_makefile.inc @@ -114,3 +114,4 @@ MICROLITE_TEST_SRCS := $(filter-out $(EXCLUDED_TESTS), $(MICROLITE_TEST_SRCS)) TEST_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/test_hexagon_binary.sh SIZE_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/size_hexagon_binary.sh +RUN_COMMAND := hexagon-sim diff --git a/tensorflow/lite/micro/tools/make/targets/riscv32_generic_makefile.inc b/tensorflow/lite/micro/tools/make/targets/riscv32_generic_makefile.inc index ce5f0eba504..933745ba85f 100644 --- a/tensorflow/lite/micro/tools/make/targets/riscv32_generic_makefile.inc +++ 
b/tensorflow/lite/micro/tools/make/targets/riscv32_generic_makefile.inc @@ -45,4 +45,5 @@ MICROLITE_TEST_SRCS := $(filter-out $(EXCLUDED_TESTS), $(MICROLITE_TEST_SRCS)) LDFLAGS += -mno-relax TEST_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/test_with_qemu.sh riscv32 rv32 SIZE_SCRIPT := ${TENSORFLOW_ROOT}tensorflow/lite/micro/testing/size_riscv32_binary.sh +RUN_COMMAND := qemu-riscv32 -cpu rv32 diff --git a/tensorflow/lite/micro/tools/make/targets/xtensa_makefile.inc b/tensorflow/lite/micro/tools/make/targets/xtensa_makefile.inc index 8d970c72bb9..6fda996ec58 100644 --- a/tensorflow/lite/micro/tools/make/targets/xtensa_makefile.inc +++ b/tensorflow/lite/micro/tools/make/targets/xtensa_makefile.inc @@ -72,6 +72,7 @@ CXXFLAGS += $(XTENSA_EXTRA_CFLAGS) TEST_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/test_xtensa_binary.sh SIZE_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/size_xtensa_binary.sh +RUN_COMMAND := xt-run # TODO(b/158651472): Fix the memory_arena_threshold_test # TODO(b/174707181): Fix the micro_interpreter_test
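
For reference, below is a minimal sketch of how a consumer could read back the
.ppd file that codegen_preprocessor writes, mirroring what
_read_preprocessed_data does in Python in patch 1. This is illustrative only
and not part of the patch series: the file name read_ppd.cc is hypothetical,
and the only assumptions are the generated preprocessor_schema_generated.h
header from the patch and the stock flatbuffers::GetRoot accessor for its Data
root table.

// read_ppd.cc - hypothetical standalone reader for codegen_preprocessor
// output. Illustrative sketch; not part of the patch series.
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>

#include "codegen/preprocessor/preprocessor_schema_generated.h"
#include "flatbuffers/flatbuffers.h"

int main(int argc, char* argv[]) {
  if (argc < 2) {
    std::cerr << "usage: read_ppd <ppd_file>" << std::endl;
    return 1;
  }

  // Slurp the serialized flatbuffer into memory.
  std::ifstream ppd_file(argv[1], std::ios::binary);
  const std::string buffer((std::istreambuf_iterator<char>(ppd_file)),
                           std::istreambuf_iterator<char>());

  // GetRoot recovers the Data table that DataBuilder serialized in main.cc.
  const auto* data =
      flatbuffers::GetRoot<tflm::codegen::preprocessor::Data>(buffer.data());
  std::cout << "input model: " << data->input_model_path()->str() << std::endl;
  return 0;
}

As the preprocessor grows beyond input_model_path (tensor allocations, init
and prepare results), the same pattern applies: add fields to the schema and
read them back through the generated accessors.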