diff --git a/tensorflow/lite/micro/kernels/conv.cc b/tensorflow/lite/micro/kernels/conv.cc
index 3e4fb62318d..7be915ab51e 100644
--- a/tensorflow/lite/micro/kernels/conv.cc
+++ b/tensorflow/lite/micro/kernels/conv.cc
@@ -67,8 +67,8 @@ TfLiteStatus ConvEval(TfLiteContext* context, TfLiteNode* node) {
                                               weights_comp_td,
                                               data.weights_scratch_index),
           tflite::micro::GetTensorShape(bias),
-          tflite::micro::GetTensorData<float>(micro_context, bias, bias_comp_td,
-                                              data.bias_scratch_index),
+          tflite::micro::GetOptionalTensorData<float>(
+              micro_context, bias, bias_comp_td, data.bias_scratch_index),
 #else   // USE_TFLM_COMPRESSION
           tflite::micro::GetTensorData<float>(filter),
           tflite::micro::GetTensorShape(bias),
@@ -92,7 +92,7 @@ TfLiteStatus ConvEval(TfLiteContext* context, TfLiteNode* node) {
                                                weights_comp_td,
                                                data.weights_scratch_index),
             tflite::micro::GetTensorShape(bias),
-            tflite::micro::GetTensorData<int32_t>(
+            tflite::micro::GetOptionalTensorData<int32_t>(
                 micro_context, bias, bias_comp_td, data.bias_scratch_index),
 #else   // USE_TFLM_COMPRESSION
             tflite::micro::GetTensorData<int8_t>(filter),
@@ -118,7 +118,7 @@ TfLiteStatus ConvEval(TfLiteContext* context, TfLiteNode* node) {
 #else   // USE_TFLM_COMPRESSION
             tflite::micro::GetTensorData<int8_t>(filter),
             tflite::micro::GetTensorShape(bias),
-            tflite::micro::GetOptionalTensorData<std::int64_t>(bias),
+            tflite::micro::GetTensorData<std::int64_t>(bias),
 #endif  // USE_TFLM_COMPRESSION
             tflite::micro::GetTensorShape(output),
             tflite::micro::GetTensorData<int16_t>(output));
@@ -162,7 +162,7 @@ TfLiteStatus ConvEval(TfLiteContext* context, TfLiteNode* node) {
                                                  weights_comp_td,
                                                  data.weights_scratch_index),
               tflite::micro::GetTensorShape(bias),
-              tflite::micro::GetTensorData<int32_t>(
+              tflite::micro::GetOptionalTensorData<int32_t>(
                   micro_context, bias, bias_comp_td, data.bias_scratch_index),
 #else   // USE_TFLM_COMPRESSION
               tflite::micro::GetTensorData<int8_t>(filter),
diff --git a/tensorflow/lite/micro/kernels/xtensa/conv.cc b/tensorflow/lite/micro/kernels/xtensa/conv.cc
index 5eb7a1bb7d4..39618d41f66 100644
--- a/tensorflow/lite/micro/kernels/xtensa/conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/conv.cc
@@ -72,7 +72,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
               micro_context, filter, weights_comp_td,
               op_data.reference_op_data.weights_scratch_index),
           tflite::micro::GetTensorShape(bias),
-          tflite::micro::GetTensorData<float>(
+          tflite::micro::GetOptionalTensorData<float>(
              micro_context, bias, bias_comp_td,
              op_data.reference_op_data.bias_scratch_index),
 #else   // USE_TFLM_COMPRESSION
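For context, the distinction these conv.cc and xtensa/conv.cc hunks lean on: `GetTensorData` assumes the tensor is present, while `GetOptionalTensorData` maps a missing tensor to `nullptr`, which the reference conv kernels accept as "no bias". Below is a minimal, self-contained sketch of that contract; it uses simplified stand-in types, not the actual definitions in tensorflow/lite/micro/kernels/kernel_util.h, which also provide the compressed-tensor overloads seen in the hunks above.

```cpp
#include <cassert>

// Simplified stand-in for TfLiteEvalTensor, for illustration only.
struct TfLiteEvalTensor {
  union { const void* raw; } data;
};

// Assumes the tensor exists; a null tensor is a programming error.
template <typename T>
const T* GetTensorData(const TfLiteEvalTensor* tensor) {
  assert(tensor != nullptr);  // TFLM checks this with a debug assertion
  return static_cast<const T*>(tensor->data.raw);
}

// Tolerates an absent tensor: a conv with no bias simply yields nullptr,
// which the reference conv kernels treat as "skip the bias add".
template <typename T>
const T* GetOptionalTensorData(const TfLiteEvalTensor* tensor) {
  return tensor == nullptr ? nullptr
                           : static_cast<const T*>(tensor->data.raw);
}

int main() {
  const TfLiteEvalTensor* bias = nullptr;  // model built without a conv bias
  // GetTensorData<int>(bias) would trip the assert; the optional accessor
  // degrades gracefully to a null bias pointer.
  return GetOptionalTensorData<int>(bias) == nullptr ? 0 : 1;
}
```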
diff --git a/tensorflow/lite/micro/kernels/xtensa/conv_hifi.cc b/tensorflow/lite/micro/kernels/xtensa/conv_hifi.cc
index d9bbcdbea7d..b5d4b5ea859 100644
--- a/tensorflow/lite/micro/kernels/xtensa/conv_hifi.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/conv_hifi.cc
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -193,12 +193,13 @@ TfLiteStatus ConvEvalHifiInt16(TfLiteContext* context, TfLiteNode* node,
   const int8_t* filter_data = tflite::micro::GetTensorData<int8_t>(
       micro_context, filter, weights_comp_td,
       data.reference_op_data.weights_scratch_index);
-  const int64_t* bias_data = tflite::micro::GetTensorData<int64_t>(
+  const int64_t* bias_data = tflite::micro::GetOptionalTensorData<int64_t>(
       micro_context, bias, bias_comp_td,
       data.reference_op_data.bias_scratch_index);
 #else   // USE_TFLM_COMPRESSION
   const int8_t* filter_data = tflite::micro::GetTensorData<int8_t>(filter);
-  const int64_t* bias_data = tflite::micro::GetTensorData<int64_t>(bias);
+  const int64_t* bias_data =
+      tflite::micro::GetOptionalTensorData<int64_t>(bias);
 #endif  // USE_TFLM_COMPRESSION
   int16_t* output_data = tflite::micro::GetTensorData<int16_t>(output);
 
@@ -307,11 +308,12 @@ TfLiteStatus ConvEvalHifiInt8(TfLiteContext* context, TfLiteNode* node,
 
   const int8_t* input_data = tflite::micro::GetTensorData<int8_t>(input);
 #ifdef USE_TFLM_COMPRESSION
-  const int32_t* bias_data = tflite::micro::GetTensorData<int32_t>(
+  const int32_t* bias_data = tflite::micro::GetOptionalTensorData<int32_t>(
       micro_context, bias, bias_comp_td,
       data.reference_op_data.bias_scratch_index);
 #else   // USE_TFLM_COMPRESSION
-  const int32_t* bias_data = tflite::micro::GetTensorData<int32_t>(bias);
+  const int32_t* bias_data =
+      tflite::micro::GetOptionalTensorData<int32_t>(bias);
 #endif  // USE_TFLM_COMPRESSION
   int8_t* output_data = tflite::micro::GetTensorData<int8_t>(output);
 
diff --git a/tensorflow/lite/micro/kernels/xtensa/conv_int16_reference.cc b/tensorflow/lite/micro/kernels/xtensa/conv_int16_reference.cc
index 0f583cdaceb..c50faa43e42 100644
--- a/tensorflow/lite/micro/kernels/xtensa/conv_int16_reference.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/conv_int16_reference.cc
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -68,8 +68,8 @@ TfLiteStatus ConvReferenceEvalInt16(TfLiteContext* context, TfLiteNode* node) {
                                                weights_comp_td,
                                                op_data.weights_scratch_index),
             tflite::micro::GetTensorShape(bias),
-            tflite::micro::GetTensorData<int32_t>(micro_context, bias, bias_comp_td,
-                                                  op_data.bias_scratch_index),
+            tflite::micro::GetOptionalTensorData<int32_t>(
+                micro_context, bias, bias_comp_td, op_data.bias_scratch_index),
 #else   // USE_TFLM_COMPRESSION
             tflite::micro::GetTensorData<int8_t>(filter),
             tflite::micro::GetTensorShape(bias),
@@ -94,7 +94,7 @@ TfLiteStatus ConvReferenceEvalInt16(TfLiteContext* context, TfLiteNode* node) {
 #else   // USE_TFLM_COMPRESSION
             tflite::micro::GetTensorData<int8_t>(filter),
             tflite::micro::GetTensorShape(bias),
-            tflite::micro::GetOptionalTensorData<std::int64_t>(bias),
+            tflite::micro::GetTensorData<std::int64_t>(bias),
 #endif  // USE_TFLM_COMPRESSION
             tflite::micro::GetTensorShape(output),
             tflite::micro::GetTensorData<int16_t>(output));
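Note that two hunks run in the opposite direction, swapping `GetOptionalTensorData` back to `GetTensorData` in the `#else` branches (conv.cc line 118 and conv_int16_reference.cc line 94). Both sit on the int64-bias path. Assuming the surrounding dispatch has the shape sketched below (the guard itself is outside the hunks, so this is a reconstruction rather than quoted source), bias is provably non-null on that path, and the plain accessor is the tighter choice:

```cpp
#include <cstdint>

// Illustrative stand-ins; the real dispatch lives in ConvEval and
// ConvReferenceEvalInt16.
enum TfLiteType { kTfLiteInt32, kTfLiteInt64 };
struct TfLiteEvalTensor { TfLiteType type; };

int BiasWidth(const TfLiteEvalTensor* bias) {
  if (bias == nullptr || bias->type == kTfLiteInt32) {
    // bias may be absent on this path, so only the optional accessor
    // (GetOptionalTensorData<int32_t>) is safe here.
    return 32;
  } else if (bias->type == kTfLiteInt64) {
    // Reaching this branch required reading bias->type, so bias is
    // necessarily non-null and plain GetTensorData<std::int64_t> suffices.
    return 64;
  }
  return 0;
}

int main() { return BiasWidth(nullptr) == 32 ? 0 : 1; }
```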
diff --git a/tensorflow/lite/micro/kernels/xtensa/conv_int8_reference.cc b/tensorflow/lite/micro/kernels/xtensa/conv_int8_reference.cc
index ba746f0ff8f..24adc64e19f 100644
--- a/tensorflow/lite/micro/kernels/xtensa/conv_int8_reference.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/conv_int8_reference.cc
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -81,8 +81,8 @@ TfLiteStatus ConvReferenceEvalInt8(TfLiteContext* context, TfLiteNode* node) {
       tflite::micro::GetTensorShape(filter), filter_data,
       tflite::micro::GetTensorShape(bias),
 #ifdef USE_TFLM_COMPRESSION
-      tflite::micro::GetTensorData<int32_t>(micro_context, bias, bias_comp_td,
-                                            op_data.bias_scratch_index),
+      tflite::micro::GetOptionalTensorData<int32_t>(
+          micro_context, bias, bias_comp_td, op_data.bias_scratch_index),
 #else   // USE_TFLM_COMPRESSION
       tflite::micro::GetOptionalTensorData<int32_t>(bias),
 #endif  // USE_TFLM_COMPRESSION
diff --git a/tensorflow/lite/micro/kernels/xtensa/conv_vision.cc b/tensorflow/lite/micro/kernels/xtensa/conv_vision.cc
index 8a0330907c3..0da261f0aa4 100644
--- a/tensorflow/lite/micro/kernels/xtensa/conv_vision.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/conv_vision.cc
@@ -36,8 +36,10 @@ TfLiteStatus ConvPrepareVision(TfLiteContext* context, TfLiteNode* node) {
   MicroContext* micro_context = GetMicroContext(context);
 
   TfLiteTensor* input =
       micro_context->AllocateTempInputTensor(node, kConvInputTensor);
+  TF_LITE_ENSURE(context, input != nullptr);
   TfLiteTensor* bias =
       micro_context->AllocateTempInputTensor(node, kConvBiasTensor);
+  TF_LITE_ENSURE(context, bias != nullptr);
   const uint32_t input_height = SizeOfDimension(input, 1);
   const uint32_t input_width = SizeOfDimension(input, 2);
@@ -47,8 +49,10 @@ TfLiteStatus ConvPrepareVision(TfLiteContext* context, TfLiteNode* node) {
 
   TfLiteTensor* output =
       micro_context->AllocateTempOutputTensor(node, kConvOutputTensor);
+  TF_LITE_ENSURE(context, output != nullptr);
   TfLiteTensor* filter =
       micro_context->AllocateTempInputTensor(node, kConvWeightsTensor);
+  TF_LITE_ENSURE(context, filter != nullptr);
 
   const uint32_t output_height = SizeOfDimension(output, 1);
   const uint32_t output_width = SizeOfDimension(output, 2);
@@ -212,9 +216,7 @@ TfLiteStatus ConvPrepareVision(TfLiteContext* context, TfLiteNode* node) {
   micro_context->DeallocateTempTfLiteTensor(output);
   micro_context->DeallocateTempTfLiteTensor(input);
   micro_context->DeallocateTempTfLiteTensor(filter);
-  if (bias != nullptr) {
-    micro_context->DeallocateTempTfLiteTensor(bias);
-  }
+  micro_context->DeallocateTempTfLiteTensor(bias);
 
   return kTfLiteOk;
 }
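The conv_vision.cc changes apply the same hygiene at prepare time: the `AllocateTemp*Tensor` calls can return `nullptr`, and `TF_LITE_ENSURE` bails out of Prepare with `kTfLiteError` when a check fails. Since bias is now verified non-null right after allocation, the guarded deallocation at the end became dead code, hence the unconditional `DeallocateTempTfLiteTensor(bias)`. A rough sketch of the pattern, with stand-in types and a simplified ENSURE macro (the real TF_LITE_ENSURE also reports the failure through the context):

```cpp
// Stand-ins for the TFLM types, for illustration only.
using TfLiteStatus = int;
constexpr TfLiteStatus kTfLiteOk = 0;
constexpr TfLiteStatus kTfLiteError = 1;
struct TfLiteContext {};
struct TfLiteTensor {};

// Simplified ENSURE; TF_LITE_ENSURE additionally logs via the context.
#define ENSURE(context, condition)          \
  do {                                      \
    (void)(context);                        \
    if (!(condition)) return kTfLiteError;  \
  } while (0)

// Toy allocator that can fail, like MicroContext::AllocateTempInputTensor.
TfLiteTensor* AllocateTemp(bool succeed) {
  static TfLiteTensor tensor;
  return succeed ? &tensor : nullptr;
}

TfLiteStatus PrepareSketch(TfLiteContext* context, bool bias_present) {
  TfLiteTensor* bias = AllocateTemp(bias_present);
  ENSURE(context, bias != nullptr);  // fail fast instead of crashing later
  // ... dimension and quantization checks using bias ...
  // bias is provably non-null from here on, so cleanup needs no null guard.
  return kTfLiteOk;
}

int main() {
  TfLiteContext context;
  return PrepareSketch(&context, /*bias_present=*/true) == kTfLiteOk ? 0 : 1;
}
```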