Skip to content

Commit

Permalink
hps_accel: Add standalone pool test
Browse files Browse the repository at this point in the history
Adds standalone test for data pooling.

Signed-off-by: Alan Green <[email protected]>
  • Loading branch information
alanvgreen committed Feb 3, 2022
1 parent 21a4efb commit 85bedda
Show file tree
Hide file tree
Showing 8 changed files with 4,510 additions and 38 deletions.
3 changes: 2 additions & 1 deletion proj/hps_accel/src/conv2d_call.cc
Original file line number Diff line number Diff line change
Expand Up @@ -36,11 +36,12 @@ void test_conv2d(const Conv2DData* data) {

const tflite::RuntimeShape& output_shape =
*(reinterpret_cast<const tflite::RuntimeShape*>(data->output_shape));

tflite::reference_integer_ops::ConvPerChannel(
*(reinterpret_cast<const tflite::ConvParams*>(data->params)),
reinterpret_cast<const int32_t*>(data->output_multiplier),
reinterpret_cast<const int32_t*>(data->output_shift), input_shape,
reinterpret_cast<const int8_t*>(arena_input),
arena_input,
*(reinterpret_cast<const tflite::RuntimeShape*>(data->filter_shape)),
reinterpret_cast<const int8_t*>(data->filter_data),
*(reinterpret_cast<const tflite::RuntimeShape*>(data->bias_shape)),
Expand Down
4,313 changes: 4,313 additions & 0 deletions proj/hps_accel/src/pool_03.cc

Large diffs are not rendered by default.

26 changes: 26 additions & 0 deletions proj/hps_accel/src/pool_03.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
/*
* Copyright 2021 The CFU-Playground Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

// Use a guard without a leading underscore: identifiers beginning with an
// underscore followed by an uppercase letter are reserved in C++.
#ifndef POOL_03_H
#define POOL_03_H

#include <cstdint>

#include "pool_call.h"

// Captured parameters, input and golden output for pooling layer 03,
// for use with test_pool(). Defined in pool_03.cc.
extern PoolData pool_03_data;

#endif  // POOL_03_H
69 changes: 69 additions & 0 deletions proj/hps_accel/src/pool_call.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
/*
* Copyright 2022 The CFU-Playground Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "pool_call.h"

#include <cstdio>

#include "playground_util/dump.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h"
#include "tflite.h"

void test_pool(const PoolData* data) {
printf("Testing Pool %s\n", data->name);
// Copy input arena
int8_t* arena_input = reinterpret_cast<int8_t*>(tflite_tensor_arena);
auto input_shape =
*(reinterpret_cast<const tflite::RuntimeShape*>(data->input_shape));
for (int i = 0; i < input_shape.FlatSize(); i++) {
arena_input[i] = data->input_data[i];
}
// Set up output arena
int8_t* arena_output =
reinterpret_cast<int8_t*>(tflite_tensor_arena) + 128 * 1024;
const tflite::RuntimeShape& output_shape =
*(reinterpret_cast<const tflite::RuntimeShape*>(data->output_shape));

tflite::reference_integer_ops::MaxPool(
*(reinterpret_cast<const tflite::PoolParams*>(data->params)), input_shape,
arena_input, output_shape, arena_output);

// Check for differences with output
int diff_count = 0;
int first_diff = 0;
int num_words = output_shape.FlatSize() / 4;
const int32_t* arena_words = reinterpret_cast<const int32_t*>(arena_output);
const int32_t* expected_words =
reinterpret_cast<const int32_t*>(data->output_data);
for (int i = 0; i < num_words; i++) {
if (arena_words[i] != expected_words[i]) {
diff_count++;
if (diff_count == 1) {
first_diff = i;
}
}
}

if (diff_count == 0) {
printf("OK - output identical to golden output\n");
} else {
printf("FAIL - %d differences, first at word %d\n", diff_count, first_diff);
printf("actual:\n");
dump_hex(arena_words + first_diff, 16);
printf("expected:\n");
dump_hex(expected_words + first_diff, 16);
}
}
36 changes: 36 additions & 0 deletions proj/hps_accel/src/pool_call.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
/*
* Copyright 2022 The CFU-Playground Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

// Use a guard without a leading underscore: identifiers beginning with an
// underscore followed by an uppercase letter are reserved in C++.
#ifndef POOL_CALL_H
#define POOL_CALL_H

#include <cstdint>

// Functionality for calling MaxPool from a test harness.

// Captured data for one pooling layer. The params and shape pointers
// reference raw byte images that test_pool() reinterpret_casts back to
// tflite::PoolParams and tflite::RuntimeShape respectively; the data
// pointers reference int8 tensor contents.
struct PoolData {
  const char* name;             // Human-readable layer name, printed in output
  const uint8_t* params;        // Byte image of a tflite::PoolParams
  const uint8_t* input_shape;   // Byte image of a tflite::RuntimeShape
  const uint8_t* input_data;    // Input tensor contents
  const uint8_t* output_shape;  // Byte image of a tflite::RuntimeShape
  const uint8_t* output_data;   // Golden output tensor contents
};

// Tests MaxPool with the data in the given structure
void test_pool(const PoolData* data);

#endif  // POOL_CALL_H
7 changes: 5 additions & 2 deletions proj/hps_accel/src/proj_menu.cc
Original file line number Diff line number Diff line change
Expand Up @@ -28,10 +28,11 @@
#include "conv2d_23.h"
#include "conv2d_23p.h"
#include "conv2d_call.h"
#include "fixedpoint/fixedpoint.h"
#include "hps_cfu.h"
#include "menu.h"
#include "playground_util/random.h"
#include "pool_03.h"
#include "pool_call.h"

namespace {

Expand Down Expand Up @@ -78,6 +79,7 @@ void do_test_layer_04(void) { test_conv2d(&conv2d_layer_04_data); }
void do_test_layer_05(void) { test_conv2d(&conv2d_layer_05_data); }
void do_test_layer_06(void) { test_conv2d(&conv2d_layer_06_data); }
void do_test_layer_23p(void) { test_conv2d(&conv2d_layer_23p_data); }
void do_test_pool_03(void) { test_pool(&pool_03_data); }

struct Menu MENU = {
"Project Menu",
Expand All @@ -97,7 +99,8 @@ struct Menu MENU = {
MENU_ITEM('4', "test layer 04", do_test_layer_04),
MENU_ITEM('5', "test layer 05", do_test_layer_05),
MENU_ITEM('6', "test layer 06", do_test_layer_06),
MENU_ITEM('P', "test layer 23p", do_test_layer_23p),
MENU_ITEM('7', "test layer 23p", do_test_layer_23p),
MENU_ITEM('a', "test pool 03", do_test_pool_03),
MENU_END,
},
};
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -226,7 +226,7 @@ inline void ConvPerChannel(
output_shape, output_data);
}

#ifdef SHOW_OUTPUT_HASHES
#ifdef SHOW_CONV_HASHES
static int hash_layer = 0;
int32_t hash = murmurhash3_32(reinterpret_cast<uint8_t*>(output_data),
output_shape.FlatSize());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ limitations under the License.
#include <cstdio>
#include <limits>

#include "playground_util/murmurhash.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/pooling_accel.h"

Expand Down Expand Up @@ -84,40 +85,11 @@ inline bool AveragePool(const PoolParams& params,
return true;
}

inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
const int8_t* input_data, const RuntimeShape& output_shape,
int8_t* output_data) {
#if SHOW_MAX_POOL_PARAMS
// padding_width, padding_height,
// stride_width, stride_height, filter_height, filter_width,
// quantized_activation_min, quantized_activation_max,
// input_shape[0], input_shape[1], input_shape[2], input_shape[3],
// output_shape[0], output_shape[1], output_shape[2], output_shape[3]
printf("\n");
const auto& padding = params.padding_values;
printf("%d, %d, ", padding.width, padding.height);
printf("%d, %d, %d, %d, ", params.stride_height, params.stride_width,
params.filter_height, params.filter_width);
printf("%ld, %ld, ", params.quantized_activation_min,
params.quantized_activation_max);
printf("%ld, %ld, %ld, %ld, ", input_shape.Dims(0), input_shape.Dims(1),
input_shape.Dims(2), input_shape.Dims(3));
printf("%ld, %ld, %ld, %ld, ", output_shape.Dims(0), output_shape.Dims(1),
output_shape.Dims(2), output_shape.Dims(3));

printf("\n");
#endif

#ifdef ACCEL_MAX_POOL
#if GATEWARE_GEN != 2
#error MAX_POOL op requires gateware gen 2
#endif
if (CanAccelerateMaxPool(params, input_shape, output_shape)) {
return AccelerateMaxPool(params, input_shape, input_data, output_shape,
output_data);
}
#endif

inline void UnacceleratedMaxPool(const PoolParams& params,
const RuntimeShape& input_shape,
const int8_t* input_data,
const RuntimeShape& output_shape,
int8_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
TFLITE_DCHECK_GE(params.quantized_activation_min,
Expand Down Expand Up @@ -172,6 +144,58 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
}
}

// Int8 MaxPool entry point: dispatches to the gateware accelerator when it
// can handle this layer's geometry, otherwise falls back to the software
// implementation. Optional debug output is controlled by the
// SHOW_MAX_POOL_PARAMS and SHOW_POOL_HASHES defines.
inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
                    const int8_t* input_data, const RuntimeShape& output_shape,
                    int8_t* output_data) {
// Use #ifdef (not #if) for consistency with ACCEL_MAX_POOL and
// SHOW_POOL_HASHES, and so a valueless "#define SHOW_MAX_POOL_PARAMS"
// also works.
#ifdef SHOW_MAX_POOL_PARAMS
  // Dumps one comma-separated line of pooling parameters:
  // padding_width, padding_height,
  // stride_height, stride_width, filter_height, filter_width,
  // quantized_activation_min, quantized_activation_max,
  // input_shape[0], input_shape[1], input_shape[2], input_shape[3],
  // output_shape[0], output_shape[1], output_shape[2], output_shape[3]
  printf("\n");
  const auto& padding = params.padding_values;
  printf("%d, %d, ", padding.width, padding.height);
  printf("%d, %d, %d, %d, ", params.stride_height, params.stride_width,
         params.filter_height, params.filter_width);
  // Cast int32_t values to long so the %ld conversions are well-defined on
  // both 32-bit targets and 64-bit host builds.
  printf("%ld, %ld, ", static_cast<long>(params.quantized_activation_min),
         static_cast<long>(params.quantized_activation_max));
  printf("%ld, %ld, %ld, %ld, ", static_cast<long>(input_shape.Dims(0)),
         static_cast<long>(input_shape.Dims(1)),
         static_cast<long>(input_shape.Dims(2)),
         static_cast<long>(input_shape.Dims(3)));
  printf("%ld, %ld, %ld, %ld, ", static_cast<long>(output_shape.Dims(0)),
         static_cast<long>(output_shape.Dims(1)),
         static_cast<long>(output_shape.Dims(2)),
         static_cast<long>(output_shape.Dims(3)));

  printf("\n");
#endif

  bool accelerated = false;

#ifdef ACCEL_MAX_POOL
#if GATEWARE_GEN != 2
#error MAX_POOL op requires gateware gen 2
#endif
  accelerated = CanAccelerateMaxPool(params, input_shape, output_shape);
  if (accelerated) {
    AccelerateMaxPool(params, input_shape, input_data, output_shape,
                      output_data);
  }
#endif
  if (!accelerated) {
    UnacceleratedMaxPool(params, input_shape, input_data, output_shape,
                         output_data);
  }

#ifdef SHOW_POOL_HASHES
  // Print murmur hashes of the input and output tensors so layer data can
  // be compared between accelerated and unaccelerated runs. hash_layer
  // counts MaxPool invocations since startup.
  static int hash_layer = 0;
  int32_t input_hash = murmurhash3_32(
      reinterpret_cast<const uint8_t*>(input_data), input_shape.FlatSize());
  int32_t output_hash = murmurhash3_32(
      reinterpret_cast<const uint8_t*>(output_data), output_shape.FlatSize());
  // Reduce to the 32-bit pattern, then widen, so %08lx is well-defined.
  printf("%3d, %08lx, %08lx\n", hash_layer,
         static_cast<unsigned long>(static_cast<uint32_t>(input_hash)),
         static_cast<unsigned long>(static_cast<uint32_t>(output_hash)));
  hash_layer++;
#endif
}

inline bool AveragePool(const PoolParams& params,
const RuntimeShape& input_shape,
const int16_t* input_data,
Expand Down

0 comments on commit 85bedda

Please sign in to comment.