This repository was archived by the owner on Feb 24, 2025. It is now read-only.

Commit 495bfd6
Sync from tflite-micro. (#150)
1 parent: 06841a8

File tree: 11 files changed, +189 -57 lines

src/tensorflow/lite/core/api/error_reporter_macro.h

Lines changed: 0 additions & 36 deletions
This file was deleted.

src/tensorflow/lite/core/api/flatbuffer_conversions.cpp

Lines changed: 3 additions & 0 deletions
@@ -2338,6 +2338,9 @@ TfLiteStatus ParseTransposeConv(const Operator* op,
     params->padding = ConvertPadding(transpose_conv_params->padding());
     params->stride_width = transpose_conv_params->stride_w();
     params->stride_height = transpose_conv_params->stride_h();
+
+    params->activation =
+        ConvertActivation(transpose_conv_params->fused_activation_function());
   } else {
     // TODO(b/157480169): We should either return kTfLiteError or fill in some
     // reasonable defaults in the params struct. We are not doing so until we
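This hunk is the model-loading half of the new fused-activation support: the schema's fused_activation_function field is mapped onto the runtime activation enum. The sketch below is a self-contained illustration of that mapping pattern only; the enum subsets are stand-ins, not the real schema or TfLiteFusedActivation definitions, and this is not the actual ConvertActivation implementation.

#include <cstdio>

// Illustrative subsets; the real enums live in the TFLite schema and in
// builtin_op_data.h.
enum SchemaActivation { kSchemaNone = 0, kSchemaRelu = 1, kSchemaRelu6 = 3 };
enum RuntimeActivation { kRuntimeNone, kRuntimeRelu, kRuntimeRelu6 };

// A plain switch is all the conversion needs; in this sketch, unknown values
// fall back to "no activation".
RuntimeActivation ConvertActivationSketch(SchemaActivation a) {
  switch (a) {
    case kSchemaRelu:  return kRuntimeRelu;
    case kSchemaRelu6: return kRuntimeRelu6;
    default:           return kRuntimeNone;
  }
}

int main() {
  std::printf("relu6 -> %d\n", ConvertActivationSketch(kSchemaRelu6));
  return 0;
}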

src/tensorflow/lite/core/c/builtin_op_data.h

Lines changed: 4 additions & 0 deletions
@@ -415,9 +415,13 @@ typedef struct {
 } TfLiteArgMinParams;
 
 typedef struct {
+  // Parameters supported by version 1:
   TfLitePadding padding;
   int stride_width;
   int stride_height;
+
+  // Parameters supported by version 4:
+  TfLiteFusedActivation activation;
 } TfLiteTransposeConvParams;
 
 typedef struct {
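The new member is appended at the end of the struct, which is what the version comments document: code that only knows version-1 fields leaves the tail zero-initialized, so activation defaults to "none" and old behavior is preserved. A minimal sketch of that backward-compatibility property, using stand-in enums rather than the real TFLite types:

#include <cstdio>

enum PaddingSketch { kPaddingUnknown, kPaddingSame, kPaddingValid };
enum ActivationSketch { kActNone, kActRelu, kActRelu6 };

struct TransposeConvParamsSketch {
  // Parameters supported by version 1:
  PaddingSketch padding;
  int stride_width;
  int stride_height;
  // Parameters supported by version 4:
  ActivationSketch activation;
};

int main() {
  // Aggregate initialization zero-initializes trailing members, so a
  // "version 1" initializer yields activation == kActNone (0).
  TransposeConvParamsSketch v1_params = {kPaddingSame, 2, 2};
  TransposeConvParamsSketch v4_params = {kPaddingSame, 2, 2, kActRelu6};
  std::printf("v1 act=%d, v4 act=%d\n", v1_params.activation,
              v4_params.activation);  // v1 act=0, v4 act=2
  return 0;
}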

src/tensorflow/lite/core/c/common.h

Lines changed: 9 additions & 1 deletion
@@ -1005,7 +1005,15 @@ typedef enum TfLiteDelegateFlags {
   // 3. This flag requires that the original execution plan only have ops with
   // valid registrations (and not 'dummy' custom ops like with Flex).
   // WARNING: This feature is experimental and subject to change.
-  kTfLiteDelegateFlagsRequirePropagatedShapes = 2
+  kTfLiteDelegateFlagsRequirePropagatedShapes = 2,
+
+  // This flag can be used by delegates to request per-operator profiling. If a
+  // node is a delegate node, this flag will be checked before profiling. If
+  // set, then the node will not be profiled. The delegate will then add per
+  // operator information using Profiler::EventType::OPERATOR_INVOKE_EVENT and
+  // the results will appear in the operator-wise Profiling section and not in
+  // the Delegate internal section.
+  kTfLiteDelegateFlagsPerOperatorProfiling = 4
 } TfLiteDelegateFlags;
 
 // WARNING: This is an experimental interface that is subject to change.
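TfLiteDelegateFlags is a bitmask, so a delegate opts in by OR-ing the new value into its flags field, and the runtime tests it with a bitwise AND before emitting the delegate-level profiling event. The sketch below uses a simplified stand-in for TfLiteDelegate; only the flag values mirror the enum above.

#include <cstdint>
#include <cstdio>

// Values mirror TfLiteDelegateFlags above; the struct is a stand-in.
enum DelegateFlagsSketch : int64_t {
  kFlagsNone = 0,
  kFlagsAllowDynamicTensors = 1,
  kFlagsRequirePropagatedShapes = 2,
  kFlagsPerOperatorProfiling = 4,
};

struct DelegateSketch {
  int64_t flags = kFlagsNone;
};

int main() {
  DelegateSketch delegate;
  // The delegate asks the runtime not to emit one opaque delegate-level
  // event; it will report per-operator OPERATOR_INVOKE_EVENTs itself.
  delegate.flags |= kFlagsPerOperatorProfiling;

  // Runtime side: the check performed before profiling a delegate node.
  const bool skip_node_event =
      (delegate.flags & kFlagsPerOperatorProfiling) != 0;
  std::printf("skip delegate-level event: %s\n",
              skip_node_event ? "yes" : "no");
  return 0;
}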

src/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h

Lines changed: 4 additions & 4 deletions
@@ -55,8 +55,8 @@ inline void TransposeConv(
   const int output_width = output_shape.Dims(2);
   const int32_t input_offset = params.input_offset;
   const int32_t output_offset = params.output_offset;
-  const int32_t output_activation_min = std::numeric_limits<int8_t>::min();
-  const int32_t output_activation_max = std::numeric_limits<int8_t>::max();
+  const int32_t output_activation_min = params.quantized_activation_min;
+  const int32_t output_activation_max = params.quantized_activation_max;
   TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
 
   const int num_elements = output_shape.FlatSize();
@@ -153,8 +153,8 @@ inline void TransposeConv(
   const int filter_width = filter_shape.Dims(2);
   const int output_height = output_shape.Dims(1);
   const int output_width = output_shape.Dims(2);
-  const int32_t output_activation_min = std::numeric_limits<int16_t>::min();
-  const int32_t output_activation_max = std::numeric_limits<int16_t>::max();
+  const int32_t output_activation_min = params.quantized_activation_min;
+  const int32_t output_activation_max = params.quantized_activation_max;
   TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
 
   const int num_elements = output_shape.FlatSize();
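With the hard-coded int8_t/int16_t limits gone, the clamp bounds now come from params.quantized_activation_min/max, which are derived from the fused activation plus the output tensor's quantization. The self-contained sketch below mirrors the idea behind TFLite's CalculateActivationRangeQuantized helper; the function name and rounding details here are illustrative assumptions, not the real implementation.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

enum ActivationSketch { kActNone, kActRelu, kActRelu6 };

// Derive integer clamp bounds from a fused activation and the output's
// quantization (scale, zero_point), within the type's [qmin, qmax].
void QuantizedActivationRangeSketch(ActivationSketch act, float scale,
                                    int32_t zero_point, int32_t qmin,
                                    int32_t qmax,  // e.g. int8: -128..127
                                    int32_t* act_min, int32_t* act_max) {
  auto quantize = [&](float x) {
    return zero_point + static_cast<int32_t>(std::round(x / scale));
  };
  *act_min = qmin;
  *act_max = qmax;
  if (act == kActRelu) {
    *act_min = std::max(qmin, quantize(0.0f));
  } else if (act == kActRelu6) {
    *act_min = std::max(qmin, quantize(0.0f));
    *act_max = std::min(qmax, quantize(6.0f));
  }
}

int main() {
  int32_t lo, hi;
  QuantizedActivationRangeSketch(kActRelu6, /*scale=*/0.05f,
                                 /*zero_point=*/-10, -128, 127, &lo, &hi);
  std::printf("clamp to [%d, %d]\n", lo, hi);  // [-10, 110]
  return 0;
}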

src/tensorflow/lite/kernels/internal/reference/transpose_conv.h

Lines changed: 14 additions & 8 deletions
@@ -49,6 +49,8 @@ inline void TransposeConv(
   const int filter_width = filter_shape.Dims(2);
   const int output_height = output_shape.Dims(1);
   const int output_width = output_shape.Dims(2);
+  const float output_activation_min = params.float_activation_min;
+  const float output_activation_max = params.float_activation_max;
   if (bias_data) {
     TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
   }
@@ -99,14 +101,18 @@ inline void TransposeConv(
       }
     }
   }
-  if (bias_data) {
-    for (int batch = 0; batch < batches; ++batch) {
-      for (int out_y = 0; out_y < output_height; ++out_y) {
-        for (int out_x = 0; out_x < output_width; ++out_x) {
-          for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
-            output_data[Offset(output_shape, batch, out_y, out_x,
-                               out_channel)] += bias_data[out_channel];
-          }
+
+  for (int batch = 0; batch < batches; ++batch) {
+    for (int out_y = 0; out_y < output_height; ++out_y) {
+      for (int out_x = 0; out_x < output_width; ++out_x) {
+        for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
+          float acc = output_data[Offset(output_shape, batch, out_y, out_x,
+                                         out_channel)];
+          if (bias_data) acc += bias_data[out_channel];
+
+          output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] =
+              ActivationFunctionWithMinMax(acc, output_activation_min,
+                                           output_activation_max);
         }
       }
     }
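The rewritten loop folds the bias add and the activation clamp into a single pass over the output: the accumulator is read back, bias is added when present, and the result is clamped. ActivationFunctionWithMinMax is essentially a min/max clamp; a minimal standalone sketch of the same fold:

#include <algorithm>
#include <cstdio>

// Stand-in for TFLite's ActivationFunctionWithMinMax: clamp x to [lo, hi].
float ActivationWithMinMaxSketch(float x, float lo, float hi) {
  return std::min(std::max(x, lo), hi);
}

int main() {
  const float bias = 0.5f;
  float acc = 7.0f;  // accumulator value from the main transpose-conv loops
  acc += bias;       // bias is now folded into the same pass...
  acc = ActivationWithMinMaxSketch(acc, 0.0f, 6.0f);  // ...as the activation
  std::printf("%f\n", acc);  // 6.0 (ReLU6 clamp)
  return 0;
}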

src/tensorflow/lite/micro/kernels/transpose_conv.cpp

Lines changed: 8 additions & 1 deletion
@@ -261,8 +261,15 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 
   switch (input->type) {  // Already know in/out types are same.
     case kTfLiteFloat32: {
+      const auto& params =
+          *(reinterpret_cast<TfLiteConvParams*>(node->builtin_data));
+      ConvParams op_params = data.params;
+      CalculateActivationRange(params.activation,
+                               &op_params.float_activation_min,
+                               &op_params.float_activation_max);
+
       reference_ops::TransposeConv(
-          data.params, tflite::micro::GetTensorShape(input),
+          op_params, tflite::micro::GetTensorShape(input),
           tflite::micro::GetTensorData<float>(input),
           tflite::micro::GetTensorShape(filter),
           tflite::micro::GetTensorData<float>(filter),
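The float path now calls CalculateActivationRange at Eval time to turn the parsed fused activation into the min/max pair the reference kernel consumes. Below is a standalone sketch of what that helper computes for floats; the real helper is declared in TFLite's kernel_util.h, and this illustration is an assumption-labeled re-implementation, not that code.

#include <cstdio>
#include <limits>

enum ActivationSketch { kActNone, kActRelu, kActReluN1To1, kActRelu6 };

// Map a fused activation to a [min, max] clamp range; "none" means no clamp,
// expressed as an infinite range.
void CalculateActivationRangeSketch(ActivationSketch act, float* lo,
                                    float* hi) {
  *lo = -std::numeric_limits<float>::infinity();
  *hi = std::numeric_limits<float>::infinity();
  if (act == kActRelu) {
    *lo = 0.0f;
  } else if (act == kActReluN1To1) {
    *lo = -1.0f;
    *hi = 1.0f;
  } else if (act == kActRelu6) {
    *lo = 0.0f;
    *hi = 6.0f;
  }
}

int main() {
  float lo, hi;
  CalculateActivationRangeSketch(kActRelu6, &lo, &hi);
  std::printf("[%g, %g]\n", lo, hi);  // [0, 6]
  return 0;
}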

src/tensorflow/lite/micro/micro_allocation_info.cpp

Lines changed: 8 additions & 4 deletions
@@ -39,10 +39,6 @@ void AllocationInfoBuilder::UpdateFirstCreated(AllocationInfo* current,
   TFLITE_DCHECK(current->first_created <= allocation_scope_count);
   if (current->first_created == kUninitializedLifetime) {
     current->first_created = allocation_scope_count;
-    // TODO(b/257084942): This will ensure that tensors that are outputs from an
-    // OP but not inputs to any other OP also have a reasonable lifetime.
-    // This bug will be used to add automated tests for this issue.
-    current->last_used = allocation_scope_count;
   }
 }
 
@@ -245,6 +241,9 @@ TfLiteStatus AllocationInfoBuilder::MarkAllocationLifetimes(
     const int tensor_index = subgraph->inputs()->Get(i);
     AllocationInfo* current = &subgraph_allocation_info[tensor_index];
     UpdateFirstCreated(current, allocation_scope_count_);
+    // This will ensure that the tensors that are inputs to the subgraphs
+    // but not used in any ops also have a reasonable lifetime.
+    UpdateLastUsed(current, allocation_scope_count_);
   }
 
   for (uint32_t i = 0; i < operators_size; i++) {
@@ -316,6 +315,11 @@ TfLiteStatus AllocationInfoBuilder::MarkAllocationLifetimes(
       subgraph->outputs() != nullptr && i < subgraph->outputs()->size(); ++i) {
     const int tensor_index = subgraph->outputs()->Get(i);
     AllocationInfo* current = &subgraph_allocation_info[tensor_index];
+    // Make sure to assign the First created value of the subgraph output
+    // This will handle the case where the subgraph is empty. This helps
+    // ensure all tensors have valid lifetimes before those are used by the
+    // memory planner.
+    UpdateFirstCreated(current, allocation_scope_count_);
     UpdateLastUsed(current, allocation_scope_count_);
   }
   return kTfLiteOk;
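The net effect of these three hunks: last_used is no longer bumped as a side effect of UpdateFirstCreated; instead, subgraph inputs and outputs get explicit UpdateLastUsed/UpdateFirstCreated calls, so tensors untouched by any operator (including everything in an empty subgraph) still end up with a well-formed lifetime interval for the memory planner. A simplified standalone sketch of that bookkeeping; field and function names mirror the file above, but this is not the real allocator.

#include <cstdio>

constexpr int kUninitializedLifetime = -1;

struct AllocationInfoSketch {
  int first_created = kUninitializedLifetime;
  int last_used = kUninitializedLifetime;
};

// Record the first allocation scope in which the tensor exists.
void UpdateFirstCreated(AllocationInfoSketch* info, int scope) {
  if (info->first_created == kUninitializedLifetime) {
    info->first_created = scope;
  }
}

// Extend the tensor's lifetime to cover the given scope.
void UpdateLastUsed(AllocationInfoSketch* info, int scope) {
  if (info->last_used < scope) {
    info->last_used = scope;
  }
}

int main() {
  // A subgraph input that no operator ever reads: after this change it still
  // gets a valid [create, use] window instead of an uninitialized one.
  AllocationInfoSketch unused_input;
  const int scope = 3;  // allocation scope count at subgraph entry
  UpdateFirstCreated(&unused_input, scope);
  UpdateLastUsed(&unused_input, scope);
  std::printf("lifetime: [%d, %d]\n", unused_input.first_created,
              unused_input.last_used);  // [3, 3]
  return 0;
}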

src/tensorflow/lite/micro/test_helpers.cpp

Lines changed: 119 additions & 0 deletions
@@ -918,6 +918,117 @@ const Model* BuildSimpleModelWithSubgraphsAndIf() {
   return model;
 }
 
+const Model* BuildSimpleModelWithIfAndEmptySubgraph() {
+  using flatbuffers::Offset;
+  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
+
+  constexpr size_t buffers_size = 1;
+  const Offset<Buffer> buffers[buffers_size] = {
+      CreateBuffer(*builder),
+  };
+  const int32_t condition_tensor_shape[] = {1};
+  const int32_t data_tensor_shape[] = {1, 2};
+  constexpr size_t tensors_size = 4;
+  const Offset<Tensor> subgraph1_tensors[tensors_size] = {
+      CreateTensor(*builder, builder->CreateVector(condition_tensor_shape, 1),
+                   TensorType_BOOL, 0,
+                   builder->CreateString("condition tensor"), 0, false),
+      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
+                   TensorType_FLOAT32, 0,
+                   builder->CreateString("input_tensor1"), 0, false),
+      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
+                   TensorType_FLOAT32, 0,
+                   builder->CreateString("input_tensor2"), 0, false),
+      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
+                   TensorType_FLOAT32, 0,
+                   builder->CreateString("output_tensor"), 0, false),
+  };
+  const Offset<Tensor> subgraph2_tensors[tensors_size] = {
+      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
+                   TensorType_FLOAT32, 0,
+                   builder->CreateString("input_tensor1"), 0, false),
+      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
+                   TensorType_FLOAT32, 0,
+                   builder->CreateString("input_tensor2"), 0, false),
+      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
+                   TensorType_FLOAT32, 0,
+                   builder->CreateString("output_tensor"), 0, false),
+  };
+  const Offset<Tensor> subgraph3_tensors[tensors_size] = {
+      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
+                   TensorType_FLOAT32, 0,
+                   builder->CreateString("input_tensor1"), 0, false),
+      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
+                   TensorType_FLOAT32, 0,
+                   builder->CreateString("input_tensor2"), 0, false),
+      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
+                   TensorType_FLOAT32, 0,
+                   builder->CreateString("output_tensor"), 0, false),
+  };
+
+  constexpr size_t if_inputs_size = 3;
+  const int32_t if_inputs[if_inputs_size] = {0, 1, 2};
+  constexpr size_t outputs_size = 1;
+  const int32_t if_outputs[outputs_size] = {3};
+  constexpr size_t operator_inputs_size = 2;
+  const int32_t operator_inputs[operator_inputs_size] = {0, 1};
+  const int32_t operator_outputs[outputs_size] = {2};
+  constexpr size_t operators_size = 1;
+  const Offset<Operator> subgraph1_operators[operators_size] = {
+      CreateOperator(
+          *builder, 0, builder->CreateVector(if_inputs, if_inputs_size),
+          builder->CreateVector(if_outputs, outputs_size),
+          BuiltinOptions_IfOptions, CreateIfOptions(*builder, 1, 2).Union()),
+  };
+  const Offset<Operator> subgraph2_operators[operators_size] = {
+      CreateOperator(
+          *builder, 1,
+          builder->CreateVector(operator_inputs, operator_inputs_size),
+          builder->CreateVector(operator_outputs, outputs_size),
+          BuiltinOptions_NONE),
+  };
+  constexpr size_t subgraphs_size = 3;
+  const Offset<SubGraph> subgraphs[subgraphs_size] = {
+      CreateSubGraph(*builder, builder->CreateVector(subgraph1_tensors, 4),
+                     builder->CreateVector(if_inputs, if_inputs_size),
+                     builder->CreateVector(if_outputs, outputs_size),
+                     builder->CreateVector(subgraph1_operators, operators_size),
+                     builder->CreateString("if_subgraph")),
+      CreateSubGraph(
+          *builder, builder->CreateVector(subgraph2_tensors, 3),
+          builder->CreateVector(operator_inputs, operator_inputs_size),
+          builder->CreateVector(operator_outputs, outputs_size),
+          builder->CreateVector(subgraph2_operators, operators_size),
+          builder->CreateString("then_subgraph")),
+      CreateSubGraph(
+          *builder, builder->CreateVector(subgraph3_tensors, 3),
+          builder->CreateVector(operator_inputs, operator_inputs_size),
+          builder->CreateVector(operator_outputs, outputs_size), 0,
+          builder->CreateString("else_subgraph")),
+  };
+  constexpr size_t operator_codes_size = 3;
+  const Offset<OperatorCode> operator_codes[operator_codes_size] = {
+      CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0,
+                               "multiple_inputs_op",
+                               /*version=*/0, BuiltinOperator_IF),
+      CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0,
+                               "multiple_inputs_op",
+                               /*version=*/0, BuiltinOperator_ADD),
+      CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0,
+                               "multiple_inputs_op",
+                               /*version=*/0, BuiltinOperator_MUL),
+  };
+  const Offset<Model> model_offset = CreateModel(
+      *builder, 0, builder->CreateVector(operator_codes, operator_codes_size),
+      builder->CreateVector(subgraphs, subgraphs_size),
+      builder->CreateString("test_model"),
+      builder->CreateVector(buffers, buffers_size));
+  FinishModelBuffer(*builder, model_offset);
+  void* model_pointer = builder->GetBufferPointer();
+  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
+  return model;
+}
+
 const Model* BuildSimpleModelWithSubgraphsAndWhile() {
   using flatbuffers::Offset;
   flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
@@ -1604,6 +1715,14 @@ const Model* GetSimpleModelWithSubgraphsAndIf() {
   return model;
 }
 
+const Model* GetSimpleModelWithIfAndEmptySubgraph() {
+  static Model* model = nullptr;
+  if (!model) {
+    model = const_cast<Model*>(BuildSimpleModelWithIfAndEmptySubgraph());
+  }
+  return model;
+}
+
 const Model* GetSimpleModelWithSubgraphsAndWhile() {
   static Model* model = nullptr;
   if (!model) {
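The new Get* wrapper follows the file's existing accessor pattern: build the flatbuffer once through the shared FlatBufferBuilder and cache the pointer in a function-local static, so every test sees the same Model instance. A generic standalone sketch of that lazy-singleton pattern, with stand-in types rather than the real test helpers:

#include <cstdio>

struct ModelSketch { int id; };

// Stand-in for BuildSimpleModelWithIfAndEmptySubgraph(): costly to run, so
// callers want it to happen at most once.
const ModelSketch* BuildModelSketch() {
  static ModelSketch m{42};  // constructed on first call only
  return &m;
}

// Stand-in for GetSimpleModelWithIfAndEmptySubgraph(): lazy cached accessor.
const ModelSketch* GetModelSketch() {
  static const ModelSketch* model = nullptr;
  if (!model) {
    model = BuildModelSketch();
  }
  return model;
}

int main() {
  std::printf("%s\n",
              GetModelSketch() == GetModelSketch() ? "same instance" : "bug");
  return 0;
}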

src/tensorflow/lite/micro/test_helpers.h

Lines changed: 3 additions & 0 deletions
@@ -157,6 +157,9 @@ const Model* GetSimpleStatefulModel();
 // Returns a flatbuffer model with "if" and two subgraphs.
 const Model* GetSimpleModelWithSubgraphsAndIf();
 
+// Returns a flatbuffer model with "if" and two subgraphs one of which is empty.
+const Model* GetSimpleModelWithIfAndEmptySubgraph();
+
 // Returns a flatbuffer model with "while" and three subgraphs.
 const Model* GetSimpleModelWithSubgraphsAndWhile();
 
