Sync from upstream TF.
TFLM-bot committed Sep 14, 2023
1 parent 3323a41 commit 00d55c5
Showing 5 changed files with 126 additions and 13 deletions.
10 changes: 9 additions & 1 deletion tensorflow/lite/core/api/flatbuffer_conversions.cc
@@ -1305,6 +1305,9 @@ TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,

params->dilation_width_factor = schema_params->dilation_w_factor();
params->dilation_height_factor = schema_params->dilation_h_factor();
TF_LITE_ENSURE_STATUS(
ConvertTensorType(schema_params->quantized_bias_type(),
&params->quantized_bias_type, error_reporter));
} else {
// TODO(b/157480169): We should either return kTfLiteError or fill in some
// reasonable defaults in the params struct. We are not doing so until we
@@ -1519,7 +1522,9 @@ TfLiteStatus ParseFullyConnected(const Operator* op,
params->keep_num_dims = schema_params->keep_num_dims();
params->asymmetric_quantize_inputs =
schema_params->asymmetric_quantize_inputs();

TF_LITE_ENSURE_STATUS(
ConvertTensorType(schema_params->quantized_bias_type(),
&params->quantized_bias_type, error_reporter));
switch (schema_params->weights_format()) {
case FullyConnectedOptionsWeightsFormat_DEFAULT:
params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
@@ -2450,6 +2455,9 @@ TfLiteStatus ParseTransposeConv(const Operator* op,

params->activation =
ConvertActivation(transpose_conv_params->fused_activation_function());
TF_LITE_ENSURE_STATUS(
ConvertTensorType(transpose_conv_params->quantized_bias_type(),
&params->quantized_bias_type, error_reporter));
} else {
// TODO(b/157480169): We should either return kTfLiteError or fill in some
// reasonable defaults in the params struct. We are not doing so until we
12 changes: 12 additions & 0 deletions tensorflow/lite/core/c/builtin_op_data.h
@@ -91,6 +91,10 @@ typedef struct {
// Note: Version 2 supports dilation values not equal to 1.
int dilation_width_factor;
int dilation_height_factor;

// Parameters for CONV_2D version 8 or above.
// When set, quantized_bias_type defines the dtype for both bias and accumulator.
TfLiteType quantized_bias_type;
} TfLiteConvParams;

typedef struct {
@@ -194,6 +198,10 @@ typedef struct {
// If set to true and the weights are quantized, then non constant inputs
// are quantized at evaluation time with asymmetric quantization.
bool asymmetric_quantize_inputs;

// Parameters for FullyConnected version 11 or above.
// When set, quantized_bias_type defines the dtype for both bias and accumulator.
TfLiteType quantized_bias_type;
} TfLiteFullyConnectedParams;

typedef enum {
@@ -431,6 +439,10 @@ typedef struct {

// Parameters supported by version 4:
TfLiteFusedActivation activation;

// Parameters for TransposeConv version 5 or above.
// When set, quantized_bias_type defines the dtype for both bias and accumulator.
TfLiteType quantized_bias_type;
} TfLiteTransposeConvParams;

typedef struct {
45 changes: 42 additions & 3 deletions tensorflow/lite/python/schema_py_generated.py
@@ -2640,7 +2640,14 @@ def DilationHFactor(self):
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 1

def Conv2DOptionsStart(builder): builder.StartObject(6)
# Conv2DOptions
def QuantizedBiasType(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
    if o != 0:
        return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
    return 0

def Conv2DOptionsStart(builder): builder.StartObject(7)
def Start(builder):
return Conv2DOptionsStart(builder)
def Conv2DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0)
@@ -2661,6 +2668,9 @@ def AddDilationWFactor(builder, dilationWFactor):
def Conv2DOptionsAddDilationHFactor(builder, dilationHFactor): builder.PrependInt32Slot(5, dilationHFactor, 1)
def AddDilationHFactor(builder, dilationHFactor):
return Conv2DOptionsAddDilationHFactor(builder, dilationHFactor)
def Conv2DOptionsAddQuantizedBiasType(builder, quantizedBiasType): builder.PrependInt8Slot(6, quantizedBiasType, 0)
def AddQuantizedBiasType(builder, quantizedBiasType):
return Conv2DOptionsAddQuantizedBiasType(builder, quantizedBiasType)
def Conv2DOptionsEnd(builder): return builder.EndObject()
def End(builder):
return Conv2DOptionsEnd(builder)
@@ -2675,6 +2685,7 @@ def __init__(self):
self.fusedActivationFunction = 0 # type: int
self.dilationWFactor = 1 # type: int
self.dilationHFactor = 1 # type: int
self.quantizedBiasType = 0 # type: int

@classmethod
def InitFromBuf(cls, buf, pos):
@@ -2698,6 +2709,7 @@ def _UnPack(self, conv2doptions):
self.fusedActivationFunction = conv2doptions.FusedActivationFunction()
self.dilationWFactor = conv2doptions.DilationWFactor()
self.dilationHFactor = conv2doptions.DilationHFactor()
self.quantizedBiasType = conv2doptions.QuantizedBiasType()

# Conv2DOptionsT
def Pack(self, builder):
@@ -2708,6 +2720,7 @@ def Pack(self, builder):
Conv2DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
Conv2DOptionsAddDilationWFactor(builder, self.dilationWFactor)
Conv2DOptionsAddDilationHFactor(builder, self.dilationHFactor)
Conv2DOptionsAddQuantizedBiasType(builder, self.quantizedBiasType)
conv2doptions = Conv2DOptionsEnd(builder)
return conv2doptions
# automatically generated by the FlatBuffers compiler, do not modify
@@ -4512,7 +4525,14 @@ def AsymmetricQuantizeInputs(self):
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False

def FullyConnectedOptionsStart(builder): builder.StartObject(4)
# FullyConnectedOptions
def QuantizedBiasType(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
    if o != 0:
        return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
    return 0

def FullyConnectedOptionsStart(builder): builder.StartObject(5)
def Start(builder):
return FullyConnectedOptionsStart(builder)
def FullyConnectedOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
@@ -4527,6 +4547,9 @@ def AddKeepNumDims(builder, keepNumDims):
def FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(3, asymmetricQuantizeInputs, 0)
def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
return FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs)
def FullyConnectedOptionsAddQuantizedBiasType(builder, quantizedBiasType): builder.PrependInt8Slot(4, quantizedBiasType, 0)
def AddQuantizedBiasType(builder, quantizedBiasType):
return FullyConnectedOptionsAddQuantizedBiasType(builder, quantizedBiasType)
def FullyConnectedOptionsEnd(builder): return builder.EndObject()
def End(builder):
return FullyConnectedOptionsEnd(builder)
@@ -4539,6 +4562,7 @@ def __init__(self):
self.weightsFormat = 0 # type: int
self.keepNumDims = False # type: bool
self.asymmetricQuantizeInputs = False # type: bool
self.quantizedBiasType = 0 # type: int

@classmethod
def InitFromBuf(cls, buf, pos):
@@ -4560,6 +4584,7 @@ def _UnPack(self, fullyConnectedOptions):
self.weightsFormat = fullyConnectedOptions.WeightsFormat()
self.keepNumDims = fullyConnectedOptions.KeepNumDims()
self.asymmetricQuantizeInputs = fullyConnectedOptions.AsymmetricQuantizeInputs()
self.quantizedBiasType = fullyConnectedOptions.QuantizedBiasType()

# FullyConnectedOptionsT
def Pack(self, builder):
@@ -4568,6 +4593,7 @@ def Pack(self, builder):
FullyConnectedOptionsAddWeightsFormat(builder, self.weightsFormat)
FullyConnectedOptionsAddKeepNumDims(builder, self.keepNumDims)
FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
FullyConnectedOptionsAddQuantizedBiasType(builder, self.quantizedBiasType)
fullyConnectedOptions = FullyConnectedOptionsEnd(builder)
return fullyConnectedOptions
# automatically generated by the FlatBuffers compiler, do not modify
@@ -16436,7 +16462,14 @@ def FusedActivationFunction(self):
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0

def TransposeConvOptionsStart(builder): builder.StartObject(4)
# TransposeConvOptions
def QuantizedBiasType(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
    if o != 0:
        return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
    return 0

def TransposeConvOptionsStart(builder): builder.StartObject(5)
def Start(builder):
return TransposeConvOptionsStart(builder)
def TransposeConvOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0)
@@ -16451,6 +16484,9 @@ def AddStrideH(builder, strideH):
def TransposeConvOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(3, fusedActivationFunction, 0)
def AddFusedActivationFunction(builder, fusedActivationFunction):
return TransposeConvOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
def TransposeConvOptionsAddQuantizedBiasType(builder, quantizedBiasType): builder.PrependInt8Slot(4, quantizedBiasType, 0)
def AddQuantizedBiasType(builder, quantizedBiasType):
return TransposeConvOptionsAddQuantizedBiasType(builder, quantizedBiasType)
def TransposeConvOptionsEnd(builder): return builder.EndObject()
def End(builder):
return TransposeConvOptionsEnd(builder)
@@ -16463,6 +16499,7 @@ def __init__(self):
self.strideW = 0 # type: int
self.strideH = 0 # type: int
self.fusedActivationFunction = 0 # type: int
self.quantizedBiasType = 0 # type: int

@classmethod
def InitFromBuf(cls, buf, pos):
@@ -16484,6 +16521,7 @@ def _UnPack(self, transposeConvOptions):
self.strideW = transposeConvOptions.StrideW()
self.strideH = transposeConvOptions.StrideH()
self.fusedActivationFunction = transposeConvOptions.FusedActivationFunction()
self.quantizedBiasType = transposeConvOptions.QuantizedBiasType()

# TransposeConvOptionsT
def Pack(self, builder):
@@ -16492,6 +16530,7 @@ def Pack(self, builder):
TransposeConvOptionsAddStrideW(builder, self.strideW)
TransposeConvOptionsAddStrideH(builder, self.strideH)
TransposeConvOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
TransposeConvOptionsAddQuantizedBiasType(builder, self.quantizedBiasType)
transposeConvOptions = TransposeConvOptionsEnd(builder)
return transposeConvOptions
# automatically generated by the FlatBuffers compiler, do not modify
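
For orientation, here is a minimal sketch of how the regenerated object API round-trips the new field. It is illustrative only: the import path is an assumption (point it at wherever schema_py_generated.py lives in your checkout), and none of it is part of this commit.

import flatbuffers
from tflite import schema_py_generated as schema_fb  # hypothetical import path

# Build Conv2DOptions through the object API, setting the new field to a
# 32-bit integer bias/accumulator type (TensorType.INT32 == 2).
options = schema_fb.Conv2DOptionsT()
options.quantizedBiasType = schema_fb.TensorType.INT32

builder = flatbuffers.Builder(64)
builder.Finish(options.Pack(builder))

# Read it back through the table API; QuantizedBiasType() returns the
# stored enum value, or 0 if the slot was never written.
parsed = schema_fb.Conv2DOptions.GetRootAs(builder.Output(), 0)
assert parsed.QuantizedBiasType() == schema_fb.TensorType.INT32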
12 changes: 12 additions & 0 deletions tensorflow/lite/schema/schema.fbs
@@ -789,6 +789,9 @@ table Conv2DOptions {
fused_activation_function:ActivationFunctionType;
dilation_w_factor:int = 1;
dilation_h_factor:int = 1;
// Parameters for Conv2D version 8 or above.
// When set, quantized_bias_type defines the dtype for both bias and accumulator.
quantized_bias_type: TensorType;
}

// Options for both Conv3D and Conv3DTranspose.
@@ -896,6 +899,10 @@ table FullyConnectedOptions {
// If set to true, then weights-only op will use asymmetric quantization for
// inputs.
asymmetric_quantize_inputs: bool;

// Parameters for FullyConnected version 11 or above.
// When set, quantized_bias_type defines the dtype for both bias and accumulator.
quantized_bias_type: TensorType;
}

table SoftmaxOptions {
@@ -1155,6 +1162,11 @@ table TransposeConvOptions {

// Parameters supported by version 4:
fused_activation_function:ActivationFunctionType = NONE;

// Parameters for TransposeConv version 5 or above.
// When set, quantized_bias_type defines the dtype for both bias and accumulator.
quantized_bias_type: TensorType;
}

table ExpandDimsOptions {
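
Since FlatBuffers only appends new fields to the end of a table, models serialized before this change stay readable: the generated accessors fall back to the schema default of 0 (TensorType FLOAT32) when quantized_bias_type is absent, which readers can treat as "not set". A short sketch of that behavior, under the same assumed import path as the earlier example (again, not part of this commit):

import flatbuffers
from tflite import schema_py_generated as schema_fb  # hypothetical import path

# Serialize Conv2DOptions without touching quantized_bias_type, the way a
# model written against the old schema would have been.
builder = flatbuffers.Builder(32)
schema_fb.Conv2DOptionsStart(builder)
schema_fb.Conv2DOptionsAddStrideW(builder, 1)
schema_fb.Conv2DOptionsAddStrideH(builder, 1)
builder.Finish(schema_fb.Conv2DOptionsEnd(builder))

# The accessor finds no vtable entry for the new slot and returns the
# schema default, so old buffers parse cleanly under the new schema.
old = schema_fb.Conv2DOptions.GetRootAs(builder.Output(), 0)
assert old.QuantizedBiasType() == schema_fb.TensorType.FLOAT32  # enum value 0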
