Automated sync from github.com/tensorflow/tensorflow #2723

Merged
merged 2 commits on Dec 12, 2024
13 changes: 13 additions & 0 deletions tensorflow/compiler/mlir/lite/schema/schema.fbs
@@ -70,6 +70,19 @@ table CustomQuantization {
// Represents a specific quantization technique's parameters.
union QuantizationDetails {
  CustomQuantization,
  BlockwiseQuantization,
}

// Parameters for blockwise quantization.
table BlockwiseQuantization {
  // index to the scale tensor, the tensor can be found in tensors array in
  // subgraph.
  scales: int;
  // index to the zero point tensor. If zero_points is -1, the zero point is
  // assumed to be 0, following the convention of optional tensors in tflite.
  zero_points: int;
  // The block size of the tensor.
  block_size: int;
}

// Parameters for converting a quantized tensor back to float.
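The new table only records tensor indices and a block size; the dequantization arithmetic itself lives in the runtime kernels. For reference, below is a minimal NumPy sketch of what these fields imply, assuming one scale (and optional zero point) per `block_size` contiguous values along the last dimension. The function name, shapes, and sample values are illustrative only and are not part of the schema.

```python
import numpy as np

def blockwise_dequantize(quantized, scales, zero_points, block_size):
    # quantized:   integer array of shape (rows, cols), with cols % block_size == 0
    # scales:      float array of shape (rows, cols // block_size), one scale per block
    # zero_points: integer array with the same shape as scales, or None
    #              (None mirrors the zero_points == -1 convention described above)
    rows, cols = quantized.shape
    blocks = cols // block_size
    q = quantized.reshape(rows, blocks, block_size).astype(np.float32)
    s = scales.reshape(rows, blocks, 1)
    z = (np.zeros_like(s) if zero_points is None
         else zero_points.reshape(rows, blocks, 1).astype(np.float32))
    return ((q - z) * s).reshape(rows, cols)

# Illustrative weights with block_size = 4: two blocks per row, one scale each.
w_q = np.array([[1, 2, 3, 4, 5, 6, 7, 8]], dtype=np.int8)
w_scales = np.array([[0.5, 0.25]], dtype=np.float32)
print(blockwise_dequantize(w_q, w_scales, None, block_size=4))
# -> [[0.5, 1.0, 1.5, 2.0, 1.25, 1.5, 1.75, 2.0]]
```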
2 changes: 1 addition & 1 deletion tensorflow/lite/core/c/c_api_types.h
@@ -56,7 +56,7 @@ extern "C" {
#define TFL_CAPI_EXPORT
#elif defined(TFL_STATIC_LIBRARY_BUILD)
#define TFL_CAPI_EXPORT
#else // not definded TFL_STATIC_LIBRARY_BUILD
#else // not defined TFL_STATIC_LIBRARY_BUILD
#if defined(_WIN32)
#ifdef TFL_COMPILE_LIBRARY
#define TFL_CAPI_EXPORT __declspec(dllexport)
4 changes: 3 additions & 1 deletion tensorflow/lite/core/c/common.h
@@ -80,7 +80,9 @@ typedef enum TfLiteExternalContextType {
  kTfLiteGemmLowpContext = 1,  /// include gemm_support.h to use.
  kTfLiteEdgeTpuContext = 2,  /// Placeholder for Edge TPU support.
  kTfLiteCpuBackendContext = 3,  /// include cpu_backend_context.h to use.
  kTfLiteMaxExternalContexts = 4
  kTfLiteLiteRtBufferContext =
      4,  /// include external_litert_buffer_context.h to use.
  kTfLiteMaxExternalContexts = 5
} TfLiteExternalContextType;

// Forward declare so dependent structs and methods can reference these types
2 changes: 2 additions & 0 deletions tensorflow/lite/kernels/internal/reference/comparisons.h
@@ -15,6 +15,8 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_

#include <cstdint>

#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/macros.h"
#include "tensorflow/lite/kernels/internal/common.h"
108 changes: 107 additions & 1 deletion tensorflow/lite/python/schema_py_generated.py
@@ -32,13 +32,16 @@ class TensorType(object):
class QuantizationDetails(object):
    NONE = 0
    CustomQuantization = 1
    BlockwiseQuantization = 2

def QuantizationDetailsCreator(unionType, table):
    from flatbuffers.table import Table
    if not isinstance(table, Table):
        return None
    if unionType == QuantizationDetails().CustomQuantization:
        return CustomQuantizationT.InitFromBuf(table.Bytes, table.Pos)
    if unionType == QuantizationDetails().BlockwiseQuantization:
        return BlockwiseQuantizationT.InitFromBuf(table.Bytes, table.Pos)
    return None


@@ -949,6 +952,109 @@ def Pack(self, builder):
        return customQuantization


class BlockwiseQuantization(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = BlockwiseQuantization()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsBlockwiseQuantization(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def BlockwiseQuantizationBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # BlockwiseQuantization
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # BlockwiseQuantization
    def Scales(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # BlockwiseQuantization
    def ZeroPoints(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # BlockwiseQuantization
    def BlockSize(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

def BlockwiseQuantizationStart(builder):
    builder.StartObject(3)

def BlockwiseQuantizationAddScales(builder, scales):
    builder.PrependInt32Slot(0, scales, 0)

def BlockwiseQuantizationAddZeroPoints(builder, zeroPoints):
    builder.PrependInt32Slot(1, zeroPoints, 0)

def BlockwiseQuantizationAddBlockSize(builder, blockSize):
    builder.PrependInt32Slot(2, blockSize, 0)

def BlockwiseQuantizationEnd(builder):
    return builder.EndObject()



class BlockwiseQuantizationT(object):

    # BlockwiseQuantizationT
    def __init__(self):
        self.scales = 0  # type: int
        self.zeroPoints = 0  # type: int
        self.blockSize = 0  # type: int

    @classmethod
    def InitFromBuf(cls, buf, pos):
        blockwiseQuantization = BlockwiseQuantization()
        blockwiseQuantization.Init(buf, pos)
        return cls.InitFromObj(blockwiseQuantization)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, blockwiseQuantization):
        x = BlockwiseQuantizationT()
        x._UnPack(blockwiseQuantization)
        return x

    # BlockwiseQuantizationT
    def _UnPack(self, blockwiseQuantization):
        if blockwiseQuantization is None:
            return
        self.scales = blockwiseQuantization.Scales()
        self.zeroPoints = blockwiseQuantization.ZeroPoints()
        self.blockSize = blockwiseQuantization.BlockSize()

    # BlockwiseQuantizationT
    def Pack(self, builder):
        BlockwiseQuantizationStart(builder)
        BlockwiseQuantizationAddScales(builder, self.scales)
        BlockwiseQuantizationAddZeroPoints(builder, self.zeroPoints)
        BlockwiseQuantizationAddBlockSize(builder, self.blockSize)
        blockwiseQuantization = BlockwiseQuantizationEnd(builder)
        return blockwiseQuantization


class QuantizationParameters(object):
    __slots__ = ['_tab']

@@ -1157,7 +1263,7 @@ def __init__(self):
        self.scale = None  # type: List[float]
        self.zeroPoint = None  # type: List[int]
        self.detailsType = 0  # type: int
        self.details = None  # type: Union[None, CustomQuantizationT]
        self.details = None  # type: Union[None, CustomQuantizationT, BlockwiseQuantizationT]
        self.quantizedDimension = 0  # type: int

    @classmethod
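To show how the generated object API above fits together, here is a small usage sketch that packs a BlockwiseQuantizationT into a standalone FlatBuffer and reads it back. It assumes the `flatbuffers` Python package is installed and that the generated module above has been imported; the index and block-size values are made up for illustration (in a real model, `scales` and `zero_points` would index into the subgraph's tensors array, and the details would be attached to a QuantizationParametersT with the matching detailsType).

```python
import flatbuffers

# Fill in the object-API wrapper with illustrative values.
details = BlockwiseQuantizationT()
details.scales = 3       # hypothetical index of the scale tensor in subgraph.tensors
details.zeroPoints = -1  # -1 means the zero point is assumed to be 0
details.blockSize = 32

# Pack into a standalone buffer, then unpack it again.
builder = flatbuffers.Builder(0)
builder.Finish(details.Pack(builder))
buf = builder.Output()

restored = BlockwiseQuantizationT.InitFromPackedBuf(buf, 0)
assert (restored.scales, restored.zeroPoints, restored.blockSize) == (3, -1, 32)
```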