Hi! I'm wondering if there is currently any way to create a custom tensor type in Keras, similar to:
- torch.Tensor in PyTorch (by subclassing Tensor)
- tf.experimental.ExtensionType in TensorFlow (rough sketch below)
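For comparison, the TensorFlow-only version of such a wrapper is just a subclass with typed fields, roughly like this (the class name is only for illustration):

import tensorflow as tf

class QuantizedTF(tf.experimental.ExtensionType):
    # Typed fields: ExtensionType generates the constructor and makes the
    # type usable as a composite tensor (e.g. inside tf.function)
    values: tf.Tensor
    scale: tf.Tensor
    value_bits: int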
The goal would be to create a framework-agnostic tensor type that works naturally with keras.ops and integrates into Keras layers without breaking backend compatibility.
Here is an example of what I would like to be able to write with Keras, for illustration:
import keras
import numpy as np
class QuantizedTensor(Tensor):  # "Tensor" stands for the custom tensor base class I am asking about
    def __init__(self, values: Tensor, scale: Tensor, value_bits: int):
        self.values = keras.ops.convert_to_tensor(values, dtype="float32")
        self.scale = keras.ops.convert_to_tensor(scale, dtype="float32")
        self.value_bits = value_bits

class QuantizedDense(keras.layers.Dense):
    def build(self, input_shape):
        return super().build(input_shape)

    def call(self, inputs):
        # Quantize the kernel (self.quantizer is some quantization
        # function returning a QuantizedTensor, defined elsewhere)
        quantized_kernel = self.quantizer(self.kernel)
        # MatMul between quantized tensors
        out = keras.ops.matmul(inputs.values, quantized_kernel.values)
        int_max = keras.ops.power(2.0, inputs.value_bits)
        out = keras.ops.clip(out, -int_max, int_max - 1)
        return QuantizedTensor(out, inputs.scale * quantized_kernel.scale, inputs.value_bits)

values = np.ones((1, 32), dtype=np.float32)
scale = np.array(1.5, dtype=np.float32)
value_bits = 8
quantized_tensor = QuantizedTensor(values, scale, value_bits)
qlayer = QuantizedDense(units=10)
outputs = qlayer(quantized_tensor)
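For context, the closest thing I can do today (as far as I can tell) is a plain Python container that each layer unpacks by hand, roughly like the sketch below (all names are just illustrative). It runs, but keras.ops knows nothing about the wrapper and Keras' __call__ / functional tracing cannot handle it, so build() and call() have to be invoked manually:

import keras
import numpy as np

class QuantizedPair:  # plain container, not a real tensor type
    def __init__(self, values, scale, value_bits):
        self.values = keras.ops.convert_to_tensor(values, dtype="float32")
        self.scale = keras.ops.convert_to_tensor(scale, dtype="float32")
        self.value_bits = value_bits

class ManualQuantizedDense(keras.layers.Layer):
    def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.units = units

    def build(self, input_shape):
        # input_shape refers to the .values tensor and has to be passed by hand,
        # since Keras cannot infer it from the wrapper object
        self.kernel = self.add_weight(
            shape=(input_shape[-1], self.units), initializer="glorot_uniform"
        )

    def call(self, inputs):
        out = keras.ops.matmul(inputs.values, self.kernel)
        return QuantizedPair(out, inputs.scale, inputs.value_bits)

x = QuantizedPair(np.ones((1, 32), dtype=np.float32), np.array(1.5, dtype=np.float32), 8)
layer = ManualQuantizedDense(10)
layer.build((1, 32))   # manual build: __call__ cannot unpack the wrapper
y = layer.call(x)      # bypass __call__ for the same reason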
Thanks!