refactor: rm dup fields BlobSize -> DataLength
hopeyen committed Oct 2, 2024
1 parent e8386ab commit e1d454b
Showing 6 changed files with 29 additions and 54 deletions.
18 changes: 9 additions & 9 deletions disperser/meterer/meterer.go
@@ -181,7 +181,7 @@ func (m *Meterer) ValidateBinIndex(blobHeader BlobHeader, reservation *ActiveRes
// TODO: Bin limit should be direct write to the Store
func (m *Meterer) IncrementBinUsage(ctx context.Context, blobHeader BlobHeader, reservation *ActiveReservation) error {
//todo: sizes use uint64?
recordedSize := max(blobHeader.BlobSize, uint32(m.MinChargeableSize))
recordedSize := max(blobHeader.DataLength, uint32(m.MinChargeableSize))
newUsage, err := m.OffchainStore.UpdateReservationBin(ctx, blobHeader.AccountID, uint64(blobHeader.BinIndex), recordedSize)
if err != nil {
return fmt.Errorf("failed to increment bin usage: %w", err)
@@ -218,7 +218,7 @@ func (m *Meterer) ServeOnDemandRequest(ctx context.Context, blobHeader BlobHeade
return fmt.Errorf("invalid quorum for On-Demand Request: %w", err)
}
// update blob header to use the minimum chargeable size
blobHeader.BlobSize = max(blobHeader.BlobSize, uint32(m.MinChargeableSize))
blobHeader.DataLength = max(blobHeader.DataLength, uint32(m.MinChargeableSize))
err := m.OffchainStore.AddOnDemandPayment(ctx, blobHeader)
if err != nil {
return fmt.Errorf("failed to update cumulative payment: %w", err)
@@ -243,27 +243,27 @@ func (m *Meterer) ServeOnDemandRequest(ctx context.Context, blobHeader BlobHeade
// ValidatePayment checks if the provided payment header is valid against the local accounting
// prevPmt is the largest cumulative payment strictly less than blobHeader.cumulativePayment if exists
// nextPmt is the smallest cumulative payment strictly greater than blobHeader.cumulativePayment if exists
// nextPmtBlobSize is the blobSize of corresponding to nextPmt if exists
// nextPmtDataLength is the dataLength corresponding to nextPmt if it exists
func (m *Meterer) ValidatePayment(ctx context.Context, blobHeader BlobHeader, onDemandPayment *OnDemandPayment) error {
if blobHeader.CumulativePayment > uint64(onDemandPayment.CumulativePayment) {
return fmt.Errorf("request claims a cumulative payment greater than the on-chain deposit")
}

prevPmt, nextPmt, nextPmtBlobSize, err := m.OffchainStore.GetRelevantOnDemandRecords(ctx, blobHeader.AccountID, blobHeader.CumulativePayment) // zero if DNE
prevPmt, nextPmt, nextPmtDataLength, err := m.OffchainStore.GetRelevantOnDemandRecords(ctx, blobHeader.AccountID, blobHeader.CumulativePayment) // zero if DNE
if err != nil {
return fmt.Errorf("failed to get relevant on-demand records: %w", err)
}
// the current request must increment cumulative payment by a magnitude sufficient to cover the blob size
if prevPmt+uint64(blobHeader.BlobSize*m.Config.PricePerChargeable/m.Config.MinChargeableSize) > blobHeader.CumulativePayment {
if prevPmt+uint64(blobHeader.DataLength*m.Config.PricePerChargeable/m.Config.MinChargeableSize) > blobHeader.CumulativePayment {
return fmt.Errorf("insufficient cumulative payment increment")
}
// the current request must not break the payment magnitude for the next payment if the two requests were delivered out-of-order
if nextPmt != 0 && blobHeader.CumulativePayment+uint64(nextPmtBlobSize*m.Config.PricePerChargeable/m.Config.MinChargeableSize) > nextPmt {
if nextPmt != 0 && blobHeader.CumulativePayment+uint64(nextPmtDataLength*m.Config.PricePerChargeable/m.Config.MinChargeableSize) > nextPmt {
return fmt.Errorf("breaking cumulative payment invariants")
}
// check passed: blob can be safely inserted into the set of payments
// prevPmt + blobHeader.BlobSize * m.FixedFeePerByte <= blobHeader.CumulativePayment
// <= nextPmt - nextPmtBlobSize * m.FixedFeePerByte > nextPmt
// prevPmt + blobHeader.DataLength * m.FixedFeePerByte <= blobHeader.CumulativePayment
// <= nextPmt - nextPmtDataLength * m.FixedFeePerByte > nextPmt
return nil
}
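
To make the two inequalities above concrete, here is a minimal sketch of the ordering check with illustrative numbers; pricePerChargeable and minChargeableSize are assumed values, not the project's configuration:

package main

import "fmt"

// validateIncrement mirrors the two inequalities checked in ValidatePayment:
// the claimed cumulative payment must cover prevPmt plus the charge for this
// blob, and must still leave room for the charge already recorded for nextPmt
// (if one exists) in case requests arrive out of order.
func validateIncrement(
	prevPmt, cumulativePayment, nextPmt uint64,
	dataLength, nextPmtDataLength uint32,
	pricePerChargeable, minChargeableSize uint32,
) error {
	charge := func(length uint32) uint64 {
		return uint64(length * pricePerChargeable / minChargeableSize)
	}
	if prevPmt+charge(dataLength) > cumulativePayment {
		return fmt.Errorf("insufficient cumulative payment increment")
	}
	if nextPmt != 0 && cumulativePayment+charge(nextPmtDataLength) > nextPmt {
		return fmt.Errorf("breaking cumulative payment invariants")
	}
	return nil
}

func main() {
	// Illustrative pricing: 1 unit per 128-byte chargeable increment, so a
	// 256-byte blob costs 2. Moving the cumulative payment from 100 to 102
	// is sufficient; moving it to 101 is not.
	fmt.Println(validateIncrement(100, 102, 0, 256, 0, 1, 128)) // <nil>
	fmt.Println(validateIncrement(100, 101, 0, 256, 0, 1, 128)) // insufficient cumulative payment increment
}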

Expand All @@ -282,7 +282,7 @@ func (m *Meterer) ValidateGlobalBinIndex(blobHeader BlobHeader) (uint32, error)
// IncrementBinUsage increments the bin usage atomically and checks for overflow
func (m *Meterer) IncrementGlobalBinUsage(ctx context.Context, blobHeader BlobHeader) error {
globalIndex := uint64(time.Now().Unix())
newUsage, err := m.OffchainStore.UpdateGlobalBin(ctx, globalIndex, blobHeader.BlobSize)
newUsage, err := m.OffchainStore.UpdateGlobalBin(ctx, globalIndex, blobHeader.DataLength)
if err != nil {
return fmt.Errorf("failed to increment global bin usage: %w", err)
}
10 changes: 5 additions & 5 deletions disperser/meterer/meterer_test.go
@@ -171,7 +171,7 @@ func TestMetererReservations(t *testing.T) {
BinIndex: uint32(time.Now().Unix()) / mt.Config.ReservationWindow,
CumulativePayment: 0,
Commitment: *commitment,
BlobSize: 2000,
DataLength: 2000,
QuorumNumbers: []uint8{0},
Signature: []byte{78, 212, 55, 45, 156, 217, 21, 240, 47, 141, 18, 213, 226, 196, 4, 51, 245, 110, 20, 106, 244, 142, 142, 49, 213, 21, 34, 151, 118, 254, 46, 89, 48, 84, 250, 46, 179, 228, 46, 51, 106, 164, 122, 11, 26, 101, 10, 10, 243, 2, 30, 46, 95, 125, 189, 237, 236, 91, 130, 224, 240, 151, 106, 204, 1},
}
@@ -208,8 +208,8 @@ func TestMetererReservations(t *testing.T) {
// test bin usage
accountID := crypto.PubkeyToAddress(privateKey2.PublicKey).Hex()
for i := 0; i < 9; i++ {
blobSize := 20
header, err = meterer.ConstructBlobHeader(signer, 1, 1, binIndex, 0, *commitment, uint32(blobSize), quoromNumbers, privateKey2)
dataLength := 20
header, err = meterer.ConstructBlobHeader(signer, 1, 1, binIndex, 0, *commitment, uint32(dataLength), quoromNumbers, privateKey2)
assert.NoError(t, err)
err = mt.MeterRequest(ctx, *header)
assert.NoError(t, err)
@@ -220,7 +220,7 @@ func TestMetererReservations(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, accountID, item["AccountID"].(*types.AttributeValueMemberS).Value)
assert.Equal(t, strconv.Itoa(int(binIndex)), item["BinIndex"].(*types.AttributeValueMemberN).Value)
assert.Equal(t, strconv.Itoa(int((i+1)*blobSize)), item["BinUsage"].(*types.AttributeValueMemberN).Value)
assert.Equal(t, strconv.Itoa(int((i+1)*dataLength)), item["BinUsage"].(*types.AttributeValueMemberN).Value)

}
// first overflow is allowed
Expand Down Expand Up @@ -267,7 +267,7 @@ func TestMetererOnDemand(t *testing.T) {
BinIndex: binIndex,
CumulativePayment: 1,
Commitment: *commitment,
BlobSize: 2000,
DataLength: 2000,
QuorumNumbers: quorumNumbers,
Signature: []byte{78, 212, 55, 45, 156, 217, 21, 240, 47, 141, 18, 213, 226, 196, 4, 51, 245, 110, 20, 106, 244, 142, 142, 49, 213, 21, 34, 151, 118, 254, 46, 89, 48, 84, 250, 46, 179, 228, 46, 51, 106, 164, 122, 11, 26, 101, 10, 10, 243, 2, 30, 46, 95, 125, 189, 237, 236, 91, 130, 224, 240, 151, 106, 204, 1},
}
12 changes: 6 additions & 6 deletions disperser/meterer/offchain_store.go
@@ -57,7 +57,7 @@ type ReservationBin struct {

type PaymentTuple struct {
CumulativePayment uint64
BlobSize uint32
DataLength uint32
}

type GlobalBin struct {
@@ -193,7 +193,7 @@ func (s *OffchainStore) AddOnDemandPayment(ctx context.Context, blobHeader BlobH
commondynamodb.Item{
"AccountID": &types.AttributeValueMemberS{Value: blobHeader.AccountID},
"CumulativePayments": &types.AttributeValueMemberN{Value: strconv.FormatUint(blobHeader.CumulativePayment, 10)},
"BlobSize": &types.AttributeValueMemberN{Value: strconv.FormatUint(uint64(blobHeader.BlobSize), 10)},
"DataLength": &types.AttributeValueMemberN{Value: strconv.FormatUint(uint64(blobHeader.DataLength), 10)},
},
)

@@ -257,18 +257,18 @@ func (s *OffchainStore) GetRelevantOnDemandRecords(ctx context.Context, accountI
return 0, 0, 0, fmt.Errorf("failed to query the next payment for account: %w", err)
}
var nextPayment uint64
var nextBlobSize uint32
var nextDataLength uint32
if len(largerResult) > 0 {
nextPayment, err = strconv.ParseUint(largerResult[0]["CumulativePayments"].(*types.AttributeValueMemberN).Value, 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("failed to parse next payment: %w", err)
}
blobSize, err := strconv.ParseUint(largerResult[0]["BlobSize"].(*types.AttributeValueMemberN).Value, 10, 32)
dataLength, err := strconv.ParseUint(largerResult[0]["DataLength"].(*types.AttributeValueMemberN).Value, 10, 32)
if err != nil {
return 0, 0, 0, fmt.Errorf("failed to parse blob size: %w", err)
}
nextBlobSize = uint32(blobSize)
nextDataLength = uint32(dataLength)
}

return prevPayment, nextPayment, nextBlobSize, nil
return prevPayment, nextPayment, nextDataLength, nil
}
14 changes: 7 additions & 7 deletions disperser/meterer/offchain_store_test.go
@@ -182,7 +182,7 @@ func TestOnDemandUsageBasicOperations(t *testing.T) {
commondynamodb.Item{
"AccountID": &types.AttributeValueMemberS{Value: "account1"},
"CumulativePayments": &types.AttributeValueMemberN{Value: "1"},
"BlobSize": &types.AttributeValueMemberN{Value: "1000"},
"DataLength": &types.AttributeValueMemberN{Value: "1000"},
},
)
assert.NoError(t, err)
@@ -194,7 +194,7 @@ func TestOnDemandUsageBasicOperations(t *testing.T) {
items[i] = commondynamodb.Item{
"AccountID": &types.AttributeValueMemberS{Value: fmt.Sprintf("account%d", i%repetitions)},
"CumulativePayments": &types.AttributeValueMemberN{Value: fmt.Sprintf("%d", i)},
"BlobSize": &types.AttributeValueMemberN{Value: fmt.Sprintf("%d", i*1000)},
"DataLength": &types.AttributeValueMemberN{Value: fmt.Sprintf("%d", i*1000)},
}
}
unprocessed, err := dynamoClient.PutItems(ctx, tableName, items)
@@ -208,7 +208,7 @@ func TestOnDemandUsageBasicOperations(t *testing.T) {
})
assert.NoError(t, err)
assert.Equal(t, "1", item["CumulativePayments"].(*types.AttributeValueMemberN).Value)
assert.Equal(t, "1000", item["BlobSize"].(*types.AttributeValueMemberN).Value)
assert.Equal(t, "1000", item["DataLength"].(*types.AttributeValueMemberN).Value)

queryResult, err := dynamoClient.QueryIndex(ctx, tableName, indexName, "AccountID = :account", commondynamodb.ExpresseionValues{
":account": &types.AttributeValueMemberS{
@@ -218,7 +218,7 @@ func TestOnDemandUsageBasicOperations(t *testing.T) {
assert.Len(t, queryResult, numItems/repetitions)
for _, item := range queryResult {
cumulativePayments, _ := strconv.Atoi(item["CumulativePayments"].(*types.AttributeValueMemberN).Value)
assert.Equal(t, fmt.Sprintf("%d", cumulativePayments*1000), item["BlobSize"].(*types.AttributeValueMemberN).Value)
assert.Equal(t, fmt.Sprintf("%d", cumulativePayments*1000), item["DataLength"].(*types.AttributeValueMemberN).Value)
}
queryResult, err = dynamoClient.QueryIndex(ctx, tableName, indexName, "AccountID = :account_id", commondynamodb.ExpresseionValues{
":account_id": &types.AttributeValueMemberS{
@@ -234,15 +234,15 @@ func TestOnDemandUsageBasicOperations(t *testing.T) {
}, commondynamodb.Item{
"AccountID": &types.AttributeValueMemberS{Value: "account1"},
"CumulativePayments": &types.AttributeValueMemberN{Value: "3"},
"BlobSize": &types.AttributeValueMemberN{Value: "3000"},
"DataLength": &types.AttributeValueMemberN{Value: "3000"},
})
assert.NoError(t, err)
assert.Equal(t, "3000", updatedItem["BlobSize"].(*types.AttributeValueMemberN).Value)
assert.Equal(t, "3000", updatedItem["DataLength"].(*types.AttributeValueMemberN).Value)

item, err = dynamoClient.GetItem(ctx, tableName, commondynamodb.Key{
"AccountID": &types.AttributeValueMemberS{Value: "account1"},
"CumulativePayments": &types.AttributeValueMemberN{Value: "1"},
})
assert.NoError(t, err)
assert.Equal(t, "3000", item["BlobSize"].(*types.AttributeValueMemberN).Value)
assert.Equal(t, "3000", item["DataLength"].(*types.AttributeValueMemberN).Value)
}
28 changes: 2 additions & 26 deletions disperser/meterer/types.go
@@ -29,34 +29,11 @@ type BlobHeader struct {
BinIndex uint32

Signature []byte
BlobSize uint32
// TODO: we are thinking the contract can use uint128 for cumulative payment,
// but the definition on v2 uses uint64. Double check with team.
CumulativePayment uint64
}

// // EIP712Domain represents the EIP-712 domain for our blob headers
// var EIP712Domain = apitypes.TypedDataDomain{
// Name: "EigenDA",
// Version: "1",
// ChainId: (*math.HexOrDecimal256)(big.NewInt(17000)),
// VerifyingContract: common.HexToAddress("0x1234000000000000000000000000000000000000").Hex(),
// }

// Protocol defines parameters: epoch length and rate-limit window interval
type Reservation struct {
dataRate uint32 // bandwith being reserved
startEpoch uint32 // index of epoch where reservation begins
endEpoch uint32 // index of epoch where reservation ends
quorumSplit []byte // each byte is a percentage at the corresponding quorum index
}

// Protocol defines parameters: FixedFeePerByte; fine to leave global rate-limit offchain atm
type OnDemand struct {
amountDeposited big.Int
amountCollected big.Int
}

// EIP712Signer handles EIP-712 signing operations
type EIP712Signer struct {
domain apitypes.TypedDataDomain
@@ -201,7 +201,7 @@ func ConstructBlobHeader(
binIndex uint32,
cumulativePayment uint64,
commitment core.G1Point,
blobSize uint32,
dataLength uint32,
quorumNumbers []uint8,
privateKey *ecdsa.PrivateKey,
) (*BlobHeader, error) {
@@ -214,8 +214,7 @@
CumulativePayment: cumulativePayment,
Commitment: commitment,
QuorumNumbers: quorumNumbers,
BlobSize: blobSize,
DataLength: blobSize,
DataLength: dataLength,
}

signature, err := signer.SignBlobHeader(header, privateKey)
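The net effect of this commit on the header type is that a single DataLength field remains where BlobSize and DataLength were previously both set. A small illustrative stand-in (not the real meterer.BlobHeader) showing how a caller now populates it:

package main

import "fmt"

// blobHeaderLite is an illustrative stand-in for meterer.BlobHeader after the
// refactor, showing only the payment-relevant fields: the duplicated BlobSize
// is gone and DataLength is the single source of truth for the blob's length.
type blobHeaderLite struct {
	AccountID         string
	BinIndex          uint32
	CumulativePayment uint64
	DataLength        uint32
	QuorumNumbers     []uint8
}

func main() {
	header := blobHeaderLite{
		AccountID:         "0x1234", // illustrative account, not from the diff
		BinIndex:          42,
		CumulativePayment: 100,
		DataLength:        2000, // previously set twice, as both BlobSize and DataLength
		QuorumNumbers:     []uint8{0},
	}
	fmt.Printf("%+v\n", header)
}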
1 change: 0 additions & 1 deletion disperser/meterer/types_test.go
@@ -30,7 +30,6 @@ func TestEIP712Signer(t *testing.T) {
Commitment: *commitment,
DataLength: 1024,
QuorumNumbers: []uint8{1},
BlobSize: 1024,
}

t.Run("SignBlobHeader", func(t *testing.T) {
