Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix s3 panic from keccak commitment put requests #129

Merged
merged 5 commits into from
Sep 19, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 5 additions & 2 deletions e2e/optimism_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,9 @@ func TestOptimismKeccak256Commitment(gt *testing.T) {

testCfg := e2e.TestConfig(useMemory())
testCfg.UseKeccak256ModeS3 = true
proxyTS, shutDown := e2e.CreateTestSuite(gt, testCfg)

tsConfig := e2e.TestSuiteConfig(gt, testCfg)
proxyTS, shutDown := e2e.CreateTestSuite(gt, tsConfig)
defer shutDown()

t := actions.NewDefaultTesting(gt)
Expand Down Expand Up @@ -176,7 +178,8 @@ func TestOptimismAltDACommitment(gt *testing.T) {
gt.Skip("Skipping test as INTEGRATION or TESTNET env var not set")
}

proxyTS, shutDown := e2e.CreateTestSuite(gt, e2e.TestConfig(useMemory()))
tsConfig := e2e.TestSuiteConfig(gt, e2e.TestConfig(useMemory()))
proxyTS, shutDown := e2e.CreateTestSuite(gt, tsConfig)
defer shutDown()

t := actions.NewDefaultTesting(gt)
Expand Down
48 changes: 40 additions & 8 deletions e2e/server_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,8 @@ func TestOptimismClientWithKeccak256Commitment(t *testing.T) {
testCfg := e2e.TestConfig(useMemory())
testCfg.UseKeccak256ModeS3 = true

ts, kill := e2e.CreateTestSuite(t, testCfg)
tsConfig := e2e.TestSuiteConfig(t, testCfg)
ts, kill := e2e.CreateTestSuite(t, tsConfig)
defer kill()

daClient := op_plasma.NewDAClient(ts.Address(), false, true)
Expand All @@ -41,6 +42,30 @@ func TestOptimismClientWithKeccak256Commitment(t *testing.T) {
require.Equal(t, testPreimage, preimage)
}

// TestKeccak256CommitmentRequestErrorsWhenS3NotSet asserts that a keccak256
// commitment PUT request returns an error to the client (instead of panicking
// the proxy) when keccak/S3 mode is enabled but no S3 endpoint is configured.
func TestKeccak256CommitmentRequestErrorsWhenS3NotSet(t *testing.T) {
	if !runIntegrationTests && !runTestnetIntegrationTests {
		t.Skip("Skipping test as INTEGRATION or TESTNET env var not set")
	}

	t.Parallel()

	cfg := e2e.TestConfig(useMemory())
	cfg.UseKeccak256ModeS3 = true

	suiteCfg := e2e.TestSuiteConfig(t, cfg)
	// Simulate a misconfigured deployment: keccak mode requested while the
	// S3 backend is absent.
	suiteCfg.S3Config.Endpoint = ""

	ts, shutdown := e2e.CreateTestSuite(t, suiteCfg)
	defer shutdown()

	client := op_plasma.NewDAClient(ts.Address(), false, true)
	input := []byte(e2e.RandString(100))

	_, err := client.SetInput(ts.Ctx, input)
	// TODO: the server currently returns an internal server error. Should it return a 400 instead?
	require.Error(t, err)
}

/*
this test asserts that the data can be posted/read to EigenDA
with a concurrent S3 backend configured
Expand All @@ -53,7 +78,8 @@ func TestOptimismClientWithGenericCommitment(t *testing.T) {

t.Parallel()

ts, kill := e2e.CreateTestSuite(t, e2e.TestConfig(useMemory()))
tsConfig := e2e.TestSuiteConfig(t, e2e.TestConfig(useMemory()))
ts, kill := e2e.CreateTestSuite(t, tsConfig)
defer kill()

daClient := op_plasma.NewDAClient(ts.Address(), false, false)
Expand All @@ -77,7 +103,8 @@ func TestProxyClient(t *testing.T) {

t.Parallel()

ts, kill := e2e.CreateTestSuite(t, e2e.TestConfig(useMemory()))
tsConfig := e2e.TestSuiteConfig(t, e2e.TestConfig(useMemory()))
ts, kill := e2e.CreateTestSuite(t, tsConfig)
defer kill()

cfg := &client.Config{
Expand All @@ -104,7 +131,8 @@ func TestProxyServerWithLargeBlob(t *testing.T) {

t.Parallel()

ts, kill := e2e.CreateTestSuite(t, e2e.TestConfig(useMemory()))
tsConfig := e2e.TestSuiteConfig(t, e2e.TestConfig(useMemory()))
ts, kill := e2e.CreateTestSuite(t, tsConfig)
defer kill()

cfg := &client.Config{
Expand All @@ -131,7 +159,8 @@ func TestProxyServerWithOversizedBlob(t *testing.T) {

t.Parallel()

ts, kill := e2e.CreateTestSuite(t, e2e.TestConfig(useMemory()))
tsConfig := e2e.TestSuiteConfig(t, e2e.TestConfig(useMemory()))
ts, kill := e2e.CreateTestSuite(t, tsConfig)
defer kill()

cfg := &client.Config{
Expand Down Expand Up @@ -172,7 +201,8 @@ func TestProxyServerCaching(t *testing.T) {
testCfg := e2e.TestConfig(useMemory())
testCfg.UseS3Caching = true

ts, kill := e2e.CreateTestSuite(t, testCfg)
tsConfig := e2e.TestSuiteConfig(t, testCfg)
ts, kill := e2e.CreateTestSuite(t, tsConfig)
defer kill()

cfg := &client.Config{
Expand Down Expand Up @@ -214,7 +244,8 @@ func TestProxyServerCachingWithRedis(t *testing.T) {
testCfg := e2e.TestConfig(useMemory())
testCfg.UseRedisCaching = true

ts, kill := e2e.CreateTestSuite(t, testCfg)
tsConfig := e2e.TestSuiteConfig(t, testCfg)
ts, kill := e2e.CreateTestSuite(t, tsConfig)
defer kill()

cfg := &client.Config{
Expand Down Expand Up @@ -266,7 +297,8 @@ func TestProxyServerReadFallback(t *testing.T) {
testCfg.UseS3Fallback = true
testCfg.Expiration = time.Millisecond * 1

ts, kill := e2e.CreateTestSuite(t, testCfg)
tsConfig := e2e.TestSuiteConfig(t, testCfg)
ts, kill := e2e.CreateTestSuite(t, tsConfig)
defer kill()

cfg := &client.Config{
Expand Down
41 changes: 22 additions & 19 deletions e2e/setup.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,9 @@ const (
)

type Cfg struct {
UseMemory bool
Expiration time.Duration
UseMemory bool
Expiration time.Duration
// at most one of the below options should be true
UseKeccak256ModeS3 bool
UseS3Caching bool
UseRedisCaching bool
Expand Down Expand Up @@ -84,15 +85,7 @@ func createS3Config(eigendaCfg server.Config) server.CLIConfig {
}
}

type TestSuite struct {
Ctx context.Context
Log log.Logger
Server *server.Server
}

func CreateTestSuite(t *testing.T, testCfg *Cfg) (TestSuite, func()) {
ctx := context.Background()

func TestSuiteConfig(t *testing.T, testCfg *Cfg) server.CLIConfig {
// load signer key from environment
pk := os.Getenv(privateKey)
if pk == "" && !testCfg.UseMemory {
Expand All @@ -112,12 +105,6 @@ func CreateTestSuite(t *testing.T, testCfg *Cfg) (TestSuite, func()) {
pollInterval = time.Minute * 1
}

log := oplog.NewLogger(os.Stdout, oplog.CLIConfig{
Level: log.LevelDebug,
Format: oplog.FormatLogFmt,
Color: true,
}).New("role", svcName)

eigendaCfg := server.Config{
ClientConfig: clients.EigenDAClientConfig{
RPC: holeskyDA,
Expand All @@ -143,7 +130,6 @@ func CreateTestSuite(t *testing.T, testCfg *Cfg) (TestSuite, func()) {
}

var cfg server.CLIConfig

switch {
case testCfg.UseKeccak256ModeS3:
cfg = createS3Config(eigendaCfg)
Expand All @@ -167,9 +153,26 @@ func CreateTestSuite(t *testing.T, testCfg *Cfg) (TestSuite, func()) {
}
}

return cfg
}

type TestSuite struct {
Ctx context.Context
Log log.Logger
Server *server.Server
}

func CreateTestSuite(t *testing.T, testSuiteCfg server.CLIConfig) (TestSuite, func()) {
log := oplog.NewLogger(os.Stdout, oplog.CLIConfig{
Level: log.LevelDebug,
Format: oplog.FormatLogFmt,
Color: true,
}).New("role", svcName)

ctx := context.Background()
store, err := server.LoadStoreRouter(
ctx,
cfg,
testSuiteCfg,
log,
)
require.NoError(t, err)
Expand Down
4 changes: 2 additions & 2 deletions server/load_store.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ import (
)

// populateTargets ... creates a list of storage backends based on the provided target strings
func populateTargets(targets []string, s3 *store.S3Store, redis *store.RedStore) []store.PrecomputedKeyStore {
func populateTargets(targets []string, s3 store.PrecomputedKeyStore, redis *store.RedStore) []store.PrecomputedKeyStore {
stores := make([]store.PrecomputedKeyStore, len(targets))

for i, f := range targets {
Expand Down Expand Up @@ -42,7 +42,7 @@ func populateTargets(targets []string, s3 *store.S3Store, redis *store.RedStore)
func LoadStoreRouter(ctx context.Context, cfg CLIConfig, log log.Logger) (store.IRouter, error) {
// create S3 backend store (if enabled)
var err error
var s3 *store.S3Store
var s3 store.PrecomputedKeyStore
var redis *store.RedStore

if cfg.S3Config.Bucket != "" && cfg.S3Config.Endpoint != "" {
Expand Down
Loading