chore: Update commitment schemas in README - update env ingestion
epociask committed Aug 22, 2024
1 parent f8257e8 commit f2d05e2
Showing 3 changed files with 74 additions and 40 deletions.
15 changes: 13 additions & 2 deletions README.md
@@ -59,6 +59,8 @@ In order to disperse to the EigenDA network in production, or at high throughput
| `--s3.bucket` | | `$EIGENDA_PROXY_S3_BUCKET` | Bucket name for S3 storage. |
| `--s3.path` | | `$EIGENDA_PROXY_S3_PATH` | Bucket path for S3 storage. |
| `--s3.endpoint` | | `$EIGENDA_PROXY_S3_ENDPOINT` | Endpoint for S3 storage. |
| `--s3.backup` | `false` | `$EIGENDA_PROXY_S3_BACKUP` | Whether to use S3 as a backup store to ensure resiliency in case of EigenDA read failure. |
| `--s3.timeout` | `5s` | `$EIGENDA_PROXY_S3_TIMEOUT` | Timeout for S3 storage operations (e.g. get, put). |
| `--help, -h` | `false` | | Show help. |
| `--version, -v` | `false` | | Print the version. |
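
As a rough illustration of what `--s3.timeout` bounds, each S3 call can be wrapped in a context deadline. A minimal sketch in Go (the helper name and callback shape are assumptions, not the proxy's actual code):

```go
package example

import (
	"context"
	"time"
)

// getWithTimeout bounds a single S3 operation with the configured --s3.timeout
// value; getObject stands in for whatever S3 client call is being made.
func getWithTimeout(parent context.Context, timeout time.Duration, key string,
	getObject func(context.Context, string) ([]byte, error)) ([]byte, error) {
	ctx, cancel := context.WithTimeout(parent, timeout)
	defer cancel()
	return getObject(ctx, key)
}
```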

@@ -86,6 +88,9 @@ An optional `--eigenda-eth-confirmation-depth` flag can be provided to specify a

An ephemeral memory store backend can be used for faster feedback loops when testing rollup integrations. To target this feature, use the CLI flags `--memstore.enabled` and `--memstore.expiration`.

### S3 Fallback
An optional S3 fallback can be enabled via `--s3.backup` to ensure resiliency when **reading**. When enabled, a blob is persisted to S3 after being successfully dispersed, using the keccak256 hash of the existing commitment as the entity key. In the event that a blob cannot be read from EigenDA, it will then be retrieved from S3.
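
To make the read path concrete, below is a minimal sketch of the fallback flow (the `Store` interface and function names are assumptions for illustration, not the proxy's actual storage code):

```go
package example

import (
	"context"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// Store is a hypothetical read interface implemented by both the EigenDA and S3 backends.
type Store interface {
	Get(ctx context.Context, key []byte) ([]byte, error)
}

// getWithFallback tries EigenDA first; on failure it falls back to S3, where
// the blob was persisted under the keccak256 hash of its commitment.
func getWithFallback(ctx context.Context, eigenda, s3 Store, commitment []byte) ([]byte, error) {
	blob, err := eigenda.Get(ctx, commitment)
	if err == nil {
		return blob, nil
	}

	h := sha3.NewLegacyKeccak256()
	h.Write(commitment)
	key := h.Sum(nil)

	blob, s3Err := s3.Get(ctx, key)
	if s3Err != nil {
		return nil, fmt.Errorf("eigenda read failed (%w) and s3 fallback failed: %v", err, s3Err)
	}
	return blob, nil
}
```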


## Metrics

@@ -154,6 +159,8 @@ We also provide network-specific example env configuration files in `.env.exampl
The container can be built by running `make docker-build`.

## Commitment Schemas
Currently, two commitment modes are supported, each with its own encoding schema. The `version byte` is shared across all modes and denotes which version of the EigenDA certificate is being used/requested. The following versions are currently supported:
* `0x0`: V0 certificate type (i.e., dispersal blob info struct with verification against the service manager)
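
For illustration only, the version byte could be modeled as a small Go constant; the names below are assumed rather than taken from the codebase:

```go
package example

// CertVersion is a hypothetical name for the shared version byte.
type CertVersion byte

const (
	// CertV0 denotes a V0 certificate: the dispersal blob info struct,
	// verified against the EigenDA service manager.
	CertV0 CertVersion = 0x0
)
```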

### Optimism Commitment Mode
For `alt-da` clients running on Optimism, the following commitment schema is supported:
@@ -165,8 +172,12 @@ For `alt-da` clients running on Optimism, the following commitment schema is sup
type type byte
```

### Generic Commitment Mode
For generic clients communicating with proxy, the following commitment schema is supported:
Both `keccak256` (i.e., S3 storage using the hash of the pre-image for the commitment value) and `generic` (i.e., EigenDA) commitments are supported to ensure cross-compatibility with alt-da storage backends if desired by a rollup operator.

OP Stack itself only has a conception of the first byte (`commit type`) and performs no semantic interpretation of any subsequent bytes within the encoding. The `da layer type` byte for EigenDA is always `0x0`; however, it is currently unused by OP Stack, with namespace values still being actively [discussed](https://github.com/ethereum-optimism/specs/discussions/135#discussioncomment-9271282).
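
A rough sketch of how a client might peel off these leading bytes follows; the constant names and values are assumptions for illustration, not taken from the OP Stack or proxy codebases:

```go
package example

import "errors"

// Hypothetical prefix values. OP Stack standardizes only the first byte (the
// commit type); the da layer type byte for EigenDA is 0x0 per the text above.
const (
	genericCommitType byte = 0x01 // assumed value for the alt-da "generic" commit type
	eigenDALayerType  byte = 0x00
)

// splitOPCommitment peels off the commit type and da layer type bytes and
// returns the remaining bytes of the commitment.
func splitOPCommitment(data []byte) ([]byte, error) {
	if len(data) < 3 {
		return nil, errors.New("commitment too short")
	}
	if data[0] != genericCommitType {
		return nil, errors.New("unexpected commit type byte")
	}
	if data[1] != eigenDALayerType {
		return nil, errors.New("unexpected da layer type byte")
	}
	return data[2:], nil
}
```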

### Simple Commitment Mode
For simple clients communicating with the proxy (e.g., Arbitrum Nitro), the following commitment schema is supported:

```
0 1 N
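
Reading a simple-mode commitment back is then just a matter of splitting the shared version byte from the raw certificate bytes. A minimal sketch, assuming the first byte is the version byte described above (the function name is hypothetical):

```go
package example

import "fmt"

// decodeSimpleCommitment splits a simple-mode commitment into its version byte
// and the raw EigenDA certificate bytes, assuming the layout sketched above.
func decodeSimpleCommitment(data []byte) (version byte, rawCert []byte, err error) {
	if len(data) < 2 {
		return 0, nil, fmt.Errorf("commitment too short: %d bytes", len(data))
	}
	return data[0], data[1:], nil
}
```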
97 changes: 60 additions & 37 deletions server/config.go
@@ -11,7 +11,6 @@ import (
"github.com/Layr-Labs/eigenda/api/clients"
"github.com/Layr-Labs/eigenda/api/clients/codecs"
"github.com/Layr-Labs/eigenda/encoding/kzg"
opservice "github.com/ethereum-optimism/optimism/op-service"
"github.com/urfave/cli/v2"
)

@@ -45,6 +44,8 @@ const (
S3EndpointFlagName = "s3.endpoint"
S3AccessKeyIDFlagName = "s3.access-key-id" // #nosec G101
S3AccessKeySecretFlagName = "s3.access-key-secret" // #nosec G101
S3BackupFlagName = "s3.backup"
S3TimeoutFlagName = "s3.timeout"
)

const BytesPerSymbol = 31
@@ -140,6 +141,8 @@ func ReadConfig(ctx *cli.Context) Config {
Endpoint: ctx.String(S3EndpointFlagName),
AccessKeyID: ctx.String(S3AccessKeyIDFlagName),
AccessKeySecret: ctx.String(S3AccessKeySecretFlagName),
Backup: ctx.Bool(S3BackupFlagName),
Timeout: ctx.Duration(S3TimeoutFlagName),
},
ClientConfig: clients.EigenDAClientConfig{
RPC: ctx.String(EigenDADisperserRPCFlagName),
@@ -217,11 +220,60 @@ func (cfg *Config) Check() error {
return nil
}

func CLIFlags(envPrefix string) []cli.Flag {
prefixEnvVars := func(name string) []string {
return opservice.PrefixEnvVar(envPrefix, name)
}
// flags used for S3 backend configuration
func s3Flags() []cli.Flag {
return []cli.Flag{
&cli.StringFlag{
Name: S3CredentialTypeFlagName,
Usage: "The way to authenticate to S3, options are [iam, static]",
EnvVars: prefixEnvVars("S3_CREDENTIAL_TYPE"),
},
&cli.StringFlag{
Name: S3BucketFlagName,
Usage: "bucket name for S3 storage",
EnvVars: prefixEnvVars("S3_BUCKET"),
},
&cli.StringFlag{
Name: S3PathFlagName,
Usage: "path for S3 storage",
EnvVars: prefixEnvVars("S3_PATH"),
},
&cli.StringFlag{
Name: S3EndpointFlagName,
Usage: "endpoint for S3 storage",
Value: "",
EnvVars: prefixEnvVars("S3_ENDPOINT"),
},
&cli.StringFlag{
Name: S3AccessKeyIDFlagName,
Usage: "access key id for S3 storage",
Value: "",
EnvVars: prefixEnvVars("S3_ACCESS_KEY_ID"),
},
&cli.StringFlag{
Name: S3AccessKeySecretFlagName,
Usage: "access key secret for S3 storage",
Value: "",
EnvVars: prefixEnvVars("S3_ACCESS_KEY_SECRET"),
},
&cli.BoolFlag{
Name: S3BackupFlagName,
Usage: "whether to use S3 as a backup store to ensure resiliency in case of EigenDA read failure",
Value: false,
EnvVars: prefixEnvVars("S3_BACKUP"),
},
&cli.DurationFlag{
Name: S3TimeoutFlagName,
Usage: "timeout for S3 storage operations (e.g. get, put)",
Value: 5 * time.Second,
EnvVars: prefixEnvVars("S3_TIMEOUT"),
},
}
}

func CLIFlags() []cli.Flag {
// TODO: Decompose all flags into constituent parts based on their respective category / usage
flags := []cli.Flag{
&cli.StringFlag{
Name: EigenDADisperserRPCFlagName,
Usage: "RPC endpoint of the EigenDA disperser.",
@@ -325,37 +377,8 @@ func CLIFlags(envPrefix string) []cli.Flag {
Value: 25 * time.Minute,
EnvVars: []string{"MEMSTORE_EXPIRATION"},
},
&cli.StringFlag{
Name: S3CredentialTypeFlagName,
Usage: "The way to authenticate to S3, options are [iam, static]",
EnvVars: prefixEnvVars("S3_CREDENTIAL_TYPE"),
},
&cli.StringFlag{
Name: S3BucketFlagName,
Usage: "bucket name for S3 storage",
EnvVars: prefixEnvVars("S3_BUCKET"),
},
&cli.StringFlag{
Name: S3PathFlagName,
Usage: "path for S3 storage",
EnvVars: prefixEnvVars("S3_PATH"),
},
&cli.StringFlag{
Name: S3EndpointFlagName,
Usage: "endpoint for S3 storage",
Value: "",
EnvVars: prefixEnvVars("S3_ENDPOINT"),
},
&cli.StringFlag{
Name: S3AccessKeyIDFlagName,
Usage: "access key id for S3 storage",
Value: "",
EnvVars: prefixEnvVars("S3_ACCESS_KEY_ID"),
}, &cli.StringFlag{
Name: S3AccessKeySecretFlagName,
Usage: "access key secret for S3 storage",
Value: "",
EnvVars: prefixEnvVars("S3_ACCESS_KEY_SECRET"),
},
}

flags = append(flags, s3Flags()...)
return flags
}
2 changes: 1 addition & 1 deletion server/flags.go
@@ -46,7 +46,7 @@ var optionalFlags = []cli.Flag{}

func init() {
optionalFlags = append(optionalFlags, oplog.CLIFlags(EnvVarPrefix)...)
optionalFlags = append(optionalFlags, CLIFlags(EnvVarPrefix)...)
optionalFlags = append(optionalFlags, CLIFlags()...)
optionalFlags = append(optionalFlags, opmetrics.CLIFlags(EnvVarPrefix)...)
Flags = append(requiredFlags, optionalFlags...) //nolint:gocritic // this is a global variable
}