diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 2fe63effc..14fb5da9a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -300,3 +300,32 @@ db-snapshot:
   retry: 2
   tags:
     - zombienet-polkadot-integration-test
+
+multiple-networks:
+  stage: deploy
+  <<: *kubernetes-env
+  image: "paritypr/zombienet:${CI_COMMIT_SHORT_SHA}"
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "schedule"
+    - if: $CI_COMMIT_REF_NAME == "master"
+    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
+    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
+  # needs:
+  #   - job: publish-docker-pr
+
+  variables:
+    GH_DIR: "https://github.com/paritytech/zombienet/tree/${CI_COMMIT_SHORT_SHA}/tests"
+
+  before_script:
+    - echo "Zombienet multiple networks test"
+    - echo "paritypr/zombienet:${CI_COMMIT_SHORT_SHA}"
+    - echo "${GH_DIR}"
+    - export DEBUG=zombie*
+
+  script:
+    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
+      --test="0014-multiple-networks.zndsl"
+  allow_failure: true
+  retry: 2
+  tags:
+    - zombienet-polkadot-integration-test
diff --git a/Cargo.lock b/Cargo.lock
index 89cb24031..1a4993698 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -285,7 +285,7 @@ dependencies = [
 
 [[package]]
 name = "dsl-parser-wrapper"
-version = "0.1.7"
+version = "0.1.8"
 dependencies = [
  "parser",
  "serde_json",
diff --git a/crates/parser-wrapper/Cargo.toml b/crates/parser-wrapper/Cargo.toml
index 0e629f45c..0c691d4cc 100644
--- a/crates/parser-wrapper/Cargo.toml
+++ b/crates/parser-wrapper/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "dsl-parser-wrapper"
-version = "0.1.7"
+version = "0.1.8"
 edition = "2021"
 description = "Zombienet DSL parser: produces a test definition, in json format, that can be used with the ZombieNet's test-runnner."
 license = "GPL-3.0-or-later"
diff --git a/crates/parser/src/ast.rs b/crates/parser/src/ast.rs
index d6b31b7cf..e0d153cc0 100644
--- a/crates/parser/src/ast.rs
+++ b/crates/parser/src/ast.rs
@@ -147,7 +147,7 @@ pub struct Assertion {
 #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
 pub struct TestDefinition {
     pub description: Option<String>,
-    pub network: String,
+    pub networks: Vec<String>,
     pub creds: String,
     pub assertions: Vec<Assertion>,
 }
diff --git a/crates/parser/src/lib.rs b/crates/parser/src/lib.rs
index 148c1c40d..c4206aabc 100644
--- a/crates/parser/src/lib.rs
+++ b/crates/parser/src/lib.rs
@@ -212,7 +212,7 @@ pub fn parse(unparsed_file: &str) -> Result
         Err(e) => return Err(errors::ParserError::ParseError(e.to_string())),
     };
 
-    let mut network: Option<String> = None;
+    let mut networks: Vec<String> = vec![];
     let mut creds: Option<String> = None;
     let mut description: Option<String> = None;
     let mut assertions: Vec<Assertion> = vec![];
@@ -233,7 +233,10 @@ pub fn parse(unparsed_file: &str) -> Result
             Rule::network => {
-                network = Some(record.into_inner().as_str().to_owned());
+                let mut pairs = record.into_inner();
+                let file_path = get_pair(&mut pairs, "file_path")?.as_str();
+
+                networks.push(file_path.to_owned());
             }
             Rule::creds => {
                 let mut pairs = record.into_inner();
@@ -604,7 +607,7 @@ pub fn parse(unparsed_file: &str) -> Result
     .argument("<testFile>", "ZNDSL file (.zndsl) describing the tests")
     .argument(
-      "[runningNetworkSpec]",
+      "[runningNetworksSpec...]",
       "Path to the network spec json, for using a running network for running the test",
     )
     .action(asyncAction(test));
diff --git a/javascript/packages/cli/zombienet-dsl-parser-wrapper-0.1.8.tgz b/javascript/packages/cli/zombienet-dsl-parser-wrapper-0.1.8.tgz
new file mode 100644
index 000000000..2b535346a
Binary files /dev/null and b/javascript/packages/cli/zombienet-dsl-parser-wrapper-0.1.8.tgz differ
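
For orientation: the DSL change above means the parser now emits one entry per `Network:` line instead of a single `network` string. A minimal sketch of the JSON the TypeScript side would receive for the new `tests/0014-multiple-networks.zndsl`, using only field names from the updated `TestDefinition` types (assertions elided; this is an illustration, not actual parser output):

```typescript
// Hypothetical parse result for a .zndsl file with two `Network:` lines.
// The shape is the point: `networks` is now an array of network-spec paths.
const testDef = {
  description: "Multiple Networks test",
  networks: ["./0014-network-1.toml", "./0014-network-2.toml"], // one entry per `Network:` line, in order
  creds: "config",
  assertions: [], // one entry per assertion line, elided in this sketch
};
```
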
diff --git a/javascript/packages/orchestrator/README.md b/javascript/packages/orchestrator/README.md
index 97551ee29..a0bfd1d6c 100644
--- a/javascript/packages/orchestrator/README.md
+++ b/javascript/packages/orchestrator/README.md
@@ -322,6 +322,16 @@ npm install
 npm run build
 ```
 
+### Build `parser-wrapper` locally (optional)
+
+```bash
+cd zombienet/crates/parser-wrapper
+wasm-pack build --release --target nodejs --scope zombienet
+cd zombienet/javascript
+npm link ../crates/parser-wrapper/pkg/
+npm run build
+```
+
 ### Download and install needed artifacts (optional)
 
 For an easier and faster setup of your local environment, run:
diff --git a/javascript/packages/orchestrator/src/network.ts b/javascript/packages/orchestrator/src/network.ts
index fb4ddaa7c..2c273b7bf 100644
--- a/javascript/packages/orchestrator/src/network.ts
+++ b/javascript/packages/orchestrator/src/network.ts
@@ -259,7 +259,7 @@ export class Network {
 
     const nodes = this.groups[nodeOrGroupName];
     if (!nodes)
-      throw new Error(`Noode or Group: ${nodeOrGroupName} not present`);
+      throw new Error(`Node or Group: ${nodeOrGroupName} not present`);
     return nodes;
   }
 
@@ -283,6 +283,35 @@ export class Network {
     }
   }
 
+  getNetworkInfo() {
+    return {
+      tmpDir: this.tmpDir,
+      chainSpecPath: this.chainSpecFullPath,
+      relay: this.relay.map((node: any) => {
+        const { name, wsUri, prometheusUri, userDefinedTypes } = node;
+        return { name, wsUri, prometheusUri, userDefinedTypes };
+      }),
+      paras: Object.keys(this.paras).reduce((memo: any, paraId: any) => {
+        const { chainSpecPath, wasmPath, statePath } = this.paras[paraId];
+        memo[paraId] = { chainSpecPath, wasmPath, statePath };
+        memo[paraId].nodes = this.paras[paraId].nodes.map((node) => {
+          return { ...node };
+        });
+        return memo;
+      }, {}),
+      nodesByName: Object.keys(this.nodesByName).reduce(
+        (memo: any, nodeName) => {
+          const { name, wsUri, prometheusUri, userDefinedTypes, parachainId } =
+            this.nodesByName[nodeName];
+          memo[nodeName] = { name, wsUri, prometheusUri, userDefinedTypes };
+          if (parachainId) memo[nodeName].parachainId = parachainId;
+          return memo;
+        },
+        {},
+      ),
+    };
+  }
+
   // show links for access and debug
   showNetworkInfo(provider: string) {
     const logTable = new CreateLogTable({
diff --git a/javascript/packages/orchestrator/src/orchestrator.ts b/javascript/packages/orchestrator/src/orchestrator.ts
index d8a8ed1c9..6e17e4517 100644
--- a/javascript/packages/orchestrator/src/orchestrator.ts
+++ b/javascript/packages/orchestrator/src/orchestrator.ts
@@ -528,8 +528,8 @@ export async function start(
       await network.dumpLogs();
       await network.stop();
     }
-    if (cronInterval) clearInterval(cronInterval);
-    process.exit(1);
+    clearInterval(cronInterval);
+    throw error;
   }
 }
 
diff --git a/javascript/packages/orchestrator/src/providers/client.ts b/javascript/packages/orchestrator/src/providers/client.ts
index ccf557be9..6b09a90ca 100644
--- a/javascript/packages/orchestrator/src/providers/client.ts
+++ b/javascript/packages/orchestrator/src/providers/client.ts
@@ -103,13 +103,12 @@ export abstract class Client {
   abstract getLogsCommand(name: string): string;
 }
 
-let client: Client;
+let client: Client | undefined;
 export function getClient(): Client {
   if (!client) throw new Error("Client not initialized");
   return client;
 }
 
-export function setClient(c: Client) {
-  if (client) throw new Error("Client already initialized");
+export function setClient(c: Client | undefined) {
   client = c;
 }
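
Dropping the "Client already initialized" guard above is what allows the process-wide client to be re-pointed as the test runner moves between networks. A minimal sketch of the intended call pattern, assuming the exported `Client`/`setClient` from `providers/client.ts`; the `withNetworkClient` helper is hypothetical and only illustrates the ordering:

```typescript
import { Client, setClient } from "./client";

// Hypothetical helper: activate one network's client, run a callback, then clear it.
// Mirrors the test runner, which calls setClient(network?.client) before each assertion.
async function withNetworkClient<T>(
  networkClient: Client,
  fn: () => Promise<T>,
): Promise<T> {
  setClient(networkClient); // re-assigning no longer throws
  try {
    return await fn();
  } finally {
    setClient(undefined); // valid now that the setter accepts Client | undefined
  }
}
```
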
diff --git a/javascript/packages/orchestrator/src/providers/k8s/kubeClient.ts b/javascript/packages/orchestrator/src/providers/k8s/kubeClient.ts
index fb12987e2..59fe51d58 100644
--- a/javascript/packages/orchestrator/src/providers/k8s/kubeClient.ts
+++ b/javascript/packages/orchestrator/src/providers/k8s/kubeClient.ts
@@ -43,10 +43,6 @@ export function initClient(
   return client;
 }
 
-// Here we cache each file we upload from local
-// to just cp between pods and not upload again the same file.
-const fileUploadCache: any = {};
-
 export class KubeClient extends Client {
   namespace: string;
   chainId?: string;
@@ -59,6 +55,9 @@ export class KubeClient extends Client {
   localMagicFilepath: string;
   remoteDir: string;
   dataDir: string;
+  // Here we cache each file we upload from local
+  // to just cp between pods and not upload again the same file.
+  fileUploadCache: any = {};
 
   constructor(configPath: string, namespace: string, tmpDir: string) {
     super(configPath, namespace, tmpDir, "kubectl", "kubernetes");
@@ -421,7 +420,7 @@ export class KubeClient extends Client {
     const fileHash = getSha256(fileBuffer.toString());
     const parts = localFilePath.split("/");
     const fileName = parts[parts.length - 1];
-    if (!fileUploadCache[fileHash]) {
+    if (!this.fileUploadCache[fileHash]) {
       await this.uploadToFileserver(localFilePath, fileName, fileHash);
     }
 
@@ -863,7 +862,7 @@ export class KubeClient extends Client {
     debug("copyFileToPod", args);
     const result = await this.runCommand(args);
     debug(result);
-    fileUploadCache[fileHash] = fileName;
+    this.fileUploadCache[fileHash] = fileName;
   }
   getLogsCommand(name: string): string {
     return `kubectl logs -f ${name} -c ${name} -n ${this.namespace}`;
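
Moving `fileUploadCache` from module scope onto the `KubeClient` instance matters once several networks run in the same process: each network has its own namespace and fileserver, so a shared cache would report a file as already uploaded when it only exists in another namespace. A simplified stand-in (not the real `KubeClient`) showing the behaviour this buys:

```typescript
// Stand-in for the per-instance upload cache keyed by file hash.
class FileUploadCacheExample {
  private fileUploadCache: Record<string, string> = {};

  constructor(public readonly namespace: string) {}

  needsUpload(fileHash: string): boolean {
    return !this.fileUploadCache[fileHash];
  }

  markUploaded(fileHash: string, fileName: string): void {
    this.fileUploadCache[fileHash] = fileName;
  }
}

const netA = new FileUploadCacheExample("zombie-net-1");
const netB = new FileUploadCacheExample("zombie-net-2");
netA.markUploaded("sha256-abc", "chain-spec.json");
console.log(netA.needsUpload("sha256-abc")); // false: already on netA's fileserver
console.log(netB.needsUpload("sha256-abc")); // true: netB has never seen this file
```
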
diff --git a/javascript/packages/orchestrator/src/test-runner/index.ts b/javascript/packages/orchestrator/src/test-runner/index.ts
index b088cf098..290ed3bc7 100644
--- a/javascript/packages/orchestrator/src/test-runner/index.ts
+++ b/javascript/packages/orchestrator/src/test-runner/index.ts
@@ -11,6 +11,7 @@ import path from "path";
 import { Network, rebuildNetwork } from "../network";
 import { start } from "../orchestrator";
 import { Providers } from "../providers";
+import { setClient } from "../providers/client";
 import { LaunchConfig, TestDefinition } from "../types";
 import assertions from "./assertions";
 import commands from "./commands";
@@ -26,6 +27,87 @@ export interface BackchannelMap {
   [propertyName: string]: any;
 }
 
+function findNetwork(
+  networks: Network[],
+  nodeOrGroupName?: string,
+): Network | undefined {
+  if (!nodeOrGroupName) {
+    return networks[0];
+  }
+
+  const network = networks.find((network) => {
+    try {
+      network.getNodes(nodeOrGroupName);
+      return true;
+    } catch {
+      // continue searching
+    }
+  });
+
+  if (!network)
+    throw new Error(`Node or Group: ${nodeOrGroupName} not present`);
+
+  return network;
+}
+
+function showNetworkLogsLocation(
+  network: Network,
+  logsPath: string,
+  inCI: boolean,
+) {
+  console.log(
+    `\n\t${decorators.magenta(
+      "📓 To see the full logs of the nodes please go to:",
+    )}`,
+  );
+  switch (network.client.providerName) {
+    case "kubernetes":
+      if (inCI) {
+        // show links to grafana and also we need to move the logs to artifacts
+        const networkEndTime = new Date().getTime();
+        for (const node of network.relay) {
+          const loki_url = getLokiUrl(
+            network.namespace,
+            node.name,
+            network.networkStartTime!,
+            networkEndTime,
+          );
+          console.log(
+            `\t${decorators.magenta(node.name)}: ${decorators.green(loki_url)}`,
+          );
+        }
+
+        for (const [paraId, parachain] of Object.entries(network.paras)) {
+          console.log(`\n\tParaId: ${decorators.magenta(paraId)}`);
+          for (const node of parachain.nodes) {
+            const loki_url = getLokiUrl(
+              network.namespace,
+              node.name,
+              network.networkStartTime!,
+              networkEndTime,
+            );
+            console.log(
+              `\t\t${decorators.magenta(node.name)}: ${decorators.green(
+                loki_url,
+              )}`,
+            );
+          }
+        }
+
+        // logs are also collected as artifacts
+        console.log(
+          `\n\n\t ${decorators.yellow(
+            "📓 Logs are also available in the artifacts' pipeline in gitlab",
+          )}`,
+        );
+      }
+      break;
+    default:
+      console.log(`\n\t${decorators.magenta(logsPath)}`);
+      break;
+  }
+}
+
 export async function run(
   configBasePath: string,
   testName: string,
@@ -34,27 +116,33 @@ export async function run(
   inCI = false,
   concurrency = 1,
   silent = false,
-  runningNetworkSpecPath: string | undefined,
+  runningNetworksSpec: string[] | undefined,
   dir: string | undefined,
 ) {
   setSilent(silent);
-  let network: Network;
+  const networks: Network[] = [];
   const backchannelMap: BackchannelMap = {};
   let suiteName: string = testName;
   if (testDef.description) suiteName += `( ${testDef.description} )`;
 
-  // read network file
-  const networkConfigFilePath = fs.existsSync(testDef.network)
-    ? testDef.network
-    : path.resolve(configBasePath, testDef.network);
+  const networkConfigs: LaunchConfig[] = [];
+  for (let networkConfigFilePath of testDef.networks) {
+    // read network file
+    if (!fs.existsSync(networkConfigFilePath))
+      networkConfigFilePath = path.resolve(
+        configBasePath,
+        networkConfigFilePath,
+      );
+    const networkConfig = readNetworkConfig(networkConfigFilePath);
 
-  const config: LaunchConfig = readNetworkConfig(networkConfigFilePath);
+    // set the provider
+    if (!networkConfig.settings)
+      networkConfig.settings = { provider, timeout: DEFAULT_GLOBAL_TIMEOUT };
+    else networkConfig.settings.provider = provider;
 
-  // set the provider
-  if (!config.settings)
-    config.settings = { provider, timeout: DEFAULT_GLOBAL_TIMEOUT };
-  else config.settings.provider = provider;
+    networkConfigs.push(networkConfig);
+  }
 
   // find creds file
   const credsFile = inCI ? "config" : testDef.creds;
@@ -76,131 +164,93 @@ export async function run(
     if (credsFileExistInPath) creds = credsFileExistInPath + "/" + credsFile;
   }
 
-  if (!creds && config.settings.provider === "kubernetes")
-    throw new Error(`Invalid credential file path: ${credsFile}`);
+  for (const networkConfig of networkConfigs) {
+    if (!creds && networkConfig.settings.provider === "kubernetes")
+      throw new Error(`Invalid credential file path: ${credsFile}`);
+  }
 
   // create suite
   const suite = Suite.create(mocha.suite, suiteName);
 
   suite.beforeAll("launching", async function () {
-    const launchTimeout = config.settings?.timeout || 500;
-    this.timeout(launchTimeout * 1000);
-    try {
-      if (!runningNetworkSpecPath) {
-        console.log(`\t Launching network... this can take a while.`);
-        network = await start(creds!, config, {
-          spawnConcurrency: concurrency,
-          inCI,
-          silent,
-          dir,
-        });
-      } else {
-        const runningNetworkSpec: any = require(runningNetworkSpecPath);
-        if (provider !== runningNetworkSpec.client.providerName)
-          throw new Error(
-            `Invalid provider, the provider set doesn't match with the running network definition`,
+    for (const [networkConfigIdx, networkConfig] of networkConfigs.entries()) {
+      const launchTimeout = networkConfig.settings?.timeout || 500;
+      this.timeout(launchTimeout * 1000);
+
+      const runningNetworkSpecPath =
+        runningNetworksSpec && runningNetworksSpec[networkConfigIdx];
+      try {
+        if (runningNetworkSpecPath)
+          console.log("runningNetworkSpecPath", runningNetworkSpecPath);
+
+        let network: Network;
+        if (!runningNetworkSpecPath) {
+          console.log(
+            `\n\n\t Launching network ${testDef.networks[networkConfigIdx]} ... this can take a while.`,
           );
+          network = await start(creds!, networkConfig, {
+            spawnConcurrency: concurrency,
+            inCI,
+            silent,
+            dir,
+          });
+        } else {
+          const runningNetworkSpec: any = require(runningNetworkSpecPath);
+          if (provider !== runningNetworkSpec.client.providerName)
+            throw new Error(
+              `Invalid provider, the provider set doesn't match with the running network definition`,
+            );
+
+          const { client, namespace, tmpDir } = runningNetworkSpec;
+          // initialize the Client
+          const initClient = Providers.get(
+            runningNetworkSpec.client.providerName,
+          ).initClient(client.configPath, namespace, tmpDir);
+          // initialize the network
+          network = rebuildNetwork(initClient, runningNetworkSpec);
+        }
-        const { client, namespace, tmpDir } = runningNetworkSpec;
-        // initialize the Client
-        const initClient = Providers.get(
-          runningNetworkSpec.client.providerName,
-        ).initClient(client.configPath, namespace, tmpDir);
-        // initialize the network
-        network = rebuildNetwork(initClient, runningNetworkSpec);
+        networks.push(network);
+        network.showNetworkInfo(networkConfig.settings.provider);
+      } catch (err) {
+        console.log(
+          `\n${decorators.red(
+            "Error launching the network!",
+          )} \t ${decorators.bright(err)}`,
+        );
+        for (const network of networks) {
+          await network.stop();
+        }
+        exitMocha(100);
       }
-
-      network.showNetworkInfo(config.settings.provider);
-
-      await sleep(5 * 1000);
-      return;
-    } catch (err) {
-      console.log(
-        `\n${decorators.red(
-          "Error launching the network!",
-        )} \t ${decorators.bright(err)}`,
-      );
-      exitMocha(100);
     }
+
+    await sleep(5 * 1000);
+    return;
   });
 
   suite.afterAll("teardown", async function () {
     this.timeout(180 * 1000);
-    if (network && !network.wasRunning) {
-      const logsPath = await network.dumpLogs(false);
-      const tests = this.test?.parent?.tests;
-
-      if (tests) {
-        const failed = tests.filter((test) => {
-          return test.state !== "passed";
-        });
-        if (failed.length) {
-          console.log(
-            `\n\n\t${decorators.red("❌ One or more of your test failed...")}`,
-          );
-        }
+    const tests = this.test?.parent?.tests;
 
-        // All test passed, just remove the network
-        console.log(`\n\t ${decorators.green("Deleting network")}`);
-        await network.stop();
+    const failed = tests?.some((test) => test.state !== "passed") ?? false;
+    if (failed) {
+      console.log(
+        `\n\n\t${decorators.red("❌ One or more of your tests failed...")}`,
+      );
+    }
 
-      // show logs
+    for (const [networkIdx, network] of networks.entries()) {
+      if (network && !network.wasRunning) {
+        console.log("\n");
+        const logsPath = await network.dumpLogs(false);
         console.log(
-          `\n\n\t${decorators.magenta(
-            "📓 To see the full logs of the nodes please go to:",
+          `\n\t ${decorators.green(
+            `Deleting network ${testDef.networks[networkIdx]}`,
           )}`,
         );
-      switch (network.client.providerName) {
-        case "podman":
-        case "native":
-          console.log(`\n\t${decorators.magenta(logsPath)}`);
-          break;
-        case "kubernetes":
-          if (inCI) {
-            // show links to grafana and also we need to move the logs to artifacts
-            const networkEndtime = new Date().getTime();
-            for (const node of network.relay) {
-              const loki_url = getLokiUrl(
-                network.namespace,
-                node.name,
-                network.networkStartTime!,
-                networkEndtime,
-              );
-              console.log(
-                `\t${decorators.magenta(node.name)}: ${decorators.green(
-                  loki_url,
-                )}`,
-              );
-            }
-
-            for (const [paraId, parachain] of Object.entries(network.paras)) {
-              console.log(`\n\tParaId: ${decorators.magenta(paraId)}`);
-              for (const node of parachain.nodes) {
-                const loki_url = getLokiUrl(
-                  network.namespace,
-                  node.name,
-                  network.networkStartTime!,
-                  networkEndtime,
-                );
-                console.log(
-                  `\t\t${decorators.magenta(node.name)}: ${decorators.green(
-                    loki_url,
-                  )}`,
-                );
-              }
-            }
-
-            // logs are also collaected as artifacts
-            console.log(
-              `\n\n\t ${decorators.yellow(
-                "📓 Logs are also available in the artifacts' pipeline in gitlab",
-              )}`,
-            );
-          } else {
-            console.log(`\n\t${decorators.magenta(logsPath)}`);
-          }
-          break;
-      }
+        await network.stop();
+        showNetworkLogsLocation(network, logsPath, inCI);
       }
     }
     return;
@@ -220,10 +270,14 @@ export async function run(
       }
 
       const testFn = generator(assertion.parsed.args);
-      const test = new Test(
-        assertion.original_line,
-        async () => await testFn(network, backchannelMap, configBasePath),
-      );
+      const test = new Test(assertion.original_line, async () => {
+        // Find the first network that contains the node and run the test on it.
+        const network = findNetwork(networks, assertion.parsed.args.node_name);
+
+        setClient(network?.client);
+        await testFn(network, backchannelMap, configBasePath);
+        return;
+      });
       suite.addTest(test);
       test.timeout(0);
     }
diff --git a/javascript/packages/orchestrator/src/types.ts b/javascript/packages/orchestrator/src/types.ts
index c772c963f..fc5c2f9a4 100644
--- a/javascript/packages/orchestrator/src/types.ts
+++ b/javascript/packages/orchestrator/src/types.ts
@@ -288,7 +288,7 @@ export interface MultiAddressByNode {
 }
 
 export interface TestDefinition {
-  network: string;
+  networks: string[];
   creds: string;
   description?: string;
   assertions: Assertion[];
 }
diff --git a/tests/0014-multiple-networks.zndsl b/tests/0014-multiple-networks.zndsl
new file mode 100644
index 000000000..a8fb694b8
--- /dev/null
+++ b/tests/0014-multiple-networks.zndsl
@@ -0,0 +1,16 @@
+Description: Multiple Networks test
+Network: ./0014-network-1.toml
+Network: ./0014-network-2.toml
+Creds: config
+
+# network-1 - metrics
+alice: reports node_roles is 4
+alice: reports sub_libp2p_is_major_syncing is 0
+
+# network-1 - logs
+bob: log line matches glob "*rted #1*" within 10 seconds
+bob: log line matches "Imported #[0-9]+" within 10 seconds
+
+# network-2 - metrics
+a: reports node_roles is 4
+b: reports sub_libp2p_is_major_syncing is 0
\ No newline at end of file
diff --git a/tests/0014-network-1.toml b/tests/0014-network-1.toml
new file mode 100644
index 000000000..a802cdfaf
--- /dev/null
+++ b/tests/0014-network-1.toml
@@ -0,0 +1,16 @@
+[relaychain]
+default_image = "docker.io/parity/polkadot:latest"
+default_command = "polkadot"
+default_args = [ "-lparachain=debug" ]
+
+chain = "rococo-local"
+
+  [[relaychain.nodes]]
+  name = "alice"
+  validator = true
+
+  [[relaychain.nodes]]
+  name = "bob"
+  image = "docker.io/parity/polkadot:latest"
+  validator = true
+  args = ["--database=paritydb-experimental"]
diff --git a/tests/0014-network-2.toml b/tests/0014-network-2.toml
new file mode 100644
index 000000000..0dcccc334
--- /dev/null
+++ b/tests/0014-network-2.toml
@@ -0,0 +1,25 @@
+[relaychain]
+default_image = "docker.io/parity/polkadot:latest"
+default_command = "polkadot"
+default_args = [ "-lparachain=debug" ]
+
+chain = "rococo-local"
+
+  [[relaychain.node_groups]]
+  name = "a"
+  args = [ "-lparachain=debug", "--database=paritydb-experimental" ]
+  count = 5
+
+  [[relaychain.node_groups]]
+  name = "b"
+  count = 5
+
+[[parachains]]
+id = 100
+
+  [[parachains.collator_groups]]
+  count = 2
+  [parachains.collator_groups.collator]
+  name = "collator"
+  command = "polkadot-parachain"
+  image = "docker.io/parity/polkadot-parachain:latest"
\ No newline at end of file
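
Taken together, the new test exercises assertion routing across both networks: `findNetwork` hands each assertion to the first network that knows the named node or group, so it helps to keep names distinct across the two TOML files (`alice`/`bob` in `0014-network-1.toml`, the `a`/`b` groups in `0014-network-2.toml`). A small stand-in illustration of that resolution, using plain objects instead of the real `Network` class:

```typescript
// Stand-in networks exposing only the name lookup that routing relies on.
type StubNetwork = { label: string; names: Set<string> };

const network1: StubNetwork = {
  label: "0014-network-1",
  names: new Set(["alice", "bob"]),
};
const network2: StubNetwork = {
  label: "0014-network-2",
  names: new Set(["a", "b", "collator"]),
};

// Mirrors findNetwork: no name means the first network; otherwise the first match wins.
function routeAssertion(networks: StubNetwork[], nodeOrGroupName?: string): StubNetwork {
  if (!nodeOrGroupName) return networks[0];
  const found = networks.find((n) => n.names.has(nodeOrGroupName));
  if (!found) throw new Error(`Node or Group: ${nodeOrGroupName} not present`);
  return found;
}

console.log(routeAssertion([network1, network2], "alice").label); // 0014-network-1
console.log(routeAssertion([network1, network2], "b").label); // 0014-network-2
```
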