-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Initial release of cluster-standup-teardown module (#4)
* Initial release of cluster-standup-teardown module Signed-off-by: Marcus Noble <[email protected]> * Added cli documentation Signed-off-by: Marcus Noble <[email protected]> * Clean go mod Signed-off-by: Marcus Noble <[email protected]> * Handle lint issues Signed-off-by: Marcus Noble <[email protected]> * Added nancy ignore Signed-off-by: Marcus Noble <[email protected]> * Switch to using go-test instead of go-build Signed-off-by: Marcus Noble <[email protected]> * Move EKS clusterbuilder under capa Signed-off-by: Marcus Noble <[email protected]> --------- Signed-off-by: Marcus Noble <[email protected]>
- Loading branch information
1 parent
c30f754
commit 159a137
Showing
33 changed files
with
2,586 additions
and
8 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,3 +1,14 @@ | ||
# Nancy vulnerability-scanner ignore list.
# Each entry is a CVE ID to exclude from scanning, optionally with an
# `until=<date>` expiry after which the exclusion stops applying.

# Affects all versions of archiver which is required by vault.
# Taken from: https://github.com/giantswarm/opsctl/pull/1072/files#diff-bbe4a7fb12c43622bce7c6840c770e9995be614626a219942ca138403629cb69R1
CVE-2019-10743 until=2021-10-17

# google.golang.org/[email protected]
CVE-2024-24786

# helm.sh/helm/[email protected]
CVE-2019-25210
CVE-2024-26147
CVE-2024-25620

# k8s.io/[email protected]
CVE-2020-8561
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,25 @@ | ||
# Build stage: compile the standup and teardown CLIs.
FROM golang:1.21 AS build

WORKDIR /app

# Copy the module manifests first so the dependency download layer is
# cached independently of source-code changes.
# (COPY is preferred over ADD for plain local files.)
COPY go.mod go.sum ./

RUN go mod download

COPY . .

# CGO disabled + GOOS=linux produces static binaries that run on the
# slim runtime image below.
RUN CGO_ENABLED=0 GOOS=linux go build -o standup ./cmd/standup/
RUN CGO_ENABLED=0 GOOS=linux go build -o teardown ./cmd/teardown/

# Runtime stage: minimal Debian with CA certificates so the tools can
# make TLS connections to management clusters.
FROM debian:bookworm-slim

WORKDIR /app

RUN apt-get update \
    && apt-get install --no-install-recommends --no-install-suggests -y ca-certificates \
    && rm -rf /var/lib/apt/lists/*

COPY --from=build /app /app

# Put the built binaries on PATH (key=value is the recommended ENV form).
ENV PATH="/app:$PATH"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,217 @@ | ||
package main | ||
|
||
import ( | ||
"context" | ||
"encoding/json" | ||
"fmt" | ||
"os" | ||
"path" | ||
"time" | ||
|
||
. "github.com/onsi/gomega" | ||
|
||
"github.com/giantswarm/clustertest" | ||
"github.com/giantswarm/clustertest/pkg/application" | ||
"github.com/giantswarm/clustertest/pkg/client" | ||
"github.com/giantswarm/clustertest/pkg/organization" | ||
"github.com/giantswarm/clustertest/pkg/utils" | ||
"github.com/giantswarm/clustertest/pkg/wait" | ||
"github.com/spf13/cobra" | ||
cr "sigs.k8s.io/controller-runtime/pkg/client" | ||
|
||
"github.com/giantswarm/cluster-standup-teardown/cmd/standup/types" | ||
"github.com/giantswarm/cluster-standup-teardown/pkg/clusterbuilder/providers/capa" | ||
"github.com/giantswarm/cluster-standup-teardown/pkg/clusterbuilder/providers/capv" | ||
"github.com/giantswarm/cluster-standup-teardown/pkg/clusterbuilder/providers/capvcd" | ||
"github.com/giantswarm/cluster-standup-teardown/pkg/clusterbuilder/providers/capz" | ||
"github.com/giantswarm/cluster-standup-teardown/pkg/standup" | ||
) | ||
|
||
var (
	// standupCmd is the root cobra command for this binary; run() does the
	// actual work.
	standupCmd = &cobra.Command{
		Use:     "standup",
		Long:    "Standup create a test workload cluster in a standard, reproducible way.\nA valid Management Cluster kubeconfig must be available and set to the `E2E_KUBECONFIG` environment variable.",
		Example: "standup --provider aws --context capa",
		Args:    cobra.NoArgs,
		RunE:    run,
	}

	// Values bound to command-line flags in init().
	provider          string
	kubeContext       string
	clusterValues     string
	defaultAppValues  string
	clusterVersion    string
	defaultAppVersion string
	outputDirectory   string

	controlPlaneNodes int
	workerNodes       int

	// Functions to run after cluster creation to confirm it is up and ready to use.
	// NOTE(review): wait.For errors are deliberately discarded with `_ =` —
	// presumably the readiness wait is best-effort; confirm before relying on it.
	clusterReadyFns []func(wcClient *client.Client) = []func(wcClient *client.Client){
		// Wait (up to 20 minutes, polling every 15s) for the expected number
		// of ready nodes carrying the control-plane label.
		func(wcClient *client.Client) {
			_ = wait.For(
				wait.AreNumNodesReady(context.Background(), wcClient, controlPlaneNodes, &cr.MatchingLabels{"node-role.kubernetes.io/control-plane": ""}),
				wait.WithTimeout(20*time.Minute),
				wait.WithInterval(15*time.Second),
			)
		},
		// Wait for the expected number of ready worker nodes — i.e. nodes
		// without the control-plane label.
		func(wcClient *client.Client) {
			_ = wait.For(
				wait.AreNumNodesReady(context.Background(), wcClient, workerNodes, client.DoesNotHaveLabels{"node-role.kubernetes.io/control-plane"}),
				wait.WithTimeout(20*time.Minute),
				wait.WithInterval(15*time.Second),
			)
		},
	}
)
|
||
func init() { | ||
standupCmd.Flags().StringVar(&provider, "provider", "", "The provider (required)") | ||
standupCmd.Flags().StringVar(&kubeContext, "context", "", "The kubernetes context to use (required)") | ||
|
||
standupCmd.Flags().StringVar(&clusterValues, "cluster-values", "", "The path to the cluster app values") | ||
standupCmd.Flags().StringVar(&defaultAppValues, "default-apps-values", "", "The path to the default-apps app values") | ||
standupCmd.Flags().IntVar(&controlPlaneNodes, "control-plane-nodes", 1, "The number of control plane nodes to wait for being ready") | ||
standupCmd.Flags().IntVar(&workerNodes, "worker-nodes", 1, "The number of worker nodes to wait for being ready") | ||
standupCmd.Flags().StringVar(&outputDirectory, "output", "./", "The directory to store the results.json and kubeconfig in") | ||
standupCmd.Flags().StringVar(&clusterVersion, "cluster-version", "latest", "The version of the cluster app to install") | ||
standupCmd.Flags().StringVar(&defaultAppVersion, "default-apps-version", "latest", "The version of the default-apps app to install") | ||
|
||
_ = standupCmd.MarkFlagRequired("provider") | ||
_ = standupCmd.MarkFlagRequired("context") | ||
} | ||
|
||
func main() { | ||
if err := standupCmd.Execute(); err != nil { | ||
os.Exit(1) | ||
} | ||
} | ||
|
||
func run(cmd *cobra.Command, args []string) error { | ||
// Required to be able to use our module with Gomega assertions without Ginkgo | ||
RegisterFailHandler(func(message string, callerSkip ...int) { | ||
panic(message) | ||
}) | ||
|
||
cmd.SilenceUsage = true | ||
|
||
ctx := context.Background() | ||
|
||
framework, err := clustertest.New(kubeContext) | ||
if err != nil { | ||
return err | ||
} | ||
|
||
provider := application.Provider(provider) | ||
clusterName := utils.GenerateRandomName("t") | ||
orgName := utils.GenerateRandomName("t") | ||
|
||
fmt.Printf("Standing up cluster...\n\nProvider:\t\t%s\nCluster Name:\t\t%s\nOrg Name:\t\t%s\nResults Directory:\t%s\n\n", provider, clusterName, orgName, outputDirectory) | ||
|
||
var cluster *application.Cluster | ||
switch provider { | ||
case application.ProviderVSphere: | ||
clusterBuilder := capv.ClusterBuilder{} | ||
cluster = clusterBuilder.NewClusterApp(clusterName, orgName, clusterValues, defaultAppValues). | ||
WithAppVersions(clusterVersion, defaultAppVersion) | ||
case application.ProviderCloudDirector: | ||
clusterBuilder := capvcd.ClusterBuilder{} | ||
cluster = clusterBuilder.NewClusterApp(clusterName, orgName, clusterValues, defaultAppValues). | ||
WithAppVersions(clusterVersion, defaultAppVersion) | ||
case application.ProviderAWS: | ||
clusterBuilder := capa.ClusterBuilder{} | ||
cluster = clusterBuilder.NewClusterApp(clusterName, orgName, clusterValues, defaultAppValues). | ||
WithAppVersions(clusterVersion, defaultAppVersion) | ||
case application.ProviderEKS: | ||
clusterBuilder := capa.ManagedClusterBuilder{} | ||
cluster = clusterBuilder.NewClusterApp(clusterName, orgName, clusterValues, defaultAppValues). | ||
WithAppVersions(clusterVersion, defaultAppVersion) | ||
// As EKS has no control plane we only check for worker nodes being ready | ||
clusterReadyFns = []func(wcClient *client.Client){ | ||
func(wcClient *client.Client) { | ||
_ = wait.For( | ||
wait.AreNumNodesReady(context.Background(), wcClient, workerNodes, &cr.MatchingLabels{"node-role.kubernetes.io/worker": ""}), | ||
wait.WithTimeout(20*time.Minute), | ||
wait.WithInterval(15*time.Second), | ||
) | ||
}, | ||
} | ||
case application.ProviderAzure: | ||
clusterBuilder := capz.ClusterBuilder{} | ||
cluster = clusterBuilder.NewClusterApp(clusterName, orgName, clusterValues, defaultAppValues). | ||
WithAppVersions(clusterVersion, defaultAppVersion) | ||
default: | ||
cluster = application.NewClusterApp(clusterName, provider). | ||
WithAppVersions(clusterVersion, defaultAppVersion). | ||
WithOrg(organization.New(orgName)). | ||
WithAppValuesFile(path.Clean(clusterValues), path.Clean(defaultAppValues), &application.TemplateValues{ | ||
ClusterName: clusterName, | ||
Organization: orgName, | ||
}) | ||
} | ||
|
||
// Create the results file with the details we have already incase the cluster creation fails | ||
result := types.StandupResult{ | ||
Provider: string(provider), | ||
ClusterName: clusterName, | ||
OrgName: orgName, | ||
Namespace: cluster.GetNamespace(), | ||
ClusterVersion: cluster.ClusterApp.Version, | ||
KubeconfigPath: "", | ||
} | ||
|
||
resultsFile, err := os.Create(path.Join(outputDirectory, "results.json")) | ||
if err != nil { | ||
return err | ||
} | ||
|
||
resultBytes, err := json.Marshal(result) | ||
if err != nil { | ||
return err | ||
} | ||
_, err = resultsFile.Write(resultBytes) | ||
if err != nil { | ||
return err | ||
} | ||
|
||
resultsFile.Close() | ||
|
||
cluster, err = standup.New(framework, false, clusterReadyFns...).Standup(cluster) | ||
if err != nil { | ||
return err | ||
} | ||
|
||
// Save the kubeconfig for the WC | ||
kubeconfigFile, err := os.Create(path.Join(outputDirectory, "kubeconfig")) | ||
if err != nil { | ||
return err | ||
} | ||
defer kubeconfigFile.Close() | ||
|
||
kubeconfig, err := framework.MC().GetClusterKubeConfig(ctx, cluster.Name, cluster.GetNamespace()) | ||
if err != nil { | ||
return err | ||
} | ||
_, err = kubeconfigFile.Write([]byte(kubeconfig)) | ||
if err != nil { | ||
return err | ||
} | ||
|
||
// Update the results file with the kubeconfig path | ||
result.KubeconfigPath = kubeconfigFile.Name() | ||
|
||
resultsFile, err = os.Create(path.Join(outputDirectory, "results.json")) | ||
if err != nil { | ||
return err | ||
} | ||
defer resultsFile.Close() | ||
|
||
resultBytes, err = json.Marshal(result) | ||
if err != nil { | ||
return err | ||
} | ||
_, err = resultsFile.Write(resultBytes) | ||
|
||
return err | ||
} |
Oops, something went wrong.