diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml index 148852a..fd34f69 100644 --- a/.github/workflows/docker.yaml +++ b/.github/workflows/docker.yaml @@ -31,15 +31,10 @@ jobs: - name: Checkout uses: actions/checkout@v3 - - name: Set up Go 1.21 + - name: Setup Go uses: actions/setup-go@v4 with: - go-version: '1.21.x' - - - name: Lint - uses: golangci/golangci-lint-action@v3 - with: - args: --build-tags integration -p bugs -p unused --timeout=3m + go-version-file: 'go.mod' - name: Make tag run: | @@ -49,7 +44,7 @@ jobs: - name: Build run: - make all + make build - name: Build and push image uses: docker/build-push-action@v4 @@ -57,3 +52,35 @@ jobs: context: . push: true tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.tag }} + + integration: + name: Integration Test + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Setup Go + uses: actions/setup-go@v4 + with: + go-version-file: 'go.mod' + cache: false + + - name: Lint + uses: golangci/golangci-lint-action@v3 + with: + args: --build-tags integration -p bugs -p unused --timeout=3m + + - name: Test + run: | + make test + + - name: Create k8s Kind Cluster + uses: helm/kind-action@v1.8.0 + with: + install_only: true + + - name: Test + run: | + make test-integration diff --git a/Makefile b/Makefile index 4916174..fc881c0 100644 --- a/Makefile +++ b/Makefile @@ -14,30 +14,39 @@ LINKMODE := -extldflags '-static -s -w' \ -X 'github.com/metal-stack/v.GitSHA1=$(SHA)' \ -X 'github.com/metal-stack/v.BuildDate=$(BUILDDATE)' -.PHONY: all -all: +KUBECONFIG := $(shell pwd)/.kubeconfig +GO_RUN := $(or $(GO_RUN),) +ifneq ($(GO_RUN),) +GO_RUN_ARG := -run $(GO_RUN) +endif + +.PHONY: build +build: go mod tidy go build -ldflags "$(LINKMODE)" -tags 'osusergo netgo static_build' -o bin/backup-restore-sidecar github.com/metal-stack/backup-restore-sidecar/cmd strip bin/backup-restore-sidecar +.PHONY: test +test: build + go test -cover ./... + +.PHONY: test-integration +test-integration: kind-cluster-create + kind --name backup-restore-sidecar load docker-image ghcr.io/metal-stack/backup-restore-sidecar:latest + KUBECONFIG=$(KUBECONFIG) go test $(GO_RUN_ARG) -tags=integration -count 1 -v -p 1 -timeout 10m ./... + .PHONY: proto proto: make -C proto protoc .PHONY: dockerimage -dockerimage: all +dockerimage: build docker build -t ghcr.io/metal-stack/backup-restore-sidecar:${DOCKER_TAG} . -.PHONY: dockerpush -dockerpush: - docker push ghcr.io/metal-stack/backup-restore-sidecar:${DOCKER_TAG} - # # # # the following tasks can be used to set up a development environment # # # -KUBECONFIG := $(shell pwd)/.kubeconfig - .PHONY: start-postgres start-postgres: $(MAKE) start DB=postgres @@ -70,6 +79,7 @@ kind-cluster-create: dockerimage @if ! kind get clusters | grep backup-restore-sidecar > /dev/null; then \ kind create cluster \ --name backup-restore-sidecar \ + --config kind.yaml \ --kubeconfig $(KUBECONFIG); fi .PHONY: cleanup diff --git a/api/v1/backup.pb.go b/api/v1/backup.pb.go new file mode 100644 index 0000000..f3e90dc --- /dev/null +++ b/api/v1/backup.pb.go @@ -0,0 +1,419 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: v1/backup.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ListBackupsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ListBackupsRequest) Reset() { + *x = ListBackupsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_backup_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListBackupsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListBackupsRequest) ProtoMessage() {} + +func (x *ListBackupsRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_backup_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListBackupsRequest.ProtoReflect.Descriptor instead. +func (*ListBackupsRequest) Descriptor() ([]byte, []int) { + return file_v1_backup_proto_rawDescGZIP(), []int{0} +} + +type BackupListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Backups []*Backup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` +} + +func (x *BackupListResponse) Reset() { + *x = BackupListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_backup_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BackupListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupListResponse) ProtoMessage() {} + +func (x *BackupListResponse) ProtoReflect() protoreflect.Message { + mi := &file_v1_backup_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupListResponse.ProtoReflect.Descriptor instead. 
+func (*BackupListResponse) Descriptor() ([]byte, []int) { + return file_v1_backup_proto_rawDescGZIP(), []int{1} +} + +func (x *BackupListResponse) GetBackups() []*Backup { + if x != nil { + return x.Backups + } + return nil +} + +type Backup struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (x *Backup) Reset() { + *x = Backup{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_backup_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Backup) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Backup) ProtoMessage() {} + +func (x *Backup) ProtoReflect() protoreflect.Message { + mi := &file_v1_backup_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Backup.ProtoReflect.Descriptor instead. +func (*Backup) Descriptor() ([]byte, []int) { + return file_v1_backup_proto_rawDescGZIP(), []int{2} +} + +func (x *Backup) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Backup) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Backup) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +type RestoreBackupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *RestoreBackupRequest) Reset() { + *x = RestoreBackupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_backup_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreBackupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreBackupRequest) ProtoMessage() {} + +func (x *RestoreBackupRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_backup_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestoreBackupRequest.ProtoReflect.Descriptor instead. 
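// A minimal client-side sketch of the BackupService defined by this proto (the
// generated client constructor appears in backup_grpc.pb.go further below).
// conn stands for an already-dialed grpc.ClientConnInterface and the restore
// version value is a placeholder; both are assumptions, not part of this change:
//
//	client := NewBackupServiceClient(conn)
//	resp, err := client.ListBackups(ctx, &ListBackupsRequest{})
//	if err != nil { /* handle error */ }
//	for _, b := range resp.GetBackups() {
//		fmt.Println(b.GetName(), b.GetVersion(), b.GetTimestamp().AsTime())
//	}
//	_, err = client.RestoreBackup(ctx, &RestoreBackupRequest{Version: "<version>"})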
+func (*RestoreBackupRequest) Descriptor() ([]byte, []int) { + return file_v1_backup_proto_rawDescGZIP(), []int{3} +} + +func (x *RestoreBackupRequest) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +type RestoreBackupResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RestoreBackupResponse) Reset() { + *x = RestoreBackupResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_backup_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreBackupResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreBackupResponse) ProtoMessage() {} + +func (x *RestoreBackupResponse) ProtoReflect() protoreflect.Message { + mi := &file_v1_backup_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestoreBackupResponse.ProtoReflect.Descriptor instead. +func (*RestoreBackupResponse) Descriptor() ([]byte, []int) { + return file_v1_backup_proto_rawDescGZIP(), []int{4} +} + +var File_v1_backup_proto protoreflect.FileDescriptor + +var file_v1_backup_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x76, 0x31, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x02, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x14, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x3a, 0x0a, 0x12, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x24, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, + 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x22, 0x70, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x30, 0x0a, 0x14, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x17, 0x0a, 0x15, + 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x94, 0x01, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3d, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, + 0x61, 0x63, 
0x6b, 0x75, 0x70, 0x73, 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x18, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x67, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x6c, 0x2d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2f, 0x64, 0x72, + 0x6f, 0x70, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0xa2, + 0x02, 0x03, 0x56, 0x58, 0x58, 0xaa, 0x02, 0x02, 0x56, 0x31, 0xca, 0x02, 0x02, 0x56, 0x31, 0xe2, + 0x02, 0x0e, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x02, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_v1_backup_proto_rawDescOnce sync.Once + file_v1_backup_proto_rawDescData = file_v1_backup_proto_rawDesc +) + +func file_v1_backup_proto_rawDescGZIP() []byte { + file_v1_backup_proto_rawDescOnce.Do(func() { + file_v1_backup_proto_rawDescData = protoimpl.X.CompressGZIP(file_v1_backup_proto_rawDescData) + }) + return file_v1_backup_proto_rawDescData +} + +var file_v1_backup_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_v1_backup_proto_goTypes = []interface{}{ + (*ListBackupsRequest)(nil), // 0: v1.ListBackupsRequest + (*BackupListResponse)(nil), // 1: v1.BackupListResponse + (*Backup)(nil), // 2: v1.Backup + (*RestoreBackupRequest)(nil), // 3: v1.RestoreBackupRequest + (*RestoreBackupResponse)(nil), // 4: v1.RestoreBackupResponse + (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp +} +var file_v1_backup_proto_depIdxs = []int32{ + 2, // 0: v1.BackupListResponse.backups:type_name -> v1.Backup + 5, // 1: v1.Backup.timestamp:type_name -> google.protobuf.Timestamp + 0, // 2: v1.BackupService.ListBackups:input_type -> v1.ListBackupsRequest + 3, // 3: v1.BackupService.RestoreBackup:input_type -> v1.RestoreBackupRequest + 1, // 4: v1.BackupService.ListBackups:output_type -> v1.BackupListResponse + 4, // 5: v1.BackupService.RestoreBackup:output_type -> v1.RestoreBackupResponse + 4, // [4:6] is the sub-list for method output_type + 2, // [2:4] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_v1_backup_proto_init() } +func file_v1_backup_proto_init() { + if File_v1_backup_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_v1_backup_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListBackupsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_backup_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v 
:= v.(*BackupListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_backup_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Backup); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_backup_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestoreBackupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_backup_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestoreBackupResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_v1_backup_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_v1_backup_proto_goTypes, + DependencyIndexes: file_v1_backup_proto_depIdxs, + MessageInfos: file_v1_backup_proto_msgTypes, + }.Build() + File_v1_backup_proto = out.File + file_v1_backup_proto_rawDesc = nil + file_v1_backup_proto_goTypes = nil + file_v1_backup_proto_depIdxs = nil +} diff --git a/api/v1/backup_grpc.pb.go b/api/v1/backup_grpc.pb.go new file mode 100644 index 0000000..a1d92e2 --- /dev/null +++ b/api/v1/backup_grpc.pb.go @@ -0,0 +1,144 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: v1/backup.proto + +package v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + BackupService_ListBackups_FullMethodName = "/v1.BackupService/ListBackups" + BackupService_RestoreBackup_FullMethodName = "/v1.BackupService/RestoreBackup" +) + +// BackupServiceClient is the client API for BackupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type BackupServiceClient interface { + ListBackups(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*BackupListResponse, error) + RestoreBackup(ctx context.Context, in *RestoreBackupRequest, opts ...grpc.CallOption) (*RestoreBackupResponse, error) +} + +type backupServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewBackupServiceClient(cc grpc.ClientConnInterface) BackupServiceClient { + return &backupServiceClient{cc} +} + +func (c *backupServiceClient) ListBackups(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*BackupListResponse, error) { + out := new(BackupListResponse) + err := c.cc.Invoke(ctx, BackupService_ListBackups_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) RestoreBackup(ctx context.Context, in *RestoreBackupRequest, opts ...grpc.CallOption) (*RestoreBackupResponse, error) { + out := new(RestoreBackupResponse) + err := c.cc.Invoke(ctx, BackupService_RestoreBackup_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BackupServiceServer is the server API for BackupService service. +// All implementations should embed UnimplementedBackupServiceServer +// for forward compatibility +type BackupServiceServer interface { + ListBackups(context.Context, *ListBackupsRequest) (*BackupListResponse, error) + RestoreBackup(context.Context, *RestoreBackupRequest) (*RestoreBackupResponse, error) +} + +// UnimplementedBackupServiceServer should be embedded to have forward compatible implementations. +type UnimplementedBackupServiceServer struct { +} + +func (UnimplementedBackupServiceServer) ListBackups(context.Context, *ListBackupsRequest) (*BackupListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListBackups not implemented") +} +func (UnimplementedBackupServiceServer) RestoreBackup(context.Context, *RestoreBackupRequest) (*RestoreBackupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RestoreBackup not implemented") +} + +// UnsafeBackupServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to BackupServiceServer will +// result in compilation errors. +type UnsafeBackupServiceServer interface { + mustEmbedUnimplementedBackupServiceServer() +} + +func RegisterBackupServiceServer(s grpc.ServiceRegistrar, srv BackupServiceServer) { + s.RegisterService(&BackupService_ServiceDesc, srv) +} + +func _BackupService_ListBackups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBackupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).ListBackups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_ListBackups_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).ListBackups(ctx, req.(*ListBackupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_RestoreBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestoreBackupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).RestoreBackup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_RestoreBackup_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).RestoreBackup(ctx, req.(*RestoreBackupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// BackupService_ServiceDesc is the grpc.ServiceDesc for BackupService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var BackupService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "v1.BackupService", + HandlerType: (*BackupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListBackups", + Handler: _BackupService_ListBackups_Handler, + }, + { + MethodName: "RestoreBackup", + Handler: _BackupService_RestoreBackup_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "v1/backup.proto", +} diff --git a/api/v1/database.pb.go b/api/v1/database.pb.go new file mode 100644 index 0000000..341bf27 --- /dev/null +++ b/api/v1/database.pb.go @@ -0,0 +1,197 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: v1/database.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CreateBackupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CreateBackupRequest) Reset() { + *x = CreateBackupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_database_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateBackupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateBackupRequest) ProtoMessage() {} + +func (x *CreateBackupRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_database_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateBackupRequest.ProtoReflect.Descriptor instead. +func (*CreateBackupRequest) Descriptor() ([]byte, []int) { + return file_v1_database_proto_rawDescGZIP(), []int{0} +} + +type CreateBackupResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CreateBackupResponse) Reset() { + *x = CreateBackupResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_database_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateBackupResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateBackupResponse) ProtoMessage() {} + +func (x *CreateBackupResponse) ProtoReflect() protoreflect.Message { + mi := &file_v1_database_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateBackupResponse.ProtoReflect.Descriptor instead. 
+func (*CreateBackupResponse) Descriptor() ([]byte, []int) { + return file_v1_database_proto_rawDescGZIP(), []int{1} +} + +var File_v1_database_proto protoreflect.FileDescriptor + +var file_v1_database_proto_rawDesc = []byte{ + 0x0a, 0x11, 0x76, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76, 0x31, 0x22, 0x15, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, + 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x54, 0x0a, 0x0f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, + 0x73, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x69, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x6c, 0x2d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2f, + 0x64, 0x72, 0x6f, 0x70, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, + 0x31, 0xa2, 0x02, 0x03, 0x56, 0x58, 0x58, 0xaa, 0x02, 0x02, 0x56, 0x31, 0xca, 0x02, 0x02, 0x56, + 0x31, 0xe2, 0x02, 0x0e, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0xea, 0x02, 0x02, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_v1_database_proto_rawDescOnce sync.Once + file_v1_database_proto_rawDescData = file_v1_database_proto_rawDesc +) + +func file_v1_database_proto_rawDescGZIP() []byte { + file_v1_database_proto_rawDescOnce.Do(func() { + file_v1_database_proto_rawDescData = protoimpl.X.CompressGZIP(file_v1_database_proto_rawDescData) + }) + return file_v1_database_proto_rawDescData +} + +var file_v1_database_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_v1_database_proto_goTypes = []interface{}{ + (*CreateBackupRequest)(nil), // 0: v1.CreateBackupRequest + (*CreateBackupResponse)(nil), // 1: v1.CreateBackupResponse +} +var file_v1_database_proto_depIdxs = []int32{ + 0, // 0: v1.DatabaseService.CreateBackup:input_type -> v1.CreateBackupRequest + 1, // 1: v1.DatabaseService.CreateBackup:output_type -> v1.CreateBackupResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_v1_database_proto_init() } +func file_v1_database_proto_init() { + if File_v1_database_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_v1_database_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateBackupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_database_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*CreateBackupResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_v1_database_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_v1_database_proto_goTypes, + DependencyIndexes: file_v1_database_proto_depIdxs, + MessageInfos: file_v1_database_proto_msgTypes, + }.Build() + File_v1_database_proto = out.File + file_v1_database_proto_rawDesc = nil + file_v1_database_proto_goTypes = nil + file_v1_database_proto_depIdxs = nil +} diff --git a/api/v1/database_grpc.pb.go b/api/v1/database_grpc.pb.go new file mode 100644 index 0000000..9838e25 --- /dev/null +++ b/api/v1/database_grpc.pb.go @@ -0,0 +1,107 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: v1/database.proto + +package v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + DatabaseService_CreateBackup_FullMethodName = "/v1.DatabaseService/CreateBackup" +) + +// DatabaseServiceClient is the client API for DatabaseService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type DatabaseServiceClient interface { + CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*CreateBackupResponse, error) +} + +type databaseServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewDatabaseServiceClient(cc grpc.ClientConnInterface) DatabaseServiceClient { + return &databaseServiceClient{cc} +} + +func (c *databaseServiceClient) CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*CreateBackupResponse, error) { + out := new(CreateBackupResponse) + err := c.cc.Invoke(ctx, DatabaseService_CreateBackup_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DatabaseServiceServer is the server API for DatabaseService service. +// All implementations should embed UnimplementedDatabaseServiceServer +// for forward compatibility +type DatabaseServiceServer interface { + CreateBackup(context.Context, *CreateBackupRequest) (*CreateBackupResponse, error) +} + +// UnimplementedDatabaseServiceServer should be embedded to have forward compatible implementations. +type UnimplementedDatabaseServiceServer struct { +} + +func (UnimplementedDatabaseServiceServer) CreateBackup(context.Context, *CreateBackupRequest) (*CreateBackupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateBackup not implemented") +} + +// UnsafeDatabaseServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DatabaseServiceServer will +// result in compilation errors. 
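// A minimal sketch of a server-side implementation that wires CreateBackup to
// the Backuper from cmd/internal/backup; the concrete type, its field names and
// the chosen gRPC codes are assumptions, not shown in this change:
//
//	type databaseService struct {
//		backuper *backup.Backuper
//	}
//
//	func (s *databaseService) CreateBackup(ctx context.Context, _ *CreateBackupRequest) (*CreateBackupResponse, error) {
//		if err := s.backuper.CreateBackup(ctx); err != nil {
//			if errors.Is(err, constants.ErrBackupAlreadyInProgress) {
//				return nil, status.Error(codes.AlreadyExists, err.Error())
//			}
//			return nil, status.Error(codes.Internal, err.Error())
//		}
//		return &CreateBackupResponse{}, nil
//	}
//
//	// registered via: RegisterDatabaseServiceServer(grpcServer, &databaseService{backuper: b})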
+type UnsafeDatabaseServiceServer interface { + mustEmbedUnimplementedDatabaseServiceServer() +} + +func RegisterDatabaseServiceServer(s grpc.ServiceRegistrar, srv DatabaseServiceServer) { + s.RegisterService(&DatabaseService_ServiceDesc, srv) +} + +func _DatabaseService_CreateBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateBackupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServiceServer).CreateBackup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DatabaseService_CreateBackup_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServiceServer).CreateBackup(ctx, req.(*CreateBackupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// DatabaseService_ServiceDesc is the grpc.ServiceDesc for DatabaseService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var DatabaseService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "v1.DatabaseService", + HandlerType: (*DatabaseServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateBackup", + Handler: _DatabaseService_CreateBackup_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "v1/database.proto", +} diff --git a/api/v1/initializer.pb.go b/api/v1/initializer.pb.go index 3adafa6..ab2639d 100644 --- a/api/v1/initializer.pb.go +++ b/api/v1/initializer.pb.go @@ -72,14 +72,14 @@ func (StatusResponse_InitializerStatus) EnumDescriptor() ([]byte, []int) { return file_v1_initializer_proto_rawDescGZIP(), []int{1, 0} } -type Empty struct { +type StatusRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } -func (x *Empty) Reset() { - *x = Empty{} +func (x *StatusRequest) Reset() { + *x = StatusRequest{} if protoimpl.UnsafeEnabled { mi := &file_v1_initializer_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -87,13 +87,13 @@ func (x *Empty) Reset() { } } -func (x *Empty) String() string { +func (x *StatusRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Empty) ProtoMessage() {} +func (*StatusRequest) ProtoMessage() {} -func (x *Empty) ProtoReflect() protoreflect.Message { +func (x *StatusRequest) ProtoReflect() protoreflect.Message { mi := &file_v1_initializer_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -105,8 +105,8 @@ func (x *Empty) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Empty.ProtoReflect.Descriptor instead. -func (*Empty) Descriptor() ([]byte, []int) { +// Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead. 
+func (*StatusRequest) Descriptor() ([]byte, []int) { return file_v1_initializer_proto_rawDescGZIP(), []int{0} } @@ -169,22 +169,23 @@ var File_v1_initializer_proto protoreflect.FileDescriptor var file_v1_initializer_proto_rawDesc = []byte{ 0x0a, 0x14, 0x76, 0x31, 0x2f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76, 0x31, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x22, 0xb3, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x49, - 0x0a, 0x11, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x49, 0x4e, 0x47, 0x10, - 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, - 0x12, 0x08, 0x0a, 0x04, 0x44, 0x4f, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, - 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x32, 0x3d, 0x0a, 0x12, 0x49, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0x27, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x09, 0x2e, 0x76, 0x31, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76, 0x31, 0x22, 0x0f, 0x0a, 0x0d, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xb3, 0x01, 0x0a, 0x0e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x49, 0x0a, 0x11, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0c, 0x0a, 0x08, 0x43, + 0x48, 0x45, 0x43, 0x4b, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x53, + 0x54, 0x4f, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x4f, 0x4e, 0x45, + 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, + 0x03, 0x32, 0x45, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x11, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 
0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6c, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, @@ -211,12 +212,12 @@ var file_v1_initializer_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_v1_initializer_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_v1_initializer_proto_goTypes = []interface{}{ (StatusResponse_InitializerStatus)(0), // 0: v1.StatusResponse.InitializerStatus - (*Empty)(nil), // 1: v1.Empty + (*StatusRequest)(nil), // 1: v1.StatusRequest (*StatusResponse)(nil), // 2: v1.StatusResponse } var file_v1_initializer_proto_depIdxs = []int32{ 0, // 0: v1.StatusResponse.status:type_name -> v1.StatusResponse.InitializerStatus - 1, // 1: v1.InitializerService.Status:input_type -> v1.Empty + 1, // 1: v1.InitializerService.Status:input_type -> v1.StatusRequest 2, // 2: v1.InitializerService.Status:output_type -> v1.StatusResponse 2, // [2:3] is the sub-list for method output_type 1, // [1:2] is the sub-list for method input_type @@ -232,7 +233,7 @@ func file_v1_initializer_proto_init() { } if !protoimpl.UnsafeEnabled { file_v1_initializer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Empty); i { + switch v := v.(*StatusRequest); i { case 0: return &v.state case 1: diff --git a/api/v1/initializer_grpc.pb.go b/api/v1/initializer_grpc.pb.go index 2535214..4432467 100644 --- a/api/v1/initializer_grpc.pb.go +++ b/api/v1/initializer_grpc.pb.go @@ -26,7 +26,7 @@ const ( // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type InitializerServiceClient interface { - Status(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StatusResponse, error) + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) } type initializerServiceClient struct { @@ -37,7 +37,7 @@ func NewInitializerServiceClient(cc grpc.ClientConnInterface) InitializerService return &initializerServiceClient{cc} } -func (c *initializerServiceClient) Status(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StatusResponse, error) { +func (c *initializerServiceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { out := new(StatusResponse) err := c.cc.Invoke(ctx, InitializerService_Status_FullMethodName, in, out, opts...) if err != nil { @@ -50,14 +50,14 @@ func (c *initializerServiceClient) Status(ctx context.Context, in *Empty, opts . // All implementations should embed UnimplementedInitializerServiceServer // for forward compatibility type InitializerServiceServer interface { - Status(context.Context, *Empty) (*StatusResponse, error) + Status(context.Context, *StatusRequest) (*StatusResponse, error) } // UnimplementedInitializerServiceServer should be embedded to have forward compatible implementations. 
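// With the Empty → StatusRequest rename above, callers of this service now pass
// a StatusRequest instead of the removed Empty message; a minimal sketch, where
// conn is an assumed, already-dialed grpc.ClientConnInterface:
//
//	client := NewInitializerServiceClient(conn)
//	resp, err := client.Status(ctx, &StatusRequest{})
//	if err != nil { /* handle error */ }
//	fmt.Println(resp.GetStatus(), resp.GetMessage())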
type UnimplementedInitializerServiceServer struct { } -func (UnimplementedInitializerServiceServer) Status(context.Context, *Empty) (*StatusResponse, error) { +func (UnimplementedInitializerServiceServer) Status(context.Context, *StatusRequest) (*StatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") } @@ -73,7 +73,7 @@ func RegisterInitializerServiceServer(s grpc.ServiceRegistrar, srv InitializerSe } func _InitializerService_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) + in := new(StatusRequest) if err := dec(in); err != nil { return nil, err } @@ -85,7 +85,7 @@ func _InitializerService_Status_Handler(srv interface{}, ctx context.Context, de FullMethod: InitializerService_Status_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(InitializerServiceServer).Status(ctx, req.(*Empty)) + return srv.(InitializerServiceServer).Status(ctx, req.(*StatusRequest)) } return interceptor(ctx, in, info, handler) } diff --git a/cmd/internal/backup/backup.go b/cmd/internal/backup/backup.go index a5ff627..6a665b9 100644 --- a/cmd/internal/backup/backup.go +++ b/cmd/internal/backup/backup.go @@ -2,67 +2,66 @@ package backup import ( "context" + "fmt" "os" "path" backuproviders "github.com/metal-stack/backup-restore-sidecar/cmd/internal/backup/providers" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/compress" - "github.com/metal-stack/backup-restore-sidecar/cmd/internal/constants" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/database" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/metrics" + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" cron "github.com/robfig/cron/v3" "go.uber.org/zap" + "golang.org/x/sync/semaphore" ) -// Start starts the backup component, which is periodically taking backups of the database -func Start(ctx context.Context, log *zap.SugaredLogger, backupSchedule string, db database.DatabaseProber, bp backuproviders.BackupProvider, metrics *metrics.Metrics, comp *compress.Compressor) error { - log.Info("database is now available, starting periodic backups") +type BackuperConfig struct { + Log *zap.SugaredLogger + BackupSchedule string + DatabaseProber database.DatabaseProber + BackupProvider backuproviders.BackupProvider + Metrics *metrics.Metrics + Compressor *compress.Compressor +} - c := cron.New() +type Backuper struct { + log *zap.SugaredLogger + backupSchedule string + db database.DatabaseProber + bp backuproviders.BackupProvider + metrics *metrics.Metrics + comp *compress.Compressor + sem *semaphore.Weighted +} - id, err := c.AddFunc(backupSchedule, func() { - err := db.Backup() - if err != nil { - metrics.CountError("create") - log.Errorw("database backup failed", "error", err) - return - } - log.Infow("successfully backed up database") +func New(config *BackuperConfig) *Backuper { + return &Backuper{ + log: config.Log, + backupSchedule: config.BackupSchedule, + db: config.DatabaseProber, + bp: config.BackupProvider, + metrics: config.Metrics, + comp: config.Compressor, + // sem guards backups to be taken concurrently + sem: semaphore.NewWeighted(1), + } +} - backupArchiveName := bp.GetNextBackupName() +// Start starts the backup component, which is periodically taking backups of the database +func (b *Backuper) Start(ctx context.Context) error { + b.log.Info("database is now available, starting 
periodic backups") - backupFilePath := path.Join(constants.BackupDir, backupArchiveName) - if err := os.RemoveAll(backupFilePath + comp.Extension()); err != nil { - metrics.CountError("delete_prior") - log.Errorw("could not delete priorly uploaded backup", "error", err) - return - } + c := cron.New() - filename, err := comp.Compress(backupFilePath) + id, err := c.AddFunc(b.backupSchedule, func() { + err := b.CreateBackup(ctx) if err != nil { - metrics.CountError("compress") - log.Errorw("unable to compress backup", "error", err) - return + b.log.Errorw("error creating backup", "error", err) } - log.Info("compressed backup") - err = bp.UploadBackup(filename) - if err != nil { - metrics.CountError("upload") - log.Errorw("error uploading backup", "error", err) - return - } - log.Info("uploaded backup to backup provider bucket") - metrics.CountBackup(filename) - err = bp.CleanupBackups() - if err != nil { - metrics.CountError("cleanup") - log.Errorw("cleaning up backups failed", "error", err) - } else { - log.Infow("cleaned up backups") - } for _, e := range c.Entries() { - log.Infow("scheduling next backup", "at", e.Next.String()) + b.log.Infow("scheduling next backup", "at", e.Next.String()) } }) if err != nil { @@ -70,8 +69,59 @@ func Start(ctx context.Context, log *zap.SugaredLogger, backupSchedule string, d } c.Start() - log.Infow("scheduling next backup", "at", c.Entry(id).Next.String()) + b.log.Infow("scheduling next backup", "at", c.Entry(id).Next.String()) <-ctx.Done() c.Stop() return nil } + +func (b *Backuper) CreateBackup(ctx context.Context) error { + if !b.sem.TryAcquire(1) { + return constants.ErrBackupAlreadyInProgress + } + defer b.sem.Release(1) + + err := b.db.Backup(ctx) + if err != nil { + b.metrics.CountError("create") + return fmt.Errorf("database backup failed: %w", err) + } + + b.log.Infow("successfully backed up database") + + backupArchiveName := b.bp.GetNextBackupName(ctx) + + backupFilePath := path.Join(constants.BackupDir, backupArchiveName) + if err := os.RemoveAll(backupFilePath + b.comp.Extension()); err != nil { + b.metrics.CountError("delete_prior") + return fmt.Errorf("could not delete priorly uploaded backup: %w", err) + } + + filename, err := b.comp.Compress(backupFilePath) + if err != nil { + b.metrics.CountError("compress") + return fmt.Errorf("unable to compress backup: %w", err) + } + + b.log.Info("compressed backup") + + err = b.bp.UploadBackup(ctx, filename) + if err != nil { + b.metrics.CountError("upload") + return fmt.Errorf("error uploading backup: %w", err) + } + + b.log.Info("uploaded backup to backup provider bucket") + + b.metrics.CountBackup(filename) + + err = b.bp.CleanupBackups(ctx) + if err != nil { + b.metrics.CountError("cleanup") + b.log.Errorw("cleaning up backups failed", "error", err) + } else { + b.log.Infow("cleaned up backups") + } + + return nil +} diff --git a/cmd/internal/backup/providers/contract.go b/cmd/internal/backup/providers/contract.go index 2ed3fc3..9ed47fb 100644 --- a/cmd/internal/backup/providers/contract.go +++ b/cmd/internal/backup/providers/contract.go @@ -1,14 +1,17 @@ package providers -import "time" +import ( + "context" + "time" +) type BackupProvider interface { - EnsureBackupBucket() error - ListBackups() (BackupVersions, error) - CleanupBackups() error - GetNextBackupName() string - DownloadBackup(version *BackupVersion) error - UploadBackup(sourcePath string) error + EnsureBackupBucket(ctx context.Context) error + ListBackups(ctx context.Context) (BackupVersions, error) + CleanupBackups(ctx 
context.Context) error + GetNextBackupName(ctx context.Context) string + DownloadBackup(ctx context.Context, version *BackupVersion) error + UploadBackup(ctx context.Context, sourcePath string) error } type BackupVersions interface { diff --git a/cmd/internal/backup/providers/gcp/gcp.go b/cmd/internal/backup/providers/gcp/gcp.go index a3acc5e..37a32eb 100644 --- a/cmd/internal/backup/providers/gcp/gcp.go +++ b/cmd/internal/backup/providers/gcp/gcp.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "net/http" - "os" "path" "path/filepath" "strconv" @@ -14,11 +13,13 @@ import ( "errors" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/backup/providers" - "github.com/metal-stack/backup-restore-sidecar/cmd/internal/constants" + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" + "github.com/spf13/afero" "go.uber.org/zap" "google.golang.org/api/googleapi" "google.golang.org/api/iterator" + "google.golang.org/api/option" "cloud.google.com/go/storage" ) @@ -29,6 +30,7 @@ const ( // BackupProviderGCP implements the backup provider interface for GCP type BackupProviderGCP struct { + fs afero.Fs log *zap.SugaredLogger c *storage.Client config *BackupProviderConfigGCP @@ -42,6 +44,8 @@ type BackupProviderConfigGCP struct { ObjectPrefix string ObjectsToKeep int64 ProjectID string + FS afero.Fs + ClientOpts []option.ClientOption } func (c *BackupProviderConfigGCP) validate() error { @@ -51,14 +55,17 @@ func (c *BackupProviderConfigGCP) validate() error { if c.ProjectID == "" { return errors.New("gcp project id must not be empty") } + for _, opt := range c.ClientOpts { + if opt == nil { + return errors.New("option can not be nil") + } + } return nil } // New returns a GCP backup provider -func New(log *zap.SugaredLogger, config *BackupProviderConfigGCP) (*BackupProviderGCP, error) { - ctx := context.Background() - +func New(ctx context.Context, log *zap.SugaredLogger, config *BackupProviderConfigGCP) (*BackupProviderGCP, error) { if config == nil { return nil, errors.New("gcp backup provider requires a provider config") } @@ -69,13 +76,16 @@ func New(log *zap.SugaredLogger, config *BackupProviderConfigGCP) (*BackupProvid if config.BackupName == "" { config.BackupName = defaultBackupName } + if config.FS == nil { + config.FS = afero.NewOsFs() + } err := config.validate() if err != nil { return nil, err } - client, err := storage.NewClient(ctx) + client, err := storage.NewClient(ctx, config.ClientOpts...) 
if err != nil { return nil, err } @@ -84,13 +94,12 @@ func New(log *zap.SugaredLogger, config *BackupProviderConfigGCP) (*BackupProvid c: client, config: config, log: log, + fs: config.FS, }, nil } // EnsureBackupBucket ensures a backup bucket at the backup provider -func (b *BackupProviderGCP) EnsureBackupBucket() error { - ctx := context.Background() - +func (b *BackupProviderGCP) EnsureBackupBucket(ctx context.Context) error { bucket := b.c.Bucket(b.config.BucketName) lifecycle := storage.Lifecycle{ Rules: []storage.LifecycleRule{ @@ -133,34 +142,35 @@ func (b *BackupProviderGCP) EnsureBackupBucket() error { } // CleanupBackups cleans up backups according to the given backup cleanup policy at the backup provider -func (b *BackupProviderGCP) CleanupBackups() error { +func (b *BackupProviderGCP) CleanupBackups(_ context.Context) error { // nothing to do here, done with lifecycle rules return nil } // DownloadBackup downloads the given backup version to the restoration folder -func (b *BackupProviderGCP) DownloadBackup(version *providers.BackupVersion) error { +func (b *BackupProviderGCP) DownloadBackup(ctx context.Context, version *providers.BackupVersion) error { gen, err := strconv.ParseInt(version.Version, 10, 64) if err != nil { return err } - ctx := context.Background() - bucket := b.c.Bucket(b.config.BucketName) + downloadFileName := version.Name + if strings.Contains(downloadFileName, "/") { + downloadFileName = filepath.Base(downloadFileName) + } + backupFilePath := path.Join(constants.DownloadDir, downloadFileName) + + b.log.Infow("downloading", "object", version.Name, "gen", gen, "to", backupFilePath) + r, err := bucket.Object(version.Name).Generation(gen).NewReader(ctx) if err != nil { return fmt.Errorf("backup not found: %w", err) } defer r.Close() - downloadFileName := version.Name - if strings.Contains(downloadFileName, "/") { - downloadFileName = filepath.Base(downloadFileName) - } - backupFilePath := path.Join(constants.DownloadDir, downloadFileName) - f, err := os.Create(backupFilePath) + f, err := b.fs.Create(backupFilePath) if err != nil { return err } @@ -175,11 +185,10 @@ func (b *BackupProviderGCP) DownloadBackup(version *providers.BackupVersion) err } // UploadBackup uploads a backup to the backup provider -func (b *BackupProviderGCP) UploadBackup(sourcePath string) error { - ctx := context.Background() +func (b *BackupProviderGCP) UploadBackup(ctx context.Context, sourcePath string) error { bucket := b.c.Bucket(b.config.BucketName) - r, err := os.Open(sourcePath) + r, err := b.fs.Open(sourcePath) if err != nil { return err } @@ -203,15 +212,13 @@ func (b *BackupProviderGCP) UploadBackup(sourcePath string) error { } // GetNextBackupName returns a name for the next backup archive that is going to be uploaded -func (b *BackupProviderGCP) GetNextBackupName() string { +func (b *BackupProviderGCP) GetNextBackupName(_ context.Context) string { // name is constant because we use lifecycle rule to cleanup return b.config.BackupName } // ListBackups lists the available backups of the backup provider -func (b *BackupProviderGCP) ListBackups() (providers.BackupVersions, error) { - ctx := context.Background() - +func (b *BackupProviderGCP) ListBackups(ctx context.Context) (providers.BackupVersions, error) { bucket := b.c.Bucket(b.config.BucketName) query := &storage.Query{ diff --git a/cmd/internal/backup/providers/gcp/gcp_integration_test.go b/cmd/internal/backup/providers/gcp/gcp_integration_test.go new file mode 100644 index 0000000..6769b4c --- /dev/null +++ 
b/cmd/internal/backup/providers/gcp/gcp_integration_test.go @@ -0,0 +1,228 @@ +//go:build integration + +package gcp + +import ( + "context" + "crypto/tls" + "fmt" + "io" + iofs "io/fs" + "net/http" + "path" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" + "go.uber.org/zap/zaptest" + "google.golang.org/api/option" +) + +func Test_BackupProviderGCP(t *testing.T) { + var ( + ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) + log = zaptest.NewLogger(t).Sugar() + ) + + defer cancel() + + c, conn := startFakeGcsContainer(t, ctx) + defer func() { + if t.Failed() { + r, err := c.Logs(ctx) + assert.NoError(t, err) + + if err == nil { + logs, err := io.ReadAll(r) + assert.NoError(t, err) + + fmt.Println(string(logs)) + } + } + err := c.Terminate(ctx) + require.NoError(t, err) + }() + + var ( + endpoint = conn.Endpoint + "/storage/v1/" + backupAmount = 5 + expectedBackupName = defaultBackupName + ".tar.gz" + prefix = fmt.Sprintf("test-with-%d", backupAmount) + + fs = afero.NewMemMapFs() + + transCfg = &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec + } + httpClient = &http.Client{Transport: transCfg} + ) + + p, err := New(ctx, log, &BackupProviderConfigGCP{ + BucketName: "test", + BucketLocation: "europe-west3", + ObjectPrefix: prefix, + ProjectID: "test-project-id", + FS: fs, + ClientOpts: []option.ClientOption{option.WithEndpoint(endpoint), option.WithHTTPClient(httpClient)}, + }) + require.NoError(t, err) + require.NotNil(t, p) + + t.Run("ensure backup bucket", func(t *testing.T) { + err := p.EnsureBackupBucket(ctx) + require.NoError(t, err) + }) + + if t.Failed() { + return + } + + t.Run("verify upload", func(t *testing.T) { + for i := 0; i < backupAmount; i++ { + backupName := p.GetNextBackupName(ctx) + ".tar.gz" + assert.Equal(t, expectedBackupName, backupName) + + backupPath := path.Join(constants.UploadDir, backupName) + backupContent := fmt.Sprintf("precious data %d", i) + + err = afero.WriteFile(fs, backupPath, []byte(backupContent), 0600) + require.NoError(t, err) + + err = p.UploadBackup(ctx, backupPath) + require.NoError(t, err) + + // cleaning up after test + err = fs.Remove(backupPath) + require.NoError(t, err) + } + }) + + if t.Failed() { + return + } + + t.Run("list backups", func(t *testing.T) { + versions, err := p.ListBackups(ctx) + require.NoError(t, err) + + _, err = versions.Get("foo") + assert.Error(t, err) + + allVersions := versions.List() + // even if the amount is larger than max backups to keep the fake server + // does not clean it up with lifecycle management + require.Len(t, allVersions, backupAmount) + + for i, v := range allVersions { + v := v + + fmt.Println(v) + + assert.True(t, strings.HasSuffix(v.Name, ".tar.gz")) + assert.NotZero(t, v.Date) + + getVersion, err := versions.Get(v.Version) + assert.NoError(t, err) + assert.Equal(t, v, getVersion) + + if i == 0 { + continue + } + assert.True(t, v.Date.Before(allVersions[i-1].Date)) + } + + latestVersion := versions.Latest() + assert.Equal(t, allVersions[0], latestVersion) + }) + + if t.Failed() { + return + } + + t.Run("verify download", func(t *testing.T) { + versions, err := p.ListBackups(ctx) + require.NoError(t, err) + + latestVersion := 
versions.Latest() + require.NotNil(t, latestVersion) + + err = p.DownloadBackup(ctx, latestVersion) + require.NoError(t, err) + + downloadPath := path.Join(constants.DownloadDir, expectedBackupName) + gotContent, err := afero.ReadFile(fs, downloadPath) + require.NoError(t, err) + + backupContent := fmt.Sprintf("precious data %d", backupAmount-1) + require.Equal(t, backupContent, string(gotContent)) + + // cleaning up after test + err = fs.Remove(downloadPath) + require.NoError(t, err) + }) + + if t.Failed() { + return + } + + t.Run("verify cleanup", func(t *testing.T) { + err := p.CleanupBackups(ctx) + require.NoError(t, err) + }) + + if t.Failed() { + return + } + + err = afero.Walk(fs, "/", func(path string, info iofs.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + + return fmt.Errorf("provider messed around in the file system at: %s", path) + }) + require.NoError(t, err) +} + +type connectionDetails struct { + Endpoint string +} + +func startFakeGcsContainer(t testing.TB, ctx context.Context) (testcontainers.Container, *connectionDetails) { + c, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: testcontainers.ContainerRequest{ + Image: "fsouza/fake-gcs-server", // tested with fsouza/fake-gcs-server:1.47.4 + ExposedPorts: []string{"4443"}, + HostConfigModifier: func(hc *container.HostConfig) { + // Unfortunately we must use host network as the public host must exactly match the client endpoint + // see for example: https://github.com/fsouza/fake-gcs-server/issues/196 + // + // without it the download does not work because the server directs to the wrong (public?) endpoint + hc.NetworkMode = "host" + }, + Cmd: []string{"-backend", "memory", "-log-level", "debug", "-public-host", "localhost:4443"}, + WaitingFor: wait.ForAll( + wait.ForListeningPort("4443/tcp"), + ), + }, + Started: true, + Logger: testcontainers.TestLogger(t), + }) + require.NoError(t, err) + + conn := &connectionDetails{ + Endpoint: "https://localhost:4443", + } + + return c, conn +} diff --git a/cmd/internal/backup/providers/gcp/versions.go b/cmd/internal/backup/providers/gcp/versions.go index 9889d7f..e4b7bee 100644 --- a/cmd/internal/backup/providers/gcp/versions.go +++ b/cmd/internal/backup/providers/gcp/versions.go @@ -18,7 +18,6 @@ func (b BackupVersionsGCP) Latest() *providers.BackupVersion { if len(result) == 0 { return nil } - b.Sort(result, false) return result[0] } @@ -38,6 +37,8 @@ func (b BackupVersionsGCP) List() []*providers.BackupVersion { } } + b.Sort(result, false) + return result } diff --git a/cmd/internal/backup/providers/local/local.go b/cmd/internal/backup/providers/local/local.go index c43fb71..bcc9517 100644 --- a/cmd/internal/backup/providers/local/local.go +++ b/cmd/internal/backup/providers/local/local.go @@ -1,6 +1,7 @@ package local import ( + "context" "fmt" "os" "path/filepath" @@ -9,18 +10,20 @@ import ( "errors" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/backup/providers" - "github.com/metal-stack/backup-restore-sidecar/cmd/internal/constants" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/utils" + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" + "github.com/spf13/afero" "go.uber.org/zap" ) const ( - defaultLocalBackupPath = "/tmp/backup-restore-sidecar/local-provider" + defaultLocalBackupPath = constants.SidecarBaseDir + "/local-provider" ) // BackupProviderLocal implements the backup provider interface for no backup provider (useful to 
disable sidecar functionality in development environments) type BackupProviderLocal struct { + fs afero.Fs log *zap.SugaredLogger config *BackupProviderConfigLocal nextBackupCount int64 @@ -30,6 +33,7 @@ type BackupProviderLocal struct { type BackupProviderConfigLocal struct { LocalBackupPath string ObjectsToKeep int64 + FS afero.Fs } func (c *BackupProviderConfigLocal) validate() error { @@ -48,6 +52,9 @@ func New(log *zap.SugaredLogger, config *BackupProviderConfigLocal) (*BackupProv if config.LocalBackupPath == "" { config.LocalBackupPath = defaultLocalBackupPath } + if config.FS == nil { + config.FS = afero.NewOsFs() + } err := config.validate() if err != nil { @@ -57,53 +64,59 @@ func New(log *zap.SugaredLogger, config *BackupProviderConfigLocal) (*BackupProv return &BackupProviderLocal{ config: config, log: log, + fs: config.FS, }, nil } // EnsureBackupBucket ensures a backup bucket at the backup provider -func (b *BackupProviderLocal) EnsureBackupBucket() error { +func (b *BackupProviderLocal) EnsureBackupBucket(_ context.Context) error { b.log.Infow("ensuring backup bucket called for provider local") - if err := os.RemoveAll(b.config.LocalBackupPath); err != nil { - return fmt.Errorf("could not clean local backup directory: %w", err) - } - if err := os.MkdirAll(b.config.LocalBackupPath, 0777); err != nil { + if err := b.fs.MkdirAll(b.config.LocalBackupPath, 0777); err != nil { return fmt.Errorf("could not create local backup directory: %w", err) } + return nil } // CleanupBackups cleans up backups according to the given backup cleanup policy at the backup provider -func (b *BackupProviderLocal) CleanupBackups() error { +func (b *BackupProviderLocal) CleanupBackups(_ context.Context) error { b.log.Infow("cleanup backups called for provider local") + return nil } // DownloadBackup downloads the given backup version to the restoration folder -func (b *BackupProviderLocal) DownloadBackup(version *providers.BackupVersion) error { +func (b *BackupProviderLocal) DownloadBackup(_ context.Context, version *providers.BackupVersion) error { b.log.Infow("download backup called for provider local") + source := filepath.Join(b.config.LocalBackupPath, version.Name) destination := filepath.Join(constants.DownloadDir, version.Name) - err := utils.Copy(source, destination) + + err := utils.Copy(b.fs, source, destination) if err != nil { return err } + return nil } // UploadBackup uploads a backup to the backup provider -func (b *BackupProviderLocal) UploadBackup(sourcePath string) error { +func (b *BackupProviderLocal) UploadBackup(_ context.Context, sourcePath string) error { b.log.Infow("upload backups called for provider local") + destination := filepath.Join(b.config.LocalBackupPath, filepath.Base(sourcePath)) - err := utils.Copy(sourcePath, destination) + + err := utils.Copy(b.fs, sourcePath, destination) if err != nil { return err } + return nil } // GetNextBackupName returns a name for the next backup archive that is going to be uploaded -func (b *BackupProviderLocal) GetNextBackupName() string { +func (b *BackupProviderLocal) GetNextBackupName(_ context.Context) string { name := strconv.FormatInt(b.nextBackupCount, 10) b.nextBackupCount++ b.nextBackupCount = b.nextBackupCount % b.config.ObjectsToKeep @@ -111,9 +124,10 @@ func (b *BackupProviderLocal) GetNextBackupName() string { } // ListBackups lists the available backups of the backup provider -func (b *BackupProviderLocal) ListBackups() (providers.BackupVersions, error) { +func (b *BackupProviderLocal) ListBackups(_ context.Context) 
(providers.BackupVersions, error) { b.log.Infow("listing backups called for provider local") - d, err := os.Open(b.config.LocalBackupPath) + + d, err := b.fs.Open(b.config.LocalBackupPath) if err != nil { return nil, err } @@ -125,7 +139,7 @@ func (b *BackupProviderLocal) ListBackups() (providers.BackupVersions, error) { var files []os.FileInfo for _, name := range names { - info, err := os.Stat(filepath.Join(b.config.LocalBackupPath, name)) + info, err := b.fs.Stat(filepath.Join(b.config.LocalBackupPath, name)) if err != nil { return nil, err } diff --git a/cmd/internal/backup/providers/local/local_test.go b/cmd/internal/backup/providers/local/local_test.go new file mode 100644 index 0000000..7da66b1 --- /dev/null +++ b/cmd/internal/backup/providers/local/local_test.go @@ -0,0 +1,178 @@ +package local + +import ( + "context" + "fmt" + iofs "io/fs" + "path" + "strings" + "testing" + + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zaptest" +) + +func Test_BackupProviderLocal(t *testing.T) { + var ( + ctx = context.Background() + localProviderBackupPath = defaultLocalBackupPath + log = zaptest.NewLogger(t).Sugar() + ) + + for _, backupAmount := range []int{0, 1, 5, constants.DefaultObjectsToKeep + 5} { + t.Run(fmt.Sprintf("testing with %d backups", backupAmount), func(t *testing.T) { + fs := afero.NewMemMapFs() + + p, err := New(log, &BackupProviderConfigLocal{ + FS: fs, + }) + require.NoError(t, err) + require.NotNil(t, p) + + t.Run("ensure backup bucket", func(t *testing.T) { + err := p.EnsureBackupBucket(ctx) + require.NoError(t, err) + + info, err := fs.Stat(defaultLocalBackupPath) + require.NoError(t, err) + assert.True(t, info.IsDir()) + }) + + if t.Failed() { + return + } + + t.Run("verify upload", func(t *testing.T) { + for i := 0; i < backupAmount; i++ { + backupName := p.GetNextBackupName(ctx) + ".tar.gz" + backupPath := path.Join(constants.UploadDir, backupName) + backupContent := fmt.Sprintf("precious data %d", i) + + err = afero.WriteFile(fs, backupPath, []byte(backupContent), 0600) + require.NoError(t, err) + + err = p.UploadBackup(ctx, backupPath) + require.NoError(t, err) + + localPath := path.Join(localProviderBackupPath, backupName) + _, err := fs.Stat(localPath) + require.NoError(t, err) + + backupFiles, err := afero.ReadDir(fs, localProviderBackupPath) + require.NoError(t, err) + if i+1 > constants.DefaultObjectsToKeep { + require.Len(t, backupFiles, constants.DefaultObjectsToKeep) + } else { + require.Len(t, backupFiles, i+1) + } + + backedupContent, err := afero.ReadFile(fs, localPath) + require.NoError(t, err) + require.Equal(t, backupContent, string(backedupContent)) + + // cleaning up after test + err = fs.Remove(backupPath) + require.NoError(t, err) + } + }) + + if t.Failed() { + return + } + + if backupAmount <= 0 { + return + } + + t.Run("list backups", func(t *testing.T) { + versions, err := p.ListBackups(ctx) + require.NoError(t, err) + + _, err = versions.Get("foo") + assert.Error(t, err) + + allVersions := versions.List() + amount := backupAmount + if backupAmount > constants.DefaultObjectsToKeep { + amount = constants.DefaultObjectsToKeep + } + require.Len(t, allVersions, amount) + + for i, v := range allVersions { + v := v + + assert.True(t, strings.HasSuffix(v.Name, ".tar.gz")) + assert.NotZero(t, v.Date) + + getVersion, err := versions.Get(v.Version) + assert.NoError(t, err) + assert.Equal(t, v, getVersion) + + if i 
== 0 { + continue + } + assert.True(t, v.Date.Before(allVersions[i-1].Date)) + } + + latestVersion := versions.Latest() + assert.Equal(t, allVersions[0], latestVersion) + }) + + if t.Failed() { + return + } + + t.Run("verify download", func(t *testing.T) { + versions, err := p.ListBackups(ctx) + require.NoError(t, err) + + latestVersion := versions.Latest() + require.NotNil(t, latestVersion) + + err = p.DownloadBackup(ctx, latestVersion) + require.NoError(t, err) + + downloadPath := path.Join(constants.DownloadDir, latestVersion.Name) + gotContent, err := afero.ReadFile(fs, downloadPath) + require.NoError(t, err) + + require.Equal(t, fmt.Sprintf("precious data %d", backupAmount-1), string(gotContent)) + + // cleaning up after test + err = fs.Remove(downloadPath) + require.NoError(t, err) + }) + + if t.Failed() { + return + } + + t.Run("verify cleanup", func(t *testing.T) { + err := p.CleanupBackups(ctx) + require.NoError(t, err) + }) + + if t.Failed() { + return + } + + err = afero.Walk(fs, "/", func(path string, info iofs.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + if strings.HasPrefix(path, localProviderBackupPath) { + return nil + } + + return fmt.Errorf("provider messed around in the file system at: %s", path) + }) + require.NoError(t, err) + }) + } +} diff --git a/cmd/internal/backup/providers/s3/s3.go b/cmd/internal/backup/providers/s3/s3.go index 0fc249d..e4db402 100644 --- a/cmd/internal/backup/providers/s3/s3.go +++ b/cmd/internal/backup/providers/s3/s3.go @@ -1,7 +1,7 @@ package s3 import ( - "os" + "context" "path" "path/filepath" "strings" @@ -9,7 +9,8 @@ import ( "errors" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/backup/providers" - "github.com/metal-stack/backup-restore-sidecar/cmd/internal/constants" + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" + "github.com/spf13/afero" "go.uber.org/zap" @@ -27,6 +28,7 @@ const ( // BackupProviderS3 implements the backup provider interface for S3 type BackupProviderS3 struct { + fs afero.Fs log *zap.SugaredLogger c *s3.S3 sess *session.Session @@ -43,6 +45,7 @@ type BackupProviderConfigS3 struct { BackupName string ObjectPrefix string ObjectsToKeep int64 + FS afero.Fs } func (c *BackupProviderConfigS3) validate() error { @@ -64,7 +67,6 @@ func (c *BackupProviderConfigS3) validate() error { // New returns a S3 backup provider func New(log *zap.SugaredLogger, config *BackupProviderConfigS3) (*BackupProviderS3, error) { - if config == nil { return nil, errors.New("s3 backup provider requires a provider config") } @@ -75,6 +77,9 @@ func New(log *zap.SugaredLogger, config *BackupProviderConfigS3) (*BackupProvide if config.BackupName == "" { config.BackupName = defaultBackupName } + if config.FS == nil { + config.FS = afero.NewOsFs() + } err := config.validate() if err != nil { @@ -100,11 +105,12 @@ func New(log *zap.SugaredLogger, config *BackupProviderConfigS3) (*BackupProvide sess: newSession, config: config, log: log, + fs: config.FS, }, nil } // EnsureBackupBucket ensures a backup bucket at the backup provider -func (b *BackupProviderS3) EnsureBackupBucket() error { +func (b *BackupProviderS3) EnsureBackupBucket(ctx context.Context) error { bucket := aws.String(b.config.BucketName) // create bucket @@ -112,7 +118,7 @@ func (b *BackupProviderS3) EnsureBackupBucket() error { Bucket: bucket, } - _, err := b.c.CreateBucket(cparams) + _, err := b.c.CreateBucketWithContext(ctx, cparams) if err != nil { // FIXME check how to migrate to errors.As //nolint 
@@ -135,7 +141,7 @@ func (b *BackupProviderS3) EnsureBackupBucket() error { Status: aws.String("Enabled"), }, } - _, err = b.c.PutBucketVersioning(versioning) + _, err = b.c.PutBucketVersioningWithContext(ctx, versioning) if err != nil { // FIXME check how to migrate to errors.As //nolint @@ -165,7 +171,7 @@ func (b *BackupProviderS3) EnsureBackupBucket() error { }, }, } - _, err = b.c.PutBucketLifecycleConfiguration(lifecycle) + _, err = b.c.PutBucketLifecycleConfigurationWithContext(ctx, lifecycle) if err != nil { // FIXME check how to migrate to errors.As //nolint @@ -182,21 +188,23 @@ func (b *BackupProviderS3) EnsureBackupBucket() error { } // CleanupBackups cleans up backups according to the given backup cleanup policy at the backup provider -func (b *BackupProviderS3) CleanupBackups() error { +func (b *BackupProviderS3) CleanupBackups(_ context.Context) error { // nothing to do here, done with lifecycle rules return nil } // DownloadBackup downloads the given backup version to the restoration folder -func (b *BackupProviderS3) DownloadBackup(version *providers.BackupVersion) error { +func (b *BackupProviderS3) DownloadBackup(ctx context.Context, version *providers.BackupVersion) error { bucket := aws.String(b.config.BucketName) downloadFileName := version.Name if strings.Contains(downloadFileName, "/") { downloadFileName = filepath.Base(downloadFileName) } + backupFilePath := path.Join(constants.DownloadDir, downloadFileName) - f, err := os.Create(backupFilePath) + + f, err := b.fs.Create(backupFilePath) if err != nil { return err } @@ -204,7 +212,9 @@ func (b *BackupProviderS3) DownloadBackup(version *providers.BackupVersion) erro downloader := s3manager.NewDownloader(b.sess) - _, err = downloader.Download(f, + _, err = downloader.DownloadWithContext( + ctx, + f, &s3.GetObjectInput{ Bucket: bucket, Key: &version.Name, @@ -218,10 +228,10 @@ func (b *BackupProviderS3) DownloadBackup(version *providers.BackupVersion) erro } // UploadBackup uploads a backup to the backup provider -func (b *BackupProviderS3) UploadBackup(sourcePath string) error { +func (b *BackupProviderS3) UploadBackup(ctx context.Context, sourcePath string) error { bucket := aws.String(b.config.BucketName) - r, err := os.Open(sourcePath) + r, err := b.fs.Open(sourcePath) if err != nil { return err } @@ -235,7 +245,7 @@ func (b *BackupProviderS3) UploadBackup(sourcePath string) error { b.log.Debugw("uploading object", "src", sourcePath, "dest", destination) uploader := s3manager.NewUploader(b.sess) - _, err = uploader.Upload(&s3manager.UploadInput{ + _, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{ Bucket: bucket, Key: aws.String(destination), Body: r, @@ -248,22 +258,23 @@ func (b *BackupProviderS3) UploadBackup(sourcePath string) error { } // GetNextBackupName returns a name for the next backup archive that is going to be uploaded -func (b *BackupProviderS3) GetNextBackupName() string { +func (b *BackupProviderS3) GetNextBackupName(_ context.Context) string { // name is constant because we use lifecycle rule to cleanup return b.config.BackupName } // ListBackups lists the available backups of the backup provider -func (b *BackupProviderS3) ListBackups() (providers.BackupVersions, error) { +func (b *BackupProviderS3) ListBackups(ctx context.Context) (providers.BackupVersions, error) { bucket := aws.String(b.config.BucketName) - it, err := b.c.ListObjectVersions(&s3.ListObjectVersionsInput{ + it, err := b.c.ListObjectVersionsWithContext(ctx, &s3.ListObjectVersionsInput{ Bucket: bucket, Prefix: 
&b.config.ObjectPrefix, }) if err != nil { return nil, err } + return BackupVersionsS3{ objectAttrs: it.Versions, }, nil diff --git a/cmd/internal/backup/providers/s3/s3_integration_test.go b/cmd/internal/backup/providers/s3/s3_integration_test.go new file mode 100644 index 0000000..d62e184 --- /dev/null +++ b/cmd/internal/backup/providers/s3/s3_integration_test.go @@ -0,0 +1,225 @@ +//go:build integration + +package s3 + +import ( + "context" + "fmt" + "io" + iofs "io/fs" + "path" + "strings" + "testing" + "time" + + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" + "go.uber.org/zap/zaptest" +) + +func Test_BackupProviderS3(t *testing.T) { + var ( + ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) + log = zaptest.NewLogger(t).Sugar() + ) + + defer cancel() + + c, conn := startMinioContainer(t, ctx) + defer func() { + if t.Failed() { + r, err := c.Logs(ctx) + assert.NoError(t, err) + + if err == nil { + logs, err := io.ReadAll(r) + assert.NoError(t, err) + + fmt.Println(string(logs)) + } + } + err := c.Terminate(ctx) + require.NoError(t, err) + }() + + var ( + endpoint = conn.Endpoint + backupAmount = 5 + expectedBackupName = defaultBackupName + ".tar.gz" + prefix = fmt.Sprintf("test-with-%d", backupAmount) + + fs = afero.NewMemMapFs() + ) + + p, err := New(log, &BackupProviderConfigS3{ + BucketName: "test", + Endpoint: endpoint, + Region: "dummy", + AccessKey: "ACCESSKEY", + SecretKey: "SECRETKEY", + ObjectPrefix: prefix, + FS: fs, + }) + require.NoError(t, err) + require.NotNil(t, p) + + t.Run("ensure backup bucket", func(t *testing.T) { + err := p.EnsureBackupBucket(ctx) + require.NoError(t, err) + }) + + if t.Failed() { + return + } + + t.Run("verify upload", func(t *testing.T) { + for i := 0; i < backupAmount; i++ { + backupName := p.GetNextBackupName(ctx) + ".tar.gz" + assert.Equal(t, expectedBackupName, backupName) + + backupPath := path.Join(constants.UploadDir, backupName) + backupContent := fmt.Sprintf("precious data %d", i) + + err = afero.WriteFile(fs, backupPath, []byte(backupContent), 0600) + require.NoError(t, err) + + err = p.UploadBackup(ctx, backupPath) + require.NoError(t, err) + + // cleaning up after test + err = fs.Remove(backupPath) + require.NoError(t, err) + } + }) + + if t.Failed() { + return + } + + if backupAmount <= 0 { + return + } + + t.Run("list backups", func(t *testing.T) { + versions, err := p.ListBackups(ctx) + require.NoError(t, err) + + _, err = versions.Get("foo") + assert.Error(t, err) + + allVersions := versions.List() + require.Len(t, allVersions, backupAmount) + + for i, v := range allVersions { + v := v + + fmt.Println(v) + + assert.True(t, strings.HasSuffix(v.Name, ".tar.gz")) + assert.NotZero(t, v.Date) + + getVersion, err := versions.Get(v.Version) + assert.NoError(t, err) + assert.Equal(t, v, getVersion) + + if i == 0 { + continue + } + assert.True(t, v.Date.Before(allVersions[i-1].Date)) + } + + latestVersion := versions.Latest() + assert.Equal(t, allVersions[0], latestVersion) + }) + + if t.Failed() { + return + } + + t.Run("verify download", func(t *testing.T) { + versions, err := p.ListBackups(ctx) + require.NoError(t, err) + + latestVersion := versions.Latest() + require.NotNil(t, latestVersion) + + err = p.DownloadBackup(ctx, latestVersion) + require.NoError(t, err) + + downloadPath := 
path.Join(constants.DownloadDir, expectedBackupName) + gotContent, err := afero.ReadFile(fs, downloadPath) + require.NoError(t, err) + + backupContent := fmt.Sprintf("precious data %d", backupAmount-1) + require.Equal(t, backupContent, string(gotContent)) + + // cleaning up after test + err = fs.Remove(downloadPath) + require.NoError(t, err) + }) + + if t.Failed() { + return + } + + t.Run("verify cleanup", func(t *testing.T) { + err := p.CleanupBackups(ctx) + require.NoError(t, err) + }) + + if t.Failed() { + return + } + + err = afero.Walk(fs, "/", func(path string, info iofs.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + + return fmt.Errorf("provider messed around in the file system at: %s", path) + }) + require.NoError(t, err) +} + +type connectionDetails struct { + Endpoint string +} + +func startMinioContainer(t testing.TB, ctx context.Context) (testcontainers.Container, *connectionDetails) { + c, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: testcontainers.ContainerRequest{ + Image: "quay.io/minio/minio", + ExposedPorts: []string{"9000"}, + Cmd: []string{"server", "/data"}, + Env: map[string]string{ + "MINIO_ROOT_USER": "ACCESSKEY", + "MINIO_ROOT_PASSWORD": "SECRETKEY", + }, + WaitingFor: wait.ForAll( + wait.ForListeningPort("9000/tcp"), + ), + }, + Started: true, + Logger: testcontainers.TestLogger(t), + }) + require.NoError(t, err) + + host, err := c.Host(ctx) + require.NoError(t, err) + + port, err := c.MappedPort(ctx, "9000") + require.NoError(t, err) + + conn := &connectionDetails{ + Endpoint: "http://" + host + ":" + port.Port(), + } + + return c, conn +} diff --git a/cmd/internal/backup/providers/s3/versions.go b/cmd/internal/backup/providers/s3/versions.go index 15e951f..21947ca 100644 --- a/cmd/internal/backup/providers/s3/versions.go +++ b/cmd/internal/backup/providers/s3/versions.go @@ -19,7 +19,6 @@ func (b BackupVersionsS3) Latest() *providers.BackupVersion { if len(result) == 0 { return nil } - b.Sort(result, false) return result[0] } @@ -27,19 +26,16 @@ func (b BackupVersionsS3) Latest() *providers.BackupVersion { func (b BackupVersionsS3) List() []*providers.BackupVersion { var result []*providers.BackupVersion - tmp := make(map[int64]bool) for _, attr := range b.objectAttrs { - ok := tmp[attr.LastModified.Unix()] - if !ok { - tmp[attr.LastModified.Unix()] = true - result = append(result, &providers.BackupVersion{ - Name: *attr.Key, - Version: *attr.VersionId, - Date: *attr.LastModified, - }) - } + result = append(result, &providers.BackupVersion{ + Name: *attr.Key, + Version: *attr.VersionId, + Date: *attr.LastModified, + }) } + b.Sort(result, false) + return result } diff --git a/cmd/internal/compress/compress.go b/cmd/internal/compress/compress.go index f10b97e..b7386d7 100644 --- a/cmd/internal/compress/compress.go +++ b/cmd/internal/compress/compress.go @@ -4,7 +4,7 @@ import ( "fmt" "path/filepath" - "github.com/metal-stack/backup-restore-sidecar/cmd/internal/constants" + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" "github.com/mholt/archiver/v3" ) diff --git a/cmd/internal/database/contract.go b/cmd/internal/database/contract.go index 71426d8..c5da4f6 100644 --- a/cmd/internal/database/contract.go +++ b/cmd/internal/database/contract.go @@ -1,11 +1,13 @@ package database +import "context" + type DatabaseInitializer interface { // Check indicates whether a restore of the database is required or not. 
- Check() (bool, error) + Check(ctx context.Context) (bool, error) // Recover performs a restore of the database. - Recover() error + Recover(ctx context.Context) error // Upgrade performs an upgrade of the database in case a newer version of the database is detected. // @@ -13,15 +15,15 @@ type DatabaseInitializer interface { // This behavior is intended to reduce unnecessary downtime caused by misconfigurations. // // Once the upgrade was made, any error condition will require to recover the database from backup. - Upgrade() error + Upgrade(ctx context.Context) error } type DatabaseProber interface { // Probe figures out if the database is running and available for taking backups. - Probe() error + Probe(ctx context.Context) error // Backup creates a backup of the database. - Backup() error + Backup(ctx context.Context) error } type Database interface { diff --git a/cmd/internal/database/etcd/etcd.go b/cmd/internal/database/etcd/etcd.go index 491cac1..34f955e 100644 --- a/cmd/internal/database/etcd/etcd.go +++ b/cmd/internal/database/etcd/etcd.go @@ -1,12 +1,13 @@ package etcd import ( + "context" "fmt" "os" "path" - "github.com/metal-stack/backup-restore-sidecar/cmd/internal/constants" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/utils" + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" "go.uber.org/zap" ) @@ -43,7 +44,7 @@ func New(log *zap.SugaredLogger, datadir, caCert, cert, key, endpoints, name str } // Check checks whether a backup needs to be restored or not, returns true if it needs a backup -func (db *Etcd) Check() (bool, error) { +func (db *Etcd) Check(_ context.Context) (bool, error) { empty, err := utils.IsEmpty(db.datadir) if err != nil { return false, err @@ -57,7 +58,7 @@ func (db *Etcd) Check() (bool, error) { } // Backup takes a full Backup of etcd with etcdctl. -func (db *Etcd) Backup() error { +func (db *Etcd) Backup(ctx context.Context) error { snapshotFileName := path.Join(constants.BackupDir, "snapshot.db") if err := os.RemoveAll(constants.BackupDir); err != nil { return fmt.Errorf("could not clean backup directory %w", err) @@ -68,7 +69,7 @@ func (db *Etcd) Backup() error { } // Create a etcd snapshot. 
- out, err := db.etcdctl(true, "snapshot", "save", snapshotFileName) + out, err := db.etcdctl(ctx, true, "snapshot", "save", snapshotFileName) if err != nil { return fmt.Errorf("error running backup command: %s", out) } @@ -79,7 +80,7 @@ func (db *Etcd) Backup() error { return fmt.Errorf("backup file was not created: %s", snapshotFileName) } - out, err = db.etcdctl(false, "snapshot", "status", "--write-out", "json", snapshotFileName) + out, err = db.etcdctl(ctx, false, "snapshot", "status", "--write-out", "json", snapshotFileName) if err != nil { return fmt.Errorf("backup was not created correct: %s", out) } @@ -90,13 +91,13 @@ func (db *Etcd) Backup() error { } // Recover restores a database backup -func (db *Etcd) Recover() error { +func (db *Etcd) Recover(ctx context.Context) error { snapshotFileName := path.Join(constants.RestoreDir, "snapshot.db") if _, err := os.Stat(snapshotFileName); os.IsNotExist(err) { return fmt.Errorf("restore file is not present: %s", snapshotFileName) } - out, err := db.etcdutl("snapshot", "status", "--write-out", "json", snapshotFileName) + out, err := db.etcdutl(ctx, "snapshot", "status", "--write-out", "json", snapshotFileName) if err != nil { return fmt.Errorf("restored backup file was not created correct: %s", out) } @@ -111,7 +112,7 @@ func (db *Etcd) Recover() error { return fmt.Errorf("could not remove database data directory %w", err) } - out, err = db.etcdutl("snapshot", "restore", "--data-dir", db.datadir, snapshotFileName) + out, err = db.etcdutl(ctx, "snapshot", "restore", "--data-dir", db.datadir, snapshotFileName) if err != nil { return fmt.Errorf("unable to restore:%w", err) } @@ -128,8 +129,8 @@ func (db *Etcd) Recover() error { } // Probe figures out if the database is running and available for taking backups. -func (db *Etcd) Probe() error { - out, err := db.etcdctl(true, "get", "foo") +func (db *Etcd) Probe(ctx context.Context) error { + out, err := db.etcdctl(ctx, true, "get", "foo") if err != nil { return fmt.Errorf("unable to retrieve key:%s %w", out, err) } @@ -137,11 +138,11 @@ func (db *Etcd) Probe() error { } // Upgrade performs an upgrade of the database in case a newer version of the database is detected. -func (db *Etcd) Upgrade() error { +func (db *Etcd) Upgrade(ctx context.Context) error { return nil } -func (db *Etcd) etcdctl(withConnectionArgs bool, args ...string) (string, error) { +func (db *Etcd) etcdctl(ctx context.Context, withConnectionArgs bool, args ...string) (string, error) { var ( etcdctlEnvs []string etcdctlArgs []string @@ -153,14 +154,14 @@ func (db *Etcd) etcdctl(withConnectionArgs bool, args ...string) (string, error) etcdctlArgs = append(etcdctlArgs, db.connectionArgs()...) } - out, err := db.executor.ExecuteCommandWithOutput(etcdctlCommand, etcdctlEnvs, etcdctlArgs...) + out, err := db.executor.ExecuteCommandWithOutput(ctx, etcdctlCommand, etcdctlEnvs, etcdctlArgs...) if err != nil { return out, fmt.Errorf("error running etcdctl command: %s", out) } return out, nil } -func (db *Etcd) etcdutl(args ...string) (string, error) { +func (db *Etcd) etcdutl(ctx context.Context, args ...string) (string, error) { var ( etcdutlEnvs []string etcdutlArgs []string @@ -168,7 +169,7 @@ func (db *Etcd) etcdutl(args ...string) (string, error) { etcdutlArgs = append(etcdutlArgs, args...) - out, err := db.executor.ExecuteCommandWithOutput(etcdutlCommand, etcdutlEnvs, etcdutlArgs...) + out, err := db.executor.ExecuteCommandWithOutput(ctx, etcdutlCommand, etcdutlEnvs, etcdutlArgs...) 
if err != nil { return out, fmt.Errorf("error running etcdutl command: %s", out) } diff --git a/cmd/internal/database/postgres/postgres.go b/cmd/internal/database/postgres/postgres.go index 059546b..b6eb0ab 100644 --- a/cmd/internal/database/postgres/postgres.go +++ b/cmd/internal/database/postgres/postgres.go @@ -1,6 +1,7 @@ package postgres import ( + "context" "fmt" "net" "os" @@ -8,8 +9,8 @@ import ( "strconv" "time" - "github.com/metal-stack/backup-restore-sidecar/cmd/internal/constants" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/utils" + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" "go.uber.org/zap" ) @@ -46,7 +47,7 @@ func New(log *zap.SugaredLogger, datadir string, host string, port int, user str } // Check checks whether a backup needs to be restored or not, returns true if it needs a backup -func (db *Postgres) Check() (bool, error) { +func (db *Postgres) Check(_ context.Context) (bool, error) { empty, err := utils.IsEmpty(db.datadir) if err != nil { return false, err @@ -60,12 +61,12 @@ func (db *Postgres) Check() (bool, error) { } // Backup takes a backup of the database -func (db *Postgres) Backup() error { +func (db *Postgres) Backup(ctx context.Context) error { // for new databases the postgres binaries required for Upgrade() cannot be copied before the database is running // therefore this happens in the backup task where the database is already available // // implication: one backup has to be taken before an upgrade can be made - err := db.copyPostgresBinaries(false) + err := db.copyPostgresBinaries(ctx, false) if err != nil { return err } @@ -94,7 +95,7 @@ func (db *Postgres) Backup() error { env = append(env, "PGPASSWORD="+db.password) } - out, err := db.executor.ExecuteCommandWithOutput(postgresBackupCmd, env, args...) + out, err := db.executor.ExecuteCommandWithOutput(ctx, postgresBackupCmd, env, args...) if err != nil { return fmt.Errorf("error running backup command: %s %w", out, err) } @@ -112,7 +113,7 @@ func (db *Postgres) Backup() error { } // Recover restores a database backup -func (db *Postgres) Recover() error { +func (db *Postgres) Recover(ctx context.Context) error { for _, p := range []string{postgresBaseTar, postgresWalTar} { fullPath := path.Join(constants.RestoreDir, p) if _, err := os.Stat(fullPath); os.IsNotExist(err) { @@ -124,7 +125,7 @@ func (db *Postgres) Recover() error { return fmt.Errorf("could not clean database data directory: %w", err) } - out, err := db.executor.ExecuteCommandWithOutput("tar", nil, "-xzvf", path.Join(constants.RestoreDir, postgresBaseTar), "-C", db.datadir) + out, err := db.executor.ExecuteCommandWithOutput(ctx, "tar", nil, "-xzvf", path.Join(constants.RestoreDir, postgresBaseTar), "-C", db.datadir) if err != nil { return fmt.Errorf("error untaring base backup: %s %w", out, err) } @@ -139,7 +140,7 @@ func (db *Postgres) Recover() error { return fmt.Errorf("could not create pg_wal directory: %w", err) } - out, err = db.executor.ExecuteCommandWithOutput("tar", nil, "-xzvf", path.Join(constants.RestoreDir, postgresWalTar), "-C", path.Join(db.datadir, "pg_wal")) + out, err = db.executor.ExecuteCommandWithOutput(ctx, "tar", nil, "-xzvf", path.Join(constants.RestoreDir, postgresWalTar), "-C", path.Join(db.datadir, "pg_wal")) if err != nil { return fmt.Errorf("error untaring wal backup: %s %w", out, err) } @@ -152,7 +153,8 @@ func (db *Postgres) Recover() error { } // Probe figures out if the database is running and available for taking backups. 
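// Editorial note (not part of this diff): the Probe change below keeps the plain TCP dial and adds a
// TODO to probe with a real postgres client instead. A minimal sketch of what such a probe could look
// like is given here, assuming database/sql with the lib/pq driver — the project has not picked a
// driver, so every identifier below is a hypothetical placeholder, not this repository's code.

package postgresprobe

import (
	"context"
	"database/sql"
	"fmt"
	"time"

	_ "github.com/lib/pq" // registers the "postgres" driver (assumption, not a project dependency)
)

// probe opens a short-lived connection and pings the server, so readiness covers authentication
// and query handling rather than just an open TCP port.
func probe(ctx context.Context, host string, port int, user, password, dbname string) error {
	dsn := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=disable connect_timeout=1",
		host, port, user, password, dbname)

	db, err := sql.Open("postgres", dsn)
	if err != nil {
		return fmt.Errorf("cannot open connection: %w", err)
	}
	defer db.Close()

	ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
	defer cancel()

	if err := db.PingContext(ctx); err != nil {
		return fmt.Errorf("postgres is not ready: %w", err)
	}

	return nil
}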
-func (db *Postgres) Probe() error { +func (db *Postgres) Probe(ctx context.Context) error { + // TODO: use postgres client to connect conn, err := net.DialTimeout("tcp", net.JoinHostPort(db.host, strconv.Itoa(db.port)), connectionTimeout) if err != nil { return fmt.Errorf("connection error:%w", err) diff --git a/cmd/internal/database/postgres/upgrade.go b/cmd/internal/database/postgres/upgrade.go index d4f063e..acea17d 100644 --- a/cmd/internal/database/postgres/upgrade.go +++ b/cmd/internal/database/postgres/upgrade.go @@ -1,6 +1,7 @@ package postgres import ( + "context" "errors" "fmt" "io/fs" @@ -32,7 +33,7 @@ var ( ) // Upgrade performs an upgrade of the database in case a newer version of the database is detected. -func (db *Postgres) Upgrade() error { +func (db *Postgres) Upgrade(ctx context.Context) error { start := time.Now() // First check if there are data already present @@ -43,7 +44,7 @@ func (db *Postgres) Upgrade() error { } // If this is a database directory, save actual postgres binaries for a later major upgrade - err := db.copyPostgresBinaries(true) + err := db.copyPostgresBinaries(ctx, true) if err != nil { return err } @@ -64,7 +65,7 @@ func (db *Postgres) Upgrade() error { } // Now check the version of the actual postgres binaries - binaryVersionMajor, err := db.getBinaryVersion(postgresConfigCmd) + binaryVersionMajor, err := db.getBinaryVersion(ctx, postgresConfigCmd) if err != nil { db.log.Errorw("unable to get binary version, skipping upgrade", "error", err) return nil @@ -89,7 +90,7 @@ func (db *Postgres) Upgrade() error { } // We need to upgrade, therefore old binaries are required - oldBinaryVersionMajor, err := db.getBinaryVersion(oldPostgresConfigCmd) + oldBinaryVersionMajor, err := db.getBinaryVersion(ctx, oldPostgresConfigCmd) if err != nil { db.log.Errorw("unable to get old binary version, skipping upgrade", "error", err) return nil @@ -125,6 +126,7 @@ func (db *Postgres) Upgrade() error { cmd := exec.Command(postgresInitDBCmd, "-D", newDataDirTemp) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr + cmd.Env = os.Environ() cmd.SysProcAttr = &syscall.SysProcAttr{ Credential: &syscall.Credential{Uid: uint32(uid)}, } @@ -156,7 +158,7 @@ func (db *Postgres) Upgrade() error { return err } - newPostgresBinDir, err := db.getBinDir(postgresConfigCmd) + newPostgresBinDir, err := db.getBinDir(ctx, postgresConfigCmd) if err != nil { return fmt.Errorf("unable to detect bin dir of actual postgres %w", err) } @@ -168,9 +170,10 @@ func (db *Postgres) Upgrade() error { "--new-bindir", newPostgresBinDir, "--link", } - cmd = exec.Command(postgresUpgradeCmd, pgUpgradeArgs...) //nolint:gosec + cmd = exec.CommandContext(ctx, postgresUpgradeCmd, pgUpgradeArgs...) 
//nolint:gosec cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr + cmd.Env = os.Environ() cmd.SysProcAttr = &syscall.SysProcAttr{ Credential: &syscall.Credential{Uid: uint32(uid)}, } @@ -203,10 +206,10 @@ func (db *Postgres) Upgrade() error { // Helpers -func (db *Postgres) getBinaryVersion(pgConfigCmd string) (int, error) { +func (db *Postgres) getBinaryVersion(ctx context.Context, pgConfigCmd string) (int, error) { // pg_config --version // PostgreSQL 12.16 - cmd := exec.Command(pgConfigCmd, "--version") + cmd := exec.CommandContext(ctx, pgConfigCmd, "--version") out, err := cmd.CombinedOutput() if err != nil { return 0, fmt.Errorf("unable to detect postgres binary version: %w", err) @@ -254,8 +257,8 @@ func (db *Postgres) isCommandPresent(command string) bool { return true } -func (db *Postgres) getBinDir(pgConfigCmd string) (string, error) { - cmd := exec.Command(pgConfigCmd, "--bindir") +func (db *Postgres) getBinDir(ctx context.Context, pgConfigCmd string) (string, error) { + cmd := exec.CommandContext(ctx, pgConfigCmd, "--bindir") out, err := cmd.CombinedOutput() if err != nil { return "", err @@ -265,13 +268,13 @@ func (db *Postgres) getBinDir(pgConfigCmd string) (string, error) { } // copyPostgresBinaries is needed to save old postgres binaries for a later major upgrade -func (db *Postgres) copyPostgresBinaries(override bool) error { - binDir, err := db.getBinDir(postgresConfigCmd) +func (db *Postgres) copyPostgresBinaries(ctx context.Context, override bool) error { + binDir, err := db.getBinDir(ctx, postgresConfigCmd) if err != nil { return err } - version, err := db.getBinaryVersion(postgresConfigCmd) + version, err := db.getBinaryVersion(ctx, postgresConfigCmd) if err != nil { return err } @@ -291,7 +294,7 @@ func (db *Postgres) copyPostgresBinaries(override bool) error { } db.log.Infow("copying postgres binaries for later upgrades", "from", binDir, "to", pgBinDir) - copy := exec.Command("cp", "-av", binDir, pgBinDir) + copy := exec.CommandContext(ctx, "cp", "-av", binDir, pgBinDir) copy.Stdout = os.Stdout copy.Stderr = os.Stderr err = copy.Run() diff --git a/cmd/internal/database/rethinkdb/rethinkdb.go b/cmd/internal/database/rethinkdb/rethinkdb.go index ea3c56f..620ade6 100644 --- a/cmd/internal/database/rethinkdb/rethinkdb.go +++ b/cmd/internal/database/rethinkdb/rethinkdb.go @@ -3,7 +3,6 @@ package rethinkdb import ( "context" "fmt" - "net" "os" "os/exec" "path/filepath" @@ -13,16 +12,18 @@ import ( "errors" - "github.com/metal-stack/backup-restore-sidecar/cmd/internal/constants" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/probe" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/utils" + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" "go.uber.org/zap" + "golang.org/x/sync/errgroup" + + r "gopkg.in/rethinkdb/rethinkdb-go.v6" ) const ( - connectionTimeout = 1 * time.Second - restoreDatabaseStartupTimeout = 30 * time.Second - restoreDatabaseShutdownTimeout = 10 * time.Second + connectionTimeout = 1 * time.Second + restoreDatabaseStartupTimeout = 30 * time.Second rethinkDBCmd = "rethinkdb" rethinkDBDumpCmd = "rethinkdb-dump" @@ -41,13 +42,11 @@ type RethinkDB struct { passwordFile string log *zap.SugaredLogger executor *utils.CmdExecutor - ctx context.Context } // New instantiates a new rethinkdb database -func New(ctx context.Context, log *zap.SugaredLogger, datadir string, url string, passwordFile string) *RethinkDB { +func New(log *zap.SugaredLogger, datadir string, url string, passwordFile string) *RethinkDB { return &RethinkDB{ - ctx: 
ctx, log: log, datadir: datadir, url: url, @@ -57,7 +56,7 @@ func New(ctx context.Context, log *zap.SugaredLogger, datadir string, url string } // Check checks whether a backup needs to be restored or not, returns true if it needs a backup -func (db *RethinkDB) Check() (bool, error) { +func (db *RethinkDB) Check(_ context.Context) (bool, error) { empty, err := utils.IsEmpty(db.datadir) if err != nil { return false, err @@ -71,7 +70,7 @@ func (db *RethinkDB) Check() (bool, error) { } // Backup takes a backup of the database -func (db *RethinkDB) Backup() error { +func (db *RethinkDB) Backup(ctx context.Context) error { if err := os.RemoveAll(constants.BackupDir); err != nil { return fmt.Errorf("could not clean backup directory: %w", err) } @@ -88,9 +87,10 @@ func (db *RethinkDB) Backup() error { args = append(args, "--connect="+db.url) } - out, err := db.executor.ExecuteCommandWithOutput(rethinkDBDumpCmd, nil, args...) + out, err := db.executor.ExecuteCommandWithOutput(ctx, rethinkDBDumpCmd, nil, args...) + fmt.Println(out) if err != nil { - return fmt.Errorf("error running backup command: %s %w", out, err) + return fmt.Errorf("error running backup command: %w", err) } if strings.Contains(out, "0 rows exported from 0 tables, with 0 secondary indexes, and 0 hook functions") { @@ -101,55 +101,88 @@ func (db *RethinkDB) Backup() error { return fmt.Errorf("backup file was not created: %s", rethinkDBBackupFilePath) } - db.log.Debugw("successfully took backup of rethinkdb database", "output", out) + db.log.Debugw("successfully took backup of rethinkdb database") return nil } // Recover restores a database backup -func (db *RethinkDB) Recover() error { +func (db *RethinkDB) Recover(ctx context.Context) error { if _, err := os.Stat(rethinkDBRestoreFilePath); os.IsNotExist(err) { return fmt.Errorf("restore file not present: %s", rethinkDBRestoreFilePath) } + passwordRaw, err := os.ReadFile(db.passwordFile) + if err != nil { + return fmt.Errorf("unable to read rethinkdb password file at %s: %w", db.passwordFile, err) + } + // rethinkdb requires to be running when restoring a backup. // however, if we let the real database container start, we cannot interrupt it anymore in case // an issue occurs during the restoration. therefore, we spin up an own instance of rethinkdb // inside the sidecar against which we can restore. 
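// Editorial note (not part of this diff): the comment above summarizes the restore strategy of the
// rewritten Recover below — run a private rethinkdb instance inside the sidecar, restore against it,
// and clean the data directory if anything fails so that a later pod restart cannot mistake a
// half-restored directory for a fresh database. A condensed, self-contained sketch of that control
// flow follows; the startDB, probeReady and restoreDump callbacks are hypothetical placeholders and
// do not exist in this repository.

package restoreflow

import (
	"context"
	"fmt"
	"os"
	"time"

	"golang.org/x/sync/errgroup"
)

func restoreAgainstTemporaryInstance(
	ctx context.Context,
	datadir string,
	startDB func(ctx context.Context) error, // runs the temporary DB process until its ctx is cancelled
	probeReady func(ctx context.Context) error, // polls until the temporary DB answers
	restoreDump func(ctx context.Context) error, // feeds the dump into the temporary DB
) error {
	g, _ := errgroup.WithContext(ctx)
	dbCtx, stopDB := context.WithCancel(ctx) // cancelling terminates the temporary database process
	defer stopDB()

	fail := func(err error) error {
		stopDB()
		_ = g.Wait() // wait for the database goroutine before touching its data directory
		// remove the half-restored data directory, otherwise the next start would be
		// treated as a fresh database and the restore would silently be skipped
		_ = os.RemoveAll(datadir)
		return err
	}

	g.Go(func() error {
		return startDB(dbCtx)
	})

	probeCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	if err := probeReady(probeCtx); err != nil {
		return fail(fmt.Errorf("temporary database did not come up: %w", err))
	}

	if err := restoreDump(ctx); err != nil {
		return fail(fmt.Errorf("restore failed: %w", err))
	}

	stopDB()
	return g.Wait()
}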
- db.log.Infow("starting rethinkdb database within sidecar for restore") - //nolint - cmd := exec.Command(rethinkDBCmd, "--bind", "all", "--driver-port", "1", "--directory", db.datadir) - if err := cmd.Start(); err != nil { - return fmt.Errorf("unable to start database within sidecar for restore: %w", err) - } - defer func() { - _ = cmd.Process.Kill() - }() + var ( + cmd *exec.Cmd + g, _ = errgroup.WithContext(ctx) + rethinkdbCtx, cancelRethinkdb = context.WithCancel(ctx) // cancel sends a KILL signal to the process - db.log.Infow("waiting for rethinkdb database to come up") + // IMPORTANT: when the recovery goes wrong, the database directory MUST be cleaned up + // otherwise on pod restart the database directory is not empty anymore and + // the backup-restore-sidecar will assume it's a fresh database and let the + // database start without restored data, which can mess up things big time - restoreDB := New(db.ctx, db.log, db.datadir, "localhost:1", "") + handleFailedRecovery = func(restoreErr error) error { + db.log.Errorw("trying to handle failed database recovery", "error", restoreErr) - done := make(chan bool) - defer close(done) + // kill the rethinkdb process + cancelRethinkdb() - probeCtx, probeCancel := context.WithTimeout(context.Background(), restoreDatabaseStartupTimeout) - defer probeCancel() + db.log.Info("waiting for async rethinkdb go routine to stop") + + err := g.Wait() + if err != nil { + db.log.Errorw("rethinkdb go routine finished with error", "error", err) + } - var err error - go func() { - err = probe.Start(probeCtx, restoreDB.log, restoreDB) - done <- true - }() - select { - case <-done: + if err := os.RemoveAll(db.datadir); err != nil { + db.log.Errorw("unable to cleanup database data directory after failed recovery attempt, high risk of starting with fresh database on container restart", "err", err) + } else { + db.log.Info("cleaned up database data directory after failed recovery attempt to prevent start of fresh database") + } + + return restoreErr + } + ) + + defer cancelRethinkdb() + + g.Go(func() error { + args := []string{"--bind", "all", "--driver-port", "1", "--directory", db.datadir, "--initial-password", strings.TrimSpace(string(passwordRaw))} + db.log.Debugw("execute rethinkdb", "args", args) + + cmd = exec.CommandContext(rethinkdbCtx, rethinkDBCmd, args...) // nolint:gosec + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() if err != nil { - return fmt.Errorf("error while probing: %w", err) + return fmt.Errorf("unable to run rethinkdb: %w", err) } - db.log.Infow("rethinkdb in sidecar is now available, now triggering restore commands...") - case <-probeCtx.Done(): - return errors.New("rethinkdb database did not come up in time") + + db.log.Info("rethinkdb process finished") + + return nil + }) + + db.log.Infow("waiting for rethinkdb database to come up") + + probeCtx, probeCancel := context.WithTimeout(ctx, restoreDatabaseStartupTimeout) + defer probeCancel() + + restoreDB := New(db.log, db.datadir, "localhost:1", db.passwordFile) + err = probe.Start(probeCtx, restoreDB.log, restoreDB) + if err != nil { + return handleFailedRecovery(fmt.Errorf("rethinkdb did not come up: %w", err)) } args := []string{} @@ -161,41 +194,54 @@ func (db *RethinkDB) Recover() error { } args = append(args, rethinkDBRestoreFilePath) - out, err := db.executor.ExecuteCommandWithOutput(rethinkDBRestoreCmd, nil, args...) + out, err := db.executor.ExecuteCommandWithOutput(ctx, rethinkDBRestoreCmd, nil, args...) 
+ fmt.Println(out) if err != nil { - return fmt.Errorf("error running restore command: %s %w", out, err) + return handleFailedRecovery(fmt.Errorf("error running restore command: %w", err)) } if err := cmd.Process.Signal(syscall.SIGTERM); err != nil { - return fmt.Errorf("failed to send sigterm signal to rethinkdb: %w", err) + db.log.Errorw("failed to send sigterm signal to rethinkdb, killing it", "error", err) + cancelRethinkdb() } - wait := make(chan error) - go func() { wait <- cmd.Wait() }() - select { - case err := <-wait: - if err != nil { - return fmt.Errorf("rethinkdb did not shutdown cleanly: %w", err) - } - db.log.Infow("successfully restored rethinkdb database", "output", out) - case <-time.After(restoreDatabaseShutdownTimeout): - return fmt.Errorf("rethinkdb did not shutdown cleanly after %s", restoreDatabaseShutdownTimeout) + err = g.Wait() + if err != nil { + db.log.Errorw("rethinkdb process not properly terminated, but restore was successful", "error", err) + } else { + db.log.Infow("successfully restored rethinkdb database") } return nil } // Probe figures out if the database is running and available for taking backups. -func (db *RethinkDB) Probe() error { - conn, err := net.DialTimeout("tcp", db.url, connectionTimeout) +func (db *RethinkDB) Probe(ctx context.Context) error { + passwordRaw, err := os.ReadFile(db.passwordFile) if err != nil { - return fmt.Errorf("connection error: %w", err) + return fmt.Errorf("unable to read rethinkdb password file at %s: %w", db.passwordFile, err) } - defer conn.Close() + + session, err := r.Connect(r.ConnectOpts{ + Addresses: []string{db.url}, + Username: "admin", + Password: strings.TrimSpace(string(passwordRaw)), + MaxIdle: 10, + MaxOpen: 20, + }) + if err != nil { + return fmt.Errorf("cannot create rethinkdb client: %w", err) + } + + _, err = r.DB("rethinkdb").Table("server_status").Run(session) + if err != nil { + return fmt.Errorf("error retrieving rethinkdb server status: %w", err) + } + return nil } // Upgrade performs an upgrade of the database in case a newer version of the database is detected. -func (db *RethinkDB) Upgrade() error { +func (db *RethinkDB) Upgrade(_ context.Context) error { return nil } diff --git a/cmd/internal/initializer/client.go b/cmd/internal/initializer/client.go deleted file mode 100644 index 07696f0..0000000 --- a/cmd/internal/initializer/client.go +++ /dev/null @@ -1,35 +0,0 @@ -package initializer - -import ( - "context" - "fmt" - "net/url" - - v1 "github.com/metal-stack/backup-restore-sidecar/api/v1" - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" -) - -// NewInitializerClient returns a new initializer client. -func NewInitializerClient(ctx context.Context, rawurl string, log *zap.SugaredLogger) (v1.InitializerServiceClient, error) { - parsedurl, err := url.Parse(rawurl) - if err != nil { - return nil, err - } - if parsedurl.Host == "" { - return nil, fmt.Errorf("invalid url:%s, must be in the form scheme://host[:port]/basepath", rawurl) - } - - opts := []grpc.DialOption{ - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithBlock(), - } - - conn, err := grpc.DialContext(ctx, parsedurl.Host, opts...) 
- if err != nil { - return nil, err - } - - return v1.NewInitializerServiceClient(conn), nil -} diff --git a/cmd/internal/initializer/initializer.go b/cmd/internal/initializer/initializer.go index ec214fc..e2fffed 100644 --- a/cmd/internal/initializer/initializer.go +++ b/cmd/internal/initializer/initializer.go @@ -10,10 +10,12 @@ import ( "strings" v1 "github.com/metal-stack/backup-restore-sidecar/api/v1" + "github.com/metal-stack/backup-restore-sidecar/cmd/internal/backup" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/backup/providers" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/compress" - "github.com/metal-stack/backup-restore-sidecar/cmd/internal/constants" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/database" + "github.com/metal-stack/backup-restore-sidecar/cmd/internal/metrics" + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" "go.uber.org/zap" "google.golang.org/grpc" @@ -31,10 +33,11 @@ type Initializer struct { db database.Database bp providers.BackupProvider comp *compress.Compressor + metrics *metrics.Metrics dbDataDir string } -func New(log *zap.SugaredLogger, addr string, db database.Database, bp providers.BackupProvider, comp *compress.Compressor, dbDataDir string) *Initializer { +func New(log *zap.SugaredLogger, addr string, db database.Database, bp providers.BackupProvider, comp *compress.Compressor, metrics *metrics.Metrics, dbDataDir string) *Initializer { return &Initializer{ currentStatus: &v1.StatusResponse{ Status: v1.StatusResponse_CHECKING, @@ -46,6 +49,7 @@ func New(log *zap.SugaredLogger, addr string, db database.Database, bp providers bp: bp, comp: comp, dbDataDir: dbDataDir, + metrics: metrics, } } @@ -66,9 +70,23 @@ func (i *Initializer) Start(ctx context.Context) { grpcServer := grpc.NewServer(opts...) 
- initializerService := newService(i.currentStatus) + initializerService := newInitializerService(i.currentStatus) + backupService := newBackupProviderService(i.bp, i.Restore) + databaseService := newDatabaseService(func() error { + backuper := backup.New(&backup.BackuperConfig{ + Log: i.log, + DatabaseProber: i.db, + BackupProvider: i.bp, + Metrics: i.metrics, + Compressor: i.comp, + }) + + return backuper.CreateBackup(ctx) + }) v1.RegisterInitializerServiceServer(grpcServer, initializerService) + v1.RegisterBackupServiceServer(grpcServer, backupService) + v1.RegisterDatabaseServiceServer(grpcServer, databaseService) i.log.Infow("start initializer server", "address", i.addr) @@ -89,14 +107,14 @@ func (i *Initializer) Start(ctx context.Context) { } }() - err = i.initialize() + err = i.initialize(ctx) if err != nil { i.log.Fatalw("error initializing database, shutting down", "error", err) } i.currentStatus.Status = v1.StatusResponse_UPGRADING i.currentStatus.Message = "start upgrading database" - err = i.db.Upgrade() + err = i.db.Upgrade(ctx) if err != nil { i.log.Fatalw("upgrade database failed", "error", err) } @@ -106,7 +124,7 @@ func (i *Initializer) Start(ctx context.Context) { i.currentStatus.Message = "done" } -func (i *Initializer) initialize() error { +func (i *Initializer) initialize(ctx context.Context) error { i.log.Info("start running initializer") i.log.Info("ensuring database data directory") @@ -117,7 +135,7 @@ func (i *Initializer) initialize() error { i.log.Info("ensuring backup bucket") i.currentStatus.Message = "ensuring backup bucket" - err = i.bp.EnsureBackupBucket() + err = i.bp.EnsureBackupBucket(ctx) if err != nil { return fmt.Errorf("unable to ensure backup bucket: %w", err) } @@ -126,7 +144,7 @@ func (i *Initializer) initialize() error { i.currentStatus.Status = v1.StatusResponse_CHECKING i.currentStatus.Message = "checking database" - needsBackup, err := i.db.Check() + needsBackup, err := i.db.Check(ctx) if err != nil { return fmt.Errorf("unable to check data of database: %w", err) } @@ -138,7 +156,7 @@ func (i *Initializer) initialize() error { i.log.Info("database potentially needs to be restored, looking for backup") - versions, err := i.bp.ListBackups() + versions, err := i.bp.ListBackups(ctx) if err != nil { return fmt.Errorf("unable retrieve backup versions: %w", err) } @@ -149,7 +167,7 @@ func (i *Initializer) initialize() error { return nil } - err = i.Restore(latestBackup) + err = i.Restore(ctx, latestBackup) if err != nil { return fmt.Errorf("unable to restore database: %w", err) } @@ -158,7 +176,7 @@ func (i *Initializer) initialize() error { } // Restore restores the database with the given backup version -func (i *Initializer) Restore(version *providers.BackupVersion) error { +func (i *Initializer) Restore(ctx context.Context, version *providers.BackupVersion) error { i.log.Infow("restoring backup", "version", version.Version, "date", version.Date.String()) i.currentStatus.Status = v1.StatusResponse_RESTORING @@ -183,7 +201,7 @@ func (i *Initializer) Restore(version *providers.BackupVersion) error { return fmt.Errorf("could not delete priorly downloaded file: %w", err) } - err := i.bp.DownloadBackup(version) + err := i.bp.DownloadBackup(ctx, version) if err != nil { return fmt.Errorf("unable to download backup: %w", err) } @@ -195,7 +213,7 @@ func (i *Initializer) Restore(version *providers.BackupVersion) error { } i.currentStatus.Message = "restoring backup" - err = i.db.Recover() + err = i.db.Recover(ctx) if err != nil { return 
fmt.Errorf("restoring database was not successful: %w", err) } diff --git a/cmd/internal/initializer/service.go b/cmd/internal/initializer/service.go index 291c356..ca7905d 100644 --- a/cmd/internal/initializer/service.go +++ b/cmd/internal/initializer/service.go @@ -2,19 +2,101 @@ package initializer import ( "context" + "fmt" + v1 "github.com/metal-stack/backup-restore-sidecar/api/v1" + "github.com/metal-stack/backup-restore-sidecar/cmd/internal/backup/providers" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" ) -type service struct { +type initializerService struct { currentStatus *v1.StatusResponse } -func newService(currentStatus *v1.StatusResponse) service { - return service{ +func newInitializerService(currentStatus *v1.StatusResponse) *initializerService { + return &initializerService{ currentStatus: currentStatus, } } -func (i service) Status(context.Context, *v1.Empty) (*v1.StatusResponse, error) { - return i.currentStatus, nil +func (s *initializerService) Status(context.Context, *v1.StatusRequest) (*v1.StatusResponse, error) { + return s.currentStatus, nil +} + +type backupService struct { + bp providers.BackupProvider + restoreFn func(ctx context.Context, version *providers.BackupVersion) error +} + +func newBackupProviderService(bp providers.BackupProvider, restoreFn func(ctx context.Context, version *providers.BackupVersion) error) *backupService { + return &backupService{ + bp: bp, + restoreFn: restoreFn, + } +} + +func (s *backupService) ListBackups(ctx context.Context, _ *v1.ListBackupsRequest) (*v1.BackupListResponse, error) { + versions, err := s.bp.ListBackups(ctx) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + backups := versions.List() + versions.Sort(backups, false) + + response := &v1.BackupListResponse{} + for _, b := range backups { + b := b + response.Backups = append(response.Backups, &v1.Backup{ + Name: b.Name, + Version: b.Version, + Timestamp: timestamppb.New(b.Date), + }) + } + + return response, nil +} + +func (s *backupService) RestoreBackup(ctx context.Context, req *v1.RestoreBackupRequest) (*v1.RestoreBackupResponse, error) { + if req.Version == "" { + return nil, status.Error(codes.InvalidArgument, "version to restore must be defined explicitly") + } + + versions, err := s.bp.ListBackups(ctx) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + version, err := versions.Get(req.Version) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + err = s.restoreFn(ctx, version) + if err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("error restoring backup: %s", err)) + } + + return &v1.RestoreBackupResponse{}, nil +} + +type databaseService struct { + backupFn func() error +} + +func newDatabaseService(backupFn func() error) *databaseService { + return &databaseService{ + backupFn: backupFn, + } +} + +func (s *databaseService) CreateBackup(ctx context.Context, _ *v1.CreateBackupRequest) (*v1.CreateBackupResponse, error) { + err := s.backupFn() + if err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("error creating backup: %s", err)) + } + + return &v1.CreateBackupResponse{}, nil } diff --git a/cmd/internal/probe/probe.go b/cmd/internal/probe/probe.go index 543ecad..472e3a3 100644 --- a/cmd/internal/probe/probe.go +++ b/cmd/internal/probe/probe.go @@ -22,7 +22,7 @@ func Start(ctx context.Context, log *zap.SugaredLogger, db database.DatabaseProb case <-ctx.Done(): 
return errors.New("received stop signal, stop probing") case <-time.After(probeInterval): - err := db.Probe() + err := db.Probe(ctx) if err == nil { return nil } diff --git a/cmd/internal/utils/cmd.go b/cmd/internal/utils/cmd.go index d8e2fdc..37a1ed5 100644 --- a/cmd/internal/utils/cmd.go +++ b/cmd/internal/utils/cmd.go @@ -20,13 +20,13 @@ func NewExecutor(log *zap.SugaredLogger) *CmdExecutor { } } -func (c *CmdExecutor) ExecuteCommandWithOutput(command string, env []string, arg ...string) (string, error) { +func (c *CmdExecutor) ExecuteCommandWithOutput(ctx context.Context, command string, env []string, arg ...string) (string, error) { commandWithPath, err := exec.LookPath(command) if err != nil { return fmt.Sprintf("unable to find command:%s in path", command), err } c.log.Infow("running command", "command", commandWithPath, "args", strings.Join(arg, " ")) - cmd := exec.Command(commandWithPath, arg...) + cmd := exec.CommandContext(ctx, commandWithPath, arg...) cmd.Env = os.Environ() cmd.Env = append(cmd.Env, env...) return runCommandWithOutput(cmd, true) diff --git a/cmd/internal/utils/files.go b/cmd/internal/utils/files.go index 98c9383..a2fceef 100644 --- a/cmd/internal/utils/files.go +++ b/cmd/internal/utils/files.go @@ -5,6 +5,8 @@ import ( "io" "os" "path/filepath" + + "github.com/spf13/afero" ) // IsEmpty returns whether a directory is empty or not @@ -43,14 +45,14 @@ func RemoveContents(dir string) error { } // Copy copies a file from source to a destination -func Copy(src, dst string) error { - in, err := os.Open(src) +func Copy(fs afero.Fs, src, dst string) error { + in, err := fs.Open(src) if err != nil { return err } defer in.Close() - out, err := os.Create(dst) + out, err := fs.Create(dst) if err != nil { return err } @@ -60,5 +62,6 @@ func Copy(src, dst string) error { if err != nil { return err } + return nil } diff --git a/cmd/internal/wait/wait.go b/cmd/internal/wait/wait.go index 480ad03..53bb766 100644 --- a/cmd/internal/wait/wait.go +++ b/cmd/internal/wait/wait.go @@ -5,7 +5,7 @@ import ( "time" v1 "github.com/metal-stack/backup-restore-sidecar/api/v1" - "github.com/metal-stack/backup-restore-sidecar/cmd/internal/initializer" + "github.com/metal-stack/backup-restore-sidecar/pkg/client" "go.uber.org/zap" ) @@ -15,7 +15,7 @@ const ( // Start starts a wait component that will return when the initializer server has done its job func Start(ctx context.Context, log *zap.SugaredLogger, addr string) error { - client, err := initializer.NewInitializerClient(ctx, addr, log) + client, err := client.New(ctx, addr) if err != nil { return err } @@ -28,7 +28,7 @@ func Start(ctx context.Context, log *zap.SugaredLogger, addr string) error { log.Info("received stop signal, shutting down") return nil case <-time.After(waitInterval): - resp, err := client.Status(ctx, &v1.Empty{}) + resp, err := client.InitializerServiceClient().Status(ctx, &v1.StatusRequest{}) if err != nil { log.Errorw("error retrieving initializer server response", "error", err) continue diff --git a/cmd/main.go b/cmd/main.go index 7f856fa..1d86ae6 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -9,13 +9,13 @@ import ( "os/signal" "strings" + v1 "github.com/metal-stack/backup-restore-sidecar/api/v1" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/backup" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/backup/providers" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/backup/providers/gcp" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/backup/providers/local" 
"github.com/metal-stack/backup-restore-sidecar/cmd/internal/backup/providers/s3" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/compress" - "github.com/metal-stack/backup-restore-sidecar/cmd/internal/constants" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/database" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/database/etcd" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/database/postgres" @@ -25,6 +25,8 @@ import ( "github.com/metal-stack/backup-restore-sidecar/cmd/internal/probe" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/utils" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/wait" + "github.com/metal-stack/backup-restore-sidecar/pkg/client" + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" "github.com/metal-stack/v" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -102,10 +104,6 @@ var rootCmd = &cobra.Command{ PersistentPreRunE: func(cmd *cobra.Command, args []string) error { initLogging() initConfig() - initSignalHandlers() - if err := initDatabase(); err != nil { - return err - } return nil }, } @@ -115,6 +113,10 @@ var startCmd = &cobra.Command{ Short: "starts the sidecar", Long: "the initializer will prepare starting the database. if there is no data or corrupt data, it checks whether there is a backup available and restore it prior to running allow running the database. The sidecar will then wait until the database is available and then take backups periodically", PreRunE: func(cmd *cobra.Command, args []string) error { + initSignalHandlers() + if err := initDatabase(); err != nil { + return err + } return initBackupProvider() }, RunE: func(cmd *cobra.Command, args []string) error { @@ -137,13 +139,42 @@ var startCmd = &cobra.Command{ if err != nil { return err } - initializer.New(logger.Named("initializer"), addr, db, bp, comp, viper.GetString(databaseDatadirFlg)).Start(stop) + + metrics := metrics.New() + metrics.Start(logger.Named("metrics")) + + initializer.New(logger.Named("initializer"), addr, db, bp, comp, metrics, viper.GetString(databaseDatadirFlg)).Start(stop) if err := probe.Start(stop, logger.Named("probe"), db); err != nil { return err } - metrics := metrics.New() - metrics.Start(logger.Named("metrics")) - return backup.Start(stop, logger.Named("backup"), viper.GetString(backupCronScheduleFlg), db, bp, metrics, comp) + + backuper := backup.New(&backup.BackuperConfig{ + Log: logger.Named("backup"), + BackupSchedule: viper.GetString(backupCronScheduleFlg), + DatabaseProber: db, + BackupProvider: bp, + Metrics: metrics, + Compressor: comp, + }) + + return backuper.Start(stop) + }, +} + +var createBackupCmd = &cobra.Command{ + Use: "create-backup", + Short: "create backup takes a database backup out of the regular time schedule", + PreRunE: func(cmd *cobra.Command, args []string) error { + return initBackupProvider() + }, + RunE: func(cmd *cobra.Command, args []string) error { + c, err := client.New(context.Background(), viper.GetString(serverAddrFlg)) + if err != nil { + return fmt.Errorf("error creating client: %w", err) + } + + _, err = c.DatabaseServiceClient().CreateBackup(context.Background(), &v1.CreateBackupRequest{}) + return err }, } @@ -157,19 +188,16 @@ var restoreCmd = &cobra.Command{ if len(args) == 0 { return errors.New("no version argument given") } - versions, err := bp.ListBackups() - if err != nil { - return err - } - version, err := versions.Get(args[0]) - if err != nil { - return err - } - comp, err := compress.New(viper.GetString(compressionMethod)) 
+ + c, err := client.New(context.Background(), viper.GetString(serverAddrFlg)) if err != nil { - return err + return fmt.Errorf("error creating client: %w", err) } - return initializer.New(logger.Named("initializer"), "", db, bp, comp, viper.GetString(databaseDatadirFlg)).Restore(version) + + _, err = c.BackupServiceClient().RestoreBackup(context.Background(), &v1.RestoreBackupRequest{ + Version: args[0], + }) + return err }, } @@ -177,22 +205,24 @@ var restoreListCmd = &cobra.Command{ Use: "list-versions", Aliases: []string{"ls"}, Short: "lists available backups", - PreRunE: func(cmd *cobra.Command, args []string) error { - return initBackupProvider() - }, RunE: func(cmd *cobra.Command, args []string) error { - versions, err := bp.ListBackups() + c, err := client.New(context.Background(), viper.GetString(serverAddrFlg)) if err != nil { - return err + return fmt.Errorf("error creating client: %w", err) } - backups := versions.List() - versions.Sort(backups, false) + + backups, err := c.BackupServiceClient().ListBackups(context.Background(), &v1.ListBackupsRequest{}) + if err != nil { + return fmt.Errorf("error listing backups: %w", err) + } + var data [][]string - for _, b := range backups { - data = append(data, []string{b.Date.String(), b.Name, b.Version}) + for _, b := range backups.Backups { + data = append(data, []string{b.Timestamp.AsTime().String(), b.Name, b.Version}) } + p := utils.NewTablePrinter() - p.Print([]string{"Data", "Name", "Version"}, data) + p.Print([]string{"Date", "Name", "Version"}, data) return nil }, } @@ -200,6 +230,9 @@ var restoreListCmd = &cobra.Command{ var waitCmd = &cobra.Command{ Use: "wait", Short: "waits for the initializer to be done", + PreRun: func(cmd *cobra.Command, args []string) { + initSignalHandlers() + }, RunE: func(cmd *cobra.Command, args []string) error { if err := wait.Start(stop, logger.Named("wait"), viper.GetString(serverAddrFlg)); err != nil { return err @@ -229,7 +262,7 @@ func main() { } func init() { - rootCmd.AddCommand(startCmd, waitCmd, restoreCmd) + rootCmd.AddCommand(startCmd, waitCmd, restoreCmd, createBackupCmd) rootCmd.PersistentFlags().StringP(logLevelFlg, "", "info", "sets the application log level") rootCmd.PersistentFlags().StringP(databaseFlg, "", "", "the kind of the database [postgres|rethinkdb|etcd]") @@ -377,7 +410,6 @@ func initDatabase() error { ) case "rethinkdb": db = rethinkdb.New( - stop, logger.Named("rethinkdb"), datadir, viper.GetString(rethinkDBURLFlg), @@ -408,6 +440,7 @@ func initBackupProvider() error { switch bpString { case "gcp": bp, err = gcp.New( + context.Background(), logger.Named("backup"), &gcp.BackupProviderConfigGCP{ ObjectPrefix: viper.GetString(objectPrefixFlg), diff --git a/deploy/etcd-local.yaml b/deploy/etcd-local.yaml index 0def8a0..d822603 100644 --- a/deploy/etcd-local.yaml +++ b/deploy/etcd-local.yaml @@ -1,139 +1,152 @@ +# DO NOT EDIT! 
This is auto-generated by the integration tests +--- apiVersion: apps/v1 kind: StatefulSet metadata: + creationTimestamp: null labels: - app: etcd-statefulset - instance: etcd-main - name: etcd - role: main - name: etcd-main + app: etcd + name: etcd spec: - podManagementPolicy: OrderedReady replicas: 1 - revisionHistoryLimit: 10 selector: matchLabels: - instance: etcd-main - name: etcd - serviceName: etcd-main-client + app: etcd + serviceName: etcd template: metadata: + creationTimestamp: null labels: - app: etcd-statefulset - instance: etcd-main - name: etcd - role: main + app: etcd spec: containers: - - image: quay.io/coreos/etcd:v3.5.7 - # can also be gcr.io/etcd-development/etcd - name: etcd - command: + - command: - backup-restore-sidecar - wait - imagePullPolicy: IfNotPresent + image: quay.io/coreos/etcd:v3.5.7 livenessProbe: exec: command: - /usr/local/bin/etcdctl - endpoint - health + - --endpoints=127.0.0.1:32379 failureThreshold: 3 initialDelaySeconds: 15 periodSeconds: 5 successThreshold: 1 timeoutSeconds: 1 + name: etcd ports: - - containerPort: 2380 - name: server - protocol: TCP - - containerPort: 2379 + - containerPort: 32379 name: client protocol: TCP + - containerPort: 32380 + name: server + protocol: TCP readinessProbe: failureThreshold: 3 httpGet: path: /health - port: 2381 + port: 32381 scheme: HTTP initialDelaySeconds: 15 periodSeconds: 5 successThreshold: 1 timeoutSeconds: 1 - resources: - limits: - cpu: 900m - memory: 3G - requests: - cpu: 300m - memory: 1G - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File + resources: {} volumeMounts: - mountPath: /data - name: etcd - - name: bin-provision + name: data + - mountPath: /usr/local/bin/backup-restore-sidecar + name: bin-provision subPath: backup-restore-sidecar - mountPath: /usr/local/bin/backup-restore-sidecar - - name: backup-restore-sidecar-config - mountPath: /etc/backup-restore-sidecar - - image: quay.io/coreos/etcd:v3.5.7 - name: backup-restore-sidecar - command: + - mountPath: /etc/backup-restore-sidecar + name: backup-restore-sidecar-config + - command: - backup-restore-sidecar - start - --log-level=debug - env: [] + image: quay.io/coreos/etcd:v3.5.7 + name: backup-restore-sidecar + ports: + - containerPort: 8000 + name: grpc + resources: {} volumeMounts: - - name: etcd - mountPath: /data - - name: bin-provision + - mountPath: /backup + name: backup + - mountPath: /data + name: data + - mountPath: /etc/backup-restore-sidecar + name: backup-restore-sidecar-config + - mountPath: /usr/local/bin/backup-restore-sidecar + name: bin-provision subPath: backup-restore-sidecar - mountPath: /usr/local/bin/backup-restore-sidecar - - name: backup-restore-sidecar-config - mountPath: /etc/backup-restore-sidecar initContainers: - - name: backup-restore-sidecar-provider - image: ghcr.io/metal-stack/backup-restore-sidecar:latest - imagePullPolicy: IfNotPresent - command: + - command: - cp - /backup-restore-sidecar - /bin-provision - ports: - - containerPort: 2112 + image: ghcr.io/metal-stack/backup-restore-sidecar:latest + imagePullPolicy: IfNotPresent + name: backup-restore-sidecar-provider + resources: {} volumeMounts: - - name: bin-provision - mountPath: /bin-provision + - mountPath: /bin-provision + name: bin-provision volumes: - - name: etcd + - name: data + persistentVolumeClaim: + claimName: data + - name: backup persistentVolumeClaim: - claimName: etcd - - name: backup-restore-sidecar-config - configMap: - name: backup-restore-sidecar-config-etcd - - name: bin-provision - emptyDir: {} + 
claimName: backup + - configMap: + name: backup-restore-sidecar-config-postgres + name: backup-restore-sidecar-config + - emptyDir: {} + name: bin-provision + updateStrategy: {} volumeClaimTemplates: - metadata: - name: etcd + creationTimestamp: null + name: data spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi + status: {} + - metadata: + creationTimestamp: null + name: backup + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + status: {} +status: + availableReplicas: 0 + replicas: 0 --- apiVersion: v1 -kind: ConfigMap -metadata: - name: backup-restore-sidecar-config-etcd data: config.yaml: | + --- + bind-addr: 0.0.0.0 db: etcd db-data-directory: /data/etcd/ backup-provider: local backup-cron-schedule: "*/1 * * * *" object-prefix: etcd-test + etcd-endpoints: http://localhost:32379 post-exec-cmds: - - etcd --data-dir=/data/etcd --listen-metrics-urls http://0.0.0.0:2381 + - etcd --data-dir=/data/etcd --listen-client-urls http://0.0.0.0:32379 --advertise-client-urls http://0.0.0.0:32379 --listen-peer-urls http://0.0.0.0:32380 --initial-advertise-peer-urls http://0.0.0.0:32380 --initial-cluster default=http://0.0.0.0:32380 --listen-metrics-urls http://0.0.0.0:32381 +kind: ConfigMap +metadata: + creationTimestamp: null + name: backup-restore-sidecar-config-postgres diff --git a/deploy/postgres-local.yaml b/deploy/postgres-local.yaml index 0a5e0bc..ffabd86 100644 --- a/deploy/postgres-local.yaml +++ b/deploy/postgres-local.yaml @@ -1,29 +1,28 @@ +# DO NOT EDIT! This is auto-generated by the integration tests --- apiVersion: apps/v1 kind: StatefulSet metadata: + creationTimestamp: null labels: app: postgres name: postgres spec: - serviceName: postgres replicas: 1 selector: matchLabels: app: postgres + serviceName: postgres template: metadata: + creationTimestamp: null labels: app: postgres spec: containers: - - image: postgres:12-alpine - name: postgres - command: + - command: - backup-restore-sidecar - wait - ports: - - containerPort: 5432 env: - name: POSTGRES_DB valueFrom: @@ -45,17 +44,54 @@ spec: secretKeyRef: key: POSTGRES_DATA name: postgres + image: postgres:12-alpine + livenessProbe: + exec: + command: + - /bin/sh + - -c + - exec + - pg_isready + - -U + - postgres + - -h + - 127.0.0.1 + - -p + - "5432" + failureThreshold: 6 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + name: postgres + ports: + - containerPort: 5432 + readinessProbe: + exec: + command: + - /bin/sh + - -c + - exec + - pg_isready + - -U + - postgres + - -h + - 127.0.0.1 + - -p + - "5432" + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + resources: {} volumeMounts: - - name: postgres - mountPath: /data - - name: bin-provision + - mountPath: /data + name: data + - mountPath: /usr/local/bin/backup-restore-sidecar + name: bin-provision subPath: backup-restore-sidecar - mountPath: /usr/local/bin/backup-restore-sidecar - - name: backup-restore-sidecar-config - mountPath: /etc/backup-restore-sidecar - - image: postgres:12-alpine - name: backup-restore-sidecar - command: + - mountPath: /etc/backup-restore-sidecar + name: backup-restore-sidecar-config + - command: - backup-restore-sidecar - start - --log-level=debug @@ -70,52 +106,77 @@ spec: secretKeyRef: key: POSTGRES_USER name: postgres + image: postgres:12-alpine + name: backup-restore-sidecar + ports: + - containerPort: 8000 + name: grpc + resources: {} volumeMounts: - - name: postgres - mountPath: /data - - name: bin-provision + - mountPath: /backup + name: backup 
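# Illustrative sketch only, not part of the generated manifest: the sidecar container now
# exposes its gRPC API (containerPort 8000), so an out-of-schedule backup can be triggered
# with the new create-backup subcommand from inside that container, assuming its default
# server address resolves to the local gRPC endpoint:
#
#   kubectl exec statefulset/postgres -c backup-restore-sidecar -- \
#     backup-restore-sidecar create-backup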
+ - mountPath: /data + name: data + - mountPath: /etc/backup-restore-sidecar + name: backup-restore-sidecar-config + - mountPath: /usr/local/bin/backup-restore-sidecar + name: bin-provision subPath: backup-restore-sidecar - mountPath: /usr/local/bin/backup-restore-sidecar - - name: backup-restore-sidecar-config - mountPath: /etc/backup-restore-sidecar initContainers: - - name: backup-restore-sidecar-provider - image: ghcr.io/metal-stack/backup-restore-sidecar:latest - imagePullPolicy: IfNotPresent - command: + - command: - cp - /backup-restore-sidecar - /bin-provision - ports: - - containerPort: 2112 + image: ghcr.io/metal-stack/backup-restore-sidecar:latest + imagePullPolicy: IfNotPresent + name: backup-restore-sidecar-provider + resources: {} volumeMounts: - - name: bin-provision - mountPath: /bin-provision + - mountPath: /bin-provision + name: bin-provision volumes: - - name: postgres + - name: data persistentVolumeClaim: - claimName: postgres - - name: backup-restore-sidecar-config - configMap: + claimName: data + - name: backup + persistentVolumeClaim: + claimName: backup + - configMap: name: backup-restore-sidecar-config-postgres - - name: bin-provision - emptyDir: {} + name: backup-restore-sidecar-config + - emptyDir: {} + name: bin-provision + updateStrategy: {} volumeClaimTemplates: - metadata: - name: postgres + creationTimestamp: null + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + status: {} + - metadata: + creationTimestamp: null + name: backup spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi + status: {} +status: + availableReplicas: 0 + replicas: 0 --- apiVersion: v1 -kind: ConfigMap -metadata: - name: backup-restore-sidecar-config-postgres data: config.yaml: | + --- + bind-addr: 0.0.0.0 db: postgres db-data-directory: /data/postgres/ backup-provider: local @@ -123,21 +184,27 @@ data: object-prefix: postgres-test compression-method: tar post-exec-cmds: - - docker-entrypoint.sh postgres + - docker-entrypoint.sh postgres +kind: ConfigMap +metadata: + creationTimestamp: null + name: backup-restore-sidecar-config-postgres --- apiVersion: v1 kind: Secret metadata: + creationTimestamp: null name: postgres stringData: + POSTGRES_DATA: /data/postgres/ POSTGRES_DB: postgres - POSTGRES_USER: test POSTGRES_PASSWORD: test123! - POSTGRES_DATA: /data/postgres/ + POSTGRES_USER: postgres --- apiVersion: v1 kind: Service metadata: + creationTimestamp: null labels: app: postgres name: postgres @@ -146,8 +213,10 @@ spec: - name: "5432" port: 5432 targetPort: 5432 - - name: "metrics" + - name: metrics port: 2112 targetPort: 2112 selector: app: postgres +status: + loadBalancer: {} diff --git a/deploy/rethinkdb-local.yaml b/deploy/rethinkdb-local.yaml index 9342ab9..994088f 100644 --- a/deploy/rethinkdb-local.yaml +++ b/deploy/rethinkdb-local.yaml @@ -1,112 +1,139 @@ +# DO NOT EDIT! 
This is auto-generated by the integration tests --- apiVersion: apps/v1 kind: StatefulSet metadata: + creationTimestamp: null labels: app: rethinkdb name: rethinkdb spec: - serviceName: rethinkdb replicas: 1 selector: matchLabels: app: rethinkdb + serviceName: rethinkdb template: metadata: + creationTimestamp: null labels: app: rethinkdb spec: containers: - - image: rethinkdb:2.4.0 - name: rethinkdb - command: + - command: - backup-restore-sidecar - wait env: - name: RETHINKDB_PASSWORD valueFrom: secretKeyRef: - name: rethinkdb key: rethinkdb-password + name: rethinkdb + image: rethinkdb:2.4.0 + name: rethinkdb ports: - containerPort: 8080 - containerPort: 28015 + resources: {} volumeMounts: - mountPath: /data - name: rethinkdb - - name: bin-provision + name: data + - mountPath: /usr/local/bin/backup-restore-sidecar + name: bin-provision subPath: backup-restore-sidecar - mountPath: /usr/local/bin/backup-restore-sidecar - - name: backup-restore-sidecar-config - mountPath: /etc/backup-restore-sidecar - - image: rethinkdb:2.4.0 - name: backup-restore-sidecar - command: + - mountPath: /etc/backup-restore-sidecar + name: backup-restore-sidecar-config + - command: - backup-restore-sidecar - start - --log-level=debug + image: rethinkdb:2.4.0 + name: backup-restore-sidecar + ports: + - containerPort: 8000 + name: grpc + resources: {} volumeMounts: - - name: rethinkdb - mountPath: /data - - name: rethinkdb-credentials - mountPath: /rethinkdb-secret - - name: backup-restore-sidecar-config - mountPath: /etc/backup-restore-sidecar - - name: bin-provision + - mountPath: /backup + name: backup + - mountPath: /data + name: data + - mountPath: /rethinkdb-secret + name: rethinkdb-credentials + - mountPath: /etc/backup-restore-sidecar + name: backup-restore-sidecar-config + - mountPath: /usr/local/bin/backup-restore-sidecar + name: bin-provision subPath: backup-restore-sidecar - mountPath: /usr/local/bin/backup-restore-sidecar - - name: bin-provision + - mountPath: /usr/local/bin/rethinkdb-dump + name: bin-provision subPath: rethinkdb-dump - mountPath: /usr/local/bin/rethinkdb-dump - - name: bin-provision + - mountPath: /usr/local/bin/rethinkdb-restore + name: bin-provision subPath: rethinkdb-restore - mountPath: /usr/local/bin/rethinkdb-restore initContainers: - - name: backup-restore-sidecar-provider - image: ghcr.io/metal-stack/backup-restore-sidecar:latest - imagePullPolicy: IfNotPresent - command: + - command: - cp - /backup-restore-sidecar - /rethinkdb/rethinkdb-dump - /rethinkdb/rethinkdb-restore - /bin-provision - ports: - - containerPort: 2112 + image: ghcr.io/metal-stack/backup-restore-sidecar:latest + imagePullPolicy: IfNotPresent + name: backup-restore-sidecar-provider + resources: {} volumeMounts: - - name: bin-provision - mountPath: /bin-provision + - mountPath: /bin-provision + name: bin-provision volumes: - - name: rethinkdb + - name: data persistentVolumeClaim: - claimName: rethinkdb + claimName: data + - name: backup + persistentVolumeClaim: + claimName: backup - name: rethinkdb-credentials secret: - secretName: rethinkdb items: - key: rethinkdb-password path: rethinkdb-password.txt - - name: backup-restore-sidecar-config - configMap: + secretName: rethinkdb + - configMap: name: backup-restore-sidecar-config-rethinkdb - - name: bin-provision - emptyDir: {} + name: backup-restore-sidecar-config + - emptyDir: {} + name: bin-provision + updateStrategy: {} volumeClaimTemplates: - metadata: - name: rethinkdb + creationTimestamp: null + name: data + spec: + accessModes: + - ReadWriteOnce + 
resources: + requests: + storage: 1Gi + status: {} + - metadata: + creationTimestamp: null + name: backup spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi + status: {} +status: + availableReplicas: 0 + replicas: 0 --- apiVersion: v1 -kind: ConfigMap -metadata: - name: backup-restore-sidecar-config-rethinkdb data: config.yaml: | + --- + bind-addr: 0.0.0.0 db: rethinkdb db-data-directory: /data/rethinkdb/ backup-provider: local @@ -114,22 +141,25 @@ data: backup-cron-schedule: "*/1 * * * *" object-prefix: rethinkdb-test post-exec-cmds: - # IMPORTANT: the --directory needs to point to the exact sidecar data dir, otherwise the database will be restored to the wrong location - - rethinkdb --bind all --directory /data/rethinkdb --initial-password ${RETHINKDB_PASSWORD} + # IMPORTANT: the --directory needs to point to the exact sidecar data dir, otherwise the database will be restored to the wrong location + - rethinkdb --bind all --directory /data/rethinkdb --initial-password ${RETHINKDB_PASSWORD} +kind: ConfigMap +metadata: + creationTimestamp: null + name: backup-restore-sidecar-config-rethinkdb --- apiVersion: v1 kind: Secret metadata: + creationTimestamp: null name: rethinkdb - labels: - app: rethinkdb -type: Opaque stringData: - rethinkdb-password: "test123!" + rethinkdb-password: test123! --- apiVersion: v1 kind: Service metadata: + creationTimestamp: null labels: app: rethinkdb name: rethinkdb @@ -137,12 +167,14 @@ spec: ports: - name: "10080" port: 10080 - targetPort: 8080 + targetPort: 10080 - name: "28015" port: 28015 targetPort: 28015 - - name: "metrics" + - name: metrics port: 2112 targetPort: 2112 selector: app: rethinkdb +status: + loadBalancer: {} diff --git a/go.mod b/go.mod index e20447d..8a8cbde 100644 --- a/go.mod +++ b/go.mod @@ -5,19 +5,34 @@ go 1.21 require ( cloud.google.com/go/storage v1.32.0 github.com/Masterminds/semver/v3 v3.2.1 - github.com/aws/aws-sdk-go v1.44.324 + github.com/avast/retry-go/v4 v4.5.0 + github.com/aws/aws-sdk-go v1.45.2 + github.com/docker/docker v24.0.5+incompatible github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 + github.com/lib/pq v1.10.9 + github.com/metal-stack/metal-lib v0.13.3 github.com/metal-stack/v v1.0.3 github.com/mholt/archiver/v3 v3.5.1 github.com/olekukonko/tablewriter v0.0.5 github.com/prometheus/client_golang v1.16.0 github.com/robfig/cron/v3 v3.0.1 + github.com/spf13/afero v1.9.5 github.com/spf13/cobra v1.7.0 github.com/spf13/viper v1.16.0 + github.com/stretchr/testify v1.8.4 + github.com/testcontainers/testcontainers-go v0.23.0 + go.etcd.io/etcd/client/v3 v3.5.9 go.uber.org/zap v1.25.0 - google.golang.org/api v0.137.0 + golang.org/x/sync v0.3.0 + google.golang.org/api v0.138.0 google.golang.org/grpc v1.57.0 google.golang.org/protobuf v1.31.0 + gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.2 + k8s.io/api v0.28.1 + k8s.io/apimachinery v0.28.1 + k8s.io/client-go v0.28.1 + sigs.k8s.io/controller-runtime v0.16.1 + sigs.k8s.io/yaml v1.3.0 ) require ( @@ -25,55 +40,109 @@ require ( cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.2 // indirect + dario.cat/mergo v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect github.com/andybalholm/brotli v1.0.5 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // 
indirect + github.com/containerd/containerd v1.7.5 // indirect + github.com/coreos/go-semver v0.3.1 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/cpuguy83/dockercfg v0.3.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-openapi/jsonpointer v0.20.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.5 // indirect github.com/google/uuid v1.3.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/hashicorp/hcl v1.0.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.16.7 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moby/patternmatcher v0.5.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nwaples/rardecode v1.1.3 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc4 // indirect + github.com/opencontainers/runc v1.1.9 // indirect + github.com/opentracing/opentracing-go v1.1.0 // indirect github.com/pelletier/go-toml/v2 v2.0.9 // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.11.1 // indirect github.com/rivo/uniseg v0.4.4 // indirect - github.com/spf13/afero v1.9.5 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/ulikunitz/xz v0.5.11 
// indirect github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect + go.etcd.io/etcd/api/v3 v3.5.9 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect go.opencensus.io v0.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.12.0 // indirect + golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb // indirect + golang.org/x/mod v0.12.0 // indirect golang.org/x/net v0.14.0 // indirect golang.org/x/oauth2 v0.11.0 // indirect - golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.11.0 // indirect - golang.org/x/text v0.12.0 // indirect + golang.org/x/sys v0.12.0 // indirect + golang.org/x/term v0.12.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.12.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230815205213-6bfd019c3878 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230815205213-6bfd019c3878 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230815205213-6bfd019c3878 // indirect + gopkg.in/cenkalti/backoff.v2 v2.2.1 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) diff --git a/go.sum b/go.sum index b179e57..7e8cd43 100644 --- a/go.sum +++ b/go.sum @@ -45,22 +45,40 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.32.0 h1:5w6DxEGOnktmJHarxAOUywxVW9lbNWIzlzzUltG/3+o= cloud.google.com/go/storage v1.32.0/go.mod h1:Hhh/dogNRGca7IWv1RC2YqEn0c0G77ctA/OxflYkiD8= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek= +github.com/Microsoft/hcsshim 
v0.10.0-rc.8/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM= github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aws/aws-sdk-go v1.44.324 h1:/uja9PtgeeqrZCPOJTenjMLNpciIMuzaRKooq+erG4A= -github.com/aws/aws-sdk-go v1.44.324/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/avast/retry-go/v4 v4.5.0 h1:QoRAZZ90cj5oni2Lsgl2GW8mNTnUCnmpx/iKpwVisHg= +github.com/avast/retry-go/v4 v4.5.0/go.mod h1:7hLEXp0oku2Nir2xBAsg0PTphp9z71bN5Aq1fboC3+I= +github.com/aws/aws-sdk-go v1.45.2 h1:hTong9YUklQKqzrGk3WnKABReb5R8GjbG4Y6dEQfjnk= +github.com/aws/aws-sdk-go v1.45.2/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= +github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= @@ -76,13 +94,34 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/containerd/containerd v1.7.5 h1:i9T9XpAWMe11BHMN7pu1BZqOGjXaKTPyz2v+KYOZgkY= +github.com/containerd/containerd v1.7.5/go.mod h1:ieJNCSzASw2shSGYLHx8NAE7WsZ/gEigo5fQ78W5Zvw= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= +github.com/cpuguy83/dockercfg 
v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY= +github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY= github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -90,8 +129,11 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -100,7 +142,23 @@ github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= +github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -139,6 +197,8 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -152,6 +212,9 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 
h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -168,6 +231,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.5 h1:8IYp3w9nysqv3JH+NJgXJzGbDHzLOTj43BmSkp+O7qg= github.com/google/s2a-go v0.1.5/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= @@ -184,18 +249,28 @@ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= 
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -211,30 +286,69 @@ github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/metal-stack/metal-lib v0.13.3 h1:BOhwcKHILmBZd2pz2YMOhj8QxzDaz3G0F/CGuYhnu8o= +github.com/metal-stack/metal-lib v0.13.3/go.mod h1:BAR7fjdoV7DDg8i9GpJQBDaNSFirOcBs0vLYTBnhHQU= github.com/metal-stack/v v1.0.3 h1:Sh2oBlnxrCUD+mVpzfC8HiqL045YWkxs0gpTvkjppqs= github.com/metal-stack/v v1.0.3/go.mod h1:YTahEu7/ishwpYKnp/VaW/7nf8+PInogkfGwLcGPdXg= github.com/mholt/archiver/v3 v3.5.1 h1:rDjOBX9JSF5BvoJGvjqK479aL70qh9DIpZCl+k7Clwo= github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= +github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0 
h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= github.com/nwaples/rardecode v1.1.3 h1:cWCaZwfM5H7nAD6PyEdcVnczzV8i/JtotnyW/dD9lEc= github.com/nwaples/rardecode v1.1.3/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= +github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/runc v1.1.9 h1:XR0VIHTGce5eWPkaPesqTBrhW2yAcaraWfsEalNwQLM= +github.com/opencontainers/runc v1.1.9/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0= github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= @@ -242,6 +356,7 @@ 
github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuR github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -265,7 +380,10 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= @@ -280,7 +398,9 @@ github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -294,6 +414,8 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/testcontainers/testcontainers-go v0.23.0 h1:ERYTSikX01QczBLPZpqsETTBO7lInqEP349phDOVJVs= +github.com/testcontainers/testcontainers-go v0.23.0/go.mod h1:3gzuZfb7T9qfcH2pHpV4RLlWrPjeWNQah6XlYQ32c4I= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= @@ -305,6 +427,12 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= +go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= +go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE= +go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= +go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E= +go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -316,8 +444,8 @@ go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= @@ -328,6 +456,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -345,6 +474,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb h1:mIKbk8weKhSeLH2GmUTrvx8CjkyJmnU1wFmg59CUjFA= +golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -369,8 +500,11 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -431,6 +565,7 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -441,6 +576,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -466,16 +602,20 @@ golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -486,11 +626,13 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -542,12 +684,16 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.12.0 
h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -567,8 +713,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.137.0 h1:QrKX6uNvzJLr0Fd3vWVqcyrcmFoYi036VUAsZbiF4+s= -google.golang.org/api v0.137.0/go.mod h1:4xyob8CxC+0GChNBvEUAk8VBKNvYOTWM9T3v3UfRxuY= +google.golang.org/api v0.138.0 h1:K/tVp05MxNVbHShRw9m7e9VJGdagNeTdMzqPH7AUqr0= +google.golang.org/api v0.138.0/go.mod h1:4xyob8CxC+0GChNBvEUAk8VBKNvYOTWM9T3v3UfRxuY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -656,15 +802,27 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII= +gopkg.in/cenkalti/backoff.v2 v2.2.1/go.mod h1:S0QdOvT2AlerfSBkp0O+dk+bbIMaNbEmVk876gPCthU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 
v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.2 h1:tczPZjdz6soV2thcuq1IFOuNLrBUGonFyUXBbIWXWis= +gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.2/go.mod h1:c7Wo0IjB7JL9B9Avv0UZKorYJCUhiergpj3u1WtGT1E= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -672,6 +830,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -679,6 +839,28 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.28.1 h1:i+0O8k2NPBCPYaMB+uCkseEbawEt/eFaiRqUx8aB108= +k8s.io/api v0.28.1/go.mod h1:uBYwID+66wiL28Kn2tBjBYQdEU0Xk0z5qF8bIBqk/Dg= +k8s.io/apiextensions-apiserver v0.28.0 h1:CszgmBL8CizEnj4sj7/PtLGey6Na3YgWyGCPONv7E9E= +k8s.io/apiextensions-apiserver v0.28.0/go.mod h1:uRdYiwIuu0SyqJKriKmqEN2jThIJPhVmOWETm8ud1VE= +k8s.io/apimachinery v0.28.1 h1:EJD40og3GizBSV3mkIoXQBsws32okPOy+MkRyzh6nPY= +k8s.io/apimachinery v0.28.1/go.mod h1:X0xh/chESs2hP9koe+SdIAcXWcQ+RM5hy0ZynB+yEvw= +k8s.io/client-go v0.28.1 h1:pRhMzB8HyLfVwpngWKE8hDcXRqifh1ga2Z/PU9SXVK8= +k8s.io/client-go v0.28.1/go.mod h1:pEZA3FqOsVkCc07pFVzK076R+P/eXqsgx5zuuRWukNE= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= +k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= 
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/controller-runtime v0.16.1 h1:+15lzrmHsE0s2kNl0Dl8cTchI5Cs8qofo5PGcPrV9z0= +sigs.k8s.io/controller-runtime v0.16.1/go.mod h1:vpMu3LpI5sYWtujJOa2uPK61nB5rbwlN7BAB8aSLvGU= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/integration/etcd_test.go b/integration/etcd_test.go new file mode 100644 index 0000000..b51c7b9 --- /dev/null +++ b/integration/etcd_test.go @@ -0,0 +1,320 @@ +//go:build integration + +package integration_test + +import ( + "context" + "testing" + "time" + + "github.com/avast/retry-go/v4" + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" + "github.com/metal-stack/metal-lib/pkg/pointer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + clientv3 "go.etcd.io/etcd/client/v3" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + + _ "github.com/lib/pq" +) + +var ( + etcdContainerImage = "quay.io/coreos/etcd:v3.5.7" +) + +func Test_ETCD_Restore(t *testing.T) { + restoreFlow(t, &flowSpec{ + databaseType: "etcd", + sts: etcdSts, + backingResources: etcdBackingResources, + addTestData: addEtcdTestData, + verifyTestData: verifyEtcdTestData, + }) +} + +func etcdSts(namespace string, image string) *appsv1.StatefulSet { + if image == "" { + image = etcdContainerImage + } + return &appsv1.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "StatefulSet", + APIVersion: appsv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd", + Namespace: namespace, + Labels: map[string]string{ + "app": "etcd", + }, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "etcd", + Replicas: pointer.Pointer(int32(1)), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "etcd", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "etcd", + }, + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Name: "etcd", + Image: image, + Command: []string{"backup-restore-sidecar", "wait"}, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"/usr/local/bin/etcdctl", "endpoint", "health", "--endpoints=127.0.0.1:32379"}, + }, + }, + InitialDelaySeconds: 15, + TimeoutSeconds: 1, + PeriodSeconds: 5, + SuccessThreshold: 1, + FailureThreshold: 3, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/health", + Port: intstr.FromInt(32381), + Scheme: corev1.URISchemeHTTP, + }, + }, + InitialDelaySeconds: 15, + TimeoutSeconds: 1, + PeriodSeconds: 5, + SuccessThreshold: 1, 
+ FailureThreshold: 3, + }, + Ports: []corev1.ContainerPort{ + // default ports are taken by kind etcd because running in host network + { + ContainerPort: 32379, + Name: "client", + Protocol: corev1.ProtocolTCP, + }, + { + ContainerPort: 32380, + Name: "server", + Protocol: corev1.ProtocolTCP, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "data", + MountPath: "/data", + }, + { + Name: "bin-provision", + SubPath: "backup-restore-sidecar", + MountPath: "/usr/local/bin/backup-restore-sidecar", + }, + { + Name: "backup-restore-sidecar-config", + MountPath: "/etc/backup-restore-sidecar", + }, + }, + }, + { + Name: "backup-restore-sidecar", + Image: image, + Command: []string{"backup-restore-sidecar", "start", "--log-level=debug"}, + Ports: []corev1.ContainerPort{ + { + Name: "grpc", + ContainerPort: 8000, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "backup", + MountPath: constants.SidecarBaseDir, + }, + { + Name: "data", + MountPath: "/data", + }, + { + Name: "backup-restore-sidecar-config", + MountPath: "/etc/backup-restore-sidecar", + }, + { + Name: "bin-provision", + SubPath: "backup-restore-sidecar", + MountPath: "/usr/local/bin/backup-restore-sidecar", + }, + }, + }, + }, + InitContainers: []corev1.Container{ + { + Name: "backup-restore-sidecar-provider", + Image: backupRestoreSidecarContainerImage, + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{ + "cp", + "/backup-restore-sidecar", + "/bin-provision", + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "bin-provision", + MountPath: "/bin-provision", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "data", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "data", + }, + }, + }, + { + Name: "backup", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "backup", + }, + }, + }, + { + Name: "backup-restore-sidecar-config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "backup-restore-sidecar-config-postgres", + }, + }, + }, + }, + { + Name: "bin-provision", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "data", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "backup", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, + }, + } +} + +func etcdBackingResources(namespace string) []client.Object { + return []client.Object{ + &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-restore-sidecar-config-postgres", + Namespace: namespace, + }, + Data: map[string]string{ + "config.yaml": `--- +bind-addr: 0.0.0.0 +db: etcd +db-data-directory: /data/etcd/ +backup-provider: local +backup-cron-schedule: 
"*/1 * * * *" +object-prefix: etcd-test +etcd-endpoints: http://localhost:32379 +post-exec-cmds: +- etcd --data-dir=/data/etcd --listen-client-urls http://0.0.0.0:32379 --advertise-client-urls http://0.0.0.0:32379 --listen-peer-urls http://0.0.0.0:32380 --initial-advertise-peer-urls http://0.0.0.0:32380 --initial-cluster default=http://0.0.0.0:32380 --listen-metrics-urls http://0.0.0.0:32381 +`, + }, + }, + } +} + +func newEtcdClient(t *testing.T, ctx context.Context) *clientv3.Client { + var cli *clientv3.Client + + err := retry.Do(func() error { + var err error + cli, err = clientv3.New(clientv3.Config{ + Endpoints: []string{"localhost:32379"}, + DialTimeout: 5 * time.Second, + }) + if err != nil { + return err + } + + return nil + }, retry.Context(ctx)) + require.NoError(t, err) + + return cli +} + +func addEtcdTestData(t *testing.T, ctx context.Context) { + cli := newEtcdClient(t, ctx) + defer cli.Close() + + _, err := cli.Put(ctx, "1", "I am precious") + require.NoError(t, err) +} + +func verifyEtcdTestData(t *testing.T, ctx context.Context) { + cli := newEtcdClient(t, ctx) + defer cli.Close() + + resp, err := cli.Get(ctx, "1") + require.NoError(t, err) + require.Len(t, resp.Kvs, 1) + + ev := resp.Kvs[0] + assert.Equal(t, "1", string(ev.Key)) + assert.Equal(t, "I am precious", string(ev.Value)) +} diff --git a/integration/main_test.go b/integration/main_test.go new file mode 100644 index 0000000..7b5fdef --- /dev/null +++ b/integration/main_test.go @@ -0,0 +1,402 @@ +//go:build integration + +package integration_test + +import ( + "context" + "errors" + "fmt" + "os" + "path" + "runtime" + "strings" + "testing" + "time" + + "github.com/avast/retry-go/v4" + v1 "github.com/metal-stack/backup-restore-sidecar/api/v1" + brsclient "github.com/metal-stack/backup-restore-sidecar/pkg/client" + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/yaml" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" +) + +type flowSpec struct { + databaseType string + // slice of images, executed in order during upgrade + databaseImages []string + sts func(namespace, image string) *appsv1.StatefulSet + backingResources func(namespace string) []client.Object + addTestData func(t *testing.T, ctx context.Context) + verifyTestData func(t *testing.T, ctx context.Context) +} + +const ( + backupRestoreSidecarContainerImage = "ghcr.io/metal-stack/backup-restore-sidecar:latest" +) + +var ( + restConfig *rest.Config + c client.Client +) + +func TestMain(m *testing.M) { + var err error + c, err = newKubernetesClient() + if err != nil { + fmt.Printf("error creating kubernetes client: %s\n", err) + os.Exit(1) + } + + os.Exit(m.Run()) +} + +func restoreFlow(t *testing.T, spec *flowSpec) { + t.Log("running restore flow") + var ( + ctx, cancel = context.WithTimeout(context.Background(), 10*time.Minute) + ns = testNamespace(t) + image string + ) + if len(spec.databaseImages) > 0 { + image = spec.databaseImages[0] + } + + defer cancel() + + cleanup := func() { + t.Log("running cleanup") + + err := c.Delete(ctx, ns) + require.NoError(t, client.IgnoreNotFound(err), "cleanup did not succeed") + + err = waitUntilNotFound(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: ns.Name, + }, + 
}) + require.NoError(t, err, "cleanup did not succeed") + } + cleanup() + defer cleanup() + + err := c.Create(ctx, ns) + require.NoError(t, client.IgnoreAlreadyExists(err)) + + t.Log("applying resource manifests") + + objects := func() []client.Object { + objects := []client.Object{spec.sts(ns.Name, image)} + objects = append(objects, spec.backingResources(ns.Name)...) + return objects + } + + dumpToExamples(t, spec.databaseType+"-local.yaml", objects()...) + + for _, o := range objects() { + o := o + err = c.Create(ctx, o) + require.NoError(t, err) + } + + podName := spec.sts(ns.Name, image).Name + "-0" + + err = waitForPodRunnig(ctx, podName, ns.Name) + require.NoError(t, err) + + t.Log("adding test data to database") + + spec.addTestData(t, ctx) + + t.Log("taking a backup") + + brsc, err := brsclient.New(ctx, "http://localhost:8000") + require.NoError(t, err) + + _, err = brsc.DatabaseServiceClient().CreateBackup(ctx, &v1.CreateBackupRequest{}) + if err != nil && !errors.Is(err, constants.ErrBackupAlreadyInProgress) { + require.NoError(t, err) + } + + var backup *v1.Backup + err = retry.Do(func() error { + backups, err := brsc.BackupServiceClient().ListBackups(ctx, &v1.ListBackupsRequest{}) + if err != nil { + return err + } + + if len(backups.Backups) == 0 { + return fmt.Errorf("no backups were made yet") + } + + backup = backups.Backups[0] + + return nil + }, retry.Context(ctx), retry.Attempts(0), retry.MaxDelay(2*time.Second)) + require.NoError(t, err) + require.NotNil(t, backup) + + t.Log("remove sts and delete data volume") + + err = c.Delete(ctx, spec.sts(ns.Name, image)) + require.NoError(t, err) + + err = c.Delete(ctx, &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "data-" + podName, + Namespace: ns.Name, + }, + }) + require.NoError(t, err) + + err = waitUntilNotFound(ctx, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: ns.Name, + }, + }) + require.NoError(t, err) + + t.Log("recreate sts") + + err = c.Create(ctx, spec.sts(ns.Name, image)) + require.NoError(t, err) + + err = waitForPodRunnig(ctx, podName, ns.Name) + require.NoError(t, err) + + t.Log("verify that data gets restored") + + spec.verifyTestData(t, ctx) +} + +func upgradeFlow(t *testing.T, spec *flowSpec) { + t.Log("running upgrade flow") + + require.GreaterOrEqual(t, len(spec.databaseImages), 2, "at least 2 database images must be specified for the upgrade test") + + var ( + ctx, cancel = context.WithTimeout(context.Background(), 10*time.Minute) + ns = testNamespace(t) + initialImage = spec.databaseImages[0] + nextImages = spec.databaseImages[1:] + ) + + defer cancel() + + cleanup := func() { + t.Log("running cleanup") + + err := c.Delete(ctx, ns) + require.NoError(t, client.IgnoreNotFound(err), "cleanup did not succeed") + + err = waitUntilNotFound(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: ns.Name, + }, + }) + require.NoError(t, err, "cleanup did not succeed") + } + cleanup() + defer cleanup() + + err := c.Create(ctx, ns) + require.NoError(t, client.IgnoreAlreadyExists(err)) + + t.Log("applying resource manifests") + + objects := func() []client.Object { + objects := []client.Object{spec.sts(ns.Name, initialImage)} + objects = append(objects, spec.backingResources(ns.Name)...) 
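		// the StatefulSet starts out on the first (oldest) entry of spec.databaseImages;
		// the remaining images are rolled out one by one further below to exercise the
		// in-place upgrade path of the sidecar.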
+ return objects + } + + for _, o := range objects() { + o := o + err = c.Create(ctx, o) + require.NoError(t, err) + } + + podName := spec.sts(ns.Name, initialImage).Name + "-0" + + err = waitForPodRunnig(ctx, podName, ns.Name) + require.NoError(t, err) + + t.Log("adding test data to database") + + spec.addTestData(t, ctx) + + t.Log("taking a backup") + + brsc, err := brsclient.New(ctx, "http://localhost:8000") + require.NoError(t, err) + + _, err = brsc.DatabaseServiceClient().CreateBackup(ctx, &v1.CreateBackupRequest{}) + assert.NoError(t, err) + + var backup *v1.Backup + err = retry.Do(func() error { + backups, err := brsc.BackupServiceClient().ListBackups(ctx, &v1.ListBackupsRequest{}) + if err != nil { + return err + } + + if len(backups.Backups) == 0 { + return fmt.Errorf("no backups were made yet") + } + + backup = backups.Backups[0] + + return nil + }, retry.Context(ctx), retry.Attempts(0), retry.MaxDelay(2*time.Second)) + require.NoError(t, err) + require.NotNil(t, backup) + + for _, image := range nextImages { + image := image + nextSts := spec.sts(ns.Name, image).DeepCopy() + t.Logf("deploy sts with next database version %q, container %q", image, nextSts.Spec.Template.Spec.Containers[0].Image) + + err = c.Update(ctx, nextSts, &client.UpdateOptions{}) + require.NoError(t, err) + + time.Sleep(10 * time.Second) + + // TODO maybe better wait for generation changed + err = waitForPodRunnig(ctx, podName, ns.Name) + require.NoError(t, err) + + t.Log("verify that data is still the same") + + spec.verifyTestData(t, ctx) + } +} + +func testNamespace(t *testing.T) *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespaceName(t), + }, + } +} + +func namespaceName(t *testing.T) string { + const max = 63 // max length for k8s namespaces is 63 chars + n := strings.ToLower(strings.ReplaceAll(t.Name(), "_", "-")) + if len(n) > max { + return n[:max] + } + return n +} + +func newKubernetesClient() (client.Client, error) { + restConfig = config.GetConfigOrDie() + c, err := client.New(restConfig, client.Options{}) + if err != nil { + return nil, err + } + + nodes := &corev1.NodeList{} + err = c.List(context.Background(), nodes) + if err != nil { + return nil, err + } + + for _, n := range nodes.Items { + n := n + if !strings.HasPrefix(n.Spec.ProviderID, "kind://") && os.Getenv("SKIP_KIND_VALIDATIONS") != "1" { + return nil, fmt.Errorf("for security reasons only running against kind clusters") + } + } + + return c, nil +} + +func dumpToExamples(t *testing.T, name string, resources ...client.Object) { + content := []byte(`# DO NOT EDIT! This is auto-generated by the integration tests +--- +`) + + for i, r := range resources { + r.SetNamespace("") // not needed for example manifests + + r := r.DeepCopyObject() + + if sts, ok := r.(*appsv1.StatefulSet); ok { + // host network is only for integration testing purposes + sts.Spec.Template.Spec.HostNetwork = false + } + + raw, err := yaml.Marshal(r) + require.NoError(t, err) + + if i != len(resources)-1 { + raw = append(raw, []byte("---\n")...) + } + + content = append(content, raw...) 
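		// once all resources are collected, the concatenated manifest is written to the
		// deploy/ directory below, which keeps the example manifests in the repository
		// in sync with the integration test fixtures (namespace stripped, host network
		// disabled above).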
+ } + + _, filename, _, _ := runtime.Caller(1) + + dest := path.Join(path.Dir(filename), "..", "deploy", name) + t.Logf("example manifest written to %s", dest) + + err := os.WriteFile(dest, content, 0600) + require.NoError(t, err) +} + +func waitForPodRunnig(ctx context.Context, name, namespace string) error { + return retry.Do(func() error { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + err := c.Get(ctx, client.ObjectKeyFromObject(pod), pod) + if err != nil { + return err + } + + if pod.Status.Phase != corev1.PodRunning { + return fmt.Errorf("pod is not yet running running") + } + + if len(pod.Spec.Containers) != len(pod.Status.ContainerStatuses) { + return fmt.Errorf("not all containers available in status") + } + + for _, status := range pod.Status.ContainerStatuses { + if !status.Ready { + return fmt.Errorf("container not yet ready: %s", status.Name) + } + } + + return nil + }, retry.Context(ctx), retry.Attempts(0)) +} + +func waitUntilNotFound(ctx context.Context, obj client.Object) error { + return retry.Do(func() error { + err := c.Get(ctx, client.ObjectKeyFromObject(obj), obj) + if err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return err + } + + return fmt.Errorf("resource is still running: %s", obj.GetName()) + }, retry.Context(ctx), retry.Attempts(0)) +} diff --git a/integration/postgres_test.go b/integration/postgres_test.go new file mode 100644 index 0000000..dd8ffe9 --- /dev/null +++ b/integration/postgres_test.go @@ -0,0 +1,466 @@ +//go:build integration + +package integration_test + +import ( + "context" + "database/sql" + "fmt" + "testing" + + "github.com/avast/retry-go/v4" + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" + "github.com/metal-stack/metal-lib/pkg/pointer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + + _ "github.com/lib/pq" +) + +const ( + postgresDB = "postgres" + postgresPassword = "test123!" 
+ postgresUser = "postgres" +) + +var ( + postgresContainerImage = "postgres:12-alpine" +) + +func Test_Postgres_Restore(t *testing.T) { + restoreFlow(t, &flowSpec{ + databaseType: "postgres", + sts: postgresSts, + backingResources: postgresBackingResources, + addTestData: addPostgresTestData, + verifyTestData: verifyPostgresTestData, + }) +} + +func Test_Postgres_Upgrade(t *testing.T) { + upgradeFlow(t, &flowSpec{ + databaseType: "postgres", + databaseImages: []string{ + "postgres:12-alpine", + // "postgres:13-alpine", commented to test if two versions upgrade also work + "postgres:14-alpine", + "postgres:15-alpine", + }, + sts: postgresSts, + backingResources: postgresBackingResources, + addTestData: addPostgresTestData, + verifyTestData: verifyPostgresTestData, + }) +} + +func postgresSts(namespace, image string) *appsv1.StatefulSet { + if image == "" { + image = postgresContainerImage + } + + return &appsv1.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "StatefulSet", + APIVersion: appsv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "postgres", + Namespace: namespace, + Labels: map[string]string{ + "app": "postgres", + }, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "postgres", + Replicas: pointer.Pointer(int32(1)), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "postgres", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "postgres", + }, + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Name: "postgres", + Image: image, + Command: []string{"backup-restore-sidecar", "wait"}, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"/bin/sh", "-c", "exec", "pg_isready", "-U", postgresUser, "-h", "127.0.0.1", "-p", "5432"}, + }, + }, + InitialDelaySeconds: 30, + TimeoutSeconds: 5, + PeriodSeconds: 10, + SuccessThreshold: 1, + FailureThreshold: 6, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"/bin/sh", "-c", "exec", "pg_isready", "-U", postgresUser, "-h", "127.0.0.1", "-p", "5432"}, + }, + }, + InitialDelaySeconds: 5, + TimeoutSeconds: 5, + PeriodSeconds: 10, + }, + Env: []corev1.EnvVar{ + { + Name: "POSTGRES_DB", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "postgres", + }, + Key: "POSTGRES_DB", + }, + }, + }, + { + Name: "POSTGRES_USER", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "postgres", + }, + Key: "POSTGRES_USER", + }, + }, + }, + { + Name: "POSTGRES_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "postgres", + }, + Key: "POSTGRES_PASSWORD", + }, + }, + }, + { + Name: "PGDATA", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "postgres", + }, + Key: "POSTGRES_DATA", + }, + }, + }, + }, + Ports: []corev1.ContainerPort{ + { + ContainerPort: 5432, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "data", + MountPath: "/data", + }, + { + Name: "bin-provision", + SubPath: "backup-restore-sidecar", + MountPath: "/usr/local/bin/backup-restore-sidecar", + }, + { + Name: 
"backup-restore-sidecar-config", + MountPath: "/etc/backup-restore-sidecar", + }, + }, + }, + { + Name: "backup-restore-sidecar", + Image: image, + Command: []string{"backup-restore-sidecar", "start", "--log-level=debug"}, + Env: []corev1.EnvVar{ + { + Name: "BACKUP_RESTORE_SIDECAR_POSTGRES_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "postgres", + }, + Key: "POSTGRES_PASSWORD", + }, + }, + }, + { + Name: "BACKUP_RESTORE_SIDECAR_POSTGRES_USER", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "postgres", + }, + Key: "POSTGRES_USER", + }, + }, + }, + }, + Ports: []corev1.ContainerPort{ + { + Name: "grpc", + ContainerPort: 8000, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "backup", + MountPath: constants.SidecarBaseDir, + }, + { + Name: "data", + MountPath: "/data", + }, + { + Name: "backup-restore-sidecar-config", + MountPath: "/etc/backup-restore-sidecar", + }, + { + Name: "bin-provision", + SubPath: "backup-restore-sidecar", + MountPath: "/usr/local/bin/backup-restore-sidecar", + }, + }, + }, + }, + InitContainers: []corev1.Container{ + { + Name: "backup-restore-sidecar-provider", + Image: backupRestoreSidecarContainerImage, + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{ + "cp", + "/backup-restore-sidecar", + "/bin-provision", + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "bin-provision", + MountPath: "/bin-provision", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "data", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "data", + }, + }, + }, + { + Name: "backup", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "backup", + }, + }, + }, + { + Name: "backup-restore-sidecar-config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "backup-restore-sidecar-config-postgres", + }, + }, + }, + }, + { + Name: "bin-provision", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "data", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "backup", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, + }, + } +} + +func postgresBackingResources(namespace string) []client.Object { + return []client.Object{ + &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-restore-sidecar-config-postgres", + Namespace: namespace, + }, + Data: map[string]string{ + "config.yaml": `--- +bind-addr: 0.0.0.0 +db: postgres +db-data-directory: /data/postgres/ +backup-provider: local 
+backup-cron-schedule: "*/1 * * * *" +object-prefix: postgres-test +compression-method: tar +post-exec-cmds: +- docker-entrypoint.sh postgres +`, + }, + }, + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "postgres", + Namespace: namespace, + }, + StringData: map[string]string{ + "POSTGRES_DB": postgresDB, + "POSTGRES_USER": postgresUser, + "POSTGRES_PASSWORD": postgresPassword, + "POSTGRES_DATA": "/data/postgres/", + }, + }, + &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "postgres", + Namespace: namespace, + Labels: map[string]string{ + "app": "postgres", + }, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "app": "postgres", + }, + Ports: []corev1.ServicePort{ + { + Name: "5432", + Port: 5432, + TargetPort: intstr.FromInt32(5432), + }, + { + Name: "metrics", + Port: 2112, + TargetPort: intstr.FromInt32(2112), + }, + }, + }, + }, + } +} + +func newPostgresSession(t *testing.T, ctx context.Context) *sql.DB { + var db *sql.DB + err := retry.Do(func() error { + connString := fmt.Sprintf("host=127.0.0.1 port=5432 user=%s password=%s dbname=%s sslmode=disable", postgresUser, postgresPassword, postgresDB) + + var err error + db, err = sql.Open("postgres", connString) + if err != nil { + return err + } + + err = db.PingContext(ctx) + if err != nil { + return err + } + + return nil + }, retry.Context(ctx)) + require.NoError(t, err) + + return db +} + +func addPostgresTestData(t *testing.T, ctx context.Context) { + db := newPostgresSession(t, ctx) + defer db.Close() + + var ( + createStmt = `CREATE TABLE backuprestore ( + data text NOT NULL + );` + insertStmt = `INSERT INTO backuprestore("data") VALUES ('I am precious');` + ) + + _, err := db.Exec(createStmt) + require.NoError(t, err) + + _, err = db.Exec(insertStmt) + require.NoError(t, err) +} + +func verifyPostgresTestData(t *testing.T, ctx context.Context) { + db := newPostgresSession(t, ctx) + defer db.Close() + + rows, err := db.Query(`SELECT "data" FROM backuprestore;`) + require.NoError(t, err) + require.NoError(t, rows.Err()) + defer rows.Close() + + require.True(t, rows.Next()) + var data string + + err = rows.Scan(&data) + require.NoError(t, err) + + assert.Equal(t, "I am precious", data) + assert.False(t, rows.Next()) +} diff --git a/integration/rethinkdb_test.go b/integration/rethinkdb_test.go new file mode 100644 index 0000000..69b164c --- /dev/null +++ b/integration/rethinkdb_test.go @@ -0,0 +1,419 @@ +//go:build integration + +package integration_test + +import ( + "context" + "fmt" + "testing" + + "github.com/metal-stack/backup-restore-sidecar/pkg/constants" + "github.com/metal-stack/metal-lib/pkg/pointer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/avast/retry-go/v4" + r "gopkg.in/rethinkdb/rethinkdb-go.v6" +) + +type rethinkDbTestData struct { + ID string `rethinkdb:"id"` + Data string `rethinkdb:"data"` +} + +const ( + rethinkDbPassword = "test123!" 
+ rethinkDbDatabaseName = "backup-restore" + rethinkDbTable = "precioustestdata" +) + +var ( + rethinkDbContainerImage = "rethinkdb:2.4.0" +) + +func Test_RethinkDB_Restore(t *testing.T) { + restoreFlow(t, &flowSpec{ + databaseType: "rethinkdb", + sts: rethinkDbSts, + backingResources: rethinkDbBackingResources, + addTestData: addRethinkDbTestData, + verifyTestData: verifyRethinkDbTestData, + }) +} + +func rethinkDbSts(namespace, image string) *appsv1.StatefulSet { + if image == "" { + image = rethinkDbContainerImage + } + return &appsv1.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "StatefulSet", + APIVersion: appsv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "rethinkdb", + Namespace: namespace, + Labels: map[string]string{ + "app": "rethinkdb", + }, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "rethinkdb", + Replicas: pointer.Pointer(int32(1)), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "rethinkdb", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "rethinkdb", + }, + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Name: "rethinkdb", + Image: image, + Command: []string{"backup-restore-sidecar", "wait"}, + Env: []corev1.EnvVar{ + { + Name: "RETHINKDB_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "rethinkdb", + }, + Key: "rethinkdb-password", + }, + }, + }, + }, + Ports: []corev1.ContainerPort{ + { + ContainerPort: 8080, + }, + { + ContainerPort: 28015, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "data", + MountPath: "/data", + }, + { + Name: "bin-provision", + SubPath: "backup-restore-sidecar", + MountPath: "/usr/local/bin/backup-restore-sidecar", + }, + { + Name: "backup-restore-sidecar-config", + MountPath: "/etc/backup-restore-sidecar", + }, + }, + }, + { + Name: "backup-restore-sidecar", + Image: image, + Command: []string{"backup-restore-sidecar", "start", "--log-level=debug"}, + Ports: []corev1.ContainerPort{ + { + Name: "grpc", + ContainerPort: 8000, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "backup", + MountPath: constants.SidecarBaseDir, + }, + { + Name: "data", + MountPath: "/data", + }, + { + Name: "rethinkdb-credentials", + MountPath: "/rethinkdb-secret", + }, + { + Name: "backup-restore-sidecar-config", + MountPath: "/etc/backup-restore-sidecar", + }, + { + Name: "bin-provision", + SubPath: "backup-restore-sidecar", + MountPath: "/usr/local/bin/backup-restore-sidecar", + }, + { + Name: "bin-provision", + SubPath: "rethinkdb-dump", + MountPath: "/usr/local/bin/rethinkdb-dump", + }, + { + Name: "bin-provision", + SubPath: "rethinkdb-restore", + MountPath: "/usr/local/bin/rethinkdb-restore", + }, + }, + }, + }, + InitContainers: []corev1.Container{ + { + Name: "backup-restore-sidecar-provider", + Image: backupRestoreSidecarContainerImage, + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{ + "cp", + "/backup-restore-sidecar", + "/rethinkdb/rethinkdb-dump", + "/rethinkdb/rethinkdb-restore", + "/bin-provision", + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "bin-provision", + MountPath: "/bin-provision", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "data", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "data", + }, + }, + }, + { + Name: "backup", + 
VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "backup", + }, + }, + }, + { + Name: "rethinkdb-credentials", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "rethinkdb", + Items: []corev1.KeyToPath{ + { + Key: "rethinkdb-password", + Path: "rethinkdb-password.txt", + }, + }, + }, + }, + }, + { + Name: "backup-restore-sidecar-config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "backup-restore-sidecar-config-rethinkdb", + }, + }, + }, + }, + { + Name: "bin-provision", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "data", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "backup", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, + }, + } +} + +func rethinkDbBackingResources(namespace string) []client.Object { + return []client.Object{ + &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-restore-sidecar-config-rethinkdb", + Namespace: namespace, + }, + Data: map[string]string{ + "config.yaml": `--- +bind-addr: 0.0.0.0 +db: rethinkdb +db-data-directory: /data/rethinkdb/ +backup-provider: local +rethinkdb-passwordfile: /rethinkdb-secret/rethinkdb-password.txt +backup-cron-schedule: "*/1 * * * *" +object-prefix: rethinkdb-test +post-exec-cmds: +# IMPORTANT: the --directory needs to point to the exact sidecar data dir, otherwise the database will be restored to the wrong location +- rethinkdb --bind all --directory /data/rethinkdb --initial-password ${RETHINKDB_PASSWORD} +`, + }, + }, + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "rethinkdb", + Namespace: namespace, + }, + StringData: map[string]string{ + "rethinkdb-password": rethinkDbPassword, + }, + }, + &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "rethinkdb", + Namespace: namespace, + Labels: map[string]string{ + "app": "rethinkdb", + }, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "app": "rethinkdb", + }, + Ports: []corev1.ServicePort{ + { + Name: "10080", + Port: 10080, + TargetPort: intstr.FromInt32(10080), + }, + { + Name: "28015", + Port: 28015, + TargetPort: intstr.FromInt32(28015), + }, + { + Name: "metrics", + Port: 2112, + TargetPort: intstr.FromInt32(2112), + }, + }, + }, + }, + } +} + +func newRethinkdbSession(t *testing.T, ctx context.Context) *r.Session { + var session *r.Session + err := retry.Do(func() error { + var err error + session, err = r.Connect(r.ConnectOpts{ + Addresses: 
[]string{"localhost:28015"}, + Database: rethinkDbDatabaseName, + Username: "admin", + Password: rethinkDbPassword, + MaxIdle: 10, + MaxOpen: 20, + }) + if err != nil { + return fmt.Errorf("cannot connect to DB: %w", err) + } + + return nil + }, retry.Context(ctx)) + require.NoError(t, err) + + return session +} + +func addRethinkDbTestData(t *testing.T, ctx context.Context) { + session := newRethinkdbSession(t, ctx) + + _, err := r.DBCreate(rethinkDbDatabaseName).RunWrite(session) + require.NoError(t, err) + + _, err = r.DB(rethinkDbDatabaseName).TableCreate(rethinkDbTable).RunWrite(session) + require.NoError(t, err) + + _, err = r.DB(rethinkDbDatabaseName).Table(rethinkDbTable).Insert(rethinkDbTestData{ + ID: "1", + Data: "i am precious", + }).RunWrite(session) + require.NoError(t, err) + + cursor, err := r.DB(rethinkDbDatabaseName).Table(rethinkDbTable).Get("1").Run(session) + require.NoError(t, err) + + var d1 rethinkDbTestData + err = cursor.One(&d1) + require.NoError(t, err) + require.Equal(t, "i am precious", d1.Data) +} + +func verifyRethinkDbTestData(t *testing.T, ctx context.Context) { + session := newRethinkdbSession(t, ctx) + + var d2 rethinkDbTestData + err := retry.Do(func() error { + cursor, err := r.DB(rethinkDbDatabaseName).Table(rethinkDbTable).Get("1").Run(session) + if err != nil { + return err + } + + err = cursor.One(&d2) + if err != nil { + return err + } + + return nil + }) + require.NoError(t, err) + + assert.Equal(t, "i am precious", d2.Data) +} diff --git a/kind.yaml b/kind.yaml new file mode 100644 index 0000000..02520dc --- /dev/null +++ b/kind.yaml @@ -0,0 +1,21 @@ +--- +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +networking: + apiServerPort: 6443 + apiServerAddress: 0.0.0.0 +nodes: +- role: control-plane + extraPortMappings: + - containerPort: 8000 + hostPort: 8000 + listenAddress: 0.0.0.0 + - containerPort: 28015 + hostPort: 28015 + listenAddress: 0.0.0.0 + - containerPort: 5432 + hostPort: 5432 + listenAddress: 0.0.0.0 + - containerPort: 32379 + hostPort: 32379 + listenAddress: 0.0.0.0 diff --git a/pkg/client/client.go b/pkg/client/client.go new file mode 100644 index 0000000..4d66046 --- /dev/null +++ b/pkg/client/client.go @@ -0,0 +1,56 @@ +package client + +import ( + "context" + "fmt" + "net/url" + + v1 "github.com/metal-stack/backup-restore-sidecar/api/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +type Client interface { + InitializerServiceClient() v1.InitializerServiceClient + BackupServiceClient() v1.BackupServiceClient + DatabaseServiceClient() v1.DatabaseServiceClient +} + +type client struct { + conn *grpc.ClientConn +} + +// New returns a new backup-restore-sidecar grpc client. +func New(ctx context.Context, rawurl string) (Client, error) { + parsedurl, err := url.Parse(rawurl) + if err != nil { + return nil, err + } + if parsedurl.Host == "" { + return nil, fmt.Errorf("invalid url:%s, must be in the form scheme://host[:port]/basepath", rawurl) + } + + opts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithBlock(), + } + + conn, err := grpc.DialContext(ctx, parsedurl.Host, opts...) 
+	if err != nil {
+		return nil, err
+	}
+
+	return &client{conn: conn}, nil
+}
+
+func (c *client) InitializerServiceClient() v1.InitializerServiceClient {
+	return v1.NewInitializerServiceClient(c.conn)
+}
+
+func (c *client) BackupServiceClient() v1.BackupServiceClient {
+	return v1.NewBackupServiceClient(c.conn)
+}
+
+func (c *client) DatabaseServiceClient() v1.DatabaseServiceClient {
+	return v1.NewDatabaseServiceClient(c.conn)
+}
diff --git a/cmd/internal/constants/constants.go b/pkg/constants/constants.go
similarity index 62%
rename from cmd/internal/constants/constants.go
rename to pkg/constants/constants.go
index 7e4a12c..b4fa598 100644
--- a/cmd/internal/constants/constants.go
+++ b/pkg/constants/constants.go
@@ -4,12 +4,16 @@ const (
 	// DefaultObjectsToKeep are the default number of objects to keep at the cloud provider bucket
 	DefaultObjectsToKeep = 20
 
+	// SidecarBaseDir is the directory in which the sidecar puts backups or downloads backups to
+	// this should be backed by a volume mount!
+	SidecarBaseDir = "/backup"
+
 	// BackupDir is the directory in the sidecar where the database backup contents to be archived live in
-	BackupDir = "/tmp/backup-restore-sidecar/backup/files"
+	BackupDir = SidecarBaseDir + "/upload/files"
 	// UploadDir is the path where the backup files are archived in and uploaded to the backup provider
-	UploadDir = "/tmp/backup-restore-sidecar/backup"
+	UploadDir = SidecarBaseDir + "/upload"
 	// RestoreDir is the directory in the sidecar where the database backup contents will be unarchived to
-	RestoreDir = "/tmp/backup-restore-sidecar/restore/files"
+	RestoreDir = SidecarBaseDir + "/restore/files"
 	// DownloadDir is the path where the backup archive will be downloaded to before it is being unarchived to the restore dir
-	DownloadDir = "/tmp/backup-restore-sidecar/restore"
+	DownloadDir = SidecarBaseDir + "/restore"
 )
diff --git a/pkg/constants/errors.go b/pkg/constants/errors.go
new file mode 100644
index 0000000..cabbbb7
--- /dev/null
+++ b/pkg/constants/errors.go
@@ -0,0 +1,7 @@
+package constants
+
+import "errors"
+
+var (
+	ErrBackupAlreadyInProgress = errors.New("a backup is already in progress")
+)
diff --git a/proto/v1/backup.proto b/proto/v1/backup.proto
new file mode 100644
index 0000000..1eb0590
--- /dev/null
+++ b/proto/v1/backup.proto
@@ -0,0 +1,28 @@
+syntax = "proto3";
+
+package v1;
+
+import "google/protobuf/timestamp.proto";
+
+service BackupService {
+  rpc ListBackups(ListBackupsRequest) returns (BackupListResponse);
+  rpc RestoreBackup(RestoreBackupRequest) returns (RestoreBackupResponse);
+}
+
+message ListBackupsRequest {}
+
+message BackupListResponse {
+  repeated Backup backups = 1;
+}
+
+message Backup {
+  string name = 1;
+  string version = 2;
+  google.protobuf.Timestamp timestamp = 3;
+}
+
+message RestoreBackupRequest {
+  string version = 1;
+}
+
+message RestoreBackupResponse {}
diff --git a/proto/v1/database.proto b/proto/v1/database.proto
new file mode 100644
index 0000000..85c73bc
--- /dev/null
+++ b/proto/v1/database.proto
@@ -0,0 +1,11 @@
+syntax = "proto3";
+
+package v1;
+
+service DatabaseService {
+  rpc CreateBackup(CreateBackupRequest) returns (CreateBackupResponse);
+}
+
+message CreateBackupRequest {}
+
+message CreateBackupResponse {}
diff --git a/proto/v1/initializer.proto b/proto/v1/initializer.proto
index 1f1be67..53fc87c 100644
--- a/proto/v1/initializer.proto
+++ b/proto/v1/initializer.proto
@@ -2,13 +2,11 @@ syntax = "proto3";
 
 package v1;
 
-option go_package = "./v1";
-
 service InitializerService {
-  rpc Status(Empty) returns (StatusResponse);
+  rpc Status(StatusRequest) returns (StatusResponse);
 }
 
-message Empty {}
+message StatusRequest {}
 
 message StatusResponse {
   enum InitializerStatus {