diff --git a/core/container_create.go b/core/container_create.go index 432cc6fe2..fb6e0a5b3 100644 --- a/core/container_create.go +++ b/core/container_create.go @@ -67,6 +67,11 @@ func (ds *dockerService) CreateContainer( containerName := makeContainerName(sandboxConfig, config) mounts := config.GetMounts() terminationMessagePath, _ := config.Annotations["io.kubernetes.container.terminationMessagePath"] + + sandboxInfo, err := ds.client.InspectContainer(r.GetPodSandboxId()) + if err != nil { + return nil, fmt.Errorf("unable to get container's sandbox ID: %v", err) + } createConfig := dockerbackend.ContainerCreateConfig{ Name: containerName, Config: &container.Config{ @@ -91,6 +96,7 @@ func (ds *dockerService) CreateContainer( RestartPolicy: container.RestartPolicy{ Name: "no", }, + Runtime: sandboxInfo.HostConfig.Runtime, }, } diff --git a/core/container_test.go b/core/container_test.go index 1d5bdd408..013aae366 100644 --- a/core/container_test.go +++ b/core/container_test.go @@ -67,9 +67,15 @@ func TestConcurrentlyCreateAndDeleteContainers(t *testing.T) { podName, namespace := "foo", "bar" containerName, image := "sidecar", "logger" + type podInfo struct { + ContainerId string + SandboxID string + } + const count = 20 configs := make([]*runtimeapi.ContainerConfig, 0, count) sConfigs := make([]*runtimeapi.PodSandboxConfig, 0, count) + for i := 0; i < count; i++ { s := makeSandboxConfig(fmt.Sprintf("%s%d", podName, i), fmt.Sprintf("%s%d", namespace, i), fmt.Sprintf("%d", i), 0) @@ -80,8 +86,8 @@ func TestConcurrentlyCreateAndDeleteContainers(t *testing.T) { configs = append(configs, c) } - containerIDs := make( - chan string, + podInfos := make( + chan podInfo, len(configs), ) // make channel non-blocking to simulate concurrent containers creation @@ -94,39 +100,64 @@ func TestConcurrentlyCreateAndDeleteContainers(t *testing.T) { go func() { creationWg.Wait() - close(containerIDs) + close(podInfos) }() for i := range configs { go func(i int) { defer 
creationWg.Done() - // We don't care about the sandbox id; pass a bogus one. - sandboxID := fmt.Sprintf("sandboxid%d", i) + + runSandboxResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{ + Config: sConfigs[i], + }) + if err != nil { + t.Errorf("RunPodSandbox: %v", err) + return + } + req := &runtimeapi.CreateContainerRequest{ - PodSandboxId: sandboxID, + PodSandboxId: runSandboxResp.PodSandboxId, Config: configs[i], SandboxConfig: sConfigs[i], } + createResp, err := ds.CreateContainer(getTestCTX(), req) if err != nil { t.Errorf("CreateContainer: %v", err) return } - containerIDs <- createResp.ContainerId + podInfos <- podInfo{ + ContainerId: createResp.ContainerId, + SandboxID: runSandboxResp.PodSandboxId, + } }(i) } - for containerID := range containerIDs { + for pod := range podInfos { deletionWg.Add(1) - go func(id string) { + go func(i podInfo) { defer deletionWg.Done() _, err := ds.RemoveContainer( getTestCTX(), - &runtimeapi.RemoveContainerRequest{ContainerId: id}, + &runtimeapi.RemoveContainerRequest{ContainerId: i.ContainerId}, ) if err != nil { t.Errorf("RemoveContainer: %v", err) } - }(containerID) + _, err = ds.StopPodSandbox( + getTestCTX(), + &runtimeapi.StopPodSandboxRequest{PodSandboxId: i.SandboxID}, + ) + if err != nil { + t.Errorf("StopPodSandbox: %v", err) + } + _, err = ds.RemovePodSandbox( + getTestCTX(), + &runtimeapi.RemovePodSandboxRequest{PodSandboxId: i.SandboxID}, + ) + if err != nil { + t.Errorf("RemovePodSandbox: %v", err) + } + }(pod) } deletionWg.Wait() } @@ -155,10 +186,15 @@ func TestListContainers(t *testing.T) { state := runtimeapi.ContainerState_CONTAINER_RUNNING var createdAt int64 = fakeClock.Now().UnixNano() for i := range configs { - // We don't care about the sandbox id; pass a bogus one. 
- sandboxID := fmt.Sprintf("sandboxid%d", i) + runSandboxResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{ + Config: sConfigs[i], + }) + if err != nil { + t.Errorf("RunPodSandbox: %v", err) + return + } req := &runtimeapi.CreateContainerRequest{ - PodSandboxId: sandboxID, + PodSandboxId: runSandboxResp.PodSandboxId, Config: configs[i], SandboxConfig: sConfigs[i], } @@ -174,7 +210,7 @@ func TestListContainers(t *testing.T) { expected = append([]*runtimeapi.Container{{ Metadata: configs[i].Metadata, Id: id, - PodSandboxId: sandboxID, + PodSandboxId: runSandboxResp.PodSandboxId, State: state, CreatedAt: createdAt, Image: configs[i].Image, @@ -226,12 +262,20 @@ func TestContainerStatus(t *testing.T) { fDocker.InjectImages([]dockertypes.ImageSummary{{ID: imageName}}) + runSandboxResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{ + Config: sConfig, + }) + if err != nil { + t.Errorf("RunPodSandbox: %v", err) + return + } + // Create the container. fClock.SetTime(time.Now().Add(-1 * time.Hour)) expected.CreatedAt = fClock.Now().UnixNano() req := &runtimeapi.CreateContainerRequest{ - PodSandboxId: sandboxID, + PodSandboxId: runSandboxResp.PodSandboxId, Config: config, SandboxConfig: sConfig, } @@ -243,7 +287,7 @@ func TestContainerStatus(t *testing.T) { c, err := fDocker.InspectContainer(id) require.NoError(t, err) assert.Equal(t, c.Config.Labels[containerTypeLabelKey], containerTypeLabelContainer) - assert.Equal(t, c.Config.Labels[sandboxIDLabelKey], sandboxID) + assert.Equal(t, c.Config.Labels[sandboxIDLabelKey], runSandboxResp.PodSandboxId) // Set the id manually since we don't know the id until it's created. 
expected.Id = id @@ -309,8 +353,16 @@ func TestContainerLogPath(t *testing.T) { config := makeContainerConfig(sConfig, "pause", "iamimage", 0, nil, nil) config.LogPath = containerLogPath + runSandboxResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{ + Config: sConfig, + }) + if err != nil { + t.Errorf("RunPodSandbox: %v", err) + return + } + req := &runtimeapi.CreateContainerRequest{ - PodSandboxId: sandboxID, + PodSandboxId: runSandboxResp.PodSandboxId, Config: config, SandboxConfig: sConfig, } @@ -371,6 +423,8 @@ func TestContainerCreationConflict(t *testing.T) { noContainerError := fmt.Errorf("Error response from daemon: No such container: %s", containerID) randomError := fmt.Errorf("random error") + // sandBox run called "inspect_image", "pull", "create", "start", "inspect_container", + sandBoxCalls := []string{"inspect_image", "pull", "create", "start", "inspect_container"} for desc, test := range map[string]struct { createError error removeError error @@ -378,36 +432,45 @@ func TestContainerCreationConflict(t *testing.T) { expectCalls []string expectFields int }{ + // sandBox run called "inspect_image", "pull", "create", "start", "inspect_container", "no create error": { - expectCalls: []string{"create"}, + expectCalls: append(sandBoxCalls, []string{"create"}...), expectFields: 6, }, "random create error": { createError: randomError, expectError: randomError, - expectCalls: []string{"create"}, + expectCalls: append(sandBoxCalls, []string{"create"}...), }, "conflict create error with successful remove": { createError: conflictError, expectError: conflictError, - expectCalls: []string{"create", "remove"}, + expectCalls: append(sandBoxCalls, []string{"create", "remove"}...), }, "conflict create error with random remove error": { createError: conflictError, removeError: randomError, expectError: conflictError, - expectCalls: []string{"create", "remove"}, + expectCalls: append(sandBoxCalls, []string{"create", "remove"}...), }, "conflict 
create error with no such container remove error": { createError: conflictError, removeError: noContainerError, - expectCalls: []string{"create", "remove", "create"}, + expectCalls: append(sandBoxCalls, []string{"create", "remove", "create"}...), expectFields: 7, }, } { t.Logf("TestCase: %s", desc) ds, fDocker, _ := newTestDockerService() + runSandboxResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{ + Config: sConfig, + }) + if err != nil { + require.EqualError(t, err, test.expectError.Error()) + continue + } + if test.createError != nil { fDocker.InjectError("create", test.createError) } @@ -416,7 +479,7 @@ func TestContainerCreationConflict(t *testing.T) { } req := &runtimeapi.CreateContainerRequest{ - PodSandboxId: sandboxID, + PodSandboxId: runSandboxResp.PodSandboxId, Config: config, SandboxConfig: sConfig, } diff --git a/core/docker_service.go b/core/docker_service.go index f35290b36..e3eaaec99 100644 --- a/core/docker_service.go +++ b/core/docker_service.go @@ -286,6 +286,8 @@ type dockerService struct { // methods for more info). 
 	containerCleanupInfos map[string]*containerCleanupInfo
 	cleanupInfosLock      sync.RWMutex
+
+	// runtimeInfoLock sync.RWMutex
 }
 
 type dockerServiceAlpha struct {
diff --git a/core/docker_service_test.go b/core/docker_service_test.go
index 500d3f6fa..b6ce2473b 100644
--- a/core/docker_service_test.go
+++ b/core/docker_service_test.go
@@ -20,6 +20,7 @@ import (
 	"errors"
 	"math/rand"
 	"runtime"
+	"sync"
 	"testing"
 	"time"
 
@@ -46,6 +47,7 @@ func newTestNetworkPlugin(t *testing.T) *nettest.MockNetworkPlugin {
 }
 
 type mockCheckpointManager struct {
+	lock       sync.Mutex
 	checkpoint map[string]*PodSandboxCheckpoint
 }
 
@@ -53,7 +55,9 @@ func (ckm *mockCheckpointManager) CreateCheckpoint(
 	checkpointKey string,
 	checkpoint store.Checkpoint,
 ) error {
+	ckm.lock.Lock()
+	defer ckm.lock.Unlock()
 	ckm.checkpoint[checkpointKey] = checkpoint.(*PodSandboxCheckpoint)
 	return nil
 }
 
@@ -61,11 +65,15 @@ func (ckm *mockCheckpointManager) GetCheckpoint(
 	checkpointKey string,
 	checkpoint store.Checkpoint,
 ) error {
+	ckm.lock.Lock()
+	defer ckm.lock.Unlock()
 	*(checkpoint.(*PodSandboxCheckpoint)) = *(ckm.checkpoint[checkpointKey])
 	return nil
 }
 
 func (ckm *mockCheckpointManager) RemoveCheckpoint(checkpointKey string) error {
+	ckm.lock.Lock()
+	defer ckm.lock.Unlock()
 	_, ok := ckm.checkpoint[checkpointKey]
 	if ok {
-		delete(ckm.checkpoint, "moo")
+		delete(ckm.checkpoint, checkpointKey)
@@ -75,6 +83,8 @@ func (ckm *mockCheckpointManager) RemoveCheckpoint(checkpointKey string) error {
 
 func (ckm *mockCheckpointManager) ListCheckpoints() ([]string, error) {
 	var keys []string
+	ckm.lock.Lock()
+	defer ckm.lock.Unlock()
 	for key := range ckm.checkpoint {
 		keys = append(keys, key)
 	}
@@ -82,7 +92,10 @@ func (ckm *mockCheckpointManager) ListCheckpoints() ([]string, error) {
 }
 
 func newMockCheckpointManager() store.CheckpointManager {
-	return &mockCheckpointManager{checkpoint: make(map[string]*PodSandboxCheckpoint)}
+	return &mockCheckpointManager{
+		checkpoint: make(map[string]*PodSandboxCheckpoint),
+		lock:       sync.Mutex{},
+	}
 }
 
 func newTestDockerService()
(*dockerService, *libdocker.FakeDockerClient, *clock.FakeClock) {
diff --git a/core/sandbox_helpers.go b/core/sandbox_helpers.go
index f017b63a4..818d1cfed 100644
--- a/core/sandbox_helpers.go
+++ b/core/sandbox_helpers.go
@@ -58,6 +58,21 @@ var (
 	defaultSandboxGracePeriod = time.Duration(10) * time.Second
 )
 
+// IsRuntimeConfigured returns nil when the named runtime is configured in
+// the Docker daemon, and a descriptive error otherwise.
+func (ds *dockerService) IsRuntimeConfigured(runtime string) error {
+	info, err := ds.getDockerInfo()
+	if err != nil {
+		return fmt.Errorf("failed to get docker info: %v", err)
+	}
+
+	if _, ok := info.Runtimes[runtime]; ok {
+		return nil
+	}
+
+	return fmt.Errorf("no runtime for %q is configured", runtime)
+}
+
 // Returns whether the sandbox network is ready, and whether the sandbox is known
 func (ds *dockerService) getNetworkReady(podSandboxID string) (bool, bool) {
 	ds.networkReadyLock.Lock()
diff --git a/core/sandbox_helpers_test.go b/core/sandbox_helpers_test.go
index 3e9b53e63..f020d761f 100644
--- a/core/sandbox_helpers_test.go
+++ b/core/sandbox_helpers_test.go
@@ -388,3 +388,73 @@ func TestSetUpPodFailure(t *testing.T) {
 	assert.NotNil(t, sandbox)
 	assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.State)
 }
+
+// TestRuntimeHandler verifies that RunPodSandbox applies the requested RuntimeHandler.
+func TestRuntimeHandler(t *testing.T) {
+	ds, _, _ := newTestDockerService()
+	name, namespace := "foo", "bar11"
+	var configs []*runtimeapi.PodSandboxConfig
+
+	rtHandlerTestCases := []struct {
+		Runtimehandler       string
+		expectRuntimehandler string
+		expectError          error
+	}{
+		{
+			Runtimehandler:       "",
+			expectRuntimehandler: "",
+			expectError:          nil,
+		},
+		{
+			Runtimehandler:       "docker",
+			expectRuntimehandler: "",
+			expectError:          nil,
+		},
+		{
+			Runtimehandler:       "runc",
+			expectRuntimehandler: "runc",
+			expectError:          nil,
+		},
+		{
+			Runtimehandler:       "error_runtime",
+			expectRuntimehandler: "",
+			expectError: fmt.Errorf("failed to get sandbox runtime: no runtime for %q 
is configured", "error_runtime"), + }, + } + + for i := 0; i < len(rtHandlerTestCases); i++ { + c := makeSandboxConfigWithLabelsAndAnnotations(fmt.Sprintf("%s%d", name, i), + fmt.Sprintf("%s%d", namespace, i), fmt.Sprintf("%d", i), 0, + map[string]string{"label": fmt.Sprintf("foo%d", i)}, + map[string]string{"annotation": fmt.Sprintf("bar%d", i)}, + ) + configs = append(configs, c) + } + + for i := range configs { + runResp, err := ds.RunPodSandbox( + getTestCTX(), + &runtimeapi.RunPodSandboxRequest{ + Config: configs[i], + RuntimeHandler: rtHandlerTestCases[i].Runtimehandler, + }, + ) + if rtHandlerTestCases[i].expectError != nil { + assert.EqualError(t, err, rtHandlerTestCases[i].expectError.Error()) + continue + } + + require.NoError(t, err) + + if runResp != nil { + listResp, err := ds.PodSandboxStatus(getTestCTX(), &runtimeapi.PodSandboxStatusRequest{ + PodSandboxId: runResp.PodSandboxId}, + ) + require.NoError(t, err) + assert.Equal(t, rtHandlerTestCases[i].expectRuntimehandler, listResp.Status.GetRuntimeHandler()) + + } + + } + +} diff --git a/core/sandbox_run.go b/core/sandbox_run.go index 996582716..44b44685e 100644 --- a/core/sandbox_run.go +++ b/core/sandbox_run.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "github.com/Mirantis/cri-dockerd/config" "github.com/Mirantis/cri-dockerd/utils/errors" v1 "k8s.io/cri-api/pkg/apis/runtime/v1" @@ -51,9 +52,6 @@ func (ds *dockerService) RunPodSandbox( } // Step 2: Create the sandbox container. 
- if r.GetRuntimeHandler() != "" && r.GetRuntimeHandler() != runtimeName { - return nil, fmt.Errorf("RuntimeHandler %q not supported", r.GetRuntimeHandler()) - } createConfig, err := ds.makeSandboxDockerConfig(containerConfig, image) if err != nil { return nil, fmt.Errorf( @@ -62,6 +60,15 @@ func (ds *dockerService) RunPodSandbox( err, ) } + // k8s RuntimeClass.handler=docker will use docker's default runtime + runtimeHandler := r.GetRuntimeHandler() + if runtimeHandler != "" && runtimeHandler != runtimeName { + err = ds.IsRuntimeConfigured(runtimeHandler) + if err != nil { + return nil, fmt.Errorf("failed to get sandbox runtime: %v", err) + } + createConfig.HostConfig.Runtime = runtimeHandler + } createResp, err := ds.client.CreateContainer(*createConfig) if err != nil { createResp, err = recoverFromCreationConflictIfNeeded(ds.client, *createConfig, err) @@ -69,7 +76,7 @@ func (ds *dockerService) RunPodSandbox( if err != nil || createResp == nil { return nil, fmt.Errorf( - "failed to create a sandbox for pod %q: %v", + "failed to create a sandbox for pod %q: %w", containerConfig.Metadata.Name, err, ) diff --git a/core/sandbox_status.go b/core/sandbox_status.go index 917f45bca..28823b0f3 100644 --- a/core/sandbox_status.go +++ b/core/sandbox_status.go @@ -19,6 +19,7 @@ package core import ( "context" "fmt" + v1 "k8s.io/cri-api/pkg/apis/runtime/v1" ) @@ -81,6 +82,7 @@ func (ds *dockerService) PodSandboxStatus( }, }, }, + RuntimeHandler: r.HostConfig.Runtime, } // add additional IPs additionalPodIPs := make([]*v1.PodIP, 0, len(ips)) diff --git a/libdocker/client.go b/libdocker/client.go index 9c953614a..f4d740733 100644 --- a/libdocker/client.go +++ b/libdocker/client.go @@ -25,6 +25,7 @@ import ( dockercontainer "github.com/docker/docker/api/types/container" dockerimagetypes "github.com/docker/docker/api/types/image" dockerregistry "github.com/docker/docker/api/types/registry" + dockersystem "github.com/docker/docker/api/types/system" dockerapi 
"github.com/docker/docker/client" "github.com/sirupsen/logrus" ) @@ -46,7 +47,7 @@ const ( // DockerClientInterface is an abstract interface for testability. It abstracts the interface of docker client. type DockerClientInterface interface { - ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) + ListContainers(options dockercontainer.ListOptions) ([]dockertypes.Container, error) InspectContainer(id string) (*dockertypes.ContainerJSON, error) InspectContainerWithSize(id string) (*dockertypes.ContainerJSON, error) CreateContainer( @@ -55,23 +56,23 @@ type DockerClientInterface interface { StartContainer(id string) error StopContainer(id string, timeout time.Duration) error UpdateContainerResources(id string, updateConfig dockercontainer.UpdateConfig) error - RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error + RemoveContainer(id string, opts dockercontainer.RemoveOptions) error InspectImageByRef(imageRef string) (*dockertypes.ImageInspect, error) InspectImageByID(imageID string) (*dockertypes.ImageInspect, error) - ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.ImageSummary, error) + ListImages(opts dockertypes.ImageListOptions) ([]dockerimagetypes.Summary, error) PullImage(image string, auth dockerregistry.AuthConfig, opts dockertypes.ImagePullOptions) error RemoveImage( image string, opts dockertypes.ImageRemoveOptions, - ) ([]dockertypes.ImageDeleteResponseItem, error) + ) ([]dockerimagetypes.DeleteResponse, error) ImageHistory(id string) ([]dockerimagetypes.HistoryResponseItem, error) - Logs(string, dockertypes.ContainerLogsOptions, StreamOptions) error + Logs(string, dockercontainer.LogsOptions, StreamOptions) error Version() (*dockertypes.Version, error) - Info() (*dockertypes.Info, error) + Info() (*dockersystem.Info, error) CreateExec(string, dockertypes.ExecConfig) (*dockertypes.IDResponse, error) StartExec(string, dockertypes.ExecStartCheck, StreamOptions) error InspectExec(id 
string) (*dockertypes.ContainerExecInspect, error) - AttachToContainer(string, dockertypes.ContainerAttachOptions, StreamOptions) error + AttachToContainer(string, dockercontainer.AttachOptions, StreamOptions) error ResizeContainerTTY(id string, height, width uint) error ResizeExecTTY(id string, height, width uint) error GetContainerStats(id string) (*dockertypes.StatsJSON, error) diff --git a/libdocker/fake_client.go b/libdocker/fake_client.go index 90c16425c..882c4b388 100644 --- a/libdocker/fake_client.go +++ b/libdocker/fake_client.go @@ -33,6 +33,7 @@ import ( dockercontainer "github.com/docker/docker/api/types/container" dockerimagetypes "github.com/docker/docker/api/types/image" dockerregistry "github.com/docker/docker/api/types/registry" + dockersystem "github.com/docker/docker/api/types/system" v1 "k8s.io/api/core/v1" "k8s.io/utils/clock" @@ -56,7 +57,7 @@ type FakeDockerClient struct { ExitedContainerList []dockertypes.Container ContainerMap map[string]*dockertypes.ContainerJSON ImageInspects map[string]*dockertypes.ImageInspect - Images []dockertypes.ImageSummary + Images []dockerimagetypes.Summary ImageIDsNeedingAuth map[string]dockerregistry.AuthConfig Errors map[string]error called []CalledDetail @@ -73,7 +74,7 @@ type FakeDockerClient struct { ImagesPulled []string VersionInfo dockertypes.Version - Information dockertypes.Info + Information dockersystem.Info ExecInspect *dockertypes.ContainerExecInspect execCmd []string EnableSleep bool @@ -107,6 +108,15 @@ func NewFakeDockerClient() *FakeDockerClient { ImageInspects: make(map[string]*dockertypes.ImageInspect), ImageIDsNeedingAuth: make(map[string]dockerregistry.AuthConfig), RandGenerator: rand.New(rand.NewSource(time.Now().UnixNano())), + Information: dockersystem.Info{ + Runtimes: map[string]dockersystem.RuntimeWithStatus{ + "runc": dockersystem.RuntimeWithStatus{ + Runtime: dockersystem.Runtime{ + Path: "runc", + }, + }, + }, + }, } } @@ -313,7 +323,7 @@ func (f *FakeDockerClient) popError(op 
string) error { // ListContainers is a test-spy implementation of DockerClientInterface.ListContainers. // It adds an entry "list" to the internal method call record. func (f *FakeDockerClient) ListContainers( - options dockertypes.ContainerListOptions, + options dockercontainer.ListOptions, ) ([]dockertypes.Container, error) { f.Lock() defer f.Unlock()