support RuntimeClass.handler, useful when e.g. nvidia isn't the default runtime

Signed-off-by: zhangguanzhang <[email protected]>
zhangguanzhang committed Apr 21, 2024
1 parent b138f52 commit b92e5ba
Showing 10 changed files with 226 additions and 39 deletions.
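For orientation before the diffs: a Kubernetes RuntimeClass names a container runtime handler (for example "nvidia"), the kubelet forwards that handler with the CRI sandbox-creation request, and this commit makes the Docker CRI shim honor it, so the sandbox container is started with that runtime and every container created inside the sandbox inherits it. The minimal Go sketch below illustrates that flow; it uses hypothetical stand-in types rather than the real CRI or Docker API structs, and it is an editorial illustration, not code from this commit.

package main

import "fmt"

// Stand-ins for the real CRI and Docker API types (hypothetical, for illustration only).
type runPodSandboxRequest struct {
	RuntimeHandler string // populated from RuntimeClass.handler, e.g. "nvidia"
}

type hostConfig struct {
	Runtime string // Docker runtime name, as with `docker run --runtime=...`
}

type sandbox struct{ HostConfig hostConfig }

// createSandbox picks the runtime for the pod sandbox: the RuntimeClass handler
// if one was given, otherwise the daemon's default runtime.
func createSandbox(req runPodSandboxRequest, defaultRuntime string) sandbox {
	rt := req.RuntimeHandler
	if rt == "" {
		rt = defaultRuntime
	}
	return sandbox{HostConfig: hostConfig{Runtime: rt}}
}

// createContainer mirrors the container_create.go change below: the app container
// inherits whatever runtime its sandbox is already running with.
func createContainer(sb sandbox) hostConfig {
	return hostConfig{Runtime: sb.HostConfig.Runtime}
}

func main() {
	sb := createSandbox(runPodSandboxRequest{RuntimeHandler: "nvidia"}, "runc")
	fmt.Println(createContainer(sb).Runtime) // prints: nvidia
}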
6 changes: 6 additions & 0 deletions core/container_create.go
@@ -67,6 +67,11 @@ func (ds *dockerService) CreateContainer(
containerName := makeContainerName(sandboxConfig, config)
mounts := config.GetMounts()
terminationMessagePath, _ := config.Annotations["io.kubernetes.container.terminationMessagePath"]

sandboxInfo, err := ds.client.InspectContainer(r.GetPodSandboxId())
if err != nil {
return nil, fmt.Errorf("unable to get container's sandbox ID: %v", err)
}
createConfig := dockerbackend.ContainerCreateConfig{
Name: containerName,
Config: &container.Config{
@@ -91,6 +96,7 @@ func (ds *dockerService) CreateContainer(
RestartPolicy: container.RestartPolicy{
Name: "no",
},
Runtime: sandboxInfo.HostConfig.Runtime,
},
}

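The hunk above works around the fact that the CRI CreateContainerRequest carries no runtime handler of its own: the shim inspects the already-created sandbox container and copies its HostConfig.Runtime onto the new container. A minimal, self-contained sketch of that inspect-and-copy step, written against the upstream Docker Go SDK, follows. It is an illustration only; import paths and signatures vary between SDK versions, and the sandbox ID is a placeholder.

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// sandboxRuntime returns the runtime the sandbox container was started with;
// this is the value CreateContainer now copies onto the app container.
func sandboxRuntime(ctx context.Context, cli *client.Client, sandboxID string) (string, error) {
	info, err := cli.ContainerInspect(ctx, sandboxID)
	if err != nil {
		return "", fmt.Errorf("unable to inspect sandbox %s: %w", sandboxID, err)
	}
	if info.HostConfig == nil {
		return "", fmt.Errorf("sandbox %s has no HostConfig", sandboxID)
	}
	return info.HostConfig.Runtime, nil
}

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	// "my-sandbox-id" is a placeholder; in the shim it is the pod sandbox ID
	// carried by the CreateContainerRequest.
	rt, err := sandboxRuntime(context.Background(), cli, "my-sandbox-id")
	if err != nil {
		panic(err)
	}
	hc := &container.HostConfig{Runtime: rt} // reuse the sandbox's runtime for the new container
	fmt.Println(hc.Runtime)
}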
111 changes: 87 additions & 24 deletions core/container_test.go
@@ -67,9 +67,15 @@ func TestConcurrentlyCreateAndDeleteContainers(t *testing.T) {
podName, namespace := "foo", "bar"
containerName, image := "sidecar", "logger"

type podInfo struct {
ContainerId string
SandboxID string
}

const count = 20
configs := make([]*runtimeapi.ContainerConfig, 0, count)
sConfigs := make([]*runtimeapi.PodSandboxConfig, 0, count)

for i := 0; i < count; i++ {
s := makeSandboxConfig(fmt.Sprintf("%s%d", podName, i),
fmt.Sprintf("%s%d", namespace, i), fmt.Sprintf("%d", i), 0)
@@ -80,8 +86,8 @@ func TestConcurrentlyCreateAndDeleteContainers(t *testing.T) {
configs = append(configs, c)
}

containerIDs := make(
chan string,
podInfos := make(
chan podInfo,
len(configs),
) // make channel non-blocking to simulate concurrent containers creation

@@ -94,39 +100,64 @@ func TestConcurrentlyCreateAndDeleteContainers(t *testing.T) {

go func() {
creationWg.Wait()
close(containerIDs)
close(podInfos)
}()
for i := range configs {
go func(i int) {
defer creationWg.Done()
// We don't care about the sandbox id; pass a bogus one.
sandboxID := fmt.Sprintf("sandboxid%d", i)

runSandboxResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{
Config: sConfigs[i],
})
if err != nil {
t.Errorf("RunPodSandbox: %v", err)
return
}

req := &runtimeapi.CreateContainerRequest{
PodSandboxId: sandboxID,
PodSandboxId: runSandboxResp.PodSandboxId,
Config: configs[i],
SandboxConfig: sConfigs[i],
}

createResp, err := ds.CreateContainer(getTestCTX(), req)
if err != nil {
t.Errorf("CreateContainer: %v", err)
return
}
containerIDs <- createResp.ContainerId
podInfos <- podInfo{
ContainerId: createResp.ContainerId,
SandboxID: runSandboxResp.PodSandboxId,
}
}(i)
}

for containerID := range containerIDs {
for pod := range podInfos {
deletionWg.Add(1)
go func(id string) {
go func(i podInfo) {
defer deletionWg.Done()
_, err := ds.RemoveContainer(
getTestCTX(),
&runtimeapi.RemoveContainerRequest{ContainerId: id},
&runtimeapi.RemoveContainerRequest{ContainerId: i.ContainerId},
)
if err != nil {
t.Errorf("RemoveContainer: %v", err)
}
}(containerID)
_, err = ds.StopPodSandbox(
getTestCTX(),
&runtimeapi.StopPodSandboxRequest{PodSandboxId: i.SandboxID},
)
if err != nil {
t.Errorf("StopPodSandbox: %v", err)
}
_, err = ds.RemovePodSandbox(
getTestCTX(),
&runtimeapi.RemovePodSandboxRequest{PodSandboxId: i.SandboxID},
)
if err != nil {
t.Errorf("RemovePodSandbox: %v", err)
}
}(pod)
}
deletionWg.Wait()
}
@@ -155,10 +186,15 @@ func TestListContainers(t *testing.T) {
state := runtimeapi.ContainerState_CONTAINER_RUNNING
var createdAt int64 = fakeClock.Now().UnixNano()
for i := range configs {
// We don't care about the sandbox id; pass a bogus one.
sandboxID := fmt.Sprintf("sandboxid%d", i)
runSandboxResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{
Config: sConfigs[i],
})
if err != nil {
t.Errorf("RunPodSandbox: %v", err)
return
}
req := &runtimeapi.CreateContainerRequest{
PodSandboxId: sandboxID,
PodSandboxId: runSandboxResp.PodSandboxId,
Config: configs[i],
SandboxConfig: sConfigs[i],
}
@@ -174,7 +210,7 @@
expected = append([]*runtimeapi.Container{{
Metadata: configs[i].Metadata,
Id: id,
PodSandboxId: sandboxID,
PodSandboxId: runSandboxResp.PodSandboxId,
State: state,
CreatedAt: createdAt,
Image: configs[i].Image,
@@ -226,12 +262,20 @@ func TestContainerStatus(t *testing.T) {

fDocker.InjectImages([]dockertypes.ImageSummary{{ID: imageName}})

runSandboxResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{
Config: sConfig,
})
if err != nil {
t.Errorf("RunPodSandbox: %v", err)
return
}

// Create the container.
fClock.SetTime(time.Now().Add(-1 * time.Hour))
expected.CreatedAt = fClock.Now().UnixNano()

req := &runtimeapi.CreateContainerRequest{
PodSandboxId: sandboxID,
PodSandboxId: runSandboxResp.PodSandboxId,
Config: config,
SandboxConfig: sConfig,
}
@@ -243,7 +287,7 @@
c, err := fDocker.InspectContainer(id)
require.NoError(t, err)
assert.Equal(t, c.Config.Labels[containerTypeLabelKey], containerTypeLabelContainer)
assert.Equal(t, c.Config.Labels[sandboxIDLabelKey], sandboxID)
assert.Equal(t, c.Config.Labels[sandboxIDLabelKey], runSandboxResp.PodSandboxId)

// Set the id manually since we don't know the id until it's created.
expected.Id = id
@@ -309,8 +353,16 @@ func TestContainerLogPath(t *testing.T) {
config := makeContainerConfig(sConfig, "pause", "iamimage", 0, nil, nil)
config.LogPath = containerLogPath

runSandboxResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{
Config: sConfig,
})
if err != nil {
t.Errorf("RunPodSandbox: %v", err)
return
}

req := &runtimeapi.CreateContainerRequest{
PodSandboxId: sandboxID,
PodSandboxId: runSandboxResp.PodSandboxId,
Config: config,
SandboxConfig: sConfig,
}
@@ -371,43 +423,54 @@ func TestContainerCreationConflict(t *testing.T) {
noContainerError := fmt.Errorf("Error response from daemon: No such container: %s", containerID)
randomError := fmt.Errorf("random error")

// RunPodSandbox itself issues "inspect_image", "pull", "create", "start" and "inspect_container" calls on the fake client.
sandBoxCalls := []string{"inspect_image", "pull", "create", "start", "inspect_container"}
for desc, test := range map[string]struct {
createError error
removeError error
expectError error
expectCalls []string
expectFields int
}{
// RunPodSandbox itself issues "inspect_image", "pull", "create", "start" and "inspect_container" calls on the fake client.
"no create error": {
expectCalls: []string{"create"},
expectCalls: append(sandBoxCalls, []string{"create"}...),
expectFields: 6,
},
"random create error": {
createError: randomError,
expectError: randomError,
expectCalls: []string{"create"},
expectCalls: append(sandBoxCalls, []string{"create"}...),
},
"conflict create error with successful remove": {
createError: conflictError,
expectError: conflictError,
expectCalls: []string{"create", "remove"},
expectCalls: append(sandBoxCalls, []string{"create", "remove"}...),
},
"conflict create error with random remove error": {
createError: conflictError,
removeError: randomError,
expectError: conflictError,
expectCalls: []string{"create", "remove"},
expectCalls: append(sandBoxCalls, []string{"create", "remove"}...),
},
"conflict create error with no such container remove error": {
createError: conflictError,
removeError: noContainerError,
expectCalls: []string{"create", "remove", "create"},
expectCalls: append(sandBoxCalls, []string{"create", "remove", "create"}...),
expectFields: 7,
},
} {
t.Logf("TestCase: %s", desc)
ds, fDocker, _ := newTestDockerService()

runSandboxResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{
Config: sConfig,
})
if err != nil {
require.EqualError(t, err, test.expectError.Error())
continue
}

if test.createError != nil {
fDocker.InjectError("create", test.createError)
}
@@ -416,7 +479,7 @@
}

req := &runtimeapi.CreateContainerRequest{
PodSandboxId: sandboxID,
PodSandboxId: runSandboxResp.PodSandboxId,
Config: config,
SandboxConfig: sConfig,
}
2 changes: 2 additions & 0 deletions core/docker_service.go
@@ -286,6 +286,8 @@ type dockerService struct {
// methods for more info).
containerCleanupInfos map[string]*containerCleanupInfo
cleanupInfosLock sync.RWMutex

// runtimeInfoLock sync.RWMutex
}

type dockerServiceAlpha struct {
15 changes: 14 additions & 1 deletion core/docker_service_test.go
@@ -20,6 +20,7 @@ import (
"errors"
"math/rand"
"runtime"
"sync"
"testing"
"time"

@@ -46,26 +47,33 @@ func newTestNetworkPlugin(t *testing.T) *nettest.MockNetworkPlugin {
}

type mockCheckpointManager struct {
lock sync.Mutex
checkpoint map[string]*PodSandboxCheckpoint
}

func (ckm *mockCheckpointManager) CreateCheckpoint(
checkpointKey string,
checkpoint store.Checkpoint,
) error {
ckm.lock.Lock()
ckm.checkpoint[checkpointKey] = checkpoint.(*PodSandboxCheckpoint)
ckm.lock.Unlock()
return nil
}

func (ckm *mockCheckpointManager) GetCheckpoint(
checkpointKey string,
checkpoint store.Checkpoint,
) error {
ckm.lock.Lock()
defer ckm.lock.Unlock()
*(checkpoint.(*PodSandboxCheckpoint)) = *(ckm.checkpoint[checkpointKey])
return nil
}

func (ckm *mockCheckpointManager) RemoveCheckpoint(checkpointKey string) error {
ckm.lock.Lock()
defer ckm.lock.Unlock()
_, ok := ckm.checkpoint[checkpointKey]
if ok {
delete(ckm.checkpoint, "moo")
@@ -75,14 +83,19 @@ func (ckm *mockCheckpointManager) RemoveCheckpoint(checkpointKey string) error {

func (ckm *mockCheckpointManager) ListCheckpoints() ([]string, error) {
var keys []string
ckm.lock.Lock()
defer ckm.lock.Unlock()
for key := range ckm.checkpoint {
keys = append(keys, key)
}
return keys, nil
}

func newMockCheckpointManager() store.CheckpointManager {
return &mockCheckpointManager{checkpoint: make(map[string]*PodSandboxCheckpoint)}
return &mockCheckpointManager{
checkpoint: make(map[string]*PodSandboxCheckpoint),
lock: sync.Mutex{},
}
}

func newTestDockerService() (*dockerService, *libdocker.FakeDockerClient, *clock.FakeClock) {
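The mutex added to mockCheckpointManager above guards its checkpoint map now that the concurrency test drives sandbox creation and removal from many goroutines at once; Go maps are not safe for concurrent writes. The short, self-contained sketch below (an editorial illustration, not code from this commit) shows the same guard-every-access pattern the mock now follows.

package main

import (
	"fmt"
	"sync"
)

// checkpointStore follows the same pattern the test's mockCheckpointManager now uses:
// every map access happens under the mutex, so many goroutines can create and remove
// checkpoints concurrently without a data race or a "concurrent map writes" panic.
type checkpointStore struct {
	mu          sync.Mutex
	checkpoints map[string]string
}

func (s *checkpointStore) Create(key, value string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.checkpoints[key] = value
}

func (s *checkpointStore) Remove(key string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.checkpoints, key)
}

func main() {
	s := &checkpointStore{checkpoints: make(map[string]string)}
	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			key := fmt.Sprintf("sandbox%d", i)
			s.Create(key, "checkpoint")
			s.Remove(key)
		}(i)
	}
	wg.Wait()
	fmt.Println("remaining checkpoints:", len(s.checkpoints)) // remaining checkpoints: 0
}

Running the sketch under the race detector (go run -race) passes; drop the mutex and the detector reports the concurrent map writes, which is the failure mode the test change avoids.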
18 changes: 18 additions & 0 deletions core/sandbox_helpers.go
@@ -58,6 +58,24 @@ var (
defaultSandboxGracePeriod = time.Duration(10) * time.Second
)

// IsRuntimeConfigured returns nil if the named runtime is configured in the Docker daemon.
func (ds *dockerService) IsRuntimeConfigured(runtime string) error {
info, err := ds.getDockerInfo()
if err != nil {
return fmt.Errorf("failed to get docker info: %v", err)
}

// ds.runtimeInfoLock.RLock()
for r := range info.Runtimes {
if r == runtime {
return nil
}
}
// ds.runtimeInfoLock.RUnlock()

return fmt.Errorf("no runtime for %q is configured", runtime)
}

// Returns whether the sandbox network is ready, and whether the sandbox is known
func (ds *dockerService) getNetworkReady(podSandboxID string) (bool, bool) {
ds.networkReadyLock.Lock()
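IsRuntimeConfigured validates a runtime name against the daemon's "docker info" data; its caller, presumably on the sandbox-creation path, is in files not shown in this view. For readers who want to try the same check outside the shim, the sketch below is a hedged, self-contained version written against the upstream Docker Go SDK; the package holding the Info struct has moved between SDK versions, but the Runtimes map ranged over here corresponds to the field the new helper iterates.

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

// isRuntimeConfigured re-implements the new helper's check directly against the
// Docker daemon: "docker info" reports the configured runtimes (runc plus anything
// registered in /etc/docker/daemon.json, e.g. "nvidia"), and a RuntimeClass handler
// is only usable if it matches one of those names.
func isRuntimeConfigured(ctx context.Context, cli *client.Client, runtime string) error {
	info, err := cli.Info(ctx)
	if err != nil {
		return fmt.Errorf("failed to get docker info: %w", err)
	}
	for name := range info.Runtimes {
		if name == runtime {
			return nil
		}
	}
	return fmt.Errorf("no runtime for %q is configured", runtime)
}

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	if err := isRuntimeConfigured(context.Background(), cli, "nvidia"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(`runtime "nvidia" is configured`)
}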