fix volume issue

zerodoctor
2023-10-08 00:14:25 -05:00
parent 00087aa865
commit 1490989b80
2 changed files with 21 additions and 48 deletions

View File

@@ -33,21 +33,16 @@ func toSpec(spec *Spec, step *Step) *specgen.SpecGenerator {
 	}
 	volume := specgen.ContainerStorageConfig{
+		Image:            step.Image,
 		WorkDir:          step.WorkingDir,
 		CreateWorkingDir: true,
 		ShmSize:          toPtr(step.ShmSize),
 	}
-	volumeSet := toVolumeSet(spec, step)
-	for path := range volumeSet {
-		volume.Volumes = append(volume.Volumes, &specgen.NamedVolume{
-			Dest: path,
-		})
-	}
 	if len(step.Volumes) != 0 {
 		volume.Devices = toLinuxDeviceSlice(spec, step)
 		volume.Mounts = toLinuxVolumeMounts(spec, step)
+		volume.Volumes = toLinuxVolumeSlice(spec, step)
 	}
 	security := specgen.ContainerSecurityConfig{
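
The deleted loop registered each bind-mount path as a Dest-only specgen.NamedVolume, and a NamedVolume without a Name is effectively anonymous, so Podman appears to mount a freshly created empty volume over the path instead of the intended data; the fix builds fully named volumes in toLinuxVolumeSlice instead. A minimal sketch of that distinction, assuming the podman v4 module path (the pinned version is not visible in this diff) and a hypothetical volume name:

package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/specgen"
)

func main() {
	// Dest-only: behaves like an anonymous volume, which can shadow
	// the data the step actually wanted mounted at that path.
	anon := &specgen.NamedVolume{Dest: "/drone/src"}

	// Name + Dest: attaches the existing volume (or host path) instead
	// of a fresh one; "drone-workspace" is a hypothetical name.
	named := &specgen.NamedVolume{Name: "drone-workspace", Dest: "/drone/src"}

	fmt.Println(anon.Name == "") // true: anonymous
	fmt.Println(named.Name)      // drone-workspace
}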
@@ -140,35 +135,9 @@ func toLinuxDeviceSlice(spec *Spec, step *Step) []specs.LinuxDevice {
 	return to
 }
 
-// helper function that converts a slice of volume paths to a set
-// of unique volume names.
-func toVolumeSet(spec *Spec, step *Step) map[string]struct{} {
-	set := map[string]struct{}{}
-	for _, mount := range step.Volumes {
-		volume, ok := lookupVolume(spec, mount.Name)
-		if !ok {
-			continue
-		}
-		if isDevice(volume) {
-			continue
-		}
-		if isNamedPipe(volume) {
-			continue
-		}
-		if isBindMount(volume) == false {
-			continue
-		}
-		set[mount.Path] = struct{}{}
-	}
-	return set
-}
-
 // helper function returns a slice of volume mounts.
-func toVolumeSlice(spec *Spec, step *Step) []string {
-	// this entire function should be deprecated in
-	// favor of toVolumeMounts, however, I am unable
-	// to get it working with data volumes.
-	var to []string
+func toLinuxVolumeSlice(spec *Spec, step *Step) []*specgen.NamedVolume {
+	var to []*specgen.NamedVolume
 	for _, mount := range step.Volumes {
 		volume, ok := lookupVolume(spec, mount.Name)
 		if !ok {
@@ -178,14 +147,19 @@ func toVolumeSlice(spec *Spec, step *Step) []string {
 			continue
 		}
 		if isDataVolume(volume) {
-			path := volume.EmptyDir.ID + ":" + mount.Path
-			to = append(to, path)
+			to = append(to, &specgen.NamedVolume{
+				Name: volume.EmptyDir.ID,
+				Dest: mount.Path,
+			})
 		}
 		if isBindMount(volume) {
-			path := volume.HostPath.Path + ":" + mount.Path
-			to = append(to, path)
+			to = append(to, &specgen.NamedVolume{
+				Name: volume.HostPath.Path,
+				Dest: mount.Path,
+			})
 		}
 	}
 	return to
 }
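
The rename from toVolumeSlice to toLinuxVolumeSlice also changes the return type: instead of joining source and destination into a "source:dest" string, each mount becomes a structured specgen.NamedVolume. A self-contained sketch of that conversion, using a stand-in volume type (the runner's real Volume/EmptyDir/HostPath structs live elsewhere in this repo and may differ) and an assumed podman v4 module path:

package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/specgen"
)

// volume is a stand-in: EmptyDirID is set for data volumes, HostPath for
// bind mounts, mirroring the isDataVolume/isBindMount branches above.
type volume struct {
	EmptyDirID string
	HostPath   string
}

// toNamedVolume replaces the old "source:dest" string with a structured
// specgen.NamedVolume, as the new toLinuxVolumeSlice does.
func toNamedVolume(v volume, dest string) *specgen.NamedVolume {
	if v.EmptyDirID != "" {
		// data volume: reference the pre-created volume by its ID
		return &specgen.NamedVolume{Name: v.EmptyDirID, Dest: dest}
	}
	// bind mount: the host path becomes the source
	return &specgen.NamedVolume{Name: v.HostPath, Dest: dest}
}

func main() {
	// old form: "abc123:/drone/src"; new form carries the same fields
	nv := toNamedVolume(volume{EmptyDirID: "abc123"}, "/drone/src")
	fmt.Println(nv.Name, nv.Dest) // abc123 /drone/src
}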
@@ -231,12 +205,11 @@ func toLinuxMount(source *Volume, target *VolumeMount) specs.Mount {
 			// options defaults = rw, suid, dev, exec, auto, nouser, and async
 			to.Options = append(to.Options, "ro")
 		}
-		// to.ReadOnly = source.HostPath.ReadOnly
 	}
 	if isTempfs(source) {
-		// NOTE: not sure if this is translatable
-		// probably part of resource struct
+		// NOTE: specs.Mount might not be the right structure
+		// maybe ImageVolume is suitable here
 		// to.TmpfsOptions = &mount.TmpfsOptions{
 		//	SizeBytes: source.EmptyDir.SizeLimit,
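
The updated NOTE wonders whether specs.Mount or ImageVolume is the right home for tmpfs-backed empty-dirs. One plausible encoding, sketched under the assumption that a runtime-spec mount of type "tmpfs" with mount(8)-style options would be accepted here; nothing in the diff confirms this:

package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// tmpfsMount builds a runtime-spec tmpfs mount; the size= option string
// follows mount(8) conventions and is an assumption, not verified here.
func tmpfsMount(dest string, sizeBytes int64) specs.Mount {
	return specs.Mount{
		Destination: dest,
		Type:        "tmpfs",
		Source:      "tmpfs",
		Options:     []string{"rw", fmt.Sprintf("size=%d", sizeBytes)},
	}
}

func main() {
	fmt.Printf("%+v\n", tmpfsMount("/tmp/cache", 64<<20))
}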

View File

@@ -102,6 +102,7 @@ func (e *Podman) Setup(ctx context.Context, specv runtime.Spec) error {
 	logger.FromContext(ctx).Tracef("setup networks...")
 	_, err := network.Create(e.conn, &types.Network{
 		Driver:  driver,
+		Name:    spec.Network.ID,
 		Options: spec.Network.Options,
 		Labels:  spec.Network.Labels,
 	})
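
Passing Name pins the network to spec.Network.ID; without it, Podman assigns a generated name, so later operations that reference the spec's ID presumably could not find the network. A hedged sketch of the same call through the podman bindings, assuming the v4 module path, the default root socket location, and a hypothetical network name:

package main

import (
	"context"
	"fmt"

	nettypes "github.com/containers/common/libnetwork/types"
	"github.com/containers/podman/v4/pkg/bindings"
	"github.com/containers/podman/v4/pkg/bindings/network"
)

func main() {
	// socket path is an assumption; rootless setups use a user socket
	conn, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}
	// without Name, podman picks its own; the fix sets it explicitly
	created, err := network.Create(conn, &nettypes.Network{
		Name:   "drone-network-id", // hypothetical ID
		Driver: "bridge",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(created.Name)
}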
@@ -238,19 +239,14 @@ func (e *Podman) Run(ctx context.Context, specv runtime.Spec, stepv runtime.Step
 		}
 		defer logs.Close()
 	} else {
-		var buf bytes.Buffer
-		multiWriter := io.MultiWriter(output, &buf)
-
 		logger.FromContext(ctx).Tracef("tail logging...")
-		err = e.tail(ctx, step.ID, multiWriter)
+		err = e.tail(ctx, step.ID, output)
 		if err != nil {
 			logger.FromContext(ctx).
 				WithError(err).
 				Errorf("failed to tail logs")
 			return nil, errors.TrimExtraInfo(err)
 		}
-
-		logger.FromContext(ctx).Debugf("[tail_logs=%s]", buf.String())
 	}
 
 	// wait for the response
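
The deleted lines duplicated every log byte into an in-memory buffer solely to re-emit it at debug level, which keeps a step's entire output resident until the function returns; tailing straight to output drops that cost. For reference, a sketch of the removed pattern, with os.Stdout standing in for the runner's writer:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

func main() {
	var buf bytes.Buffer
	output := os.Stdout

	// every write lands in both sinks, so a long-running step's whole
	// log stays in memory for the lifetime of the tail
	multiWriter := io.MultiWriter(output, &buf)
	fmt.Fprintln(multiWriter, "step log line")

	fmt.Printf("[tail_logs=%s]", buf.String())
}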
@@ -364,6 +360,10 @@ func (e *Podman) waitRetry(ctx context.Context, id string) (*runtime.State, erro
 // helper function emulates the `docker wait` command, blocking
 // until the container stops and returning the exit code.
 func (e *Podman) wait(ctx context.Context, id string) (*runtime.State, error) {
+	logger.FromContext(ctx).
+		WithField("container", id).
+		Debug("waiting for container")
+
 	containers.Wait(e.conn, id, &containers.WaitOptions{
 		Conditions: []string{"created", "exited", "dead", "removing", "removed"},
 	})
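
For context, the bindings' Wait call used here blocks until the container reaches one of the listed states and returns its exit code, emulating `docker wait`. A minimal usage sketch, assuming the podman v4 module path and socket location, with the WaitOptions shape mirrored from the diff and a hypothetical container name:

package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v4/pkg/bindings"
	"github.com/containers/podman/v4/pkg/bindings/containers"
)

func main() {
	conn, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}
	// blocks until the container exits, then yields the exit code
	code, err := containers.Wait(conn, "my-step-container", &containers.WaitOptions{
		Conditions: []string{"exited"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("exit code:", code)
}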