wip adding docker run commands [ci skip]

Brad Rydzewski
2019-10-19 11:39:16 -07:00
parent 55336ebdd6
commit 00df09b842
14 changed files with 1005 additions and 111 deletions

View File

@@ -50,9 +50,8 @@ type Resources struct {
CPUSet []string
}
// Compiler compiles the Yaml configuration file to an
// intermediate representation optimized for simple execution.
type Compiler struct {
// Args provides compiler arguments.
type Args struct {
// Manifest provides the parsed manifest.
Manifest *manifest.Manifest
@@ -82,6 +81,20 @@ type Compiler struct {
// each pipeline step.
System *drone.System
// Netrc provides netrc parameters that can be used by the
// default clone step to authenticate to the remote
// repository.
Netrc *drone.Netrc
// Secret returns a named secret value that can be injected
// into the pipeline step.
Secret secret.Provider
}
// Compiler compiles the Yaml configuration file to an
// intermediate representation optimized for simple execution.
type Compiler struct {
// Environ provides a set of environment variables that
// should be added to each pipeline step by default.
Environ map[string]string
@@ -106,11 +119,6 @@ type Compiler struct {
// applies to pipeline containers.
Resources Resources
// Netrc provides netrc parameters that can be used by the
// default clone step to authenticate to the remote
// repository.
Netrc *drone.Netrc
// Secret returns a named secret value that can be injected
// into the pipeline step.
Secret secret.Provider
@@ -121,11 +129,11 @@ type Compiler struct {
}
// Compile compiles the configuration file.
func (c *Compiler) Compile(ctx context.Context) *engine.Spec {
os := c.Pipeline.Platform.OS
func (c *Compiler) Compile(ctx context.Context, args Args) *engine.Spec {
os := args.Pipeline.Platform.OS
// create the workspace paths
base, path, full := createWorkspace(c.Pipeline)
base, path, full := createWorkspace(args.Pipeline)
// create the workspace mount
mount := &engine.VolumeMount{
@@ -136,11 +144,11 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec {
// create system labels
labels := labels.Combine(
c.Labels,
labels.FromRepo(c.Repo),
labels.FromBuild(c.Build),
labels.FromStage(c.Stage),
labels.FromSystem(c.System),
labels.WithTimeout(c.Repo),
labels.FromRepo(args.Repo),
labels.FromBuild(args.Build),
labels.FromStage(args.Stage),
labels.FromSystem(args.System),
labels.WithTimeout(args.Repo),
)
// create the workspace volume
@@ -156,10 +164,10 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec {
Labels: labels,
},
Platform: engine.Platform{
OS: c.Pipeline.Platform.OS,
Arch: c.Pipeline.Platform.Arch,
Variant: c.Pipeline.Platform.Variant,
Version: c.Pipeline.Platform.Version,
OS: args.Pipeline.Platform.OS,
Arch: args.Pipeline.Platform.Arch,
Variant: args.Pipeline.Platform.Variant,
Version: args.Pipeline.Platform.Version,
},
Volumes: []*engine.Volume{
{EmptyDir: volume},
@@ -169,20 +177,20 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec {
// create the default environment variables.
envs := environ.Combine(
c.Environ,
c.Build.Params,
c.Pipeline.Environment,
args.Build.Params,
args.Pipeline.Environment,
environ.Proxy(),
environ.System(c.System),
environ.Repo(c.Repo),
environ.Build(c.Build),
environ.Stage(c.Stage),
environ.Link(c.Repo, c.Build, c.System),
environ.System(args.System),
environ.Repo(args.Repo),
environ.Build(args.Build),
environ.Stage(args.Stage),
environ.Link(args.Repo, args.Build, args.System),
clone.Environ(clone.Config{
SkipVerify: c.Pipeline.Clone.SkipVerify,
Trace: c.Pipeline.Clone.Trace,
SkipVerify: args.Pipeline.Clone.SkipVerify,
Trace: args.Pipeline.Clone.Trace,
User: clone.User{
Name: c.Build.AuthorName,
Email: c.Build.AuthorEmail,
Name: args.Build.AuthorName,
Email: args.Build.AuthorEmail,
},
}),
)
@@ -197,32 +205,32 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec {
envs["DRONE_WORKSPACE_PATH"] = path
// create the netrc environment variables
if c.Netrc != nil && c.Netrc.Machine != "" {
envs["DRONE_NETRC_MACHINE"] = c.Netrc.Machine
envs["DRONE_NETRC_USERNAME"] = c.Netrc.Login
envs["DRONE_NETRC_PASSWORD"] = c.Netrc.Password
if args.Netrc != nil && args.Netrc.Machine != "" {
envs["DRONE_NETRC_MACHINE"] = args.Netrc.Machine
envs["DRONE_NETRC_USERNAME"] = args.Netrc.Login
envs["DRONE_NETRC_PASSWORD"] = args.Netrc.Password
envs["DRONE_NETRC_FILE"] = fmt.Sprintf(
"machine %s login %s password %s",
c.Netrc.Machine,
c.Netrc.Login,
c.Netrc.Password,
args.Netrc.Machine,
args.Netrc.Login,
args.Netrc.Password,
)
}
match := manifest.Match{
Action: c.Build.Action,
Cron: c.Build.Cron,
Ref: c.Build.Ref,
Repo: c.Repo.Slug,
Instance: c.System.Host,
Target: c.Build.Deploy,
Event: c.Build.Event,
Branch: c.Build.Target,
Action: args.Build.Action,
Cron: args.Build.Cron,
Ref: args.Build.Ref,
Repo: args.Repo.Slug,
Instance: args.System.Host,
Target: args.Build.Deploy,
Event: args.Build.Event,
Branch: args.Build.Target,
}
// create the clone step
if c.Pipeline.Clone.Disable == false {
step := createClone(c.Pipeline)
if args.Pipeline.Clone.Disable == false {
step := createClone(args.Pipeline)
step.ID = random()
step.Envs = environ.Combine(envs, step.Envs)
step.WorkingDir = full
@@ -232,8 +240,8 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec {
}
// create steps
for _, src := range c.Pipeline.Services {
dst := createStep(c.Pipeline, src)
for _, src := range args.Pipeline.Services {
dst := createStep(args.Pipeline, src)
dst.Detach = true
dst.Envs = environ.Combine(envs, dst.Envs)
dst.Volumes = append(dst.Volumes, mount)
@@ -250,8 +258,8 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec {
}
// create steps
for _, src := range c.Pipeline.Steps {
dst := createStep(c.Pipeline, src)
for _, src := range args.Pipeline.Steps {
dst := createStep(args.Pipeline, src)
dst.Envs = environ.Combine(envs, dst.Envs)
dst.Volumes = append(dst.Volumes, mount)
dst.Labels = labels
@@ -275,15 +283,15 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec {
if isGraph(spec) == false {
configureSerial(spec)
} else if c.Pipeline.Clone.Disable == false {
} else if args.Pipeline.Clone.Disable == false {
configureCloneDeps(spec)
} else if c.Pipeline.Clone.Disable == true {
} else if args.Pipeline.Clone.Disable == true {
removeCloneDeps(spec)
}
for _, step := range spec.Steps {
for _, s := range step.Secrets {
secret, ok := c.findSecret(ctx, s.Name)
secret, ok := c.findSecret(ctx, args, s.Name)
if ok {
s.Data = []byte(secret)
}
@@ -292,8 +300,8 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec {
// get registry credentials from registry plugins
creds, err := c.Registry.List(ctx, &registry.Request{
Repo: c.Repo,
Build: c.Build,
Repo: args.Repo,
Build: args.Build,
})
if err != nil {
// TODO (bradrydzewski) return an error to the caller
@@ -301,8 +309,8 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec {
}
// get registry credentials from secrets
for _, name := range c.Pipeline.PullSecrets {
secret, ok := c.findSecret(ctx, name)
for _, name := range args.Pipeline.PullSecrets {
secret, ok := c.findSecret(ctx, args, name)
if ok {
parsed, err := auths.ParseString(secret)
if err == nil {
@@ -391,17 +399,25 @@ func (c *Compiler) isPrivileged(step *resource.Step) bool {
// helper function attempts to find and return the named secret
// from the secret provider.
func (c *Compiler) findSecret(ctx context.Context, name string) (s string, ok bool) {
func (c *Compiler) findSecret(ctx context.Context, args Args, name string) (s string, ok bool) {
if name == "" {
return
}
// TODO (bradrydzewski) return an error to the caller
// if the provider returns an error.
found, _ := c.Secret.Find(ctx, &secret.Request{
// source secrets from the global secret provider
// and the repository secret provider.
provider := secret.Combine(
args.Secret,
c.Secret,
)
// TODO return an error to the caller if the provider
// returns an error.
found, _ := provider.Find(ctx, &secret.Request{
Name: name,
Build: c.Build,
Repo: c.Repo,
Conf: c.Manifest,
Build: args.Build,
Repo: args.Repo,
Conf: args.Manifest,
})
if found == nil {
return
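The change above moves the per-build inputs (Manifest, Pipeline, Repo, Build, Stage, System, Netrc and the repository-level Secret provider) off the long-lived Compiler and into an Args value passed to each Compile call, so a single Compiler can be shared across builds. Below is a minimal sketch of a caller under the new signature; the import paths and the sample field values are assumptions for illustration, only the Compiler, Args and Compile shapes come from this diff.

```go
package main

import (
	"context"

	"github.com/drone-runners/drone-runner-docker/engine/compiler" // assumed import path
	"github.com/drone-runners/drone-runner-docker/engine/resource" // assumed import path
	"github.com/drone/drone-go/drone"
	"github.com/drone/runner-go/manifest"
)

// compileStage shows the new call pattern: the Compiler keeps only
// process-wide settings (Environ, Labels, Resources, Registry, the
// global Secret provider), while everything specific to one build is
// carried in Args.
func compileStage(ctx context.Context, c *compiler.Compiler, m *manifest.Manifest, p *resource.Pipeline) {
	args := compiler.Args{
		Manifest: m,
		Pipeline: p,
		Repo:     &drone.Repo{Slug: "octocat/hello-world"}, // sample values
		Build:    &drone.Build{Event: "push", Ref: "refs/heads/master"},
		Stage:    &drone.Stage{Number: 1},
		System:   &drone.System{Host: "drone.example.com"},
		Netrc:    &drone.Netrc{Machine: "github.com", Login: "octocat", Password: "token"},
	}
	spec := c.Compile(ctx, args)
	_ = spec // hand the spec to the engine (Setup/Run/Destroy) for execution
}
```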

View File

@@ -15,7 +15,7 @@ import (
// Encode encodes an interface value as a string. This function
// assumes all types were unmarshaled by the yaml.v2 library.
// The yaml.v2 package only supports a subset of primative types.
// The yaml.v2 package only supports a subset of primitive types.
func Encode(v interface{}) string {
switch v := v.(type) {
case string:

engine/convert.go Normal file
View File

@@ -0,0 +1,286 @@
// Copyright 2019 Drone.IO Inc. All rights reserved.
// Use of this source code is governed by the Polyform License
// that can be found in the LICENSE file.
package engine
import (
"strings"
"docker.io/go-docker/api/types/container"
"docker.io/go-docker/api/types/mount"
"docker.io/go-docker/api/types/network"
)
// returns a container configuration.
func toConfig(spec *Spec, step *Step) *container.Config {
config := &container.Config{
Image: step.Image,
Labels: step.Labels,
WorkingDir: step.WorkingDir,
User: step.User,
AttachStdin: false,
AttachStdout: true,
AttachStderr: true,
Tty: false,
OpenStdin: false,
StdinOnce: false,
ArgsEscaped: false,
}
if len(step.Envs) != 0 {
config.Env = toEnv(step.Envs)
}
for _, sec := range step.Secrets {
config.Env = append(config.Env, sec.Env+"="+string(sec.Data))
}
if len(step.Entrypoint) != 0 {
config.Entrypoint = step.Entrypoint
}
if len(step.Command) != 0 {
config.Cmd = step.Command
}
if len(step.Volumes) != 0 {
config.Volumes = toVolumeSet(spec, step)
}
return config
}
// returns a container host configuration.
func toHostConfig(spec *Spec, step *Step) *container.HostConfig {
config := &container.HostConfig{
LogConfig: container.LogConfig{
Type: "json-file",
},
Privileged: step.Privileged,
// TODO(bradrydzewski) set ShmSize
}
// windows does not support privileged so we hard-code
// this value to false.
if spec.Platform.OS == "windows" {
config.Privileged = false
}
if len(step.Network) > 0 {
config.NetworkMode = container.NetworkMode(step.Network)
}
if len(step.DNS) > 0 {
config.DNS = step.DNS
}
if len(step.DNSSearch) > 0 {
config.DNSSearch = step.DNSSearch
}
if len(step.ExtraHosts) > 0 {
config.ExtraHosts = step.ExtraHosts
}
// if step.Resources != nil {
// config.Resources = container.Resources{}
// if limits := step.Resources.Limits; limits != nil {
// config.Resources.Memory = limits.Memory
// // TODO(bradrydzewski) set config.Resources.CPUPercent
// // IMPORTANT docker and kubernetes use
// // different units of measure for cpu limits.
// // we need to figure out how to convert from
// // the kubernetes unit of measure to the docker
// // unit of measure.
// }
// }
if len(step.Volumes) != 0 {
config.Devices = toDeviceSlice(spec, step)
config.Binds = toVolumeSlice(spec, step)
config.Mounts = toVolumeMounts(spec, step)
}
return config
}
// helper function returns the container network configuration.
func toNetConfig(spec *Spec, proc *Step) *network.NetworkingConfig {
// if the user overrides the default network we do not
// attach to the user-defined network.
if proc.Network != "" {
return &network.NetworkingConfig{}
}
endpoints := map[string]*network.EndpointSettings{}
endpoints[spec.Network.ID] = &network.EndpointSettings{
NetworkID: spec.Network.ID,
Aliases: []string{proc.Name},
}
return &network.NetworkingConfig{
EndpointsConfig: endpoints,
}
}
// helper function that converts a slice of device paths to a slice of
// container.DeviceMapping.
func toDeviceSlice(spec *Spec, step *Step) []container.DeviceMapping {
var to []container.DeviceMapping
for _, mount := range step.Devices {
device, ok := LookupVolume(spec, mount.Name)
if !ok {
continue
}
if isDevice(device) == false {
continue
}
to = append(to, container.DeviceMapping{
PathOnHost: device.HostPath.Path,
PathInContainer: mount.DevicePath,
CgroupPermissions: "rwm",
})
}
if len(to) == 0 {
return nil
}
return to
}
// helper function that converts a slice of volume paths to a set
// of unique volume names.
func toVolumeSet(spec *Spec, step *Step) map[string]struct{} {
set := map[string]struct{}{}
for _, mount := range step.Volumes {
volume, ok := LookupVolume(spec, mount.Name)
if !ok {
continue
}
if isDevice(volume) {
continue
}
if isNamedPipe(volume) {
continue
}
if isBindMount(volume) == false {
continue
}
set[mount.Path] = struct{}{}
}
return set
}
// helper function returns a slice of volume mounts.
func toVolumeSlice(spec *Spec, step *Step) []string {
// this entire function should be deprecated in
// favor of toVolumeMounts, however, I am unable
// to get it working with data volumes.
var to []string
for _, mount := range step.Volumes {
volume, ok := LookupVolume(spec, mount.Name)
if !ok {
continue
}
if isDevice(volume) {
continue
}
if isDataVolume(volume) {
path := volume.Metadata.UID + ":" + mount.Path
to = append(to, path)
}
if isBindMount(volume) {
path := volume.HostPath.Path + ":" + mount.Path
to = append(to, path)
}
}
return to
}
// helper function returns a slice of docker mount
// configurations.
func toVolumeMounts(spec *Spec, step *Step) []mount.Mount {
var mounts []mount.Mount
for _, target := range step.Volumes {
source, ok := LookupVolume(spec, target.Name)
if !ok {
continue
}
if isBindMount(source) && !isDevice(source) {
continue
}
// HACK: this condition can be removed once
// toVolumeSlice has been fully replaced. at this
// time, I cannot figure out how to get mounts
// working with data volumes :(
if isDataVolume(source) {
continue
}
mounts = append(mounts, toMount(source, target))
}
if len(mounts) == 0 {
return nil
}
return mounts
}
// helper function converts the volume declaration to a
// docker mount structure.
func toMount(source *Volume, target *VolumeMount) mount.Mount {
to := mount.Mount{
Target: target.Path,
Type: toVolumeType(source),
}
if isBindMount(source) || isNamedPipe(source) {
to.Source = source.HostPath.Path
}
if isTempfs(source) {
to.TmpfsOptions = &mount.TmpfsOptions{
SizeBytes: source.EmptyDir.SizeLimit,
Mode: 0700,
}
}
return to
}
// helper function returns the docker volume enumeration
// for the given volume.
func toVolumeType(from *Volume) mount.Type {
switch {
case isDataVolume(from):
return mount.TypeVolume
case isTempfs(from):
return mount.TypeTmpfs
case isNamedPipe(from):
return mount.TypeNamedPipe
default:
return mount.TypeBind
}
}
// helper function that converts a key value map of
// environment variables to a string slice in key=value
// format.
func toEnv(env map[string]string) []string {
var envs []string
for k, v := range env {
envs = append(envs, k+"="+v)
}
return envs
}
// returns true if the volume is a bind mount.
func isBindMount(volume *Volume) bool {
return volume.HostPath != nil
}
// returns true if the volume is in-memory.
func isTempfs(volume *Volume) bool {
return volume.EmptyDir != nil && volume.EmptyDir.Medium == "memory"
}
// returns true if the volume is a data-volume.
func isDataVolume(volume *Volume) bool {
return volume.EmptyDir != nil && volume.EmptyDir.Medium != "memory"
}
// returns true if the volume is a device
func isDevice(volume *Volume) bool {
return volume.HostPath != nil && strings.HasPrefix(volume.HostPath.Path, "/dev/")
}
// returns true if the volume is a named pipe.
func isNamedPipe(volume *Volume) bool {
return volume.HostPath != nil &&
strings.HasPrefix(volume.HostPath.Path, `\\.\pipe\`)
}
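The predicates at the end of the file (isBindMount, isTempfs, isDataVolume, isDevice, isNamedPipe) steer each declared volume toward a bind mount, tmpfs mount, data volume, device mapping, or named pipe in the helpers above. The sketch below shows that classification in package form; the VolumeHostPath and VolumeEmptyDir struct names and the sample paths are assumptions inferred from the fields this code dereferences (HostPath.Path, EmptyDir.Medium).

```go
package engine

import "fmt"

// classify is a sketch showing which mount strategy each kind of
// volume declaration ends up with, per the helpers in convert.go.
// Note the order: devices and named pipes also carry a HostPath, so
// they must be tested before the generic bind-mount case.
func classify(v *Volume) string {
	switch {
	case isDevice(v):
		return "device mapping (HostConfig.Devices)"
	case isNamedPipe(v):
		return "npipe mount (mount.TypeNamedPipe)"
	case isBindMount(v):
		return "bind mount (HostConfig.Binds)"
	case isTempfs(v):
		return "tmpfs mount (mount.TypeTmpfs)"
	case isDataVolume(v):
		return "data volume (Metadata.UID + \":\" + path)"
	default:
		return "unknown"
	}
}

func exampleClassification() {
	// sample volumes; struct field names are assumed from usage above.
	fmt.Println(classify(&Volume{HostPath: &VolumeHostPath{Path: "/var/lib/cache"}}))         // bind mount
	fmt.Println(classify(&Volume{HostPath: &VolumeHostPath{Path: "/dev/ttyUSB0"}}))           // device
	fmt.Println(classify(&Volume{HostPath: &VolumeHostPath{Path: `\\.\pipe\docker_engine`}})) // named pipe
	fmt.Println(classify(&Volume{EmptyDir: &VolumeEmptyDir{Medium: "memory"}}))               // tmpfs
	fmt.Println(classify(&Volume{EmptyDir: &VolumeEmptyDir{}}))                               // data volume
}
```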

View File

@@ -7,27 +7,259 @@ package engine
import (
"context"
"io"
"io/ioutil"
"github.com/drone-runners/drone-runner-docker/engine/stdcopy"
"github.com/drone/drone-runtime/engine/docker/auth"
"docker.io/go-docker"
"docker.io/go-docker/api/types"
"docker.io/go-docker/api/types/volume"
)
// New returns a new engine.
func New(publickeyFile, privatekeyFile string) (Engine, error) {
return &engine{}, nil
type engine struct {
client docker.APIClient
}
type engine struct {
// New returns a new engine.
func New(client docker.APIClient) Engine {
return &engine{client}
}
// NewEnv returns a new Engine from the environment.
func NewEnv() (Engine, error) {
cli, err := docker.NewEnvClient()
if err != nil {
return nil, err
}
return New(cli), nil
}
// Setup the pipeline environment.
func (e *engine) Setup(ctx context.Context, spec *Spec) error {
return nil
// creates the default temporary (local) volumes
// that are mounted into each container step.
for _, vol := range spec.Volumes {
if vol.EmptyDir == nil {
continue
}
_, err := e.client.VolumeCreate(ctx, volume.VolumesCreateBody{
Name: vol.EmptyDir.ID,
Driver: "local",
Labels: vol.EmptyDir.Labels,
})
if err != nil {
return err
}
}
// creates the default pod network. All containers
// defined in the pipeline are attached to this network.
driver := "bridge"
if spec.Platform.OS == "windows" {
driver = "nat"
}
_, err := e.client.NetworkCreate(ctx, spec.Network.ID, types.NetworkCreate{
Driver: driver,
Labels: spec.Network.Labels,
})
return err
}
// Destroy the pipeline environment.
func (e *engine) Destroy(ctx context.Context, spec *Spec) error {
removeOpts := types.ContainerRemoveOptions{
Force: true,
RemoveLinks: false,
RemoveVolumes: true,
}
// stop all containers
for _, step := range spec.Steps {
e.client.ContainerKill(ctx, step.ID, "9")
}
// cleanup all containers
for _, step := range spec.Steps {
e.client.ContainerRemove(ctx, step.ID, removeOpts)
}
// cleanup all volumes
for _, vol := range spec.Volumes {
if vol.EmptyDir == nil {
continue
}
// tempfs volumes do not have a volume entry,
// and therefore do not require removal.
if vol.EmptyDir.Medium == "memory" {
continue
}
e.client.VolumeRemove(ctx, vol.EmptyDir.ID, true)
}
// cleanup the network
e.client.NetworkRemove(ctx, spec.Network.ID)
// notice that we never collect or return any errors.
// this is because we silently ignore cleanup failures
// and instead ask the system admin to periodically run
// `docker prune` commands.
return nil
}
// Run runs the pipeline step.
func (e *engine) Run(ctx context.Context, spec *Spec, step *Step, output io.Writer) (*State, error) {
return nil, nil
// create the container
err := e.create(ctx, spec, step, output)
if err != nil {
return nil, err
}
// start the container
err = e.start(ctx, step.ID)
if err != nil {
return nil, err
}
// tail the container
err = e.tail(ctx, step.ID, output)
if err != nil {
return nil, err
}
// wait for the response
return e.wait(ctx, step.ID)
}
//
// emulate docker commands
//
func (e *engine) create(ctx context.Context, spec *Spec, step *Step, output io.Writer) error {
// parse the docker image name. We need to extract the
// image domain name and match to registry credentials
// stored in the .docker/config.json object.
_, _, latest, err := parseImage(step.Image)
if err != nil {
return err
}
// create pull options with encoded authorization credentials.
pullopts := types.ImagePullOptions{}
if step.Auth != nil {
pullopts.RegistryAuth = auth.Encode(
step.Auth.Username,
step.Auth.Password,
)
}
// automatically pull the latest version of the image if requested
// by the process configuration, or if the image is :latest
if step.Pull == PullAlways ||
(step.Pull == PullDefault && latest) {
rc, pullerr := e.client.ImagePull(ctx, step.Image, pullopts)
if pullerr == nil {
io.Copy(ioutil.Discard, rc)
rc.Close()
}
if pullerr != nil {
return pullerr
}
}
_, err = e.client.ContainerCreate(ctx,
toConfig(spec, step),
toHostConfig(spec, step),
toNetConfig(spec, step),
step.ID,
)
// automatically pull the image and try to re-create the container
// if creation failed because the image does not exist.
if docker.IsErrImageNotFound(err) && step.Pull != PullNever {
rc, pullerr := e.client.ImagePull(ctx, step.Image, pullopts)
if pullerr != nil {
return pullerr
}
io.Copy(ioutil.Discard, rc)
rc.Close()
// once the image is successfully pulled we attempt to
// re-create the container.
_, err = e.client.ContainerCreate(ctx,
toConfig(spec, step),
toHostConfig(spec, step),
toNetConfig(spec, step),
step.ID,
)
}
if err != nil {
return err
}
// // use the default user-defined network if network_mode
// // is not otherwise specified.
// if step.Network == "" {
// for _, net := range step.Networks {
// err = e.client.NetworkConnect(ctx, net, step.ID, &network.EndpointSettings{
// Aliases: []string{net},
// })
// if err != nil {
// return nil
// }
// }
// }
return nil
}
// helper function emulates the `docker start` command.
func (e *engine) start(ctx context.Context, id string) error {
return e.client.ContainerStart(ctx, id, types.ContainerStartOptions{})
}
// helper function emulates the `docker wait` command, blocking
// until the container stops and returning the exit code.
func (e *engine) wait(ctx context.Context, id string) (*State, error) {
wait, errc := e.client.ContainerWait(ctx, id, "")
select {
case <-wait:
case <-errc:
}
info, err := e.client.ContainerInspect(ctx, id)
if err != nil {
return nil, err
}
if info.State.Running {
// TODO(bradrydzewski) if the state is still running
// we should call wait again.
}
return &State{
Exited: true,
ExitCode: info.State.ExitCode,
OOMKilled: info.State.OOMKilled,
}, nil
}
// helper function emulates the `docker logs -f` command, streaming
// all container logs until the container stops.
func (e *engine) tail(ctx context.Context, id string, output io.Writer) error {
opts := types.ContainerLogsOptions{
Follow: true,
ShowStdout: true,
ShowStderr: true,
Details: false,
Timestamps: false,
}
logs, err := e.client.ContainerLogs(ctx, id, opts)
if err != nil {
return err
}
go func() {
stdcopy.StdCopy(output, output, logs)
logs.Close()
}()
return nil
}
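Taken together, Setup, Run and Destroy above emulate the `docker volume create` / `docker network create`, `docker run` (create + start + logs -f + wait), and `docker rm -f` / `docker volume rm` / `docker network rm` command sequences. A minimal sketch of a caller driving the engine end to end follows; the loop and error handling are illustrative only, while the NewEnv, Setup, Run and Destroy signatures match the code above.

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/drone-runners/drone-runner-docker/engine" // assumed import path
)

// runPipeline is a sketch of the engine lifecycle: create the volumes
// and network, run each step in order while streaming its logs to
// stdout, then tear everything down.
func runPipeline(ctx context.Context, spec *engine.Spec) error {
	eng, err := engine.NewEnv() // docker client from DOCKER_HOST et al.
	if err != nil {
		return err
	}
	if err := eng.Setup(ctx, spec); err != nil {
		return err
	}
	defer eng.Destroy(ctx, spec) // cleanup is best-effort by design

	for _, step := range spec.Steps {
		state, err := eng.Run(ctx, spec, step, os.Stdout)
		if err != nil {
			return err
		}
		if state.ExitCode != 0 {
			log.Printf("step %s exited with code %d", step.Name, state.ExitCode)
			break
		}
	}
	return nil
}
```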

engine/image.go Normal file
View File

@@ -0,0 +1,34 @@
// Copyright 2019 Drone.IO Inc. All rights reserved.
// Use of this source code is governed by the Polyform License
// that can be found in the LICENSE file.
package engine
import (
"strings"
"github.com/docker/distribution/reference"
)
// helper function parses the image and returns the
// canonical image name, domain name, and whether or not
// the image tag is :latest.
func parseImage(s string) (canonical, domain string, latest bool, err error) {
// parse the docker image name. We need to extract the
// image domain name and match to registry credentials
// stored in the .docker/config.json object.
named, err := reference.ParseNormalizedNamed(s)
if err != nil {
return
}
// the canonical image name, for some reason, excludes
// the tag name. So we need to make sure it is included
// in the image name so we can determine if the :latest
// tag is specified
named = reference.TagNameOnly(named)
return named.String(),
reference.Domain(named),
strings.HasSuffix(named.String(), ":latest"),
nil
}

engine/image_test.go Normal file
View File

@@ -0,0 +1,56 @@
// Copyright 2019 Drone.IO Inc. All rights reserved.
// Use of this source code is governed by the Polyform License
// that can be found in the LICENSE file.
package engine
import "testing"
func TestParseImage(t *testing.T) {
tests := []struct {
image string
canonical string
domain string
latest bool
err bool
}{
{
image: "golang",
canonical: "docker.io/library/golang:latest",
domain: "docker.io",
latest: true,
},
{
image: "golang:1.11",
canonical: "docker.io/library/golang:1.11",
domain: "docker.io",
latest: false,
},
{
image: "",
err: true,
},
}
for _, test := range tests {
canonical, domain, latest, err := parseImage(test.image)
if test.err {
if err == nil {
t.Errorf("Expect error parsing image %s", test.image)
}
continue
}
if err != nil {
t.Error(err)
}
if got, want := canonical, test.canonical; got != want {
t.Errorf("Want image %s, got %s", want, got)
}
if got, want := domain, test.domain; got != want {
t.Errorf("Want image domain %s, got %s", want, got)
}
if got, want := latest, test.latest; got != want {
t.Errorf("Want image latest %v, got %v", want, got)
}
}
}

View File

@@ -119,6 +119,41 @@ type (
Labels map[string]string `json:"labels,omitempty"`
}
// XVolume that is mounted into the container
XVolume struct {
ID string `json:"id,omitempty"`
Source string `json:"source,omitempty"`
Target string `json:"target,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
}
volumeDevice struct {
Path string
}
volumeData struct {
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Path string `json:"target,omitempty"`
Mode uint32 `json:"mode,omitempty"`
}
volumeBind struct {
Source string `json:"source,omitempty"`
Target string `json:"target,omitempty"`
Readonly bool `json:"readonly,omitempty"`
}
volumePipe struct {
Source string `json:"source,omitempty"`
Target string `json:"target,omitempty"`
}
volumeTemp struct {
Size int64 `json:"size,omitempty"`
Path string `json:"path,omitempty"`
}
// Network that is created and attached to containers
Network struct {
ID string `json:"id,omitempty"`

engine/stdcopy/stdcopy.go Normal file
View File

@@ -0,0 +1,188 @@
// Copyright 2018 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stdcopy
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"sync"
)
// StdType is the type of standard stream
// a writer can multiplex to.
type StdType byte
const (
// Stdin represents standard input stream type.
Stdin StdType = iota
// Stdout represents standard output stream type.
Stdout
// Stderr represents standard error stream type.
Stderr
stdWriterPrefixLen = 8
stdWriterFdIndex = 0
stdWriterSizeIndex = 4
startingBufLen = 32*1024 + stdWriterPrefixLen + 1
)
var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}
// stdWriter is wrapper of io.Writer with extra customized info.
type stdWriter struct {
io.Writer
prefix byte
}
// Write sends the buffer to the underneath writer.
// It inserts the prefix header before the buffer,
// so stdcopy.StdCopy knows where to multiplex the output.
// It makes stdWriter to implement io.Writer.
func (w *stdWriter) Write(p []byte) (n int, err error) {
if w == nil || w.Writer == nil {
return 0, errors.New("Writer not instantiated")
}
if p == nil {
return 0, nil
}
header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p)))
buf := bufPool.Get().(*bytes.Buffer)
buf.Write(header[:])
buf.Write(p)
n, err = w.Writer.Write(buf.Bytes())
n -= stdWriterPrefixLen
if n < 0 {
n = 0
}
buf.Reset()
bufPool.Put(buf)
return
}
// NewStdWriter instantiates a new Writer.
// Everything written to it will be encapsulated using a custom format,
// and written to the underlying `w` stream.
// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
// `t` indicates the id of the stream to encapsulate.
// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
func NewStdWriter(w io.Writer, t StdType) io.Writer {
return &stdWriter{
Writer: w,
prefix: byte(t),
}
}
// StdCopy is a modified version of io.Copy.
//
// StdCopy will demultiplex `src`, assuming that it contains two streams,
// previously multiplexed together using a StdWriter instance.
// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
//
// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
// In other words: if `err` is non nil, it indicates a real underlying error.
//
// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
var (
buf = make([]byte, startingBufLen)
bufLen = len(buf)
nr, nw int
er, ew error
out io.Writer
frameSize int
)
for {
// Make sure we have at least a full header
for nr < stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < stdWriterPrefixLen {
return written, nil
}
break
}
if er != nil {
return 0, er
}
}
// Check the first byte to know where to write
switch StdType(buf[stdWriterFdIndex]) {
case Stdin:
fallthrough
case Stdout:
// Write on stdout
out = dstout
case Stderr:
// Write on stderr
out = dsterr
default:
return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
}
// Retrieve the size of the frame
frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
// Check if the buffer is big enough to read the frame.
// Extend it if necessary.
if frameSize+stdWriterPrefixLen > bufLen {
buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
bufLen = len(buf)
}
// While the amount of bytes read is less than the size of the frame + header, we keep reading
for nr < frameSize+stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < frameSize+stdWriterPrefixLen {
return written, nil
}
break
}
if er != nil {
return 0, er
}
}
// Write the retrieved frame (without header)
nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
if ew != nil {
return 0, ew
}
// If the frame has not been fully written: error
if nw != frameSize {
return 0, io.ErrShortWrite
}
written += int64(nw)
// Move the rest of the buffer to the beginning
copy(buf, buf[frameSize+stdWriterPrefixLen:])
// Move the index
nr -= frameSize + stdWriterPrefixLen
}
}
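StdCopy is the demultiplexer for Docker's multiplexed log stream: every frame carries an 8-byte header (1 byte stream id, 3 bytes padding, 4 bytes big-endian payload length), and the tail helper above relies on it to split a single ContainerLogs stream back into stdout and stderr. A small round-trip sketch using only the two exported functions in this file:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/drone-runners/drone-runner-docker/engine/stdcopy" // path per this commit
)

func main() {
	// multiplex: both writers share one underlying buffer; each write
	// is framed with its stream id and payload length.
	var muxed bytes.Buffer
	stdout := stdcopy.NewStdWriter(&muxed, stdcopy.Stdout)
	stderr := stdcopy.NewStdWriter(&muxed, stdcopy.Stderr)
	fmt.Fprintln(stdout, "building...")
	fmt.Fprintln(stderr, "warning: cache miss")

	// demultiplex: StdCopy reads the framed stream and routes each
	// frame to the matching destination writer.
	var out, errs bytes.Buffer
	if _, err := stdcopy.StdCopy(&out, &errs, &muxed); err != nil {
		panic(err)
	}
	fmt.Print(out.String())  // building...
	fmt.Print(errs.String()) // warning: cache miss
}
```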