diff --git a/command/compile.go b/command/compile.go index a48a334..50f4e82 100644 --- a/command/compile.go +++ b/command/compile.go @@ -12,6 +12,7 @@ import ( "strings" "github.com/drone-runners/drone-runner-docker/command/internal" + "github.com/drone-runners/drone-runner-docker/engine" "github.com/drone-runners/drone-runner-docker/engine/compiler" "github.com/drone-runners/drone-runner-docker/engine/linter" "github.com/drone-runners/drone-runner-docker/engine/resource" @@ -35,6 +36,7 @@ type compileCommand struct { Labels map[string]string Secrets map[string]string Resources compiler.Resources + Clone bool Config string } @@ -96,13 +98,6 @@ func (c *compileCommand) run(*kingpin.ParseContext) error { // compile the pipeline to an intermediate representation. comp := &compiler.Compiler{ - Pipeline: resource, - Manifest: manifest, - Build: c.Build, - Netrc: c.Netrc, - Repo: c.Repo, - Stage: c.Stage, - System: c.System, Environ: c.Environ, Labels: c.Labels, Resources: c.Resources, @@ -114,7 +109,38 @@ func (c *compileCommand) run(*kingpin.ParseContext) error { registry.File(c.Config), ), } - spec := comp.Compile(nocontext) + args := compiler.Args{ + Pipeline: resource, + Manifest: manifest, + Build: c.Build, + Netrc: c.Netrc, + Repo: c.Repo, + Stage: c.Stage, + System: c.System, + } + spec := comp.Compile(nocontext, args) + + // when running a build locally cloning is always + // disabled in favor of mounting the source code + // from the current working directory. + if c.Clone == false { + pwd, _ := os.Getwd() + for _, volume := range spec.Volumes { + if volume.EmptyDir != nil && volume.EmptyDir.Name == "_workspace" { + volume.HostPath = &engine.VolumeHostPath{ + ID: volume.EmptyDir.ID, + Name: volume.EmptyDir.Name, + Path: pwd, + } + volume.EmptyDir = nil + } + } + for _, step := range spec.Steps { + if step.Name == "clone" { + step.RunPolicy = engine.RunNever + } + } + } // encode the pipeline in json format and print to the // console for inspection. @@ -138,6 +164,9 @@ func registerCompile(app *kingpin.Application) { Default(".drone.yml"). FileVar(&c.Source) + cmd.Flag("clone", "enable cloning"). + BoolVar(&c.Clone) + cmd.Flag("secrets", "secret parameters"). 
StringMapVar(&c.Secrets) diff --git a/command/daemon/daemon.go b/command/daemon/daemon.go index 5e64869..6d4ec51 100644 --- a/command/daemon/daemon.go +++ b/command/daemon/daemon.go @@ -9,6 +9,7 @@ import ( "time" "github.com/drone-runners/drone-runner-docker/engine" + "github.com/drone-runners/drone-runner-docker/engine/compiler" "github.com/drone-runners/drone-runner-docker/engine/linter" "github.com/drone-runners/drone-runner-docker/engine/resource" "github.com/drone-runners/drone-runner-docker/internal/match" @@ -80,7 +81,6 @@ func (c *daemonCommand) run(*kingpin.ParseContext) error { Client: cli, Runner: &runtime.Runner{ Client: cli, - Environ: config.Runner.Environ, Machine: config.Runner.Name, Reporter: tracer, Linter: linter.New(), @@ -89,11 +89,20 @@ func (c *daemonCommand) run(*kingpin.ParseContext) error { config.Limit.Events, config.Limit.Trusted, ), - Secret: secret.External( - config.Secret.Endpoint, - config.Secret.Token, - config.Secret.SkipVerify, - ), + Compiler: &compiler.Compiler{ + Environ: nil, + Labels: nil, + Privileged: nil, + Networks: nil, + Volumes: nil, + // Resources: nil, + Registry: nil, + Secret: secret.External( + config.Secret.Endpoint, + config.Secret.Token, + config.Secret.SkipVerify, + ), + }, Execer: runtime.NewExecer( tracer, remote, diff --git a/command/exec.go b/command/exec.go index 57b2268..4d73168 100644 --- a/command/exec.go +++ b/command/exec.go @@ -48,6 +48,7 @@ type execCommand struct { Labels map[string]string Secrets map[string]string Resources compiler.Resources + Clone bool Config string Pretty bool Procs int64 @@ -116,13 +117,6 @@ func (c *execCommand) run(*kingpin.ParseContext) error { // compile the pipeline to an intermediate representation. comp := &compiler.Compiler{ - Pipeline: resource, - Manifest: manifest, - Build: c.Build, - Netrc: c.Netrc, - Repo: c.Repo, - Stage: c.Stage, - System: c.System, Environ: c.Environ, Labels: c.Labels, Resources: c.Resources, @@ -134,7 +128,16 @@ func (c *execCommand) run(*kingpin.ParseContext) error { registry.File(c.Config), ), } - spec := comp.Compile(nocontext) + args := compiler.Args{ + Pipeline: resource, + Manifest: manifest, + Build: c.Build, + Netrc: c.Netrc, + Repo: c.Repo, + Stage: c.Stage, + System: c.System, + } + spec := comp.Compile(nocontext, args) // include only steps that are in the include list, // if the list in non-empty. @@ -262,6 +265,9 @@ func registerExec(app *kingpin.Application) { Default(".drone.yml"). FileVar(&c.Source) + cmd.Flag("clone", "enable cloning"). + BoolVar(&c.Clone) + cmd.Flag("secrets", "secret parameters"). StringMapVar(&c.Secrets) diff --git a/engine/compiler/compiler.go b/engine/compiler/compiler.go index 94e3458..fc77a76 100644 --- a/engine/compiler/compiler.go +++ b/engine/compiler/compiler.go @@ -50,9 +50,8 @@ type Resources struct { CPUSet []string } -// Compiler compiles the Yaml configuration file to an -// intermediate representation optimized for simple execution. -type Compiler struct { +// Args provides compiler arguments. +type Args struct { // Manifest provides the parsed manifest. Manifest *manifest.Manifest @@ -82,6 +81,20 @@ type Compiler struct { // each pipeline step. System *drone.System + // Netrc provides netrc parameters that can be used by the + // default clone step to authenticate to the remote + // repository. + Netrc *drone.Netrc + + // Secret returns a named secret value that can be injected + // into the pipeline step. 
+ Secret secret.Provider +} + +// Compiler compiles the Yaml configuration file to an +// intermediate representation optimized for simple execution. +type Compiler struct { + // Environ provides a set of environment variables that // should be added to each pipeline step by default. Environ map[string]string @@ -106,11 +119,6 @@ type Compiler struct { // applies to pipeline containers. Resources Resources - // Netrc provides netrc parameters that can be used by the - // default clone step to authenticate to the remote - // repository. - Netrc *drone.Netrc - // Secret returns a named secret value that can be injected // into the pipeline step. Secret secret.Provider @@ -121,11 +129,11 @@ type Compiler struct { } // Compile compiles the configuration file. -func (c *Compiler) Compile(ctx context.Context) *engine.Spec { - os := c.Pipeline.Platform.OS +func (c *Compiler) Compile(ctx context.Context, args Args) *engine.Spec { + os := args.Pipeline.Platform.OS // create the workspace paths - base, path, full := createWorkspace(c.Pipeline) + base, path, full := createWorkspace(args.Pipeline) // create the workspace mount mount := &engine.VolumeMount{ @@ -136,11 +144,11 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec { // create system labels labels := labels.Combine( c.Labels, - labels.FromRepo(c.Repo), - labels.FromBuild(c.Build), - labels.FromStage(c.Stage), - labels.FromSystem(c.System), - labels.WithTimeout(c.Repo), + labels.FromRepo(args.Repo), + labels.FromBuild(args.Build), + labels.FromStage(args.Stage), + labels.FromSystem(args.System), + labels.WithTimeout(args.Repo), ) // create the workspace volume @@ -156,10 +164,10 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec { Labels: labels, }, Platform: engine.Platform{ - OS: c.Pipeline.Platform.OS, - Arch: c.Pipeline.Platform.Arch, - Variant: c.Pipeline.Platform.Variant, - Version: c.Pipeline.Platform.Version, + OS: args.Pipeline.Platform.OS, + Arch: args.Pipeline.Platform.Arch, + Variant: args.Pipeline.Platform.Variant, + Version: args.Pipeline.Platform.Version, }, Volumes: []*engine.Volume{ {EmptyDir: volume}, @@ -169,20 +177,20 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec { // create the default environment variables. 
envs := environ.Combine( c.Environ, - c.Build.Params, - c.Pipeline.Environment, + args.Build.Params, + args.Pipeline.Environment, environ.Proxy(), - environ.System(c.System), - environ.Repo(c.Repo), - environ.Build(c.Build), - environ.Stage(c.Stage), - environ.Link(c.Repo, c.Build, c.System), + environ.System(args.System), + environ.Repo(args.Repo), + environ.Build(args.Build), + environ.Stage(args.Stage), + environ.Link(args.Repo, args.Build, args.System), clone.Environ(clone.Config{ - SkipVerify: c.Pipeline.Clone.SkipVerify, - Trace: c.Pipeline.Clone.Trace, + SkipVerify: args.Pipeline.Clone.SkipVerify, + Trace: args.Pipeline.Clone.Trace, User: clone.User{ - Name: c.Build.AuthorName, - Email: c.Build.AuthorEmail, + Name: args.Build.AuthorName, + Email: args.Build.AuthorEmail, }, }), ) @@ -197,32 +205,32 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec { envs["DRONE_WORKSPACE_PATH"] = path // create the netrc environment variables - if c.Netrc != nil && c.Netrc.Machine != "" { - envs["DRONE_NETRC_MACHINE"] = c.Netrc.Machine - envs["DRONE_NETRC_USERNAME"] = c.Netrc.Login - envs["DRONE_NETRC_PASSWORD"] = c.Netrc.Password + if args.Netrc != nil && args.Netrc.Machine != "" { + envs["DRONE_NETRC_MACHINE"] = args.Netrc.Machine + envs["DRONE_NETRC_USERNAME"] = args.Netrc.Login + envs["DRONE_NETRC_PASSWORD"] = args.Netrc.Password envs["DRONE_NETRC_FILE"] = fmt.Sprintf( "machine %s login %s password %s", - c.Netrc.Machine, - c.Netrc.Login, - c.Netrc.Password, + args.Netrc.Machine, + args.Netrc.Login, + args.Netrc.Password, ) } match := manifest.Match{ - Action: c.Build.Action, - Cron: c.Build.Cron, - Ref: c.Build.Ref, - Repo: c.Repo.Slug, - Instance: c.System.Host, - Target: c.Build.Deploy, - Event: c.Build.Event, - Branch: c.Build.Target, + Action: args.Build.Action, + Cron: args.Build.Cron, + Ref: args.Build.Ref, + Repo: args.Repo.Slug, + Instance: args.System.Host, + Target: args.Build.Deploy, + Event: args.Build.Event, + Branch: args.Build.Target, } // create the clone step - if c.Pipeline.Clone.Disable == false { - step := createClone(c.Pipeline) + if args.Pipeline.Clone.Disable == false { + step := createClone(args.Pipeline) step.ID = random() step.Envs = environ.Combine(envs, step.Envs) step.WorkingDir = full @@ -232,8 +240,8 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec { } // create steps - for _, src := range c.Pipeline.Services { - dst := createStep(c.Pipeline, src) + for _, src := range args.Pipeline.Services { + dst := createStep(args.Pipeline, src) dst.Detach = true dst.Envs = environ.Combine(envs, dst.Envs) dst.Volumes = append(dst.Volumes, mount) @@ -250,8 +258,8 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec { } // create steps - for _, src := range c.Pipeline.Steps { - dst := createStep(c.Pipeline, src) + for _, src := range args.Pipeline.Steps { + dst := createStep(args.Pipeline, src) dst.Envs = environ.Combine(envs, dst.Envs) dst.Volumes = append(dst.Volumes, mount) dst.Labels = labels @@ -275,15 +283,15 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec { if isGraph(spec) == false { configureSerial(spec) - } else if c.Pipeline.Clone.Disable == false { + } else if args.Pipeline.Clone.Disable == false { configureCloneDeps(spec) - } else if c.Pipeline.Clone.Disable == true { + } else if args.Pipeline.Clone.Disable == true { removeCloneDeps(spec) } for _, step := range spec.Steps { for _, s := range step.Secrets { - secret, ok := c.findSecret(ctx, s.Name) + secret, ok := c.findSecret(ctx, args, s.Name) if ok { s.Data = 
[]byte(secret) } @@ -292,8 +300,8 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec { // get registry credentials from registry plugins creds, err := c.Registry.List(ctx, ®istry.Request{ - Repo: c.Repo, - Build: c.Build, + Repo: args.Repo, + Build: args.Build, }) if err != nil { // TODO (bradrydzewski) return an error to the caller @@ -301,8 +309,8 @@ func (c *Compiler) Compile(ctx context.Context) *engine.Spec { } // get registry credentials from secrets - for _, name := range c.Pipeline.PullSecrets { - secret, ok := c.findSecret(ctx, name) + for _, name := range args.Pipeline.PullSecrets { + secret, ok := c.findSecret(ctx, args, name) if ok { parsed, err := auths.ParseString(secret) if err == nil { @@ -391,17 +399,25 @@ func (c *Compiler) isPrivileged(step *resource.Step) bool { // helper function attempts to find and return the named secret. // from the secret provider. -func (c *Compiler) findSecret(ctx context.Context, name string) (s string, ok bool) { +func (c *Compiler) findSecret(ctx context.Context, args Args, name string) (s string, ok bool) { if name == "" { return } - // TODO (bradrydzewski) return an error to the caller - // if the provider returns an error. - found, _ := c.Secret.Find(ctx, &secret.Request{ + + // source secrets from the global secret provider + // and the repository secret provider. + provider := secret.Combine( + args.Secret, + c.Secret, + ) + + // TODO return an error to the caller if the provider + // returns an error. + found, _ := provider.Find(ctx, &secret.Request{ Name: name, - Build: c.Build, - Repo: c.Repo, - Conf: c.Manifest, + Build: args.Build, + Repo: args.Repo, + Conf: args.Manifest, }) if found == nil { return diff --git a/engine/compiler/encoder/encoder.go b/engine/compiler/encoder/encoder.go index 8b261c2..4c545f2 100644 --- a/engine/compiler/encoder/encoder.go +++ b/engine/compiler/encoder/encoder.go @@ -15,7 +15,7 @@ import ( // Encode encodes an interface value as a string. This function // assumes all types were unmarshaled by the yaml.v2 library. -// The yaml.v2 package only supports a subset of primative types. +// The yaml.v2 package only supports a subset of primitive types. func Encode(v interface{}) string { switch v := v.(type) { case string: diff --git a/engine/convert.go b/engine/convert.go new file mode 100644 index 0000000..f351c90 --- /dev/null +++ b/engine/convert.go @@ -0,0 +1,286 @@ +// Copyright 2019 Drone.IO Inc. All rights reserved. +// Use of this source code is governed by the Polyform License +// that can be found in the LICENSE file. + +package engine + +import ( + "strings" + + "docker.io/go-docker/api/types/container" + "docker.io/go-docker/api/types/mount" + "docker.io/go-docker/api/types/network" +) + +// returns a container configuration. +func toConfig(spec *Spec, step *Step) *container.Config { + config := &container.Config{ + Image: step.Image, + Labels: step.Labels, + WorkingDir: step.WorkingDir, + User: step.User, + AttachStdin: false, + AttachStdout: true, + AttachStderr: true, + Tty: false, + OpenStdin: false, + StdinOnce: false, + ArgsEscaped: false, + } + + if len(step.Envs) != 0 { + config.Env = toEnv(step.Envs) + } + for _, sec := range step.Secrets { + config.Env = append(config.Env, sec.Env+"="+string(sec.Data)) + } + + if len(step.Entrypoint) != 0 { + config.Cmd = step.Entrypoint + } + if len(step.Command) != 0 { + config.Cmd = step.Command + } + if len(step.Volumes) != 0 { + config.Volumes = toVolumeSet(spec, step) + } + return config +} + +// returns a container host configuration. 
+func toHostConfig(spec *Spec, step *Step) *container.HostConfig { + config := &container.HostConfig{ + LogConfig: container.LogConfig{ + Type: "json-file", + }, + Privileged: step.Privileged, + // TODO(bradrydzewski) set ShmSize + } + // windows does not support privileged so we hard-code + // this value to false. + if spec.Platform.OS == "windows" { + config.Privileged = false + } + if len(step.Network) > 0 { + config.NetworkMode = container.NetworkMode(step.Network) + } + if len(step.DNS) > 0 { + config.DNS = step.DNS + } + if len(step.DNSSearch) > 0 { + config.DNSSearch = step.DNSSearch + } + if len(step.ExtraHosts) > 0 { + config.ExtraHosts = step.ExtraHosts + } + // if step.Resources != nil { + // config.Resources = container.Resources{} + // if limits := step.Resources.Limits; limits != nil { + // config.Resources.Memory = limits.Memory + // // TODO(bradrydewski) set config.Resources.CPUPercent + + // // IMPORTANT docker and kubernetes use + // // different units of measure for cpu limits. + // // we need to figure out how to convert from + // // the kubernetes unit of measure to the docker + // // unit of measure. + // } + // } + + if len(step.Volumes) != 0 { + config.Devices = toDeviceSlice(spec, step) + config.Binds = toVolumeSlice(spec, step) + config.Mounts = toVolumeMounts(spec, step) + } + return config +} + +// helper function returns the container network configuration. +func toNetConfig(spec *Spec, proc *Step) *network.NetworkingConfig { + // if the user overrides the default network we do not + // attach to the user-defined network. + if proc.Network != "" { + return &network.NetworkingConfig{} + } + endpoints := map[string]*network.EndpointSettings{} + endpoints[spec.Network.ID] = &network.EndpointSettings{ + NetworkID: spec.Network.ID, + Aliases: []string{proc.Name}, + } + return &network.NetworkingConfig{ + EndpointsConfig: endpoints, + } +} + +// helper function that converts a slice of device paths to a slice of +// container.DeviceMapping. +func toDeviceSlice(spec *Spec, step *Step) []container.DeviceMapping { + var to []container.DeviceMapping + for _, mount := range step.Devices { + device, ok := LookupVolume(spec, mount.Name) + if !ok { + continue + } + if isDevice(device) == false { + continue + } + to = append(to, container.DeviceMapping{ + PathOnHost: device.HostPath.Path, + PathInContainer: mount.DevicePath, + CgroupPermissions: "rwm", + }) + } + if len(to) == 0 { + return nil + } + return to +} + +// helper function that converts a slice of volume paths to a set +// of unique volume names. +func toVolumeSet(spec *Spec, step *Step) map[string]struct{} { + set := map[string]struct{}{} + for _, mount := range step.Volumes { + volume, ok := LookupVolume(spec, mount.Name) + if !ok { + continue + } + if isDevice(volume) { + continue + } + if isNamedPipe(volume) { + continue + } + if isBindMount(volume) == false { + continue + } + set[mount.Path] = struct{}{} + } + return set +} + +// helper function returns a slice of volume mounts. +func toVolumeSlice(spec *Spec, step *Step) []string { + // this entire function should be deprecated in + // favor of toVolumeMounts, however, I am unable + // to get it working with data volumes. 
+ var to []string + for _, mount := range step.Volumes { + volume, ok := LookupVolume(spec, mount.Name) + if !ok { + continue + } + if isDevice(volume) { + continue + } + if isDataVolume(volume) { + path := volume.Metadata.UID + ":" + mount.Path + to = append(to, path) + } + if isBindMount(volume) { + path := volume.HostPath.Path + ":" + mount.Path + to = append(to, path) + } + } + return to +} + +// helper function returns a slice of docker mount +// configurations. +func toVolumeMounts(spec *Spec, step *Step) []mount.Mount { + var mounts []mount.Mount + for _, target := range step.Volumes { + source, ok := LookupVolume(spec, target.Name) + if !ok { + continue + } + + if isBindMount(source) && !isDevice(source) { + continue + } + + // HACK: this condition can be removed once + // toVolumeSlice has been fully replaced. at this + // time, I cannot figure out how to get mounts + // working with data volumes :( + if isDataVolume(source) { + continue + } + mounts = append(mounts, toMount(source, target)) + } + if len(mounts) == 0 { + return nil + } + return mounts +} + +// helper function converts the volume declaration to a +// docker mount structure. +func toMount(source *Volume, target *VolumeMount) mount.Mount { + to := mount.Mount{ + Target: target.Path, + Type: toVolumeType(source), + } + if isBindMount(source) || isNamedPipe(source) { + to.Source = source.HostPath.Path + } + if isTempfs(source) { + to.TmpfsOptions = &mount.TmpfsOptions{ + SizeBytes: source.EmptyDir.SizeLimit, + Mode: 0700, + } + } + return to +} + +// helper function returns the docker volume enumeration +// for the given volume. +func toVolumeType(from *Volume) mount.Type { + switch { + case isDataVolume(from): + return mount.TypeVolume + case isTempfs(from): + return mount.TypeTmpfs + case isNamedPipe(from): + return mount.TypeNamedPipe + default: + return mount.TypeBind + } +} + +// helper function that converts a key value map of +// environment variables to a string slice in key=value +// format. +func toEnv(env map[string]string) []string { + var envs []string + for k, v := range env { + envs = append(envs, k+"="+v) + } + return envs +} + +// returns true if the volume is a bind mount. +func isBindMount(volume *Volume) bool { + return volume.HostPath != nil +} + +// returns true if the volume is in-memory. +func isTempfs(volume *Volume) bool { + return volume.EmptyDir != nil && volume.EmptyDir.Medium == "memory" +} + +// returns true if the volume is a data-volume. +func isDataVolume(volume *Volume) bool { + return volume.EmptyDir != nil && volume.EmptyDir.Medium != "memory" +} + +// returns true if the volume is a device +func isDevice(volume *Volume) bool { + return volume.HostPath != nil && strings.HasPrefix(volume.HostPath.Path, "/dev/") +} + +// returns true if the volume is a named pipe. +func isNamedPipe(volume *Volume) bool { + return volume.HostPath != nil && + strings.HasPrefix(volume.HostPath.Path, `\\.\pipe\`) +} diff --git a/engine/engine_impl.go b/engine/engine_impl.go index b41805e..bf3b093 100644 --- a/engine/engine_impl.go +++ b/engine/engine_impl.go @@ -7,27 +7,259 @@ package engine import ( "context" "io" + "io/ioutil" + + "github.com/drone-runners/drone-runner-docker/engine/stdcopy" + "github.com/drone/drone-runtime/engine/docker/auth" + + "docker.io/go-docker" + "docker.io/go-docker/api/types" + "docker.io/go-docker/api/types/volume" ) -// New returns a new engine. 
-func New(publickeyFile, privatekeyFile string) (Engine, error) { - return &engine{}, nil +type engine struct { + client docker.APIClient } -type engine struct { +// New returns a new engine. +func New(client docker.APIClient) Engine { + return &engine{client} +} + +// NewEnv returns a new Engine from the environment. +func NewEnv() (Engine, error) { + cli, err := docker.NewEnvClient() + if err != nil { + return nil, err + } + return New(cli), nil } // Setup the pipeline environment. func (e *engine) Setup(ctx context.Context, spec *Spec) error { - return nil + // creates the default temporary (local) volumes + // that are mounted into each container step. + for _, vol := range spec.Volumes { + if vol.EmptyDir == nil { + continue + } + _, err := e.client.VolumeCreate(ctx, volume.VolumesCreateBody{ + Name: vol.EmptyDir.ID, + Driver: "local", + Labels: vol.EmptyDir.Labels, + }) + if err != nil { + return err + } + } + + // creates the default pod network. All containers + // defined in the pipeline are attached to this network. + driver := "bridge" + if spec.Platform.OS == "windows" { + driver = "nat" + } + _, err := e.client.NetworkCreate(ctx, spec.Network.ID, types.NetworkCreate{ + Driver: driver, + Labels: spec.Network.Labels, + }) + + return err } // Destroy the pipeline environment. func (e *engine) Destroy(ctx context.Context, spec *Spec) error { + removeOpts := types.ContainerRemoveOptions{ + Force: true, + RemoveLinks: false, + RemoveVolumes: true, + } + + // stop all containers + for _, step := range spec.Steps { + e.client.ContainerKill(ctx, step.ID, "9") + } + + // cleanup all containers + for _, step := range spec.Steps { + e.client.ContainerRemove(ctx, step.ID, removeOpts) + } + + // cleanup all volumes + for _, vol := range spec.Volumes { + if vol.EmptyDir == nil { + continue + } + // tempfs volumes do not have a volume entry, + // and therefore do not require removal. + if vol.EmptyDir.Medium == "memory" { + continue + } + e.client.VolumeRemove(ctx, vol.EmptyDir.ID, true) + } + + // cleanup the network + e.client.NetworkRemove(ctx, spec.Network.ID) + + // notice that we never collect or return any errors. + // this is because we silently ignore cleanup failures + // and instead ask the system admin to periodically run + // `docker prune` commands. return nil } // Run runs the pipeline step. func (e *engine) Run(ctx context.Context, spec *Spec, step *Step, output io.Writer) (*State, error) { - return nil, nil + // create the container + err := e.create(ctx, spec, step, output) + if err != nil { + return nil, err + } + // start the container + err = e.start(ctx, step.ID) + if err != nil { + return nil, err + } + // tail the container + err = e.tail(ctx, step.ID, output) + if err != nil { + return nil, err + } + // wait for the response + return e.wait(ctx, step.ID) +} + +// +// emulate docker commands +// + +func (e *engine) create(ctx context.Context, spec *Spec, step *Step, output io.Writer) error { + // parse the docker image name. We need to extract the + // image domain name and match to registry credentials + // stored in the .docker/config.json object. + _, _, latest, err := parseImage(step.Image) + if err != nil { + return err + } + + // create pull options with encoded authorization credentials. 
+ pullopts := types.ImagePullOptions{} + if step.Auth != nil { + pullopts.RegistryAuth = auth.Encode( + step.Auth.Username, + step.Auth.Password, + ) + } + + // automatically pull the latest version of the image if requested + // by the process configuration, or if the image is :latest + if step.Pull == PullAlways || + (step.Pull == PullDefault && latest) { + rc, pullerr := e.client.ImagePull(ctx, step.Image, pullopts) + if pullerr == nil { + io.Copy(ioutil.Discard, rc) + rc.Close() + } + if pullerr != nil { + return pullerr + } + } + + _, err = e.client.ContainerCreate(ctx, + toConfig(spec, step), + toHostConfig(spec, step), + toNetConfig(spec, step), + step.ID, + ) + + // automatically pull and try to re-create the image if the + // failure is caused because the image does not exist. + if docker.IsErrImageNotFound(err) && step.Pull != PullNever { + rc, pullerr := e.client.ImagePull(ctx, step.Image, pullopts) + if pullerr != nil { + return pullerr + } + io.Copy(ioutil.Discard, rc) + rc.Close() + + // once the image is successfully pulled we attempt to + // re-create the container. + _, err = e.client.ContainerCreate(ctx, + toConfig(spec, step), + toHostConfig(spec, step), + toNetConfig(spec, step), + step.ID, + ) + } + if err != nil { + return err + } + + // // use the default user-defined network if network_mode + // // is not otherwise specified. + // if step.Network == "" { + // for _, net := range step.Networks { + // err = e.client.NetworkConnect(ctx, net, step.ID, &network.EndpointSettings{ + // Aliases: []string{net}, + // }) + // if err != nil { + // return nil + // } + // } + // } + + return nil +} + +// helper function emulates the `docker start` command. +func (e *engine) start(ctx context.Context, id string) error { + return e.client.ContainerStart(ctx, id, types.ContainerStartOptions{}) +} + +// helper function emulates the `docker wait` command, blocking +// until the container stops and returning the exit code. +func (e *engine) wait(ctx context.Context, id string) (*State, error) { + wait, errc := e.client.ContainerWait(ctx, id, "") + select { + case <-wait: + case <-errc: + } + + info, err := e.client.ContainerInspect(ctx, id) + if err != nil { + return nil, err + } + if info.State.Running { + // TODO(bradrydewski) if the state is still running + // we should call wait again. + } + + return &State{ + Exited: true, + ExitCode: info.State.ExitCode, + OOMKilled: info.State.OOMKilled, + }, nil +} + +// helper function emulates the `docker logs -f` command, streaming +// all container logs until the container stops. +func (e *engine) tail(ctx context.Context, id string, output io.Writer) error { + opts := types.ContainerLogsOptions{ + Follow: true, + ShowStdout: true, + ShowStderr: true, + Details: false, + Timestamps: false, + } + + logs, err := e.client.ContainerLogs(ctx, id, opts) + if err != nil { + return err + } + + go func() { + stdcopy.StdCopy(output, output, logs) + logs.Close() + }() + return nil } diff --git a/engine/image.go b/engine/image.go new file mode 100644 index 0000000..3c21701 --- /dev/null +++ b/engine/image.go @@ -0,0 +1,34 @@ +// Copyright 2019 Drone.IO Inc. All rights reserved. +// Use of this source code is governed by the Polyform License +// that can be found in the LICENSE file. + +package engine + +import ( + "strings" + + "github.com/docker/distribution/reference" +) + +// helper function parses the image and returns the +// canonical image name, domain name, and whether or not +// the image tag is :latest. 
+func parseImage(s string) (canonical, domain string, latest bool, err error) { + // parse the docker image name. We need to extract the + // image domain name and match to registry credentials + // stored in the .docker/config.json object. + named, err := reference.ParseNormalizedNamed(s) + if err != nil { + return + } + // the canonical image name, for some reason, excludes + // the tag name. So we need to make sure it is included + // in the image name so we can determine if the :latest + // tag is specified + named = reference.TagNameOnly(named) + + return named.String(), + reference.Domain(named), + strings.HasSuffix(named.String(), ":latest"), + nil +} diff --git a/engine/image_test.go b/engine/image_test.go new file mode 100644 index 0000000..49d11b2 --- /dev/null +++ b/engine/image_test.go @@ -0,0 +1,56 @@ +// Copyright 2019 Drone.IO Inc. All rights reserved. +// Use of this source code is governed by the Polyform License +// that can be found in the LICENSE file. + +package engine + +import "testing" + +func TestParseImage(t *testing.T) { + tests := []struct { + image string + canonical string + domain string + latest bool + err bool + }{ + { + image: "golang", + canonical: "docker.io/library/golang:latest", + domain: "docker.io", + latest: true, + }, + { + image: "golang:1.11", + canonical: "docker.io/library/golang:1.11", + domain: "docker.io", + latest: false, + }, + { + image: "", + err: true, + }, + } + + for _, test := range tests { + canonical, domain, latest, err := parseImage(test.image) + if test.err { + if err == nil { + t.Errorf("Expect error parsing image %s", test.image) + } + continue + } + if err != nil { + t.Error(err) + } + if got, want := canonical, test.canonical; got != want { + t.Errorf("Want image %s, got %s", want, got) + } + if got, want := domain, test.domain; got != want { + t.Errorf("Want image domain %s, got %s", want, got) + } + if got, want := latest, test.latest; got != want { + t.Errorf("Want image latest %v, got %v", want, got) + } + } +} diff --git a/engine/spec.go b/engine/spec.go index 6f75b57..9387343 100644 --- a/engine/spec.go +++ b/engine/spec.go @@ -119,6 +119,41 @@ type ( Labels map[string]string `json:"labels,omitempty"` } + // XVolume that is mounted into the container + XVolume struct { + ID string `json:"id,omitempty"` + Source string `json:"source,omitempty"` + Target string `json:"target,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + } + + volumeDevice struct { + Path string + } + + volumeData struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"target,omitempty"` + Mode uint32 `json:"mode,omitempty"` + } + + volumeBind struct { + Source string `json:"source,omitempty"` + Target string `json:"target,omitempty"` + Readonly bool `json:"readonly,omitempty"` + } + + volumePipe struct { + Source string `json:"source,omitempty"` + Target string `json:"target,omitempty"` + } + + volumeTemp struct { + Size int64 `json:"size,omitempty"` + Path string `json:"path,omitempty"` + } + // Network that is created and attached to containers Network struct { ID string `json:"id,omitempty"` diff --git a/engine/stdcopy/stdcopy.go b/engine/stdcopy/stdcopy.go new file mode 100644 index 0000000..a01c6e1 --- /dev/null +++ b/engine/stdcopy/stdcopy.go @@ -0,0 +1,188 @@ +// Copyright 2018 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stdcopy + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" +) + +// StdType is the type of standard stream +// a writer can multiplex to. +type StdType byte + +const ( + // Stdin represents standard input stream type. + Stdin StdType = iota + // Stdout represents standard output stream type. + Stdout + // Stderr represents standard error steam type. + Stderr + + stdWriterPrefixLen = 8 + stdWriterFdIndex = 0 + stdWriterSizeIndex = 4 + + startingBufLen = 32*1024 + stdWriterPrefixLen + 1 +) + +var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }} + +// stdWriter is wrapper of io.Writer with extra customized info. +type stdWriter struct { + io.Writer + prefix byte +} + +// Write sends the buffer to the underneath writer. +// It inserts the prefix header before the buffer, +// so stdcopy.StdCopy knows where to multiplex the output. +// It makes stdWriter to implement io.Writer. +func (w *stdWriter) Write(p []byte) (n int, err error) { + if w == nil || w.Writer == nil { + return 0, errors.New("Writer not instantiated") + } + if p == nil { + return 0, nil + } + + header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} + binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) + buf := bufPool.Get().(*bytes.Buffer) + buf.Write(header[:]) + buf.Write(p) + + n, err = w.Writer.Write(buf.Bytes()) + n -= stdWriterPrefixLen + if n < 0 { + n = 0 + } + + buf.Reset() + bufPool.Put(buf) + return +} + +// NewStdWriter instantiates a new Writer. +// Everything written to it will be encapsulated using a custom format, +// and written to the underlying `w` stream. +// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. +// `t` indicates the id of the stream to encapsulate. +// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. +func NewStdWriter(w io.Writer, t StdType) io.Writer { + return &stdWriter{ + Writer: w, + prefix: byte(t), + } +} + +// StdCopy is a modified version of io.Copy. +// +// StdCopy will demultiplex `src`, assuming that it contains two streams, +// previously multiplexed together using a StdWriter instance. +// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. +// +// StdCopy will read until it hits EOF on `src`. It will then return a nil error. +// In other words: if `err` is non nil, it indicates a real underlying error. +// +// `written` will hold the total number of bytes written to `dstout` and `dsterr`. 
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, startingBufLen) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + // Check the first byte to know where to write + switch StdType(buf[stdWriterFdIndex]) { + case Stdin: + fallthrough + case Stdout: + // Write on stdout + out = dstout + case Stderr: + // Write on stderr + out = dsterr + default: + return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+stdWriterPrefixLen > bufLen { + buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) + bufLen = len(buf) + } + + // While the amount of bytes read is less than the size of the frame + header, we keep reading + for nr < frameSize+stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < frameSize+stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + // Write the retrieved frame (without header) + nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) + if ew != nil { + return 0, ew + } + // If the frame has not been fully written: error + if nw != frameSize { + return 0, io.ErrShortWrite + } + written += int64(nw) + + // Move the rest of the buffer to the beginning + copy(buf, buf[frameSize+stdWriterPrefixLen:]) + // Move the index + nr -= frameSize + stdWriterPrefixLen + } +} diff --git a/go.mod b/go.mod index 8c2db79..136fa29 100644 --- a/go.mod +++ b/go.mod @@ -3,10 +3,12 @@ module github.com/drone-runners/drone-runner-docker go 1.12 require ( + docker.io/go-docker v1.0.0 github.com/buildkite/yaml v2.1.0+incompatible github.com/dchest/uniuri v0.0.0-20160212164326-8902c56451e9 github.com/digitalocean/godo v1.19.0 github.com/docker/distribution v2.7.1+incompatible + github.com/docker/docker v1.13.1 github.com/drone/drone-go v1.0.5-0.20190504210458-4d6116b897ba github.com/drone/drone-runtime v1.0.7-0.20190729202838-87c84080f4a1 github.com/drone/drone-yaml v1.2.2 diff --git a/go.sum b/go.sum index 28d5442..e0d22a5 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,5 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +docker.io/go-docker v1.0.0 h1:VdXS/aNYQxyA9wdLD5z8Q8Ro688/hG8HzKxYVEVbE6s= docker.io/go-docker v1.0.0/go.mod h1:7tiAn5a0LFmjbPDbyTPOaTTOuG1ZRNXdPA6RvKY+fpY= github.com/99designs/basicauth-go v0.0.0-20160802081356-2a93ba0f464d h1:j6oB/WPCigdOkxtuPl1VSIiLpy7Mdsu6phQffbF19Ng= github.com/99designs/basicauth-go v0.0.0-20160802081356-2a93ba0f464d/go.mod h1:3cARGAK9CfW3HoxCy1a0G4TKrdiKke8ftOMEOHyySYs= @@ -24,6 +25,9 @@ github.com/digitalocean/godo v1.19.0/go.mod h1:AAPQ+tiM4st79QHlEBTg8LM7JQNre4SAQ github.com/docker/distribution v0.0.0-20170726174610-edc3ab29cdff/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo=
+github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o=
 github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
@@ -59,6 +63,7 @@ github.com/drone/signal v1.0.0 h1:NrnM2M/4yAuU/tXs6RP1a1ZfxnaHwYkd0kJurA1p6uI=
 github.com/drone/signal v1.0.0/go.mod h1:S8t92eFT0g4WUgEc/LxG+LCuiskpMNsG0ajAMGnyZpc=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gogo/protobuf v0.0.0-20170307180453-100ba4e88506 h1:zDlw+wgyXdfkRuvFCdEDUiPLmZp2cvf/dWHazY0a5VM=
 github.com/gogo/protobuf v0.0.0-20170307180453-100ba4e88506/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -100,6 +105,7 @@ github.com/natessilva/dag v0.0.0-20180124060714-7194b8dcc5c4 h1:dnMxwus89s86tI8r
 github.com/natessilva/dag v0.0.0-20180124060714-7194b8dcc5c4/go.mod h1:cojhOHk1gbMeklOyDP2oKKLftefXoJreOQGOrXk+Z38=
 github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/petar/GoLLRB v0.0.0-20130427215148-53be0d36a84c/go.mod h1:HUpKUBZnpzkdx0kD/+Yfuft+uD3zHGtXF/XJB14TUr4=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
diff --git a/runtime/runner.go b/runtime/runner.go
index b434b35..819eea6 100644
--- a/runtime/runner.go
+++ b/runtime/runner.go
@@ -28,12 +28,16 @@ import (
     "github.com/drone/runner-go/secret"
 )
 
-// Runnner runs the pipeline.
+// Runner runs the pipeline.
 type Runner struct {
     // Client is the remote client responsible for interacting
     // with the central server.
     Client client.Client
 
+    // Compiler is responsible for compiling the pipeline
+    // configuration to the intermediate representation.
+    Compiler *compiler.Compiler
+
     // Execer is responsible for executing intermediate
     // representation of the pipeline and returns its results.
     Execer Execer
@@ -42,14 +46,6 @@ type Runner struct {
     // and failing if any rules are broken.
     Linter *linter.Linter
 
-    // Reporter reports pipeline status back to the remote
-    // server.
-    Reporter pipeline.Reporter
-
-    // Environ provides custom, global environment variables
-    // that are added to every pipeline step.
-    Environ map[string]string
-
     // Machine provides the runner with the name of the host
     // machine executing the pipeline.
     Machine string
@@ -60,8 +56,9 @@ type Runner struct {
     // processing an unwanted pipeline.
     Match func(*drone.Repo, *drone.Build) bool
 
-    // Secret provides the compiler with secrets.
-    Secret secret.Provider
+    // Reporter reports pipeline status and logs back to the
+    // remote server.
+    Reporter pipeline.Reporter
 }
 
 // Run runs the pipeline stage.
@@ -124,7 +121,6 @@ func (s *Runner) Run(ctx context.Context, stage *drone.Stage) error {
     }()
 
     envs := environ.Combine(
-        s.Environ,
         environ.System(data.System),
         environ.Repo(data.Repo),
         environ.Build(data.Build),
@@ -197,15 +193,14 @@ func (s *Runner) Run(ctx context.Context, stage *drone.Stage) error {
     secrets := secret.Combine(
         secret.Static(data.Secrets),
         secret.Encrypted(),
-        s.Secret,
+        // s.Secret,
     )
 
     // compile the yaml configuration file to an intermediate
     // representation, and then
-    comp := &compiler.Compiler{
+    args := compiler.Args{
         Pipeline: resource,
         Manifest: manifest,
-        Environ:  s.Environ,
         Build:    data.Build,
         Stage:    stage,
         Repo:     data.Repo,
@@ -214,7 +209,7 @@ func (s *Runner) Run(ctx context.Context, stage *drone.Stage) error {
         Secret: secrets,
     }
 
-    spec := comp.Compile(ctx)
+    spec := s.Compiler.Compile(ctx, args)
     for _, src := range spec.Steps {
         // steps that are skipped are ignored and are not stored
         // in the drone database, nor displayed in the UI.
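Usage sketch (illustrative, not part of the patch): after this refactor the Compiler carries only runner-level configuration, and everything that varies per build is passed to Compile through a compiler.Args value. A minimal sketch of the new call pattern follows; the package name, function, and example environment value are hypothetical, the Secret and Registry providers are omitted for brevity, and the import paths and the *resource.Pipeline and *manifest.Manifest parameter types are assumed from the package layout visible in the diff above.

package example // hypothetical package, for illustration only

import (
	"context"

	"github.com/drone-runners/drone-runner-docker/engine"
	"github.com/drone-runners/drone-runner-docker/engine/compiler"
	"github.com/drone-runners/drone-runner-docker/engine/resource"

	"github.com/drone/drone-go/drone"
	"github.com/drone/runner-go/manifest"
)

// compileExample shows the call pattern introduced by this patch:
// runner-level settings are configured once on the Compiler, while
// per-build inputs travel in a compiler.Args value on every call.
func compileExample(
	ctx context.Context,
	man *manifest.Manifest,
	pipeline *resource.Pipeline,
	build *drone.Build,
	repo *drone.Repo,
	stage *drone.Stage,
	system *drone.System,
) *engine.Spec {
	// static, reusable configuration (example value only); a real
	// caller would also set the Secret and Registry providers.
	comp := &compiler.Compiler{
		Environ: map[string]string{"RUNNER_EXAMPLE": "true"},
	}

	// per-build inputs, mirroring the fields moved out of the
	// Compiler struct and into Args by this change.
	args := compiler.Args{
		Manifest: man,
		Pipeline: pipeline,
		Build:    build,
		Repo:     repo,
		Stage:    stage,
		System:   system,
	}
	return comp.Compile(ctx, args)
}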