Vendor in latest projectatomic/buildah

Adds --rm and --force-rm to podman build.

Signed-off-by: umohnani8 <umohnani@redhat.com>

Closes: #945
Approved by: rhatdan
This commit is contained in:
umohnani8 2018-06-14 13:25:03 -04:00 committed by Atomic Bot
parent c1ef1151ac
commit 6bdf023aea
7 changed files with 158 additions and 99 deletions

View file

@@ -840,6 +840,7 @@ _podman_commit() {
_podman_build() { _podman_build() {
local boolean_options=" local boolean_options="
--force-rm
--help --help
-h -h
--layers --layers
@@ -848,6 +849,7 @@ _podman_build() {
--pull-always --pull-always
--quiet --quiet
-q -q
--rm
--squash --squash
--tls-verify --tls-verify
" "

View file

@@ -160,9 +160,9 @@ If a build context is not specified, and at least one Dockerfile is a
local file, the directory in which it resides will be used as the build local file, the directory in which it resides will be used as the build
context. context.
**--force-rm** **--force-rm** *bool-value*
Always remove intermediate containers after a build. Podman does not currently support caching so this is a NOOP. Always remove intermediate containers after a build, even if the build is unsuccessful.
**--format** **--format**
@@ -264,9 +264,9 @@ Suppress output messages which indicate which instruction is being processed,
and of progress when pulling images from a registry, and when writing the and of progress when pulling images from a registry, and when writing the
output image. output image.
**--rm** **--rm** *bool-value*
Remove intermediate containers after a successful build. Podman does not currently support caching so this is a NOOP. Remove intermediate containers after a successful build (default true).
**--runtime** *path* **--runtime** *path*
@@ -510,6 +510,10 @@ podman build --layers -t imageName .
podman build --no-cache -t imageName . podman build --no-cache -t imageName .
podman build --layers --force-rm -t imageName .
podman build --no-cache --rm=false -t imageName .
### Building an image using a URL, Git repo, or archive ### Building an image using a URL, Git repo, or archive
The build context directory can be specified as a URL to a Dockerfile, a Git repository, or URL to an archive. If the URL is a Dockerfile, it is downloaded to a temporary location and used as the context. When a Git repository is set as the URL, the repository is cloned locally to a temporary location and then used as the context. Lastly, if the URL is an archive, it is downloaded to a temporary location and extracted before being used as the context. The build context directory can be specified as a URL to a Dockerfile, a Git repository, or URL to an archive. If the URL is a Dockerfile, it is downloaded to a temporary location and used as the context. When a Git repository is set as the URL, the repository is cloned locally to a temporary location and then used as the context. Lastly, if the URL is an archive, it is downloaded to a temporary location and extracted before being used as the context.

View file

@@ -88,7 +88,7 @@ k8s.io/kube-openapi 275e2ce91dec4c05a4094a7b1daee5560b555ac9 https://github.com/
k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e https://github.com/kubernetes/utils k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e https://github.com/kubernetes/utils
github.com/mrunalp/fileutils master github.com/mrunalp/fileutils master
github.com/varlink/go master github.com/varlink/go master
github.com/projectatomic/buildah 6c4bef7f2e17432d4dc4065299cec08754ca5863 github.com/projectatomic/buildah fc438bb932e891cbe04109cfae1dfbe3c99307a5
github.com/Nvveen/Gotty master github.com/Nvveen/Gotty master
github.com/fsouza/go-dockerclient master github.com/fsouza/go-dockerclient master
github.com/openshift/imagebuilder master github.com/openshift/imagebuilder master

View file

@@ -34,7 +34,6 @@ const (
PullIfMissing = buildah.PullIfMissing PullIfMissing = buildah.PullIfMissing
PullAlways = buildah.PullAlways PullAlways = buildah.PullAlways
PullNever = buildah.PullNever PullNever = buildah.PullNever
DefaultRuntime = buildah.DefaultRuntime
OCIv1ImageFormat = buildah.OCIv1ImageManifest OCIv1ImageFormat = buildah.OCIv1ImageManifest
Dockerv2ImageFormat = buildah.Dockerv2ImageManifest Dockerv2ImageFormat = buildah.Dockerv2ImageManifest
@@ -148,6 +147,12 @@ type BuildOptions struct {
// NoCache tells the builder to build the image from scratch without checking for a cache. // NoCache tells the builder to build the image from scratch without checking for a cache.
// It creates a new set of cached images for the build. // It creates a new set of cached images for the build.
NoCache bool NoCache bool
// RemoveIntermediateCtrs tells the builder whether to remove intermediate containers used
// during the build process. Default is true.
RemoveIntermediateCtrs bool
// ForceRmIntermediateCtrs tells the builder to remove all intermediate containers even if
// the build was unsuccessful.
ForceRmIntermediateCtrs bool
} }
// Executor is a buildah-based implementation of the imagebuilder.Executor // Executor is a buildah-based implementation of the imagebuilder.Executor
@@ -197,6 +202,8 @@ type Executor struct {
layers bool layers bool
topLayers []string topLayers []string
noCache bool noCache bool
removeIntermediateCtrs bool
forceRmIntermediateCtrs bool
} }
// withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME. // withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME.
@@ -522,35 +529,37 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
registry: options.Registry, registry: options.Registry,
transport: options.Transport, transport: options.Transport,
ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions, ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions,
quiet: options.Quiet, quiet: options.Quiet,
runtime: options.Runtime, runtime: options.Runtime,
runtimeArgs: options.RuntimeArgs, runtimeArgs: options.RuntimeArgs,
transientMounts: options.TransientMounts, transientMounts: options.TransientMounts,
compression: options.Compression, compression: options.Compression,
output: options.Output, output: options.Output,
outputFormat: options.OutputFormat, outputFormat: options.OutputFormat,
additionalTags: options.AdditionalTags, additionalTags: options.AdditionalTags,
signaturePolicyPath: options.SignaturePolicyPath, signaturePolicyPath: options.SignaturePolicyPath,
systemContext: options.SystemContext, systemContext: options.SystemContext,
volumeCache: make(map[string]string), volumeCache: make(map[string]string),
volumeCacheInfo: make(map[string]os.FileInfo), volumeCacheInfo: make(map[string]os.FileInfo),
log: options.Log, log: options.Log,
out: options.Out, out: options.Out,
err: options.Err, err: options.Err,
reportWriter: options.ReportWriter, reportWriter: options.ReportWriter,
namespaceOptions: options.NamespaceOptions, namespaceOptions: options.NamespaceOptions,
configureNetwork: options.ConfigureNetwork, configureNetwork: options.ConfigureNetwork,
cniPluginPath: options.CNIPluginPath, cniPluginPath: options.CNIPluginPath,
cniConfigDir: options.CNIConfigDir, cniConfigDir: options.CNIConfigDir,
idmappingOptions: options.IDMappingOptions, idmappingOptions: options.IDMappingOptions,
commonBuildOptions: options.CommonBuildOpts, commonBuildOptions: options.CommonBuildOpts,
defaultMountsFilePath: options.DefaultMountsFilePath, defaultMountsFilePath: options.DefaultMountsFilePath,
iidfile: options.IIDFile, iidfile: options.IIDFile,
squash: options.Squash, squash: options.Squash,
labels: append([]string{}, options.Labels...), labels: append([]string{}, options.Labels...),
annotations: append([]string{}, options.Annotations...), annotations: append([]string{}, options.Annotations...),
layers: options.Layers, layers: options.Layers,
noCache: options.NoCache, noCache: options.NoCache,
removeIntermediateCtrs: options.RemoveIntermediateCtrs,
forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
} }
if exec.err == nil { if exec.err == nil {
exec.err = os.Stderr exec.err = os.Stderr
@@ -679,6 +688,7 @@ func (b *Executor) Delete() (err error) {
func (b *Executor) Execute(ctx context.Context, ib *imagebuilder.Builder, node *parser.Node) error { func (b *Executor) Execute(ctx context.Context, ib *imagebuilder.Builder, node *parser.Node) error {
checkForLayers := true checkForLayers := true
children := node.Children children := node.Children
commitName := b.output
for i, node := range node.Children { for i, node := range node.Children {
step := ib.Step() step := ib.Step()
if err := step.Resolve(node); err != nil { if err := step.Resolve(node); err != nil {
@@ -701,6 +711,12 @@ func (b *Executor) Execute(ctx context.Context, ib *imagebuilder.Builder, node *
continue continue
} }
if i < len(children)-1 {
b.output = ""
} else {
b.output = commitName
}
var ( var (
cacheID string cacheID string
err error err error
@@ -740,9 +756,11 @@ func (b *Executor) Execute(ctx context.Context, ib *imagebuilder.Builder, node *
// it is used to create the container for the next step. // it is used to create the container for the next step.
imgID = cacheID imgID = cacheID
} }
// Delete the intermediate container. // Delete the intermediate container if b.removeIntermediateCtrs is true.
if err := b.Delete(); err != nil { if b.removeIntermediateCtrs {
return errors.Wrap(err, "error deleting intermediate container") if err := b.Delete(); err != nil {
return errors.Wrap(err, "error deleting intermediate container")
}
} }
// Prepare for the next step with imgID as the new base image. // Prepare for the next step with imgID as the new base image.
if i != len(children)-1 { if i != len(children)-1 {
@@ -1048,7 +1066,12 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) error
if err := stageExecutor.Prepare(ctx, stage.Builder, stage.Node, ""); err != nil { if err := stageExecutor.Prepare(ctx, stage.Builder, stage.Node, ""); err != nil {
return err return err
} }
defer stageExecutor.Delete() // Always remove the intermediate/build containers, even if the build was unsuccessful.
// If building with layers, remove all intermediate/build containers if b.forceRmIntermediateCtrs
// is true.
if b.forceRmIntermediateCtrs || (!b.layers && !b.noCache) {
defer stageExecutor.Delete()
}
if err := stageExecutor.Execute(ctx, stage.Builder, stage.Node); err != nil { if err := stageExecutor.Execute(ctx, stage.Builder, stage.Node); err != nil {
return err return err
} }
@@ -1057,7 +1080,21 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) error
return nil return nil
} }
_, err := stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "") _, err := stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "")
return err if err != nil {
return err
}
// If building with layers and b.removeIntermediateCtrs is true
// only remove intermediate container for each step if an error
// during the build process doesn't occur.
// If the build is unsuccessful, the container created at the step
// the failure happened will persist in the container store.
// This if condition will be false if not building with layers and
// the removal of intermediate/build containers will be handled by the
// defer statement above.
if b.removeIntermediateCtrs && (b.layers || b.noCache) {
return stageExecutor.Delete()
}
return nil
} }
// BuildDockerfiles parses a set of one or more Dockerfiles (which may be // BuildDockerfiles parses a set of one or more Dockerfiles (which may be

View file

@@ -10,12 +10,12 @@ import (
"strings" "strings"
"github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-spec/specs-go"
"github.com/projectatomic/buildah"
"github.com/projectatomic/buildah/util" "github.com/projectatomic/buildah/util"
"github.com/urfave/cli" "github.com/urfave/cli"
) )
var ( var (
runtime = util.Runtime()
usernsFlags = []cli.Flag{ usernsFlags = []cli.Flag{
cli.StringFlag{ cli.StringFlag{
Name: "userns", Name: "userns",
@@ -109,7 +109,7 @@ var (
}, },
cli.BoolFlag{ cli.BoolFlag{
Name: "force-rm", Name: "force-rm",
Usage: "Always remove intermediate containers after a build. The build process does not currently support caching so this is a NOOP.", Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful.",
}, },
cli.StringFlag{ cli.StringFlag{
Name: "format", Name: "format",
@@ -147,14 +147,14 @@ var (
Name: "quiet, q", Name: "quiet, q",
Usage: "refrain from announcing build instructions and image read/write progress", Usage: "refrain from announcing build instructions and image read/write progress",
}, },
cli.BoolFlag{ cli.BoolTFlag{
Name: "rm", Name: "rm",
Usage: "Remove intermediate containers after a successful build. The build process does not currently support caching so this is a NOOP.", Usage: "Remove intermediate containers after a successful build (default true)",
}, },
cli.StringFlag{ cli.StringFlag{
Name: "runtime", Name: "runtime",
Usage: "`path` to an alternate runtime", Usage: "`path` to an alternate runtime",
Value: buildah.DefaultRuntime, Value: runtime,
}, },
cli.StringSliceFlag{ cli.StringSliceFlag{
Name: "runtime-flag", Name: "runtime-flag",

View file

@@ -41,8 +41,6 @@ import (
const ( const (
// DefaultWorkingDir is used if none was specified. // DefaultWorkingDir is used if none was specified.
DefaultWorkingDir = "/" DefaultWorkingDir = "/"
// DefaultRuntime is the default command to use to run the container.
DefaultRuntime = "runc"
// runUsingRuntimeCommand is a command we use as a key for reexec // runUsingRuntimeCommand is a command we use as a key for reexec
runUsingRuntimeCommand = Package + "-runtime" runUsingRuntimeCommand = Package + "-runtime"
) )
@@ -1007,7 +1005,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, configureNetwork
// Decide which runtime to use. // Decide which runtime to use.
runtime := options.Runtime runtime := options.Runtime
if runtime == "" { if runtime == "" {
runtime = DefaultRuntime runtime = util.Runtime()
} }
// Default to not specifying a console socket location. // Default to not specifying a console socket location.
@@ -1405,41 +1403,63 @@ func runCopyStdio(stdio *sync.WaitGroup, copyStdio bool, stdioPipe [][]int, copy
reading := 0 reading := 0
// Map describing where data on an incoming descriptor should go. // Map describing where data on an incoming descriptor should go.
relayMap := make(map[int]int) relayMap := make(map[int]int)
// Map describing incoming descriptors. // Map describing incoming and outgoing descriptors.
relayDesc := make(map[int]string) readDesc := make(map[int]string)
writeDesc := make(map[int]string)
// Buffers. // Buffers.
relayBuffer := make(map[int]*bytes.Buffer) relayBuffer := make(map[int]*bytes.Buffer)
if copyConsole { if copyConsole {
// Input from our stdin, output from the terminal descriptor. // Input from our stdin, output from the terminal descriptor.
relayMap[unix.Stdin] = terminalFD relayMap[unix.Stdin] = terminalFD
relayDesc[unix.Stdin] = "stdin" readDesc[unix.Stdin] = "stdin"
relayBuffer[unix.Stdin] = new(bytes.Buffer)
relayMap[terminalFD] = unix.Stdout
relayDesc[terminalFD] = "container terminal output"
relayBuffer[terminalFD] = new(bytes.Buffer) relayBuffer[terminalFD] = new(bytes.Buffer)
writeDesc[terminalFD] = "container terminal input"
relayMap[terminalFD] = unix.Stdout
readDesc[terminalFD] = "container terminal output"
relayBuffer[unix.Stdout] = new(bytes.Buffer)
writeDesc[unix.Stdout] = "output"
reading = 2 reading = 2
} }
if copyStdio { if copyStdio {
// Input from our stdin, output from the stdout and stderr pipes. // Input from our stdin, output from the stdout and stderr pipes.
relayMap[unix.Stdin] = stdioPipe[unix.Stdin][1] relayMap[unix.Stdin] = stdioPipe[unix.Stdin][1]
relayDesc[unix.Stdin] = "stdin" readDesc[unix.Stdin] = "stdin"
relayBuffer[unix.Stdin] = new(bytes.Buffer) relayBuffer[stdioPipe[unix.Stdin][1]] = new(bytes.Buffer)
writeDesc[stdioPipe[unix.Stdin][1]] = "container stdin"
relayMap[stdioPipe[unix.Stdout][0]] = unix.Stdout relayMap[stdioPipe[unix.Stdout][0]] = unix.Stdout
relayDesc[stdioPipe[unix.Stdout][0]] = "container stdout" readDesc[stdioPipe[unix.Stdout][0]] = "container stdout"
relayBuffer[stdioPipe[unix.Stdout][0]] = new(bytes.Buffer) relayBuffer[unix.Stdout] = new(bytes.Buffer)
writeDesc[unix.Stdout] = "stdout"
relayMap[stdioPipe[unix.Stderr][0]] = unix.Stderr relayMap[stdioPipe[unix.Stderr][0]] = unix.Stderr
relayDesc[stdioPipe[unix.Stderr][0]] = "container stderr" readDesc[stdioPipe[unix.Stderr][0]] = "container stderr"
relayBuffer[stdioPipe[unix.Stderr][0]] = new(bytes.Buffer) relayBuffer[unix.Stderr] = new(bytes.Buffer)
writeDesc[unix.Stderr] = "stderr"
reading = 3 reading = 3
} }
// Set our reading descriptors to non-blocking. // Set our reading descriptors to non-blocking.
for fd := range relayMap { for fd := range relayMap {
if err := unix.SetNonblock(fd, true); err != nil { if err := unix.SetNonblock(fd, true); err != nil {
logrus.Errorf("error setting %s to nonblocking: %v", relayDesc[fd], err) logrus.Errorf("error setting %s to nonblocking: %v", readDesc[fd], err)
return return
} }
} }
// A helper that returns false if err is an error that would cause us
// to give up.
logIfNotRetryable := func(err error, what string) (retry bool) {
if err == nil {
return true
}
if errno, isErrno := err.(syscall.Errno); isErrno {
switch errno {
case syscall.EINTR, syscall.EAGAIN:
return true
}
}
logrus.Error(what)
return false
}
// Pass data back and forth. // Pass data back and forth.
pollTimeout := -1
for { for {
// Start building the list of descriptors to poll. // Start building the list of descriptors to poll.
pollFds := make([]unix.PollFd, 0, reading+1) pollFds := make([]unix.PollFd, 0, reading+1)
@@ -1451,23 +1471,8 @@ } }
} }
buf := make([]byte, 8192) buf := make([]byte, 8192)
// Wait for new data from any input descriptor, or a notification that we're done. // Wait for new data from any input descriptor, or a notification that we're done.
nevents, err := unix.Poll(pollFds, -1) _, err := unix.Poll(pollFds, pollTimeout)
if err != nil { if !logIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) {
if errno, isErrno := err.(syscall.Errno); isErrno {
switch errno {
case syscall.EINTR:
continue
default:
logrus.Errorf("unable to wait for stdio/terminal data to relay: %v", err)
return
}
} else {
logrus.Errorf("unable to wait for stdio/terminal data to relay: %v", err)
return
}
}
if nevents == 0 {
logrus.Errorf("unexpected no data, no error waiting for terminal data to relay")
return return
} }
var removes []int var removes []int
@@ -1487,22 +1492,13 @@ func runCopyStdio(stdio *sync.WaitGroup, copyStdio bool, stdioPipe [][]int, copy
} }
continue continue
} }
// Copy whatever we read to wherever it needs to be sent. // Read whatever there is to be read.
readFD := int(pollFd.Fd) readFD := int(pollFd.Fd)
writeFD, needToRelay := relayMap[readFD] writeFD, needToRelay := relayMap[readFD]
if needToRelay { if needToRelay {
n, err := unix.Read(readFD, buf) n, err := unix.Read(readFD, buf)
if err != nil { if !logIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) {
if errno, isErrno := err.(syscall.Errno); isErrno { return
switch errno {
default:
logrus.Errorf("unable to read %s: %v", relayDesc[readFD], err)
case syscall.EINTR, syscall.EAGAIN:
}
} else {
logrus.Errorf("unable to wait for %s data to relay: %v", relayDesc[readFD], err)
}
continue
} }
// If it's zero-length on our stdin and we're // If it's zero-length on our stdin and we're
// using pipes, it's an EOF, so close the stdin // using pipes, it's an EOF, so close the stdin
@@ -1512,18 +1508,28 @@ func runCopyStdio(stdio *sync.WaitGroup, copyStdio bool, stdioPipe [][]int, copy
stdioPipe[unix.Stdin][1] = -1 stdioPipe[unix.Stdin][1] = -1
} }
if n > 0 { if n > 0 {
// Buffer the data in case we're blocked on where they need to go. // Buffer the data in case we get blocked on where they need to go.
relayBuffer[readFD].Write(buf[:n]) relayBuffer[writeFD].Write(buf[:n])
// Try to drain the buffer.
n, err = unix.Write(writeFD, relayBuffer[readFD].Bytes())
if err != nil {
logrus.Errorf("unable to write %s: %v", relayDesc[readFD], err)
return
}
relayBuffer[readFD].Next(n)
} }
} }
} }
// Try to drain the output buffers. Set the default timeout
// for the next poll() to 100ms if we still have data to write.
pollTimeout = -1
for writeFD := range relayBuffer {
if relayBuffer[writeFD].Len() > 0 {
n, err := unix.Write(writeFD, relayBuffer[writeFD].Bytes())
if !logIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) {
return
}
if n > 0 {
relayBuffer[writeFD].Next(n)
}
}
if relayBuffer[writeFD].Len() > 0 {
pollTimeout = 100
}
}
// Remove any descriptors which we don't need to poll any more from the poll descriptor list. // Remove any descriptors which we don't need to poll any more from the poll descriptor list.
for _, remove := range removes { for _, remove := range removes {
delete(relayMap, remove) delete(relayMap, remove)

View file

@@ -4,6 +4,7 @@ import (
"fmt" "fmt"
"io" "io"
"net/url" "net/url"
"os"
"path" "path"
"strings" "strings"
@@ -233,3 +234,12 @@ func WriteError(w io.Writer, err error, lastError error) error {
} }
return err return err
} }
// Runtime is the default command to use to run the container.
func Runtime() string {
runtime := os.Getenv("BUILDAH_RUNTIME")
if runtime != "" {
return runtime
}
return DefaultRuntime
}