Vendor in latest projectatomic/buildah

Adds --rm and --force-rm to podman build.

Signed-off-by: umohnani8 <umohnani@redhat.com>

Closes: #945
Approved by: rhatdan
umohnani8 2018-06-14 13:25:03 -04:00 committed by Atomic Bot
parent c1ef1151ac
commit 6bdf023aea
7 changed files with 158 additions and 99 deletions


@@ -840,6 +840,7 @@ _podman_commit() {
_podman_build() {
local boolean_options="
--force-rm
--help
-h
--layers
@@ -848,6 +849,7 @@ _podman_build() {
--pull-always
--quiet
-q
--rm
--squash
--tls-verify
"


@@ -160,9 +160,9 @@ If a build context is not specified, and at least one Dockerfile is a
local file, the directory in which it resides will be used as the build
context.
**--force-rm**
**--force-rm** *bool-value*
Always remove intermediate containers after a build. Podman does not currently support caching so this is a NOOP.
Always remove intermediate containers after a build, even if the build is unsuccessful.
**--format**
@@ -264,9 +264,9 @@ Suppress output messages which indicate which instruction is being processed,
and of progress when pulling images from a registry, and when writing the
output image.
**--rm**
**--rm** *bool-value*
Remove intermediate containers after a successful build. Podman does not currently support caching so this is a NOOP.
Remove intermediate containers after a successful build (default true).
**--runtime** *path*
@@ -510,6 +510,10 @@ podman build --layers -t imageName .
podman build --no-cache -t imageName .
podman build --layers --force-rm -t imageName .
podman build --no-cache --rm=false -t imageName .
### Building an image using a URL, Git repo, or archive
The build context directory can be specified as a URL to a Dockerfile, a Git repository, or URL to an archive. If the URL is a Dockerfile, it is downloaded to a temporary location and used as the context. When a Git repository is set as the URL, the repository is cloned locally to a temporary location and then used as the context. Lastly, if the URL is an archive, it is downloaded to a temporary location and extracted before being used as the context.
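For illustration only (the URLs below are placeholders, not taken from this commit), the three context forms described in the paragraph above would be invoked roughly as:

podman build https://192.0.2.10/repo/Dockerfile
podman build -t imageName git://github.com/example/project
podman build -t imageName https://192.0.2.10/repo/context.tar.gz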


@@ -88,7 +88,7 @@ k8s.io/kube-openapi 275e2ce91dec4c05a4094a7b1daee5560b555ac9 https://github.com/
k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e https://github.com/kubernetes/utils
github.com/mrunalp/fileutils master
github.com/varlink/go master
github.com/projectatomic/buildah 6c4bef7f2e17432d4dc4065299cec08754ca5863
github.com/projectatomic/buildah fc438bb932e891cbe04109cfae1dfbe3c99307a5
github.com/Nvveen/Gotty master
github.com/fsouza/go-dockerclient master
github.com/openshift/imagebuilder master


@@ -34,7 +34,6 @@ const (
PullIfMissing = buildah.PullIfMissing
PullAlways = buildah.PullAlways
PullNever = buildah.PullNever
DefaultRuntime = buildah.DefaultRuntime
OCIv1ImageFormat = buildah.OCIv1ImageManifest
Dockerv2ImageFormat = buildah.Dockerv2ImageManifest
@@ -148,6 +147,12 @@ type BuildOptions struct {
// NoCache tells the builder to build the image from scratch without checking for a cache.
// It creates a new set of cached images for the build.
NoCache bool
// RemoveIntermediateCtrs tells the builder whether to remove intermediate containers used
// during the build process. Default is true.
RemoveIntermediateCtrs bool
// ForceRmIntermediateCtrs tells the builder to remove all intermediate containers even if
// the build was unsuccessful.
ForceRmIntermediateCtrs bool
}
// Executor is a buildah-based implementation of the imagebuilder.Executor
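For orientation, here is a minimal sketch, not taken from this commit, of how a caller such as podman's build command could map its CLI flags onto the two new BuildOptions fields. It assumes a urfave/cli context c; podman's actual command wiring may differ.

package example

import (
	"github.com/projectatomic/buildah/imagebuildah"
	"github.com/urfave/cli"
)

// buildOptionsFromFlags is a hypothetical helper showing how the new fields
// line up with the --rm and --force-rm flags added by this commit.
func buildOptionsFromFlags(c *cli.Context) imagebuildah.BuildOptions {
	return imagebuildah.BuildOptions{
		Layers:                  c.Bool("layers"),
		NoCache:                 c.Bool("no-cache"),
		RemoveIntermediateCtrs:  c.BoolT("rm"),      // --rm defaults to true (BoolTFlag)
		ForceRmIntermediateCtrs: c.Bool("force-rm"), // --force-rm defaults to false
	}
}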
@@ -197,6 +202,8 @@ type Executor struct {
layers bool
topLayers []string
noCache bool
removeIntermediateCtrs bool
forceRmIntermediateCtrs bool
}
// withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME.
@@ -522,35 +529,37 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
registry: options.Registry,
transport: options.Transport,
ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions,
quiet: options.Quiet,
runtime: options.Runtime,
runtimeArgs: options.RuntimeArgs,
transientMounts: options.TransientMounts,
compression: options.Compression,
output: options.Output,
outputFormat: options.OutputFormat,
additionalTags: options.AdditionalTags,
signaturePolicyPath: options.SignaturePolicyPath,
systemContext: options.SystemContext,
volumeCache: make(map[string]string),
volumeCacheInfo: make(map[string]os.FileInfo),
log: options.Log,
out: options.Out,
err: options.Err,
reportWriter: options.ReportWriter,
namespaceOptions: options.NamespaceOptions,
configureNetwork: options.ConfigureNetwork,
cniPluginPath: options.CNIPluginPath,
cniConfigDir: options.CNIConfigDir,
idmappingOptions: options.IDMappingOptions,
commonBuildOptions: options.CommonBuildOpts,
defaultMountsFilePath: options.DefaultMountsFilePath,
iidfile: options.IIDFile,
squash: options.Squash,
labels: append([]string{}, options.Labels...),
annotations: append([]string{}, options.Annotations...),
layers: options.Layers,
noCache: options.NoCache,
quiet: options.Quiet,
runtime: options.Runtime,
runtimeArgs: options.RuntimeArgs,
transientMounts: options.TransientMounts,
compression: options.Compression,
output: options.Output,
outputFormat: options.OutputFormat,
additionalTags: options.AdditionalTags,
signaturePolicyPath: options.SignaturePolicyPath,
systemContext: options.SystemContext,
volumeCache: make(map[string]string),
volumeCacheInfo: make(map[string]os.FileInfo),
log: options.Log,
out: options.Out,
err: options.Err,
reportWriter: options.ReportWriter,
namespaceOptions: options.NamespaceOptions,
configureNetwork: options.ConfigureNetwork,
cniPluginPath: options.CNIPluginPath,
cniConfigDir: options.CNIConfigDir,
idmappingOptions: options.IDMappingOptions,
commonBuildOptions: options.CommonBuildOpts,
defaultMountsFilePath: options.DefaultMountsFilePath,
iidfile: options.IIDFile,
squash: options.Squash,
labels: append([]string{}, options.Labels...),
annotations: append([]string{}, options.Annotations...),
layers: options.Layers,
noCache: options.NoCache,
removeIntermediateCtrs: options.RemoveIntermediateCtrs,
forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
}
if exec.err == nil {
exec.err = os.Stderr
@@ -679,6 +688,7 @@ func (b *Executor) Delete() (err error) {
func (b *Executor) Execute(ctx context.Context, ib *imagebuilder.Builder, node *parser.Node) error {
checkForLayers := true
children := node.Children
commitName := b.output
for i, node := range node.Children {
step := ib.Step()
if err := step.Resolve(node); err != nil {
@@ -701,6 +711,12 @@ func (b *Executor) Execute(ctx context.Context, ib *imagebuilder.Builder, node *
continue
}
if i < len(children)-1 {
b.output = ""
} else {
b.output = commitName
}
var (
cacheID string
err error
@@ -740,9 +756,11 @@ func (b *Executor) Execute(ctx context.Context, ib *imagebuilder.Builder, node *
// it is used to create the container for the next step.
imgID = cacheID
}
// Delete the intermediate container.
if err := b.Delete(); err != nil {
return errors.Wrap(err, "error deleting intermediate container")
// Delete the intermediate container if b.removeIntermediateCtrs is true.
if b.removeIntermediateCtrs {
if err := b.Delete(); err != nil {
return errors.Wrap(err, "error deleting intermediate container")
}
}
// Prepare for the next step with imgID as the new base image.
if i != len(children)-1 {
@@ -1048,7 +1066,12 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) error
if err := stageExecutor.Prepare(ctx, stage.Builder, stage.Node, ""); err != nil {
return err
}
defer stageExecutor.Delete()
// When neither layers nor --no-cache is in use, always remove the intermediate/build
// containers, even if the build was unsuccessful.
// If building with layers, remove all intermediate/build containers only if
// b.forceRmIntermediateCtrs is true.
if b.forceRmIntermediateCtrs || (!b.layers && !b.noCache) {
defer stageExecutor.Delete()
}
if err := stageExecutor.Execute(ctx, stage.Builder, stage.Node); err != nil {
return err
}
@@ -1057,7 +1080,21 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) error
return nil
}
_, err := stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "")
return err
if err != nil {
return err
}
// If building with layers and b.removeIntermediateCtrs is true, remove the
// intermediate container for each step only if no error occurred during the
// build process.
// If the build is unsuccessful, the container created at the step where the
// failure happened will persist in the container store.
// This condition is false when not building with layers, in which case the
// removal of intermediate/build containers is handled by the defer statement
// above.
if b.removeIntermediateCtrs && (b.layers || b.noCache) {
return stageExecutor.Delete()
}
return nil
}
// BuildDockerfiles parses a set of one or more Dockerfiles (which may be
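To summarize the cleanup rules the two conditions above implement, here is a small, self-contained sketch. The helper names are hypothetical and not part of buildah; only the boolean logic is taken from the diff.

package main

import "fmt"

// shouldDeferDelete mirrors the condition guarding the deferred
// stageExecutor.Delete() in Build(): without --layers/--no-cache the build
// container is always cleaned up, even on failure; with layers it is only
// deferred when --force-rm was given.
func shouldDeferDelete(layers, noCache, forceRm bool) bool {
	return forceRm || (!layers && !noCache)
}

// shouldDeletePerStep mirrors the per-step cleanup in Execute() and at the
// end of Build(): with --layers (or --no-cache), intermediate containers are
// removed only when --rm is true and the steps completed successfully.
func shouldDeletePerStep(layers, noCache, rm bool) bool {
	return rm && (layers || noCache)
}

func main() {
	fmt.Println(shouldDeferDelete(true, false, false))  // false: a failed layered build keeps its containers
	fmt.Println(shouldDeferDelete(true, false, true))   // true:  --force-rm cleans up even on failure
	fmt.Println(shouldDeletePerStep(true, false, true)) // true:  --rm (the default) removes per-step containers
}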


@@ -10,12 +10,12 @@ import (
"strings"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/projectatomic/buildah"
"github.com/projectatomic/buildah/util"
"github.com/urfave/cli"
)
var (
runtime = util.Runtime()
usernsFlags = []cli.Flag{
cli.StringFlag{
Name: "userns",
@@ -109,7 +109,7 @@ var (
},
cli.BoolFlag{
Name: "force-rm",
Usage: "Always remove intermediate containers after a build. The build process does not currently support caching so this is a NOOP.",
Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful.",
},
cli.StringFlag{
Name: "format",
@@ -147,14 +147,14 @@ var (
Name: "quiet, q",
Usage: "refrain from announcing build instructions and image read/write progress",
},
cli.BoolFlag{
cli.BoolTFlag{
Name: "rm",
Usage: "Remove intermediate containers after a successful build. The build process does not currently support caching so this is a NOOP.",
Usage: "Remove intermediate containers after a successful build (default true)",
},
cli.StringFlag{
Name: "runtime",
Usage: "`path` to an alternate runtime",
Value: buildah.DefaultRuntime,
Value: runtime,
},
cli.StringSliceFlag{
Name: "runtime-flag",
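The switch from cli.BoolFlag to cli.BoolTFlag is what makes --rm default to true. A minimal standalone sketch of that behaviour, assuming urfave/cli v1 and a hypothetical demo app (not this repository's code):

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "rm-flag-demo" // hypothetical demo app
	app.Flags = []cli.Flag{
		// BoolTFlag is true unless the user passes --rm=false.
		cli.BoolTFlag{Name: "rm", Usage: "Remove intermediate containers after a successful build (default true)"},
		// BoolFlag is false unless the user passes --force-rm.
		cli.BoolFlag{Name: "force-rm", Usage: "Always remove intermediate containers, even if the build fails"},
	}
	app.Action = func(c *cli.Context) error {
		fmt.Println("rm:", c.BoolT("rm"), "force-rm:", c.Bool("force-rm"))
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}

Run with no arguments, this prints rm: true force-rm: false; passing --rm=false flips the first value, which matches the man page wording above.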


@@ -41,8 +41,6 @@ import (
const (
// DefaultWorkingDir is used if none was specified.
DefaultWorkingDir = "/"
// DefaultRuntime is the default command to use to run the container.
DefaultRuntime = "runc"
// runUsingRuntimeCommand is a command we use as a key for reexec
runUsingRuntimeCommand = Package + "-runtime"
)
@@ -1007,7 +1005,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, configureNetwork
// Decide which runtime to use.
runtime := options.Runtime
if runtime == "" {
runtime = DefaultRuntime
runtime = util.Runtime()
}
// Default to not specifying a console socket location.
@@ -1405,41 +1403,63 @@ func runCopyStdio(stdio *sync.WaitGroup, copyStdio bool, stdioPipe [][]int, copy
reading := 0
// Map describing where data on an incoming descriptor should go.
relayMap := make(map[int]int)
// Map describing incoming descriptors.
relayDesc := make(map[int]string)
// Map describing incoming and outgoing descriptors.
readDesc := make(map[int]string)
writeDesc := make(map[int]string)
// Buffers.
relayBuffer := make(map[int]*bytes.Buffer)
if copyConsole {
// Input from our stdin, output from the terminal descriptor.
relayMap[unix.Stdin] = terminalFD
relayDesc[unix.Stdin] = "stdin"
relayBuffer[unix.Stdin] = new(bytes.Buffer)
relayMap[terminalFD] = unix.Stdout
relayDesc[terminalFD] = "container terminal output"
readDesc[unix.Stdin] = "stdin"
relayBuffer[terminalFD] = new(bytes.Buffer)
writeDesc[terminalFD] = "container terminal input"
relayMap[terminalFD] = unix.Stdout
readDesc[terminalFD] = "container terminal output"
relayBuffer[unix.Stdout] = new(bytes.Buffer)
writeDesc[unix.Stdout] = "output"
reading = 2
}
if copyStdio {
// Input from our stdin, output from the stdout and stderr pipes.
relayMap[unix.Stdin] = stdioPipe[unix.Stdin][1]
relayDesc[unix.Stdin] = "stdin"
relayBuffer[unix.Stdin] = new(bytes.Buffer)
readDesc[unix.Stdin] = "stdin"
relayBuffer[stdioPipe[unix.Stdin][1]] = new(bytes.Buffer)
writeDesc[stdioPipe[unix.Stdin][1]] = "container stdin"
relayMap[stdioPipe[unix.Stdout][0]] = unix.Stdout
relayDesc[stdioPipe[unix.Stdout][0]] = "container stdout"
relayBuffer[stdioPipe[unix.Stdout][0]] = new(bytes.Buffer)
readDesc[stdioPipe[unix.Stdout][0]] = "container stdout"
relayBuffer[unix.Stdout] = new(bytes.Buffer)
writeDesc[unix.Stdout] = "stdout"
relayMap[stdioPipe[unix.Stderr][0]] = unix.Stderr
relayDesc[stdioPipe[unix.Stderr][0]] = "container stderr"
relayBuffer[stdioPipe[unix.Stderr][0]] = new(bytes.Buffer)
readDesc[stdioPipe[unix.Stderr][0]] = "container stderr"
relayBuffer[unix.Stderr] = new(bytes.Buffer)
writeDesc[unix.Stderr] = "stderr"
reading = 3
}
// Set our reading descriptors to non-blocking.
for fd := range relayMap {
if err := unix.SetNonblock(fd, true); err != nil {
logrus.Errorf("error setting %s to nonblocking: %v", relayDesc[fd], err)
logrus.Errorf("error setting %s to nonblocking: %v", readDesc[fd], err)
return
}
}
// A helper that returns false if err is an error that would cause us
// to give up.
logIfNotRetryable := func(err error, what string) (retry bool) {
if err == nil {
return true
}
if errno, isErrno := err.(syscall.Errno); isErrno {
switch errno {
case syscall.EINTR, syscall.EAGAIN:
return true
}
}
logrus.Error(what)
return false
}
// Pass data back and forth.
pollTimeout := -1
for {
// Start building the list of descriptors to poll.
pollFds := make([]unix.PollFd, 0, reading+1)
@@ -1451,23 +1471,8 @@ func runCopyStdio(stdio *sync.WaitGroup, copyStdio bool, stdioPipe [][]int, copy
}
buf := make([]byte, 8192)
// Wait for new data from any input descriptor, or a notification that we're done.
nevents, err := unix.Poll(pollFds, -1)
if err != nil {
if errno, isErrno := err.(syscall.Errno); isErrno {
switch errno {
case syscall.EINTR:
continue
default:
logrus.Errorf("unable to wait for stdio/terminal data to relay: %v", err)
return
}
} else {
logrus.Errorf("unable to wait for stdio/terminal data to relay: %v", err)
return
}
}
if nevents == 0 {
logrus.Errorf("unexpected no data, no error waiting for terminal data to relay")
_, err := unix.Poll(pollFds, pollTimeout)
if !logIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) {
return
}
var removes []int
@@ -1487,22 +1492,13 @@ func runCopyStdio(stdio *sync.WaitGroup, copyStdio bool, stdioPipe [][]int, copy
}
continue
}
// Copy whatever we read to wherever it needs to be sent.
// Read whatever there is to be read.
readFD := int(pollFd.Fd)
writeFD, needToRelay := relayMap[readFD]
if needToRelay {
n, err := unix.Read(readFD, buf)
if err != nil {
if errno, isErrno := err.(syscall.Errno); isErrno {
switch errno {
default:
logrus.Errorf("unable to read %s: %v", relayDesc[readFD], err)
case syscall.EINTR, syscall.EAGAIN:
}
} else {
logrus.Errorf("unable to wait for %s data to relay: %v", relayDesc[readFD], err)
}
continue
if !logIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) {
return
}
// If it's zero-length on our stdin and we're
// using pipes, it's an EOF, so close the stdin
@@ -1512,18 +1508,28 @@ func runCopyStdio(stdio *sync.WaitGroup, copyStdio bool, stdioPipe [][]int, copy
stdioPipe[unix.Stdin][1] = -1
}
if n > 0 {
// Buffer the data in case we're blocked on where they need to go.
relayBuffer[readFD].Write(buf[:n])
// Try to drain the buffer.
n, err = unix.Write(writeFD, relayBuffer[readFD].Bytes())
if err != nil {
logrus.Errorf("unable to write %s: %v", relayDesc[readFD], err)
return
}
relayBuffer[readFD].Next(n)
// Buffer the data in case we get blocked on where they need to go.
relayBuffer[writeFD].Write(buf[:n])
}
}
}
// Try to drain the output buffers. Set the default timeout
// for the next poll() to 100ms if we still have data to write.
pollTimeout = -1
for writeFD := range relayBuffer {
if relayBuffer[writeFD].Len() > 0 {
n, err := unix.Write(writeFD, relayBuffer[writeFD].Bytes())
if !logIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) {
return
}
if n > 0 {
relayBuffer[writeFD].Next(n)
}
}
if relayBuffer[writeFD].Len() > 0 {
pollTimeout = 100
}
}
// Remove any descriptors which we don't need to poll any more from the poll descriptor list.
for _, remove := range removes {
delete(relayMap, remove)
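The reworked loop above decouples reading from writing: each incoming descriptor feeds a buffer keyed by its destination, pending writes are retried on later iterations, and the poll timeout drops to 100ms while any buffer still holds data. Below is a compressed, self-contained sketch of that pattern only; it is a hypothetical helper, not the buildah code, and omits the terminal/stdin-EOF handling shown in the diff.

package example

import (
	"bytes"

	"golang.org/x/sys/unix"
)

// buffered reports whether any destination still has unwritten data.
func buffered(buffers map[int]*bytes.Buffer) bool {
	for _, b := range buffers {
		if b.Len() > 0 {
			return true
		}
	}
	return false
}

// relay copies data from each key of relayMap to its value until every source
// reports EOF and all buffers are drained, never letting a slow writer stall
// the poll loop.
func relay(relayMap map[int]int) error {
	buffers := make(map[int]*bytes.Buffer)
	for readFD, writeFD := range relayMap {
		buffers[writeFD] = new(bytes.Buffer)
		if err := unix.SetNonblock(readFD, true); err != nil {
			return err
		}
	}
	buf := make([]byte, 8192)
	pollTimeout := -1
	for len(relayMap) > 0 || buffered(buffers) {
		// Poll every remaining source descriptor.
		pollFds := make([]unix.PollFd, 0, len(relayMap))
		for readFD := range relayMap {
			pollFds = append(pollFds, unix.PollFd{Fd: int32(readFD), Events: unix.POLLIN | unix.POLLHUP})
		}
		if _, err := unix.Poll(pollFds, pollTimeout); err != nil && err != unix.EINTR && err != unix.EAGAIN {
			return err
		}
		for _, pfd := range pollFds {
			if pfd.Revents&(unix.POLLIN|unix.POLLHUP) == 0 {
				continue
			}
			readFD := int(pfd.Fd)
			n, err := unix.Read(readFD, buf)
			if err != nil && err != unix.EINTR && err != unix.EAGAIN {
				return err
			}
			if n == 0 {
				// EOF: stop watching this source.
				delete(relayMap, readFD)
				continue
			}
			if n > 0 {
				// Park the data with its destination; it is flushed below.
				buffers[relayMap[readFD]].Write(buf[:n])
			}
		}
		// Drain what we can; if anything is left over, poll again within 100ms.
		pollTimeout = -1
		for writeFD, b := range buffers {
			if b.Len() == 0 {
				continue
			}
			if n, err := unix.Write(writeFD, b.Bytes()); err == nil && n > 0 {
				b.Next(n)
			} else if err != nil && err != unix.EINTR && err != unix.EAGAIN {
				return err
			}
			if b.Len() > 0 {
				pollTimeout = 100
			}
		}
	}
	return nil
}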


@@ -4,6 +4,7 @@ import (
"fmt"
"io"
"net/url"
"os"
"path"
"strings"
@@ -233,3 +234,12 @@ func WriteError(w io.Writer, err error, lastError error) error {
}
return err
}
// Runtime is the default command to use to run the container.
func Runtime() string {
runtime := os.Getenv("BUILDAH_RUNTIME")
if runtime != "" {
return runtime
}
return DefaultRuntime
}
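A small usage sketch (not part of the diff) of the new helper, showing that the BUILDAH_RUNTIME environment variable now overrides the compiled-in default that the --runtime flag advertises. The runtime path below is a placeholder.

package main

import (
	"fmt"
	"os"

	"github.com/projectatomic/buildah/util"
)

func main() {
	// With the variable set, Runtime() returns it verbatim.
	os.Setenv("BUILDAH_RUNTIME", "/usr/local/sbin/runc")
	fmt.Println(util.Runtime()) // "/usr/local/sbin/runc"

	// Without it, Runtime() falls back to the util package's DefaultRuntime.
	os.Unsetenv("BUILDAH_RUNTIME")
	fmt.Println(util.Runtime())
}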