Vendor in latest github.com/projectatomic/buildah

This adds support for Dockerfile.in and fixes some limits
issues on docker build

Also adds support for podman build to read Dockerfile from stdin.

cat Dockerfile | podman build -f - .

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>

Closes: #1209
Approved by: mheon
This commit is contained in:
Daniel J Walsh 2018-08-03 07:27:33 -04:00 committed by Atomic Bot
parent a83f54e9c7
commit 7462ebe830
27 changed files with 2156 additions and 60 deletions

View file

@ -29,6 +29,18 @@ var (
} }
) )
// getDockerfiles normalizes the list of Dockerfile paths given on the
// command line, translating the conventional "-" argument into /dev/stdin
// so that a Dockerfile can be piped in on standard input.
func getDockerfiles(files []string) []string {
	var out []string
	for _, name := range files {
		if name == "-" {
			name = "/dev/stdin"
		}
		out = append(out, name)
	}
	return out
}
func buildCmd(c *cli.Context) error { func buildCmd(c *cli.Context) error {
// The following was taken directly from projectatomic/buildah/cmd/bud.go // The following was taken directly from projectatomic/buildah/cmd/bud.go
// TODO Find a way to vendor more of this in rather than copy from bud // TODO Find a way to vendor more of this in rather than copy from bud
@ -62,7 +74,7 @@ func buildCmd(c *cli.Context) error {
} }
} }
dockerfiles := c.StringSlice("file") dockerfiles := getDockerfiles(c.StringSlice("file"))
format := "oci" format := "oci"
if c.IsSet("format") { if c.IsSet("format") {
format = strings.ToLower(c.String("format")) format = strings.ToLower(c.String("format"))

View file

@ -11,6 +11,8 @@ podman\-build - Build a container image using a Dockerfile.
The build context directory can be specified as the http(s) URL of an archive, git repository or Dockerfile. The build context directory can be specified as the http(s) URL of an archive, git repository or Dockerfile.
Dockerfiles ending with a ".in" suffix will be preprocessed via CPP(1). This can be useful to decompose Dockerfiles into several reusable parts that can be used via CPP's **#include** directive. Notice, a Dockerfile.in file can still be used by other tools when manually preprocessing them via `cpp -E`.
When the URL is an archive, the contents of the URL is downloaded to a temporary location and extracted before execution. When the URL is an archive, the contents of the URL is downloaded to a temporary location and extracted before execution.
When the URL is a Dockerfile, the Dockerfile is downloaded to a temporary location. When the URL is a Dockerfile, the Dockerfile is downloaded to a temporary location.
@ -180,6 +182,8 @@ If a build context is not specified, and at least one Dockerfile is a
local file, the directory in which it resides will be used as the build local file, the directory in which it resides will be used as the build
context. context.
If you specify `-f -`, the Dockerfile contents will be read from stdin.
**--force-rm** *bool-value* **--force-rm** *bool-value*
Always remove intermediate containers after a build, even if the build is unsuccessful. Always remove intermediate containers after a build, even if the build is unsuccessful.
@ -522,8 +526,12 @@ podman build .
podman build -f Dockerfile.simple . podman build -f Dockerfile.simple .
cat ~/Dockerfile | podman build -f - .
podman build -f Dockerfile.simple -f Dockerfile.notsosimple podman build -f Dockerfile.simple -f Dockerfile.notsosimple
podman build -f Dockerfile.in ~
podman build -t imageName . podman build -t imageName .
podman build --tls-verify=true -t imageName -f Dockerfile.simple podman build --tls-verify=true -t imageName -f Dockerfile.simple

View file

@ -0,0 +1,7 @@
FROM alpine:latest
#include "common"
RUNHELLO
#include "install-base"

View file

@ -0,0 +1,5 @@
FROM alpine:latest
#include "common"
#error

View file

@ -0,0 +1,3 @@
#define RUNHELLO RUN echo "Hello world!"
RUN touch /etc/hello-world.txt

View file

@ -0,0 +1,3 @@
RUN apk update
RUN apk add git curl

View file

@ -16,9 +16,9 @@
HOME=`pwd` HOME=`pwd`
######## echo ########################################################
# test "build-from-scratch" echo test "build-from-scratch"
######## echo ########################################################
TARGET=scratch-image TARGET=scratch-image
podman build -q=True -t $TARGET $HOME/test/build/from-scratch podman build -q=True -t $TARGET $HOME/test/build/from-scratch
CID=$(buildah from $TARGET) CID=$(buildah from $TARGET)
@ -33,9 +33,9 @@ HOME=`pwd`
podman images -q podman images -q
######## echo ########################################################
# test "build-preserve-subvolumes" echo test "build-preserve-subvolumes"
######## echo ########################################################
TARGET=volume-image TARGET=volume-image
podman build -t $TARGET $HOME/test/build/preserve-volumes podman build -t $TARGET $HOME/test/build/preserve-volumes
CID=$(buildah from $TARGET) CID=$(buildah from $TARGET)
@ -50,9 +50,9 @@ HOME=`pwd`
podman rmi $(buildah --debug=false images -q) podman rmi $(buildah --debug=false images -q)
buildah --debug=false images -q buildah --debug=false images -q
######## echo ########################################################
# test "build-git-context" echo test "build-git-context"
######## echo ########################################################
TARGET=giturl-image TARGET=giturl-image
# Any repo should do, but this one is small and is FROM: scratch. # Any repo should do, but this one is small and is FROM: scratch.
GITREPO=git://github.com/projectatomic/nulecule-library GITREPO=git://github.com/projectatomic/nulecule-library
@ -63,9 +63,9 @@ HOME=`pwd`
podman images -q podman images -q
######## echo ########################################################
# test "build-github-context" echo test "build-github-context"
######## echo ########################################################
TARGET=github-image TARGET=github-image
# Any repo should do, but this one is small and is FROM: scratch. # Any repo should do, but this one is small and is FROM: scratch.
GITREPO=github.com/projectatomic/nulecule-library GITREPO=github.com/projectatomic/nulecule-library
@ -77,9 +77,9 @@ HOME=`pwd`
podman images -q podman images -q
######## echo ########################################################
# test "build-additional-tags" echo test "build-additional-tags"
######## echo ########################################################
TARGET=scratch-image TARGET=scratch-image
TARGET2=another-scratch-image TARGET2=another-scratch-image
TARGET3=so-many-scratch-images TARGET3=so-many-scratch-images
@ -95,9 +95,9 @@ HOME=`pwd`
podman images -q podman images -q
######## echo ########################################################
# test "build-volume-perms" echo test "build-volume-perms"
######## echo ########################################################
TARGET=volume-image TARGET=volume-image
podman build -t $TARGET $HOME/test/build/volume-perms podman build -t $TARGET $HOME/test/build/volume-perms
CID=$(buildah from $TARGET) CID=$(buildah from $TARGET)
@ -110,9 +110,9 @@ HOME=`pwd`
podman images -q podman images -q
######## echo ########################################################
# test "build-from-glob" echo test "build-from-glob"
######## echo ########################################################
TARGET=alpine-image TARGET=alpine-image
podman build -t $TARGET -file Dockerfile2.glob $HOME/test/build/from-multiple-files podman build -t $TARGET -file Dockerfile2.glob $HOME/test/build/from-multiple-files
CID=$(buildah from $TARGET) CID=$(buildah from $TARGET)
@ -124,9 +124,9 @@ HOME=`pwd`
podman images -q podman images -q
######## echo ########################################################
# test "build-from-multiple-files-one-from" echo test "build-from-multiple-files-one-from"
######## echo ########################################################
TARGET=scratch-image TARGET=scratch-image
podman build -t $TARGET -file $HOME/test/build/from-multiple-files/Dockerfile1.scratch -file $HOME/test/build/from-multiple-files/Dockerfile2.nofrom podman build -t $TARGET -file $HOME/test/build/from-multiple-files/Dockerfile1.scratch -file $HOME/test/build/from-multiple-files/Dockerfile2.nofrom
CID=$(buildah from $TARGET) CID=$(buildah from $TARGET)
@ -146,9 +146,9 @@ HOME=`pwd`
buildah --debug=false images -q buildah --debug=false images -q
######## echo ########################################################
# test "build-from-multiple-files-two-froms" echo test "build-from-multiple-files-two-froms"
######## echo ########################################################
TARGET=scratch-image TARGET=scratch-image
podman build -t $TARGET -file $HOME/test/build/from-multiple-files/Dockerfile1.scratch -file $HOME/test/build/from-multiple-files/Dockerfile2.withfrom podman build -t $TARGET -file $HOME/test/build/from-multiple-files/Dockerfile1.scratch -file $HOME/test/build/from-multiple-files/Dockerfile2.withfrom
CID=$(buildah from $TARGET) CID=$(buildah from $TARGET)
@ -170,3 +170,29 @@ HOME=`pwd`
buildah rm $CID buildah rm $CID
podman rmi $(buildah --debug=false images -q) podman rmi $(buildah --debug=false images -q)
buildah --debug=false images -q buildah --debug=false images -q
echo ########################################################
echo test "build-from-multiple-files-two-froms" with "-f -"
echo ########################################################
TARGET=scratch-image
cat $HOME/test/build/from-multiple-files/Dockerfile1.alpine | podman build -t ${TARGET} -file - -file Dockerfile2.withfrom $HOME/test/build/from-multiple-files
CID=$(buildah from $TARGET)
ROOT=$(buildah mount $CID)
cmp $ROOT/Dockerfile1 $HOME/test/build/from-multiple-files/Dockerfile1.alpine
cmp $ROOT/Dockerfile2.withfrom $HOME/test/build/from-multiple-files/Dockerfile2.withfrom
test -s $ROOT/etc/passwd
buildah rm $CID
podman rmi $(buildah --debug=false images -q)
buildah --debug=false images -q
echo ########################################################
echo test "build with preprocessor"
echo ########################################################
# Fix: assign TARGET (uppercase); the original set lowercase "target", which
# was never read, so ${TARGET} below still held the previous test's value.
TARGET=alpine-image
podman build -q -t ${TARGET} -f Decomposed.in $HOME/test/build/preprocess
buildah --debug=false images
CID=$(buildah from $TARGET)
buildah rm $CID
podman rmi $(buildah --debug=false images -q)
buildah --debug=false images -q

View file

@ -90,7 +90,7 @@ k8s.io/kube-openapi 275e2ce91dec4c05a4094a7b1daee5560b555ac9 https://github.com/
k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e https://github.com/kubernetes/utils k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e https://github.com/kubernetes/utils
github.com/mrunalp/fileutils master github.com/mrunalp/fileutils master
github.com/varlink/go master github.com/varlink/go master
github.com/projectatomic/buildah 4976d8c58367e835280125e6843a279cd8843b18 github.com/projectatomic/buildah 35a37f36d37bf84397d7f79f6bb8649f728c19f1
github.com/Nvveen/Gotty master github.com/Nvveen/Gotty master
github.com/fsouza/go-dockerclient master github.com/fsouza/go-dockerclient master
github.com/openshift/imagebuilder master github.com/openshift/imagebuilder master

View file

@ -360,7 +360,9 @@ type BuilderOptions struct {
// after processing the AddCapabilities set, when running commands in the // after processing the AddCapabilities set, when running commands in the
// container. If a capability appears in both lists, it will be dropped. // container. If a capability appears in both lists, it will be dropped.
DropCapabilities []string DropCapabilities []string
// ImageOnly is a boolean designating that we wish to only pull the image and
// to not create a container from it. Used by pull command.
ImageOnly bool
CommonBuildOpts *CommonBuildOptions CommonBuildOpts *CommonBuildOptions
} }

1248
vendor/github.com/projectatomic/buildah/chroot/run.go generated vendored Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,142 @@
// +build linux,seccomp
package chroot
import (
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
libseccomp "github.com/seccomp/libseccomp-golang"
"github.com/sirupsen/logrus"
)
// setSeccomp sets the seccomp filter for ourselves and any processes that we'll start.
//
// It translates the OCI runtime-spec seccomp configuration in spec into a
// libseccomp filter, adds the configured architectures and per-syscall rules,
// applies the no-new-privileges bit, and loads the filter into the kernel.
// Returns nil immediately when the spec carries no seccomp configuration.
func setSeccomp(spec *specs.Spec) error {
	logrus.Debugf("setting seccomp configuration")
	if spec.Linux.Seccomp == nil {
		return nil
	}
	// Map a runtime-spec action onto the libseccomp equivalent.
	mapAction := func(specAction specs.LinuxSeccompAction) libseccomp.ScmpAction {
		switch specAction {
		case specs.ActKill:
			return libseccomp.ActKill
		case specs.ActTrap:
			return libseccomp.ActTrap
		case specs.ActErrno:
			return libseccomp.ActErrno
		case specs.ActTrace:
			return libseccomp.ActTrace
		case specs.ActAllow:
			return libseccomp.ActAllow
		}
		return libseccomp.ActInvalid
	}
	// Map a runtime-spec architecture onto the libseccomp equivalent.
	mapArch := func(specArch specs.Arch) libseccomp.ScmpArch {
		switch specArch {
		case specs.ArchX86:
			return libseccomp.ArchX86
		case specs.ArchX86_64:
			return libseccomp.ArchAMD64
		case specs.ArchX32:
			return libseccomp.ArchX32
		case specs.ArchARM:
			return libseccomp.ArchARM
		case specs.ArchAARCH64:
			return libseccomp.ArchARM64
		case specs.ArchMIPS:
			return libseccomp.ArchMIPS
		case specs.ArchMIPS64:
			return libseccomp.ArchMIPS64
		case specs.ArchMIPS64N32:
			return libseccomp.ArchMIPS64N32
		case specs.ArchMIPSEL:
			return libseccomp.ArchMIPSEL
		case specs.ArchMIPSEL64:
			return libseccomp.ArchMIPSEL64
		case specs.ArchMIPSEL64N32:
			return libseccomp.ArchMIPSEL64N32
		case specs.ArchPPC:
			return libseccomp.ArchPPC
		case specs.ArchPPC64:
			return libseccomp.ArchPPC64
		case specs.ArchPPC64LE:
			return libseccomp.ArchPPC64LE
		case specs.ArchS390:
			return libseccomp.ArchS390
		case specs.ArchS390X:
			return libseccomp.ArchS390X
		case specs.ArchPARISC:
			/* fallthrough */ /* for now */
		case specs.ArchPARISC64:
			/* fallthrough */ /* for now */
		}
		return libseccomp.ArchInvalid
	}
	// Map a runtime-spec argument comparison operator onto the libseccomp equivalent.
	mapOp := func(op specs.LinuxSeccompOperator) libseccomp.ScmpCompareOp {
		switch op {
		case specs.OpNotEqual:
			return libseccomp.CompareNotEqual
		case specs.OpLessThan:
			return libseccomp.CompareLess
		case specs.OpLessEqual:
			return libseccomp.CompareLessOrEqual
		case specs.OpEqualTo:
			return libseccomp.CompareEqual
		case specs.OpGreaterEqual:
			return libseccomp.CompareGreaterEqual
		case specs.OpGreaterThan:
			return libseccomp.CompareGreater
		case specs.OpMaskedEqual:
			return libseccomp.CompareMaskedEqual
		}
		return libseccomp.CompareInvalid
	}
	filter, err := libseccomp.NewFilter(mapAction(spec.Linux.Seccomp.DefaultAction))
	if err != nil {
		return errors.Wrapf(err, "error creating seccomp filter with default action %q", spec.Linux.Seccomp.DefaultAction)
	}
	for _, arch := range spec.Linux.Seccomp.Architectures {
		if err = filter.AddArch(mapArch(arch)); err != nil {
			return errors.Wrapf(err, "error adding architecture %q(%q) to seccomp filter", arch, mapArch(arch))
		}
	}
	for _, rule := range spec.Linux.Seccomp.Syscalls {
		// Resolve each syscall name to its number; unknown names are skipped
		// rather than treated as fatal, since the host may not know them.
		scnames := make(map[libseccomp.ScmpSyscall]string)
		for _, name := range rule.Names {
			scnum, err := libseccomp.GetSyscallFromName(name)
			if err != nil {
				// Fix: the format string has three %q verbs; supply the
				// syscall name for the trailing "for %q" as well.
				logrus.Debugf("error mapping syscall %q to a syscall, ignoring %q rule for %q", name, rule.Action, name)
				continue
			}
			scnames[scnum] = name
		}
		for scnum := range scnames {
			// Rules without argument conditions apply unconditionally.
			if len(rule.Args) == 0 {
				if err = filter.AddRule(scnum, mapAction(rule.Action)); err != nil {
					return errors.Wrapf(err, "error adding a rule (%q:%q) to seccomp filter", scnames[scnum], rule.Action)
				}
				continue
			}
			var conditions []libseccomp.ScmpCondition
			for _, arg := range rule.Args {
				condition, err := libseccomp.MakeCondition(arg.Index, mapOp(arg.Op), arg.Value, arg.ValueTwo)
				if err != nil {
					return errors.Wrapf(err, "error building a seccomp condition %d:%v:%d:%d", arg.Index, arg.Op, arg.Value, arg.ValueTwo)
				}
				conditions = append(conditions, condition)
			}
			if err = filter.AddRuleConditional(scnum, mapAction(rule.Action), conditions); err != nil {
				return errors.Wrapf(err, "error adding a conditional rule (%q:%q) to seccomp filter", scnames[scnum], rule.Action)
			}
		}
	}
	if err = filter.SetNoNewPrivsBit(spec.Process.NoNewPrivileges); err != nil {
		return errors.Wrapf(err, "error setting no-new-privileges bit to %v", spec.Process.NoNewPrivileges)
	}
	// Release the filter handle regardless of whether loading succeeded.
	err = filter.Load()
	filter.Release()
	if err != nil {
		return errors.Wrapf(err, "error activating seccomp filter")
	}
	return nil
}

View file

@ -0,0 +1,15 @@
// +build !linux !seccomp
package chroot
import (
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
// setSeccomp rejects any seccomp configuration, since this build was
// compiled without seccomp support.
func setSeccomp(spec *specs.Spec) error {
	if spec.Linux.Seccomp == nil {
		return nil
	}
	return errors.New("configured a seccomp filter without seccomp support?")
}

View file

@ -0,0 +1,22 @@
// +build linux,selinux
package chroot
import (
"github.com/opencontainers/runtime-spec/specs-go"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// setSelinuxLabel sets the process label for child processes that we'll start.
// It is a no-op when no label is configured or SELinux enforcement is disabled.
func setSelinuxLabel(spec *specs.Spec) error {
	logrus.Debugf("setting selinux label")
	processLabel := spec.Process.SelinuxLabel
	if processLabel == "" || selinux.EnforceMode() == selinux.Disabled {
		return nil
	}
	if err := label.SetProcessLabel(processLabel); err != nil {
		return errors.Wrapf(err, "error setting process label to %q", processLabel)
	}
	return nil
}

View file

@ -0,0 +1,18 @@
// +build !linux !selinux
package chroot
import (
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
// setSelinuxLabel rejects any SELinux labels in the spec, since this build
// was compiled without SELinux support.
func setSelinuxLabel(spec *specs.Spec) error {
	switch {
	case spec.Linux.MountLabel != "":
		return errors.New("configured an SELinux mount label without SELinux support?")
	case spec.Process.SelinuxLabel != "":
		return errors.New("configured an SELinux process label without SELinux support?")
	}
	return nil
}

View file

@ -0,0 +1,15 @@
// +build !linux
package chroot
import (
"io"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
// RunUsingChroot is not supported on this platform; it always returns an error.
func RunUsingChroot(spec *specs.Spec, bundlePath string, stdin io.Reader, stdout, stderr io.Writer) (err error) {
	err = errors.Errorf("--isolation chroot is not supported on this platform")
	return err
}

15
vendor/github.com/projectatomic/buildah/chroot/util.go generated vendored Normal file
View file

@ -0,0 +1,15 @@
// +build linux
package chroot
// dedupeStringSlice returns a copy of slice with duplicate entries removed,
// preserving the order of first occurrence.
func dedupeStringSlice(slice []string) []string {
	seen := make(map[string]struct{})
	result := make([]string, 0, len(slice))
	for _, item := range slice {
		if _, ok := seen[item]; ok {
			continue
		}
		seen[item] = struct{}{}
		result = append(result, item)
	}
	return result
}

View file

@ -1,11 +1,14 @@
package imagebuildah package imagebuildah
import ( import (
"bytes"
"context" "context"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"os" "os"
"os/exec"
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
@ -215,6 +218,7 @@ type Executor struct {
noCache bool noCache bool
removeIntermediateCtrs bool removeIntermediateCtrs bool
forceRmIntermediateCtrs bool forceRmIntermediateCtrs bool
containerIDs []string // Stores the IDs of the successful intermediate containers used during layer build
} }
// withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME. // withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME.
@ -684,6 +688,7 @@ func (b *Executor) Prepare(ctx context.Context, ib *imagebuilder.Builder, node *
// Add the top layer of this image to b.topLayers so we can keep track of them // Add the top layer of this image to b.topLayers so we can keep track of them
// when building with cached images. // when building with cached images.
b.topLayers = append(b.topLayers, builder.TopLayer) b.topLayers = append(b.topLayers, builder.TopLayer)
logrus.Debugln("Container ID:", builder.ContainerID)
return nil return nil
} }
@ -811,12 +816,8 @@ func (b *Executor) Execute(ctx context.Context, ib *imagebuilder.Builder, node *
// it is used to create the container for the next step. // it is used to create the container for the next step.
imgID = cacheID imgID = cacheID
} }
// Delete the intermediate container if b.removeIntermediateCtrs is true. // Add container ID of successful intermediate container to b.containerIDs
if b.removeIntermediateCtrs { b.containerIDs = append(b.containerIDs, b.builder.ContainerID)
if err := b.Delete(); err != nil {
return errors.Wrap(err, "error deleting intermediate container")
}
}
// Prepare for the next step with imgID as the new base image. // Prepare for the next step with imgID as the new base image.
if i != len(children)-1 { if i != len(children)-1 {
if err := b.Prepare(ctx, ib, node, imgID); err != nil { if err := b.Prepare(ctx, ib, node, imgID); err != nil {
@ -1122,11 +1123,14 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) error
if len(stages) == 0 { if len(stages) == 0 {
errors.New("error building: no stages to build") errors.New("error building: no stages to build")
} }
var stageExecutor *Executor var (
stageExecutor *Executor
lastErr error
)
for _, stage := range stages { for _, stage := range stages {
stageExecutor = b.withName(stage.Name, stage.Position) stageExecutor = b.withName(stage.Name, stage.Position)
if err := stageExecutor.Prepare(ctx, stage.Builder, stage.Node, ""); err != nil { if err := stageExecutor.Prepare(ctx, stage.Builder, stage.Node, ""); err != nil {
return err lastErr = err
} }
// Always remove the intermediate/build containers, even if the build was unsuccessful. // Always remove the intermediate/build containers, even if the build was unsuccessful.
// If building with layers, remove all intermediate/build containers if b.forceRmIntermediateCtrs // If building with layers, remove all intermediate/build containers if b.forceRmIntermediateCtrs
@ -1135,8 +1139,18 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) error
defer stageExecutor.Delete() defer stageExecutor.Delete()
} }
if err := stageExecutor.Execute(ctx, stage.Builder, stage.Node); err != nil { if err := stageExecutor.Execute(ctx, stage.Builder, stage.Node); err != nil {
return err lastErr = err
} }
// Delete the successful intermediate containers if an error in the build
// process occurs and b.removeIntermediateCtrs is true.
if lastErr != nil {
if b.removeIntermediateCtrs {
stageExecutor.deleteSuccessfulIntermediateCtrs()
}
return lastErr
}
b.containerIDs = append(b.containerIDs, stageExecutor.containerIDs...)
} }
if !b.layers && !b.noCache { if !b.layers && !b.noCache {
@ -1154,7 +1168,9 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) error
// the removal of intermediate/build containers will be handled by the // the removal of intermediate/build containers will be handled by the
// defer statement above. // defer statement above.
if b.removeIntermediateCtrs && (b.layers || b.noCache) { if b.removeIntermediateCtrs && (b.layers || b.noCache) {
return stageExecutor.Delete() if err := b.deleteSuccessfulIntermediateCtrs(); err != nil {
return errors.Errorf("Failed to cleanup intermediate containers")
}
} }
return nil return nil
} }
@ -1173,6 +1189,8 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
} }
}(dockerfiles...) }(dockerfiles...)
for _, dfile := range paths { for _, dfile := range paths {
var data io.ReadCloser
if strings.HasPrefix(dfile, "http://") || strings.HasPrefix(dfile, "https://") { if strings.HasPrefix(dfile, "http://") || strings.HasPrefix(dfile, "https://") {
logrus.Debugf("reading remote Dockerfile %q", dfile) logrus.Debugf("reading remote Dockerfile %q", dfile)
resp, err := http.Get(dfile) resp, err := http.Get(dfile)
@ -1183,7 +1201,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
resp.Body.Close() resp.Body.Close()
return errors.Errorf("no contents in %q", dfile) return errors.Errorf("no contents in %q", dfile)
} }
dockerfiles = append(dockerfiles, resp.Body) data = resp.Body
} else { } else {
if !filepath.IsAbs(dfile) { if !filepath.IsAbs(dfile) {
logrus.Debugf("resolving local Dockerfile %q", dfile) logrus.Debugf("resolving local Dockerfile %q", dfile)
@ -1199,12 +1217,23 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
contents.Close() contents.Close()
return errors.Wrapf(err, "error reading info about %q", dfile) return errors.Wrapf(err, "error reading info about %q", dfile)
} }
if dinfo.Size() == 0 { if dinfo.Mode().IsRegular() && dinfo.Size() == 0 {
contents.Close() contents.Close()
return errors.Wrapf(err, "no contents in %q", dfile) return errors.Wrapf(err, "no contents in %q", dfile)
} }
dockerfiles = append(dockerfiles, contents) data = contents
} }
// pre-process Dockerfiles with ".in" suffix
if strings.HasSuffix(dfile, ".in") {
pData, err := preprocessDockerfileContents(data, options.ContextDirectory)
if err != nil {
return err
}
data = *pData
}
dockerfiles = append(dockerfiles, data)
} }
mainNode, err := imagebuilder.ParseDockerfile(dockerfiles[0]) mainNode, err := imagebuilder.ParseDockerfile(dockerfiles[0])
if err != nil { if err != nil {
@ -1225,3 +1254,67 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
stages := imagebuilder.NewStages(mainNode, b) stages := imagebuilder.NewStages(mainNode, b)
return exec.Build(ctx, stages) return exec.Build(ctx, stages)
} }
// deleteSuccessfulIntermediateCtrs removes every container whose ID was
// recorded in b.containerIDs during the build, logging each failure and
// returning the most recent deletion error (nil when all removals succeed).
func (b *Executor) deleteSuccessfulIntermediateCtrs() error {
	var lastErr error
	for _, id := range b.containerIDs {
		err := b.store.DeleteContainer(id)
		if err == nil {
			continue
		}
		logrus.Errorf("error deleting build container %q: %v\n", id, err)
		lastErr = err
	}
	return lastErr
}
// preprocessDockerfileContents runs CPP(1) in preprocess-only mode on the input
// dockerfile content and will use ctxDir as the base include path.
//
// The preprocessed output is returned as a new ReadCloser; r is fully
// consumed but not closed here.
//
// Note: we cannot use cmd.StdoutPipe() as cmd.Wait() closes it.
func preprocessDockerfileContents(r io.ReadCloser, ctxDir string) (rdrCloser *io.ReadCloser, err error) {
	// cpp must be present at this fixed path; give a targeted error when
	// it is simply not installed, otherwise report the stat failure as-is.
	cppPath := "/usr/bin/cpp"
	if _, err = os.Stat(cppPath); err != nil {
		if os.IsNotExist(err) {
			err = errors.Errorf("error: Dockerfile.in support requires %s to be installed", cppPath)
		}
		return nil, err
	}
	// Capture stdout (the preprocessed Dockerfile) and stderr (diagnostics)
	// in memory; see the note above about why StdoutPipe() is unusable.
	stdout := bytes.Buffer{}
	stderr := bytes.Buffer{}
	// -E: preprocess only; -iquote ctxDir: resolve #include "..." relative
	// to the build context; "-": read the Dockerfile from stdin.
	cmd := exec.Command(cppPath, "-E", "-iquote", ctxDir, "-")
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	pipe, err := cmd.StdinPipe()
	if err != nil {
		return nil, err
	}
	// Close the stdin pipe on any error path below; on success it is
	// closed explicitly once the input has been copied in.
	defer func() {
		if err != nil {
			pipe.Close()
		}
	}()
	if err = cmd.Start(); err != nil {
		return nil, err
	}
	// Feed the Dockerfile contents to cpp's stdin.
	if _, err = io.Copy(pipe, r); err != nil {
		return nil, err
	}
	// Signal EOF to cpp so it can finish and exit.
	pipe.Close()
	if err = cmd.Wait(); err != nil {
		// Attach cpp's diagnostics to the error when it produced any.
		if stderr.Len() > 0 {
			err = fmt.Errorf("%v: %s", err, strings.TrimSpace(stderr.String()))
		}
		return nil, errors.Wrapf(err, "error pre-processing Dockerfile")
	}
	// Wrap the captured output so callers can treat it like any other
	// Dockerfile source stream.
	rc := ioutil.NopCloser(bytes.NewReader(stdout.Bytes()))
	return &rc, nil
}

View file

@ -248,6 +248,15 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
defer src.Close() defer src.Close()
} }
// If the pull command was used, we only pull the image,
// we don't create a container.
if options.ImageOnly {
imgBuilder := &Builder{
FromImageID: imageID,
}
return imgBuilder, nil
}
name := "working-container" name := "working-container"
if options.Container != "" { if options.Container != "" {
name = options.Container name = options.Container

View file

@ -24,6 +24,7 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/urfave/cli" "github.com/urfave/cli"
"golang.org/x/crypto/ssh/terminal" "golang.org/x/crypto/ssh/terminal"
"golang.org/x/sys/unix"
) )
const ( const (
@ -40,6 +41,14 @@ func CommonBuildOptions(c *cli.Context) (*buildah.CommonBuildOptions, error) {
memorySwap int64 memorySwap int64
err error err error
) )
rlim := unix.Rlimit{Cur: 1048576, Max: 1048576}
defaultLimits := []string{}
if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil {
defaultLimits = append(defaultLimits, fmt.Sprintf("nofile=%d:%d", rlim.Cur, rlim.Max))
}
if err := unix.Setrlimit(unix.RLIMIT_NPROC, &rlim); err == nil {
defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", rlim.Cur, rlim.Max))
}
if c.String("memory") != "" { if c.String("memory") != "" {
memoryLimit, err = units.RAMInBytes(c.String("memory")) memoryLimit, err = units.RAMInBytes(c.String("memory"))
if err != nil { if err != nil {
@ -77,7 +86,7 @@ func CommonBuildOptions(c *cli.Context) (*buildah.CommonBuildOptions, error) {
Memory: memoryLimit, Memory: memoryLimit,
MemorySwap: memorySwap, MemorySwap: memorySwap,
ShmSize: c.String("shm-size"), ShmSize: c.String("shm-size"),
Ulimit: c.StringSlice("ulimit"), Ulimit: append(defaultLimits, c.StringSlice("ulimit")...),
Volumes: c.StringSlice("volume"), Volumes: c.StringSlice("volume"),
} }
if err := parseSecurityOpts(c.StringSlice("security-opt"), commonOpts); err != nil { if err := parseSecurityOpts(c.StringSlice("security-opt"), commonOpts); err != nil {
@ -531,12 +540,17 @@ func NamespaceOptions(c *cli.Context) (namespaceOptions buildah.NamespaceOptions
return options, policy, nil return options, policy, nil
} }
func defaultIsolation() buildah.Isolation { func defaultIsolation() (buildah.Isolation, error) {
isolation := os.Getenv("BUILDAH_ISOLATION") isolation, isSet := os.LookupEnv("BUILDAH_ISOLATION")
if strings.HasPrefix(strings.ToLower(isolation), "oci") { if isSet {
return buildah.IsolationOCI if strings.HasPrefix(strings.ToLower(isolation), "oci") {
return buildah.IsolationOCI, nil
} else if strings.HasPrefix(strings.ToLower(isolation), "chroot") {
return buildah.IsolationChroot, nil
}
return 0, errors.Errorf("unrecognized $BUILDAH_ISOLATION value %q", isolation)
} }
return buildah.IsolationDefault return buildah.IsolationDefault, nil
} }
// IsolationOption parses the --isolation flag. // IsolationOption parses the --isolation flag.
@ -544,9 +558,11 @@ func IsolationOption(c *cli.Context) (buildah.Isolation, error) {
if c.String("isolation") != "" { if c.String("isolation") != "" {
if strings.HasPrefix(strings.ToLower(c.String("isolation")), "oci") { if strings.HasPrefix(strings.ToLower(c.String("isolation")), "oci") {
return buildah.IsolationOCI, nil return buildah.IsolationOCI, nil
} else if strings.HasPrefix(strings.ToLower(c.String("isolation")), "chroot") {
return buildah.IsolationChroot, nil
} else { } else {
return buildah.IsolationDefault, errors.Errorf("unrecognized isolation type %q", c.String("isolation")) return buildah.IsolationDefault, errors.Errorf("unrecognized isolation type %q", c.String("isolation"))
} }
} }
return defaultIsolation(), nil return defaultIsolation()
} }

View file

@ -29,6 +29,7 @@ import (
"github.com/opencontainers/selinux/go-selinux/label" "github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/projectatomic/buildah/bind" "github.com/projectatomic/buildah/bind"
"github.com/projectatomic/buildah/chroot"
"github.com/projectatomic/buildah/util" "github.com/projectatomic/buildah/util"
"github.com/projectatomic/libpod/pkg/secrets" "github.com/projectatomic/libpod/pkg/secrets"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -40,7 +41,7 @@ const (
// DefaultWorkingDir is used if none was specified. // DefaultWorkingDir is used if none was specified.
DefaultWorkingDir = "/" DefaultWorkingDir = "/"
// runUsingRuntimeCommand is a command we use as a key for reexec // runUsingRuntimeCommand is a command we use as a key for reexec
runUsingRuntimeCommand = Package + "-runtime" runUsingRuntimeCommand = Package + "-oci-runtime"
) )
// TerminalPolicy takes the value DefaultTerminal, WithoutTerminal, or WithTerminal. // TerminalPolicy takes the value DefaultTerminal, WithoutTerminal, or WithTerminal.
@ -112,6 +113,9 @@ const (
IsolationDefault Isolation = iota IsolationDefault Isolation = iota
// IsolationOCI is a proper OCI runtime. // IsolationOCI is a proper OCI runtime.
IsolationOCI IsolationOCI
// IsolationChroot is a more chroot-like environment: less isolation,
// but with fewer requirements.
IsolationChroot
) )
// String converts a Isolation into a string. // String converts a Isolation into a string.
@ -121,6 +125,8 @@ func (i Isolation) String() string {
return "IsolationDefault" return "IsolationDefault"
case IsolationOCI: case IsolationOCI:
return "IsolationOCI" return "IsolationOCI"
case IsolationChroot:
return "IsolationChroot"
} }
return fmt.Sprintf("unrecognized isolation type %d", i) return fmt.Sprintf("unrecognized isolation type %d", i)
} }
@ -129,10 +135,10 @@ func (i Isolation) String() string {
type RunOptions struct { type RunOptions struct {
// Hostname is the hostname we set for the running container. // Hostname is the hostname we set for the running container.
Hostname string Hostname string
// Isolation is either IsolationDefault or IsolationOCI. // Isolation is either IsolationDefault, IsolationOCI, or IsolationChroot.
Isolation Isolation Isolation Isolation
// Runtime is the name of the command to run. It should accept the same arguments // Runtime is the name of the runtime to run. It should accept the
// that runc does, and produce similar output. // same arguments that runc does, and produce similar output.
Runtime string Runtime string
// Args adds global arguments for the runtime. // Args adds global arguments for the runtime.
Args []string Args []string
@ -792,6 +798,11 @@ func setupNamespaces(g *generate.Generator, namespaceOptions NamespaceOptions, i
} }
} }
} }
if configureNetwork {
for name, val := range util.DefaultNetworkSysctl {
g.AddLinuxSysctl(name, val)
}
}
return configureNetwork, configureNetworks, configureUTS, nil return configureNetwork, configureNetworks, configureUTS, nil
} }
@ -969,8 +980,8 @@ func (b *Builder) Run(command []string, options RunOptions) error {
return err return err
} }
defer func() { defer func() {
if err2 := b.Unmount(); err2 != nil { if err := b.Unmount(); err != nil {
logrus.Errorf("error unmounting container: %v", err2) logrus.Errorf("error unmounting container: %v", err)
} }
}() }()
g.SetRootPath(mountPoint) g.SetRootPath(mountPoint)
@ -1069,6 +1080,8 @@ func (b *Builder) Run(command []string, options RunOptions) error {
switch isolation { switch isolation {
case IsolationOCI: case IsolationOCI:
err = b.runUsingRuntimeSubproc(options, configureNetwork, configureNetworks, spec, mountPoint, path, Package+"-"+filepath.Base(path)) err = b.runUsingRuntimeSubproc(options, configureNetwork, configureNetworks, spec, mountPoint, path, Package+"-"+filepath.Base(path))
case IsolationChroot:
err = chroot.RunUsingChroot(spec, path, options.Stdin, options.Stdout, options.Stderr)
default: default:
err = errors.Errorf("don't know how to run this command") err = errors.Errorf("don't know how to run this command")
} }
@ -1677,7 +1690,7 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
} }
// If the descriptor was closed elsewhere, remove it from our list. // If the descriptor was closed elsewhere, remove it from our list.
if pollFd.Revents&unix.POLLNVAL != 0 { if pollFd.Revents&unix.POLLNVAL != 0 {
logrus.Debugf("error polling descriptor %d: closed?", pollFd.Fd) logrus.Debugf("error polling descriptor %s: closed?", readDesc[int(pollFd.Fd)])
removes[int(pollFd.Fd)] = struct{}{} removes[int(pollFd.Fd)] = struct{}{}
} }
// If the POLLIN flag isn't set, then there's no data to be read from this descriptor. // If the POLLIN flag isn't set, then there's no data to be read from this descriptor.

View file

@ -0,0 +1,110 @@
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <grp.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
#include <unistd.h>
/*
 * Read an integer from the environment variable named envname.  Returns -1
 * if the variable is unset; exits the process if the value is not a valid
 * base-10 integer.  On success the variable is removed from the environment
 * so that it is not inherited by (or re-processed in) child processes.
 */
static int _buildah_unshare_parse_envint(const char *envname) {
	const char *value;
	char *end;
	long parsed;

	value = getenv(envname);
	if (value == NULL) {
		return -1;
	}
	end = NULL;
	parsed = strtol(value, &end, 10);
	if ((end == NULL) || (*end != '\0')) {
		fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, value);
		_exit(1);
	}
	unsetenv(envname);
	return parsed;
}
/*
 * _buildah_unshare runs before the Go runtime starts: it is invoked from an
 * ELF constructor in the cgo wrapper for this package.  It reads the
 * "_Buildah-*" environment variables set by the parent's unshare.Cmd.Start(),
 * creates the requested namespaces, and synchronizes with the parent over
 * inherited pipe descriptors.  When "_Buildah-unshare" is unset it returns
 * immediately, so processes that were not started via unshare.Cmd are
 * unaffected.
 */
void _buildah_unshare(void)
{
	int flags, pidfd, continuefd, n, pgrp, sid, ctty, allow_setgroups;
	char buf[2048];

	/* Bail out unless the parent asked us to unshare something. */
	flags = _buildah_unshare_parse_envint("_Buildah-unshare");
	if (flags == -1) {
		return;
	}
	/* Create the user namespace first and separately: the parent writes our
	 * uid_map/gid_map before letting us continue, and the remaining
	 * namespaces are created afterwards (below) with our new credentials. */
	if ((flags & CLONE_NEWUSER) != 0) {
		if (unshare(CLONE_NEWUSER) == -1) {
			fprintf(stderr, "Error during unshare(CLONE_NEWUSER): %m\n");
			_exit(1);
		}
	}
	/* Report our PID to the parent over the pipe it passed us, if any. */
	pidfd = _buildah_unshare_parse_envint("_Buildah-pid-pipe");
	if (pidfd != -1) {
		snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid());
		if (write(pidfd, buf, strlen(buf)) != strlen(buf)) {
			fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd);
			_exit(1);
		}
		close(pidfd);
	}
	/* Block until the parent finishes its setup.  Any bytes received are an
	 * error message from the parent; EOF with no data means "go ahead". */
	continuefd = _buildah_unshare_parse_envint("_Buildah-continue-pipe");
	if (continuefd != -1) {
		n = read(continuefd, buf, sizeof(buf));
		if (n > 0) {
			fprintf(stderr, "Error: %.*s\n", n, buf);
			_exit(1);
		}
		close(continuefd);
	}
	/* Start a new session if requested. */
	sid = _buildah_unshare_parse_envint("_Buildah-setsid");
	if (sid == 1) {
		if (setsid() == -1) {
			fprintf(stderr, "Error during setsid: %m\n");
			_exit(1);
		}
	}
	/* Start a new process group if requested. */
	pgrp = _buildah_unshare_parse_envint("_Buildah-setpgrp");
	if (pgrp == 1) {
		if (setpgrp() == -1) {
			fprintf(stderr, "Error during setpgrp: %m\n");
			_exit(1);
		}
	}
	/* Adopt the descriptor the parent passed as our controlling terminal. */
	ctty = _buildah_unshare_parse_envint("_Buildah-ctty");
	if (ctty != -1) {
		if (ioctl(ctty, TIOCSCTTY, 0) == -1) {
			fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty);
			_exit(1);
		}
	}
	/* Inside a new user namespace, switch to UID/GID 0 in that namespace.
	 * setgroups() is attempted only when the parent indicated it wrote
	 * "allow" to our /proc/PID/setgroups. */
	allow_setgroups = _buildah_unshare_parse_envint("_Buildah-allow-setgroups");
	if ((flags & CLONE_NEWUSER) != 0) {
		if (allow_setgroups == 1) {
			if (setgroups(0, NULL) != 0) {
				fprintf(stderr, "Error during setgroups(0, NULL): %m\n");
				_exit(1);
			}
		}
		if (setresgid(0, 0, 0) != 0) {
			fprintf(stderr, "Error during setresgid(0): %m\n");
			_exit(1);
		}
		if (setresuid(0, 0, 0) != 0) {
			fprintf(stderr, "Error during setresuid(0): %m\n");
			_exit(1);
		}
	}
	/* Create any remaining requested namespaces in a single call. */
	if ((flags & ~CLONE_NEWUSER) != 0) {
		if (unshare(flags & ~CLONE_NEWUSER) == -1) {
			fprintf(stderr, "Error during unshare(...): %m\n");
			_exit(1);
		}
	}
	return;
}

View file

@ -0,0 +1,273 @@
// +build linux
package unshare
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
"syscall"
"github.com/containers/storage/pkg/reexec"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/projectatomic/buildah/util"
)
// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and
// handles setting ID maps and other related settings by triggering
// initialization code in the child.
type Cmd struct {
	*exec.Cmd
	// UnshareFlags is the bitmask of CLONE_* flags passed to the child via
	// the _Buildah-unshare environment variable.
	UnshareFlags int
	// UseNewuidmap selects running the newuidmap helper instead of writing
	// /proc/PID/uid_map directly.
	UseNewuidmap bool
	// UidMappings are written to the child's uid_map when CLONE_NEWUSER is
	// set; if empty, the host's mappings are used (see Start).
	UidMappings []specs.LinuxIDMapping
	// UseNewgidmap selects running the newgidmap helper instead of writing
	// /proc/PID/gid_map directly.
	UseNewgidmap bool
	// GidMappings are written to the child's gid_map when CLONE_NEWUSER is
	// set; if empty, the host's mappings are used (see Start).
	GidMappings []specs.LinuxIDMapping
	// GidMappingsEnableSetgroups controls whether "allow" or "deny" is
	// written to the child's /proc/PID/setgroups.
	GidMappingsEnableSetgroups bool
	// Setsid asks the child to call setsid() before continuing.
	Setsid bool
	// Setpgrp asks the child to call setpgrp() before continuing.
	Setpgrp bool
	// Ctty, if non-nil, is passed to the child as its controlling terminal.
	Ctty *os.File
	// OOMScoreAdj is written to the child's /proc/PID/oom_score_adj.
	OOMScoreAdj int
	// Hook, if non-nil, runs in the parent after the mappings are set and
	// before the child is told to continue; a non-nil error aborts startup.
	Hook func(pid int) error
}
// Command creates a new Cmd which can be customized.  Callers set the
// exported fields on the returned Cmd before calling Start or Run.
func Command(args ...string) *Cmd {
	return &Cmd{Cmd: reexec.Command(args...)}
}
// Start launches the reexec'd child and performs the parent side of the
// startup handshake: it passes the unshare flags and synchronization pipe
// descriptors to the child through _Buildah-* environment variables, reads
// the child's PID back, writes the child's ID mappings and OOM score
// adjustment, runs the optional Hook, and then (by closing the "continue"
// pipe) lets the child proceed.  Any error written into the continue pipe
// makes the child print it and exit instead.
func (c *Cmd) Start() error {
	// NOTE(review): presumably pinned to one OS thread because the child
	// setup is sensitive to the starting thread — TODO confirm.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// Set an environment variable to tell the child to synchronize its startup.
	if c.Env == nil {
		c.Env = os.Environ()
	}
	c.Env = append(c.Env, fmt.Sprintf("_Buildah-unshare=%d", c.UnshareFlags))

	// Create the pipe for reading the child's PID.  ExtraFiles are numbered
	// from fd 3 in the child, hence len(c.ExtraFiles)+3.
	pidRead, pidWrite, err := os.Pipe()
	if err != nil {
		return errors.Wrapf(err, "error creating pid pipe")
	}
	c.Env = append(c.Env, fmt.Sprintf("_Buildah-pid-pipe=%d", len(c.ExtraFiles)+3))
	c.ExtraFiles = append(c.ExtraFiles, pidWrite)

	// Create the pipe for letting the child know to proceed.
	continueRead, continueWrite, err := os.Pipe()
	if err != nil {
		pidRead.Close()
		pidWrite.Close()
		return errors.Wrapf(err, "error creating pid pipe")
	}
	c.Env = append(c.Env, fmt.Sprintf("_Buildah-continue-pipe=%d", len(c.ExtraFiles)+3))
	c.ExtraFiles = append(c.ExtraFiles, continueRead)

	// Pass along other instructions.
	if c.Setsid {
		c.Env = append(c.Env, "_Buildah-setsid=1")
	}
	if c.Setpgrp {
		c.Env = append(c.Env, "_Buildah-setpgrp=1")
	}
	if c.Ctty != nil {
		c.Env = append(c.Env, fmt.Sprintf("_Buildah-ctty=%d", len(c.ExtraFiles)+3))
		c.ExtraFiles = append(c.ExtraFiles, c.Ctty)
	}
	if c.GidMappingsEnableSetgroups {
		c.Env = append(c.Env, "_Buildah-allow-setgroups=1")
	} else {
		c.Env = append(c.Env, "_Buildah-allow-setgroups=0")
	}

	// Make sure we clean up our pipes.  Ends handed off or closed below are
	// set to nil so this deferred cleanup skips them.
	defer func() {
		if pidRead != nil {
			pidRead.Close()
		}
		if pidWrite != nil {
			pidWrite.Close()
		}
		if continueRead != nil {
			continueRead.Close()
		}
		if continueWrite != nil {
			continueWrite.Close()
		}
	}()

	// Start the new process.
	err = c.Cmd.Start()
	if err != nil {
		return err
	}

	// Close the ends of the pipes that the parent doesn't need.
	continueRead.Close()
	continueRead = nil
	pidWrite.Close()
	pidWrite = nil

	// Read the child's PID from the pipe.  The read ends at EOF once the
	// child closes its write end (or exits).
	// NOTE(review): the io.Copy error is ignored; a failed read surfaces as
	// a PID parse error just below.
	pidString := ""
	b := new(bytes.Buffer)
	io.Copy(b, pidRead)
	pidString = b.String()
	pid, err := strconv.Atoi(pidString)
	if err != nil {
		fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
		return errors.Wrapf(err, "error parsing PID %q", pidString)
	}
	// Normalize the PID string before using it to build /proc paths.
	pidString = fmt.Sprintf("%d", pid)

	// If we created a new user namespace, set any specified mappings.
	if c.UnshareFlags&syscall.CLONE_NEWUSER != 0 {
		// Always set "setgroups": writing "deny" is required before an
		// unprivileged process may write a gid_map.
		setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
		if err != nil {
			fmt.Fprintf(continueWrite, "error opening setgroups: %v", err)
			return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
		}
		defer setgroups.Close()
		if c.GidMappingsEnableSetgroups {
			if _, err := fmt.Fprintf(setgroups, "allow"); err != nil {
				fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err)
				// NOTE(review): the wrapped message says "opening" but this
				// is a write failure.
				return errors.Wrapf(err, "error opening \"allow\" to /proc/%s/setgroups", pidString)
			}
		} else {
			if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
				fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err)
				return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString)
			}
		}

		// Default any missing mappings to the host's own, mapped onto
		// themselves (HostID = ContainerID).
		if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 {
			uidmap, gidmap, err := util.GetHostIDMappings("")
			if err != nil {
				fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err)
				return errors.Wrapf(err, "error reading ID mappings in parent")
			}
			if len(c.UidMappings) == 0 {
				c.UidMappings = uidmap
				for i := range c.UidMappings {
					c.UidMappings[i].HostID = c.UidMappings[i].ContainerID
				}
			}
			if len(c.GidMappings) == 0 {
				c.GidMappings = gidmap
				for i := range c.GidMappings {
					c.GidMappings[i].HostID = c.GidMappings[i].ContainerID
				}
			}
		}

		if len(c.GidMappings) > 0 {
			// Build the GID map, since writing to the proc file has to be done all at once.
			g := new(bytes.Buffer)
			for _, m := range c.GidMappings {
				fmt.Fprintf(g, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
			}
			// Set the GID map, either via the setuid newgidmap helper or by
			// writing /proc/PID/gid_map directly.  The buffer is reused to
			// capture the helper's output for error reporting.
			if c.UseNewgidmap {
				cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
				g.Reset()
				cmd.Stdout = g
				cmd.Stderr = g
				err := cmd.Run()
				if err != nil {
					fmt.Fprintf(continueWrite, "error running newgidmap: %v: %s", err, g.String())
					return errors.Wrapf(err, "error running newgidmap: %s", g.String())
				}
			} else {
				gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
				if err != nil {
					fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err)
					return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString)
				}
				defer gidmap.Close()
				if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil {
					fmt.Fprintf(continueWrite, "error writing /proc/%s/gid_map: %v", pidString, err)
					return errors.Wrapf(err, "error writing /proc/%s/gid_map", pidString)
				}
			}
		}

		if len(c.UidMappings) > 0 {
			// Build the UID map, since writing to the proc file has to be done all at once.
			u := new(bytes.Buffer)
			for _, m := range c.UidMappings {
				fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
			}
			// Set the UID map, mirroring the GID handling above.
			if c.UseNewuidmap {
				cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
				u.Reset()
				cmd.Stdout = u
				cmd.Stderr = u
				err := cmd.Run()
				if err != nil {
					fmt.Fprintf(continueWrite, "error running newuidmap: %v: %s", err, u.String())
					return errors.Wrapf(err, "error running newuidmap: %s", u.String())
				}
			} else {
				uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
				if err != nil {
					fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err)
					return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString)
				}
				defer uidmap.Close()
				if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil {
					fmt.Fprintf(continueWrite, "error writing /proc/%s/uid_map: %v", pidString, err)
					return errors.Wrapf(err, "error writing /proc/%s/uid_map", pidString)
				}
			}
		}
	}

	// Adjust the process's OOM score.
	oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0)
	if err != nil {
		fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err)
		return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString)
	}
	if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", c.OOMScoreAdj); err != nil {
		fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", c.OOMScoreAdj, err)
		return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", c.OOMScoreAdj, pidString)
	}
	defer oomScoreAdj.Close()

	// Run any additional setup that we want to do before the child starts running proper.
	if c.Hook != nil {
		if err = c.Hook(pid); err != nil {
			fmt.Fprintf(continueWrite, "hook error: %v", err)
			return err
		}
	}

	// On success nothing was written to continueWrite; the deferred cleanup
	// closes it, which the child takes as its signal to continue.
	return nil
}
// Run starts the reexec'd child, waits for it to exit, and returns any
// error encountered at either stage.
func (c *Cmd) Run() error {
	err := c.Start()
	if err != nil {
		return err
	}
	return c.Wait()
}
// CombinedOutput is not supported for reexec'd commands and always returns
// an error.
func (c *Cmd) CombinedOutput() ([]byte, error) {
	err := errors.New("unshare: CombinedOutput() not implemented")
	return nil, err
}
// Output is not supported for reexec'd commands and always returns an
// error.
func (c *Cmd) Output() ([]byte, error) {
	err := errors.New("unshare: Output() not implemented")
	return nil, err
}

View file

@ -0,0 +1,10 @@
// +build linux,cgo,!gccgo
package unshare
// #cgo CFLAGS: -Wall
// extern void _buildah_unshare(void);
// void __attribute__((constructor)) init(void) {
// _buildah_unshare();
// }
import "C"

View file

@ -0,0 +1,25 @@
// +build linux,cgo,gccgo
package unshare
// #cgo CFLAGS: -Wall -Wextra
// extern void _buildah_unshare(void);
// void __attribute__((constructor)) init(void) {
// _buildah_unshare();
// }
import "C"
// This next bit is straight out of libcontainer.

// AlwaysFalse is here to stay false
// (and be exported so the compiler doesn't optimize out its reference),
// which keeps the C constructor below reachable under gccgo.
var AlwaysFalse bool

func init() {
	if AlwaysFalse {
		// by referencing this C init() in a noop test, it will ensure the compiler
		// links in the C function.
		// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134
		C.init()
	}
}

View file

@ -0,0 +1 @@
package unshare

View file

@ -27,4 +27,9 @@ var (
"CAP_SETUID", "CAP_SETUID",
"CAP_SYS_CHROOT", "CAP_SYS_CHROOT",
} }
// DefaultNetworkSysctl is the list of Kernel parameters which we
// grant by default to containers which are running under UID 0.
DefaultNetworkSysctl = map[string]string{
"net.ipv4.ping_group_range": "0 0",
}
) )

View file

@ -4,8 +4,8 @@ github.com/BurntSushi/toml master
github.com/containerd/continuity master github.com/containerd/continuity master
github.com/containernetworking/cni v0.6.0 github.com/containernetworking/cni v0.6.0
github.com/seccomp/containers-golang master github.com/seccomp/containers-golang master
github.com/containers/image master github.com/containers/image 134f99bed228d6297dc01d152804f6f09f185418
github.com/containers/storage afdedba2d2ad573350aee35033d4e0c58fdbd57b github.com/containers/storage 17c7d1fee5603ccf6dd97edc14162fc1510e7e23
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker b8571fd81c7d2223c9ecbf799c693e3ef1daaea9 github.com/docker/docker b8571fd81c7d2223c9ecbf799c693e3ef1daaea9
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
@ -46,7 +46,7 @@ github.com/projectatomic/libpod master
github.com/sirupsen/logrus master github.com/sirupsen/logrus master
github.com/syndtr/gocapability master github.com/syndtr/gocapability master
github.com/tchap/go-patricia master github.com/tchap/go-patricia master
github.com/urfave/cli master github.com/urfave/cli fix-short-opts-parsing https://github.com/vrothberg/cli
github.com/vbatts/tar-split v0.10.2 github.com/vbatts/tar-split v0.10.2
github.com/xeipuuv/gojsonpointer master github.com/xeipuuv/gojsonpointer master
github.com/xeipuuv/gojsonreference master github.com/xeipuuv/gojsonreference master