Merge pull request #10880 from vrothberg/auto-update

auto-update: make output more user friendly
OpenShift Merge Robot, 2021-07-14 15:26:01 -04:00 (committed by GitHub) · commit ce4c23c4af
9 changed files with 276 additions and 126 deletions


@@ -1,10 +1,15 @@
package main
import (
"encoding/json"
"fmt"
"os"
"strings"
"github.com/containers/common/pkg/auth"
"github.com/containers/common/pkg/completion"
"github.com/containers/common/pkg/report"
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/errorhandling"
@@ -12,8 +17,13 @@ import (
"github.com/spf13/cobra"
)
type cliAutoUpdateOptions struct {
entities.AutoUpdateOptions
format string
}
var (
autoUpdateOptions = entities.AutoUpdateOptions{}
autoUpdateOptions = cliAutoUpdateOptions{}
autoUpdateDescription = `Auto update containers according to their auto-update policy.
Auto-update policies are specified with the "io.containers.autoupdate" label.
@@ -42,6 +52,9 @@ func init() {
authfileFlagName := "authfile"
flags.StringVar(&autoUpdateOptions.Authfile, authfileFlagName, auth.GetDefaultAuthFile(), "Path to the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
_ = autoUpdateCommand.RegisterFlagCompletionFunc(authfileFlagName, completion.AutocompleteDefault)
flags.StringVar(&autoUpdateOptions.format, "format", "", "Change the output format to JSON or a Go template")
_ = autoUpdateCommand.RegisterFlagCompletionFunc("format", common.AutocompleteFormat(autoUpdateOutput{}))
}
func autoUpdate(cmd *cobra.Command, args []string) error {
@@ -49,11 +62,83 @@ func autoUpdate(cmd *cobra.Command, args []string) error {
// Backwards compat. System tests expect this error string.
return errors.Errorf("`%s` takes no arguments", cmd.CommandPath())
}
report, failures := registry.ContainerEngine().AutoUpdate(registry.GetContext(), autoUpdateOptions)
if report != nil {
for _, unit := range report.Units {
fmt.Println(unit)
}
allReports, failures := registry.ContainerEngine().AutoUpdate(registry.GetContext(), autoUpdateOptions.AutoUpdateOptions)
if allReports == nil {
return errorhandling.JoinErrors(failures)
}
if err := writeTemplate(allReports, autoUpdateOptions.format); err != nil {
failures = append(failures, err)
}
return errorhandling.JoinErrors(failures)
}
type autoUpdateOutput struct {
Unit string
Container string
ContainerName string
ContainerID string
Image string
Policy string
Updated string
}
func reportsToOutput(allReports []*entities.AutoUpdateReport) []autoUpdateOutput {
output := make([]autoUpdateOutput, len(allReports))
for i, r := range allReports {
output[i] = autoUpdateOutput{
Unit: r.SystemdUnit,
Container: fmt.Sprintf("%s (%s)", r.ContainerID[:12], r.ContainerName),
ContainerName: r.ContainerName,
ContainerID: r.ContainerID,
Image: r.ImageName,
Policy: r.Policy,
Updated: r.Updated,
}
}
return output
}
func writeTemplate(allReports []*entities.AutoUpdateReport, inputFormat string) error {
var format string
var printHeader bool
output := reportsToOutput(allReports)
switch inputFormat {
case "":
rows := []string{"{{.Unit}}", "{{.Container}}", "{{.Image}}", "{{.Policy}}", "{{.Updated}}"}
format = "{{range . }}" + strings.Join(rows, "\t") + "\n{{end -}}"
printHeader = true
case "json":
prettyJSON, err := json.MarshalIndent(output, "", " ")
if err != nil {
return err
}
fmt.Println(string(prettyJSON))
return nil
default:
format = "{{range . }}" + inputFormat + "\n{{end -}}"
}
tmpl, err := report.NewTemplate("auto-update").Parse(format)
if err != nil {
return err
}
w, err := report.NewWriterDefault(os.Stdout)
if err != nil {
return err
}
defer w.Flush()
if printHeader {
headers := report.Headers(autoUpdateOutput{}, nil)
if err := tmpl.Execute(w, headers); err != nil {
return err
}
}
return tmpl.Execute(w, output)
}
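
For reference, the `json` branch above marshals the `autoUpdateOutput` slice directly, so a run with a single eligible unit prints something along the following lines (the unit, container, and image values are illustrative, not taken from a real run):

```
$ podman auto-update --format json
[
  {
    "Unit": "container-example.service",
    "Container": "093a4a5e3d0a (example)",
    "ContainerName": "example",
    "ContainerID": "093a4a5e3d0a...",
    "Image": "registry.example.com/app:latest",
    "Policy": "registry",
    "Updated": "true"
  }
]
```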


@@ -41,6 +41,22 @@ If the authorization state is not found there, `$HOME/.docker/config.json` is ch
Note: There is also the option to override the default path of the authentication file by setting the `REGISTRY_AUTH_FILE` environment variable. This can be done with **export REGISTRY_AUTH_FILE=_path_**.
#### **--format**=*format*
Change the default output format. This can be either 'json' or a Go template.
Valid placeholders for the Go template are listed below:
| **Placeholder** | **Description** |
| --------------- | -------------------------------------- |
| .Unit | Name of the systemd unit |
| .ContainerName | Name of the container |
| .ContainerID | ID of the container |
| .Container | ID and name of the container |
| .Image | Name of the image |
| .Policy | Auto-update policy of the container |
| .Updated | Update status: true, false, failed |
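
A Go template can combine any of these placeholders, for instance (the unit and image names below are illustrative):

```
$ podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}}"
container-example.service,registry.example.com/app:latest,false
```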
## EXAMPLES
Autoupdate with registry policy
@@ -53,7 +69,7 @@ bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d
### Generate a systemd unit for this container
$ podman generate systemd --new --files bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d
/home/user/containers/libpod/container-bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d.service
/home/user/container-bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d.service
### Load the new systemd unit and start it
$ mv ./container-bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d.service ~/.config/systemd/user
@@ -67,7 +83,7 @@ $ systemctl --user start container-bc219740a210455fa27deacc96d50a9e20516492f1417
### Auto-update the container
$ podman auto-update
container-bc219740a210455fa27deacc96d50a9e20516492f1417507c13ce1533dbdcd9d.service
[...]
```
Autoupdate with local policy
@@ -80,7 +96,7 @@ be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338
### Generate a systemd unit for this container
$ podman generate systemd --new --files be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338
/home/user/containers/libpod/container-be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338.service
/home/user/container-be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338.service
### Load the new systemd unit and start it
$ mv ./container-be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338.service ~/.config/systemd/user
@@ -102,7 +118,7 @@ $ podman commit --change CMD=/bin/bash inspiring_galileo busybox:latest
### Auto-update the container
$ podman auto-update
container-be0889fd06f252a2e5141b37072c6bada68563026cb2b2649f53394d87ccc338.service
[...]
```
## SEE ALSO


@@ -9,12 +9,13 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/podman/v3/libpod"
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/systemd"
systemdDefine "github.com/containers/podman/v3/pkg/systemd/define"
"github.com/coreos/go-systemd/v22/dbus"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -119,7 +120,7 @@ func ValidateImageReference(imageName string) error {
//
// It returns a slice of auto-update reports and a slice of errors encountered
// during auto update.
func AutoUpdate(runtime *libpod.Runtime, options Options) ([]string, []error) {
func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options Options) ([]*entities.AutoUpdateReport, []error) {
// Create a map from `image ID -> []*Container`.
containerMap, errs := imageContainersMap(runtime)
if len(containerMap) == 0 {
@@ -130,7 +131,7 @@ func AutoUpdate(runtime *libpod.Runtime, options Options) ([]string, []error) {
listOptions := &libimage.ListImagesOptions{
Filters: []string{"readonly=false"},
}
imagesSlice, err := runtime.LibimageRuntime().ListImages(context.Background(), nil, listOptions)
imagesSlice, err := runtime.LibimageRuntime().ListImages(ctx, nil, listOptions)
if err != nil {
return nil, []error{err}
}
@@ -147,8 +148,8 @@ func AutoUpdate(runtime *libpod.Runtime, options Options) ([]string, []error) {
}
defer conn.Close()
// Update images.
containersToRestart := []*libpod.Container{}
// Update all images/containers according to their auto-update policy.
var allReports []*entities.AutoUpdateReport
updatedRawImages := make(map[string]bool)
for imageID, policyMapper := range containerMap {
image, exists := imageMap[imageID]
@@ -156,76 +157,139 @@ func AutoUpdate(runtime *libpod.Runtime, options Options) ([]string, []error) {
errs = append(errs, errors.Errorf("container image ID %q not found in local storage", imageID))
return nil, errs
}
// Now we have to check if the image of any containers must be updated.
// Note that the image ID is NOT enough for this check as a given image
// may have multiple tags.
for _, registryCtr := range policyMapper[PolicyRegistryImage] {
cid := registryCtr.ID()
rawImageName := registryCtr.RawImageName()
if rawImageName == "" {
errs = append(errs, errors.Errorf("error registry auto-updating container %q: raw-image name is empty", cid))
}
readAuthenticationPath(registryCtr, options)
needsUpdate, err := newerRemoteImageAvailable(runtime, image, rawImageName, options)
if err != nil {
errs = append(errs, errors.Wrapf(err, "error registry auto-updating container %q: image check for %q failed", cid, rawImageName))
continue
}
if needsUpdate {
logrus.Infof("Auto-updating container %q using registry image %q", cid, rawImageName)
if _, updated := updatedRawImages[rawImageName]; !updated {
_, err = updateImage(runtime, rawImageName, options)
if err != nil {
errs = append(errs, errors.Wrapf(err, "error registry auto-updating container %q: image update for %q failed", cid, rawImageName))
continue
}
updatedRawImages[rawImageName] = true
}
containersToRestart = append(containersToRestart, registryCtr)
for _, ctr := range policyMapper[PolicyRegistryImage] {
report, err := autoUpdateRegistry(ctx, image, ctr, updatedRawImages, &options, conn, runtime)
if err != nil {
errs = append(errs, err)
}
if report != nil {
allReports = append(allReports, report)
}
}
for _, localCtr := range policyMapper[PolicyLocalImage] {
cid := localCtr.ID()
rawImageName := localCtr.RawImageName()
if rawImageName == "" {
errs = append(errs, errors.Errorf("error locally auto-updating container %q: raw-image name is empty", cid))
}
// This avoids restarting containers unnecessarily.
needsUpdate, err := newerLocalImageAvailable(runtime, image, rawImageName)
for _, ctr := range policyMapper[PolicyLocalImage] {
report, err := autoUpdateLocally(ctx, image, ctr, &options, conn, runtime)
if err != nil {
errs = append(errs, errors.Wrapf(err, "error locally auto-updating container %q: image check for %q failed", cid, rawImageName))
continue
errs = append(errs, err)
}
if needsUpdate {
logrus.Infof("Auto-updating container %q using local image %q", cid, rawImageName)
containersToRestart = append(containersToRestart, localCtr)
if report != nil {
allReports = append(allReports, report)
}
}
}
// Restart containers.
updatedUnits := []string{}
for _, ctr := range containersToRestart {
labels := ctr.Labels()
unit, exists := labels[systemdDefine.EnvVariable]
if !exists {
// Shouldn't happen but let's be sure of it.
errs = append(errs, errors.Errorf("error auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable))
continue
}
_, err := conn.RestartUnit(unit, "replace", nil)
if err != nil {
errs = append(errs, errors.Wrapf(err, "error auto-updating container %q: restarting systemd unit %q failed", ctr.ID(), unit))
continue
}
logrus.Infof("Successfully restarted systemd unit %q", unit)
updatedUnits = append(updatedUnits, unit)
return allReports, errs
}
// autoUpdateRegistry updates the image/container according to the "registry" policy.
func autoUpdateRegistry(ctx context.Context, image *libimage.Image, ctr *libpod.Container, updatedRawImages map[string]bool, options *Options, conn *dbus.Conn, runtime *libpod.Runtime) (*entities.AutoUpdateReport, error) {
cid := ctr.ID()
rawImageName := ctr.RawImageName()
if rawImageName == "" {
return nil, errors.Errorf("error registry auto-updating container %q: raw-image name is empty", cid)
}
return updatedUnits, errs
labels := ctr.Labels()
unit, exists := labels[systemdDefine.EnvVariable]
if !exists {
return nil, errors.Errorf("error auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
}
report := &entities.AutoUpdateReport{
ContainerID: cid,
ContainerName: ctr.Name(),
ImageName: rawImageName,
Policy: PolicyRegistryImage,
SystemdUnit: unit,
Updated: "failed",
}
if _, updated := updatedRawImages[rawImageName]; updated {
logrus.Infof("Auto-updating container %q using registry image %q", cid, rawImageName)
if err := restartSystemdUnit(ctr, unit, conn); err != nil {
return report, err
}
report.Updated = "true"
return report, nil
}
authfile := getAuthfilePath(ctr, options)
needsUpdate, err := newerRemoteImageAvailable(ctx, runtime, image, rawImageName, authfile)
if err != nil {
return report, errors.Wrapf(err, "error registry auto-updating container %q: image check for %q failed", cid, rawImageName)
}
if !needsUpdate {
report.Updated = "false"
return report, nil
}
if _, err := updateImage(ctx, runtime, rawImageName, options); err != nil {
return report, errors.Wrapf(err, "error registry auto-updating container %q: image update for %q failed", cid, rawImageName)
}
updatedRawImages[rawImageName] = true
logrus.Infof("Auto-updating container %q using registry image %q", cid, rawImageName)
if err := restartSystemdUnit(ctr, unit, conn); err != nil {
return report, err
}
report.Updated = "true"
return report, nil
}
// autoUpdateLocally updates the image/container according to the "local" policy.
func autoUpdateLocally(ctx context.Context, image *libimage.Image, ctr *libpod.Container, options *Options, conn *dbus.Conn, runtime *libpod.Runtime) (*entities.AutoUpdateReport, error) {
cid := ctr.ID()
rawImageName := ctr.RawImageName()
if rawImageName == "" {
return nil, errors.Errorf("error locally auto-updating container %q: raw-image name is empty", cid)
}
labels := ctr.Labels()
unit, exists := labels[systemdDefine.EnvVariable]
if !exists {
return nil, errors.Errorf("error auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
}
report := &entities.AutoUpdateReport{
ContainerID: cid,
ContainerName: ctr.Name(),
ImageName: rawImageName,
Policy: PolicyLocalImage,
SystemdUnit: unit,
Updated: "failed",
}
needsUpdate, err := newerLocalImageAvailable(runtime, image, rawImageName)
if err != nil {
return report, errors.Wrapf(err, "error locally auto-updating container %q: image check for %q failed", cid, rawImageName)
}
if !needsUpdate {
report.Updated = "false"
return report, nil
}
logrus.Infof("Auto-updating container %q using local image %q", cid, rawImageName)
if err := restartSystemdUnit(ctr, unit, conn); err != nil {
return report, err
}
report.Updated = "true"
return report, nil
}
// restartSystemdUnit restarts the systemd unit the container is running in.
func restartSystemdUnit(ctr *libpod.Container, unit string, conn *dbus.Conn) error {
_, err := conn.RestartUnit(unit, "replace", nil)
if err != nil {
return errors.Wrapf(err, "error auto-updating container %q: restarting systemd unit %q failed", ctr.ID(), unit)
}
logrus.Infof("Successfully restarted systemd unit %q of container %q", unit, ctr.ID())
return nil
}
// imageContainersMap generates a map[image ID] -> [containers using the image]
@@ -280,52 +344,25 @@ func imageContainersMap(runtime *libpod.Runtime) (map[string]policyMapper, []err
return containerMap, errors
}
// readAuthenticationPath reads a container's labels and reads authentication path into options
func readAuthenticationPath(ctr *libpod.Container, options Options) {
// getAuthfilePath returns an authfile path, if set. The authfile label in the
// container, if set, has precedence over the one set in the options.
func getAuthfilePath(ctr *libpod.Container, options *Options) string {
labels := ctr.Labels()
authFilePath, exists := labels[AuthfileLabel]
if exists {
options.Authfile = authFilePath
return authFilePath
}
return options.Authfile
}
// newerRemoteImageAvailable returns true if the corresponding image on the remote
// registry is newer.
func newerRemoteImageAvailable(runtime *libpod.Runtime, img *libimage.Image, origName string, options Options) (bool, error) {
func newerRemoteImageAvailable(ctx context.Context, runtime *libpod.Runtime, img *libimage.Image, origName string, authfile string) (bool, error) {
remoteRef, err := docker.ParseReference("//" + origName)
if err != nil {
return false, err
}
data, err := img.Inspect(context.Background(), false)
if err != nil {
return false, err
}
sys := runtime.SystemContext()
sys.AuthFilePath = options.Authfile
// We need to account for the arch that the image uses. It seems
// common on ARM to tweak this option to pull the correct image. See
// github.com/containers/podman/issues/6613.
sys.ArchitectureChoice = data.Architecture
remoteImg, err := remoteRef.NewImage(context.Background(), sys)
if err != nil {
return false, err
}
rawManifest, _, err := remoteImg.Manifest(context.Background())
if err != nil {
return false, err
}
remoteDigest, err := manifest.Digest(rawManifest)
if err != nil {
return false, err
}
return img.Digest().String() != remoteDigest.String(), nil
return img.HasDifferentDigest(ctx, remoteRef)
}
// newerLocalImageAvailable returns true if the container and local image have different digests
@@ -334,21 +371,16 @@ func newerLocalImageAvailable(runtime *libpod.Runtime, img *libimage.Image, rawI
if err != nil {
return false, err
}
localDigest := localImg.Digest().String()
ctrDigest := img.Digest().String()
return localDigest != ctrDigest, nil
return localImg.Digest().String() != img.Digest().String(), nil
}
// updateImage pulls the specified image.
func updateImage(runtime *libpod.Runtime, name string, options Options) (*libimage.Image, error) {
func updateImage(ctx context.Context, runtime *libpod.Runtime, name string, options *Options) (*libimage.Image, error) {
pullOptions := &libimage.PullOptions{}
pullOptions.AuthFilePath = options.Authfile
pullOptions.Writer = os.Stderr
pulledImages, err := runtime.LibimageRuntime().Pull(context.Background(), name, config.PullPolicyAlways, pullOptions)
pulledImages, err := runtime.LibimageRuntime().Pull(ctx, name, config.PullPolicyAlways, pullOptions)
if err != nil {
return nil, err
}


@@ -8,6 +8,17 @@ type AutoUpdateOptions struct {
// AutoUpdateReport contains the results from running auto-update.
type AutoUpdateReport struct {
// Units - the restarted systemd units during auto-update.
Units []string
// ID of the container *before* an update.
ContainerID string
// Name of the container *before* an update.
ContainerName string
// Name of the image.
ImageName string
// The configured auto-update policy.
Policy string
// SystemdUnit running a container configured for auto updates.
SystemdUnit string
// Indicates whether the image was updated and the container (and
// systemd unit) restarted.
Updated string
}


@@ -14,7 +14,7 @@ import (
type ContainerCopyFunc func() error
type ContainerEngine interface {
AutoUpdate(ctx context.Context, options AutoUpdateOptions) (*AutoUpdateReport, []error)
AutoUpdate(ctx context.Context, options AutoUpdateOptions) ([]*AutoUpdateReport, []error)
Config(ctx context.Context) (*config.Config, error)
ContainerAttach(ctx context.Context, nameOrID string, options AttachOptions) error
ContainerCheckpoint(ctx context.Context, namesOrIds []string, options CheckpointOptions) ([]*CheckpointReport, error)


@@ -7,11 +7,10 @@ import (
"github.com/containers/podman/v3/pkg/domain/entities"
)
func (ic *ContainerEngine) AutoUpdate(ctx context.Context, options entities.AutoUpdateOptions) (*entities.AutoUpdateReport, []error) {
func (ic *ContainerEngine) AutoUpdate(ctx context.Context, options entities.AutoUpdateOptions) ([]*entities.AutoUpdateReport, []error) {
// Convert the entities options to the autoupdate ones. We can't use
// them in the entities package as low-level packages must not leak
// into the remote client.
autoOpts := autoupdate.Options{Authfile: options.Authfile}
units, failures := autoupdate.AutoUpdate(ic.Libpod, autoOpts)
return &entities.AutoUpdateReport{Units: units}, failures
return autoupdate.AutoUpdate(ctx, ic.Libpod, autoOpts)
}


@@ -7,6 +7,6 @@ import (
"github.com/pkg/errors"
)
func (ic *ContainerEngine) AutoUpdate(ctx context.Context, options entities.AutoUpdateOptions) (*entities.AutoUpdateReport, []error) {
func (ic *ContainerEngine) AutoUpdate(ctx context.Context, options entities.AutoUpdateOptions) ([]*entities.AutoUpdateReport, []error) {
return nil, []error{errors.New("not implemented")}
}


@@ -119,7 +119,7 @@ function service_cleanup() {
# Run auto-update and check that it restarted the container
run_podman commit --change "CMD=/bin/bash" $cname $IMAGE
run_podman auto-update
is $output $SERVICE_NAME "autoupdate local restarted container"
is "$output" ".*$SERVICE_NAME.*" "autoupdate local restarted container"
# All good. Stop service, clean up.
service_cleanup


@@ -121,8 +121,10 @@ function _confirm_update() {
generate_service alpine image
_wait_service_ready container-$cname.service
run_podman auto-update
run_podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
is "$output" "Trying to pull.*" "Image is updated."
is "$output" ".*container-$cname.service,quay.io/libpod/alpine:latest,true,registry.*" "Image is updated."
_confirm_update $cname $ori_image
}
@@ -151,10 +153,15 @@ function _confirm_update() {
@test "podman auto-update - label io.containers.autoupdate=local" {
generate_service localtest local
podman commit --change CMD=/bin/bash $cname quay.io/libpod/localtest:latest
image=quay.io/libpod/localtest:latest
podman commit --change CMD=/bin/bash $cname $image
podman image inspect --format "{{.ID}}" $image
imageID="$output"
_wait_service_ready container-$cname.service
run_podman auto-update
run_podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
is "$output" ".*container-$cname.service,quay.io/libpod/localtest:latest,true,local.*" "Image is updated."
_confirm_update $cname $ori_image
}