mirror of
https://github.com/containers/podman
synced 2024-10-18 16:24:34 +00:00
Add --time out for podman * rm -f commands
Add --time flag to podman container rm Add --time flag to podman pod rm Add --time flag to podman volume rm Add --time flag to podman network rm Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
This commit is contained in:
parent
36821d302e
commit
21c9dc3c40
|
@ -62,6 +62,9 @@ func rmFlags(cmd *cobra.Command) {
|
|||
flags.BoolVarP(&rmOptions.All, "all", "a", false, "Remove all containers")
|
||||
flags.BoolVarP(&rmOptions.Ignore, "ignore", "i", false, "Ignore errors when a specified container is missing")
|
||||
flags.BoolVarP(&rmOptions.Force, "force", "f", false, "Force removal of a running or unusable container. The default is false")
|
||||
timeFlagName := "time"
|
||||
flags.UintVarP(&stopTimeout, timeFlagName, "t", containerConfig.Engine.StopTimeout, "Seconds to wait for stop before killing the container")
|
||||
_ = cmd.RegisterFlagCompletionFunc(timeFlagName, completion.AutocompleteNone)
|
||||
flags.BoolVarP(&rmOptions.Volumes, "volumes", "v", false, "Remove anonymous volumes associated with the container")
|
||||
|
||||
cidfileFlagName := "cidfile"
|
||||
|
@ -91,6 +94,12 @@ func init() {
|
|||
}
|
||||
|
||||
func rm(cmd *cobra.Command, args []string) error {
|
||||
if cmd.Flag("time").Changed {
|
||||
if !rmOptions.Force {
|
||||
return errors.New("--force option must be specified to use the --time option")
|
||||
}
|
||||
rmOptions.Timeout = &stopTimeout
|
||||
}
|
||||
for _, cidFile := range cidFiles {
|
||||
content, err := ioutil.ReadFile(string(cidFile))
|
||||
if err != nil {
|
||||
|
|
|
@ -3,6 +3,7 @@ package network
|
|||
import (
|
||||
"github.com/containers/podman/v3/cmd/podman/registry"
|
||||
"github.com/containers/podman/v3/cmd/podman/validate"
|
||||
"github.com/containers/podman/v3/pkg/util"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
|
@ -17,6 +18,7 @@ var (
|
|||
Long: "Manage networks",
|
||||
RunE: validate.SubCommandExists,
|
||||
}
|
||||
containerConfig = util.DefaultContainerConfig()
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/common/pkg/completion"
|
||||
"github.com/containers/podman/v3/cmd/podman/common"
|
||||
"github.com/containers/podman/v3/cmd/podman/registry"
|
||||
"github.com/containers/podman/v3/cmd/podman/utils"
|
||||
|
@ -26,6 +27,7 @@ var (
|
|||
Args: cobra.MinimumNArgs(1),
|
||||
ValidArgsFunction: common.AutocompleteNetworks,
|
||||
}
|
||||
stopTimeout uint
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -34,6 +36,9 @@ var (
|
|||
|
||||
func networkRmFlags(flags *pflag.FlagSet) {
|
||||
flags.BoolVarP(&networkRmOptions.Force, "force", "f", false, "remove any containers using network")
|
||||
timeFlagName := "time"
|
||||
flags.UintVarP(&stopTimeout, timeFlagName, "t", containerConfig.Engine.StopTimeout, "Seconds to wait for running containers to stop before killing the container")
|
||||
_ = networkrmCommand.RegisterFlagCompletionFunc(timeFlagName, completion.AutocompleteNone)
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
@ -50,6 +55,12 @@ func networkRm(cmd *cobra.Command, args []string) error {
|
|||
errs utils.OutputErrors
|
||||
)
|
||||
|
||||
if cmd.Flag("time").Changed {
|
||||
if !networkRmOptions.Force {
|
||||
return errors.New("--force option must be specified to use the --time option")
|
||||
}
|
||||
networkRmOptions.Timeout = &stopTimeout
|
||||
}
|
||||
responses, err := registry.ContainerEngine().NetworkRm(registry.Context(), args, networkRmOptions)
|
||||
if err != nil {
|
||||
setExitCode(err)
|
||||
|
|
|
@ -42,6 +42,7 @@ var (
|
|||
podman pod rm -f 860a4b23
|
||||
podman pod rm -f -a`,
|
||||
}
|
||||
stopTimeout uint
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -59,6 +60,10 @@ func init() {
|
|||
flags.StringArrayVarP(&rmOptions.PodIDFiles, podIDFileFlagName, "", nil, "Read the pod ID from the file")
|
||||
_ = rmCommand.RegisterFlagCompletionFunc(podIDFileFlagName, completion.AutocompleteDefault)
|
||||
|
||||
timeFlagName := "time"
|
||||
flags.UintVarP(&stopTimeout, timeFlagName, "t", containerConfig.Engine.StopTimeout, "Seconds to wait for pod stop before killing the container")
|
||||
_ = rmCommand.RegisterFlagCompletionFunc(timeFlagName, completion.AutocompleteNone)
|
||||
|
||||
validate.AddLatestFlag(rmCommand, &rmOptions.Latest)
|
||||
|
||||
if registry.IsRemote() {
|
||||
|
@ -66,12 +71,18 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
func rm(_ *cobra.Command, args []string) error {
|
||||
func rm(cmd *cobra.Command, args []string) error {
|
||||
ids, err := specgenutil.ReadPodIDFiles(rmOptions.PodIDFiles)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
args = append(args, ids...)
|
||||
if cmd.Flag("time").Changed {
|
||||
if !rmOptions.Force {
|
||||
return errors.New("--force option must be specified to use the --time option")
|
||||
}
|
||||
rmOptions.Timeout = &stopTimeout
|
||||
}
|
||||
return removePods(args, rmOptions.PodRmOptions, true)
|
||||
}
|
||||
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/common/pkg/completion"
|
||||
"github.com/containers/podman/v3/cmd/podman/common"
|
||||
"github.com/containers/podman/v3/cmd/podman/registry"
|
||||
"github.com/containers/podman/v3/cmd/podman/utils"
|
||||
|
@ -32,7 +33,8 @@ var (
|
|||
)
|
||||
|
||||
var (
|
||||
rmOptions = entities.VolumeRmOptions{}
|
||||
rmOptions = entities.VolumeRmOptions{}
|
||||
stopTimeout uint
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -43,6 +45,9 @@ func init() {
|
|||
flags := rmCommand.Flags()
|
||||
flags.BoolVarP(&rmOptions.All, "all", "a", false, "Remove all volumes")
|
||||
flags.BoolVarP(&rmOptions.Force, "force", "f", false, "Remove a volume by force, even if it is being used by a container")
|
||||
timeFlagName := "time"
|
||||
flags.UintVarP(&stopTimeout, timeFlagName, "t", containerConfig.Engine.StopTimeout, "Seconds to wait for running containers to stop before killing the container")
|
||||
_ = rmCommand.RegisterFlagCompletionFunc(timeFlagName, completion.AutocompleteNone)
|
||||
}
|
||||
|
||||
func rm(cmd *cobra.Command, args []string) error {
|
||||
|
@ -52,6 +57,12 @@ func rm(cmd *cobra.Command, args []string) error {
|
|||
if (len(args) > 0 && rmOptions.All) || (len(args) < 1 && !rmOptions.All) {
|
||||
return errors.New("choose either one or more volumes or all")
|
||||
}
|
||||
if cmd.Flag("time").Changed {
|
||||
if !rmOptions.Force {
|
||||
return errors.New("--force option must be specified to use the --time option")
|
||||
}
|
||||
rmOptions.Timeout = &stopTimeout
|
||||
}
|
||||
responses, err := registry.ContainerEngine().VolumeRm(context.Background(), args, rmOptions)
|
||||
if err != nil {
|
||||
setExitCode(err)
|
||||
|
|
|
@ -3,6 +3,7 @@ package volumes
|
|||
import (
|
||||
"github.com/containers/podman/v3/cmd/podman/registry"
|
||||
"github.com/containers/podman/v3/cmd/podman/validate"
|
||||
"github.com/containers/podman/v3/pkg/util"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
|
@ -17,6 +18,7 @@ var (
|
|||
Long: "Volumes are created in and can be shared between containers",
|
||||
RunE: validate.SubCommandExists,
|
||||
}
|
||||
containerConfig = util.DefaultContainerConfig()
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -15,6 +15,10 @@ Delete one or more Podman networks.
|
|||
The `force` option will remove all containers that use the named network. If the container is
|
||||
running, the container will be stopped and removed.
|
||||
|
||||
#### **--time**, **-t**=*seconds*
|
||||
|
||||
Seconds to wait before forcibly stopping the running containers that are using the specified network. The --force option must be specified to use the --time option.
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
Delete the `cni-podman9` network
|
||||
|
|
|
@ -33,6 +33,10 @@ Stop running containers and delete all stopped containers before removal of pod.
|
|||
|
||||
Read pod ID from the specified file and remove the pod. Can be specified multiple times.
|
||||
|
||||
#### **--time**, **-t**=*seconds*
|
||||
|
||||
Seconds to wait before forcibly stopping running containers within the pod. The --force option must be specified to use the --time option.
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
podman pod rm mywebserverpod
|
||||
|
|
|
@ -25,9 +25,9 @@ ExecStop directive of a systemd service referencing that pod.
|
|||
|
||||
Instead of providing the pod name or ID, stop the last created pod. (This option is not available with the remote Podman client)
|
||||
|
||||
#### **--time**, **-t**=*time*
|
||||
#### **--time**, **-t**=*seconds*
|
||||
|
||||
Timeout to wait before forcibly stopping the containers in the pod.
|
||||
Seconds to wait before forcibly stopping the containers in the pod.
|
||||
|
||||
#### **--pod-id-file**
|
||||
|
||||
|
|
|
@ -24,9 +24,9 @@ to run containers such as CRI-O, the last started container could be from either
|
|||
#### **--running**
|
||||
Restart all containers that are already in the *running* state.
|
||||
|
||||
#### **--time**=*time*, **-t**
|
||||
Timeout to wait before forcibly stopping the container.
|
||||
#### **--time**, **-t**=*seconds*
|
||||
|
||||
Seconds to wait before forcibly stopping the container.
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
|
|
|
@ -41,6 +41,10 @@ during the ExecStop directive of a systemd service referencing that container.
|
|||
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
|
||||
to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
|
||||
|
||||
#### **--time**, **-t**=*seconds*
|
||||
|
||||
Seconds to wait before forcibly stopping the container. The --force option must be specified to use the --time option.
|
||||
|
||||
#### **--volumes**, **-v**
|
||||
|
||||
Remove anonymous volumes associated with the container. This does not include named volumes
|
||||
|
|
|
@ -36,9 +36,9 @@ during the ExecStop directive of a systemd service referencing that container.
|
|||
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
|
||||
to run containers such as CRI-O, the last started container could be from either of those methods. (This option is not available with the remote Podman client)
|
||||
|
||||
#### **--time**, **-t**=*time*
|
||||
#### **--time**, **-t**=*seconds*
|
||||
|
||||
Time to wait before forcibly stopping the container
|
||||
Seconds to wait before forcibly stopping the container
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
|
|
|
@ -28,6 +28,9 @@ If it is being used by containers, the containers will be removed first.
|
|||
|
||||
Print usage statement
|
||||
|
||||
#### **--time**, **-t**=*seconds*
|
||||
|
||||
Seconds to wait before forcibly stopping running containers that are using the specified volume. The --force option must be specified to use the --time option.
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
|
|
|
@ -37,7 +37,8 @@ func (p *Pod) startInitContainers(ctx context.Context) error {
|
|||
if initCon.config.InitContainerType == define.OneShotInitContainer {
|
||||
icLock := initCon.lock
|
||||
icLock.Lock()
|
||||
if err := p.runtime.removeContainer(ctx, initCon, false, false, true); err != nil {
|
||||
var time *uint
|
||||
if err := p.runtime.removeContainer(ctx, initCon, false, false, true, time); err != nil {
|
||||
icLock.Unlock()
|
||||
return errors.Wrapf(err, "failed to remove once init container %s", initCon.ID())
|
||||
}
|
||||
|
|
|
@ -18,12 +18,13 @@ import (
|
|||
|
||||
// Reset removes all storage
|
||||
func (r *Runtime) Reset(ctx context.Context) error {
|
||||
var timeout *uint
|
||||
pods, err := r.GetAllPods()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, p := range pods {
|
||||
if err := r.RemovePod(ctx, p, true, true); err != nil {
|
||||
if err := r.RemovePod(ctx, p, true, true, timeout); err != nil {
|
||||
if errors.Cause(err) == define.ErrNoSuchPod {
|
||||
continue
|
||||
}
|
||||
|
@ -37,7 +38,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
|
|||
}
|
||||
|
||||
for _, c := range ctrs {
|
||||
if err := r.RemoveContainer(ctx, c, true, true); err != nil {
|
||||
if err := r.RemoveContainer(ctx, c, true, true, timeout); err != nil {
|
||||
if err := r.RemoveStorageContainer(c.ID(), true); err != nil {
|
||||
if errors.Cause(err) == define.ErrNoSuchCtr {
|
||||
continue
|
||||
|
@ -61,7 +62,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
|
|||
return err
|
||||
}
|
||||
for _, v := range volumes {
|
||||
if err := r.RemoveVolume(ctx, v, true); err != nil {
|
||||
if err := r.RemoveVolume(ctx, v, true, timeout); err != nil {
|
||||
if errors.Cause(err) == define.ErrNoSuchVolume {
|
||||
continue
|
||||
}
|
||||
|
|
|
@ -535,10 +535,10 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
|
|||
// If removeVolume is specified, named volumes used by the container will
|
||||
// be removed also if and only if the container is the sole user
|
||||
// Otherwise, RemoveContainer will return an error if the container is running
|
||||
func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool, removeVolume bool) error {
|
||||
func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool, removeVolume bool, timeout *uint) error {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
return r.removeContainer(ctx, c, force, removeVolume, false)
|
||||
return r.removeContainer(ctx, c, force, removeVolume, false, timeout)
|
||||
}
|
||||
|
||||
// Internal function to remove a container.
|
||||
|
@ -546,7 +546,7 @@ func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool,
|
|||
// removePod is used only when removing pods. It instructs Podman to ignore
|
||||
// infra container protections, and *not* remove from the database (as pod
|
||||
// remove will handle that).
|
||||
func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, removeVolume, removePod bool) error {
|
||||
func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, removeVolume, removePod bool, timeout *uint) error {
|
||||
if !c.valid {
|
||||
if ok, _ := r.state.HasContainer(c.ID()); !ok {
|
||||
// Container probably already removed
|
||||
|
@ -642,9 +642,13 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
|
|||
|
||||
// Check that the container's in a good state to be removed.
|
||||
if c.state.State == define.ContainerStateRunning {
|
||||
time := c.StopTimeout()
|
||||
if timeout != nil {
|
||||
time = *timeout
|
||||
}
|
||||
// Ignore ErrConmonDead - we couldn't retrieve the container's
|
||||
// exit code properly, but it's still stopped.
|
||||
if err := c.stop(c.StopTimeout()); err != nil && errors.Cause(err) != define.ErrConmonDead {
|
||||
if err := c.stop(time); err != nil && errors.Cause(err) != define.ErrConmonDead {
|
||||
return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID())
|
||||
}
|
||||
|
||||
|
@ -751,7 +755,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
|
|||
if !volume.Anonymous() {
|
||||
continue
|
||||
}
|
||||
if err := runtime.removeVolume(ctx, volume, false); err != nil && errors.Cause(err) != define.ErrNoSuchVolume {
|
||||
if err := runtime.removeVolume(ctx, volume, false, timeout); err != nil && errors.Cause(err) != define.ErrNoSuchVolume {
|
||||
logrus.Errorf("Cleanup volume (%s): %v", v, err)
|
||||
}
|
||||
}
|
||||
|
@ -782,6 +786,7 @@ func (r *Runtime) EvictContainer(ctx context.Context, idOrName string, removeVol
|
|||
// remove will handle that).
|
||||
func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVolume bool) (string, error) {
|
||||
var err error
|
||||
var timeout *uint
|
||||
|
||||
if !r.valid {
|
||||
return "", define.ErrRuntimeStopped
|
||||
|
@ -797,7 +802,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
|
|||
if err == nil {
|
||||
logrus.Infof("Container %s successfully retrieved from state, attempting normal removal", id)
|
||||
// Assume force = true for the evict case
|
||||
err = r.removeContainer(ctx, tmpCtr, true, removeVolume, false)
|
||||
err = r.removeContainer(ctx, tmpCtr, true, removeVolume, false, timeout)
|
||||
if !tmpCtr.valid {
|
||||
// If the container is marked invalid, remove succeeded
|
||||
// in kicking it out of the state - no need to continue.
|
||||
|
@ -892,7 +897,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
|
|||
if !volume.Anonymous() {
|
||||
continue
|
||||
}
|
||||
if err := r.removeVolume(ctx, volume, false); err != nil && err != define.ErrNoSuchVolume && err != define.ErrVolumeBeingUsed {
|
||||
if err := r.removeVolume(ctx, volume, false, timeout); err != nil && err != define.ErrNoSuchVolume && err != define.ErrVolumeBeingUsed {
|
||||
logrus.Errorf("Cleanup volume (%s): %v", v, err)
|
||||
}
|
||||
}
|
||||
|
@ -1089,7 +1094,8 @@ func (r *Runtime) PruneContainers(filterFuncs []ContainerFilter) ([]*reports.Pru
|
|||
preports = append(preports, report)
|
||||
continue
|
||||
}
|
||||
err = r.RemoveContainer(context.Background(), c, false, false)
|
||||
var time *uint
|
||||
err = r.RemoveContainer(context.Background(), c, false, false, time)
|
||||
if err != nil {
|
||||
report.Err = err
|
||||
} else {
|
||||
|
|
|
@ -37,7 +37,8 @@ func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage
|
|||
}
|
||||
for _, ctr := range ctrs {
|
||||
if ctr.config.RootfsImageID == imageID {
|
||||
if err := r.removeContainer(ctx, ctr, true, false, false); err != nil {
|
||||
var timeout *uint
|
||||
if err := r.removeContainer(ctx, ctr, true, false, false, timeout); err != nil {
|
||||
return errors.Wrapf(err, "error removing image %s: container %s using image could not be removed", imageID, ctr.ID())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -26,7 +26,7 @@ type PodFilter func(*Pod) bool
|
|||
// If force is specified with removeCtrs, all containers will be stopped before
|
||||
// being removed
|
||||
// Otherwise, the pod will not be removed if any containers are running
|
||||
func (r *Runtime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool) error {
|
||||
func (r *Runtime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool, timeout *uint) error {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
|
@ -45,7 +45,7 @@ func (r *Runtime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool)
|
|||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
return r.removePod(ctx, p, removeCtrs, force)
|
||||
return r.removePod(ctx, p, removeCtrs, force, timeout)
|
||||
}
|
||||
|
||||
// GetPod retrieves a pod by its ID
|
||||
|
@ -196,7 +196,8 @@ func (r *Runtime) PrunePods(ctx context.Context) (map[string]error, error) {
|
|||
return response, nil
|
||||
}
|
||||
for _, pod := range pods {
|
||||
err := r.removePod(context.TODO(), pod, true, false)
|
||||
var timeout *uint
|
||||
err := r.removePod(context.TODO(), pod, true, false, timeout)
|
||||
response[pod.ID()] = err
|
||||
}
|
||||
return response, nil
|
||||
|
|
|
@ -168,7 +168,7 @@ func (r *Runtime) SavePod(pod *Pod) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool) error {
|
||||
func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, timeout *uint) error {
|
||||
if err := p.updatePod(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -255,7 +255,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
|
|||
ctrNamedVolumes[vol.Name] = vol
|
||||
}
|
||||
|
||||
if err := r.removeContainer(ctx, ctr, force, false, true); err != nil {
|
||||
if err := r.removeContainer(ctx, ctr, force, false, true, timeout); err != nil {
|
||||
if removalErr == nil {
|
||||
removalErr = err
|
||||
} else {
|
||||
|
@ -281,7 +281,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
|
|||
if !volume.Anonymous() {
|
||||
continue
|
||||
}
|
||||
if err := r.removeVolume(ctx, volume, false); err != nil {
|
||||
if err := r.removeVolume(ctx, volume, false, timeout); err != nil {
|
||||
if errors.Cause(err) == define.ErrNoSuchVolume || errors.Cause(err) == define.ErrVolumeRemoved {
|
||||
continue
|
||||
}
|
||||
|
|
|
@ -21,7 +21,7 @@ type VolumeCreateOption func(*Volume) error
|
|||
type VolumeFilter func(*Volume) bool
|
||||
|
||||
// RemoveVolume removes a volumes
|
||||
func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error {
|
||||
func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool, timeout *uint) error {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
|
@ -36,7 +36,7 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error
|
|||
return nil
|
||||
}
|
||||
}
|
||||
return r.removeVolume(ctx, v, force)
|
||||
return r.removeVolume(ctx, v, force, timeout)
|
||||
}
|
||||
|
||||
// GetVolume retrieves a volume given its full name.
|
||||
|
@ -149,7 +149,8 @@ func (r *Runtime) PruneVolumes(ctx context.Context, filterFuncs []VolumeFilter)
|
|||
}
|
||||
report.Size = volSize
|
||||
report.Id = vol.Name()
|
||||
if err := r.RemoveVolume(ctx, vol, false); err != nil {
|
||||
var timeout *uint
|
||||
if err := r.RemoveVolume(ctx, vol, false, timeout); err != nil {
|
||||
if errors.Cause(err) != define.ErrVolumeBeingUsed && errors.Cause(err) != define.ErrVolumeRemoved {
|
||||
report.Err = err
|
||||
} else {
|
||||
|
|
|
@ -189,7 +189,7 @@ func makeVolumeInPluginIfNotExist(name string, options map[string]string, plugin
|
|||
}
|
||||
|
||||
// removeVolume removes the specified volume from state as well tears down its mountpoint and storage
|
||||
func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error {
|
||||
func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeout *uint) error {
|
||||
if !v.valid {
|
||||
if ok, _ := r.state.HasVolume(v.Name()); !ok {
|
||||
return nil
|
||||
|
@ -234,7 +234,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
|
|||
// containers?
|
||||
// I'm inclined to say no, in case someone accidentally
|
||||
// wipes a container they're using...
|
||||
if err := r.removeContainer(ctx, ctr, false, false, false); err != nil {
|
||||
if err := r.removeContainer(ctx, ctr, false, false, false, timeout); err != nil {
|
||||
return errors.Wrapf(err, "error removing container %s that depends on volume %s", ctr.ID(), v.Name())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -34,11 +34,12 @@ import (
|
|||
func RemoveContainer(w http.ResponseWriter, r *http.Request) {
|
||||
decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
|
||||
query := struct {
|
||||
Force bool `schema:"force"`
|
||||
Ignore bool `schema:"ignore"`
|
||||
Link bool `schema:"link"`
|
||||
DockerVolumes bool `schema:"v"`
|
||||
LibpodVolumes bool `schema:"volumes"`
|
||||
Force bool `schema:"force"`
|
||||
Ignore bool `schema:"ignore"`
|
||||
Link bool `schema:"link"`
|
||||
Timeout *uint `schema:"timeout"`
|
||||
DockerVolumes bool `schema:"v"`
|
||||
LibpodVolumes bool `schema:"volumes"`
|
||||
}{
|
||||
// override any golang type defaults
|
||||
}
|
||||
|
@ -55,6 +56,7 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
if utils.IsLibpodRequest(r) {
|
||||
options.Volumes = query.LibpodVolumes
|
||||
options.Timeout = query.Timeout
|
||||
} else {
|
||||
if query.Link {
|
||||
utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
|
||||
|
|
|
@ -245,7 +245,8 @@ func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
|
|||
ic := abi.ContainerEngine{Libpod: runtime}
|
||||
|
||||
query := struct {
|
||||
Force bool `schema:"force"`
|
||||
Force bool `schema:"force"`
|
||||
Timeout *uint `schema:"timeout"`
|
||||
}{
|
||||
// This is where you can override the golang default value for one of fields
|
||||
}
|
||||
|
@ -257,7 +258,8 @@ func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
options := entities.NetworkRmOptions{
|
||||
Force: query.Force,
|
||||
Force: query.Force,
|
||||
Timeout: query.Timeout,
|
||||
}
|
||||
|
||||
name := utils.GetName(r)
|
||||
|
|
|
@ -213,7 +213,8 @@ func RemoveVolume(w http.ResponseWriter, r *http.Request) {
|
|||
decoder = r.Context().Value(api.DecoderKey).(*schema.Decoder)
|
||||
)
|
||||
query := struct {
|
||||
Force bool `schema:"force"`
|
||||
Force bool `schema:"force"`
|
||||
Timeout *uint `schema:"timeout"`
|
||||
}{
|
||||
// override any golang type defaults
|
||||
}
|
||||
|
@ -239,7 +240,7 @@ func RemoveVolume(w http.ResponseWriter, r *http.Request) {
|
|||
vol, err := runtime.LookupVolume(name)
|
||||
if err == nil {
|
||||
// As above, we do not pass `force` from the query parameters here
|
||||
if err := runtime.RemoveVolume(r.Context(), vol, false); err != nil {
|
||||
if err := runtime.RemoveVolume(r.Context(), vol, false, query.Timeout); err != nil {
|
||||
if errors.Cause(err) == define.ErrVolumeBeingUsed {
|
||||
utils.Error(w, "volumes being used", http.StatusConflict, err)
|
||||
} else {
|
||||
|
|
|
@ -246,7 +246,8 @@ func PodDelete(w http.ResponseWriter, r *http.Request) {
|
|||
decoder = r.Context().Value(api.DecoderKey).(*schema.Decoder)
|
||||
)
|
||||
query := struct {
|
||||
Force bool `schema:"force"`
|
||||
Force bool `schema:"force"`
|
||||
Timeout *uint `schema:"timeout"`
|
||||
}{
|
||||
// override any golang type defaults
|
||||
}
|
||||
|
@ -262,7 +263,7 @@ func PodDelete(w http.ResponseWriter, r *http.Request) {
|
|||
utils.PodNotFound(w, name, err)
|
||||
return
|
||||
}
|
||||
if err := runtime.RemovePod(r.Context(), pod, true, query.Force); err != nil {
|
||||
if err := runtime.RemovePod(r.Context(), pod, true, query.Force, query.Timeout); err != nil {
|
||||
utils.Error(w, "Something went wrong", http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
|
|
|
@ -169,7 +169,8 @@ func RemoveVolume(w http.ResponseWriter, r *http.Request) {
|
|||
decoder = r.Context().Value(api.DecoderKey).(*schema.Decoder)
|
||||
)
|
||||
query := struct {
|
||||
Force bool `schema:"force"`
|
||||
Force bool `schema:"force"`
|
||||
Timeout *uint `schema:"timeout"`
|
||||
}{
|
||||
// override any golang type defaults
|
||||
}
|
||||
|
@ -185,7 +186,7 @@ func RemoveVolume(w http.ResponseWriter, r *http.Request) {
|
|||
utils.VolumeNotFound(w, name, err)
|
||||
return
|
||||
}
|
||||
if err := runtime.RemoveVolume(r.Context(), vol, query.Force); err != nil {
|
||||
if err := runtime.RemoveVolume(r.Context(), vol, query.Force, query.Timeout); err != nil {
|
||||
if errors.Cause(err) == define.ErrVolumeBeingUsed {
|
||||
utils.Error(w, "volumes being used", http.StatusConflict, err)
|
||||
return
|
||||
|
|
|
@ -132,6 +132,7 @@ type RemoveOptions struct {
|
|||
Ignore *bool
|
||||
Force *bool
|
||||
Volumes *bool
|
||||
Timeout *uint
|
||||
}
|
||||
|
||||
//go:generate go run ../generator/generator.go InspectOptions
|
||||
|
|
|
@ -61,3 +61,18 @@ func (o *RemoveOptions) GetVolumes() bool {
|
|||
}
|
||||
return *o.Volumes
|
||||
}
|
||||
|
||||
// WithTimeout set field Timeout to given value
|
||||
func (o *RemoveOptions) WithTimeout(value uint) *RemoveOptions {
|
||||
o.Timeout = &value
|
||||
return o
|
||||
}
|
||||
|
||||
// GetTimeout returns value of field Timeout
|
||||
func (o *RemoveOptions) GetTimeout() uint {
|
||||
if o.Timeout == nil {
|
||||
var z uint
|
||||
return z
|
||||
}
|
||||
return *o.Timeout
|
||||
}
|
||||
|
|
|
@ -40,7 +40,8 @@ type InspectOptions struct {
|
|||
// RemoveOptions are optional options for inspecting networks
|
||||
type RemoveOptions struct {
|
||||
// Force removes the network even if it is being used
|
||||
Force *bool
|
||||
Force *bool
|
||||
Timeout *uint
|
||||
}
|
||||
|
||||
//go:generate go run ../generator/generator.go ListOptions
|
||||
|
|
|
@ -31,3 +31,18 @@ func (o *RemoveOptions) GetForce() bool {
|
|||
}
|
||||
return *o.Force
|
||||
}
|
||||
|
||||
// WithTimeout set field Timeout to given value
|
||||
func (o *RemoveOptions) WithTimeout(value uint) *RemoveOptions {
|
||||
o.Timeout = &value
|
||||
return o
|
||||
}
|
||||
|
||||
// GetTimeout returns value of field Timeout
|
||||
func (o *RemoveOptions) GetTimeout() uint {
|
||||
if o.Timeout == nil {
|
||||
var z uint
|
||||
return z
|
||||
}
|
||||
return *o.Timeout
|
||||
}
|
||||
|
|
|
@ -68,7 +68,8 @@ type StatsOptions struct {
|
|||
//go:generate go run ../generator/generator.go RemoveOptions
|
||||
// RemoveOptions are optional options for removing pods
|
||||
type RemoveOptions struct {
|
||||
Force *bool
|
||||
Force *bool
|
||||
Timeout *uint
|
||||
}
|
||||
|
||||
//go:generate go run ../generator/generator.go ExistsOptions
|
||||
|
|
|
@ -31,3 +31,18 @@ func (o *RemoveOptions) GetForce() bool {
|
|||
}
|
||||
return *o.Force
|
||||
}
|
||||
|
||||
// WithTimeout set field Timeout to given value
|
||||
func (o *RemoveOptions) WithTimeout(value uint) *RemoveOptions {
|
||||
o.Timeout = &value
|
||||
return o
|
||||
}
|
||||
|
||||
// GetTimeout returns value of field Timeout
|
||||
func (o *RemoveOptions) GetTimeout() uint {
|
||||
if o.Timeout == nil {
|
||||
var z uint
|
||||
return z
|
||||
}
|
||||
return *o.Timeout
|
||||
}
|
||||
|
|
|
@ -28,7 +28,8 @@ type PruneOptions struct {
|
|||
// RemoveOptions are optional options for removing volumes
|
||||
type RemoveOptions struct {
|
||||
// Force removes the volume even if it is being used
|
||||
Force *bool
|
||||
Force *bool
|
||||
Timeout *uint
|
||||
}
|
||||
|
||||
//go:generate go run ../generator/generator.go ExistsOptions
|
||||
|
|
|
@ -31,3 +31,18 @@ func (o *RemoveOptions) GetForce() bool {
|
|||
}
|
||||
return *o.Force
|
||||
}
|
||||
|
||||
// WithTimeout set field Timeout to given value
|
||||
func (o *RemoveOptions) WithTimeout(value uint) *RemoveOptions {
|
||||
o.Timeout = &value
|
||||
return o
|
||||
}
|
||||
|
||||
// GetTimeout returns value of field Timeout
|
||||
func (o *RemoveOptions) GetTimeout() uint {
|
||||
if o.Timeout == nil {
|
||||
var z uint
|
||||
return z
|
||||
}
|
||||
return *o.Timeout
|
||||
}
|
||||
|
|
|
@ -132,6 +132,7 @@ type RmOptions struct {
|
|||
Force bool
|
||||
Ignore bool
|
||||
Latest bool
|
||||
Timeout *uint
|
||||
Volumes bool
|
||||
}
|
||||
|
||||
|
|
|
@ -27,7 +27,8 @@ type NetworkReloadReport struct {
|
|||
|
||||
// NetworkRmOptions describes options for removing networks
|
||||
type NetworkRmOptions struct {
|
||||
Force bool
|
||||
Force bool
|
||||
Timeout *uint
|
||||
}
|
||||
|
||||
//NetworkRmReport describes the results of network removal
|
||||
|
|
|
@ -95,10 +95,11 @@ type PodStartReport struct {
|
|||
}
|
||||
|
||||
type PodRmOptions struct {
|
||||
All bool
|
||||
Force bool
|
||||
Ignore bool
|
||||
Latest bool
|
||||
All bool
|
||||
Force bool
|
||||
Ignore bool
|
||||
Latest bool
|
||||
Timeout *uint
|
||||
}
|
||||
|
||||
type PodRmReport struct {
|
||||
|
|
|
@ -94,8 +94,9 @@ type VolumeConfigResponse struct {
|
|||
}
|
||||
|
||||
type VolumeRmOptions struct {
|
||||
All bool
|
||||
Force bool
|
||||
All bool
|
||||
Force bool
|
||||
Timeout *uint
|
||||
}
|
||||
|
||||
type VolumeRmReport struct {
|
||||
|
|
|
@ -283,7 +283,7 @@ func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []st
|
|||
}
|
||||
|
||||
func (ic *ContainerEngine) removeContainer(ctx context.Context, ctr *libpod.Container, options entities.RmOptions) error {
|
||||
err := ic.Libpod.RemoveContainer(ctx, ctr, options.Force, options.Volumes)
|
||||
err := ic.Libpod.RemoveContainer(ctx, ctr, options.Force, options.Volumes, options.Timeout)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -963,7 +963,8 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
|
|||
return &report, nil
|
||||
}
|
||||
if opts.Rm {
|
||||
if deleteError := ic.Libpod.RemoveContainer(ctx, ctr, true, false); deleteError != nil {
|
||||
var timeout *uint
|
||||
if deleteError := ic.Libpod.RemoveContainer(ctx, ctr, true, false, timeout); deleteError != nil {
|
||||
logrus.Debugf("unable to remove container %s after failing to start and attach to it", ctr.ID())
|
||||
}
|
||||
}
|
||||
|
@ -977,7 +978,8 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
|
|||
}
|
||||
report.ExitCode = ic.GetContainerExitCode(ctx, ctr)
|
||||
if opts.Rm && !ctr.ShouldRestart(ctx) {
|
||||
if err := ic.Libpod.RemoveContainer(ctx, ctr, false, true); err != nil {
|
||||
var timeout *uint
|
||||
if err := ic.Libpod.RemoveContainer(ctx, ctr, false, true, timeout); err != nil {
|
||||
if errors.Cause(err) == define.ErrNoSuchCtr ||
|
||||
errors.Cause(err) == define.ErrCtrRemoved {
|
||||
logrus.Infof("Container %s was already removed, skipping --rm", ctr.ID())
|
||||
|
@ -1082,7 +1084,8 @@ func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st
|
|||
}
|
||||
|
||||
if options.Remove && !ctr.ShouldRestart(ctx) {
|
||||
err = ic.Libpod.RemoveContainer(ctx, ctr, false, true)
|
||||
var timeout *uint
|
||||
err = ic.Libpod.RemoveContainer(ctx, ctr, false, true, timeout)
|
||||
if err != nil {
|
||||
report.RmErr = errors.Wrapf(err, "failed to cleanup and remove container %v", ctr.ID())
|
||||
}
|
||||
|
|
|
@ -92,7 +92,8 @@ func (ic *ContainerEngine) ContainerRunlabel(ctx context.Context, label string,
|
|||
}
|
||||
} else {
|
||||
logrus.Debugf("Runlabel --replace option given. Container %s will be deleted. The new container will be named %s", ctr.ID(), name)
|
||||
if err := ic.Libpod.RemoveContainer(ctx, ctr, true, false); err != nil {
|
||||
var timeout *uint
|
||||
if err := ic.Libpod.RemoveContainer(ctx, ctr, true, false, timeout); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
|
@ -91,10 +91,10 @@ func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, o
|
|||
if err != nil {
|
||||
return reports, err
|
||||
}
|
||||
if err := ic.Libpod.RemovePod(ctx, pod, true, true); err != nil {
|
||||
if err := ic.Libpod.RemovePod(ctx, pod, true, true, options.Timeout); err != nil {
|
||||
return reports, err
|
||||
}
|
||||
} else if err := ic.Libpod.RemoveContainer(ctx, c, true, true); err != nil && errors.Cause(err) != define.ErrNoSuchCtr {
|
||||
} else if err := ic.Libpod.RemoveContainer(ctx, c, true, true, options.Timeout); err != nil && errors.Cause(err) != define.ErrNoSuchCtr {
|
||||
return reports, err
|
||||
}
|
||||
}
|
||||
|
|
|
@ -259,7 +259,7 @@ func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, optio
|
|||
reports := make([]*entities.PodRmReport, 0, len(pods))
|
||||
for _, p := range pods {
|
||||
report := entities.PodRmReport{Id: p.ID()}
|
||||
err := ic.Libpod.RemovePod(ctx, p, true, options.Force)
|
||||
err := ic.Libpod.RemovePod(ctx, p, true, options.Force, options.Timeout)
|
||||
if err != nil {
|
||||
report.Err = err
|
||||
}
|
||||
|
|
|
@ -66,7 +66,7 @@ func (ic *ContainerEngine) VolumeRm(ctx context.Context, namesOrIds []string, op
|
|||
}
|
||||
for _, vol := range vols {
|
||||
reports = append(reports, &entities.VolumeRmReport{
|
||||
Err: ic.Libpod.RemoveVolume(ctx, vol, opts.Force),
|
||||
Err: ic.Libpod.RemoveVolume(ctx, vol, opts.Force, opts.Timeout),
|
||||
Id: vol.Name(),
|
||||
})
|
||||
}
|
||||
|
|
|
@ -185,7 +185,9 @@ func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []st
|
|||
func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string, opts entities.RmOptions) ([]*entities.RmReport, error) {
|
||||
// TODO there is no endpoint for container eviction. Need to discuss
|
||||
options := new(containers.RemoveOptions).WithForce(opts.Force).WithVolumes(opts.Volumes).WithIgnore(opts.Ignore)
|
||||
|
||||
if opts.Timeout != nil {
|
||||
options = options.WithTimeout(*opts.Timeout)
|
||||
}
|
||||
if opts.All {
|
||||
ctrs, err := getContainersByContext(ic.ClientCtx, opts.All, opts.Ignore, namesOrIds)
|
||||
if err != nil {
|
||||
|
|
|
@ -47,6 +47,9 @@ func (ic *ContainerEngine) NetworkReload(ctx context.Context, names []string, op
|
|||
func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, opts entities.NetworkRmOptions) ([]*entities.NetworkRmReport, error) {
|
||||
reports := make([]*entities.NetworkRmReport, 0, len(namesOrIds))
|
||||
options := new(network.RemoveOptions).WithForce(opts.Force)
|
||||
if opts.Timeout != nil {
|
||||
options = options.WithTimeout(*opts.Timeout)
|
||||
}
|
||||
for _, name := range namesOrIds {
|
||||
response, err := network.Remove(ic.ClientCtx, name, options)
|
||||
if err != nil {
|
||||
|
|
|
@ -169,6 +169,9 @@ func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, opts
|
|||
}
|
||||
reports := make([]*entities.PodRmReport, 0, len(foundPods))
|
||||
options := new(pods.RemoveOptions).WithForce(opts.Force)
|
||||
if opts.Timeout != nil {
|
||||
options = options.WithTimeout(*opts.Timeout)
|
||||
}
|
||||
for _, p := range foundPods {
|
||||
response, err := pods.Remove(ic.ClientCtx, p.Id, options)
|
||||
if err != nil {
|
||||
|
|
|
@ -31,6 +31,9 @@ func (ic *ContainerEngine) VolumeRm(ctx context.Context, namesOrIds []string, op
|
|||
reports := make([]*entities.VolumeRmReport, 0, len(namesOrIds))
|
||||
for _, id := range namesOrIds {
|
||||
options := new(volumes.RemoveOptions).WithForce(opts.Force)
|
||||
if opts.Timeout != nil {
|
||||
options = options.WithTimeout(*opts.Timeout)
|
||||
}
|
||||
reports = append(reports, &entities.VolumeRmReport{
|
||||
Err: volumes.Remove(ic.ClientCtx, id, options),
|
||||
Id: id,
|
||||
|
|
|
@ -159,7 +159,7 @@ var _ = Describe("Podman checkpoint", func() {
|
|||
Expect(result).Should(Exit(2))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
|
||||
|
||||
result = podmanTest.Podman([]string{"rm", "-f", cid})
|
||||
result = podmanTest.Podman([]string{"rm", "-t", "1", "-f", cid})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
|
||||
|
@ -197,7 +197,7 @@ var _ = Describe("Podman checkpoint", func() {
|
|||
Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
|
||||
Expect(podmanTest.GetContainerStatus()).To(Not(ContainSubstring("Exited")))
|
||||
|
||||
result = podmanTest.Podman([]string{"rm", "-fa"})
|
||||
result = podmanTest.Podman([]string{"rm", "-t", "0", "-fa"})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
|
||||
|
@ -234,7 +234,7 @@ var _ = Describe("Podman checkpoint", func() {
|
|||
Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
|
||||
Expect(podmanTest.GetContainerStatus()).To(Not(ContainSubstring("Exited")))
|
||||
|
||||
result = podmanTest.Podman([]string{"rm", "-fa"})
|
||||
result = podmanTest.Podman([]string{"rm", "-t", "0", "-fa"})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
|
||||
|
@ -289,7 +289,7 @@ var _ = Describe("Podman checkpoint", func() {
|
|||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
|
||||
Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
|
||||
|
||||
result = podmanTest.Podman([]string{"rm", "-fa"})
|
||||
result = podmanTest.Podman([]string{"rm", "-t", "0", "-fa"})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
|
||||
|
@ -329,7 +329,7 @@ var _ = Describe("Podman checkpoint", func() {
|
|||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
|
||||
Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
|
||||
|
||||
result = podmanTest.Podman([]string{"rm", "-fa"})
|
||||
result = podmanTest.Podman([]string{"rm", "-t", "0", "-fa"})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
|
||||
|
@ -377,7 +377,7 @@ var _ = Describe("Podman checkpoint", func() {
|
|||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
|
||||
Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
|
||||
|
||||
result = podmanTest.Podman([]string{"rm", "-fa"})
|
||||
result = podmanTest.Podman([]string{"rm", "-t", "0", "-fa"})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
|
||||
|
@ -425,7 +425,7 @@ var _ = Describe("Podman checkpoint", func() {
|
|||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2))
|
||||
Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
|
||||
|
||||
result = podmanTest.Podman([]string{"rm", "-fa"})
|
||||
result = podmanTest.Podman([]string{"rm", "-t", "0", "-fa"})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
|
||||
|
@ -525,7 +525,7 @@ var _ = Describe("Podman checkpoint", func() {
|
|||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
|
||||
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
|
||||
|
||||
result = podmanTest.Podman([]string{"rm", "-fa"})
|
||||
result = podmanTest.Podman([]string{"rm", "--time", "0", "-fa"})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
|
||||
|
@ -753,7 +753,7 @@ var _ = Describe("Podman checkpoint", func() {
|
|||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
|
||||
Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
|
||||
|
||||
result = podmanTest.Podman([]string{"rm", "-fa"})
|
||||
result = podmanTest.Podman([]string{"rm", "-t", "0", "-fa"})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
|
||||
|
@ -916,7 +916,7 @@ var _ = Describe("Podman checkpoint", func() {
|
|||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
|
||||
Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Exited"))
|
||||
|
||||
result = podmanTest.Podman([]string{"rm", "-f", cid})
|
||||
result = podmanTest.Podman([]string{"rm", "-t", "0", "-f", cid})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
|
||||
|
@ -976,7 +976,7 @@ var _ = Describe("Podman checkpoint", func() {
|
|||
}
|
||||
conn.Close()
|
||||
|
||||
result = podmanTest.Podman([]string{"rm", "-fa"})
|
||||
result = podmanTest.Podman([]string{"rm", "-t", "0", "-fa"})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
|
||||
|
|
|
@ -566,7 +566,7 @@ var _ = Describe("Podman generate kube", func() {
|
|||
Expect(found).To(BeTrue())
|
||||
Expect(val).To(HaveSuffix("z"))
|
||||
|
||||
rm := podmanTest.Podman([]string{"pod", "rm", "-f", "test1"})
|
||||
rm := podmanTest.Podman([]string{"pod", "rm", "-t", "0", "-f", "test1"})
|
||||
rm.WaitWithDefaultTimeout()
|
||||
Expect(rm).Should(Exit(0))
|
||||
|
||||
|
@ -619,7 +619,7 @@ var _ = Describe("Podman generate kube", func() {
|
|||
kube.WaitWithDefaultTimeout()
|
||||
Expect(kube).Should(Exit(0))
|
||||
|
||||
rm := podmanTest.Podman([]string{"pod", "rm", "-f", "test1"})
|
||||
rm := podmanTest.Podman([]string{"pod", "rm", "-t", "0", "-f", "test1"})
|
||||
rm.WaitWithDefaultTimeout()
|
||||
Expect(rm).Should(Exit(0))
|
||||
|
||||
|
@ -648,7 +648,7 @@ var _ = Describe("Podman generate kube", func() {
|
|||
kube.WaitWithDefaultTimeout()
|
||||
Expect(kube).Should(Exit(0))
|
||||
|
||||
rm := podmanTest.Podman([]string{"pod", "rm", "-f", podName})
|
||||
rm := podmanTest.Podman([]string{"pod", "rm", "-t", "0", "-f", podName})
|
||||
rm.WaitWithDefaultTimeout()
|
||||
Expect(rm).Should(Exit(0))
|
||||
|
||||
|
@ -905,7 +905,7 @@ ENTRYPOINT /bin/sleep`
|
|||
Expect(kube).Should(Exit(0))
|
||||
|
||||
// Remove the pod so play can recreate it.
|
||||
kube = podmanTest.Podman([]string{"pod", "rm", "-f", "testpod"})
|
||||
kube = podmanTest.Podman([]string{"pod", "rm", "-t", "0", "-f", "testpod"})
|
||||
kube.WaitWithDefaultTimeout()
|
||||
Expect(kube).Should(Exit(0))
|
||||
|
||||
|
|
|
@ -227,7 +227,7 @@ var _ = Describe("Podman logs", func() {
|
|||
Expect(inspect.ErrorToString()).To(ContainSubstring("no such container"))
|
||||
}
|
||||
|
||||
results = podmanTest.Podman([]string{"rm", "-f", containerName})
|
||||
results = podmanTest.Podman([]string{"rm", "--time", "0", "-f", containerName})
|
||||
results.WaitWithDefaultTimeout()
|
||||
Expect(results).To(Exit(0))
|
||||
})
|
||||
|
|
|
@ -200,7 +200,7 @@ var _ = Describe("Podman network connect and disconnect", func() {
|
|||
Expect(exec).Should(Exit(0))
|
||||
|
||||
// make sure no logrus errors are shown https://github.com/containers/podman/issues/9602
|
||||
rm := podmanTest.Podman([]string{"rm", "-f", "test"})
|
||||
rm := podmanTest.Podman([]string{"rm", "--time=0", "-f", "test"})
|
||||
rm.WaitWithDefaultTimeout()
|
||||
Expect(rm).Should(Exit(0))
|
||||
Expect(rm.ErrorToString()).To(Equal(""))
|
||||
|
|
|
@ -272,7 +272,7 @@ var _ = Describe("Podman network", func() {
|
|||
Expect(strings.HasPrefix(net.IPAddress, "10.50.50.")).To(BeTrue())
|
||||
|
||||
// Necessary to ensure the CNI network is removed cleanly
|
||||
rmAll := podmanTest.Podman([]string{"rm", "-f", ctrName})
|
||||
rmAll := podmanTest.Podman([]string{"rm", "-t", "0", "-f", ctrName})
|
||||
rmAll.WaitWithDefaultTimeout()
|
||||
Expect(rmAll).Should(Exit(0))
|
||||
})
|
||||
|
@ -309,7 +309,7 @@ var _ = Describe("Podman network", func() {
|
|||
Expect(net2.NetworkID).To(Equal(netName2))
|
||||
|
||||
// Necessary to ensure the CNI network is removed cleanly
|
||||
rmAll := podmanTest.Podman([]string{"rm", "-f", ctrName})
|
||||
rmAll := podmanTest.Podman([]string{"rm", "-t", "0", "-f", ctrName})
|
||||
rmAll.WaitWithDefaultTimeout()
|
||||
Expect(rmAll).Should(Exit(0))
|
||||
})
|
||||
|
@ -350,7 +350,7 @@ var _ = Describe("Podman network", func() {
|
|||
Expect(strings.HasPrefix(net2.IPAddress, "10.50.51.")).To(BeTrue())
|
||||
|
||||
// Necessary to ensure the CNI network is removed cleanly
|
||||
rmAll := podmanTest.Podman([]string{"rm", "-f", ctrName})
|
||||
rmAll := podmanTest.Podman([]string{"rm", "-t", "0", "-f", ctrName})
|
||||
rmAll.WaitWithDefaultTimeout()
|
||||
Expect(rmAll).Should(Exit(0))
|
||||
})
|
||||
|
@ -404,7 +404,7 @@ var _ = Describe("Podman network", func() {
|
|||
session.WaitWithDefaultTimeout()
|
||||
Expect(session).Should(Exit(2))
|
||||
|
||||
session = podmanTest.Podman([]string{"network", "rm", "--force", netName})
|
||||
session = podmanTest.Podman([]string{"network", "rm", "-t", "0", "--force", netName})
|
||||
session.WaitWithDefaultTimeout()
|
||||
Expect(session).Should(Exit(0))
|
||||
|
||||
|
|
|
@ -168,7 +168,7 @@ var _ = Describe("Podman pause", func() {
|
|||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
|
||||
Expect(strings.ToLower(podmanTest.GetContainerStatus())).To(ContainSubstring(pausedState))
|
||||
|
||||
result = podmanTest.Podman([]string{"rm", "--force", cid})
|
||||
result = podmanTest.Podman([]string{"rm", "-t", "0", "--force", cid})
|
||||
result.WaitWithDefaultTimeout()
|
||||
|
||||
Expect(result).Should(Exit(0))
|
||||
|
@ -205,7 +205,7 @@ var _ = Describe("Podman pause", func() {
|
|||
Expect(result).Should(Exit(2))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
|
||||
|
||||
result = podmanTest.Podman([]string{"rm", "-f", cid})
|
||||
result = podmanTest.Podman([]string{"rm", "-t", "0", "-f", cid})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
|
||||
|
|
|
@ -107,7 +107,7 @@ var _ = Describe("Podman pod rm", func() {
|
|||
session.WaitWithDefaultTimeout()
|
||||
Expect(session).Should(Exit(0))
|
||||
|
||||
result := podmanTest.Podman([]string{"pod", "rm", "-f", podid})
|
||||
result := podmanTest.Podman([]string{"pod", "rm", "-t", "0", "-f", podid})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
|
||||
|
@ -179,7 +179,7 @@ var _ = Describe("Podman pod rm", func() {
|
|||
session.WaitWithDefaultTimeout()
|
||||
Expect(session).Should(Exit(0))
|
||||
|
||||
result := podmanTest.Podman([]string{"pod", "rm", "-fa"})
|
||||
result := podmanTest.Podman([]string{"pod", "rm", "-t", "0", "-fa"})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
|
||||
|
@ -225,7 +225,7 @@ var _ = Describe("Podman pod rm", func() {
|
|||
session.WaitWithDefaultTimeout()
|
||||
Expect(session).Should(Exit(0))
|
||||
|
||||
session = podmanTest.Podman([]string{"pod", "rm", "--force", "--ignore", "bogus", "test1"})
|
||||
session = podmanTest.Podman([]string{"pod", "rm", "-t", "0", "--force", "--ignore", "bogus", "test1"})
|
||||
session.WaitWithDefaultTimeout()
|
||||
Expect(session).Should(Exit(0))
|
||||
|
||||
|
@ -257,7 +257,7 @@ var _ = Describe("Podman pod rm", func() {
|
|||
Expect(session).Should(Exit(0))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) // infra+top
|
||||
|
||||
session = podmanTest.Podman([]string{"pod", "rm", "--pod-id-file", tmpFile, "--force"})
|
||||
session = podmanTest.Podman([]string{"pod", "rm", "-t", "0", "--pod-id-file", tmpFile, "--force"})
|
||||
session.WaitWithDefaultTimeout()
|
||||
Expect(session).Should(Exit(0))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0))
|
||||
|
@ -294,7 +294,7 @@ var _ = Describe("Podman pod rm", func() {
|
|||
Expect(session).Should(Exit(0))
|
||||
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(20)) // 10*(infra+top)
|
||||
|
||||
cmd = []string{"pod", "rm", "--force"}
|
||||
cmd = []string{"pod", "rm", "--time=0", "--force"}
|
||||
cmd = append(cmd, podIDFiles...)
|
||||
session = podmanTest.Podman(cmd)
|
||||
session.WaitWithDefaultTimeout()
|
||||
|
|
|
@ -82,7 +82,7 @@ var _ = Describe("Podman rm", func() {
|
|||
Expect(session).Should(Exit(0))
|
||||
cid := session.OutputToString()
|
||||
|
||||
result := podmanTest.Podman([]string{"rm", "-f", cid})
|
||||
result := podmanTest.Podman([]string{"rm", "-t", "0", "-f", cid})
|
||||
result.WaitWithDefaultTimeout()
|
||||
Expect(result).Should(Exit(0))
|
||||
})
|
||||
|
@ -275,7 +275,7 @@ var _ = Describe("Podman rm", func() {
|
|||
session.WaitWithDefaultTimeout()
|
||||
Expect(session).Should(Exit(0))
|
||||
|
||||
session = podmanTest.Podman([]string{"rm", "--force", "--ignore", "bogus", "test1"})
|
||||
session = podmanTest.Podman([]string{"rm", "-t", "0", "--force", "--ignore", "bogus", "test1"})
|
||||
session.WaitWithDefaultTimeout()
|
||||
Expect(session).Should(Exit(0))
|
||||
|
||||
|
|
|
@ -680,7 +680,7 @@ var _ = Describe("Podman run networking", func() {
|
|||
Expect(run).Should(Exit(0))
|
||||
Expect(run.OutputToString()).To(ContainSubstring(ipAddr))
|
||||
|
||||
podrm := podmanTest.Podman([]string{"pod", "rm", "-f", podname})
|
||||
podrm := podmanTest.Podman([]string{"pod", "rm", "-t", "0", "-f", podname})
|
||||
podrm.WaitWithDefaultTimeout()
|
||||
Expect(podrm).Should(Exit(0))
|
||||
})
|
||||
|
|
|
@ -201,7 +201,7 @@ var _ = Describe("Podman run", func() {
|
|||
Expect(session).Should(Exit(0))
|
||||
Expect(session.OutputToString()).To(Equal(label1))
|
||||
|
||||
session = podmanTest.Podman([]string{"pod", "rm", podID, "--force"})
|
||||
session = podmanTest.Podman([]string{"pod", "rm", "-t", "0", podID, "--force"})
|
||||
session.WaitWithDefaultTimeout()
|
||||
Expect(session).Should(Exit(0))
|
||||
})
|
||||
|
@ -222,7 +222,7 @@ var _ = Describe("Podman run", func() {
|
|||
Expect(session).Should(Exit(0))
|
||||
Expect(session.OutputToString()).To(Not(Equal(label1)))
|
||||
|
||||
session = podmanTest.Podman([]string{"pod", "rm", podID, "--force"})
|
||||
session = podmanTest.Podman([]string{"pod", "rm", "-t", "0", podID, "--force"})
|
||||
session.WaitWithDefaultTimeout()
|
||||
Expect(session).Should(Exit(0))
|
||||
})
|
||||
|
|
|
@ -59,7 +59,7 @@ var _ = Describe("Podman volume rm", func() {
|
|||
Expect(session).Should(Exit(2))
|
||||
Expect(session.ErrorToString()).To(ContainSubstring(cid))
|
||||
|
||||
session = podmanTest.Podman([]string{"volume", "rm", "-f", "myvol"})
|
||||
session = podmanTest.Podman([]string{"volume", "rm", "-t", "0", "-f", "myvol"})
|
||||
session.WaitWithDefaultTimeout()
|
||||
Expect(session).Should(Exit(0))
|
||||
|
||||
|
|
|
@ -114,7 +114,7 @@ $s_after"
|
|||
|
||||
run_podman logs --since $after test
|
||||
is "$output" "$s_after"
|
||||
run_podman rm -f test
|
||||
run_podman rm -t 1 -f test
|
||||
}
|
||||
|
||||
@test "podman logs - since k8s-file" {
|
||||
|
@ -167,7 +167,7 @@ $s_after"
|
|||
|
||||
run_podman logs --until $after test
|
||||
is "$output" "$s_both" "podman logs --until after"
|
||||
run_podman rm -f test
|
||||
run_podman rm -t 0 -f test
|
||||
}
|
||||
|
||||
@test "podman logs - until k8s-file" {
|
||||
|
@ -195,7 +195,7 @@ function _log_test_follow() {
|
|||
$contentB
|
||||
$contentC" "logs -f on exitted container works"
|
||||
|
||||
run_podman rm -f $cname
|
||||
run_podman rm -t 0 -f $cname
|
||||
}
|
||||
|
||||
@test "podman logs - --follow k8s-file" {
|
||||
|
|
|
@ -138,7 +138,7 @@ EOF
|
|||
is "$output" "Error: container .* is mounted and cannot be removed without using force: container state improper" "podman rm <buildah container> without -f"
|
||||
|
||||
# With -f, we can remove it.
|
||||
run_podman rm -f "$cid"
|
||||
run_podman rm -t 0 -f "$cid"
|
||||
|
||||
run_podman ps --external -a
|
||||
is "${#lines[@]}" "1" "storage container has been removed"
|
||||
|
|
|
@ -30,7 +30,7 @@ load helpers
|
|||
is "$output" "Error: cannot remove container $cid as it is running - running or paused containers cannot be removed without force: container state improper" "error message"
|
||||
|
||||
# rm -f should succeed
|
||||
run_podman rm -f $cid
|
||||
run_podman rm -t 0 -f $cid
|
||||
}
|
||||
|
||||
@test "podman rm container from storage" {
|
||||
|
@ -70,7 +70,7 @@ load helpers
|
|||
# See https://github.com/containers/podman/issues/3795
|
||||
@test "podman rm -f" {
|
||||
rand=$(random_string 30)
|
||||
( sleep 3; run_podman rm -f $rand ) &
|
||||
( sleep 3; run_podman rm -t 0 -f $rand ) &
|
||||
run_podman 137 run --name $rand $IMAGE sleep 30
|
||||
}
|
||||
|
||||
|
|
|
@ -125,8 +125,7 @@ load helpers
|
|||
run_podman exec $cid find /image-mount/etc/
|
||||
|
||||
# Clean up
|
||||
run_podman stop -t 0 $cid
|
||||
run_podman rm -f $cid
|
||||
run_podman rm -t 0 -f $cid
|
||||
}
|
||||
|
||||
@test "podman run --mount image inspection" {
|
||||
|
@ -148,8 +147,7 @@ load helpers
|
|||
run_podman inspect --format "{{(index .Mounts 0).RW}}" $cid
|
||||
is "$output" "true" "inspect data includes image mount source"
|
||||
|
||||
run_podman stop -t 0 $cid
|
||||
run_podman rm -f $cid
|
||||
run_podman rm -t 0 -f $cid
|
||||
}
|
||||
|
||||
@test "podman mount external container - basic test" {
|
||||
|
|
|
@ -70,7 +70,7 @@ load helpers
|
|||
"copy into nonexistent path in container"
|
||||
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
|
||||
# CREATED container
|
||||
while read id dest dest_fullname description; do
|
||||
|
@ -80,7 +80,7 @@ load helpers
|
|||
run_podman exec cpcontainer cat $dest_fullname
|
||||
is "$output" "${randomcontent[$id]}" "$description (cp -> ctr:$dest)"
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
done < <(parse_table "$tests")
|
||||
|
||||
run_podman rmi -f $cpimage
|
||||
|
@ -99,7 +99,7 @@ load helpers
|
|||
run_podman exec cpcontainer cat /tmp/file
|
||||
is "$output" "${content}" "cp to running container's tmpfs"
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
|
||||
# CREATED container (with copy up)
|
||||
run_podman create --mount type=tmpfs,dst=/tmp --name cpcontainer $IMAGE sleep infinity
|
||||
|
@ -108,7 +108,7 @@ load helpers
|
|||
run_podman exec cpcontainer cat /tmp/file
|
||||
is "$output" "${content}" "cp to created container's tmpfs"
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
}
|
||||
|
||||
|
||||
|
@ -124,7 +124,7 @@ load helpers
|
|||
run_podman exec cpcontainer stat -c "%u" /tmp/hostfile
|
||||
is "$output" "$userid" "copied file is chowned to the container user"
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
}
|
||||
|
||||
@test "podman cp (-a=false) file from host to container and check ownership" {
|
||||
|
@ -143,7 +143,7 @@ load helpers
|
|||
run_podman exec cpcontainer stat -c "%u:%g" /tmp/a.txt
|
||||
is "$output" "1042:1043" "copied file retains uid/gid from the tar"
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
}
|
||||
|
||||
|
||||
|
@ -160,7 +160,7 @@ load helpers
|
|||
run_podman cp $srcdir/hostfile cpcontainer:/tmp/hostfile
|
||||
run_podman cp cpcontainer:/tmp/hostfile $srcdir/hostfile1
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
}
|
||||
|
||||
@test "podman cp file from container to host" {
|
||||
|
@ -206,7 +206,7 @@ load helpers
|
|||
rm $srcdir$dest_fullname
|
||||
done < <(parse_table "$tests")
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
|
||||
# Created container
|
||||
run_podman create --name cpcontainer --workdir=/srv $cpimage
|
||||
|
@ -219,7 +219,7 @@ load helpers
|
|||
is "$(< $srcdir$dest_fullname)" "${randomcontent[$id]}" "$description (cp ctr:$src to \$srcdir$dest)"
|
||||
rm $srcdir$dest_fullname
|
||||
done < <(parse_table "$tests")
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
|
||||
run_podman rmi -f $cpimage
|
||||
}
|
||||
|
@ -281,7 +281,7 @@ load helpers
|
|||
is "$output" "${randomcontent[$id]}" "$description (cp ctr:$src to /$dest)"
|
||||
done < <(parse_table "$tests")
|
||||
run_podman kill cpcontainer ${destcontainers[@]}
|
||||
run_podman rm -f cpcontainer ${destcontainers[@]}
|
||||
run_podman rm -t 0 -f cpcontainer ${destcontainers[@]}
|
||||
|
||||
# From CREATED container
|
||||
destcontainers=()
|
||||
|
@ -309,8 +309,7 @@ load helpers
|
|||
is "$output" "${randomcontent[$id]}" "$description (cp ctr:$src to /$dest)"
|
||||
done < <(parse_table "$tests")
|
||||
run_podman kill ${destcontainers[@]}
|
||||
run_podman rm -f cpcontainer ${destcontainers[@]}
|
||||
|
||||
run_podman rm -t 0 -f cpcontainer ${destcontainers[@]}
|
||||
run_podman rmi -f $cpimage
|
||||
}
|
||||
|
||||
|
@ -361,7 +360,7 @@ load helpers
|
|||
is "${lines[1]}" "${randomcontent[1]}" "$description (cp -> ctr:$dest)"
|
||||
done < <(parse_table "$tests")
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
|
||||
# CREATED container
|
||||
while read src dest dest_fullname description; do
|
||||
|
@ -376,13 +375,13 @@ load helpers
|
|||
is "${lines[0]}" "${randomcontent[0]}" "$description (cp -> ctr:$dest)"
|
||||
is "${lines[1]}" "${randomcontent[1]}" "$description (cp -> ctr:$dest)"
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
done < <(parse_table "$tests")
|
||||
|
||||
run_podman create --name cpcontainer --workdir=/srv $cpimage sleep infinity
|
||||
run_podman 125 cp $srcdir cpcontainer:/etc/os-release
|
||||
is "$output" "Error: destination must be a directory when copying a directory" "cannot copy directory to file"
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
|
||||
run_podman rmi -f $cpimage
|
||||
}
|
||||
|
@ -436,7 +435,7 @@ load helpers
|
|||
rm -rf $destdir/*
|
||||
done < <(parse_table "$tests")
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
|
||||
# CREATED container
|
||||
run_podman create --name cpcontainer --workdir=/srv $cpimage
|
||||
|
@ -459,7 +458,7 @@ load helpers
|
|||
touch $destdir/testfile
|
||||
run_podman 125 cp cpcontainer:/etc/ $destdir/testfile
|
||||
is "$output" "Error: destination must be a directory when copying a directory" "cannot copy directory to file"
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
|
||||
run_podman rmi -f $cpimage
|
||||
}
|
||||
|
@ -526,7 +525,7 @@ ${randomcontent[1]}" "$description"
|
|||
${randomcontent[1]}" "$description"
|
||||
done < <(parse_table "$tests")
|
||||
run_podman kill cpcontainer ${destcontainers[@]}
|
||||
run_podman rm -f cpcontainer ${destcontainers[@]}
|
||||
run_podman rm -t 0 -f cpcontainer ${destcontainers[@]}
|
||||
|
||||
# From CREATED container
|
||||
destcontainers=()
|
||||
|
@ -563,7 +562,7 @@ ${randomcontent[1]}" "$description"
|
|||
done < <(parse_table "$tests")
|
||||
|
||||
run_podman kill ${destcontainers[@]}
|
||||
run_podman rm -f cpcontainer ${destcontainers[@]}
|
||||
run_podman rm -t 0 -f cpcontainer ${destcontainers[@]}
|
||||
run_podman rmi -f $cpimage
|
||||
}
|
||||
|
||||
|
@ -595,7 +594,7 @@ ${randomcontent[1]}" "$description"
|
|||
is "${lines[1]}" "${randomcontent[1]}" "eval symlink - running container"
|
||||
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
run rm -rf $srcdir/dest
|
||||
|
||||
# CREATED container
|
||||
|
@ -604,7 +603,7 @@ ${randomcontent[1]}" "$description"
|
|||
run cat $destdir/dest/containerfile0 $destdir/dest/containerfile1
|
||||
is "${lines[0]}" "${randomcontent[0]}" "eval symlink - created container"
|
||||
is "${lines[1]}" "${randomcontent[1]}" "eval symlink - created container"
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
run_podman rmi $cpimage
|
||||
}
|
||||
|
||||
|
@ -638,7 +637,7 @@ ${randomcontent[1]}" "$description"
|
|||
run ls $volume1_mount
|
||||
is "$output" ""
|
||||
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
run_podman volume rm $volume1 $volume2
|
||||
}
|
||||
|
||||
|
@ -658,7 +657,7 @@ ${randomcontent[1]}" "$description"
|
|||
run_podman cp $srcdir/hostfile cpcontainer:/tmp/volume/mount
|
||||
is "$(< $mountdir/hostfile)" "This file should be in the mount"
|
||||
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
run_podman volume rm $volume
|
||||
}
|
||||
|
||||
|
@ -684,7 +683,7 @@ ${randomcontent[1]}" "$description"
|
|||
# cp no longer supports wildcarding
|
||||
run_podman 125 cp 'cpcontainer:/tmp/*' $dstdir
|
||||
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
}
|
||||
|
||||
|
||||
|
@ -708,7 +707,7 @@ ${randomcontent[1]}" "$description"
|
|||
# make sure there are no files in dstdir
|
||||
is "$(/bin/ls -1 $dstdir)" "" "incorrectly copied symlink from host"
|
||||
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
}
|
||||
|
||||
|
||||
|
@ -732,7 +731,7 @@ ${randomcontent[1]}" "$description"
|
|||
# make sure there are no files in dstdir
|
||||
is "$(/bin/ls -1 $dstdir)" "" "incorrectly copied symlink from host"
|
||||
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
}
|
||||
|
||||
|
||||
|
@ -752,7 +751,7 @@ ${randomcontent[1]}" "$description"
|
|||
# dstdir must be empty
|
||||
is "$(/bin/ls -1 $dstdir)" "" "incorrectly copied symlink from host"
|
||||
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
}
|
||||
|
||||
|
||||
|
@ -810,7 +809,7 @@ ${randomcontent[1]}" "$description"
|
|||
is "$output" "$rand_content3" "cp creates file named x"
|
||||
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
}
|
||||
|
||||
|
||||
|
@ -848,7 +847,7 @@ ${randomcontent[1]}" "$description"
|
|||
is "$output" "$rand_content" "Contents of file copied into container"
|
||||
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
}
|
||||
|
||||
|
||||
|
@ -897,7 +896,7 @@ ${randomcontent[1]}" "$description"
|
|||
is "$output" 'Error: destination must be a directory when copying from stdin'
|
||||
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
}
|
||||
|
||||
|
||||
|
@ -945,12 +944,12 @@ ${randomcontent[1]}" "$description"
|
|||
is "$(< $srcdir/tmp/empty.txt)" ""
|
||||
|
||||
run_podman kill cpcontainer
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
}
|
||||
|
||||
function teardown() {
|
||||
# In case any test fails, clean up the container we left behind
|
||||
run_podman rm -f cpcontainer
|
||||
run_podman rm -t 0 -f cpcontainer
|
||||
basic_teardown
|
||||
}
|
||||
|
||||
|
|
|
@ -969,7 +969,7 @@ function teardown() {
|
|||
# A timeout or other error in 'build' can leave behind stale images
|
||||
# that podman can't even see and which will cascade into subsequent
|
||||
# test failures. Try a last-ditch force-rm in cleanup, ignoring errors.
|
||||
run_podman '?' rm -a -f
|
||||
run_podman '?' rm -t 0 -a -f
|
||||
run_podman '?' rmi -f build_test
|
||||
|
||||
# Many of the tests above leave interim layers behind. Clean them up.
|
||||
|
|
|
@ -53,7 +53,7 @@ load helpers
|
|||
is "$(check_exec_pid)" "" "there isn't any exec pid hash file leak"
|
||||
|
||||
run_podman stop --time 1 $cid
|
||||
run_podman rm -f $cid
|
||||
run_podman rm -t 0 -f $cid
|
||||
}
|
||||
|
||||
# Issue #4785 - piping to exec statement - fixed in #4818
|
||||
|
@ -126,7 +126,7 @@ load helpers
|
|||
is "$output" "" "exec output is identical with the file"
|
||||
|
||||
# Clean up
|
||||
run_podman rm -f $cid
|
||||
run_podman rm -t 0 -f $cid
|
||||
}
|
||||
|
||||
# vim: filetype=sh
|
||||
|
|
|
@ -48,8 +48,7 @@ load helpers
|
|||
# would imply that the container never paused.
|
||||
is "$max_delta" "[3456]" "delta t between paused and restarted"
|
||||
|
||||
run_podman stop -t 0 $cname
|
||||
run_podman rm -f $cname
|
||||
run_podman rm -t 0 -f $cname
|
||||
|
||||
# Pause/unpause on nonexistent name or id - these should all fail
|
||||
run_podman 125 pause $cid
|
||||
|
@ -75,7 +74,7 @@ load helpers
|
|||
run_podman ps --format '{{.ID}} {{.Names}} {{.Status}}'
|
||||
is "$output" "${cid:0:12} $cname Up.*" "podman ps on resumed container"
|
||||
run_podman stop -t 0 $cname
|
||||
run_podman rm -f $cname
|
||||
run_podman rm -f notrunning
|
||||
run_podman rm -t 0 -f $cname
|
||||
run_podman rm -t 0 -f notrunning
|
||||
}
|
||||
# vim: filetype=sh
|
||||
|
|
|
@ -15,7 +15,7 @@ load helpers
|
|||
|
||||
run_podman run --name import $IMAGE sh -c "echo ${random_content} > /random.txt"
|
||||
run_podman export import -o $archive
|
||||
run_podman rm -f import
|
||||
run_podman rm -t 0 -f import
|
||||
|
||||
# Simple import
|
||||
run_podman import -q $archive
|
||||
|
@ -71,7 +71,7 @@ EOF
|
|||
|
||||
# Export built container as tarball
|
||||
run_podman export -o $PODMAN_TMPDIR/$b_cnt.tar $b_cnt
|
||||
run_podman rm -f $b_cnt
|
||||
run_podman rm -t 0 -f $b_cnt
|
||||
|
||||
# Modify tarball contents
|
||||
tar --delete -f $PODMAN_TMPDIR/$b_cnt.tar tmp/testfile1
|
||||
|
@ -102,7 +102,7 @@ EOF
|
|||
run_podman ps -a --filter name=$a_cnt --format '{{.Status}}'
|
||||
is "$output" "Exited (33) .*" "Exit by non-TERM/KILL"
|
||||
|
||||
run_podman rm -f $a_cnt
|
||||
run_podman rm -t 0 -f $a_cnt
|
||||
run_podman rmi $b_img $a_img
|
||||
|
||||
}
|
||||
|
|
|
@ -83,7 +83,7 @@ function setup() {
|
|||
fi
|
||||
|
||||
# Run the registry container.
|
||||
run_podman '?' ${PODMAN_LOGIN_ARGS} rm -f registry
|
||||
run_podman '?' ${PODMAN_LOGIN_ARGS} rm -t 0 -f registry
|
||||
run_podman ${PODMAN_LOGIN_ARGS} run -d \
|
||||
-p ${PODMAN_LOGIN_REGISTRY_PORT}:5000 \
|
||||
--name registry \
|
||||
|
|
|
@ -13,7 +13,7 @@ function setup() {
|
|||
|
||||
function teardown() {
|
||||
run_podman '?' rm -a --volumes
|
||||
run_podman '?' volume rm -a -f
|
||||
run_podman '?' volume rm -t 0 -a -f
|
||||
|
||||
basic_teardown
|
||||
}
|
||||
|
|
|
@ -4,8 +4,8 @@ load helpers
|
|||
|
||||
# This is a long ugly way to clean up pods and remove the pause image
|
||||
function teardown() {
|
||||
run_podman pod rm -f -a
|
||||
run_podman rm -f -a
|
||||
run_podman pod rm -f -t 0 -a
|
||||
run_podman rm -f -t 0 -a
|
||||
run_podman image list --format '{{.ID}} {{.Repository}}'
|
||||
while read id name; do
|
||||
if [[ "$name" =~ /pause ]]; then
|
||||
|
@ -57,7 +57,7 @@ function teardown() {
|
|||
fi
|
||||
|
||||
# Clean up
|
||||
run_podman pod rm -f $podid
|
||||
run_podman pod rm -f -t 0 $podid
|
||||
}
|
||||
|
||||
|
||||
|
@ -301,7 +301,7 @@ EOF
|
|||
|
||||
# Clean up
|
||||
run_podman rm $cid
|
||||
run_podman pod rm -f mypod
|
||||
run_podman pod rm -t 0 -f mypod
|
||||
run_podman rmi $infra_image
|
||||
|
||||
}
|
||||
|
|
|
@ -108,8 +108,7 @@ Log[-1].Output |
|
|||
is "$output" "unhealthy" "output from 'podman healthcheck run'"
|
||||
|
||||
# Clean up
|
||||
run_podman stop -t 0 healthcheck_c
|
||||
run_podman rm -f healthcheck_c
|
||||
run_podman rm -t 0 -f healthcheck_c
|
||||
run_podman rmi healthcheck_i
|
||||
}
|
||||
|
||||
|
|
|
@ -156,7 +156,7 @@ function service_cleanup() {
|
|||
is "$output" ".*Restart=on-failure.*" "on-failure:xx is parsed correclty"
|
||||
is "$output" ".*StartLimitBurst=42.*" "on-failure:xx is parsed correctly"
|
||||
|
||||
run_podman rm -f $cname $cname2 $cname3
|
||||
run_podman rm -t 0 -f $cname $cname2 $cname3
|
||||
}
|
||||
|
||||
function set_listen_env() {
|
||||
|
|
|
@ -78,7 +78,7 @@ function generate_service() {
|
|||
|
||||
(cd $UNIT_DIR; run_podman generate systemd --new --files --name $cname)
|
||||
echo "container-$cname" >> $SNAME_FILE
|
||||
run_podman rm -f $cname
|
||||
run_podman rm -t 0 -f $cname
|
||||
|
||||
systemctl daemon-reload
|
||||
systemctl start container-$cname
|
||||
|
|
|
@ -105,7 +105,7 @@ function check_label() {
|
|||
"'podman inspect' preserves all --security-opts"
|
||||
|
||||
run_podman exec myc touch /stop
|
||||
run_podman rm -f myc
|
||||
run_podman rm -t 0 -f myc
|
||||
}
|
||||
|
||||
# Sharing context between two containers not in a pod
|
||||
|
|
|
@ -61,7 +61,7 @@ function teardown() {
|
|||
run_podman run -it --name mystty $IMAGE stty size <$PODMAN_TEST_PTY
|
||||
is "$output" "$rows $cols$CR" "stty under podman run reads the correct dimensions"
|
||||
|
||||
run_podman rm -f mystty
|
||||
run_podman rm -t 0 -f mystty
|
||||
|
||||
# FIXME: the checks below are flaking a lot (see #10710).
|
||||
|
||||
|
@ -70,7 +70,7 @@ function teardown() {
|
|||
# run_podman exec -it mystty stty size <$PODMAN_TEST_PTY
|
||||
# is "$output" "$rows $cols" "stty under podman exec reads the correct dimensions"
|
||||
#
|
||||
# run_podman rm -f mystty
|
||||
# run_podman rm -t 0 -f mystty
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -192,9 +192,9 @@ load helpers
|
|||
is "$output" "Error: network name $mynetname already used: network already exists" \
|
||||
"Trying to create an already-existing network"
|
||||
|
||||
run_podman rm $cid
|
||||
run_podman rm -t 0 -f $cid
|
||||
run_podman network rm $mynetname
|
||||
run_podman 1 network rm $mynetname
|
||||
run_podman 1 network rm -f $mynetname
|
||||
}
|
||||
|
||||
@test "podman network reload" {
|
||||
|
@ -293,13 +293,13 @@ load helpers
|
|||
is "$output" "$random_1" "curl 127.0.0.1:/index.txt"
|
||||
|
||||
# cleanup the container
|
||||
run_podman rm -f $cid
|
||||
run_podman rm -t 0 -f $cid
|
||||
|
||||
# test that we cannot remove the default network
|
||||
run_podman 125 network rm -f $netname
|
||||
run_podman 125 network rm -t 0 -f $netname
|
||||
is "$output" "Error: default network $netname cannot be removed" "Remove default network"
|
||||
|
||||
run_podman network rm -f $netname2
|
||||
run_podman network rm -t 0 -f $netname2
|
||||
}
|
||||
|
||||
@test "podman rootless cni adds /usr/sbin to PATH" {
|
||||
|
@ -314,7 +314,7 @@ load helpers
|
|||
PATH=/usr/local/bin:/usr/bin run_podman run --rm --network $mynetname $IMAGE ip addr
|
||||
is "$output" ".*eth0.*" "Interface eth0 not found in ip addr output"
|
||||
|
||||
run_podman network rm -f $mynetname
|
||||
run_podman network rm -t 0 -f $mynetname
|
||||
}
|
||||
|
||||
@test "podman ipv6 in /etc/resolv.conf" {
|
||||
|
@ -357,7 +357,7 @@ load helpers
|
|||
die "resolv.conf contains a ipv6 nameserver"
|
||||
fi
|
||||
|
||||
run_podman network rm -f $netname
|
||||
run_podman network rm -t 0 -f $netname
|
||||
|
||||
# ipv6 cni
|
||||
mysubnet=fd00:4:4:4:4::/64
|
||||
|
@ -372,7 +372,7 @@ load helpers
|
|||
die "resolv.conf does not contain a ipv6 nameserver"
|
||||
fi
|
||||
|
||||
run_podman network rm -f $netname
|
||||
run_podman network rm -t 0 -f $netname
|
||||
}
|
||||
|
||||
# Test for https://github.com/containers/podman/issues/10052
|
||||
|
@ -463,9 +463,8 @@ load helpers
|
|||
is "$output" "$random_1" "curl 127.0.0.1:/index.txt should still work"
|
||||
|
||||
# cleanup
|
||||
run_podman stop -t 0 $cid $background_cid
|
||||
run_podman rm -f $cid $background_cid
|
||||
run_podman network rm -f $netname $netname2
|
||||
run_podman rm -t 0 -f $cid $background_cid
|
||||
run_podman network rm -t 0 -f $netname $netname2
|
||||
}
|
||||
|
||||
@test "podman network after restart" {
|
||||
|
@ -538,12 +537,11 @@ load helpers
|
|||
run curl --retry 2 -s $SERVER/index.txt
|
||||
is "$output" "$random_1" "curl 127.0.0.1:/index.txt after podman restart"
|
||||
|
||||
run_podman stop -t 0 $cid
|
||||
run_podman rm -f $cid
|
||||
run_podman rm -t 0 -f $cid
|
||||
done
|
||||
|
||||
# Cleanup network
|
||||
run_podman network rm $netname
|
||||
run_podman network rm -t 0 -f $netname
|
||||
}
|
||||
|
||||
# vim: filetype=sh
|
||||
|
|
|
@ -299,7 +299,7 @@ function _check_completion_end() {
|
|||
run_podman image untag $IMAGE $random_image_name:$random_image_tag
|
||||
|
||||
for state in created running degraded exited; do
|
||||
run_podman pod rm --force $state-$random_pod_name
|
||||
run_podman pod rm -t 0 --force $state-$random_pod_name
|
||||
done
|
||||
|
||||
for state in created running pause exited; do
|
||||
|
|
|
@ -7,8 +7,8 @@ load helpers
|
|||
|
||||
# This is a long ugly way to clean up pods and remove the pause image
|
||||
function teardown() {
|
||||
run_podman pod rm -f -a
|
||||
run_podman rm -f -a
|
||||
run_podman pod rm -t 0 -f -a
|
||||
run_podman rm -t 0 -f -a
|
||||
run_podman image list --format '{{.ID}} {{.Repository}}'
|
||||
while read id name; do
|
||||
if [[ "$name" =~ /pause ]]; then
|
||||
|
@ -77,8 +77,7 @@ RELABEL="system_u:object_r:container_file_t:s0"
|
|||
fi
|
||||
|
||||
run_podman stop -a -t 0
|
||||
run_podman pod stop test_pod
|
||||
run_podman pod rm -f test_pod
|
||||
run_podman pod rm -t 0 -f test_pod
|
||||
}
|
||||
|
||||
@test "podman play" {
|
||||
|
@ -92,8 +91,7 @@ RELABEL="system_u:object_r:container_file_t:s0"
|
|||
fi
|
||||
|
||||
run_podman stop -a -t 0
|
||||
run_podman pod stop test_pod
|
||||
run_podman pod rm -f test_pod
|
||||
run_podman pod rm -t 0 -f test_pod
|
||||
}
|
||||
|
||||
@test "podman play --network" {
|
||||
|
@ -111,8 +109,7 @@ RELABEL="system_u:object_r:container_file_t:s0"
|
|||
is "$output" "slirp4netns" "network mode slirp4netns is set for the container"
|
||||
|
||||
run_podman stop -a -t 0
|
||||
run_podman pod stop test_pod
|
||||
run_podman pod rm -f test_pod
|
||||
run_podman pod rm -t 0 -f test_pod
|
||||
|
||||
run_podman play kube --network none $PODMAN_TMPDIR/test.yaml
|
||||
run_podman pod inspect --format {{.InfraContainerID}} "${lines[1]}"
|
||||
|
@ -121,8 +118,7 @@ RELABEL="system_u:object_r:container_file_t:s0"
|
|||
is "$output" "none" "network mode none is set for the container"
|
||||
|
||||
run_podman stop -a -t 0
|
||||
run_podman pod stop test_pod
|
||||
run_podman pod rm -f test_pod
|
||||
run_podman pod rm -t 0 -f test_pod
|
||||
}
|
||||
|
||||
@test "podman play with user from image" {
|
||||
|
@ -165,7 +161,6 @@ _EOF
|
|||
is "$output" bin "expect container within pod to run as the bin user"
|
||||
|
||||
run_podman stop -a -t 0
|
||||
run_podman pod stop test_pod
|
||||
run_podman pod rm -f test_pod
|
||||
run_podman pod rm -t 0 -f test_pod
|
||||
run_podman rmi -f userimage:latest
|
||||
}
|
||||
|
|
|
@ -56,7 +56,7 @@ fi
|
|||
# Setup helper: establish a test environment with exactly the images needed
|
||||
function basic_setup() {
|
||||
# Clean up all containers
|
||||
run_podman rm --all --force
|
||||
run_podman rm -t 0 --all --force
|
||||
|
||||
# ...including external (buildah) ones
|
||||
run_podman ps --all --external --format '{{.ID}} {{.Names}}'
|
||||
|
@ -109,8 +109,8 @@ function basic_setup() {
|
|||
# Basic teardown: remove all pods and containers
|
||||
function basic_teardown() {
|
||||
echo "# [teardown]" >&2
|
||||
run_podman '?' pod rm --all --force
|
||||
run_podman '?' rm --all --force
|
||||
run_podman '?' pod rm -t 0 --all --force
|
||||
run_podman '?' rm -t 0 --all --force
|
||||
|
||||
command rm -rf $PODMAN_TMPDIR
|
||||
}
|
||||
|
|
|
@ -553,7 +553,7 @@ podman build -f Dockerfile -t build-priv
|
|||
########
|
||||
# Cleanup
|
||||
########
|
||||
podman rm -a -f
|
||||
podman rm -a -f -t 0
|
||||
podman rmi -a -f
|
||||
rm ./Dockerfile
|
||||
|
||||
|
|
|
@ -114,4 +114,4 @@ podman pod kill foobar
|
|||
########
|
||||
# Remove all pods and their containers
|
||||
########
|
||||
podman pod rm -fa
|
||||
podman pod rm -t 0 -fa
|
||||
|
|
Loading…
Reference in a new issue