mirror of
https://github.com/containers/podman
synced 2024-10-20 01:03:51 +00:00
Correctly report errors on unmounting SHM
When we fail to remove a container's SHM, that's an error, and we need to report it as such. This may be part of our lingering storage woes. Also, remove MNT_DETACH. It may be another cause of the storage removal failures. Signed-off-by: Matthew Heon <matthew.heon@pm.me>
This commit is contained in:
parent
a760e325f3
commit
de9a394fcf
|
@@ -1403,11 +1403,10 @@ func (s *BoltState) AddVolume(volume *Volume) error {
|
|||
// Volume state is allowed to not exist
|
||||
var volStateJSON []byte
|
||||
if volume.state != nil {
|
||||
stateJSON, err := json.Marshal(volume.state)
|
||||
volStateJSON, err = json.Marshal(volume.state)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error marshalling volume %s state to JSON", volume.Name())
|
||||
}
|
||||
volStateJSON = stateJSON
|
||||
}
|
||||
|
||||
db, err := s.getDBCon()
|
||||
|
|
|
@@ -1253,10 +1253,10 @@ func (c *Container) mountStorage() (_ string, Err error) {
|
|||
return
|
||||
}
|
||||
vol.lock.Lock()
|
||||
defer vol.lock.Unlock()
|
||||
if err := vol.unmount(false); err != nil {
|
||||
logrus.Errorf("Error unmounting volume %s after error mounting container %s: %v", vol.Name(), c.ID(), err)
|
||||
}
|
||||
vol.lock.Unlock()
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
@@ -1281,14 +1281,19 @@ func (c *Container) cleanupStorage() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
var cleanupErr error
|
||||
|
||||
for _, containerMount := range c.config.Mounts {
|
||||
if err := c.unmountSHM(containerMount); err != nil {
|
||||
return err
|
||||
if cleanupErr != nil {
|
||||
logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr)
|
||||
}
|
||||
cleanupErr = err
|
||||
}
|
||||
}
|
||||
|
||||
if c.config.Rootfs != "" {
|
||||
return nil
|
||||
return cleanupErr
|
||||
}
|
||||
|
||||
if err := c.unmount(false); err != nil {
|
||||
|
@@ -1298,14 +1303,14 @@ func (c *Container) cleanupStorage() error {
|
|||
// state
|
||||
if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown {
|
||||
logrus.Errorf("Storage for container %s has been removed", c.ID())
|
||||
return nil
|
||||
} else {
|
||||
if cleanupErr != nil {
|
||||
logrus.Errorf("Error cleaning up container %s storage: %v", c.ID(), cleanupErr)
|
||||
}
|
||||
cleanupErr = err
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
var cleanupErr error
|
||||
|
||||
// Request an unmount of all named volumes
|
||||
for _, v := range c.config.NamedVolumes {
|
||||
vol, err := c.runtime.state.Volume(v.Name)
|
||||
|
|
|
@@ -49,12 +49,12 @@ func (c *Container) mountSHM(shmOptions string) error {
|
|||
}
|
||||
|
||||
func (c *Container) unmountSHM(mount string) error {
|
||||
if err := unix.Unmount(mount, unix.MNT_DETACH); err != nil {
|
||||
if err := unix.Unmount(mount, 0); err != nil {
|
||||
if err != syscall.EINVAL {
|
||||
logrus.Warnf("container %s failed to unmount %s : %v", c.ID(), mount, err)
|
||||
} else {
|
||||
logrus.Debugf("container %s failed to unmount %s : %v", c.ID(), mount, err)
|
||||
return errors.Wrapf(err, "error unmounting container %s SHM mount %s", c.ID(), mount)
|
||||
}
|
||||
// If it's just an EINVAL, debug logs only
|
||||
logrus.Debugf("container %s failed to unmount %s : %v", c.ID(), mount, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@@ -18,3 +18,7 @@ type InfoData struct {
|
|||
Type string
|
||||
Data map[string]interface{}
|
||||
}
|
||||
|
||||
// VolumeDriverLocal is the "local" volume driver. It is managed by libpod
|
||||
// itself.
|
||||
const VolumeDriverLocal = "local"
|
||||
|
|
|
@@ -44,11 +44,11 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
|
|||
volume.config.Name = stringid.GenerateNonCryptoID()
|
||||
}
|
||||
if volume.config.Driver == "" {
|
||||
volume.config.Driver = "local"
|
||||
volume.config.Driver = define.VolumeDriverLocal
|
||||
}
|
||||
volume.config.CreatedTime = time.Now()
|
||||
|
||||
if volume.config.Driver == "local" {
|
||||
if volume.config.Driver == define.VolumeDriverLocal {
|
||||
logrus.Debugf("Validating options for local driver")
|
||||
// Validate options
|
||||
for key := range volume.config.Options {
|
||||
|
|
|
@@ -26,7 +26,7 @@ func (v *Volume) teardownStorage() error {
|
|||
// Volumes with options set, or a filesystem type, or a device to mount need to
|
||||
// be mounted and unmounted.
|
||||
func (v *Volume) needsMount() bool {
|
||||
return len(v.config.Options) > 0 && v.config.Driver == "local"
|
||||
return len(v.config.Options) > 0 && v.config.Driver == define.VolumeDriverLocal
|
||||
}
|
||||
|
||||
// update() updates the volume state from the DB.
|
||||
|
|
|
@@ -67,7 +67,7 @@ func (v *Volume) mount() error {
|
|||
|
||||
errPipe, err := mountCmd.StderrPipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "getting stderr pipe")
|
||||
return errors.Wrapf(err, "error getting stderr pipe for mount")
|
||||
}
|
||||
if err := mountCmd.Start(); err != nil {
|
||||
out, err2 := ioutil.ReadAll(errPipe)
|
||||
|
|
|
@@ -67,8 +67,7 @@ func (i *LibpodAPI) GetVolumes(call iopodman.VarlinkCall, args []string, all boo
|
|||
Labels: v.Labels(),
|
||||
MountPoint: v.MountPoint(),
|
||||
Name: v.Name(),
|
||||
// TODO change types here to be correct
|
||||
//Options: v.Options(),
|
||||
Options: v.Options(),
|
||||
}
|
||||
volumes = append(volumes, newVol)
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue