2017-11-01 15:24:59 +00:00
|
|
|
package libpod
|
|
|
|
|
|
|
|
import (
|
2019-04-16 12:12:12 +00:00
|
|
|
"context"
|
2019-01-05 12:30:03 +00:00
|
|
|
"fmt"
|
2017-11-01 15:24:59 +00:00
|
|
|
"os"
|
2017-11-09 18:51:20 +00:00
|
|
|
"path/filepath"
|
2019-06-20 19:05:46 +00:00
|
|
|
"strings"
|
2017-11-01 15:24:59 +00:00
|
|
|
"sync"
|
2019-02-15 15:33:59 +00:00
|
|
|
"syscall"
|
2017-11-01 15:24:59 +00:00
|
|
|
|
2020-03-15 16:53:59 +00:00
|
|
|
"github.com/containers/common/pkg/config"
|
2019-10-24 14:37:22 +00:00
|
|
|
is "github.com/containers/image/v5/storage"
|
|
|
|
"github.com/containers/image/v5/types"
|
2020-07-06 13:38:20 +00:00
|
|
|
"github.com/containers/libpod/v2/libpod/define"
|
|
|
|
"github.com/containers/libpod/v2/libpod/events"
|
|
|
|
"github.com/containers/libpod/v2/libpod/image"
|
|
|
|
"github.com/containers/libpod/v2/libpod/lock"
|
|
|
|
"github.com/containers/libpod/v2/pkg/cgroups"
|
|
|
|
"github.com/containers/libpod/v2/pkg/rootless"
|
|
|
|
"github.com/containers/libpod/v2/pkg/util"
|
2017-11-01 15:24:59 +00:00
|
|
|
"github.com/containers/storage"
|
2017-11-30 19:25:00 +00:00
|
|
|
"github.com/cri-o/ocicni/pkg/ocicni"
|
2018-03-18 00:08:27 +00:00
|
|
|
"github.com/docker/docker/pkg/namesgenerator"
|
2017-11-01 15:24:59 +00:00
|
|
|
"github.com/pkg/errors"
|
|
|
|
"github.com/sirupsen/logrus"
|
|
|
|
)
|
|
|
|
|
|
|
|
// A RuntimeOption is a functional option which alters the Runtime created by
// NewRuntime. Options are applied in order; each may mutate the Runtime or
// return an error to abort construction.
type RuntimeOption func(*Runtime) error
|
|
|
|
|
2020-03-27 14:13:51 +00:00
|
|
|
// storageSet records which storage-related settings were explicitly set
// (presumably via RuntimeOptions or user configuration, as opposed to being
// left at their defaults) — TODO confirm against the option setters that
// write these flags.
type storageSet struct {
	RunRootSet         bool
	GraphRootSet       bool
	StaticDirSet       bool
	VolumePathSet      bool
	GraphDriverNameSet bool
	TmpDirSet          bool
}
|
|
|
|
|
2017-11-01 15:24:59 +00:00
|
|
|
// Runtime is the core libpod runtime
type Runtime struct {
	// config is the runtime's active configuration.
	config *config.Config
	// storageConfig holds the containers/storage options in effect.
	storageConfig storage.StoreOptions
	// storageSet records which storage settings were explicitly set.
	storageSet storageSet

	// state is the database backend (BoltDB or in-memory).
	state State
	// store is the containers/storage store; may be nil (see noStore).
	store storage.Store
	// storageService wraps the store for container layer management.
	storageService *storageService
	// imageContext is the containers/image system context.
	imageContext *types.SystemContext
	// defaultOCIRuntime is the runtime used when none is specified.
	defaultOCIRuntime OCIRuntime
	// ociRuntimes maps runtime names to initialized OCI runtimes.
	ociRuntimes map[string]OCIRuntime
	// netPlugin is the CNI plugin used for container networking.
	netPlugin ocicni.CNIPlugin
	// conmonPath is the path to a working conmon binary.
	conmonPath string
	// imageRuntime manages images.
	imageRuntime *image.Runtime
	// lockManager hands out locks for containers, pods, and volumes.
	lockManager lock.Manager

	// doRenumber indicates that the runtime should perform a lock renumber
	// during initialization.
	// Once the runtime has been initialized and returned, this variable is
	// unused.
	doRenumber bool

	// doMigrate indicates a system migration should run during init.
	doMigrate bool
	// System migrate can move containers to a new runtime.
	// We make no promises that these migrated containers work on the new
	// runtime, though.
	migrateRuntime string

	// valid indicates whether the runtime is ready to use.
	// valid is set to true when a runtime is returned from GetRuntime(),
	// and remains true until the runtime is shut down (rendering its
	// storage unusable). When valid is false, the runtime cannot be used.
	valid bool
	lock  sync.RWMutex

	// mechanism to read and write event logs
	eventer events.Eventer

	// noStore indicates whether we need to interact with a store or not
	noStore bool
}
|
|
|
|
|
2019-07-01 18:35:13 +00:00
|
|
|
// SetXdgDirs ensures the XDG_RUNTIME_DIR env and XDG_CONFIG_HOME variables are set.
|
|
|
|
// containers/image uses XDG_RUNTIME_DIR to locate the auth file, XDG_CONFIG_HOME is
|
2020-07-20 18:17:37 +00:00
|
|
|
// use for the containers.conf configuration file.
|
2019-07-01 18:35:13 +00:00
|
|
|
func SetXdgDirs() error {
|
2019-06-25 13:40:19 +00:00
|
|
|
if !rootless.IsRootless() {
|
|
|
|
return nil
|
|
|
|
}
|
2019-06-21 10:49:23 +00:00
|
|
|
|
2019-07-01 18:35:13 +00:00
|
|
|
// Setup XDG_RUNTIME_DIR
|
2019-06-21 10:49:23 +00:00
|
|
|
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
|
|
|
|
|
|
|
|
if runtimeDir == "" {
|
2019-06-25 13:40:19 +00:00
|
|
|
var err error
|
2019-08-12 18:11:53 +00:00
|
|
|
runtimeDir, err = util.GetRuntimeDir()
|
2019-06-25 13:40:19 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2019-06-21 10:49:23 +00:00
|
|
|
if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
|
2019-06-25 13:40:19 +00:00
|
|
|
return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
|
|
|
|
}
|
2019-07-01 18:35:13 +00:00
|
|
|
|
2019-10-01 21:06:00 +00:00
|
|
|
if rootless.IsRootless() && os.Getenv("DBUS_SESSION_BUS_ADDRESS") == "" {
|
|
|
|
sessionAddr := filepath.Join(runtimeDir, "bus")
|
|
|
|
if _, err := os.Stat(sessionAddr); err == nil {
|
|
|
|
os.Setenv("DBUS_SESSION_BUS_ADDRESS", fmt.Sprintf("unix:path=%s", sessionAddr))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-01 18:35:13 +00:00
|
|
|
// Setup XDG_CONFIG_HOME
|
|
|
|
if cfgHomeDir := os.Getenv("XDG_CONFIG_HOME"); cfgHomeDir == "" {
|
2019-10-15 12:32:24 +00:00
|
|
|
cfgHomeDir, err := util.GetRootlessConfigHomeDir()
|
|
|
|
if err != nil {
|
2019-07-01 18:35:13 +00:00
|
|
|
return err
|
|
|
|
}
|
2019-10-15 12:32:24 +00:00
|
|
|
if err := os.Setenv("XDG_CONFIG_HOME", cfgHomeDir); err != nil {
|
2019-07-01 18:35:13 +00:00
|
|
|
return errors.Wrapf(err, "cannot set XDG_CONFIG_HOME")
|
|
|
|
}
|
|
|
|
}
|
2019-06-25 13:40:19 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-11-01 15:24:59 +00:00
|
|
|
// NewRuntime creates a new container runtime
|
|
|
|
// Options can be passed to override the default configuration for the runtime
|
2020-07-09 17:50:01 +00:00
|
|
|
func NewRuntime(ctx context.Context, options ...RuntimeOption) (*Runtime, error) {
|
2020-03-27 14:13:51 +00:00
|
|
|
conf, err := config.NewConfig("")
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
conf.CheckCgroupsAndAdjustConfig()
|
|
|
|
return newRuntimeFromConfig(ctx, conf, options...)
|
2019-03-12 12:01:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// NewRuntimeFromConfig creates a new container runtime using the given
// configuration file for its default configuration. Passed RuntimeOption
// functions can be used to mutate this configuration further.
// An error will be returned if the configuration file at the given path does
// not exist or cannot be loaded
func NewRuntimeFromConfig(ctx context.Context, userConfig *config.Config, options ...RuntimeOption) (*Runtime, error) {
	// Thin exported wrapper; all construction happens in the unexported
	// constructor shared with NewRuntime.
	return newRuntimeFromConfig(ctx, userConfig, options...)
}
|
|
|
|
|
2020-07-09 17:50:01 +00:00
|
|
|
func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...RuntimeOption) (*Runtime, error) {
|
|
|
|
runtime := new(Runtime)
|
2019-10-21 10:06:46 +00:00
|
|
|
|
2020-03-27 14:13:51 +00:00
|
|
|
if conf.Engine.OCIRuntime == "" {
|
|
|
|
conf.Engine.OCIRuntime = "runc"
|
|
|
|
// If we're running on cgroups v2, default to using crun.
|
|
|
|
if onCgroupsv2, _ := cgroups.IsCgroup2UnifiedMode(); onCgroupsv2 {
|
|
|
|
conf.Engine.OCIRuntime = "crun"
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
runtime.config = conf
|
|
|
|
|
|
|
|
storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
|
2019-03-28 09:30:09 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-03-27 14:13:51 +00:00
|
|
|
runtime.storageConfig = storeOpts
|
2018-03-01 16:56:22 +00:00
|
|
|
|
|
|
|
// Overwrite config with user-given configuration options
|
|
|
|
for _, opt := range options {
|
|
|
|
if err := opt(runtime); err != nil {
|
|
|
|
return nil, errors.Wrapf(err, "error configuring runtime")
|
|
|
|
}
|
|
|
|
}
|
2019-06-27 12:11:52 +00:00
|
|
|
|
2019-05-01 19:07:30 +00:00
|
|
|
if err := makeRuntime(ctx, runtime); err != nil {
|
2019-03-15 16:05:03 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
2018-03-01 16:56:22 +00:00
|
|
|
return runtime, nil
|
|
|
|
}
|
|
|
|
|
2019-06-27 09:06:24 +00:00
|
|
|
func getLockManager(runtime *Runtime) (lock.Manager, error) {
|
|
|
|
var err error
|
|
|
|
var manager lock.Manager
|
|
|
|
|
2020-03-27 14:13:51 +00:00
|
|
|
switch runtime.config.Engine.LockType {
|
2019-06-27 10:56:29 +00:00
|
|
|
case "file":
|
2020-03-27 14:13:51 +00:00
|
|
|
lockPath := filepath.Join(runtime.config.Engine.TmpDir, "locks")
|
2019-06-27 10:56:29 +00:00
|
|
|
manager, err = lock.OpenFileLockManager(lockPath)
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(errors.Cause(err)) {
|
|
|
|
manager, err = lock.NewFileLockManager(lockPath)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrapf(err, "failed to get new file lock manager")
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-27 09:06:24 +00:00
|
|
|
case "", "shm":
|
2019-10-21 17:48:23 +00:00
|
|
|
lockPath := define.DefaultSHMLockPath
|
2019-06-27 09:06:24 +00:00
|
|
|
if rootless.IsRootless() {
|
2019-10-21 17:48:23 +00:00
|
|
|
lockPath = fmt.Sprintf("%s_%d", define.DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
|
2019-06-27 09:06:24 +00:00
|
|
|
}
|
|
|
|
// Set up the lock manager
|
2020-03-27 14:13:51 +00:00
|
|
|
manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
|
2019-06-27 09:06:24 +00:00
|
|
|
if err != nil {
|
2020-01-13 12:01:45 +00:00
|
|
|
switch {
|
|
|
|
case os.IsNotExist(errors.Cause(err)):
|
2020-03-27 14:13:51 +00:00
|
|
|
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
|
2019-06-27 09:06:24 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrapf(err, "failed to get new shm lock manager")
|
|
|
|
}
|
2020-01-13 12:01:45 +00:00
|
|
|
case errors.Cause(err) == syscall.ERANGE && runtime.doRenumber:
|
2019-06-27 09:06:24 +00:00
|
|
|
logrus.Debugf("Number of locks does not match - removing old locks")
|
|
|
|
|
|
|
|
// ERANGE indicates a lock numbering mismatch.
|
|
|
|
// Since we're renumbering, this is not fatal.
|
|
|
|
// Remove the earlier set of locks and recreate.
|
|
|
|
if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
|
|
|
|
return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
|
|
|
|
}
|
|
|
|
|
2020-03-27 14:13:51 +00:00
|
|
|
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
|
2019-06-27 09:06:24 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-01-13 12:01:45 +00:00
|
|
|
default:
|
2019-06-27 09:06:24 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
default:
|
2020-03-27 14:13:51 +00:00
|
|
|
return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.Engine.LockType)
|
2019-06-27 09:06:24 +00:00
|
|
|
}
|
|
|
|
return manager, nil
|
|
|
|
}
|
|
|
|
|
2018-03-01 16:56:22 +00:00
|
|
|
// Make a new runtime based on the given configuration
// Sets up containers/storage, state store, OCI runtime
//
// The named return retErr is inspected by deferred cleanup below, so all
// error paths must return through it. The order of the steps here matters:
// the state must exist before the DB config merge, the alive lock must be
// held before refresh, and lock-manager setup must precede renumbering.
func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) {
	// Find a working conmon binary
	cPath, err := runtime.config.FindConmon()
	if err != nil {
		return err
	}
	runtime.conmonPath = cPath

	// Make the static files directory if it does not exist
	if err := os.MkdirAll(runtime.config.Engine.StaticDir, 0700); err != nil {
		// The directory is allowed to exist
		if !os.IsExist(err) {
			return errors.Wrapf(err, "error creating runtime static files directory %s",
				runtime.config.Engine.StaticDir)
		}
	}

	// Set up the state.
	//
	// TODO - if we further break out the state implementation into
	// libpod/state, the config could take care of the code below. It
	// would further allow to move the types and consts into a coherent
	// package.
	switch runtime.config.Engine.StateType {
	case config.InMemoryStateStore:
		state, err := NewInMemoryState()
		if err != nil {
			return err
		}
		runtime.state = state
	case config.SQLiteStateStore:
		return errors.Wrapf(define.ErrInvalidArg, "SQLite state is currently disabled")
	case config.BoltDBStateStore:
		dbPath := filepath.Join(runtime.config.Engine.StaticDir, "bolt_state.db")

		state, err := NewBoltState(dbPath, runtime)
		if err != nil {
			return err
		}
		runtime.state = state
	default:
		return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed (%v)", runtime.config.Engine.StateType)
	}

	// Grab config from the database so we can reset some defaults
	dbConfig, err := runtime.state.GetDBConfig()
	if err != nil {
		return errors.Wrapf(err, "error retrieving runtime configuration from database")
	}

	// Values stored in the database win over freshly-computed defaults.
	runtime.mergeDBConfig(dbConfig)

	logrus.Debugf("Using graph driver %s", runtime.storageConfig.GraphDriverName)
	logrus.Debugf("Using graph root %s", runtime.storageConfig.GraphRoot)
	logrus.Debugf("Using run root %s", runtime.storageConfig.RunRoot)
	logrus.Debugf("Using static dir %s", runtime.config.Engine.StaticDir)
	logrus.Debugf("Using tmp dir %s", runtime.config.Engine.TmpDir)
	logrus.Debugf("Using volume path %s", runtime.config.Engine.VolumePath)

	// Validate our config against the database, now that we've set our
	// final storage configuration
	if err := runtime.state.ValidateDBConfig(runtime); err != nil {
		return err
	}

	if err := runtime.state.SetNamespace(runtime.config.Engine.Namespace); err != nil {
		return errors.Wrapf(err, "error setting libpod namespace in state")
	}
	logrus.Debugf("Set libpod namespace to %q", runtime.config.Engine.Namespace)

	// Set up containers/storage
	var store storage.Store
	if os.Geteuid() != 0 {
		logrus.Debug("Not configuring container store")
	} else if runtime.noStore {
		logrus.Debug("No store required. Not opening container store.")
	} else if err := runtime.configureStore(); err != nil {
		return err
	}
	// NOTE(review): the local `store` above is never assigned (configureStore
	// presumably sets runtime.store instead), so this deferred cleanup can
	// never fire — verify whether it should check runtime.store.
	defer func() {
		if retErr != nil && store != nil {
			// Don't forcibly shut down
			// We could be opening a store in use by another libpod
			if _, err := store.Shutdown(false); err != nil {
				logrus.Errorf("Error removing store for partially-created runtime: %s", err)
			}
		}
	}()

	// Setup the eventer
	eventer, err := runtime.newEventer()
	if err != nil {
		return err
	}
	runtime.eventer = eventer
	if runtime.imageRuntime != nil {
		runtime.imageRuntime.Eventer = eventer
	}

	// Set up containers/image
	if runtime.imageContext == nil {
		runtime.imageContext = &types.SystemContext{}
	}
	runtime.imageContext.SignaturePolicyPath = runtime.config.Engine.SignaturePolicyPath

	// Create the tmpDir
	if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil {
		// The directory is allowed to exist
		if !os.IsExist(err) {
			return errors.Wrapf(err, "error creating tmpdir %s", runtime.config.Engine.TmpDir)
		}
	}

	// Create events log dir
	if err := os.MkdirAll(filepath.Dir(runtime.config.Engine.EventsLogFilePath), 0700); err != nil {
		// The directory is allowed to exist
		if !os.IsExist(err) {
			return errors.Wrapf(err, "error creating events dirs %s", filepath.Dir(runtime.config.Engine.EventsLogFilePath))
		}
	}

	// Get us at least one working OCI runtime.
	runtime.ociRuntimes = make(map[string]OCIRuntime)

	// Initialize remaining OCI runtimes
	for name, paths := range runtime.config.Engine.OCIRuntimes {
		ociRuntime, err := newConmonOCIRuntime(name, paths, runtime.conmonPath, runtime.config)
		if err != nil {
			// Don't fatally error.
			// This will allow us to ship configs including optional
			// runtimes that might not be installed (crun, kata).
			// Only a warnf so default configs don't spec errors.
			logrus.Warnf("Error initializing configured OCI runtime %s: %v", name, err)
			continue
		}

		runtime.ociRuntimes[name] = ociRuntime
	}

	// Do we have a default OCI runtime?
	if runtime.config.Engine.OCIRuntime != "" {
		// If the string starts with / it's a path to a runtime
		// executable.
		if strings.HasPrefix(runtime.config.Engine.OCIRuntime, "/") {
			name := filepath.Base(runtime.config.Engine.OCIRuntime)

			ociRuntime, err := newConmonOCIRuntime(name, []string{runtime.config.Engine.OCIRuntime}, runtime.conmonPath, runtime.config)
			if err != nil {
				return err
			}

			runtime.ociRuntimes[name] = ociRuntime
			runtime.defaultOCIRuntime = ociRuntime
		} else {
			ociRuntime, ok := runtime.ociRuntimes[runtime.config.Engine.OCIRuntime]
			if !ok {
				return errors.Wrapf(define.ErrInvalidArg, "default OCI runtime %q not found", runtime.config.Engine.OCIRuntime)
			}
			runtime.defaultOCIRuntime = ociRuntime
		}
	}

	// Do we have at least one valid OCI runtime?
	if len(runtime.ociRuntimes) == 0 {
		return errors.Wrapf(define.ErrInvalidArg, "no OCI runtime has been configured")
	}

	// Do we have a default runtime?
	if runtime.defaultOCIRuntime == nil {
		return errors.Wrapf(define.ErrInvalidArg, "no default OCI runtime was configured")
	}

	// Make the per-boot files directory if it does not exist
	if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0755); err != nil {
		// The directory is allowed to exist
		if !os.IsExist(err) {
			return errors.Wrapf(err, "error creating runtime temporary files directory %s",
				runtime.config.Engine.TmpDir)
		}
	}

	// Set up the CNI net plugin
	if !rootless.IsRootless() {
		netPlugin, err := ocicni.InitCNI(runtime.config.Network.DefaultNetwork, runtime.config.Network.NetworkConfigDir, runtime.config.Network.CNIPluginDirs...)
		if err != nil {
			return errors.Wrapf(err, "error configuring CNI network plugin")
		}
		runtime.netPlugin = netPlugin
	}

	// We now need to see if the system has restarted
	// We check for the presence of a file in our tmp directory to verify this
	// This check must be locked to prevent races
	runtimeAliveLock := filepath.Join(runtime.config.Engine.TmpDir, "alive.lck")
	runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive")
	aliveLock, err := storage.GetLockfile(runtimeAliveLock)
	if err != nil {
		return errors.Wrapf(err, "error acquiring runtime init lock")
	}
	// Acquire the lock and hold it until we return
	// This ensures that no two processes will be in runtime.refresh at once
	// TODO: we can't close the FD in this lock, so we should keep it around
	// and use it to lock important operations
	aliveLock.Lock()
	doRefresh := false
	defer func() {
		// May already be unlocked on the rootless re-exec path below.
		if aliveLock.Locked() {
			aliveLock.Unlock()
		}
	}()

	_, err = os.Stat(runtimeAliveFile)
	if err != nil {
		// If we need to refresh, then it is safe to assume there are
		// no containers running. Create immediately a namespace, as
		// we will need to access the storage.
		if os.Geteuid() != 0 {
			aliveLock.Unlock() // Unlock to avoid deadlock as BecomeRootInUserNS will reexec.
			pausePid, err := util.GetRootlessPauseProcessPidPath()
			if err != nil {
				return errors.Wrapf(err, "could not get pause process pid file path")
			}
			became, ret, err := rootless.BecomeRootInUserNS(pausePid)
			if err != nil {
				return err
			}
			if became {
				// The re-exec'd child carries on; this process is done.
				os.Exit(ret)
			}
		}
		// If the file doesn't exist, we need to refresh the state
		// This will trigger on first use as well, but refreshing an
		// empty state only creates a single file
		// As such, it's not really a performance concern
		if os.IsNotExist(err) {
			doRefresh = true
		} else {
			return errors.Wrapf(err, "error reading runtime status file %s", runtimeAliveFile)
		}
	}

	runtime.lockManager, err = getLockManager(runtime)
	if err != nil {
		return err
	}

	// If we're renumbering locks, do it now.
	// It breaks out of normal runtime init, and will not return a valid
	// runtime.
	if runtime.doRenumber {
		if err := runtime.renumberLocks(); err != nil {
			return err
		}
	}

	// If we need to refresh the state, do it now - things are guaranteed to
	// be set up by now.
	if doRefresh {
		// Ensure we have a store before refresh occurs
		if runtime.store == nil {
			if err := runtime.configureStore(); err != nil {
				return err
			}
		}

		if err2 := runtime.refresh(runtimeAliveFile); err2 != nil {
			return err2
		}
	}

	// Mark the runtime as valid - ready to be used, cannot be modified
	// further
	runtime.valid = true

	if runtime.doMigrate {
		if err := runtime.migrate(ctx); err != nil {
			return err
		}
	}

	return nil
}
|
|
|
|
|
|
|
|
// GetConfig returns a copy of the configuration used by the runtime
|
2019-10-21 17:48:23 +00:00
|
|
|
func (r *Runtime) GetConfig() (*config.Config, error) {
|
2017-11-01 15:24:59 +00:00
|
|
|
r.lock.RLock()
|
|
|
|
defer r.lock.RUnlock()
|
|
|
|
|
|
|
|
if !r.valid {
|
2019-06-24 20:48:34 +00:00
|
|
|
return nil, define.ErrRuntimeStopped
|
2017-11-01 15:24:59 +00:00
|
|
|
}
|
|
|
|
|
2019-10-21 17:48:23 +00:00
|
|
|
config := new(config.Config)
|
2017-11-01 15:24:59 +00:00
|
|
|
|
|
|
|
// Copy so the caller won't be able to modify the actual config
|
2019-03-25 19:43:38 +00:00
|
|
|
if err := JSONDeepCopy(r.config, config); err != nil {
|
|
|
|
return nil, errors.Wrapf(err, "error copying config")
|
|
|
|
}
|
2017-11-01 15:24:59 +00:00
|
|
|
|
2019-03-25 19:43:38 +00:00
|
|
|
return config, nil
|
2017-11-01 15:24:59 +00:00
|
|
|
}
|
|
|
|
|
2019-07-08 18:20:17 +00:00
|
|
|
// DeferredShutdown shuts down the runtime without exposing any
// errors. This is only meant to be used when the runtime is being
// shutdown within a defer statement; else use Shutdown
func (r *Runtime) DeferredShutdown(force bool) {
	// Error deliberately discarded: a defer statement cannot act on a
	// return value, so shutdown failures are intentionally ignored here.
	_ = r.Shutdown(force)
}
|
|
|
|
|
2017-11-01 15:24:59 +00:00
|
|
|
// Shutdown shuts down the runtime and associated containers and storage
|
|
|
|
// If force is true, containers and mounted storage will be shut down before
|
|
|
|
// cleaning up; if force is false, an error will be returned if there are
|
|
|
|
// still containers running or mounted
|
|
|
|
func (r *Runtime) Shutdown(force bool) error {
|
|
|
|
r.lock.Lock()
|
|
|
|
defer r.lock.Unlock()
|
|
|
|
|
|
|
|
if !r.valid {
|
2019-06-24 20:48:34 +00:00
|
|
|
return define.ErrRuntimeStopped
|
2017-11-01 15:24:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
r.valid = false
|
|
|
|
|
2017-11-28 16:26:06 +00:00
|
|
|
// Shutdown all containers if --force is given
|
|
|
|
if force {
|
|
|
|
ctrs, err := r.state.AllContainers()
|
2017-11-28 16:30:15 +00:00
|
|
|
if err != nil {
|
2017-11-28 16:26:06 +00:00
|
|
|
logrus.Errorf("Error retrieving containers from database: %v", err)
|
|
|
|
} else {
|
|
|
|
for _, ctr := range ctrs {
|
2020-03-27 14:13:51 +00:00
|
|
|
if err := ctr.StopWithTimeout(r.config.Engine.StopTimeout); err != nil {
|
2017-11-28 16:26:06 +00:00
|
|
|
logrus.Errorf("Error stopping container %s: %v", ctr.ID(), err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-12-03 13:25:16 +00:00
|
|
|
var lastError error
|
2020-07-09 19:46:14 +00:00
|
|
|
// If no store was requested, it can be nil and there is no need to
|
2019-07-01 18:35:16 +00:00
|
|
|
// attempt to shut it down
|
2018-08-23 21:02:04 +00:00
|
|
|
if r.store != nil {
|
|
|
|
if _, err := r.store.Shutdown(force); err != nil {
|
|
|
|
lastError = errors.Wrapf(err, "Error shutting down container storage")
|
|
|
|
}
|
2017-12-03 13:25:16 +00:00
|
|
|
}
|
|
|
|
if err := r.state.Close(); err != nil {
|
|
|
|
if lastError != nil {
|
|
|
|
logrus.Errorf("%v", lastError)
|
|
|
|
}
|
|
|
|
lastError = err
|
2017-11-09 18:51:20 +00:00
|
|
|
}
|
|
|
|
|
2017-12-03 13:25:16 +00:00
|
|
|
return lastError
|
2017-11-01 15:24:59 +00:00
|
|
|
}
|
2017-12-01 18:26:58 +00:00
|
|
|
|
|
|
|
// Reconfigures the runtime after a reboot
|
|
|
|
// Refreshes the state, recreating temporary files
|
|
|
|
// Does not check validity as the runtime is not valid until after this has run
|
|
|
|
func (r *Runtime) refresh(alivePath string) error {
|
2019-04-25 15:29:16 +00:00
|
|
|
logrus.Debugf("Podman detected system restart - performing state refresh")
|
|
|
|
|
2017-12-07 18:15:34 +00:00
|
|
|
// First clear the state in the database
|
|
|
|
if err := r.state.Refresh(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Next refresh the state of all containers to recreate dirs and
|
Ensure volumes reacquire locks on state refresh
After a restart, pods and containers both run a refresh()
function to prepare to run after a reboot. Until now, volumes
have not had a similar function, because they had no per-boot
setup to perform.
Unfortunately, this was not noticed when in-memory locking was
introduced to volumes. The refresh() routine is, among other
things, responsible for ensuring that locks are reserved after a
reboot, ensuring they cannot be taken by a freshly-created
container, pod, or volume. If this reservation is not done, we
can end up with two objects using the same lock, potentially
needing to lock each other for some operations - classic recipe
for deadlocks.
Add a refresh() function to volumes to perform lock reservation
and ensure it is called as part of overall refresh().
Fixes #4605
Fixes #4621
Signed-off-by: Matthew Heon <matthew.heon@pm.me>
2019-12-03 04:06:00 +00:00
|
|
|
// namespaces, and all the pods to recreate cgroups.
|
|
|
|
// Containers, pods, and volumes must also reacquire their locks.
|
2017-12-01 18:26:58 +00:00
|
|
|
ctrs, err := r.state.AllContainers()
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrapf(err, "error retrieving all containers from state")
|
|
|
|
}
|
2018-05-16 18:58:46 +00:00
|
|
|
pods, err := r.state.AllPods()
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrapf(err, "error retrieving all pods from state")
|
|
|
|
}
|
Ensure volumes reacquire locks on state refresh
After a restart, pods and containers both run a refresh()
function to prepare to run after a reboot. Until now, volumes
have not had a similar function, because they had no per-boot
setup to perform.
Unfortunately, this was not noticed when in-memory locking was
introduced to volumes. The refresh() routine is, among other
things, responsible for ensuring that locks are reserved after a
reboot, ensuring they cannot be taken by a freshly-created
container, pod, or volume. If this reservation is not done, we
can end up with two objects using the same lock, potentially
needing to lock each other for some operations - classic recipe
for deadlocks.
Add a refresh() function to volumes to perform lock reservation
and ensure it is called as part of overall refresh().
Fixes #4605
Fixes #4621
Signed-off-by: Matthew Heon <matthew.heon@pm.me>
2019-12-03 04:06:00 +00:00
|
|
|
vols, err := r.state.AllVolumes()
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrapf(err, "error retrieving all volumes from state")
|
|
|
|
}
|
|
|
|
// No locks are taken during pod, volume, and container refresh.
|
|
|
|
// Furthermore, the pod/volume/container refresh() functions are not
|
2018-08-23 19:13:41 +00:00
|
|
|
// allowed to take locks themselves.
|
Ensure volumes reacquire locks on state refresh
After a restart, pods and containers both run a refresh()
function to prepare to run after a reboot. Until now, volumes
have not had a similar function, because they had no per-boot
setup to perform.
Unfortunately, this was not noticed when in-memory locking was
introduced to volumes. The refresh() routine is, among other
things, responsible for ensuring that locks are reserved after a
reboot, ensuring they cannot be taken by a freshly-created
container, pod, or volume. If this reservation is not done, we
can end up with two objects using the same lock, potentially
needing to lock each other for some operations - classic recipe
for deadlocks.
Add a refresh() function to volumes to perform lock reservation
and ensure it is called as part of overall refresh().
Fixes #4605
Fixes #4621
Signed-off-by: Matthew Heon <matthew.heon@pm.me>
2019-12-03 04:06:00 +00:00
|
|
|
// We cannot assume that any pod/volume/container has a valid lock until
|
2018-08-23 19:13:41 +00:00
|
|
|
// after this function has returned.
|
|
|
|
// The runtime alive lock should suffice to provide mutual exclusion
|
|
|
|
// until this has run.
|
2017-12-01 18:26:58 +00:00
|
|
|
for _, ctr := range ctrs {
|
|
|
|
if err := ctr.refresh(); err != nil {
|
2018-08-10 14:35:25 +00:00
|
|
|
logrus.Errorf("Error refreshing container %s: %v", ctr.ID(), err)
|
2017-12-01 18:26:58 +00:00
|
|
|
}
|
|
|
|
}
|
2018-05-16 18:58:46 +00:00
|
|
|
for _, pod := range pods {
|
|
|
|
if err := pod.refresh(); err != nil {
|
2018-08-10 14:35:25 +00:00
|
|
|
logrus.Errorf("Error refreshing pod %s: %v", pod.ID(), err)
|
2018-05-16 18:58:46 +00:00
|
|
|
}
|
|
|
|
}
|
Ensure volumes reacquire locks on state refresh
After a restart, pods and containers both run a refresh()
function to prepare to run after a reboot. Until now, volumes
have not had a similar function, because they had no per-boot
setup to perform.
Unfortunately, this was not noticed when in-memory locking was
introduced to volumes. The refresh() routine is, among other
things, responsible for ensuring that locks are reserved after a
reboot, ensuring they cannot be taken by a freshly-created
container, pod, or volume. If this reservation is not done, we
can end up with two objects using the same lock, potentially
needing to lock each other for some operations - classic recipe
for deadlocks.
Add a refresh() function to volumes to perform lock reservation
and ensure it is called as part of overall refresh().
Fixes #4605
Fixes #4621
Signed-off-by: Matthew Heon <matthew.heon@pm.me>
2019-12-03 04:06:00 +00:00
|
|
|
for _, vol := range vols {
|
|
|
|
if err := vol.refresh(); err != nil {
|
|
|
|
logrus.Errorf("Error refreshing volume %s: %v", vol.Name(), err)
|
|
|
|
}
|
|
|
|
}
|
2017-12-01 18:26:58 +00:00
|
|
|
|
2018-01-10 21:42:49 +00:00
|
|
|
// Create a file indicating the runtime is alive and ready
|
2017-12-01 18:26:58 +00:00
|
|
|
file, err := os.OpenFile(alivePath, os.O_RDONLY|os.O_CREATE, 0644)
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrapf(err, "error creating runtime status file %s", alivePath)
|
|
|
|
}
|
|
|
|
defer file.Close()
|
|
|
|
|
2019-04-25 20:23:09 +00:00
|
|
|
r.newSystemEvent(events.Refresh)
|
|
|
|
|
2017-12-01 18:26:58 +00:00
|
|
|
return nil
|
|
|
|
}
|
2017-12-12 17:48:51 +00:00
|
|
|
|
|
|
|
// Info returns the store and host information
|
2020-03-15 16:53:59 +00:00
|
|
|
func (r *Runtime) Info() (*define.Info, error) {
|
|
|
|
return r.info()
|
2017-12-12 17:48:51 +00:00
|
|
|
}
|
2018-02-28 20:06:05 +00:00
|
|
|
|
2018-05-25 00:50:37 +00:00
|
|
|
// generateName generates a unique name for a container or pod.
|
2018-03-18 00:08:27 +00:00
|
|
|
func (r *Runtime) generateName() (string, error) {
|
|
|
|
for {
|
|
|
|
name := namesgenerator.GetRandomName(0)
|
|
|
|
// Make sure container with this name does not exist
|
|
|
|
if _, err := r.state.LookupContainer(name); err == nil {
|
|
|
|
continue
|
2020-01-13 12:01:45 +00:00
|
|
|
} else if errors.Cause(err) != define.ErrNoSuchCtr {
|
|
|
|
return "", err
|
2018-03-18 00:08:27 +00:00
|
|
|
}
|
|
|
|
// Make sure pod with this name does not exist
|
|
|
|
if _, err := r.state.LookupPod(name); err == nil {
|
|
|
|
continue
|
2020-01-13 12:01:45 +00:00
|
|
|
} else if errors.Cause(err) != define.ErrNoSuchPod {
|
|
|
|
return "", err
|
2018-03-18 00:08:27 +00:00
|
|
|
}
|
|
|
|
return name, nil
|
|
|
|
}
|
|
|
|
// The code should never reach here.
|
|
|
|
}
|
|
|
|
|
2019-07-09 14:06:57 +00:00
|
|
|
// Configure store and image runtime
|
|
|
|
func (r *Runtime) configureStore() error {
|
2020-03-27 14:13:51 +00:00
|
|
|
store, err := storage.GetStore(r.storageConfig)
|
2019-07-09 14:06:57 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
r.store = store
|
|
|
|
is.Transport.SetStore(store)
|
|
|
|
|
2019-07-23 14:01:12 +00:00
|
|
|
// Set up a storage service for creating container root filesystems from
|
|
|
|
// images
|
2020-06-11 18:40:38 +00:00
|
|
|
r.storageService = getStorageService(r.store)
|
2019-07-23 14:01:12 +00:00
|
|
|
|
2019-07-09 14:06:57 +00:00
|
|
|
ir := image.NewImageRuntimeFromStore(r.store)
|
2020-03-27 14:13:51 +00:00
|
|
|
ir.SignaturePolicyPath = r.config.Engine.SignaturePolicyPath
|
|
|
|
ir.EventsLogFilePath = r.config.Engine.EventsLogFilePath
|
|
|
|
ir.EventsLogger = r.config.Engine.EventsLogger
|
2019-07-09 14:06:57 +00:00
|
|
|
|
|
|
|
r.imageRuntime = ir
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ImageRuntime returns the imageruntime for image operations.
|
|
|
|
// If WithNoStore() was used, no image runtime will be available, and this
|
|
|
|
// function will return nil.
|
2018-03-15 15:06:49 +00:00
|
|
|
func (r *Runtime) ImageRuntime() *image.Runtime {
|
|
|
|
return r.imageRuntime
|
|
|
|
}
|
2018-11-29 14:55:15 +00:00
|
|
|
|
|
|
|
// SystemContext returns the imagecontext
|
|
|
|
func (r *Runtime) SystemContext() *types.SystemContext {
|
|
|
|
return r.imageContext
|
|
|
|
}
|
2019-09-13 18:01:53 +00:00
|
|
|
|
2019-10-08 17:53:36 +00:00
|
|
|
// GetOCIRuntimePath retrieves the path of the default OCI runtime.
|
|
|
|
func (r *Runtime) GetOCIRuntimePath() string {
|
|
|
|
return r.defaultOCIRuntime.Path()
|
|
|
|
}
|
2020-03-27 14:13:51 +00:00
|
|
|
|
|
|
|
// StorageConfig retrieves the storage options for the container runtime
|
|
|
|
func (r *Runtime) StorageConfig() storage.StoreOptions {
|
|
|
|
return r.storageConfig
|
|
|
|
}
|
|
|
|
|
2020-04-16 04:41:09 +00:00
|
|
|
// GetStore returns the runtime stores
|
|
|
|
func (r *Runtime) GetStore() storage.Store {
|
|
|
|
return r.store
|
|
|
|
}
|
|
|
|
|
2020-03-27 14:13:51 +00:00
|
|
|
// DBConfig is a set of Libpod runtime configuration settings that are saved in
// a State when it is first created, and can subsequently be retrieved.
type DBConfig struct {
	// LibpodRoot is libpod's static directory (merged into Engine.StaticDir).
	LibpodRoot string
	// LibpodTmp is libpod's temporary directory (merged into Engine.TmpDir).
	LibpodTmp string
	// StorageRoot is the c/storage graph root (merged into storageConfig.GraphRoot).
	StorageRoot string
	// StorageTmp is the c/storage run root (merged into storageConfig.RunRoot).
	StorageTmp string
	// GraphDriver is the c/storage graph driver name (merged into
	// storageConfig.GraphDriverName).
	GraphDriver string
	// VolumePath is the named-volume directory (merged into Engine.VolumePath).
	VolumePath string
}
|
|
|
|
|
|
|
// mergeDBConfig merges the configuration from the database.
|
2020-06-11 18:40:38 +00:00
|
|
|
func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) {
|
2020-03-27 14:13:51 +00:00
|
|
|
|
2020-05-08 12:37:14 +00:00
|
|
|
c := &r.config.Engine
|
2020-03-27 14:13:51 +00:00
|
|
|
if !r.storageSet.RunRootSet && dbConfig.StorageTmp != "" {
|
|
|
|
if r.storageConfig.RunRoot != dbConfig.StorageTmp &&
|
|
|
|
r.storageConfig.RunRoot != "" {
|
|
|
|
logrus.Debugf("Overriding run root %q with %q from database",
|
|
|
|
r.storageConfig.RunRoot, dbConfig.StorageTmp)
|
|
|
|
}
|
|
|
|
r.storageConfig.RunRoot = dbConfig.StorageTmp
|
|
|
|
}
|
|
|
|
|
|
|
|
if !r.storageSet.GraphRootSet && dbConfig.StorageRoot != "" {
|
|
|
|
if r.storageConfig.GraphRoot != dbConfig.StorageRoot &&
|
|
|
|
r.storageConfig.GraphRoot != "" {
|
|
|
|
logrus.Debugf("Overriding graph root %q with %q from database",
|
|
|
|
r.storageConfig.GraphRoot, dbConfig.StorageRoot)
|
|
|
|
}
|
|
|
|
r.storageConfig.GraphRoot = dbConfig.StorageRoot
|
|
|
|
}
|
|
|
|
|
|
|
|
if !r.storageSet.GraphDriverNameSet && dbConfig.GraphDriver != "" {
|
|
|
|
if r.storageConfig.GraphDriverName != dbConfig.GraphDriver &&
|
|
|
|
r.storageConfig.GraphDriverName != "" {
|
|
|
|
logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve",
|
|
|
|
r.storageConfig.GraphDriverName, dbConfig.GraphDriver)
|
|
|
|
}
|
|
|
|
r.storageConfig.GraphDriverName = dbConfig.GraphDriver
|
|
|
|
}
|
|
|
|
|
|
|
|
if !r.storageSet.StaticDirSet && dbConfig.LibpodRoot != "" {
|
|
|
|
if c.StaticDir != dbConfig.LibpodRoot && c.StaticDir != "" {
|
|
|
|
logrus.Debugf("Overriding static dir %q with %q from database", c.StaticDir, dbConfig.LibpodRoot)
|
|
|
|
}
|
|
|
|
c.StaticDir = dbConfig.LibpodRoot
|
|
|
|
}
|
|
|
|
|
|
|
|
if !r.storageSet.TmpDirSet && dbConfig.LibpodTmp != "" {
|
|
|
|
if c.TmpDir != dbConfig.LibpodTmp && c.TmpDir != "" {
|
|
|
|
logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp)
|
|
|
|
}
|
|
|
|
c.TmpDir = dbConfig.LibpodTmp
|
|
|
|
c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log")
|
|
|
|
}
|
|
|
|
|
|
|
|
if !r.storageSet.VolumePathSet && dbConfig.VolumePath != "" {
|
|
|
|
if c.VolumePath != dbConfig.VolumePath && c.VolumePath != "" {
|
|
|
|
logrus.Debugf("Overriding volume path %q with %q from database", c.VolumePath, dbConfig.VolumePath)
|
|
|
|
}
|
|
|
|
c.VolumePath = dbConfig.VolumePath
|
|
|
|
}
|
|
|
|
}
|
2020-04-14 20:44:37 +00:00
|
|
|
|
|
|
|
// EnableLabeling returns the Containers.EnableLabeling setting from the
// runtime's configuration (presumably controls SELinux-style container
// labeling - confirm against pkg/config).
func (r *Runtime) EnableLabeling() bool {
	return r.config.Containers.EnableLabeling
}
|