package libpod

import (
	"net"
	"time"

	"github.com/containers/podman/v3/libpod/define"
	"github.com/containers/podman/v3/libpod/lock"
	"github.com/cri-o/ocicni/pkg/ocicni"
	"github.com/pkg/errors"
)

// Pod represents a group of containers that are managed together, and that
// may share kernel namespaces.
//
// Any operations on a Pod that access state must begin with a call to
// updatePod(). There is no guarantee that state exists in a readable state
// before this call, and even if it does, its contents will be out of date and
// must be refreshed from the database.
// Generally, this requirement applies only to top-level functions; helpers
// can assume their callers handled this requirement. Put another way, if a
// function takes the pod lock and accesses any part of state, it should call
// updatePod() immediately after locking.
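//
// For example, a state accessor typically follows this pattern (a sketch of
// the convention just described; CgroupPath further down is a real instance):
//
//	p.lock.Lock()
//	defer p.lock.Unlock()
//	if err := p.updatePod(); err != nil {
//		return "", err
//	}
//	return p.state.CgroupPath, nil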
type Pod struct {
	// config is the pod's static configuration
	config *PodConfig
	// state is the pod's mutable state, refreshed from the database by
	// updatePod()
	state *podState

	// valid is false once the pod has been removed from the state
	valid bool
	// runtime is the libpod runtime that owns the pod
	runtime *Runtime
	// lock is the pod's lock
	lock lock.Locker
}

// PodConfig represents a pod's static configuration
type PodConfig struct {
	ID   string `json:"id"`
	Name string `json:"name"`
	// Namespace the pod is in
	Namespace string `json:"namespace,omitempty"`

	// Hostname is the hostname of the pod
	Hostname string `json:"hostname,omitempty"`

	// Labels contains labels applied to the pod
	Labels map[string]string `json:"labels"`
	// CgroupParent contains the pod's CGroup parent
	CgroupParent string `json:"cgroupParent"`
	// UsePodCgroup indicates whether the pod will create its own CGroup and
	// join containers to it.
	// If true, all containers joined to the pod will use the pod cgroup as
	// their cgroup parent, and cannot set a different cgroup parent.
	UsePodCgroup bool `json:"sharesCgroup,omitempty"`

	// The following UsePod{kernelNamespace} fields indicate whether the
	// containers in the pod will inherit the given namespace from the first
	// container in the pod.
	UsePodPID   bool `json:"sharesPid,omitempty"`
	UsePodIPC   bool `json:"sharesIpc,omitempty"`
	UsePodNet   bool `json:"sharesNet,omitempty"`
	UsePodMount bool `json:"sharesMnt,omitempty"`
	UsePodUser  bool `json:"sharesUser,omitempty"`
	UsePodUTS   bool `json:"sharesUts,omitempty"`
	// UsePodCgroupNS indicates whether containers in the pod share the cgroup
	// namespace. This is deliberately distinct from UsePodCgroup, which
	// controls the pod's parent cgroup rather than namespace sharing.
	UsePodCgroupNS bool `json:"sharesCgroupNS,omitempty"`

	InfraContainer *InfraContainerConfig `json:"infraConfig"`

	// Time pod was created
	CreatedTime time.Time `json:"created"`

	// CreateCommand is the full command plus arguments of the process the
	// pod has been created with.
	CreateCommand []string `json:"CreateCommand,omitempty"`

	// ID of the pod's lock
	LockID uint32 `json:"lockID"`
}
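
// Note that every sharesX key above is omitempty: a pod configured to share
// only the network namespace, for instance, serializes with "sharesNet": true
// and simply omits the other sharesX keys.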

// podState represents a pod's state
type podState struct {
	// CgroupPath is the path to the pod's CGroup
	CgroupPath string `json:"cgroupPath"`
	// InfraContainerID is the ID of the container that holds the pod's
	// namespace information - most often an infra container.
	InfraContainerID string
}

// InfraContainerConfig is the configuration for the pod's infra container.
// Generally speaking, these are equivalent to the container configuration
// options you will find in container_config.go (and even named identically),
// save for HasInfraContainer (which determines whether an infra container is
// created at all - if it is false, no other options in this struct are used)
// and HostNetwork (which affects the created OCI spec, and as such is not
// represented directly in container_config.go).
// Aside from those two exceptions, these options set the equivalent field in
// the container's configuration.
type InfraContainerConfig struct {
	ConmonPidFile      string               `json:"conmonPidFile"`
	HasInfraContainer  bool                 `json:"makeInfraContainer"`
	NoNetwork          bool                 `json:"noNetwork,omitempty"`
	HostNetwork        bool                 `json:"infraHostNetwork,omitempty"`
	PortBindings       []ocicni.PortMapping `json:"infraPortBindings"`
	StaticIP           net.IP               `json:"staticIP,omitempty"`
	StaticMAC          net.HardwareAddr     `json:"staticMAC,omitempty"`
	UseImageResolvConf bool                 `json:"useImageResolvConf,omitempty"`
	DNSServer          []string             `json:"dnsServer,omitempty"`
	DNSSearch          []string             `json:"dnsSearch,omitempty"`
	DNSOption          []string             `json:"dnsOption,omitempty"`
	UseImageHosts      bool                 `json:"useImageHosts,omitempty"`
	HostAdd            []string             `json:"hostsAdd,omitempty"`
	Networks           []string             `json:"networks,omitempty"`
	// ExitCommand is the command run to clean up after the infra container
	// exits (unmounting its storage, tearing down its network, and so on).
	// It is set at pod creation using the same code that generates exit
	// commands for normal containers.
	ExitCommand    []string            `json:"exitCommand,omitempty"`
	InfraImage     string              `json:"infraImage,omitempty"`
	InfraCommand   []string            `json:"infraCommand,omitempty"`
	Slirp4netns    bool                `json:"slirp4netns,omitempty"`
	NetworkOptions map[string][]string `json:"network_options,omitempty"`
}
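
// As a sketch, a minimal configuration requesting an infra container on the
// host network might look like this (field values are illustrative):
//
//	infra := &InfraContainerConfig{
//		HasInfraContainer: true,
//		HostNetwork:       true,
//	}
//
// Were HasInfraContainer false instead, every other field would be ignored
// and no infra container would be created.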

// ID retrieves the pod's ID
func (p *Pod) ID() string {
	return p.config.ID
}

// Name retrieves the pod's name
func (p *Pod) Name() string {
	return p.config.Name
}

// Namespace returns the pod's libpod namespace.
// Namespaces are used to logically separate containers and pods in the state.
func (p *Pod) Namespace() string {
	return p.config.Namespace
}

// Labels returns the pod's labels
func (p *Pod) Labels() map[string]string {
	labels := make(map[string]string)
	for key, value := range p.config.Labels {
		labels[key] = value
	}

	return labels
}
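
// Note that because Labels returns a copy of the underlying map, callers may
// mutate the result freely without affecting the pod's configuration, e.g.:
//
//	l := p.Labels()
//	l["example"] = "scratch" // does not modify p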

// CreatedTime gets the time when the pod was created
func (p *Pod) CreatedTime() time.Time {
	return p.config.CreatedTime
}

// CreateCommand returns the os.Args of the process with which the pod has
// been created.
func (p *Pod) CreateCommand() []string {
	return p.config.CreateCommand
}

// CgroupParent returns the pod's CGroup parent
func (p *Pod) CgroupParent() string {
	return p.config.CgroupParent
}

// SharesPID returns whether containers in the pod
// default to use the PID namespace of the first container in the pod
func (p *Pod) SharesPID() bool {
	return p.config.UsePodPID
}

// SharesIPC returns whether containers in the pod
// default to use the IPC namespace of the first container in the pod
func (p *Pod) SharesIPC() bool {
	return p.config.UsePodIPC
}

// SharesNet returns whether containers in the pod
// default to use the network namespace of the first container in the pod
func (p *Pod) SharesNet() bool {
	return p.config.UsePodNet
}

// SharesMount returns whether containers in the pod
// default to use the mount namespace of the first container in the pod
func (p *Pod) SharesMount() bool {
	return p.config.UsePodMount
}

// SharesUser returns whether containers in the pod
// default to use the user namespace of the first container in the pod
func (p *Pod) SharesUser() bool {
	return p.config.UsePodUser
}

// SharesUTS returns whether containers in the pod
// default to use the UTS namespace of the first container in the pod
func (p *Pod) SharesUTS() bool {
	return p.config.UsePodUTS
}

// SharesCgroup returns whether containers in the pod default to sharing the
// cgroup namespace of the first container in the pod. This is distinct from
// UsePodCgroup, which controls whether containers join a pod-level cgroup
// instead of the default libpod parent.
func (p *Pod) SharesCgroup() bool {
	return p.config.UsePodCgroupNS
}

// Hostname returns the hostname of the pod.
func (p *Pod) Hostname() string {
	return p.config.Hostname
}

// CgroupPath returns the path to the pod's CGroup
func (p *Pod) CgroupPath() (string, error) {
	p.lock.Lock()
	defer p.lock.Unlock()
	if err := p.updatePod(); err != nil {
		return "", err
	}

	return p.state.CgroupPath, nil
}

// HasContainer checks if a container is present in the pod
func (p *Pod) HasContainer(id string) (bool, error) {
	if !p.valid {
		return false, define.ErrPodRemoved
	}

	return p.runtime.state.PodHasContainer(p, id)
}

// AllContainersByID returns the container IDs of all the containers in the pod
func (p *Pod) AllContainersByID() ([]string, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if !p.valid {
		return nil, define.ErrPodRemoved
	}

	return p.runtime.state.PodContainersByID(p)
}

// AllContainers retrieves the containers in the pod
func (p *Pod) AllContainers() ([]*Container, error) {
	if !p.valid {
		return nil, define.ErrPodRemoved
	}
	p.lock.Lock()
	defer p.lock.Unlock()
	return p.allContainers()
}

// allContainers is the unlocked implementation of AllContainers; per the
// convention above, it assumes the caller holds the pod lock.
func (p *Pod) allContainers() ([]*Container, error) {
	return p.runtime.state.PodContainers(p)
}

// HasInfraContainer returns whether the pod will create an infra container
func (p *Pod) HasInfraContainer() bool {
	return p.config.InfraContainer.HasInfraContainer
}

// SharesNamespaces checks if the pod has any kernel namespaces set as shared.
// An infra container will not be created if no kernel namespaces are shared.
func (p *Pod) SharesNamespaces() bool {
	return p.SharesPID() || p.SharesIPC() || p.SharesNet() || p.SharesMount() || p.SharesUser() || p.SharesUTS()
}

// InfraContainerID returns the infra container ID for a pod.
// If the returned ID is "", the pod has no infra container.
func (p *Pod) InfraContainerID() (string, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	if err := p.updatePod(); err != nil {
		return "", err
	}

	return p.state.InfraContainerID, nil
}

// InfraContainer returns the infra container.
func (p *Pod) InfraContainer() (*Container, error) {
	if !p.HasInfraContainer() {
		return nil, errors.Wrap(define.ErrNoSuchCtr, "pod has no infra container")
	}

	id, err := p.InfraContainerID()
	if err != nil {
		return nil, err
	}

	return p.runtime.state.Container(id)
}

// TODO add pod batching
// Lock pod to avoid lock contention
// Store and lock all containers (no RemoveContainer in batch guarantees cache
// will not become stale)

// PodContainerStats is an organization struct for pods and their containers
type PodContainerStats struct {
	Pod            *Pod
	ContainerStats map[string]*define.ContainerStats
}

// GetPodStats returns the stats for each of the pod's containers
func (p *Pod) GetPodStats(previousContainerStats map[string]*define.ContainerStats) (map[string]*define.ContainerStats, error) {
	var (
		ok       bool
		prevStat *define.ContainerStats
	)
	p.lock.Lock()
	defer p.lock.Unlock()

	if err := p.updatePod(); err != nil {
		return nil, err
	}
	containers, err := p.runtime.state.PodContainers(p)
	if err != nil {
		return nil, err
	}
	newContainerStats := make(map[string]*define.ContainerStats)
	for _, c := range containers {
		if prevStat, ok = previousContainerStats[c.ID()]; !ok {
			prevStat = &define.ContainerStats{}
		}
		newStats, err := c.GetContainerStats(prevStat)
		// If the container wasn't running, don't include it,
		// but also suppress the error.
		if err != nil && errors.Cause(err) != define.ErrCtrStateInvalid {
			return nil, err
		}
		if err == nil {
			newContainerStats[c.ID()] = newStats
		}
	}
	return newContainerStats, nil
}
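
// Callers typically poll GetPodStats in a loop, feeding the previous result
// back in so that per-interval deltas can be computed. A minimal sketch
// (variable names are illustrative):
//
//	prev := make(map[string]*define.ContainerStats)
//	for {
//		cur, err := pod.GetPodStats(prev)
//		if err != nil {
//			return err
//		}
//		// ...report cur...
//		prev = cur
//		time.Sleep(time.Second)
//	}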

// ProcessLabel returns the SELinux label associated with the pod
func (p *Pod) ProcessLabel() (string, error) {
	if !p.HasInfraContainer() {
		return "", nil
	}

	id, err := p.InfraContainerID()
	if err != nil {
		return "", err
	}

	ctr, err := p.runtime.state.Container(id)
	if err != nil {
		return "", err
	}
	return ctr.ProcessLabel(), nil
}