Kube Play: use passthrough as the default log-driver if service-container is set

Reasoning
---------
When the log-driver is passthrough, the journal socket is passed to the containers as-is, which has two advantages:
1. journald can see who the actual sender of the log event is,
   rather than thinking everything comes from the conmon process (see the sketch below)
2. conmon will not have to copy all the log data
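
A rough illustration of the first point, not part of this commit: the pod
name is hypothetical, the journal fields are standard journald metadata, and
the passthrough case assumes podman runs inside a systemd unit, since
passthrough hands that unit's journal socket to the containers.

# journald driver: conmon relays the log data, so journald records conmon
# as the sender of every entry:
journalctl -o verbose CONTAINER_NAME=test_pod-a | grep _COMM=
# -> _COMM=conmon

# passthrough driver: the containers write to the inherited journal socket
# directly, so journald records the container process itself and conmon
# never copies the data:
journalctl -o verbose -u <kube-play service unit> | grep _COMM=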

Code Changes
------------
If --log-driver was not set by the user and --service-container is set, use
passthrough as the default log-driver.

Update the system tests:
- explicitly set the log-driver in the sdnotify and play tests
- podman-kube template test: verify the default log-driver for service-container

Signed-off-by: Ygal Blum <ygal.blum@gmail.com>
Ygal Blum 2022-12-19 17:05:12 +02:00
commit 68fbebfacc (parent ecbb52cb47)
4 changed files with 19 additions and 4 deletions


@@ -73,6 +73,7 @@ var (
   podman play kube --creds user:password --seccomp-profile-root /custom/path apache.yml
   podman play kube https://example.com/nginx.yml`,
 	}
+	logDriverFlagName = "log-driver"
 )
 
 func init() {
@@ -116,7 +117,6 @@ func playFlags(cmd *cobra.Command) {
 	flags.IPSliceVar(&playOptions.StaticIPs, staticIPFlagName, nil, "Static IP addresses to assign to the pods")
 	_ = cmd.RegisterFlagCompletionFunc(staticIPFlagName, completion.AutocompleteNone)
 
-	logDriverFlagName := "log-driver"
 	flags.StringVar(&playOptions.LogDriver, logDriverFlagName, common.LogDriver(), "Logging driver for the container")
 	_ = cmd.RegisterFlagCompletionFunc(logDriverFlagName, common.AutocompleteLogDriver)
@@ -247,6 +247,15 @@
 		return errors.New("--force may be specified only with --down")
 	}
 
+	// When running under systemd, use passthrough as the default log-driver.
+	// When doing so, the journal socket is passed to the containers as-is, which has two advantages:
+	// 1. journald can see who the actual sender of the log event is,
+	//    rather than thinking everything comes from the conmon process
+	// 2. conmon will not have to copy all the log data
+	if !cmd.Flags().Changed(logDriverFlagName) && playOptions.ServiceContainer {
+		playOptions.LogDriver = define.PassthroughLogging
+	}
+
 	reader, err := readerFromArg(args[0])
 	if err != nil {
 		return err
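
For context, a few hypothetical invocations showing the resulting behavior
(the YAML file name is a placeholder):

# With a service container and no explicit --log-driver,
# the driver now defaults to passthrough:
podman play kube --service-container=true play.yaml

# An explicitly requested driver still wins over the new default:
podman play kube --service-container=true --log-driver journald play.yaml

# Without a service container, the default log-driver is unchanged:
podman play kube play.yaml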


@@ -414,6 +414,12 @@ EOF
     run_podman 125 container rm $service_container
     is "$output" "Error: container .* is the service container of pod(s) .* and cannot be removed without removing the pod(s)"
 
+    # Verify that the log-driver for the Pod's containers is passthrough
+    for name in "a" "b"; do
+        run_podman container inspect test_pod-${name} --format "{{.HostConfig.LogConfig.Type}}"
+        is "$output" "passthrough"
+    done
+
     # Add a simple `auto-update --dry-run` test here to avoid too much redundancy
     # with 255-auto-update.bats
     run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"


@@ -225,7 +225,7 @@ EOF
     wait_for_file $_SOCAT_LOG
 
     # Will run until all containers have stopped.
-    run_podman play kube --service-container=true $yaml_source
+    run_podman play kube --service-container=true --log-driver journald $yaml_source
     run_podman container wait $service_container test_pod-test
 
     # Make sure the containers have the correct policy.
@@ -302,7 +302,7 @@ EOF
     # Run `play kube` in the background as it will wait for all containers to
     # send the READY=1 message.
     timeout --foreground -v --kill=10 60 \
-        $PODMAN play kube --service-container=true $yaml_source &>/dev/null &
+        $PODMAN play kube --service-container=true --log-driver journald $yaml_source &>/dev/null &
 
     # Wait for both containers to be running
     for i in $(seq 1 20); do


@@ -126,7 +126,7 @@ EOF
     # Run `play kube` in the background as it will wait for the service
    # container to exit.
     timeout --foreground -v --kill=10 60 \
-        $PODMAN play kube --service-container=true $yaml_source &>/dev/null &
+        $PODMAN play kube --service-container=true --log-driver journald $yaml_source &>/dev/null &
 
     # Wait for the container to be running
     container_a=test_pod-test