Added multiarch build support for Teleport (#17597)

This commit is contained in:
fheinecke 2022-10-31 13:00:55 -05:00 committed by GitHub
parent f1fd8ec767
commit 1472e9cf9e
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
13 changed files with 8556 additions and 1300 deletions

8931
.drone.yml

File diff suppressed because it is too large Load diff

View file

@ -13,10 +13,7 @@
# Master/dev branch: "1.0.0-dev"
VERSION=12.0.0-dev
DOCKER_IMAGE_QUAY ?= quay.io/gravitational/teleport
DOCKER_IMAGE_ECR ?= public.ecr.aws/gravitational/teleport
DOCKER_IMAGE_STAGING ?= 146628656107.dkr.ecr.us-west-2.amazonaws.com/gravitational/teleport
DOCKER_IMAGE ?= teleport
GOPATH ?= $(shell go env GOPATH)
@ -408,11 +405,10 @@ release-arm64:
$(MAKE) release ARCH=arm64
#
# make release-unix - Produces a binary release tarball containing teleport,
# tctl, and tsh.
# make build-archive - Packages the results of a build into a release tarball
#
.PHONY:
release-unix: clean full
.PHONY: build-archive
build-archive:
@echo "---> Creating OSS release archive."
mkdir teleport
cp -rf $(BUILDDIR)/* \
@ -425,6 +421,13 @@ release-unix: clean full
tar $(TAR_FLAGS) -c teleport | gzip -n > $(RELEASE).tar.gz
rm -rf teleport
@echo "---> Created $(RELEASE).tar.gz."
#
# make release-unix - Produces a binary release tarball containing teleport,
# tctl, and tsh.
#
.PHONY:
release-unix: clean full build-archive
@if [ -f e/Makefile ]; then $(MAKE) -C e release; fi
#
@ -998,53 +1001,17 @@ install: build
cp -f $(BUILDDIR)/teleport $(BINDIR)/
mkdir -p $(DATADIR)
# Docker image build. Always build the binaries themselves within docker (see
# the "docker" rule) to avoid dependencies on the host libc version.
.PHONY: image
image: clean docker-binaries
image: OS=linux
image: TARBALL_PATH_SECTION:=-s "$(shell pwd)"
image: clean docker-binaries build-archive oss-deb
cp ./build.assets/charts/Dockerfile $(BUILDDIR)/
cd $(BUILDDIR) && docker build --no-cache . -t $(DOCKER_IMAGE_QUAY):$(VERSION)
cd $(BUILDDIR) && docker build --no-cache . -t $(DOCKER_IMAGE):$(VERSION)-$(ARCH) --target teleport \
--build-arg DEB_PATH="./teleport_$(VERSION)_$(ARCH).deb"
if [ -f e/Makefile ]; then $(MAKE) -C e image; fi
.PHONY: publish
publish: image
docker push $(DOCKER_IMAGE_QUAY):$(VERSION)
if [ -f e/Makefile ]; then $(MAKE) -C e publish; fi
.PHONY: publish-ecr
publish-ecr: image
docker tag $(DOCKER_IMAGE_QUAY) $(DOCKER_IMAGE_ECR)
docker push $(DOCKER_IMAGE_ECR):$(VERSION)
if [ -f e/Makefile ]; then $(MAKE) -C e publish-ecr; fi
# Docker image build in CI.
# This is run to build and push Docker images to a private repository as part of the build process.
# When we are ready to make the images public after testing (i.e. when publishing a release), we pull these
# images down, retag them and push them up to the production repo so they're available for use.
# This job can be removed/consolidated after we switch over completely from using Jenkins to using Drone.
.PHONY: image-ci
image-ci: clean docker-binaries
cp ./build.assets/charts/Dockerfile $(BUILDDIR)/
cd $(BUILDDIR) && docker build --no-cache . -t $(DOCKER_IMAGE_STAGING):$(VERSION)
if [ -f e/Makefile ]; then $(MAKE) -C e image-ci; fi
# DOCKER_CLI_EXPERIMENTAL=enabled is set to allow inspecting the manifest for present images.
# https://docs.docker.com/engine/reference/commandline/cli/#experimental-features
# The internal staging images use amazon ECR's immutable repository settings. This makes overwrites impossible currently.
# This can cause issues when drone tagging pipelines must be re-run due to failures.
# Currently the workaround for this is to not attempt to push the image when it already exists.
.PHONY: publish-ci
publish-ci: image-ci
@if DOCKER_CLI_EXPERIMENTAL=enabled docker manifest inspect "$(DOCKER_IMAGE_STAGING):$(VERSION)" >/dev/null 2>&1; then\
echo "$(DOCKER_IMAGE_STAGING):$(VERSION) already exists. "; \
else \
docker push "$(DOCKER_IMAGE_STAGING):$(VERSION)"; \
fi
if [ -f e/Makefile ]; then $(MAKE) -C e publish-ci; fi
.PHONY: print-version
print-version:
@echo $(VERSION)
@ -1097,13 +1064,17 @@ rpm:
rpm-unsigned:
$(MAKE) UNSIGNED_RPM=true rpm
# build .deb
.PHONY: deb
deb:
# build open source .deb only
.PHONY: oss-deb
oss-deb:
mkdir -p $(BUILDDIR)/
cp ./build.assets/build-package.sh ./build.assets/build-common.sh $(BUILDDIR)/
chmod +x $(BUILDDIR)/build-package.sh
cd $(BUILDDIR) && ./build-package.sh -t oss -v $(VERSION) -p deb -a $(ARCH) $(RUNTIME_SECTION) $(TARBALL_PATH_SECTION)
# build .deb
.PHONY: deb
deb: oss-deb
if [ -f e/Makefile ]; then $(MAKE) -C e deb; fi
# check binary compatibility with different OSes

View file

@ -1,45 +0,0 @@
# First stage downloads pre-compiled Teleport archive from get.gravitational.com
# and extracts binaries from the archive.
FROM alpine AS download
ARG DOWNLOAD_TYPE=teleport
ARG VERSION_TAG
ARG OS
ARG ARCH
ARG EXTRA_DOWNLOAD_ARGS=""
WORKDIR /tmp
# Install dependencies.
RUN apk --update --no-cache add curl tar
# Download the appropriate binary tarball from get.gravitational.com and extract the binaries into
# a temporary directory for us to use in the second stage.
RUN mkdir -p build && \
curl -Ls https://get.gravitational.com/${DOWNLOAD_TYPE}-${VERSION_TAG}-${OS}-${ARCH}${EXTRA_DOWNLOAD_ARGS}-bin.tar.gz | tar -xzf - && \
cp $DOWNLOAD_TYPE/teleport $DOWNLOAD_TYPE/tctl $DOWNLOAD_TYPE/tsh $DOWNLOAD_TYPE/tbot build
# Second stage builds final container with teleport binaries.
FROM ubuntu:20.04 AS teleport
# Install ca-certificates, dumb-init and libelf1, then clean up.
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y ca-certificates dumb-init libelf1 && \
update-ca-certificates && \
apt-get -y clean && \
rm -rf /var/lib/apt/lists/*
# Copy "teleport", "tctl", "tbot", and "tsh" binaries from the previous stage.
COPY --from=download /tmp/build/teleport /usr/local/bin/teleport
COPY --from=download /tmp/build/tctl /usr/local/bin/tctl
COPY --from=download /tmp/build/tsh /usr/local/bin/tsh
COPY --from=download /tmp/build/tbot /usr/local/bin/tbot
# Run Teleport inside the image with a default config file location.
ENTRYPOINT ["/usr/bin/dumb-init", "teleport", "start", "-c", "/etc/teleport/teleport.yaml"]
# Optional third stage which is only run when building the FIPS image.
FROM teleport AS teleport-fips
# Override the standard entrypoint set in the previous image with the --fips argument to start in FIPS mode.
ENTRYPOINT ["/usr/bin/dumb-init", "teleport", "start", "-c", "/etc/teleport/teleport.yaml", "--fips"]

View file

@ -1,44 +0,0 @@
# First stage downloads pre-compiled Teleport archive from get.gravitational.com
# and extracts binaries from the archive.
FROM alpine AS download
ARG DOWNLOAD_TYPE=teleport
ARG VERSION_TAG
ARG OS
ARG ARCH
ARG EXTRA_DOWNLOAD_ARGS=""
WORKDIR /tmp
# Install dependencies.
RUN apk --update --no-cache add curl tar
# Download the appropriate binary tarball from get.gravitational.com and extract the binaries into
# a temporary directory for us to use in the second stage.
RUN mkdir -p build && \
curl -Ls https://get.gravitational.com/${DOWNLOAD_TYPE}-${VERSION_TAG}-${OS}-${ARCH}${EXTRA_DOWNLOAD_ARGS}-bin.tar.gz | tar -xzf - && \
cp $DOWNLOAD_TYPE/teleport $DOWNLOAD_TYPE/tctl $DOWNLOAD_TYPE/tsh build
# Second stage builds final container with teleport binaries.
FROM ubuntu:20.04 AS teleport
# Install ca-certificates, dumb-init and libelf1, then clean up.
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y ca-certificates dumb-init libelf1 && \
update-ca-certificates && \
apt-get -y clean && \
rm -rf /var/lib/apt/lists/*
# Copy "teleport", "tctl", and "tsh" binaries from the previous stage.
COPY --from=download /tmp/build/teleport /usr/local/bin/teleport
COPY --from=download /tmp/build/tctl /usr/local/bin/tctl
COPY --from=download /tmp/build/tsh /usr/local/bin/tsh
# Run Teleport inside the image with a default config file location.
ENTRYPOINT ["/usr/bin/dumb-init", "teleport", "start", "-c", "/etc/teleport/teleport.yaml"]
# Optional third stage which is only run when building the FIPS image.
FROM teleport AS teleport-fips
# Override the standard entrypoint set in the previous image with the --fips argument to start in FIPS mode.
ENTRYPOINT ["/usr/bin/dumb-init", "teleport", "start", "-c", "/etc/teleport/teleport.yaml", "--fips"]

View file

@ -1,4 +1,9 @@
FROM ubuntu:20.04
# Stage to build the image, without FIPS entrypoint argument
FROM ubuntu:20.04 AS teleport
# Copy the deb archive
ARG DEB_PATH
COPY ${DEB_PATH?} /tmp/teleport.deb
# Install dumb-init and ca-certificates. The dumb-init package is to ensure
# signals and orphaned processes are handled correctly. The ca-certificate
@ -39,18 +44,22 @@ FROM ubuntu:20.04
# "apt-get update" to reduce the size of the image.
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \
# Install dependencies
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y ca-certificates dumb-init libelf1 && \
# Install tools
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y net-tools iputils-ping inetutils-telnet netcat tcpdump busybox && \
busybox --install -s && \
update-ca-certificates && \
# Install Teleport
dpkg -i /tmp/teleport.deb && \
# Cleanup
apt-get -y clean && \
rm -rf /var/lib/apt/lists/*
# Bundle "teleport", "tctl", "tbot", and "tsh" binaries into image.
COPY teleport /usr/local/bin/teleport
COPY tctl /usr/local/bin/tctl
COPY tsh /usr/local/bin/tsh
COPY tbot /usr/local/bin/tbot
rm -rf /var/lib/apt/lists/* && \
rm -rf /tmp/*
# By setting this entry point, we expose make target as command.
ENTRYPOINT ["/usr/bin/dumb-init", "teleport", "start", "-c", "/etc/teleport/teleport.yaml"]
# Stage to launch Teleport with the fips argument
FROM teleport AS teleport-fips
ENTRYPOINT ["/usr/bin/dumb-init", "teleport", "start", "-c", "/etc/teleport/teleport.yaml", "--fips"]

View file

@ -1,56 +0,0 @@
FROM ubuntu:20.04
# Install dumb-init and ca-certificates. The dumb-init package is to ensure
# signals and orphaned processes are handled correctly. The ca-certificate
# package is installed because the base Ubuntu image does not come with any
# certificate authorities. libelf1 is a dependency introduced by Teleport 7.0.
#
# The below packages are provided for debug purposes. Installing them adds around
# six megabytes to the image size. The packages include the following commands:
# * net-tools
# * netstat
# * ifconfig
# * ipmaddr
# * iptunnel
# * mii-tool
# * nameif
# * plipconfig
# * rarp
# * route
# * slattach
# * arp
# * iputils-ping
# * ping
# * ping4
# * ping6
# * inetutils-telnet
# * telnet
# * netcat
# * netcat
# * tcpdump
# * tcpdump
# * busybox (see "busybox --list" for all provided utils)
# * less
# * nslookup
# * vi
# * wget
#
# Note that /var/lib/apt/lists/* is cleaned up in the same RUN command as
# "apt-get update" to reduce the size of the image.
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y ca-certificates dumb-init libelf1 && \
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y net-tools iputils-ping inetutils-telnet netcat tcpdump busybox && \
busybox --install -s && \
update-ca-certificates && \
apt-get -y clean && \
rm -rf /var/lib/apt/lists/*
# Bundle "teleport", "tctl", "tbot", and "tsh" binaries into image.
COPY teleport /usr/local/bin/teleport
COPY tctl /usr/local/bin/tctl
COPY tsh /usr/local/bin/tsh
COPY tbot /usr/local/bin/tbot
# By setting this entry point, we expose make target as command.
ENTRYPOINT ["/usr/bin/dumb-init", "teleport", "start", "-c", "/etc/teleport/teleport.yaml", "--fips"]

View file

@ -345,3 +345,16 @@ func verifyNotPrereleaseStep() step {
Commands: commands,
}
}
func sliceSelect[T, V any](slice []T, selector func(T) V) []V {
selectedValues := make([]V, len(slice))
for i, entry := range slice {
selectedValues[i] = selector(entry)
}
return selectedValues
}
func getStepNames(steps []step) []string {
return sliceSelect(steps, func(s step) string { return s.Name })
}

View file

@ -18,6 +18,8 @@ import (
"fmt"
"path"
"regexp"
"golang.org/x/exp/maps"
)
// Describes a Gravitational "product", where a "product" is a piece of software
@ -28,10 +30,65 @@ type Product struct {
WorkingDirectory string // Working directory to use for "docker build".
DockerfileTarget string // Optional. Defines a dockerfile target to stop at on build.
SupportedArchs []string // ISAs that the builder should produce
SetupSteps []step // Product-specific steps that must be run before building an image.
SetupSteps []step // Product-specific, arch agnostic steps that must be run before building an image.
ArchSetupSteps map[string][]step // Product and arch specific steps that must be run before building an image.
DockerfileArgBuilder func(arch string) []string // Generator that returns "docker build --arg" strings
ImageBuilder func(repo *ContainerRepo, tag *ImageTag) *Image // Generator that returns an Image struct that defines what "docker build" should produce
GetRequiredStepNames func(arch string) []string // Generator that returns the name of the steps that "docker build" should wait for
}
func NewTeleportProduct(isEnterprise, isFips bool, version *ReleaseVersion) *Product {
workingDirectory := "/go/build"
downloadURL := fmt.Sprintf(
"https://raw.githubusercontent.com/gravitational/teleport/%s/build.assets/charts/Dockerfile",
version.ShellVersion,
)
name := "teleport"
dockerfileTarget := "teleport"
supportedArches := []string{"amd64"}
if isEnterprise {
name += "-ent"
}
if isFips {
dockerfileTarget += "-fips"
name += "-fips"
} else {
supportedArches = append(supportedArches, "arm", "arm64")
}
setupSteps, dockerfilePath, downloadProfileName := getTeleportSetupSteps(name, workingDirectory, downloadURL)
archSetupSteps, debPaths := getTeleportArchsSetupSteps(supportedArches, workingDirectory, downloadProfileName, version, isEnterprise, isFips)
return &Product{
Name: name,
DockerfilePath: dockerfilePath,
WorkingDirectory: workingDirectory,
DockerfileTarget: dockerfileTarget,
SupportedArchs: supportedArches,
SetupSteps: setupSteps,
ArchSetupSteps: archSetupSteps,
DockerfileArgBuilder: func(arch string) []string {
return []string{
fmt.Sprintf("DEB_PATH=%s", debPaths[arch]),
}
},
ImageBuilder: func(repo *ContainerRepo, tag *ImageTag) *Image {
imageProductName := "teleport"
if isEnterprise {
imageProductName += "-ent"
}
if isFips {
tag.AppendString("fips")
}
return &Image{
Repo: repo,
Name: imageProductName,
Tag: tag,
}
},
}
}
func NewTeleportOperatorProduct(cloneDirectory string) *Product {
@ -76,60 +133,232 @@ func NewTeleportOperatorProduct(cloneDirectory string) *Product {
}
}
func (p *Product) getBaseImage(arch string, version *ReleaseVersion) *Image {
return &Image{
Name: p.Name,
Tag: &ImageTag{
// Builds all the steps required to prepare the pipeline for building Teleport images.
// Returns the setup steps, the path to the downloaded Teleport dockerfile, and the name of the
// AWS profile that can be used to download artifacts from S3.
func getTeleportSetupSteps(productName, workingPath, downloadURL string) ([]step, string, string) {
assumeS3DownloadRoleStep, profileName := assumeS3DownloadRoleStep(productName)
downloadDockerfileStep, dockerfilePath := downloadTeleportDockerfileStep(productName, workingPath, downloadURL)
// Additional setup steps in the future should go here
return []step{assumeS3DownloadRoleStep, downloadDockerfileStep}, dockerfilePath, profileName
}
// Generates steps that download a deb for each supported arch to the working directory.
// Returns maps keyed by the supported arches, with the generated setup steps and deb paths.
func getTeleportArchsSetupSteps(supportedArchs []string, workingDirectory, profile string, version *ReleaseVersion,
isEnterprise, isFips bool) (map[string][]step, map[string]string) {
archSetupSteps := make(map[string][]step, len(supportedArchs))
debPaths := make(map[string]string, len(supportedArchs))
for _, supportedArch := range supportedArchs {
archSetupStep, debPath := getTeleportArchSetupStep(supportedArch, workingDirectory, profile, version, isEnterprise, isFips)
archSetupSteps[supportedArch] = []step{archSetupStep}
debPaths[supportedArch] = debPath
}
return archSetupSteps, debPaths
}
// Generates a step that downloads a deb for the given arch to the working directory.
// Returns the generated step, and the path to the downloaded deb.
func getTeleportArchSetupStep(arch, workingDirectory, profile string, version *ReleaseVersion, isEnterprise, isFips bool) (step, string) {
shellDebName := buildTeleportDebName(version, arch, isEnterprise, isFips, false)
humanDebName := buildTeleportDebName(version, arch, isEnterprise, isFips, true)
commands := generateDownloadCommandsForArch(shellDebName, version.GetFullSemver().GetSemverValue(), workingDirectory, profile)
downloadStep := step{
Name: fmt.Sprintf("Download %q artifacts from S3", humanDebName),
Image: "amazon/aws-cli",
Environment: map[string]value{
"AWS_REGION": {raw: "us-west-2"},
"AWS_S3_BUCKET": {fromSecret: "AWS_S3_BUCKET"},
"AWS_PROFILE": {raw: profile},
},
Commands: commands,
Volumes: []volumeRef{volumeRefAwsConfig},
}
return downloadStep, shellDebName
}
// Generates the commands to download `debName` from s3 to `workingDirectory`.
// Returns the commands to run; the deb is downloaded to `workingDirectory/debName`.
func generateDownloadCommandsForArch(debName, trimmedTag, workingDirectory, profile string) []string {
bucketPath := fmt.Sprintf("s3://$AWS_S3_BUCKET/teleport/tag/%s/", trimmedTag)
checkCommands := []string{
"SUCCESS=true",
fmt.Sprintf("aws s3 ls %s | tr -s ' ' | cut -d' ' -f 4 | grep -x %s || SUCCESS=false", bucketPath, debName),
}
successCommand := "[ \"$SUCCESS\" = \"true\" ]"
remotePath := fmt.Sprintf("%s%s", bucketPath, debName)
downloadPath := path.Join(workingDirectory, debName)
commands := make([]string, 0)
// Wait up to an hour for debs to be built and published to s3 by other pipelines
commands = append(commands, wrapCommandsInTimeout(checkCommands, successCommand, 60*60, 60)...)
commands = append(commands, fmt.Sprintf("mkdir -pv %q", workingDirectory))
commands = append(commands, fmt.Sprintf("aws s3 cp %s %s", remotePath, downloadPath))
return commands
}
// Returns either a human-readable or shell-evaluable Teleport deb name.
func buildTeleportDebName(version *ReleaseVersion, arch string, isEnterprise, isFips, humanReadable bool) string {
var versionString string
if humanReadable {
versionString = fmt.Sprintf("%s-tag", version.MajorVersion)
} else {
versionString = version.GetFullSemver().GetSemverValue()
}
debName := "teleport"
if isEnterprise {
debName = fmt.Sprintf("%s-ent", debName)
}
debName = fmt.Sprintf("%s_%s", debName, versionString)
if isFips {
debName = fmt.Sprintf("%s-fips", debName)
}
debName = fmt.Sprintf("%s_%s.deb", debName, arch)
return debName
}
// Creates a shell loop with a timeout
// commands: commands to run in a loop
// successCommand: should evaluate to shell true (i.e. `[ true ]`) when the loop has succeeded
// timeoutSeconds: how long in seconds to wait before the loop fails
// sleepTimeSeconds: how long to wait after every iteration before running again
func wrapCommandsInTimeout(commands []string, successCommand string, timeoutSeconds int, sleepTimeSeconds int) []string {
setupCommands := []string{
fmt.Sprintf("END_TIME=$(( $(date +%%s) + %d ))", timeoutSeconds),
"TIMED_OUT=true",
"while [ $(date +%s) -lt $${END_TIME?} ]; do",
}
finalizeCommands := []string{
// Evaluate the condition
fmt.Sprintf("%s && TIMED_OUT=false && break;", successCommand),
// Sleep if not met
fmt.Sprintf("echo 'Condition not met yet, waiting another %d seconds...'", sleepTimeSeconds),
fmt.Sprintf("sleep %d", sleepTimeSeconds),
"done",
// Conditionally log timeout failure and exit
fmt.Sprintf("[ $${TIMED_OUT?} = true ] && echo 'Timed out while waiting for condition: %s' && exit 1", successCommand),
}
loopCommands := make([]string, 0)
loopCommands = append(loopCommands, setupCommands...)
loopCommands = append(loopCommands, commands...)
loopCommands = append(loopCommands, finalizeCommands...)
return loopCommands
}
// Generates a step that downloads the Teleport Dockerfile
// Returns the generated step and the path to the downloaded Dockerfile
func downloadTeleportDockerfileStep(productName, workingPath, downloadURL string) (step, string) {
// Enterprise and fips specific dockerfiles should be configured here in the future if needed
dockerfilePath := path.Join(workingPath, fmt.Sprintf("Dockerfile-%s", productName))
return step{
Name: fmt.Sprintf("Download Teleport Dockerfile to %q for %s", dockerfilePath, productName),
Image: "alpine",
Commands: []string{
"apk add curl",
fmt.Sprintf("mkdir -pv $(dirname %q)", dockerfilePath),
fmt.Sprintf("curl -Ls -o %q %q", dockerfilePath, downloadURL),
},
}, dockerfilePath
}
func assumeS3DownloadRoleStep(productName string) (step, string) {
profileName := fmt.Sprintf("s3-download-%s", productName)
return kubernetesAssumeAwsRoleStep(kubernetesRoleSettings{
awsRoleSettings: awsRoleSettings{
awsAccessKeyID: value{fromSecret: "AWS_ACCESS_KEY_ID"},
awsSecretAccessKey: value{fromSecret: "AWS_SECRET_ACCESS_KEY"},
role: value{fromSecret: "AWS_ROLE"},
},
configVolume: volumeRefAwsConfig,
profile: profileName,
name: fmt.Sprintf("Assume S3 Download AWS Role for %s", productName),
append: true,
}), profileName
}
func (p *Product) getBaseImage(arch string, version *ReleaseVersion, containerRepo *ContainerRepo) *Image {
return p.ImageBuilder(
containerRepo,
&ImageTag{
ShellBaseValue: version.GetFullSemver().GetSemverValue(),
DisplayBaseValue: version.MajorVersion,
Arch: arch,
},
}
)
}
func (p *Product) GetLocalRegistryImage(arch string, version *ReleaseVersion) *Image {
image := p.getBaseImage(arch, version)
image.Repo = NewLocalContainerRepo()
return image
return p.getBaseImage(arch, version, GetLocalContainerRepo())
}
func (p *Product) GetStagingRegistryImage(arch string, version *ReleaseVersion, stagingRepo *ContainerRepo) *Image {
image := p.getBaseImage(arch, version)
image.Repo = stagingRepo
return image
return p.getBaseImage(arch, version, stagingRepo)
}
func (p *Product) buildSteps(version *ReleaseVersion, setupStepNames []string, flags *TriggerFlags) []step {
func (p *Product) buildSteps(version *ReleaseVersion, parentStepNames []string, flags *TriggerFlags) []step {
steps := make([]step, 0)
// Get the container repos images will be pushed to
stagingRepo := GetStagingContainerRepo(flags.UseUniqueStagingTag)
publicEcrPullRegistry := GetPublicEcrPullRegistry()
productionRepos := GetProductionContainerRepos()
for _, setupStep := range p.SetupSteps {
setupStep.DependsOn = append(setupStep.DependsOn, setupStepNames...)
steps = append(steps, setupStep)
setupStepNames = append(setupStepNames, setupStep.Name)
// Collect the name of the steps that are required before build/retrieval
productSetupStepNames := make([]string, 0)
if flags.ShouldBuildNewImages {
for _, setupStep := range p.SetupSteps {
// Wait for the parent steps before starting on the product setup steps
setupStep.DependsOn = append(setupStep.DependsOn, parentStepNames...)
steps = append(steps, setupStep)
productSetupStepNames = append(productSetupStepNames, setupStep.Name)
}
}
if len(productSetupStepNames) == 0 {
// Cover the case where there are no product setup steps
productSetupStepNames = parentStepNames
}
archBuildStepDetails := make([]*buildStepOutput, 0, len(p.SupportedArchs))
for i, supportedArch := range p.SupportedArchs {
// Add image build/retrieval steps
for _, supportedArch := range p.SupportedArchs {
// Include steps for building images from scratch
if flags.ShouldBuildNewImages {
archBuildStep, archBuildStepDetail := p.createBuildStep(supportedArch, version, i)
archBuildStep, archBuildStepDetail := p.createBuildStep(supportedArch, version, publicEcrPullRegistry)
// Collect the name of steps that are required before build, taking into account arch-specific steps
setupStepNames := make([]string, 0)
for _, archSetupStep := range p.ArchSetupSteps[supportedArch] {
archSetupStep.DependsOn = append(archSetupStep.DependsOn, productSetupStepNames...)
steps = append(steps, archSetupStep)
setupStepNames = append(setupStepNames, archSetupStep.Name)
}
if len(setupStepNames) == 0 {
// Cover the case where there are no arch specific steps
setupStepNames = productSetupStepNames
}
archBuildStep.DependsOn = append(archBuildStep.DependsOn, setupStepNames...)
if p.GetRequiredStepNames != nil {
archBuildStep.DependsOn = append(archBuildStep.DependsOn, p.GetRequiredStepNames(supportedArch)...)
}
steps = append(steps, archBuildStep)
archBuildStepDetails = append(archBuildStepDetails, archBuildStepDetail)
} else {
stagingImage := p.GetStagingRegistryImage(supportedArch, version, stagingRepo)
pullStagingImageStep, locallyPushedImage := stagingRepo.pullPushStep(stagingImage, setupStepNames)
pullStagingImageStep, locallyPushedImage := stagingRepo.pullPushStep(stagingImage, productSetupStepNames)
steps = append(steps, pullStagingImageStep)
// Generate build details that point to the pulled staging images
@ -142,8 +371,17 @@ func (p *Product) buildSteps(version *ReleaseVersion, setupStepNames []string, f
}
}
// Add publish steps
for _, containerRepo := range getReposToPublishTo(productionRepos, stagingRepo, flags) {
steps = append(steps, containerRepo.buildSteps(archBuildStepDetails, flags)...)
buildSteps := containerRepo.buildSteps(archBuildStepDetails, flags)
// Add repo setup step dependency to the build steps
setupStepNames := getStepNames(containerRepo.SetupSteps)
for _, buildStep := range buildSteps {
buildStep.DependsOn = append(buildStep.DependsOn, setupStepNames...)
}
steps = append(steps, buildSteps...)
}
return steps
@ -175,7 +413,7 @@ func cleanBuilderName(builderName string) string {
return invalidBuildxCharExpression.ReplaceAllString(builderName, "-")
}
func (p *Product) createBuildStep(arch string, version *ReleaseVersion, delay int) (step, *buildStepOutput) {
func (p *Product) createBuildStep(arch string, version *ReleaseVersion, publicEcrPullRegistry *ContainerRepo) (step, *buildStepOutput) {
localRegistryImage := p.GetLocalRegistryImage(arch, version)
builderName := cleanBuilderName(fmt.Sprintf("%s-builder", localRegistryImage.GetDisplayName()))
@ -200,36 +438,41 @@ func (p *Product) createBuildStep(arch string, version *ReleaseVersion, delay in
buildCommand += fmt.Sprintf(" --file %q", p.DockerfilePath)
if p.DockerfileArgBuilder != nil {
for _, buildArg := range p.DockerfileArgBuilder(arch) {
buildCommand += fmt.Sprintf(" --build-arg %q", buildArg)
buildCommand += fmt.Sprintf(" --build-arg %s", buildArg)
}
}
buildCommand += " " + p.WorkingDirectory
delayTime := delay * 5
// This is important to prevent pull rate limiting. See `GetPublicEcrPullRegistry` doc comment
// for details.
authenticatedBuildCommands := publicEcrPullRegistry.buildCommandsWithLogin([]string{buildCommand})
commands := []string{
"docker run --privileged --rm tonistiigi/binfmt --install all",
fmt.Sprintf("mkdir -pv %q && cd %q", p.WorkingDirectory, p.WorkingDirectory),
fmt.Sprintf("mkdir -pv %q", buildxConfigFileDir),
fmt.Sprintf("echo '[registry.%q]' > %q", LocalRegistrySocket, buildxConfigFilePath),
fmt.Sprintf("echo ' http = true' >> %q", buildxConfigFilePath),
buildxCreateCommand,
}
commands = append(commands, authenticatedBuildCommands...)
commands = append(commands,
fmt.Sprintf("docker buildx rm %q", builderName),
fmt.Sprintf("rm -rf %q", buildxConfigFileDir),
)
envVars := maps.Clone(publicEcrPullRegistry.EnvironmentVars)
envVars["DOCKER_BUILDKIT"] = value{
raw: "1",
}
step := step{
Name: p.GetBuildStepName(arch, version),
Image: "docker",
Volumes: dockerVolumeRefs(),
Environment: map[string]value{
"DOCKER_BUILDKIT": {
raw: "1",
},
},
Commands: []string{
// Without a delay buildx can occasionally try to pull base images faster than container registries will allow,
// triggering a rate limit.
fmt.Sprintf("echo 'Sleeping %ds to avoid registry pull rate limits' && sleep %d", delayTime, delayTime),
"docker run --privileged --rm tonistiigi/binfmt --install all",
fmt.Sprintf("mkdir -pv %q && cd %q", p.WorkingDirectory, p.WorkingDirectory),
fmt.Sprintf("mkdir -pv %q", buildxConfigFileDir),
fmt.Sprintf("echo '[registry.%q]' > %q", LocalRegistrySocket, buildxConfigFilePath),
fmt.Sprintf("echo ' http = true' >> %q", buildxConfigFilePath),
buildxCreateCommand,
buildCommand,
fmt.Sprintf("docker buildx rm %q", builderName),
fmt.Sprintf("rm -rf %q", buildxConfigFileDir),
},
Name: p.GetBuildStepName(arch, version),
Image: "docker",
Volumes: dockerVolumeRefs(volumeRefAwsConfig),
Environment: envVars,
Commands: commands,
DependsOn: getStepNames(publicEcrPullRegistry.SetupSteps),
}
return step, &buildStepOutput{

View file

@ -21,11 +21,12 @@ import (
// Describes a Drone trigger as it pertains to container image building.
type TriggerInfo struct {
Trigger trigger
Name string
Flags *TriggerFlags
SupportedVersions []*ReleaseVersion
SetupSteps []step
Trigger trigger
Name string
Flags *TriggerFlags
SupportedVersions []*ReleaseVersion
SetupSteps []step
ParentePipelineNames []string
}
// This type is mainly used to make passing these vars around cleaner
@ -55,6 +56,9 @@ func NewTagTrigger(branchMajorVersion string) *TriggerInfo {
RelativeVersionName: "branch",
},
},
ParentePipelineNames: []string{
tagCleanupPipelineName,
},
}
}
@ -87,25 +91,25 @@ func NewCronTrigger(latestMajorVersions []string) *TriggerInfo {
return nil
}
majorVersionVarDirectory := "/go/vars/full-version"
majorVersionVarBasePath := "/go/vars/full-version"
supportedVersions := make([]*ReleaseVersion, 0, len(latestMajorVersions))
if len(latestMajorVersions) > 0 {
latestMajorVersion := latestMajorVersions[0]
supportedVersions = append(supportedVersions, &ReleaseVersion{
MajorVersion: latestMajorVersion,
ShellVersion: readCronShellVersionCommand(majorVersionVarDirectory, latestMajorVersion),
ShellVersion: readCronShellVersionCommand(majorVersionVarBasePath, latestMajorVersion),
RelativeVersionName: "current-version",
SetupSteps: []step{getLatestSemverStep(latestMajorVersion, majorVersionVarDirectory)},
SetupSteps: []step{getLatestSemverStep(latestMajorVersion, majorVersionVarBasePath)},
})
if len(latestMajorVersions) > 1 {
for i, majorVersion := range latestMajorVersions[1:] {
supportedVersions = append(supportedVersions, &ReleaseVersion{
MajorVersion: majorVersion,
ShellVersion: readCronShellVersionCommand(majorVersionVarDirectory, majorVersion),
ShellVersion: readCronShellVersionCommand(majorVersionVarBasePath, majorVersion),
RelativeVersionName: fmt.Sprintf("previous-version-%d", i+1),
SetupSteps: []step{getLatestSemverStep(majorVersion, majorVersionVarDirectory)},
SetupSteps: []step{getLatestSemverStep(majorVersion, majorVersionVarBasePath)},
})
}
}
@ -124,26 +128,26 @@ func NewCronTrigger(latestMajorVersions []string) *TriggerInfo {
}
}
func getLatestSemverStep(majorVersion string, majorVersionVarDirectory string) step {
func getLatestSemverStep(majorVersion string, majorVersionVarBasePath string) step {
// We don't use "/go/src/github.com/gravitational/teleport" here as a later stage
// may need to clone a different version, and "/go" persists between steps
cloneDirectory := "/tmp/teleport"
majorVersionVarPath := path.Join(majorVersionVarDirectory, majorVersion)
majorVersionVarPath := fmt.Sprintf("%s-%s", majorVersionVarBasePath, majorVersion)
return step{
Name: fmt.Sprintf("Find the latest available semver for %s", majorVersion),
Image: fmt.Sprintf("golang:%s", GoVersion),
Commands: append(
cloneRepoCommands(cloneDirectory, fmt.Sprintf("branch/%s", majorVersion)),
fmt.Sprintf("mkdir -pv %q", majorVersionVarDirectory),
fmt.Sprintf("mkdir -pv $(dirname %q)", majorVersionVarPath),
fmt.Sprintf("cd %q", path.Join(cloneDirectory, "build.assets", "tooling", "cmd", "query-latest")),
fmt.Sprintf("go run . %q > %q", majorVersion, majorVersionVarPath),
fmt.Sprintf("go run . %q | sed 's/v//' > %q", majorVersion, majorVersionVarPath),
fmt.Sprintf("echo Found full semver \"$(cat %q)\" for major version %q", majorVersionVarPath, majorVersion),
),
}
}
func readCronShellVersionCommand(majorVersionDirectory, majorVersion string) string {
return fmt.Sprintf("$(cat '%s')", path.Join(majorVersionDirectory, majorVersion))
return fmt.Sprintf("v$(cat '%s-%s')", majorVersionDirectory, majorVersion)
}
// Drone triggers must all evaluate to "true" for a pipeline to be executed.
@ -155,6 +159,7 @@ func (ti *TriggerInfo) buildPipelines() []pipeline {
pipeline := teleportVersion.buildVersionPipeline(ti.SetupSteps, ti.Flags)
pipeline.Name += "-" + ti.Name
pipeline.Trigger = ti.Trigger
pipeline.DependsOn = append(pipeline.DependsOn, ti.ParentePipelineNames...)
pipelines = append(pipelines, pipeline)
}

View file

@ -45,7 +45,7 @@ func (rv *ReleaseVersion) buildVersionPipeline(triggerSetupSteps []step, flags *
dockerService(),
dockerRegistryService(),
}
pipeline.Volumes = dockerVolumes()
pipeline.Volumes = dockerVolumes(volumeAwsConfig)
pipeline.Environment = map[string]value{
"DEBIAN_FRONTEND": {
raw: "noninteractive",
@ -78,7 +78,7 @@ func (rv *ReleaseVersion) getSetupStepInformation(triggerSetupSteps []step) ([]s
return setupSteps, nextStageSetupStepNames
}
func (rv *ReleaseVersion) buildSteps(setupStepNames []string, flags *TriggerFlags) []step {
func (rv *ReleaseVersion) buildSteps(parentSetupStepNames []string, flags *TriggerFlags) []step {
clonedRepoPath := "/go/src/github.com/gravitational/teleport"
steps := make([]step, 0)
@ -88,12 +88,33 @@ func (rv *ReleaseVersion) buildSteps(setupStepNames []string, flags *TriggerFlag
cloneRepoStep(clonedRepoPath, rv.ShellVersion),
rv.buildSplitSemverSteps(flags.ShouldOnlyPublishFullSemver),
}
for _, setupStep := range setupSteps {
setupStep.DependsOn = append(setupStep.DependsOn, setupStepNames...)
steps = append(steps, setupStep)
setupStepNames = append(setupStepNames, setupStep.Name)
// These are sequential to prevent read/write contention by mounting volumes on
// multiple containeres at once
repos := getReposUsedByPipeline(flags)
var previousSetupRepo *ContainerRepo
for _, containerRepo := range repos {
repoSetupSteps := containerRepo.SetupSteps
if previousSetupRepo != nil {
previousRepoStepNames := getStepNames(previousSetupRepo.SetupSteps)
for i, repoSetupStep := range repoSetupSteps {
repoSetupSteps[i].DependsOn = append(repoSetupStep.DependsOn, previousRepoStepNames...)
}
}
setupSteps = append(setupSteps, repoSetupSteps...)
if len(repoSetupSteps) > 0 {
previousSetupRepo = containerRepo
}
}
for _, setupStep := range setupSteps {
setupStep.DependsOn = append(setupStep.DependsOn, parentSetupStepNames...)
steps = append(steps, setupStep)
}
setupStepNames := append(parentSetupStepNames, getStepNames(setupSteps)...)
for _, product := range rv.getProducts(clonedRepoPath) {
steps = append(steps, product.buildSteps(rv, setupStepNames, flags)...)
}
@ -101,6 +122,20 @@ func (rv *ReleaseVersion) buildSteps(setupStepNames []string, flags *TriggerFlag
return steps
}
func getReposUsedByPipeline(flags *TriggerFlags) []*ContainerRepo {
repos := []*ContainerRepo{GetStagingContainerRepo(flags.UseUniqueStagingTag)}
if flags.ShouldBuildNewImages {
repos = append(repos, GetPublicEcrPullRegistry())
}
if flags.ShouldAffectProductionImages {
repos = append(repos, GetProductionContainerRepos()...)
}
return repos
}
type Semver struct {
Name string // Human-readable name for the information contained in the semver, i.e. "major"
FilePath string // The path under the working dir where the information can be read from
@ -203,9 +238,16 @@ func (rv *ReleaseVersion) buildSplitSemverSteps(onlyBuildFullSemver bool) step {
}
func (rv *ReleaseVersion) getProducts(clonedRepoPath string) []*Product {
teleportProducts := []*Product{
NewTeleportProduct(false, false, rv), // OSS
NewTeleportProduct(true, false, rv), // Enterprise
NewTeleportProduct(true, true, rv), // Enterprise/FIPS
}
teleportOperatorProduct := NewTeleportOperatorProduct(clonedRepoPath)
products := make([]*Product, 0, 1)
products := make([]*Product, 0, len(teleportProducts)+1)
products = append(products, teleportProducts...)
products = append(products, teleportOperatorProduct)
return products

View file

@ -24,33 +24,36 @@ import (
// Describes a registry and repo that images are to be published to.
type ContainerRepo struct {
Name string
IsProductionRepo bool
IsImmutable bool
EnvironmentVars map[string]value
RegistryDomain string
RegistryOrg string
LoginCommands []string
TagBuilder func(baseTag *ImageTag) *ImageTag // Postprocessor for tags that append CR-specific suffixes
Name string // Human readable name for the repo. Does not need to match remote value.
IsImmutable bool // True if the repo supports updating existing tags, false otherwise
EnvironmentVars map[string]value // Steps that use the described repo should include these env vars
RegistryDomain string // The registry that hosts the container repo
RegistryOrg string // The organization name (usually "gravitational") that the repo is listed under
SetupSteps []step // Optional field that can be used to run setup code prior to first login
LoginCommands []string // Commands to authenticate the docker daemon with the repo
TagBuilder func(baseTag *ImageTag) *ImageTag // Postprocessor for tags that append CR-specific suffixes
}
func NewEcrContainerRepo(accessKeyIDSecret, secretAccessKeySecret, domain string, isProduction, isImmutable, guaranteeUnique bool) *ContainerRepo {
nameSuffix := "staging"
func NewEcrContainerRepo(accessKeyIDSecret, secretAccessKeySecret, roleSecret, domain, name string,
isPublic, isImmutable, guaranteeUnique bool) *ContainerRepo {
ecrRegion := StagingEcrRegion
loginSubcommand := "ecr"
if isProduction {
nameSuffix = "production"
if isPublic {
ecrRegion = PublicEcrRegion
loginSubcommand = "ecr-public"
}
repoName := fmt.Sprintf("ECR - %s", name)
profileName := fmt.Sprintf("ecr-%s", name)
registryOrg := ProductionRegistryOrg
if configureForPRTestingOnly {
accessKeyIDSecret = testingSecretPrefix + accessKeyIDSecret
secretAccessKeySecret = testingSecretPrefix + secretAccessKeySecret
roleSecret = testingSecretPrefix + roleSecret
registryOrg = testingECRRegistryOrg
if !isProduction {
if !isPublic {
domain = testingECRDomain
ecrRegion = testingECRRegion
}
@ -66,20 +69,27 @@ func NewEcrContainerRepo(accessKeyIDSecret, secretAccessKeySecret, domain string
}
return &ContainerRepo{
Name: fmt.Sprintf("ECR - %s", nameSuffix),
IsProductionRepo: isProduction,
IsImmutable: isImmutable,
Name: repoName,
IsImmutable: isImmutable,
EnvironmentVars: map[string]value{
"AWS_ACCESS_KEY_ID": {
fromSecret: accessKeyIDSecret,
},
"AWS_SECRET_ACCESS_KEY": {
fromSecret: secretAccessKeySecret,
},
"AWS_PROFILE": {raw: profileName},
},
RegistryDomain: domain,
RegistryOrg: registryOrg,
LoginCommands: loginCommands,
SetupSteps: []step{
kubernetesAssumeAwsRoleStep(kubernetesRoleSettings{
awsRoleSettings: awsRoleSettings{
awsAccessKeyID: value{fromSecret: accessKeyIDSecret},
awsSecretAccessKey: value{fromSecret: secretAccessKeySecret},
role: value{fromSecret: roleSecret},
},
configVolume: volumeRefAwsConfig,
profile: profileName,
name: fmt.Sprintf("Assume %s AWS Role", repoName),
append: true,
}),
},
LoginCommands: loginCommands,
TagBuilder: func(tag *ImageTag) *ImageTag {
if guaranteeUnique {
tag.AppendString("$TIMESTAMP")
@ -99,9 +109,8 @@ func NewQuayContainerRepo(dockerUsername, dockerPassword string) *ContainerRepo
}
return &ContainerRepo{
Name: "Quay",
IsProductionRepo: true,
IsImmutable: false,
Name: "Quay",
IsImmutable: false,
EnvironmentVars: map[string]value{
"QUAY_USERNAME": {
fromSecret: dockerUsername,
@ -120,10 +129,9 @@ func NewQuayContainerRepo(dockerUsername, dockerPassword string) *ContainerRepo
func NewLocalContainerRepo() *ContainerRepo {
return &ContainerRepo{
Name: "Local Registry",
IsProductionRepo: false,
IsImmutable: false,
RegistryDomain: LocalRegistrySocket,
Name: "Local Registry",
IsImmutable: false,
RegistryDomain: LocalRegistrySocket,
}
}
@ -132,16 +140,29 @@ func GetLocalContainerRepo() *ContainerRepo {
}
func GetStagingContainerRepo(uniqueStagingTag bool) *ContainerRepo {
return NewEcrContainerRepo("STAGING_TELEPORT_DRONE_USER_ECR_KEY", "STAGING_TELEPORT_DRONE_USER_ECR_SECRET", StagingRegistry, false, true, uniqueStagingTag)
return NewEcrContainerRepo("STAGING_TELEPORT_DRONE_USER_ECR_KEY", "STAGING_TELEPORT_DRONE_USER_ECR_SECRET",
"STAGING_TELEPORT_DRONE_ECR_AWS_ROLE", StagingRegistry, "staging", false, true, uniqueStagingTag)
}
func GetProductionContainerRepos() []*ContainerRepo {
return []*ContainerRepo{
NewQuayContainerRepo("PRODUCTION_QUAYIO_DOCKER_USERNAME", "PRODUCTION_QUAYIO_DOCKER_PASSWORD"),
NewEcrContainerRepo("PRODUCTION_TELEPORT_DRONE_USER_ECR_KEY", "PRODUCTION_TELEPORT_DRONE_USER_ECR_SECRET", ProductionRegistry, true, false, false),
NewEcrContainerRepo("PRODUCTION_TELEPORT_DRONE_USER_ECR_KEY", "PRODUCTION_TELEPORT_DRONE_USER_ECR_SECRET",
"PRODUCTION_TELEPORT_DRONE_ECR_AWS_ROLE", ProductionRegistry, "production", true, false, false),
}
}
// This is a special case of "public.ecr.aws". This references a public ECR repo that may only ever be pulled from.
// The purpose of this is to authenticate with public ECR prior to `docker buildx build` so that the build command
// will pull from the repo as an authenticated user. Pulling as an authenticated user greatly increase the number
// of layers that can be pulled per second, which fixes certain issues with running build commands in parallel.
func GetPublicEcrPullRegistry() *ContainerRepo {
// Note: these credentials currently allow for push and pull. I'd recommend either a separate role or set of
// credentials for pull only access.
return NewEcrContainerRepo("PRODUCTION_TELEPORT_DRONE_USER_ECR_KEY", "PRODUCTION_TELEPORT_DRONE_USER_ECR_SECRET",
"PRODUCTION_TELEPORT_DRONE_ECR_AWS_ROLE", ProductionRegistry, "authenticated-pull", true, false, false)
}
func (cr *ContainerRepo) buildSteps(buildStepDetails []*buildStepOutput, flags *TriggerFlags) []step {
if len(buildStepDetails) == 0 {
return nil
@ -225,7 +246,7 @@ func (cr *ContainerRepo) pullPushStep(image *Image, dependencySteps []string) (s
return step{
Name: fmt.Sprintf("Pull %s and push it to %s", image.GetDisplayName(), localRepo.Name),
Image: "docker",
Volumes: dockerVolumeRefs(),
Volumes: dockerVolumeRefs(volumeRefAwsConfig),
Environment: cr.EnvironmentVars,
Commands: commands,
DependsOn: dependencySteps,
@ -274,7 +295,7 @@ func (cr *ContainerRepo) tagAndPushStep(buildStepDetails *buildStepOutput, image
step := step{
Name: fmt.Sprintf("Tag and push image %q to %s", buildStepDetails.BuiltImage.GetDisplayName(), cr.Name),
Image: "docker",
Volumes: dockerVolumeRefs(),
Volumes: dockerVolumeRefs(volumeRefAwsConfig),
Environment: cr.EnvironmentVars,
Commands: commands,
DependsOn: dependencySteps,
@ -302,7 +323,7 @@ func (cr *ContainerRepo) createAndPushManifestStep(manifestImage *Image, pushSte
return step{
Name: fmt.Sprintf("Create manifest and push %q to %s", manifestImage.GetDisplayName(), cr.Name),
Image: "docker",
Volumes: dockerVolumeRefs(),
Volumes: dockerVolumeRefs(volumeRefAwsConfig),
Environment: cr.EnvironmentVars,
Commands: cr.buildCommandsWithLogin(commands),
DependsOn: pushStepNames,

View file

@ -57,10 +57,10 @@ package main
const (
configureForPRTestingOnly bool = false
testingSecretPrefix string = "TEST_"
testingQuayRegistryOrg string = "" //"fred_heinecke"
testingQuayRegistryOrg string = "" // "fred_heinecke"
testingECRRegistryOrg string = "u8j2q1d9"
testingECRRegion string = "us-east-2"
prBranch string = "" //"fred/multiarch-teleport-container-images"
prBranch string = "" // "fred/multiarch-teleport-actual-container-images"
testingECRDomain string = "278576220453.dkr.ecr.us-east-2.amazonaws.com"
)

View file

@ -14,147 +14,13 @@
package main
import "fmt"
func promoteBuildPipelines() []pipeline {
promotePipelines := make([]pipeline, 0)
promotePipelines = append(promotePipelines, promoteBuildOsRepoPipelines()...)
promotePipelines = append(promotePipelines, buildDockerPromotionPipelineECR(), buildDockerPromotionPipelineQuay())
return promotePipelines
}
func buildDockerPromotionPipelineECR() pipeline {
dockerPipeline := newKubePipeline("promote-docker-ecr")
dockerPipeline.Trigger = triggerPromote
dockerPipeline.Trigger.Target.Include = append(dockerPipeline.Trigger.Target.Include, "promote-docker", "promote-docker-ecr")
dockerPipeline.Workspace = workspace{Path: "/go"}
// Add docker service
dockerPipeline.Services = []service{
dockerService(),
}
dockerPipeline.Volumes = []volume{
volumeDocker,
volumeAwsConfig,
}
dockerPipeline.Steps = append(dockerPipeline.Steps, verifyTaggedStep())
dockerPipeline.Steps = append(dockerPipeline.Steps, waitForDockerStep())
// Pull/Push Steps
dockerPipeline.Steps = append(dockerPipeline.Steps, kubernetesAssumeAwsRoleStep(kubernetesRoleSettings{
awsRoleSettings: awsRoleSettings{
awsAccessKeyID: value{fromSecret: "PRODUCTION_TELEPORT_DRONE_USER_ECR_KEY"},
awsSecretAccessKey: value{fromSecret: "PRODUCTION_TELEPORT_DRONE_USER_ECR_SECRET"},
role: value{fromSecret: "PRODUCTION_TELEPORT_DRONE_ECR_AWS_ROLE"},
},
configVolume: volumeRefAwsConfig,
}))
dockerPipeline.Steps = append(dockerPipeline.Steps, step{
Name: "Pull/retag Docker images",
Image: "docker",
Volumes: []volumeRef{
volumeRefDocker,
volumeRefAwsConfig,
},
Commands: []string{
"apk add --no-cache aws-cli",
"export VERSION=${DRONE_TAG##v}",
// authenticate with staging credentials
"aws ecr get-login-password --region=us-west-2 | docker login -u=\"AWS\" --password-stdin " + StagingRegistry,
// pull staging images
"echo \"---> Pulling images for $${VERSION}\"",
fmt.Sprintf("docker pull %s/gravitational/teleport:$${VERSION}", StagingRegistry),
fmt.Sprintf("docker pull %s/gravitational/teleport-ent:$${VERSION}", StagingRegistry),
fmt.Sprintf("docker pull %s/gravitational/teleport-ent:$${VERSION}-fips", StagingRegistry),
// retag images to production naming
"echo \"---> Tagging images for $${VERSION}\"",
fmt.Sprintf("docker tag %s/gravitational/teleport:$${VERSION} %s/gravitational/teleport:$${VERSION}", StagingRegistry, ProductionRegistry),
fmt.Sprintf("docker tag %s/gravitational/teleport-ent:$${VERSION} %s/gravitational/teleport-ent:$${VERSION}", StagingRegistry, ProductionRegistry),
fmt.Sprintf("docker tag %s/gravitational/teleport-ent:$${VERSION}-fips %s/gravitational/teleport-ent:$${VERSION}-fips", StagingRegistry, ProductionRegistry),
// authenticate with production credentials
"docker logout " + StagingRegistry,
"aws ecr-public get-login-password --region=us-east-1 | docker login -u=\"AWS\" --password-stdin " + ProductionRegistry,
// push production images
"echo \"---> Pushing images for $${VERSION}\"",
// push production images ECR
fmt.Sprintf("docker push %s/gravitational/teleport:$${VERSION}", ProductionRegistry),
fmt.Sprintf("docker push %s/gravitational/teleport-ent:$${VERSION}", ProductionRegistry),
fmt.Sprintf("docker push %s/gravitational/teleport-ent:$${VERSION}-fips", ProductionRegistry),
},
})
return dockerPipeline
}
func buildDockerPromotionPipelineQuay() pipeline {
dockerPipeline := newKubePipeline("promote-docker-quay")
dockerPipeline.Trigger = triggerPromote
dockerPipeline.Trigger.Target.Include = append(dockerPipeline.Trigger.Target.Include, "promote-docker", "promote-docker-quay")
dockerPipeline.Workspace = workspace{Path: "/go"}
// Add docker service
dockerPipeline.Services = []service{
dockerService(),
}
dockerPipeline.Volumes = []volume{
volumeDocker,
volumeAwsConfig,
}
dockerPipeline.Steps = append(dockerPipeline.Steps, verifyTaggedStep())
dockerPipeline.Steps = append(dockerPipeline.Steps, waitForDockerStep())
// Pull/Push Steps
dockerPipeline.Steps = append(dockerPipeline.Steps, kubernetesAssumeAwsRoleStep(kubernetesRoleSettings{
awsRoleSettings: awsRoleSettings{
awsAccessKeyID: value{fromSecret: "PRODUCTION_TELEPORT_DRONE_USER_ECR_KEY"},
awsSecretAccessKey: value{fromSecret: "PRODUCTION_TELEPORT_DRONE_USER_ECR_SECRET"},
role: value{fromSecret: "PRODUCTION_TELEPORT_DRONE_ECR_AWS_ROLE"},
},
configVolume: volumeRefAwsConfig,
}))
dockerPipeline.Steps = append(dockerPipeline.Steps, step{
Name: "Pull/retag Docker images",
Image: "docker",
Environment: map[string]value{
"QUAY_USERNAME": {fromSecret: "PRODUCTION_QUAYIO_DOCKER_USERNAME"},
"QUAY_PASSWORD": {fromSecret: "PRODUCTION_QUAYIO_DOCKER_PASSWORD"},
},
Volumes: []volumeRef{
volumeRefDocker,
volumeRefAwsConfig,
},
Commands: []string{
"apk add --no-cache aws-cli",
"export VERSION=${DRONE_TAG##v}",
// authenticate with staging credentials
"aws ecr get-login-password --region=us-west-2 | docker login -u=\"AWS\" --password-stdin " + StagingRegistry,
// pull staging images
"echo \"---> Pulling images for $${VERSION}\"",
fmt.Sprintf("docker pull %s/gravitational/teleport:$${VERSION}", StagingRegistry),
fmt.Sprintf("docker pull %s/gravitational/teleport-ent:$${VERSION}", StagingRegistry),
fmt.Sprintf("docker pull %s/gravitational/teleport-ent:$${VERSION}-fips", StagingRegistry),
// retag images to production naming
"echo \"---> Tagging images for $${VERSION}\"",
fmt.Sprintf("docker tag %s/gravitational/teleport:$${VERSION} %s/gravitational/teleport:$${VERSION}", StagingRegistry, ProductionRegistryQuay),
fmt.Sprintf("docker tag %s/gravitational/teleport-ent:$${VERSION} %s/gravitational/teleport-ent:$${VERSION}", StagingRegistry, ProductionRegistryQuay),
fmt.Sprintf("docker tag %s/gravitational/teleport-ent:$${VERSION}-fips %s/gravitational/teleport-ent:$${VERSION}-fips", StagingRegistry, ProductionRegistryQuay),
// authenticate with production credentials
"docker logout " + StagingRegistry,
"docker login -u=\"$QUAY_USERNAME\" -p=\"$QUAY_PASSWORD\" " + ProductionRegistryQuay,
// push production images
"echo \"---> Pushing images for $${VERSION}\"",
fmt.Sprintf("docker push %s/gravitational/teleport:$${VERSION}", ProductionRegistryQuay),
fmt.Sprintf("docker push %s/gravitational/teleport-ent:$${VERSION}", ProductionRegistryQuay),
fmt.Sprintf("docker push %s/gravitational/teleport-ent:$${VERSION}-fips", ProductionRegistryQuay),
},
})
return dockerPipeline
}
func publishReleasePipeline() pipeline {
return relcliPipeline(triggerPromote, "publish-rlz", "Publish in Release API", "relcli auto_publish -f -v 6")
}