Added YUM implementation of OS package build tool (#14203)

* Added YUM implementation of OS package build tool

* Addressed PR comments

* Added YUM migrations

* Added curl to YUM dependencies

* Changed pipelines to use golang:1.18.4-bullseye for Go

* Implemented proper repo downloading logic

* Fixed other merge conflicts

* Added artifacts cleanup

* Removed delete on s3 sync

* Added RPM migrations

* v8 migrations

* Partial v8 migration

* Migration remainder

* Reduced requested resources

* Updated resource limits per step

* Added k8s stage resource limits to drone

* Fixed format issue

* Removed resource requests

* Added `depends_on` support to dronegen

* v8.3 migrations

* Fixed parallelism

* Removed migration parallelism

* Fixed RPM base arch lookup

* v6 and v7 YUM migration

* Fixed missing ISA

* Updated repo file path

* Added logging

* Removed vars from repo file

* v8.3 migration first batch

* v8.3 migration second batch

* v9.0 migration

* v9.1 migration

* v9.2 migration

* v9.3 first migration

* v9.3 second migration

* v10.0 migration

* Removed migrations

* Disabled shell linting non-issues

* Fixed linter problem

* More linter fixes
fheinecke committed on 2022-08-02 16:32:59 -05:00 (via GitHub)
parent 7332ac90a9
commit b022fea56b
26 changed files with 2878 additions and 809 deletions


@@ -5398,7 +5398,7 @@ volumes:
################################################
# Generated using dronegen, do not edit by hand!
# Use 'make dronegen' to update.
# Generated at dronegen/misc.go:138
# Generated at dronegen/os_repos.go:270
################################################
kind: pipeline
@@ -5426,7 +5426,7 @@ steps:
################################################
# Generated using dronegen, do not edit by hand!
# Use 'make dronegen' to update.
# Generated at dronegen/misc.go:162
# Generated at dronegen/os_repos.go:294
################################################
kind: pipeline
@@ -5469,6 +5469,7 @@ steps:
image: amazon/aws-cli
commands:
- mkdir -pv "$ARTIFACT_PATH"
- rm -rf "${ARTIFACT_PATH}/*"
- aws s3 sync --no-progress --delete --exclude "*" --include "*.deb*" s3://$AWS_S3_BUCKET/teleport/tag/${DRONE_TAG##v}/
"$ARTIFACT_PATH"
environment:
@@ -5479,25 +5480,25 @@ steps:
from_secret: AWS_S3_BUCKET
AWS_SECRET_ACCESS_KEY:
from_secret: AWS_SECRET_ACCESS_KEY
depends_on:
- Verify build is tagged
- Check out code
- Check if tag is prerelease
- name: Publish debs to APT repos for "${DRONE_TAG}"
image: golang:1.18.1-bullseye
image: golang:1.18.4-bullseye
commands:
- mkdir -pv -m0700 $GNUPGHOME
- echo "$GPG_RPM_SIGNING_ARCHIVE" | base64 -d | tar -xzf - -C $GNUPGHOME
- chown -R root:root $GNUPGHOME
- apt update
- apt install aptly tree -y
- apt install -y aptly
- mkdir -pv -m0700 "$GNUPGHOME"
- echo "$GPG_RPM_SIGNING_ARCHIVE" | base64 -d | tar -xzf - -C $GNUPGHOME
- chown -R root:root "$GNUPGHOME"
- cd "/go/src/github.com/gravitational/teleport/build.assets/tooling"
- export VERSION="${DRONE_TAG}"
- export RELEASE_CHANNEL="stable"
- go run ./cmd/build-apt-repos -bucket "$APT_S3_BUCKET" -local-bucket-path "$BUCKET_CACHE_PATH"
-artifact-version "$VERSION" -release-channel "$RELEASE_CHANNEL" -aptly-root-dir
"$APTLY_ROOT_DIR" -artifact-path "$ARTIFACT_PATH" -log-level 4
- rm -rf "$BUCKET_CACHE_PATH"
- df -h "$APTLY_ROOT_DIR"
- go run ./cmd/build-os-package-repos apt -bucket "$REPO_S3_BUCKET" -local-bucket-path
"$BUCKET_CACHE_PATH" -artifact-version "$VERSION" -release-channel "$RELEASE_CHANNEL"
-artifact-path "$ARTIFACT_PATH" -log-level 4 -aptly-root-dir "$APTLY_ROOT_DIR"
environment:
APT_S3_BUCKET:
from_secret: APT_REPO_NEW_AWS_S3_BUCKET
APTLY_ROOT_DIR: /mnt/aptly
ARTIFACT_PATH: /go/artifacts
AWS_ACCESS_KEY_ID:
@@ -5506,16 +5507,24 @@ steps:
AWS_SECRET_ACCESS_KEY:
from_secret: APT_REPO_NEW_AWS_SECRET_ACCESS_KEY
BUCKET_CACHE_PATH: /tmp/bucket
DEBIAN_FRONTEND: noninteractive
GNUPGHOME: /tmpfs/gnupg
GPG_RPM_SIGNING_ARCHIVE:
from_secret: GPG_RPM_SIGNING_ARCHIVE
REPO_S3_BUCKET:
from_secret: APT_REPO_NEW_AWS_S3_BUCKET
volumes:
- name: aptrepo
- name: apt-persistence
path: /mnt
- name: tmpfs
path: /tmpfs
depends_on:
- Download artifacts for "${DRONE_TAG}"
- Verify build is tagged
- Check out code
- Check if tag is prerelease
volumes:
- name: aptrepo
- name: apt-persistence
claim:
name: drone-s3-aptrepo-pvc
- name: tmpfs
@@ -5526,12 +5535,40 @@ volumes:
################################################
# Generated using dronegen, do not edit by hand!
# Use 'make dronegen' to update.
# Generated at dronegen/promote.go:81
# Generated at dronegen/os_repos.go:270
################################################
kind: pipeline
type: kubernetes
name: promote-docker-quay
name: migrate-yum-new-repos
trigger:
event:
include:
- custom
repo:
include:
- non-existent-repository
branch:
include:
- non-existent-branch
clone:
disable: true
steps:
- name: Placeholder
image: alpine:latest
commands:
- echo "This command, step, and pipeline never runs"
---
################################################
# Generated using dronegen, do not edit by hand!
# Use 'make dronegen' to update.
# Generated at dronegen/os_repos.go:294
################################################
kind: pipeline
type: kubernetes
name: publish-yum-new-repos
trigger:
event:
include:
@@ -5539,11 +5576,9 @@ trigger:
target:
include:
- production
- promote-docker
- promote-docker-quay
repo:
include:
- gravitational/*
- gravitational/teleport
workspace:
path: /go
clone:
@@ -5554,74 +5589,91 @@ steps:
commands:
- '[ -n ${DRONE_TAG} ] || (echo ''DRONE_TAG is not set. Is the commit tagged?''
&& exit 1)'
- name: Wait for docker
image: docker
- name: Check out code
image: alpine/git:latest
commands:
- timeout 30s /bin/sh -c 'while [ ! -S /var/run/docker.sock ]; do sleep 1; done'
volumes:
- name: dockersock
path: /var/run
- name: Pull/retag Docker images
image: docker
- mkdir -p "/go/src/github.com/gravitational/teleport"
- cd "/go/src/github.com/gravitational/teleport"
- git clone https://github.com/gravitational/${DRONE_REPO_NAME}.git .
- git checkout "${DRONE_TAG}"
- name: Check if tag is prerelease
image: golang:1.17-alpine
commands:
- apk add --no-cache aws-cli
- export VERSION=${DRONE_TAG##v}
- docker login -u="$STAGING_QUAY_USERNAME" -p="$STAGING_QUAY_PASSWORD" quay.io
- aws ecr get-login-password --region=us-west-2 | docker login -u="AWS" --password-stdin
146628656107.dkr.ecr.us-west-2.amazonaws.com
- echo "---> Pulling images for $${VERSION}"
- docker pull 146628656107.dkr.ecr.us-west-2.amazonaws.com/gravitational/teleport:$${VERSION}
- docker pull 146628656107.dkr.ecr.us-west-2.amazonaws.com/gravitational/teleport-ent:$${VERSION}
- docker pull 146628656107.dkr.ecr.us-west-2.amazonaws.com/gravitational/teleport-ent:$${VERSION}-fips
- docker pull quay.io/gravitational/teleport-operator-ci:$${VERSION}
- echo "---> Tagging images for $${VERSION}"
- docker tag 146628656107.dkr.ecr.us-west-2.amazonaws.com/gravitational/teleport:$${VERSION}
quay.io/gravitational/teleport:$${VERSION}
- docker tag 146628656107.dkr.ecr.us-west-2.amazonaws.com/gravitational/teleport-ent:$${VERSION}
quay.io/gravitational/teleport-ent:$${VERSION}
- docker tag 146628656107.dkr.ecr.us-west-2.amazonaws.com/gravitational/teleport-ent:$${VERSION}-fips
quay.io/gravitational/teleport-ent:$${VERSION}-fips
- docker tag quay.io/gravitational/teleport-operator-ci:$${VERSION} quay.io/gravitational/teleport-operator:$${VERSION}
- docker logout quay.io
- docker logout 146628656107.dkr.ecr.us-west-2.amazonaws.com
- docker login -u="$QUAY_USERNAME" -p="$QUAY_PASSWORD" quay.io
- echo "---> Pushing images for $${VERSION}"
- docker push quay.io/gravitational/teleport:$${VERSION}
- docker push quay.io/gravitational/teleport-ent:$${VERSION}
- docker push quay.io/gravitational/teleport-ent:$${VERSION}-fips
- docker push quay.io/gravitational/teleport-operator:$${VERSION}
- cd "/go/src/github.com/gravitational/teleport/build.assets/tooling"
- go run ./cmd/check -tag ${DRONE_TAG} -check prerelease || (echo '---> This is
a prerelease, not publishing ${DRONE_TAG} packages to APT repos' && exit 78)
- name: Download artifacts for "${DRONE_TAG}"
image: amazon/aws-cli
commands:
- mkdir -pv "$ARTIFACT_PATH"
- rm -rf "${ARTIFACT_PATH}/*"
- aws s3 sync --no-progress --delete --exclude "*" --include "*.rpm*" s3://$AWS_S3_BUCKET/teleport/tag/${DRONE_TAG##v}/
"$ARTIFACT_PATH"
environment:
ARTIFACT_PATH: /go/artifacts
AWS_ACCESS_KEY_ID:
from_secret: STAGING_TELEPORT_DRONE_USER_ECR_KEY
from_secret: AWS_ACCESS_KEY_ID
AWS_S3_BUCKET:
from_secret: AWS_S3_BUCKET
AWS_SECRET_ACCESS_KEY:
from_secret: STAGING_TELEPORT_DRONE_USER_ECR_SECRET
QUAY_PASSWORD:
from_secret: PRODUCTION_QUAYIO_DOCKER_PASSWORD
QUAY_USERNAME:
from_secret: PRODUCTION_QUAYIO_DOCKER_USERNAME
STAGING_QUAY_PASSWORD:
from_secret: QUAYIO_DOCKER_PASSWORD
STAGING_QUAY_USERNAME:
from_secret: QUAYIO_DOCKER_USERNAME
from_secret: AWS_SECRET_ACCESS_KEY
depends_on:
- Verify build is tagged
- Check out code
- Check if tag is prerelease
- name: Publish rpms to YUM repos for "${DRONE_TAG}"
image: golang:1.18.4-bullseye
commands:
- apt update
- apt install -y createrepo-c
- mkdir -pv "$CACHE_DIR"
- mkdir -pv -m0700 "$GNUPGHOME"
- echo "$GPG_RPM_SIGNING_ARCHIVE" | base64 -d | tar -xzf - -C $GNUPGHOME
- chown -R root:root "$GNUPGHOME"
- cd "/go/src/github.com/gravitational/teleport/build.assets/tooling"
- export VERSION="${DRONE_TAG}"
- export RELEASE_CHANNEL="stable"
- go run ./cmd/build-os-package-repos yum -bucket "$REPO_S3_BUCKET" -local-bucket-path
"$BUCKET_CACHE_PATH" -artifact-version "$VERSION" -release-channel "$RELEASE_CHANNEL"
-artifact-path "$ARTIFACT_PATH" -log-level 4 -cache-dir "$CACHE_DIR"
environment:
ARTIFACT_PATH: /go/artifacts
AWS_ACCESS_KEY_ID:
from_secret: YUM_REPO_NEW_AWS_ACCESS_KEY_ID
AWS_REGION: us-west-2
AWS_SECRET_ACCESS_KEY:
from_secret: YUM_REPO_NEW_AWS_SECRET_ACCESS_KEY
BUCKET_CACHE_PATH: /mnt/bucket
CACHE_DIR: /mnt/createrepo_cache
DEBIAN_FRONTEND: noninteractive
GNUPGHOME: /tmpfs/gnupg
GPG_RPM_SIGNING_ARCHIVE:
from_secret: GPG_RPM_SIGNING_ARCHIVE
REPO_S3_BUCKET:
from_secret: YUM_REPO_NEW_AWS_S3_BUCKET
volumes:
- name: dockersock
path: /var/run
services:
- name: Start Docker
image: docker:dind
privileged: true
volumes:
- name: dockersock
path: /var/run
- name: yum-persistence
path: /mnt
- name: tmpfs
path: /tmpfs
depends_on:
- Download artifacts for "${DRONE_TAG}"
- Verify build is tagged
- Check out code
- Check if tag is prerelease
volumes:
- name: dockersock
temp: {}
- name: yum-persistence
claim:
name: drone-s3-yumrepo-pvc
- name: tmpfs
temp:
medium: memory
---
################################################
# Generated using dronegen, do not edit by hand!
# Use 'make dronegen' to update.
# Generated at dronegen/promote.go:27
# Generated at dronegen/promote.go:28
################################################
kind: pipeline
@@ -5700,6 +5752,101 @@ volumes:
- name: dockersock
temp: {}
---
################################################
# Generated using dronegen, do not edit by hand!
# Use 'make dronegen' to update.
# Generated at dronegen/promote.go:82
################################################
kind: pipeline
type: kubernetes
name: promote-docker-quay
trigger:
event:
include:
- promote
target:
include:
- production
- promote-docker
- promote-docker-quay
repo:
include:
- gravitational/*
workspace:
path: /go
clone:
disable: true
steps:
- name: Verify build is tagged
image: alpine:latest
commands:
- '[ -n ${DRONE_TAG} ] || (echo ''DRONE_TAG is not set. Is the commit tagged?''
&& exit 1)'
- name: Wait for docker
image: docker
commands:
- timeout 30s /bin/sh -c 'while [ ! -S /var/run/docker.sock ]; do sleep 1; done'
volumes:
- name: dockersock
path: /var/run
- name: Pull/retag Docker images
image: docker
commands:
- apk add --no-cache aws-cli
- export VERSION=${DRONE_TAG##v}
- docker login -u="$STAGING_QUAY_USERNAME" -p="$STAGING_QUAY_PASSWORD" quay.io
- aws ecr get-login-password --region=us-west-2 | docker login -u="AWS" --password-stdin
146628656107.dkr.ecr.us-west-2.amazonaws.com
- echo "---> Pulling images for $${VERSION}"
- docker pull 146628656107.dkr.ecr.us-west-2.amazonaws.com/gravitational/teleport:$${VERSION}
- docker pull 146628656107.dkr.ecr.us-west-2.amazonaws.com/gravitational/teleport-ent:$${VERSION}
- docker pull 146628656107.dkr.ecr.us-west-2.amazonaws.com/gravitational/teleport-ent:$${VERSION}-fips
- docker pull quay.io/gravitational/teleport-operator-ci:$${VERSION}
- echo "---> Tagging images for $${VERSION}"
- docker tag 146628656107.dkr.ecr.us-west-2.amazonaws.com/gravitational/teleport:$${VERSION}
quay.io/gravitational/teleport:$${VERSION}
- docker tag 146628656107.dkr.ecr.us-west-2.amazonaws.com/gravitational/teleport-ent:$${VERSION}
quay.io/gravitational/teleport-ent:$${VERSION}
- docker tag 146628656107.dkr.ecr.us-west-2.amazonaws.com/gravitational/teleport-ent:$${VERSION}-fips
quay.io/gravitational/teleport-ent:$${VERSION}-fips
- docker tag quay.io/gravitational/teleport-operator-ci:$${VERSION} quay.io/gravitational/teleport-operator:$${VERSION}
- docker logout quay.io
- docker logout 146628656107.dkr.ecr.us-west-2.amazonaws.com
- docker login -u="$QUAY_USERNAME" -p="$QUAY_PASSWORD" quay.io
- echo "---> Pushing images for $${VERSION}"
- docker push quay.io/gravitational/teleport:$${VERSION}
- docker push quay.io/gravitational/teleport-ent:$${VERSION}
- docker push quay.io/gravitational/teleport-ent:$${VERSION}-fips
- docker push quay.io/gravitational/teleport-operator:$${VERSION}
environment:
AWS_ACCESS_KEY_ID:
from_secret: STAGING_TELEPORT_DRONE_USER_ECR_KEY
AWS_SECRET_ACCESS_KEY:
from_secret: STAGING_TELEPORT_DRONE_USER_ECR_SECRET
QUAY_PASSWORD:
from_secret: PRODUCTION_QUAYIO_DOCKER_PASSWORD
QUAY_USERNAME:
from_secret: PRODUCTION_QUAYIO_DOCKER_USERNAME
STAGING_QUAY_PASSWORD:
from_secret: QUAYIO_DOCKER_PASSWORD
STAGING_QUAY_USERNAME:
from_secret: QUAYIO_DOCKER_USERNAME
volumes:
- name: dockersock
path: /var/run
services:
- name: Start Docker
image: docker:dind
privileged: true
volumes:
- name: dockersock
path: /var/run
volumes:
- name: dockersock
temp: {}
---
kind: pipeline
type: kubernetes
@@ -6091,6 +6238,6 @@ volumes:
name: drone-s3-debrepo-pvc
---
kind: signature
hmac: cfe9263c545d10b26f27dc10b8b5e5b833d0fca4333860550dae28113cef681a
hmac: 886b4ee9d5440155e696730354e0d8f773def8d703105d58d7017bf997e0c687
...


@@ -1,159 +0,0 @@
/*
Copyright 2022 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"os"
"strings"
"github.com/gravitational/trace"
"github.com/sirupsen/logrus"
"golang.org/x/mod/semver"
)
const StableChannelFlagValue string = "stable"
type Config struct {
artifactPath string
artifactVersion string
bucketName string
localBucketPath string
releaseChannel string
aptlyPath string
logLevel uint
logJSON bool
}
// Parses and validates the provided flags, returning the parsed arguments in a struct.
func ParseFlags() (*Config, error) {
homeDir, err := os.UserHomeDir()
if err != nil {
return nil, trace.Wrap(err, "failed to get user's home directory path")
}
config := &Config{}
flag.StringVar(&config.artifactPath, "artifact-path", "/artifacts", "Path to the filesystem tree containing the *.deb files to add to the APT repos")
flag.StringVar(&config.artifactVersion, "artifact-version", "", "The version of the artifacts that will be added to the APT repos")
flag.StringVar(&config.releaseChannel, "release-channel", "", "The release channel of the APT repos that the artifacts should be added to")
flag.StringVar(&config.bucketName, "bucket", "", "The name of the S3 bucket where the repo should be synced to/from")
flag.StringVar(&config.localBucketPath, "local-bucket-path", "/bucket", "The local path where the bucket should be synced to")
flag.StringVar(&config.aptlyPath, "aptly-root-dir", homeDir, "The Aptly \"rootDir\" (see https://www.aptly.info/doc/configuration/ for details)")
flag.UintVar(&config.logLevel, "log-level", uint(logrus.InfoLevel), "Log level from 0 to 6, 6 being the most verbose")
flag.BoolVar(&config.logJSON, "log-json", false, "True if the log entries should use JSON format, false for text logging")
flag.Parse()
if err := Check(config); err != nil {
return nil, trace.Wrap(err, "failed to validate flags")
}
return config, nil
}
func Check(config *Config) error {
if err := validateArtifactPath(config.artifactPath); err != nil {
return trace.Wrap(err, "failed to validate the artifact path flag")
}
if err := validateBucketName(config.bucketName); err != nil {
return trace.Wrap(err, "failed to validate the bucket name flag")
}
if err := validateLocalBucketPath(config.localBucketPath); err != nil {
return trace.Wrap(err, "failed to validate the local bucket path flag")
}
if err := validateArtifactVersion(config.artifactVersion); err != nil {
return trace.Wrap(err, "failed to validate the artifact version flag")
}
if err := validateReleaseChannel(config.releaseChannel); err != nil {
return trace.Wrap(err, "failed to validate the release channel flag")
}
if err := validateLogLevel(config.logLevel); err != nil {
return trace.Wrap(err, "failed to validate the log level flag")
}
return nil
}
func validateArtifactPath(value string) error {
if value == "" {
return trace.BadParameter("the artifact-path flag should not be empty")
}
if stat, err := os.Stat(value); os.IsNotExist(err) {
return trace.BadParameter("the artifact-path %q does not exist", value)
} else if !stat.IsDir() {
return trace.BadParameter("the artifact-path %q is not a directory", value)
}
return nil
}
func validateBucketName(value string) error {
if value == "" {
return trace.BadParameter("the bucket flag should not be empty")
}
return nil
}
func validateLocalBucketPath(value string) error {
if value == "" {
return trace.BadParameter("the local-bucket-path flag should not be empty")
}
if stat, err := os.Stat(value); err == nil && !stat.IsDir() {
return trace.BadParameter("the local bucket path points to a file instead of a directory")
}
return nil
}
func validateArtifactVersion(value string) error {
if value == "" {
return trace.BadParameter("the artifact-version flag should not be empty")
}
if !semver.IsValid(value) {
return trace.BadParameter("the artifact-version flag does not contain a valid semver version string")
}
return nil
}
func validateReleaseChannel(value string) error {
if value == "" {
return trace.BadParameter("the release-channel flag should not be empty")
}
// Not sure what other channels we'd want to support, but they should be listed here
validReleaseChannels := []string{StableChannelFlagValue}
for _, validReleaseChannel := range validReleaseChannels {
if value == validReleaseChannel {
return nil
}
}
return trace.BadParameter("the release channel contains an invalid value. Valid values are: %s", strings.Join(validReleaseChannels, ","))
}
func validateLogLevel(value uint) error {
if value > 6 {
return trace.BadParameter("the log-level flag should be between 0 and 6")
}
return nil
}


@@ -1,78 +0,0 @@
/*
Copyright 2022 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"os"
log "github.com/sirupsen/logrus"
)
func main() {
supportedOSs := map[string][]string{
"debian": { // See https://wiki.debian.org/DebianReleases#Production_Releases for details
"stretch", // 9
"buster", // 10
"bullseye", // 11
"bookwork", // 12
"trixie", // 13
},
"ubuntu": { // See https://wiki.ubuntu.com/Releases for details
"xenial", // 16.04 LTS
"yakkety", // 16.10 (EOL)
"zesty", // 17.04 (EOL)
"artful", // 17.10 (EOL)
"bionic", // 18.04 LTS
"cosmic", // 18.10 (EOL)
"disco", // 19.04 (EOL)
"eoan", // 19.10 (EOL)
"focal", // 20.04 LTS
"groovy", // 20.10 (EOL)
"hirsuite", // 21.04 (EOL)
"impish", // 21.10 (EOL on 7/14/22)
"jammy", // 22.04 LTS
},
}
config, err := ParseFlags()
if err != nil {
log.Fatal(err.Error())
}
setupLogger(config)
log.Debugf("Starting tool with config: %v", config)
art, err := NewAptRepoTool(config, supportedOSs)
if err != nil {
log.Fatal(err.Error())
}
err = art.Run()
if err != nil {
log.Fatal(err.Error())
}
}
func setupLogger(config *Config) {
if config.logJSON {
log.SetFormatter(&log.JSONFormatter{})
} else {
log.SetFormatter(&log.TextFormatter{})
}
log.SetOutput(os.Stdout)
log.SetLevel(log.Level(config.logLevel))
}


@@ -1,102 +0,0 @@
/*
Copyright 2022 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/gravitational/trace"
"github.com/seqsense/s3sync"
"github.com/sirupsen/logrus"
)
type S3manager struct {
syncManager *s3sync.Manager
bucketName string
bucketPath string
}
func NewS3Manager(bucketName string) *S3manager {
// Right now the AWS session is only used by this manager, but if it ends
// up being needed elsewhere then it should probably be moved to an arg
awsSession := session.Must(session.NewSession())
manager := &S3manager{
syncManager: s3sync.New(awsSession),
bucketName: bucketName,
bucketPath: fmt.Sprintf("s3://%s", bucketName),
}
s3sync.SetLogger(&s3logger{})
return manager
}
func (s *S3manager) DownloadExistingRepo(localPath string) error {
err := ensureDirectoryExists(localPath)
if err != nil {
return trace.Wrap(err, "failed to ensure path %q exists", localPath)
}
err = s.sync(localPath, true)
if err != nil {
return trace.Wrap(err, "failed to download bucket")
}
return nil
}
func (s *S3manager) UploadBuiltRepo(localPath string) error {
err := s.sync(localPath, false)
if err != nil {
return trace.Wrap(err, "failed to upload bucket")
}
return nil
}
func (s *S3manager) sync(localPath string, download bool) error {
var src, dest string
if download {
src = s.bucketPath
dest = localPath
} else {
src = localPath
dest = s.bucketPath
}
logrus.Infof("Performing S3 sync from %q to %q...", src, dest)
err := s.syncManager.Sync(src, dest)
if err != nil {
return trace.Wrap(err, "failed to sync %q to %q", src, dest)
}
logrus.Infoln("S3 sync complete")
return nil
}
func ensureDirectoryExists(path string) error {
err := os.MkdirAll(path, 0660)
if err != nil {
return trace.Wrap(err, "failed to create directory %q", path)
}
return nil
}


@@ -22,40 +22,51 @@ import (
"strings"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/gravitational/trace"
"github.com/sirupsen/logrus"
"golang.org/x/mod/semver"
)
type AptRepoTool struct {
config *Config
config *AptConfig
aptly *Aptly
gpg *GPG
s3Manager *S3manager
supportedOSs map[string][]string
}
// Instantiates a new apt repo tool instance and performs any required setup/config.
func NewAptRepoTool(config *Config, supportedOSs map[string][]string) (*AptRepoTool, error) {
art := &AptRepoTool{
config: config,
s3Manager: NewS3Manager(config.bucketName),
supportedOSs: supportedOSs,
}
func NewAptRepoTool(config *AptConfig, supportedOSs map[string][]string) (*AptRepoTool, error) {
aptly, err := NewAptly(config.aptlyPath)
if err != nil {
return nil, trace.Wrap(err, "failed to create a new aptly instance")
}
art.aptly = aptly
gpg, err := NewGPG()
if err != nil {
return nil, trace.Wrap(err, "failed to create a new GPG instance")
}
return art, nil
s3Manager, err := NewS3Manager(config.S3Config)
if err != nil {
return nil, trace.Wrap(err, "failed to create a new s3manager instance")
}
return &AptRepoTool{
aptly: aptly,
config: config,
gpg: gpg,
s3Manager: s3Manager,
supportedOSs: supportedOSs,
}, nil
}
// Runs the tool, creating and updating APT repos based upon the current configuration.
func (art *AptRepoTool) Run() error {
start := time.Now()
logrus.Infoln("Starting APT repo build process...")
logrus.Debugf("Using config: %+v", spew.Sdump(art.config))
isFirstRun, err := art.aptly.IsFirstRun()
if err != nil {
@@ -65,7 +76,7 @@ func (art *AptRepoTool) Run() error {
if isFirstRun {
logrus.Warningln("First run or disaster recovery detected, attempting to rebuild existing repos from APT repository...")
err = art.s3Manager.DownloadExistingRepo(art.config.localBucketPath)
err = art.s3Manager.DownloadExistingRepo()
if err != nil {
return trace.Wrap(err, "failed to sync existing repo from S3 bucket")
}
@@ -74,6 +85,8 @@ func (art *AptRepoTool) Run() error {
if err != nil {
return trace.Wrap(err, "failed to recreate existing repos")
}
} else {
logrus.Debugf("Not first run of tool, skipping Aptly repository rebuild process")
}
// Note: this logic will only push the artifact into the `art.supportedOSs` repos.
@@ -94,11 +107,24 @@ func (art *AptRepoTool) Run() error {
return trace.Wrap(err, "failed to publish repos")
}
err = art.s3Manager.UploadBuiltRepo(filepath.Join(art.aptly.rootDir, "public"))
// Both Hashicorp and Docker publish their key to this path
err = art.gpg.WritePublicKeyToFile(filepath.Join(art.aptly.rootDir, "public", "gpg"))
if err != nil {
return trace.Wrap(err, "failed to write GPG public key")
}
art.s3Manager.ChangeLocalBucketPath(filepath.Join(art.aptly.rootDir, "public"))
err = art.s3Manager.UploadBuiltRepo()
if err != nil {
return trace.Wrap(err, "failed to sync changes to S3 bucket")
}
// Future work: add literals to config?
err = art.s3Manager.UploadRedirectURL("index.html", "https://goteleport.com/docs/installation/#linux")
if err != nil {
return trace.Wrap(err, "failed to redirect index page to Teleport docs")
}
logrus.Infof("APT repo build process completed in %s", time.Since(start).Round(time.Millisecond))
return nil
}
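
The UploadRedirectURL helper called above is added in s3.go, but its body falls outside the hunks shown in this excerpt. As a hedged sketch only: one plausible implementation uploads an empty object carrying the AWS SDK's WebsiteRedirectLocation field (the field is real aws-sdk-go API; the function body below is an assumption, not the committed code).

// Plausible sketch of UploadRedirectURL (assumption, not the committed body):
// upload an empty object whose website configuration redirects `key` to `url`.
func (s *S3manager) UploadRedirectURL(key string, url string) error {
	_, err := s.uploader.Upload(&s3manager.UploadInput{
		Bucket:                  aws.String(s.bucketName),
		Key:                     aws.String(key),
		Body:                    bytes.NewReader(nil),
		WebsiteRedirectLocation: aws.String(url), // honored when the bucket is served as a static website
	})
	if err != nil {
		return trace.Wrap(err, "failed to upload redirect object %q", key)
	}
	return nil
}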


@@ -22,9 +22,7 @@ import (
"errors"
"fmt"
"io/fs"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
@@ -66,7 +64,7 @@ func (*Aptly) ensureDefaultConfigExists() error {
// ran, which messes up the output.
// Note: it is important to not use any repo-related commands here as they have a side effect of
// also creating the Aptly rootDir structure which is usually undesirable here
_, err := buildAndRunCommand("aptly", "config", "show")
_, err := BuildAndRunCommand("aptly", "config", "show")
if err != nil {
return trace.Wrap(err, "failed to create default Aptly config")
}
@@ -86,7 +84,7 @@ func (a *Aptly) updateConfiguration() error {
logrus.Debugf("Built Aptly config: %v", aptlyConfigMap)
saveAptlyConfigMap(aptlyConfigMap)
configOutput, err := buildAndRunCommand("aptly", "config", "show")
configOutput, err := BuildAndRunCommand("aptly", "config", "show")
if err != nil {
return trace.Wrap(err, "failed to check Aptly config")
}
@@ -192,7 +190,7 @@ func (a *Aptly) CreateRepoIfNotExists(r *Repo) (bool, error) {
distributionArg := fmt.Sprintf("-distribution=%s", r.osVersion)
componentArg := fmt.Sprintf("-component=%s/%s", r.releaseChannel, r.majorVersion)
_, err = buildAndRunCommand("aptly", "repo", "create", distributionArg, componentArg, r.Name())
_, err = BuildAndRunCommand("aptly", "repo", "create", distributionArg, componentArg, r.Name())
if err != nil {
return false, trace.Wrap(err, "failed to create repo %q", r.Name())
}
@@ -222,7 +220,7 @@ func (a *Aptly) GetExistingRepoNames() ([]string, error) {
// ...
// <repo name N>
// ```
output, err := buildAndRunCommand("aptly", "repo", "list", "-raw")
output, err := BuildAndRunCommand("aptly", "repo", "list", "-raw")
if err != nil {
return nil, trace.Wrap(err, "failed to get a list of existing repos")
}
@@ -248,7 +246,7 @@ func (a *Aptly) GetExistingRepoNames() ([]string, error) {
func (a *Aptly) ImportDeb(repoName string, debPath string) error {
logrus.Infof("Importing deb(s) from %q into repo %q...", debPath, repoName)
_, err := buildAndRunCommand("aptly", "repo", "add", repoName, debPath)
_, err := BuildAndRunCommand("aptly", "repo", "add", repoName, debPath)
if err != nil {
return trace.Wrap(err, "failed to add %q to repo %q", debPath, repoName)
}
@@ -320,7 +318,7 @@ func parsePackagesFile(packagesPath string) ([]string, error) {
logrus.Debugf("Parsing packages file %q", packagesPath)
file, err := os.Open(packagesPath)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
defer file.Close()
@@ -394,7 +392,7 @@ func (a *Aptly) PublishRepos(repos []*Repo, repoOS string, repoOSVersion string)
// If all repos have been published
if areSomePublished && !areSomeUnpublished {
// Update rather than republish
_, err := buildAndRunCommand("aptly", "publish", "update", repoOSVersion, repoOS)
_, err := BuildAndRunCommand("aptly", "publish", "update", repoOSVersion, repoOS)
if err != nil {
return trace.Wrap(err, "failed to update publish repos with OS %q and OS version %q", repoOS, repoOSVersion)
}
@@ -406,7 +404,7 @@ func (a *Aptly) PublishRepos(repos []*Repo, repoOS string, repoOSVersion string)
// This will occur if there is a new major release, a new OS version is supported, or a new release channel is added
if areSomePublished && areSomeUnpublished {
// Drop the currently published APT repo so that it can be rebuilt from scratch
_, err := buildAndRunCommand("aptly", "publish", "drop", repoOSVersion, repoOS)
_, err := BuildAndRunCommand("aptly", "publish", "drop", repoOSVersion, repoOS)
if err != nil {
return trace.Wrap(err, "failed to update publish repos with OS %q and OS version %q", repoOS, repoOSVersion)
}
@@ -423,7 +421,7 @@ func (a *Aptly) PublishRepos(repos []*Repo, repoOS string, repoOSVersion string)
args = append(args, repoOS)
// Full command is `aptly publish repo -component=<, repeating len(repos) - 1 times> <repo names> <repo OS>`
_, err = buildAndRunCommand("aptly", args...)
_, err = BuildAndRunCommand("aptly", args...)
if err != nil {
return trace.Wrap(err, "failed to publish repos")
}
@@ -501,7 +499,7 @@ func (a *Aptly) GetPublishedRepoNames() ([]string, error) {
// No snapshots/local repos have been published. Publish a snapshot by running `aptly publish snapshot ...`.
// ```
// Note that the `-raw` argument is not used here as it does not provide sufficient information
output, err := buildAndRunCommand("aptly", "publish", "list")
output, err := BuildAndRunCommand("aptly", "publish", "list")
if err != nil {
return nil, trace.Wrap(err, "failed to get a list of published repos")
}
@@ -675,26 +673,3 @@ func getSubdirectories(basePath string) ([]string, error) {
return subdirectories, nil
}
func buildAndRunCommand(command string, args ...string) (string, error) {
cmd := exec.Command(command, args...)
logrus.Debugf("Running command \"%s '%s'\"", command, strings.Join(args, "' '"))
output, err := cmd.CombinedOutput()
if output != nil {
logrus.Debugf("Command output: %s", string(output))
}
if err != nil {
if exitError, ok := err.(*exec.ExitError); ok {
exitCode := exitError.ExitCode()
logrus.Debugf("Command exited with exit code %d", exitCode)
} else {
logrus.Debugln("Command failed without an exit code")
}
return "", trace.Wrap(err, "Command failed, see debug output for additional details")
}
logrus.Debugln("Command exited successfully")
return string(output), nil
}


@@ -0,0 +1,53 @@
/*
Copyright 2022 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"errors"
"os/exec"
"strings"
"github.com/gravitational/trace"
"github.com/sirupsen/logrus"
)
// Builds and runs a command with the provided arguments. Extensively logs command
// details to the debug log. Returns stdout and stderr combined, along with an
// error iff one occurred.
func BuildAndRunCommand(command string, args ...string) (string, error) {
cmd := exec.Command(command, args...)
logrus.Debugf("Running command \"%s '%s'\"", command, strings.Join(args, "' '"))
output, err := cmd.CombinedOutput()
if output != nil {
logrus.Debugf("Command output: %s", string(output))
}
if err != nil {
var exitError *exec.ExitError
if errors.As(err, &exitError) {
exitCode := exitError.ExitCode()
logrus.Debugf("Command exited with exit code %d", exitCode)
} else {
logrus.Debugln("Command failed without an exit code")
}
return "", trace.Wrap(err, "Command failed, see debug output for additional details")
}
logrus.Debugln("Command exited successfully")
return string(output), nil
}
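
For reference, a typical call site looks like the following; it mirrors how the aptly wrapper in this commit consumes the helper (the command and arguments are examples).

// Example call: list existing aptly repos and split the raw output into names.
// Assumes the surrounding package's imports (strings, trace).
output, err := BuildAndRunCommand("aptly", "repo", "list", "-raw")
if err != nil {
	return nil, trace.Wrap(err, "failed to get a list of existing repos")
}
repoNames := strings.Split(strings.TrimSpace(output), "\n")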


@@ -0,0 +1,282 @@
/*
Copyright 2022 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"os"
"strings"
"github.com/gravitational/trace"
"github.com/sirupsen/logrus"
"golang.org/x/mod/semver"
)
const StableChannelFlagValue string = "stable"
type LoggerConfig struct {
logLevel uint
logJSON bool
}
func NewLoggerConfigWithFlagset(fs *flag.FlagSet) *LoggerConfig {
lc := &LoggerConfig{}
fs.UintVar(&lc.logLevel, "log-level", uint(logrus.InfoLevel), "Log level from 0 to 6, 6 being the most verbose")
fs.BoolVar(&lc.logJSON, "log-json", false, "True if the log entries should use JSON format, false for text logging")
return lc
}
func (lc *LoggerConfig) Check() error {
if err := lc.validateLogLevel(); err != nil {
return trace.Wrap(err, "failed to validate the log level flag")
}
return nil
}
func (lc *LoggerConfig) validateLogLevel() error {
if lc.logLevel > 6 {
return trace.BadParameter("the log-level flag should be between 0 and 6")
}
return nil
}
type S3Config struct {
bucketName string
localBucketPath string
maxConcurrentSyncs int
}
func NewS3ConfigWithFlagset(fs *flag.FlagSet) *S3Config {
s3c := &S3Config{}
fs.StringVar(&s3c.bucketName, "bucket", "", "The name of the S3 bucket where the repo should be synced to/from")
fs.StringVar(&s3c.localBucketPath, "local-bucket-path", "/bucket", "The local path where the bucket should be synced to")
fs.IntVar(&s3c.maxConcurrentSyncs, "max-concurrent-syncs", 16, "The maximum number of S3 bucket syncs that may run in parallel (-1 for unlimited, 16 default)")
return s3c
}
func (s3c *S3Config) Check() error {
if err := s3c.validateBucketName(); err != nil {
return trace.Wrap(err, "failed to validate the bucket name flag")
}
if err := s3c.validateLocalBucketPath(); err != nil {
return trace.Wrap(err, "failed to validate the local bucket path flag")
}
if err := s3c.validateMaxConcurrentSyncs(); err != nil {
return trace.Wrap(err, "failed to validate the max concurrent syncs flag")
}
return nil
}
func (s3c *S3Config) validateBucketName() error {
if s3c.bucketName == "" {
return trace.BadParameter("the bucket flag should not be empty")
}
return nil
}
func (s3c *S3Config) validateLocalBucketPath() error {
if s3c.localBucketPath == "" {
return trace.BadParameter("the local-bucket-path flag should not be empty")
}
if stat, err := os.Stat(s3c.localBucketPath); err == nil && !stat.IsDir() {
return trace.BadParameter("the local bucket path points to a file instead of a directory")
}
return nil
}
func (s3c *S3Config) validateMaxConcurrentSyncs() error {
if s3c.maxConcurrentSyncs < -1 {
return trace.BadParameter("the max-concurrent-syncs flag must be greater than -1")
}
return nil
}
// This type is common to all other config types
type Config struct {
*LoggerConfig
*S3Config
artifactPath string
artifactVersion string
printHelp bool
releaseChannel string
}
func NewConfigWithFlagset(fs *flag.FlagSet) *Config {
c := &Config{}
c.LoggerConfig = NewLoggerConfigWithFlagset(fs)
c.S3Config = NewS3ConfigWithFlagset(fs)
fs.StringVar(&c.artifactPath, "artifact-path", "/artifacts", "Path to the filesystem tree containing the *.deb or *.rpm files to add to the repos")
fs.StringVar(&c.artifactVersion, "artifact-version", "", "The version of the artifacts that will be added to the repos")
fs.Visit(func(f *flag.Flag) {
if f.Name == "-h" || f.Name == "--help" {
c.printHelp = true
}
})
fs.StringVar(&c.releaseChannel, "release-channel", "", "The release channel of the repos that the artifacts should be added to")
return c
}
func (c *Config) Check() error {
if err := c.LoggerConfig.Check(); err != nil {
return trace.Wrap(err, "failed to validate logger config")
}
if err := c.S3Config.Check(); err != nil {
return trace.Wrap(err, "failed to validate S3 config")
}
if err := c.validateArtifactPath(); err != nil {
return trace.Wrap(err, "failed to validate the artifact path flag")
}
if err := c.validateArtifactVersion(); err != nil {
return trace.Wrap(err, "failed to validate the artifact version flag")
}
if err := c.validateReleaseChannel(); err != nil {
return trace.Wrap(err, "failed to validate the release channel flag")
}
return nil
}
func (c *Config) validateArtifactPath() error {
if c.artifactPath == "" {
return trace.BadParameter("the artifact-path flag should not be empty")
}
if stat, err := os.Stat(c.artifactPath); os.IsNotExist(err) {
return trace.BadParameter("the artifact-path %q does not exist", c.artifactPath)
} else if !stat.IsDir() {
return trace.BadParameter("the artifact-path %q is not a directory", c.artifactPath)
}
return nil
}
func (c *Config) validateArtifactVersion() error {
if c.artifactVersion == "" {
return trace.BadParameter("the artifact-version flag should not be empty")
}
if !semver.IsValid(c.artifactVersion) {
return trace.BadParameter("the artifact-version flag does not contain a valid semver version string")
}
return nil
}
func (c *Config) validateReleaseChannel() error {
if c.releaseChannel == "" {
return trace.BadParameter("the release-channel flag should not be empty")
}
// Not sure what other channels we'd want to support, but they should be listed here
validReleaseChannels := []string{StableChannelFlagValue}
for _, validReleaseChannel := range validReleaseChannels {
if c.releaseChannel == validReleaseChannel {
return nil
}
}
return trace.BadParameter("the release channel contains an invalid value. Valid values are: %s", strings.Join(validReleaseChannels, ","))
}
// APT-specific config
type AptConfig struct {
*Config
aptlyPath string
}
func NewAptConfigWithFlagSet(fs *flag.FlagSet) (*AptConfig, error) {
ac := &AptConfig{}
ac.Config = NewConfigWithFlagset(fs)
homeDir, err := os.UserHomeDir()
if err != nil {
return nil, trace.Wrap(err, "failed to get user's home directory path")
}
fs.StringVar(&ac.aptlyPath, "aptly-root-dir", homeDir, "The Aptly \"rootDir\" (see https://www.aptly.info/doc/configuration/ for details)")
return ac, nil
}
func (ac *AptConfig) validateAptlyPath() error {
if ac.aptlyPath == "" {
return trace.BadParameter("the aptly-root-dir flag should not be empty")
}
return nil
}
func (ac *AptConfig) Check() error {
if err := ac.Config.Check(); err != nil {
return trace.Wrap(err, "failed to validate common config")
}
if err := ac.validateAptlyPath(); err != nil {
return trace.Wrap(err, "failed to validate the aptly-root-dir path flag")
}
return nil
}
// YUM-specific config
type YumConfig struct {
*Config
cacheDir string
}
func NewYumConfigWithFlagSet(fs *flag.FlagSet) *YumConfig {
yc := &YumConfig{}
yc.Config = NewConfigWithFlagset(fs)
fs.StringVar(&yc.cacheDir, "cache-dir", "/tmp/createrepo/cache", "The createrepo checksum caching directory (see https://linux.die.net/man/8/createrepo for details")
return yc
}
func (yc *YumConfig) validateCacheDir() error {
if yc.cacheDir == "" {
return trace.BadParameter("the cache-dir flag should not be empty")
}
return nil
}
func (yc *YumConfig) Check() error {
if err := yc.Config.Check(); err != nil {
return trace.Wrap(err, "failed to validate common config")
}
if err := yc.validateCacheDir(); err != nil {
return trace.Wrap(err, "failed to validate the cache-dir path flag")
}
return nil
}
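
As a usage sketch (flag values illustrative), a subcommand binds these config types to its own FlagSet, parses, then validates; this is exactly the shape of the runners defined later in this commit.

// Sketch: construct a YUM config on a fresh FlagSet, parse, then validate.
fs := flag.NewFlagSet("yum", flag.ExitOnError)
yumConfig := NewYumConfigWithFlagSet(fs)
err := fs.Parse([]string{
	"-bucket", "example-bucket", // illustrative values
	"-artifact-version", "v10.0.0",
	"-release-channel", "stable",
})
if err != nil {
	logrus.Fatal(err)
}
if err := yumConfig.Check(); err != nil {
	logrus.Fatal(err) // e.g. empty bucket name or invalid semver
}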


@@ -0,0 +1,83 @@
/*
Copyright 2022 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"os"
"os/exec"
"github.com/gravitational/trace"
"github.com/sirupsen/logrus"
)
type CreateRepo struct {
cacheDir string
binaryName string
}
// Instantiates createrepo, ensuring all system requirements for performing createrepo operations
// have been met
func NewCreateRepo(cacheDir string) (*CreateRepo, error) {
cr := &CreateRepo{
cacheDir: cacheDir,
// `createrepo_c` is the "new" (as in 9 years old) replacement for `createrepo`
// This can be replaced with "createrepo" in the unlikely event that there is
// a problem
binaryName: "createrepo_c",
}
err := cr.ensureBinaryExists()
if err != nil {
return nil, trace.Wrap(err, "failed to ensure CreateRepo binary exists")
}
// Ensure the cache dir exists
err = os.MkdirAll(cr.cacheDir, 0660)
if err != nil {
return nil, trace.Wrap(err, "failed to ensure %q exists", cr.cacheDir)
}
return cr, nil
}
func (cr *CreateRepo) ensureBinaryExists() error {
_, err := exec.LookPath(cr.binaryName)
if err != nil {
return trace.Wrap(err, "failed to verify that %q binary exists", cr.binaryName)
}
return nil
}
func (cr *CreateRepo) CreateOrUpdateRepo(repoPath string) error {
// <cr.binaryName> --cachedir <cr.cacheDir> --update <repoPath>
logrus.Debugf("Updating repo metadata for repo at %q", repoPath)
args := []string{
"--cachedir",
cr.cacheDir,
"--update",
repoPath,
}
_, err := BuildAndRunCommand(cr.binaryName, args...)
if err != nil {
return trace.Wrap(err, "createrepo create/update command failed on path %q with cache directory %q", repoPath, cr.cacheDir)
}
return nil
}
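
A short usage sketch; the cache directory matches the flag default and the repo path is illustrative.

// Sketch: verify createrepo_c is installed, then build/refresh repo metadata.
cr, err := NewCreateRepo("/tmp/createrepo/cache")
if err != nil {
	logrus.Fatal(err)
}
// Effectively runs: createrepo_c --cachedir /tmp/createrepo/cache --update <repoPath>
if err := cr.CreateOrUpdateRepo("/bucket/rhel/8/Teleport/x86_64/stable/v10"); err != nil {
	logrus.Fatal(err)
}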


@@ -0,0 +1,119 @@
/*
Copyright 2022 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"os"
"strings"
"github.com/gravitational/trace"
"github.com/sirupsen/logrus"
)
type GPG struct{}
// Instantiates GPG, ensuring all system requirements for using GPG are fulfilled
func NewGPG() (*GPG, error) {
g := &GPG{}
err := g.ensureFirstRunHasOccurred()
if err != nil {
return nil, trace.Wrap(err, "failed to setup GPG")
}
err = g.ensureSecretKeyExists()
if err != nil {
return nil, trace.Wrap(err, "failed to ensure a secret key exists")
}
return g, nil
}
// The first time GPG is run for a user with any "meaningful" arguments it will
// generate several files and log what it created to stdout. These logs can
// disrupt parsing of GPG command outputs, so we force it to happen here, once,
// rather than try and handle it on each GPG call.
func (*GPG) ensureFirstRunHasOccurred() error {
_, err := BuildAndRunCommand("gpg", "--fingerprint")
if err != nil {
return trace.Wrap(err, "failed to ensure GPG has been ran once")
}
return nil
}
func (*GPG) ensureSecretKeyExists() error {
output, err := BuildAndRunCommand("gpg", "--list-secret-keys", "--with-colons")
if err != nil {
return trace.Wrap(err, "failed to ensure GPG secret key exists")
}
outputLineCount := strings.Count(output, "\n")
if outputLineCount < 1 {
return trace.Errorf("failed to find a GPG secret key")
}
return nil
}
// Creates a detached, armored signature for the provided file using the default GPG key
func (*GPG) SignFile(filePath string) error {
// While this could be done via a Go module, the x/crypto/openpgp library has been frozen
// and deprecated for almost 18 months. Others exist, but given the security implications of
// using a less reputable Go module I've decided to just call `gpg` via shell instead.
// Additionally this works and is just _so easy_ that it's probably not worth the effort to
// use another library that reinvents the wheel.
logrus.Debugf("Signing repo metadata at %q", filePath)
// gpg --batch --yes --detach-sign --armor <filePath>
_, err := BuildAndRunCommand("gpg", "--batch", "--yes", "--detach-sign", "--armor", filePath)
if err != nil {
return trace.Wrap(err, "failed to run GPG signing command on %q", filePath)
}
return nil
}
// Get the armored default public GPG key, ready to be written to a file
func (*GPG) GetPublicKey() (string, error) {
// For reference here is how another company formats their key:
// https://download.docker.com/linux/rhel/gpg
logrus.Debug("Attempting to get the default public GPG key")
key, err := BuildAndRunCommand("gpg", "--export", "--armor", "--no-version")
if err != nil {
return "", trace.Wrap(err, "failed to export the default public GPG key")
}
return key, nil
}
func (g *GPG) WritePublicKeyToFile(filePath string) error {
logrus.Debugf("Writing the default armored public GPG key to %q", filePath)
key, err := g.GetPublicKey()
if err != nil {
return trace.Wrap(err, "failed to retrieve public key")
}
err = os.WriteFile(filePath, []byte(key), 0664)
if err != nil {
return trace.Wrap(err, "failed to write key to %q", filePath)
}
return nil
}
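
How the YUM tool consumes this type is an inference rather than shown code (yum_repo_tool.go is not part of this excerpt); a hedged sketch with illustrative paths follows.

// Sketch: sign repo metadata, then publish the armored public key beside it.
gpg, err := NewGPG()
if err != nil {
	logrus.Fatal(err)
}
// --detach-sign --armor writes repomd.xml.asc next to the metadata file.
if err := gpg.SignFile("/bucket/rhel/8/Teleport/x86_64/stable/v10/repodata/repomd.xml"); err != nil {
	logrus.Fatal(err)
}
if err := gpg.WritePublicKeyToFile("/bucket/gpg"); err != nil {
	logrus.Fatal(err)
}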


@@ -0,0 +1,119 @@
/*
Copyright 2022 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"os"
"strings"
"github.com/gravitational/trace"
"github.com/sirupsen/logrus"
)
func main() {
err := run()
if err != nil {
logrus.Fatal(err.Error())
}
}
func buildSubcommandRunners() ([]Runner, error) {
ar, err := NewAptRunner()
if err != nil {
return nil, trace.Wrap(err, "failed to instantiate new APT runner")
}
yr, err := NewYumRunner()
if err != nil {
return nil, trace.Wrap(err, "failed to instantiate new YUM runner")
}
// These should be sorted alphabetically by `Name()`
return []Runner{
*ar,
*yr,
}, nil
}
func run() error {
subcommands, err := buildSubcommandRunners()
if err != nil {
return trace.Wrap(err, "failed to build subcommand runners")
}
// 2 = program name + subcommand
if len(os.Args) < 2 {
logHelp(subcommands)
return trace.Errorf("subcommand not provided")
}
subcommandName := strings.ToLower(os.Args[1])
for _, subcommand := range subcommands {
if strings.ToLower(subcommandName) != subcommand.Name() {
continue
}
// 2 = program name + subcommand, skip them and get subcommand arguments
args := os.Args[2:]
err := subcommand.Init(args)
if err != nil {
return trace.Wrap(err, "failed to initialize runner for subcommand %q", subcommandName)
}
setupLogger(subcommand.GetLoggerConfig())
err = subcommand.Run()
if err != nil {
return trace.Wrap(err, "failed to run subcommand %q", subcommandName)
}
return nil
}
if subcommandName == "-h" {
logHelp(subcommands)
return nil
}
logHelp(subcommands)
return trace.Errorf("no subcommands found matching %q", subcommandName)
}
func logHelp(subcommands []Runner) {
executableName := os.Args[0]
fmt.Printf("%s - OS package repo builder/updater\n", executableName)
fmt.Println()
fmt.Println("Commands:")
fmt.Println()
for _, subcommand := range subcommands {
fmt.Printf("\t%s\t%s\n", subcommand.Name(), subcommand.Info())
}
fmt.Println()
fmt.Printf("Use \"%s <command> -h\" for more information about a command.\n", executableName)
fmt.Println()
}
func setupLogger(config *LoggerConfig) {
if config.logJSON {
logrus.SetFormatter(&logrus.JSONFormatter{})
} else {
logrus.SetFormatter(&logrus.TextFormatter{})
}
logrus.SetOutput(os.Stdout)
logrus.SetLevel(logrus.Level(config.logLevel))
logrus.Debugf("Setup logger with config: %+v", config)
}
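
Supporting another package format would mean registering one more Runner in buildSubcommandRunners above; a hypothetical sketch (ApkRunner is illustrative and does not exist in this commit):

// Hypothetical: extending buildSubcommandRunners with an additional format.
apkRunner, err := NewApkRunner() // illustrative; not part of this commit
if err != nil {
	return nil, trace.Wrap(err, "failed to instantiate new APK runner")
}
// Keep the slice sorted alphabetically by Name().
return []Runner{*apkRunner, *ar, *yr}, nil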


@@ -0,0 +1,199 @@
/*
Copyright 2022 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"github.com/gravitational/trace"
)
// Pattern from https://www.digitalocean.com/community/tutorials/how-to-use-the-flag-package-in-go
type Runner interface {
Init([]string) error
Run() error
GetLoggerConfig() *LoggerConfig
Name() string
Info() string
}
// APT implementation
type AptRunner struct {
flags *flag.FlagSet
config *AptConfig
supportedOSs map[string][]string
}
func NewAptRunner() (*AptRunner, error) {
runner := &AptRunner{
supportedOSs: map[string][]string{
"debian": { // See https://wiki.debian.org/DebianReleases#Production_Releases for details
"stretch", // 9
"buster", // 10
"bullseye", // 11
"bookwork", // 12
"trixie", // 13
},
"ubuntu": { // See https://wiki.ubuntu.com/Releases for details
"xenial", // 16.04 LTS
"yakkety", // 16.10 (EOL)
"zesty", // 17.04 (EOL)
"artful", // 17.10 (EOL)
"bionic", // 18.04 LTS
"cosmic", // 18.10 (EOL)
"disco", // 19.04 (EOL)
"eoan", // 19.10 (EOL)
"focal", // 20.04 LTS
"groovy", // 20.10 (EOL)
"hirsuite", // 21.04 (EOL)
"impish", // 21.10 (EOL)
"jammy", // 22.04 LTS
},
},
}
runner.flags = flag.NewFlagSet(runner.Name(), flag.ExitOnError)
config, err := NewAptConfigWithFlagSet(runner.flags)
if err != nil {
return nil, trace.Wrap(err, "failed to create a new APT config instance")
}
runner.config = config
return runner, nil
}
func (ar AptRunner) Init(args []string) error {
err := ar.flags.Parse(args)
if err != nil {
return trace.Wrap(err, "failed to parse arguments")
}
err = ar.config.Check()
if err != nil {
return trace.Wrap(err, "failed to validate APT config arguments")
}
return nil
}
func (ar AptRunner) Run() error {
if ar.config.printHelp {
ar.flags.Usage()
return nil
}
art, err := NewAptRepoTool(ar.config, ar.supportedOSs)
if err != nil {
return trace.Wrap(err, "failed to create a new APT repo tool instance")
}
err = art.Run()
if err != nil {
return trace.Wrap(err, "APT runner failed")
}
return nil
}
func (AptRunner) Name() string {
return "apt"
}
func (AptRunner) Info() string {
return "builds APT repos"
}
func (ar AptRunner) GetLoggerConfig() *LoggerConfig {
return ar.config.LoggerConfig
}
// YUM implementation
type YumRunner struct {
flags *flag.FlagSet
config *YumConfig
supportedOSs map[string][]string
}
func NewYumRunner() (*YumRunner, error) {
runner := &YumRunner{
supportedOSs: map[string][]string{
"rhel": { // See https://access.redhat.com/articles/3078 for details
"7",
"8",
"9",
},
"centos": { // See https://endoflife.date/centos for details
"7",
"8",
"9",
},
// "$releasever" is a hot mess for Amazon Linux. No good documentation on this outside of just running
// a container or EC2 instance and manually checking $releasever values
"amzn": {
// "latest" // 1, aka 2018.03.0.20201028.0
"2", // 2, aka 2.0.20201111.0
// "2022.0.20220531" // 2022 (new naming scheme, preview) aka 2022.0.20220531
},
},
}
runner.flags = flag.NewFlagSet(runner.Name(), flag.ExitOnError)
runner.config = NewYumConfigWithFlagSet(runner.flags)
return runner, nil
}
func (yr YumRunner) Init(args []string) error {
err := yr.flags.Parse(args)
if err != nil {
return trace.Wrap(err, "failed to parse arguments")
}
err = yr.config.Check()
if err != nil {
return trace.Wrap(err, "failed to validate YUM config arguments")
}
return nil
}
func (yr YumRunner) Run() error {
yrt, err := NewYumRepoTool(yr.config, yr.supportedOSs)
if err != nil {
return trace.Wrap(err, "failed to create a new YUM repo tool instance")
}
err = yrt.Run()
if err != nil {
return trace.Wrap(err, "YUM runner failed")
}
return nil
}
func (YumRunner) Name() string {
return "yum"
}
func (YumRunner) Info() string {
return "builds YUM repos"
}
func (yr YumRunner) GetLoggerConfig() *LoggerConfig {
return yr.config.LoggerConfig
}
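
Every runner is driven through the same Init/GetLoggerConfig/Run contract by the dispatch loop in main.go; restated as a minimal sketch:

// Sketch: what main.go does once a subcommand name matches a Runner.
runner, err := NewYumRunner()
if err != nil {
	logrus.Fatal(err)
}
if err := runner.Init(os.Args[2:]); err != nil { // parse and validate flags
	logrus.Fatal(err)
}
setupLogger(runner.GetLoggerConfig())
if err := runner.Run(); err != nil {
	logrus.Fatal(err)
}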


@@ -0,0 +1,511 @@
/*
Copyright 2022 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"fmt"
"io"
"io/fs"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/gravitational/trace"
"github.com/inhies/go-bytesize"
"github.com/seqsense/s3sync"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
type S3manager struct {
syncManager *s3sync.Manager
uploader *s3manager.Uploader
downloader *s3manager.Downloader
bucketLocalPath string
bucketName string
bucketURL *url.URL
maxConcurrentSyncs int
downloadedBytes int64
}
func NewS3Manager(config *S3Config) (*S3manager, error) {
// Right now the AWS session is only used by this manager, but if it ends
// up being needed elsewhere then it should probably be moved to an arg
awsSession, err := session.NewSession()
if err != nil {
return nil, trace.Wrap(err, "failed to create a new AWS session")
}
syncManagerMaxConcurrentSyncs := config.maxConcurrentSyncs
if syncManagerMaxConcurrentSyncs < 0 {
// This isn't unlimited but due to the s3sync library's parallelism implementation
// this must be limited to a "reasonable" number
syncManagerMaxConcurrentSyncs = 128
}
s := &S3manager{
bucketName: config.bucketName,
bucketURL: &url.URL{
Scheme: "s3",
Host: config.bucketName,
},
syncManager: s3sync.New(awsSession, s3sync.WithParallel(syncManagerMaxConcurrentSyncs)),
uploader: s3manager.NewUploader(awsSession),
downloader: s3manager.NewDownloader(awsSession),
maxConcurrentSyncs: config.maxConcurrentSyncs,
}
s.ChangeLocalBucketPath(config.localBucketPath)
s3sync.SetLogger(&s3logger{})
return s, nil
}
func (s *S3manager) ChangeLocalBucketPath(newBucketPath string) error {
s.bucketLocalPath = newBucketPath
// Ensure the local bucket path exists as it will be needed by all functions
err := os.MkdirAll(s.bucketLocalPath, 0660)
if err != nil {
return trace.Wrap(err, "failed to ensure path %q exists", s.bucketLocalPath)
}
return nil
}
func (s *S3manager) DownloadExistingRepo() error {
err := deleteAllFilesInDirectory(s.bucketLocalPath)
if err != nil {
return trace.Wrap(err, "failed to remove all filesystem entries in %q", s.bucketLocalPath)
}
downloadGroup := &errgroup.Group{}
downloadGroup.SetLimit(s.maxConcurrentSyncs)
linkMap := make(map[string]string)
// linkMap is written from multiple download goroutines, so guard it with a mutex
var linkMapMu sync.Mutex
var continuationToken *string
for {
listObjResponse, err := s.downloader.S3.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: &s.bucketName,
ContinuationToken: continuationToken,
})
if err != nil {
return trace.Wrap(err, "failed to list objects for bucket %q", s.bucketName)
}
for _, s3object := range listObjResponse.Contents {
s.processS3ObjectDownload(s3object, downloadGroup, linkMap, &linkMapMu)
}
continuationToken = listObjResponse.NextContinuationToken
if continuationToken == nil {
break
}
}
// Wait for all in-flight downloads to finish before returning, even if one of
// them has already failed
logrus.Info("Waiting for download to complete...")
err = downloadGroup.Wait()
if err != nil {
return trace.Wrap(err, "failed to perform S3 sync from remote bucket %q to local bucket %q", s.bucketName, s.bucketLocalPath)
}
// Links must be created after their target exists
err = createLinks(linkMap)
if err != nil {
return trace.Wrap(err, "failed to create filesystem links for bucket %q", s.bucketName)
}
logrus.Infof("Downloaded %s bytes", bytesize.New(float64(s.downloadedBytes)))
return nil
}
func (s *S3manager) processS3ObjectDownload(s3object *s3.Object, downloadGroup *errgroup.Group, linkMap map[string]string, linkMapMu *sync.Mutex) {
downloadGroup.Go(func() error {
objectLink, err := s.getObjectLink(s3object)
if err != nil {
return trace.Wrap(err, "failed to get object link for key %q in bucket %q", *s3object.Key, s.bucketName)
}
// If the link does not start with a '/' then it is not a filesystem link
if objectLink != nil && len(*objectLink) > 0 && (*objectLink)[0] == '/' {
localObjectPath := filepath.Join(s.bucketLocalPath, *s3object.Key)
linkTarget := filepath.Join(s.bucketLocalPath, *objectLink)
// Lock around the map write as this closure runs concurrently
linkMapMu.Lock()
linkMap[localObjectPath] = linkTarget
linkMapMu.Unlock()
return nil
}
err = s.downloadFile(s3object)
if err != nil {
return trace.Wrap(err, "failed to download S3 file %q from bucket %q", *s3object.Key, s.bucketName)
}
return nil
})
}
func createLinks(linkMap map[string]string) error {
for file, target := range linkMap {
logrus.Infof("Creating a symlink from %q to %q", file, target)
err := os.MkdirAll(filepath.Dir(file), 0660)
if err != nil {
return trace.Wrap(err, "failed to create directory structure for %q", file)
}
err = os.Symlink(target, file)
if err != nil {
return trace.Wrap(err, "failed to symlink %q to %q", file, target)
}
}
return nil
}
// This could potentially be made more efficient by running `os.RemoveAll` in a goroutine
// as random access on storage devices performs better at a higher queue depth
func deleteAllFilesInDirectory(dir string) error {
// Note that os.ReadDir does not follow/eval links which is important here
dirEntries, err := os.ReadDir(dir)
if err != nil {
return trace.Wrap(err, "failed to list directory entries for directory %q", dir)
}
for _, dirEntry := range dirEntries {
dirEntryPath := filepath.Join(dir, dirEntry.Name())
err = os.RemoveAll(dirEntryPath)
if err != nil {
return trace.Wrap(err, "failed to remove directory entry %q", dirEntryPath)
}
}
return nil
}
func (s *S3manager) getObjectLink(s3object *s3.Object) (*string, error) {
s3HeadObjectOutput, err := s.downloader.S3.HeadObject(&s3.HeadObjectInput{
Bucket: &s.bucketName,
Key: s3object.Key,
// Probably unnecessary but this will cause an error to be thrown if somebody is
// modifying the object while this program is running
IfMatch: s3object.ETag,
IfUnmodifiedSince: s3object.LastModified,
})
if err != nil {
return nil, trace.Wrap(err, "failed to retrieve metadata for key %q in bucket %q", *s3object.Key, s.bucketName)
}
return s3HeadObjectOutput.WebsiteRedirectLocation, nil
}
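// For example (illustrative key and target): if HeadObject reports a
// WebsiteRedirectLocation of "/RPMs/teleport-10.0.0-1.x86_64.rpm" for some key,
// processS3ObjectDownload records the pair in linkMap rather than downloading
// the object, and createLinks later recreates it locally as a symlink to the
// real artifact.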
// s3sync has a bug when downloading a single file so this call reimplements s3sync's download
func (s *S3manager) downloadFile(s3object *s3.Object) error {
logrus.Infof("Downloading %q...", *s3object.Key)
localObjectPath := filepath.Join(s.bucketLocalPath, *s3object.Key)
err := os.MkdirAll(filepath.Dir(localObjectPath), 0660)
if err != nil {
return trace.Wrap(err, "failed to create directory structure for %q", localObjectPath)
}
fileWriter, err := os.Create(localObjectPath)
if err != nil {
return trace.Wrap(err, "failed to open %q for writing", localObjectPath)
}
defer fileWriter.Close()
fileDownloadByteCount, err := s.downloader.Download(fileWriter, &s3.GetObjectInput{
Bucket: aws.String(s.bucketName),
Key: aws.String(*s3object.Key),
})
if err != nil {
return trace.Wrap(err, "failed to download object %q from bucket %q to local path %q", *s3object.Key, s.bucketName, localObjectPath)
}
// downloadFile runs in multiple goroutines, so update the shared counter atomically
atomic.AddInt64(&s.downloadedBytes, fileDownloadByteCount)
err = os.Chtimes(localObjectPath, *s3object.LastModified, *s3object.LastModified)
if err != nil {
return trace.Wrap(err, "failed to update the access and modification time on file %q to %v", localObjectPath, *s3object.LastModified)
}
logrus.Infof("Download %q complete", *s3object.Key)
return nil
}
func (s *S3manager) UploadBuiltRepo() error {
err := s.sync(false)
if err != nil {
return trace.Wrap(err, "failed to upload bucket")
}
return nil
}
func (s *S3manager) UploadBuiltRepoWithRedirects(extensionToMatch, relativeRedirectDir string) error {
uploadGroup := &errgroup.Group{}
uploadGroup.SetLimit(s.maxConcurrentSyncs)
walkErr := filepath.WalkDir(s.bucketLocalPath, func(absPath string, info fs.DirEntry, err error) error {
logrus.Debugf("Starting on %q...", absPath)
if err != nil {
return trace.Wrap(err, "failed to walk over directory %q on path %q", s.bucketLocalPath)
}
syncFunc, err := s.syncGenericFsObject(absPath, info)
if err != nil {
return trace.Wrap(err, "failed to get syncing function for %q", absPath)
}
uploadGroup.Go(syncFunc)
logrus.Debugf("Upload for %q queued", absPath)
return nil
})
// Wait for all in-flight syncs to finish before returning, even if the walk or
// one of the syncs has already failed
logrus.Info("Waiting for sync to complete...")
syncErr := uploadGroup.Wait()
// Future work: add upload logging information once
// https://github.com/seqsense/s3sync/commit/29b3fcb259293d80634cb3916e0f28467d017087 has been released
logrus.Info("Sync has completed")
errs := make([]error, 0, 2)
if walkErr != nil {
errs = append(errs, trace.Wrap(walkErr, "failed to walk over entries in %q", s.bucketLocalPath))
}
if syncErr != nil {
errs = append(errs, trace.Wrap(syncErr, "failed to perform S3 sync from local bucket %q to remote bucket %q", s.bucketLocalPath, s.bucketName))
}
if len(errs) > 0 {
return trace.Wrap(trace.NewAggregate(errs...), "one or more errors occurred while uploading built repo %q", s.bucketLocalPath)
}
return nil
}
func (s *S3manager) syncGenericFsObject(absPath string, dirEntryInfo fs.DirEntry) (func() error, error) {
// Don't do anything with non-empty directories as they will be caught later by their contents
if dirEntryInfo.IsDir() {
f, err := s.buildSyncDirFunc(absPath)
if err != nil {
return nil, trace.Wrap(err, "failed to build directory syncing function to sync %q", absPath)
}
return f, nil
}
// If the entry is a symbolic link
if dirEntryInfo.Type()&fs.ModeSymlink != 0 {
f, err := s.buildSyncSymbolicLinkFunc(absPath)
if err != nil {
return nil, trace.Wrap(err, "failed to build symbolic link file syncing function to sync %q", absPath)
}
return f, nil
}
// Otherwise the entry is a regular file; sync it directly
f, err := s.buildSyncSingleFsEntryFunc(absPath)
if err != nil {
return nil, trace.Wrap(err, "failed to build single file syncing function to sync %q", absPath)
}
return f, nil
}
func (s *S3manager) buildSyncDirFunc(absPath string) (func() error, error) {
isDirEmpty, err := isDirectoryEmpty(absPath)
if err != nil {
return nil, trace.Wrap(err, "failed to determine if directory %q is empty", absPath)
}
if !isDirEmpty {
logrus.Debug("Skipping non-empty directory")
return func() error { return nil }, nil
}
// If the directory has no contents, call sync normally which will create the directory remotely if not exists
f, err := s.buildSyncSingleFsEntryFunc(absPath)
if err != nil {
return nil, trace.Wrap(err, "failed to build single file syncing function to sync %q", absPath)
}
return f, nil
}
func (s *S3manager) buildSyncSymbolicLinkFunc(absPath string) (func() error, error) {
actualFilePath, err := filepath.EvalSymlinks(absPath)
if err != nil {
return nil, trace.Wrap(err, "failed to follow symlink for path %q", absPath)
}
isInBucket, err := isPathChildOfAnother(s.bucketLocalPath, actualFilePath)
if err != nil {
return nil, trace.Wrap(err, "failed to determine if %q is a child of %q", actualFilePath, s.bucketLocalPath)
}
if isInBucket {
// This will re-upload every redirect file ever created. Implementing "sync" functionality would
// require significantly more engineering effort and this cost is low so this shouldn't be a
// problem.
return func() error {
err := s.UploadRedirectFile(absPath, actualFilePath)
if err != nil {
return trace.Wrap(err, "failed to upload a redirect file to S3 for %q targeting %q", absPath, actualFilePath)
}
return nil
}, nil
}
// If not in bucket, call sync normally which will follow the symlink to the actual file and upload it
f, err := s.buildSyncSingleFsEntryFunc(absPath)
if err != nil {
return nil, trace.Wrap(err, "failed to build single file syncing function to sync %q", absPath)
}
return f, nil
}
func (s *S3manager) buildSyncSingleFsEntryFunc(absPath string) (func() error, error) {
relPath, err := filepath.Rel(s.bucketLocalPath, absPath)
if err != nil {
return nil, trace.Wrap(err, "failed to get %q relative to %q", absPath, s.bucketLocalPath)
}
remoteURL := getURLWithPath(*s.bucketURL, relPath)
return func() error {
err := s.syncManager.Sync(absPath, remoteURL)
if err != nil {
return trace.Wrap(err, "failed to sync from %q to %q", absPath, remoteURL)
}
return nil
}, nil
}
func getURLWithPath(baseURL url.URL, path string) string {
// baseURL is passed by value, so setting the path below modifies only the local
// copy; taking a pointer parameter instead would mutate the caller's URL
baseURL.Path = path
return baseURL.String()
}
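// Example: getURLWithPath(url.URL{Scheme: "s3", Host: "my-bucket"}, "RPMs/a.rpm")
// returns "s3://my-bucket/RPMs/a.rpm"; url.URL.String inserts the "/" between the
// host and a relative path.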
func isPathChildOfAnother(baseAbsPath string, testAbsPath string) (bool, error) {
// General implementation from https://stackoverflow.com/questions/28024731/check-if-given-path-is-a-subdirectory-of-another-in-golang
relPath, err := filepath.Rel(baseAbsPath, testAbsPath)
if err != nil {
return false, trace.Wrap(err, "failed to get the path of %q relative to %q", testAbsPath, baseAbsPath)
}
return !strings.HasPrefix(relPath, fmt.Sprintf("..%c", os.PathSeparator)) && relPath != "..", nil
}
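// Examples: isPathChildOfAnother("/mnt/bucket", "/mnt/bucket/RPMs/a.rpm") returns
// true, while isPathChildOfAnother("/mnt/bucket", "/tmp/outside") returns false
// because the relative path starts with "..".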
func (s *S3manager) UploadRedirectFile(localAbsSrcPath, localAbsRemoteTargetPath string) error {
relSrcPath, err := filepath.Rel(s.bucketLocalPath, localAbsSrcPath)
if err != nil {
return trace.Wrap(err, "failed to get %q relative to %q", localAbsSrcPath, s.bucketLocalPath)
}
relTargetPath, err := filepath.Rel(s.bucketLocalPath, localAbsRemoteTargetPath)
if err != nil {
return trace.Wrap(err, "failed to get %q relative to %q", localAbsRemoteTargetPath, s.bucketLocalPath)
}
logrus.Infof("Creating a redirect file from %q to %q", relSrcPath, relTargetPath)
// S3 requires a prepended "/" to inform the redirect metadata that the target is another S3 object
// in the same bucket
s3TargetPath := filepath.Join("/", relTargetPath)
// Upload an empty file that when requested will redirect to the real one
_, err = s.uploader.Upload(&s3manager.UploadInput{
Bucket: &s.bucketName,
Key: &relSrcPath,
Body: bytes.NewReader([]byte{}),
WebsiteRedirectLocation: &s3TargetPath,
})
if err != nil {
return trace.Wrap(err, "failed to upload an empty redirect file to %q in bucket %q", relSrcPath, s.bucketName)
}
return nil
}
func (s *S3manager) UploadRedirectURL(remoteAbsSourcePath, targetURL string) error {
logrus.Infof("Creating redirect from %q to %q", remoteAbsSourcePath, targetURL)
_, err := s.uploader.Upload(&s3manager.UploadInput{
Bucket: &s.bucketName,
Key: &remoteAbsSourcePath,
Body: bytes.NewReader([]byte{}),
WebsiteRedirectLocation: &targetURL,
})
if err != nil {
return trace.Wrap(err, "failed to upload URL redirect file targeting %q to %q", targetURL, remoteAbsSourcePath)
}
return nil
}
func isDirectoryEmpty(dirPath string) (bool, error) {
// Pulled from https://stackoverflow.com/questions/30697324/how-to-check-if-directory-on-path-is-empty
f, err := os.Open(dirPath)
if err != nil {
return false, trace.Wrap(err, "failed to open directory %q", dirPath)
}
defer f.Close()
_, err = f.Readdirnames(1)
if err == io.EOF {
return true, nil
}
if err != nil {
return false, trace.Wrap(err, "failed to read the name of directories in %q", dirPath)
}
return false, nil
}
func (s *S3manager) sync(download bool) error {
var src, dest string
if download {
src = s.bucketURL.String()
dest = s.bucketLocalPath
} else {
src = s.bucketLocalPath
dest = s.bucketURL.String()
}
logrus.Infof("Performing S3 sync from %q to %q...", src, dest)
err := s.syncManager.Sync(src, dest)
if err != nil {
return trace.Wrap(err, "failed to sync %q to %q", src, dest)
}
logrus.Infoln("S3 sync complete")
return nil
}

View file

@ -0,0 +1,63 @@
#!/bin/bash
# shellcheck disable=SC2016,SC1004,SC2174,SC2155
set -xeu
# These must be set for the script to run
: "$AWS_ACCESS_KEY_ID"
: "$AWS_SECRET_ACCESS_KEY"
: "$AWS_SESSION_TOKEN"
ART_VERSION_TAG="8.3.15"
ARTIFACT_PATH="/go/artifacts"
CACHE_DIR="/mnt/createrepo_cache"
GNUPGHOME="/tmpfs/gnupg"
REPO_S3_BUCKET="fred-test1"
BUCKET_CACHE_PATH="/mnt/bucket"
export AWS_REGION="us-west-2"
: '
Run command:
docker run \
--rm -it \
-v "$(git rev-parse --show-toplevel)":/go/src/github.com/gravitational/teleport/ \
-v "$HOME/.aws":"/root/.aws" \
-e AWS_PROFILE="$AWS_PROFILE" \
-e AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" \
-e AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" \
-e AWS_SESSION_TOKEN="$AWS_SESSION_TOKEN" \
-e DEBIAN_FRONTEND="noninteractive" \
golang:1.18.4-bullseye /go/src/github.com/gravitational/teleport/build.assets/tooling/cmd/build-os-package-repos/test-rpm.sh
'
# Download the artifacts
apt update
apt install -y wget
mkdir -pv "$ARTIFACT_PATH"
cd "$ARTIFACT_PATH"
wget "https://get.gravitational.com/teleport-${ART_VERSION_TAG}-1.x86_64.rpm"
wget "https://get.gravitational.com/teleport-${ART_VERSION_TAG}-1.arm64.rpm"
wget "https://get.gravitational.com/teleport-${ART_VERSION_TAG}-1.i386.rpm"
wget "https://get.gravitational.com/teleport-${ART_VERSION_TAG}-1.arm.rpm"
apt install -y createrepo-c gnupg
mkdir -pv "$CACHE_DIR"
mkdir -pv -m0700 "$GNUPGHOME"
chown -R root:root "$GNUPGHOME"
export GPG_TTY=$(tty)
gpg --batch --gen-key <<EOF
Key-Type: 1
Key-Length: 2048
Subkey-Type: 1
Subkey-Length: 2048
Name-Real: Test RPM key
Name-Email: test@rpm.key
Expire-Date: 0
%no-protection
EOF
cd "/go/src/github.com/gravitational/teleport/build.assets/tooling"
export VERSION="v${ART_VERSION_TAG}"
export RELEASE_CHANNEL="stable"
go run ./cmd/build-os-package-repos yum -bucket "$REPO_S3_BUCKET" -local-bucket-path \
"$BUCKET_CACHE_PATH" -artifact-version "$VERSION" -release-channel "$RELEASE_CHANNEL" \
-artifact-path "$ARTIFACT_PATH" -log-level 4 -cache-dir "$CACHE_DIR"
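# To spot-check the output afterwards (illustrative), list the generated repo
# metadata and repo config files:
# find "$BUCKET_CACHE_PATH" -name 'repomd.xml*' -o -name 'teleport.repo'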

View file

@ -0,0 +1,512 @@
/*
Copyright 2022 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"io"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"time"
// The Go docs are wrong/out of date for this package. Check GitHub instead.
"github.com/cavaliergopher/rpm"
"github.com/davecgh/go-spew/spew"
"github.com/gravitational/trace"
"github.com/sirupsen/logrus"
"golang.org/x/mod/semver"
)
type YumRepoTool struct {
config *YumConfig
s3Manager *S3manager
createRepo *CreateRepo
gpg *GPG
supportedOSs map[string][]string
}
const ArtifactExtension string = ".rpm"
// Instantiates a new yum repo tool instance and performs any required setup/config.
func NewYumRepoTool(config *YumConfig, supportedOSs map[string][]string) (*YumRepoTool, error) {
cr, err := NewCreateRepo(config.cacheDir)
if err != nil {
trace.Wrap(err, "failed to instantiate new CreateRepo instance")
}
s3Manager, err := NewS3Manager(config.S3Config)
if err != nil {
return nil, trace.Wrap(err, "failed to create a new s3manager instance")
}
gpg, err := NewGPG()
if err != nil {
return nil, trace.Wrap(err, "failed to create a new GPG instance")
}
return &YumRepoTool{
config: config,
s3Manager: s3Manager,
createRepo: cr,
gpg: gpg,
supportedOSs: supportedOSs,
}, nil
}
func (yrt *YumRepoTool) Run() error {
start := time.Now()
logrus.Infoln("Starting YUM repo build process...")
logrus.Debugf("Using config: %+v", spew.Sdump(yrt.config))
isFirstRun, err := yrt.isFirstRun()
if err != nil {
return trace.Wrap(err, "failed to determine if YUM repos have been built before")
}
if isFirstRun {
logrus.Warningln("First run or disaster recovery detected, attempting to rebuild existing repos from YUM repository...")
err = yrt.s3Manager.DownloadExistingRepo()
if err != nil {
return trace.Wrap(err, "failed to sync existing repo from S3 bucket")
}
// Additional first time setup can be done here, but shouldn't be needed
} else {
logrus.Debugf("Not first run of tool, skipping S3 resync")
}
// Both Hashicorp and Docker publish their key to this path
relativeGpgPublicKeyPath := "gpg"
err = yrt.gpg.WritePublicKeyToFile(path.Join(yrt.config.localBucketPath, relativeGpgPublicKeyPath))
if err != nil {
return trace.Wrap(err, "failed to write GPG public key")
}
sourceArtifactPaths, err := yrt.getSourceArtifactPaths()
if err != nil {
return trace.Wrap(err, "failed to get the file paths of available RPM artifacts")
}
// This can be anywhere under repoPath. Hardcoding it rather than putting it in config as it should not change
// between runs/versions.
relativeBucketArtifactPath := "RPMs"
bucketArtifactPaths, err := yrt.copyArtifactsToBucket(sourceArtifactPaths, relativeBucketArtifactPath)
if err != nil {
return trace.Wrap(err, "failed to transfer available RPM artifacts to a bucket subdirectory")
}
err = yrt.addArtifacts(bucketArtifactPaths, relativeGpgPublicKeyPath)
if err != nil {
return trace.Wrap(err, "failed to add artifacts")
}
err = yrt.s3Manager.UploadBuiltRepoWithRedirects(ArtifactExtension, relativeBucketArtifactPath)
if err != nil {
return trace.Wrap(err, "failed to sync changes to S3 bucket")
}
// Future work: add literals to config?
err = yrt.s3Manager.UploadRedirectURL("index.html", "https://goteleport.com/docs/installation/#linux")
if err != nil {
return trace.Wrap(err, "failed to redirect index page to Teleport docs")
}
logrus.Infof("YUM repo build process completed in %s", time.Since(start).Round(time.Millisecond))
return nil
}
func (yrt *YumRepoTool) isFirstRun() (bool, error) {
yumSyncPath := yrt.config.localBucketPath
logrus.Debugf("Checking if %q exists...", yumSyncPath)
files, err := os.ReadDir(yumSyncPath)
if err != nil {
return false, trace.Wrap(err, "failed to list files in %q", yumSyncPath)
}
logrus.Debugf("Found %d files in %q:", len(files), yumSyncPath)
for _, file := range files {
logrus.Debug(file.Name())
}
return len(files) == 0, nil
}
func (yrt *YumRepoTool) getSourceArtifactPaths() ([]string, error) {
artifactPath := yrt.config.artifactPath
logrus.Infof("Looking for artifacts in %q...", artifactPath)
fileDirEntries, err := os.ReadDir(artifactPath)
if err != nil {
return nil, trace.Wrap(err, "failed to list files in %q", artifactPath)
}
logrus.Infof("Found %d possible artifacts in %q", len(fileDirEntries), artifactPath)
// This allocates a capacity of the maximum that is possibly needed, but it is probably
// better than reallocating the underlying array by appending each time
validArtifactPaths := make([]string, 0, len(fileDirEntries))
for _, fileDirEntry := range fileDirEntries {
fileName := fileDirEntry.Name()
if path.Ext(fileName) != ArtifactExtension {
logrus.Debugf("The file %q does not have a %q extension, skipping...", fileName, ArtifactExtension)
continue
}
filePath := path.Join(artifactPath, fileName)
validArtifactPaths = append(validArtifactPaths, filePath)
logrus.Debugf("Found artifact %q", filePath)
}
logrus.Infof("Found %d artifacts", len(validArtifactPaths))
logrus.Debugf("Source artifact paths: %v", validArtifactPaths)
return validArtifactPaths, nil
}
func sortArtifactsByArch(artifactPaths []string) (map[string][]string, error) {
logrus.Info("Determining ISA of targeted artifacts...")
// Four is probably a decent guess for the number of ISAs we build for. This would cover:
// i386, x86_64, arm, arm64
archPackageMap := make(map[string][]string, 4)
for _, artifactPath := range artifactPaths {
logrus.Debugf("Attempting to open RPM %q...", artifactPath)
rpmPackage, err := rpm.Open(artifactPath)
if err != nil {
return nil, trace.Wrap(err, "failed to read package %q", artifactPath)
}
arch := rpmPackage.Architecture()
baseArch, err := getBaseArchForArch(arch)
if err != nil {
return nil, trace.Wrap(err, "failed to determine base architecture for artifact %q", artifactPath)
}
logrus.Debugf("Found %q with ISA %q and base ISA %q", artifactPath, arch, baseArch)
if rpmPackagePaths, ok := archPackageMap[baseArch]; ok {
archPackageMap[baseArch] = append(rpmPackagePaths, artifactPath)
} else {
archPackageMap[baseArch] = []string{artifactPath}
}
}
logrus.Infof("Found %d ISAs: %v", len(archPackageMap), archPackageMap)
return archPackageMap, nil
}
// Implementation pulled from https://github.com/rpm-software-management/yum/blob/master/rpmUtils/arch.py#L429
func getBaseArchForArch(arch string) (string, error) {
archTypes := map[string][]string{
"i386": {
"athlon",
"geode",
"i686",
"i586",
"i486",
"i386",
},
"x86_64": {
"amd64",
"ia32e",
"x86_64",
},
// This does not cover ARMv8 and above which have several strange corner cases
"arm": {
"armv2",
"armv3",
"armv4",
"armv5",
"armv6",
"armv7",
"arm",
},
"aarch64": {
"arm64",
"aarch64",
},
}
for baseArch, archTypes := range archTypes {
for _, archType := range archTypes {
if strings.HasPrefix(arch, archType) {
return baseArch, nil
}
}
}
return "", trace.Errorf("failed to determine base arch for architecture %q", arch)
}
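// Examples: getBaseArchForArch("i686") returns "i386", getBaseArchForArch("armv7hl")
// returns "arm", and getBaseArchForArch("amd64") returns "x86_64"; an unknown ISA
// such as "riscv64" returns an error. Note that an arch string of exactly "arm64"
// matches a prefix in both the "arm" and "aarch64" lists, so its result depends on
// Go's randomized map iteration order.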
func (yrt *YumRepoTool) addArtifacts(bucketArtifactPaths []string, relativeGpgPublicKeyPath string) error {
logrus.Info("Adding artifacts to repos...")
archs, err := sortArtifactsByArch(bucketArtifactPaths)
if err != nil {
return trace.Wrap(err, "failed to get artifacts by architecture")
}
repoCount := 0
for osName, osVersions := range yrt.supportedOSs {
osPath := path.Join(yrt.config.localBucketPath, osName)
for _, osVersion := range osVersions {
for arch, packages := range archs {
relativeRepoPath := path.Join(
osVersion,
"Teleport",
arch,
yrt.config.releaseChannel,
semver.Major(yrt.config.artifactVersion),
)
repoPath := path.Join(osPath, relativeRepoPath)
err := yrt.updateRepoWithArtifacts(packages, repoPath)
if err != nil {
return trace.Wrap(err, "failed to add artifact for YUM repo %q", relativeRepoPath)
}
repoFilePath := filepath.Join(repoPath, "teleport.repo")
err = yrt.createRepoFile(repoFilePath, osName, osVersion, arch, relativeGpgPublicKeyPath)
if err != nil {
return trace.Wrap(err, "failed to create repo file for os %q at %q", osName, repoFilePath)
}
repoCount++
}
}
}
logrus.Infof("Updated %d repos with %d artifacts", repoCount, len(bucketArtifactPaths))
return nil
}
func (yrt *YumRepoTool) updateRepoWithArtifacts(packagePaths []string, repoPath string) error {
logrus.Infof("Updating repo at %q with packages %v", repoPath, packagePaths)
// A soft copy here gives a significant performance benefit, and the S3 sync will follow the links when uploading
err := yrt.copyArtifactsToRepo(packagePaths, repoPath)
if err != nil {
return trace.Wrap(err, "failed to copy artifacts to repo %q", repoPath)
}
err = yrt.updateRepoMetadata(repoPath)
if err != nil {
return trace.Wrap(err, "failed to update repo %q metadata", repoPath)
}
err = yrt.signRepoMetadata(repoPath)
if err != nil {
return trace.Wrap(err, "failed to sign repo %q metadata", repoPath)
}
logrus.Infof("Finished updating repo %q", repoPath)
return nil
}
func (yrt *YumRepoTool) copyArtifactsToRepo(artifactPaths []string, repoPath string) error {
// The "repo_rpms" directory here is arbitrary and not tied to anything else.
repoArtifactFolder := path.Join(repoPath, "repo_rpms")
_, err := copyArtifacts(artifactPaths, repoArtifactFolder, false)
if err != nil {
return trace.Wrap(err, "failed to copy artifacts %d artifacts to repo directory at %s", len(artifactPaths), repoArtifactFolder)
}
return nil
}
// Flattens artifactPaths into one directory and returns the created files in that directory
func (yrt *YumRepoTool) copyArtifactsToBucket(artifactPaths []string, bucketArtifactSubdirectory string) ([]string, error) {
bucketArtifactFolder := path.Join(yrt.config.localBucketPath, bucketArtifactSubdirectory)
// A "hard" copy is performed here because the bucket will usually be stored on a non-ephemeral filesystem path.
// If the artifacts are linked rather than copied then every time the uploaded bucket is synced on future runs
// the sync will re-download the real artifacts.
destinationArtifactPaths, err := copyArtifacts(artifactPaths, bucketArtifactFolder, true)
if err != nil {
return nil, trace.Wrap(err, "failed to copy artifacts %d artifacts to bucket directory at %s", len(artifactPaths), destinationArtifactPaths)
}
return destinationArtifactPaths, nil
}
func copyArtifacts(artifactPaths []string, destinationDirectory string, shouldHardCopy bool) ([]string, error) {
copyType := "soft"
if shouldHardCopy {
copyType = "hard"
}
logrus.Debugf("Copying %d artifacts to %q via a %s copy...", len(artifactPaths), destinationDirectory, copyType)
err := os.MkdirAll(destinationDirectory, 0660)
if err != nil {
return nil, trace.Wrap(err, "failed to ensure destination directory %q exists", destinationDirectory)
}
destinationArtifactPaths := make([]string, len(artifactPaths))
for i, artifactPath := range artifactPaths {
artifactDestinationPath := path.Join(destinationDirectory, path.Base(artifactPath))
if shouldHardCopy {
err = hardCopyFile(artifactPath, artifactDestinationPath)
} else {
err = softCopyFile(artifactPath, artifactDestinationPath)
}
// The copy/link functions return errors that must not be dropped
if err != nil {
return nil, trace.Wrap(err, "failed to copy artifact %q to %q", artifactPath, artifactDestinationPath)
}
destinationArtifactPaths[i] = artifactDestinationPath
}
logrus.Debugf("Successfully copied %d artifact(s) to %q", len(destinationArtifactPaths), destinationDirectory)
return destinationArtifactPaths, nil
}
func (yrt *YumRepoTool) updateRepoMetadata(repoPath string) error {
// Ensure the directory exists
err := os.MkdirAll(repoPath, 0660)
if err != nil {
return trace.Wrap(err, "failed to ensure repo directory %q exists", repoPath)
}
err = yrt.createRepo.CreateOrUpdateRepo(repoPath)
if err != nil {
return trace.Wrap(err, "failed to update repo metadata for %q", repoPath)
}
return nil
}
func (yrt *YumRepoTool) signRepoMetadata(repoPath string) error {
repomdPath := path.Join(repoPath, "repodata", "repomd.xml")
err := yrt.gpg.SignFile(repomdPath)
if err != nil {
return trace.Wrap(err, "failed to sign file %q", repomdPath)
}
return nil
}
// Creates an os-specific ".repo" file for yum-config-manager akin to
// https://rpm.releases.teleport.dev/teleport.repo
func (yrt *YumRepoTool) createRepoFile(filePath, osName, osVersion, arch, relativeGpgPublicKeyPath string) error {
// Future work: maybe move domain name to config?
domainName := "yum.releases.teleport.dev"
sectionName := "teleport"
// See these for config details:
// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/sec-configuring_yum_and_yum_repositories
// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/sec-using_yum_variables
repoOptions := map[string]string{
"name": "Gravitational Teleport packages",
"baseurl": (&url.URL{
Scheme: "https",
Host: domainName,
Path: strings.Join(
[]string{
osName,
osVersion,
"Teleport",
arch,
yrt.config.releaseChannel,
semver.Major(yrt.config.artifactVersion),
},
"/",
),
}).String(),
"enabled": "1",
"gpgcheck": "1",
"repo_gpgcheck": "1",
"gpgkey": (&url.URL{
Scheme: "https",
Host: domainName,
Path: relativeGpgPublicKeyPath,
}).String(),
}
// + 2 = section header line and trailing newline
repoFileLines := make([]string, 0, len(repoOptions)+2)
repoFileLines = append(repoFileLines, fmt.Sprintf("[%s]", sectionName))
for key, value := range repoOptions {
repoFileLines = append(repoFileLines, fmt.Sprintf("%s=%s", key, value))
}
repoFileLines = append(repoFileLines, "")
repoFileContent := strings.Join(repoFileLines, "\n")
err := os.WriteFile(filePath, []byte(repoFileContent), 0660)
if err != nil {
return trace.Wrap(err, "failed to create repo file at %q", filePath)
}
logrus.Infof("Created repo file at %q", filePath)
logrus.Debugf("Repo file contents:\n%s", repoFileContent)
return nil
}
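// For illustration, with osName "rhel", osVersion "8", arch "x86_64", release
// channel "stable", and artifact version "v10.0.0" (assumed inputs), the file
// would contain, with key order varying because repoOptions is a map:
//
//	[teleport]
//	name=Gravitational Teleport packages
//	baseurl=https://yum.releases.teleport.dev/rhel/8/Teleport/x86_64/stable/v10
//	enabled=1
//	gpgcheck=1
//	repo_gpgcheck=1
//	gpgkey=https://yum.releases.teleport.dev/gpg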
// Guaranteed to perform a "copy" operation rather than just linking. Should only be used
// where linking is not acceptable.
func hardCopyFile(src, dest string) error {
// Implementation is a modified version of method 1 from
// https://opensource.com/article/18/6/copying-files-go
start := time.Now()
logrus.Debugf("Beginning hard file copy from %q to %q...", src, dest)
sourceFileStat, err := os.Stat(src)
if err != nil {
return trace.Wrap(err, "failed to get filesystem data for %q", src)
}
if !sourceFileStat.Mode().IsRegular() {
return trace.Errorf("Source file %q is not a regular file and cannot be copied", src)
}
sourceHandle, err := os.Open(src)
if err != nil {
return trace.Wrap(err, "failed to open source file %q for reading", src)
}
defer sourceHandle.Close()
destinationHandle, err := os.Create(dest)
if err != nil {
return trace.Wrap(err, "failed to open destinatino file %q for writing", dest)
}
defer destinationHandle.Close()
_, err = io.Copy(destinationHandle, sourceHandle)
if err != nil {
return trace.Wrap(err, "failed to copy source file %q to destination file %q", src, dest)
}
logrus.Debugf("File transfer from %q to %q completed in %s", src, dest, time.Since(start).Round(time.Millisecond))
return nil
}
// Copies or links the src file to the dest path. The implementation of this function is subject
// to change. If a guaranteed copy is needed then `hardCopyFile` should be used instead.
func softCopyFile(src, dest string) error {
// Profiling has shown that disk reads/writes are a significant bottleneck with the
// APT side of the tool. This will reduce roughly 25GB of read/writes to nearly 0.
start := time.Now()
logrus.Debugf("Beginning soft file copy from %q to %q...", src, dest)
err := os.Symlink(src, dest)
if err != nil {
return trace.Wrap(err, "failed to link %q to %q", src, dest)
}
logrus.Debugf("File transfer from %q to %q completed in %s", src, dest, time.Since(start).Round(time.Nanosecond))
return nil
}

View file

@ -3,26 +3,29 @@ module github.com/gravitational/teleport/build.assets/tooling
go 1.18
require (
github.com/aws/aws-sdk-go v1.42.30
github.com/aws/aws-sdk-go v1.44.47
github.com/cavaliergopher/rpm v1.2.0
github.com/davecgh/go-spew v1.1.1
github.com/google/go-github/v41 v41.0.0
github.com/gravitational/trace v1.1.15
github.com/seqsense/s3sync v1.8.1
github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf
github.com/seqsense/s3sync v1.8.2
github.com/sirupsen/logrus v1.8.1
github.com/stretchr/testify v1.2.2
golang.org/x/exp v0.0.0-20220518171630-0b5c67f07fdf
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
)
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/gabriel-vasile/mimetype v1.4.0 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jonboulle/clockwork v0.2.2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 // indirect
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
golang.org/x/net v0.0.0-20220630215102-69896b714898 // indirect
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
)

View file

@ -1,5 +1,8 @@
github.com/aws/aws-sdk-go v1.42.30 h1:GvzWHwAdE5ZQ9UOcq0lX+PTzVJ4+sm1DjYrk6nUSTgA=
github.com/aws/aws-sdk-go v1.42.30/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc=
github.com/aws/aws-sdk-go v1.44.19/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.47 h1:uyiNvoR4wfZ8Bp4ghgbyzGFIg5knjZMUAd5S9ba9qNU=
github.com/aws/aws-sdk-go v1.44.47/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/cavaliergopher/rpm v1.2.0 h1:s0h+QeVK252QFTolkhGiMeQ1f+tMeIMhGl8B1HUmGUc=
github.com/cavaliergopher/rpm v1.2.0/go.mod h1:R0q3vTqa7RUvPofAZYrnjJ63hh2vngjFfphuXiExVos=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -16,6 +19,8 @@ github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/gravitational/trace v1.1.15 h1:dfaFcARt110nCX6RSvrcRUbvRawEYAasXyCqnhXo0Xg=
github.com/gravitational/trace v1.1.15/go.mod h1:RvdOUHE4SHqR3oXlFFKnGzms8a5dugHygGw1bqDstYI=
github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf h1:FtEj8sfIcaaBfAKrE1Cwb61YDtYq9JxChK1c7AKce7s=
github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf/go.mod h1:yrqSXGoD/4EKfF26AOGzscPOgTTJcyAwM2rpixWT+t4=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@ -30,8 +35,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/seqsense/s3sync v1.8.1 h1:eUEQ1aeDXaQjsoMrQdDZ4ZL2yTXWHWKWyUigpk2mr5k=
github.com/seqsense/s3sync v1.8.1/go.mod h1:yVAPljnzE70ZLhn2OCaI35/59UpIl8RXYRvyU/Tthgs=
github.com/seqsense/s3sync v1.8.2 h1:oMgs3aZ07GovNc0uQ2oWYNPRN7i4foU9pwF3RhOO0Gk=
github.com/seqsense/s3sync v1.8.2/go.mod h1:lDmrc+z24//pnnFOFzBlBX+uJwVgnv429EGdeiP0/vg=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@ -48,23 +53,29 @@ golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220630215102-69896b714898 h1:K7wO6V1IrczY9QOQ2WkVpw4JQSwCd52UsxVEirZUfiw=
golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=

dronegen/apt.go Normal file
View file

@ -0,0 +1,53 @@
// Copyright 2021 Gravitational, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import "path"
// This function calls the build-os-package-repos tool which handles the APT portion of RFD 0058.
func promoteAptPipeline() pipeline {
return getAptPipelineBuilder().buildPromoteOsPackagePipeline()
}
func migrateAptPipeline(triggerBranch string, migrationVersions []string) pipeline {
return getAptPipelineBuilder().buildMigrateOsPackagePipeline(triggerBranch, migrationVersions)
}
func getAptPipelineBuilder() *OsPackageToolPipelineBuilder {
optpb := NewOsPackageToolPipelineBuilder(
"drone-s3-aptrepo-pvc",
"deb",
"apt",
NewRepoBucketSecretNames(
"APT_REPO_NEW_AWS_S3_BUCKET",
"APT_REPO_NEW_AWS_ACCESS_KEY_ID",
"APT_REPO_NEW_AWS_SECRET_ACCESS_KEY",
),
)
optpb.environmentVars["APTLY_ROOT_DIR"] = value{
raw: path.Join(optpb.pvcMountPoint, "aptly"),
}
optpb.requiredPackages = []string{
"aptly",
}
optpb.extraArgs = []string{
"-aptly-root-dir \"$APTLY_ROOT_DIR\"",
}
return optpb
}

View file

@ -32,7 +32,7 @@ func main() {
pipelines = append(pipelines, pushPipelines()...)
pipelines = append(pipelines, tagPipelines()...)
pipelines = append(pipelines, cronPipelines()...)
pipelines = append(pipelines, artifactMigrationPipeline())
pipelines = append(pipelines, artifactMigrationPipeline()...)
pipelines = append(pipelines, promoteBuildPipelines()...)
pipelines = append(pipelines, updateDocsPipeline())
pipelines = append(pipelines, buildboxPipeline())

View file

@ -14,321 +14,6 @@
package main
import (
"fmt"
"path"
"strings"
)
// Used for one-off migrations of older versions.
// Use cases include:
// * We want to support another OS while providing backwards compatibility
// * We want to support another OS version while providing backwards compatibility
// * A customer wants to be able to install an older version via APT/YUM even if we
// no longer support it
// * RPM migrations after new YUM pipeline is done
func artifactMigrationPipeline() pipeline {
migrationVersions := []string{
// These versions were migrated as a part of the new `promoteAptPipeline`
// "v6.2.31",
// "v7.3.17",
// "v7.3.18",
// "v7.3.19",
// "v7.3.20",
// "v7.3.21",
// "v7.3.23",
// "v8.3.3",
// "v8.3.4",
// "v8.3.5",
// "v8.3.6",
// "v8.3.7",
// "v8.3.8",
// "v8.3.9",
// "v8.3.10",
// "v8.3.11",
// "v8.3.12",
// "v8.3.14",
// "v9.0.0",
// "v9.0.1",
// "v9.0.2",
// "v9.0.3",
// "v9.0.4",
// "v9.1.0",
// "v9.1.1",
// "v9.1.2",
// "v9.1.3",
// "v9.2.0",
// "v9.2.1",
// "v9.2.2",
// "v9.2.3",
// "v9.2.4",
// "v9.3.0",
// "v9.3.2",
// "v9.3.4",
// "v9.3.5",
}
// Pushing to this branch will trigger the listed versions to be migrated. Typically this should be
// the branch that these changes are being committed to.
migrationBranch := "" // "rfd/0058-package-distribution"
aptPipeline := migrateAptPipeline(migrationBranch, migrationVersions)
return aptPipeline
}
// This function calls the build-apt-repos tool which handles the APT portion of RFD 0058.
func promoteAptPipeline() pipeline {
aptVolumeName := "aptrepo"
checkoutPath := "/go/src/github.com/gravitational/teleport"
commitName := "${DRONE_TAG}"
p := buildBaseAptPipeline("publish-apt-new-repos", aptVolumeName, checkoutPath, commitName)
p.Trigger = triggerPromote
p.Trigger.Repo.Include = []string{"gravitational/teleport"}
steps := []step{
verifyTaggedBuildStep(),
}
steps = append(steps, p.Steps...)
steps = append(steps,
step{
Name: "Check if tag is prerelease",
Image: "golang:1.17-alpine",
Commands: []string{
fmt.Sprintf("cd %q", path.Join(checkoutPath, "build.assets", "tooling")),
"go run ./cmd/check -tag ${DRONE_TAG} -check prerelease || (echo '---> This is a prerelease, not publishing ${DRONE_TAG} packages to APT repos' && exit 78)",
},
},
)
steps = append(steps, getDroneTagVersionSteps(checkoutPath, aptVolumeName)...)
p.Steps = steps
return p
}
func migrateAptPipeline(triggerBranch string, migrationVersions []string) pipeline {
aptVolumeName := "aptrepo"
pipelineName := "migrate-apt-new-repos"
// DRONE_TAG is not available outside of promotion pipelines and will cause drone to fail with a
// "migrate-apt-new-repos: bad substitution" error if used here
checkoutPath := "/go/src/github.com/gravitational/teleport"
commitName := "${DRONE_COMMIT}"
// If migrations are not configured then don't run
if triggerBranch == "" || len(migrationVersions) == 0 {
return buildNeverTriggerPipeline(pipelineName)
}
p := buildBaseAptPipeline(pipelineName, aptVolumeName, checkoutPath, commitName)
p.Trigger = trigger{
Repo: triggerRef{Include: []string{"gravitational/teleport"}},
Event: triggerRef{Include: []string{"push"}},
Branch: triggerRef{Include: []string{triggerBranch}},
}
for _, migrationVersion := range migrationVersions {
p.Steps = append(p.Steps, getVersionSteps(checkoutPath, migrationVersion, aptVolumeName)...)
}
return p
}
// Builds a pipeline that is syntactically correct but should never trigger to create
// a placeholder pipeline
func buildNeverTriggerPipeline(pipelineName string) pipeline {
p := newKubePipeline(pipelineName)
p.Trigger = trigger{
Event: triggerRef{Include: []string{"custom"}},
Repo: triggerRef{Include: []string{"non-existent-repository"}},
Branch: triggerRef{Include: []string{"non-existent-branch"}},
}
p.Steps = []step{
{
Name: "Placeholder",
Image: "alpine:latest",
Commands: []string{
"echo \"This command, step, and pipeline never runs\"",
},
},
}
return p
}
// Functions that use this method should add at least:
// * a Trigger
// * Steps for checkout
func buildBaseAptPipeline(pipelineName, aptVolumeName, commit, checkoutPath string) pipeline {
p := newKubePipeline(pipelineName)
p.Workspace = workspace{Path: "/go"}
p.Volumes = []volume{
{
Name: aptVolumeName,
Claim: &volumeClaim{
Name: "drone-s3-aptrepo-pvc",
},
},
volumeTmpfs,
}
p.Steps = []step{
{
Name: "Check out code",
Image: "alpine/git:latest",
Commands: aptToolCheckoutCommands(checkoutPath, commit),
},
}
return p
}
func getDroneTagVersionSteps(codePath, aptVolumeName string) []step {
return getVersionSteps(codePath, "${DRONE_TAG}", aptVolumeName)
}
// Version should start with a 'v', i.e. v1.2.3 or v9.0.1, or should be an environment var
// i.e. ${DRONE_TAG}
func getVersionSteps(codePath, version, aptVolumeName string) []step {
artifactPath := "/go/artifacts"
pvcMountPoint := "/mnt"
var bucketFolder string
switch version[0:1] {
// If environment var
case "$":
// Remove the 'v' at runtime as the value isn't known at compile time
// This will change "${SOME_VAR}" to "${SOME_VAR##v}". `version` isn't actually
// an environment variable - it's a Drone substitution variable. See
// https://docs.drone.io/pipeline/environment/substitution/ for details.
bucketFolder = fmt.Sprintf("%s##v}", version[:len(version)-1])
// If static string
case "v":
// Remove the 'v' at compile time as the value is known then
bucketFolder = version[1:]
}
return []step{
{
Name: fmt.Sprintf("Download artifacts for %q", version),
Image: "amazon/aws-cli",
Environment: map[string]value{
"AWS_S3_BUCKET": {
fromSecret: "AWS_S3_BUCKET",
},
"AWS_ACCESS_KEY_ID": {
fromSecret: "AWS_ACCESS_KEY_ID",
},
"AWS_SECRET_ACCESS_KEY": {
fromSecret: "AWS_SECRET_ACCESS_KEY",
},
"ARTIFACT_PATH": {
raw: artifactPath,
},
},
Commands: []string{
"mkdir -pv \"$ARTIFACT_PATH\"",
strings.Join(
[]string{
"aws s3 sync",
"--no-progress",
"--delete",
"--exclude \"*\"",
"--include \"*.deb*\"",
fmt.Sprintf("s3://$AWS_S3_BUCKET/teleport/tag/%s/", bucketFolder),
"\"$ARTIFACT_PATH\"",
},
" ",
),
},
},
{
Name: fmt.Sprintf("Publish debs to APT repos for %q", version),
// TODO set this if dronegen `step` supports https://docs.drone.io/pipeline/ssh/syntax/parallelism/ in the future
// DependsOn: []string {
// "Check out code",
// "Download artifacts",
// },
Image: "golang:1.18.1-bullseye",
Environment: map[string]value{
"APT_S3_BUCKET": {
fromSecret: "APT_REPO_NEW_AWS_S3_BUCKET",
},
"BUCKET_CACHE_PATH": {
// If we need to cache the bucket on the PVC for some reason in the future
// uncomment this line
// raw: path.Join(pvcMountPoint, "bucket-cache"),
raw: "/tmp/bucket",
},
"AWS_REGION": {
raw: "us-west-2",
},
"AWS_ACCESS_KEY_ID": {
fromSecret: "APT_REPO_NEW_AWS_ACCESS_KEY_ID",
},
"AWS_SECRET_ACCESS_KEY": {
fromSecret: "APT_REPO_NEW_AWS_SECRET_ACCESS_KEY",
},
"ARTIFACT_PATH": {
raw: artifactPath,
},
"APTLY_ROOT_DIR": {
raw: path.Join(pvcMountPoint, "aptly"),
},
"GNUPGHOME": {
raw: "/tmpfs/gnupg",
},
"GPG_RPM_SIGNING_ARCHIVE": {
fromSecret: "GPG_RPM_SIGNING_ARCHIVE",
},
},
Commands: []string{
"mkdir -pv -m0700 $GNUPGHOME",
"echo \"$GPG_RPM_SIGNING_ARCHIVE\" | base64 -d | tar -xzf - -C $GNUPGHOME",
"chown -R root:root $GNUPGHOME",
"apt update",
"apt install aptly tree -y",
fmt.Sprintf("cd %q", path.Join(codePath, "build.assets", "tooling")),
fmt.Sprintf("export VERSION=%q", version),
"export RELEASE_CHANNEL=\"stable\"", // The tool supports several release channels but I'm not sure where this should be configured
// "rm -rf \"$APTLY_ROOT_DIR\"", // Uncomment this to completely dump the Aptly database and force a rebuild
strings.Join(
[]string{
// This just makes the (long) command a little more readable
"go run ./cmd/build-apt-repos",
"-bucket \"$APT_S3_BUCKET\"",
"-local-bucket-path \"$BUCKET_CACHE_PATH\"",
"-artifact-version \"$VERSION\"",
"-release-channel \"$RELEASE_CHANNEL\"",
"-aptly-root-dir \"$APTLY_ROOT_DIR\"",
"-artifact-path \"$ARTIFACT_PATH\"",
"-log-level 4", // Set this to 5 for debug logging
},
" ",
),
"rm -rf \"$BUCKET_CACHE_PATH\"",
"df -h \"$APTLY_ROOT_DIR\"",
},
Volumes: []volumeRef{
{
Name: aptVolumeName,
Path: pvcMountPoint,
},
volumeRefTmpfs,
},
},
}
}
// Note that tags are also valid here as a tag refers to a specific commit
func aptToolCheckoutCommands(commit, checkoutPath string) []string {
commands := []string{
fmt.Sprintf("mkdir -p %q", checkoutPath),
fmt.Sprintf("cd %q", checkoutPath),
`git clone https://github.com/gravitational/${DRONE_REPO_NAME}.git .`,
fmt.Sprintf("git checkout %q", commit),
}
return commands
}
func updateDocsPipeline() pipeline {
// TODO: migrate
return pipeline{}

dronegen/os_repos.go Normal file
View file

@ -0,0 +1,460 @@
// Copyright 2021 Gravitational, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"path"
"strings"
)
func promoteBuildOsRepoPipelines() []pipeline {
aptPipeline := promoteAptPipeline()
yumPipeline := promoteYumPipeline()
return []pipeline{
aptPipeline,
yumPipeline,
}
}
// Used for one-off migrations of older versions.
// Use cases include:
// * We want to support another OS while providing backwards compatibility
// * We want to support another OS version while providing backwards compatibility
// * A customer wants to be able to install an older version via APT/YUM even if we
// no longer support it
// * RPM migrations after new YUM pipeline is done
func artifactMigrationPipeline() []pipeline {
migrationVersions := []string{
// These versions were migrated as a part of the new `promoteAptPipeline`
// "v6.2.31",
// "v7.3.17",
// "v7.3.18",
// "v7.3.19",
// "v7.3.20",
// "v7.3.21",
// "v7.3.23",
// "v8.3.3",
// "v8.3.4",
// "v8.3.5",
// "v8.3.6",
// "v8.3.7",
// "v8.3.8",
// "v8.3.9",
// "v8.3.10",
// "v8.3.11",
// "v8.3.12",
// "v8.3.14",
// "v8.3.15",
// "v8.3.16",
// "v9.0.0",
// "v9.0.1",
// "v9.0.2",
// "v9.0.3",
// "v9.0.4",
// "v9.1.0",
// "v9.1.1",
// "v9.1.2",
// "v9.1.3",
// "v9.2.0",
// "v9.2.1",
// "v9.2.2",
// "v9.2.3",
// "v9.2.4",
// "v9.3.0",
// "v9.3.2",
// "v9.3.4",
// "v9.3.5",
// "v9.3.6",
// "v9.3.7",
// "v9.3.9",
// "v9.3.10",
// "v9.3.12",
// "v9.3.13",
// "v10.0.0",
// "v10.0.1",
// "v10.0.2",
}
// Pushing to this branch will trigger the listed versions to be migrated. Typically this should be
// the branch that these changes are being committed to.
migrationBranch := "" // "rfd/0058-package-distribution"
aptPipeline := migrateAptPipeline(migrationBranch, migrationVersions)
yumPipeline := migrateYumPipeline(migrationBranch, migrationVersions)
return []pipeline{
aptPipeline,
yumPipeline,
}
}
type RepoBucketSecretNames struct {
bucketName string
accessKeyID string
secretAccessKey string
}
func NewRepoBucketSecretNames(bucketName, accessKeyID, secretAccessKey string) *RepoBucketSecretNames {
return &RepoBucketSecretNames{
bucketName: bucketName,
accessKeyID: accessKeyID,
secretAccessKey: secretAccessKey,
}
}
type OsPackageToolPipelineBuilder struct {
claimName string
packageType string
packageManagerName string
volumeName string
pipelineNameSuffix string
artifactPath string
pvcMountPoint string
bucketSecrets *RepoBucketSecretNames
extraArgs []string
requiredPackages []string
setupCommands []string
environmentVars map[string]value
}
// This function configures the build tool with its requirements and sensible defaults.
// If additional configuration is required then the returned struct should be modified
// prior to calling "build" functions on it.
func NewOsPackageToolPipelineBuilder(claimName, packageType, packageManagerName string, bucketSecrets *RepoBucketSecretNames) *OsPackageToolPipelineBuilder {
optpb := &OsPackageToolPipelineBuilder{
claimName: claimName,
packageType: packageType,
packageManagerName: packageManagerName,
bucketSecrets: bucketSecrets,
extraArgs: []string{},
setupCommands: []string{},
requiredPackages: []string{},
volumeName: fmt.Sprintf("%s-persistence", packageManagerName),
pipelineNameSuffix: fmt.Sprintf("%s-new-repos", packageManagerName),
artifactPath: "/go/artifacts",
pvcMountPoint: "/mnt",
}
optpb.environmentVars = map[string]value{
"REPO_S3_BUCKET": {
fromSecret: optpb.bucketSecrets.bucketName,
},
"AWS_ACCESS_KEY_ID": {
fromSecret: optpb.bucketSecrets.accessKeyID,
},
"AWS_SECRET_ACCESS_KEY": {
fromSecret: optpb.bucketSecrets.secretAccessKey,
},
"AWS_REGION": {
raw: "us-west-2",
},
"BUCKET_CACHE_PATH": {
// If we need to cache the bucket on the PVC for some reason in the future
// uncomment this line
// raw: path.Join(pvcMountPoint, "bucket-cache"),
raw: "/tmp/bucket",
},
"ARTIFACT_PATH": {
raw: optpb.artifactPath,
},
"GNUPGHOME": {
raw: "/tmpfs/gnupg",
},
"GPG_RPM_SIGNING_ARCHIVE": {
fromSecret: "GPG_RPM_SIGNING_ARCHIVE",
},
"DEBIAN_FRONTEND": {
raw: "noninteractive",
},
}
return optpb
}
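// Example usage (mirroring dronegen/apt.go in this commit): construct a
// package-manager-specific builder, customize it, then build pipelines from it:
//
//	optpb := NewOsPackageToolPipelineBuilder(
//		"drone-s3-aptrepo-pvc", "deb", "apt",
//		NewRepoBucketSecretNames(
//			"APT_REPO_NEW_AWS_S3_BUCKET",
//			"APT_REPO_NEW_AWS_ACCESS_KEY_ID",
//			"APT_REPO_NEW_AWS_SECRET_ACCESS_KEY",
//		),
//	)
//	optpb.requiredPackages = []string{"aptly"}
//	p := optpb.buildPromoteOsPackagePipeline()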
func (optpb *OsPackageToolPipelineBuilder) buildPromoteOsPackagePipeline() pipeline {
pipelineName := fmt.Sprintf("publish-%s", optpb.pipelineNameSuffix)
checkoutPath := "/go/src/github.com/gravitational/teleport"
commitName := "${DRONE_TAG}"
checkoutStepName := "Check out code"
p := optpb.buildBaseOsPackagePipeline(pipelineName, checkoutStepName, checkoutPath, commitName)
p.Trigger = triggerPromote
p.Trigger.Repo.Include = []string{"gravitational/teleport"}
setupSteps := []step{
{
Name: "Verify build is tagged",
Image: "alpine:latest",
Commands: []string{
"[ -n ${DRONE_TAG} ] || (echo 'DRONE_TAG is not set. Is the commit tagged?' && exit 1)",
},
},
}
setupSteps = append(setupSteps, p.Steps...)
setupSteps = append(setupSteps,
step{
Name: "Check if tag is prerelease",
Image: "golang:1.17-alpine",
Commands: []string{
fmt.Sprintf("cd %q", path.Join(checkoutPath, "build.assets", "tooling")),
"go run ./cmd/check -tag ${DRONE_TAG} -check prerelease || (echo '---> This is a prerelease, not publishing ${DRONE_TAG} packages to APT repos' && exit 78)",
},
},
)
setupStepNames := make([]string, 0, len(setupSteps))
for _, setupStep := range setupSteps {
setupStepNames = append(setupStepNames, setupStep.Name)
}
versionSteps := optpb.getDroneTagVersionSteps(checkoutPath)
for i := range versionSteps {
versionStep := &versionSteps[i]
if versionStep.DependsOn == nil {
versionStep.DependsOn = setupStepNames
continue
}
versionStep.DependsOn = append(versionStep.DependsOn, setupStepNames...)
}
p.Steps = append(setupSteps, versionSteps...)
return p
}
func (optpb *OsPackageToolPipelineBuilder) buildMigrateOsPackagePipeline(triggerBranch string, migrationVersions []string) pipeline {
pipelineName := fmt.Sprintf("migrate-%s", optpb.pipelineNameSuffix)
checkoutPath := "/go/src/github.com/gravitational/teleport"
// DRONE_TAG is not available outside of promotion pipelines and will cause drone to fail with a
// "migrate-apt-new-repos: bad substitution" error if used here
commitName := "${DRONE_COMMIT}"
checkoutStepName := "Check out code"
// If migrations are not configured then don't run
if triggerBranch == "" || len(migrationVersions) == 0 {
return buildNeverTriggerPipeline(pipelineName)
}
p := optpb.buildBaseOsPackagePipeline(pipelineName, checkoutStepName, checkoutPath, commitName)
p.Trigger = trigger{
Repo: triggerRef{Include: []string{"gravitational/teleport"}},
Event: triggerRef{Include: []string{"push"}},
Branch: triggerRef{Include: []string{triggerBranch}},
}
for _, migrationVersion := range migrationVersions {
// Not enabling parallelism here so that multiple migrations don't run at once
p.Steps = append(p.Steps, optpb.getVersionSteps(checkoutPath, migrationVersion, false)...)
}
setStepResourceLimits(p.Steps)
return p
}
// Builds a pipeline that is syntactically correct but should never trigger to create
// a placeholder pipeline
func buildNeverTriggerPipeline(pipelineName string) pipeline {
p := newKubePipeline(pipelineName)
p.Trigger = trigger{
Event: triggerRef{Include: []string{"custom"}},
Repo: triggerRef{Include: []string{"non-existent-repository"}},
Branch: triggerRef{Include: []string{"non-existent-branch"}},
}
p.Steps = []step{
{
Name: "Placeholder",
Image: "alpine:latest",
Commands: []string{
"echo \"This command, step, and pipeline never run\"",
},
},
}
return p
}
// Builds the common base for OS package pipelines: workspace, volumes, and a
// checkout step. Callers of this method should still add at least:
// * a Trigger
// * the steps that do the actual work
func (optpb *OsPackageToolPipelineBuilder) buildBaseOsPackagePipeline(pipelineName, checkoutStepName, checkoutPath, commit string) pipeline {
p := newKubePipeline(pipelineName)
p.Workspace = workspace{Path: "/go"}
p.Volumes = []volume{
{
Name: optpb.volumeName,
Claim: &volumeClaim{
Name: optpb.claimName,
},
},
volumeTmpfs,
}
p.Steps = []step{
{
Name: checkoutStepName,
Image: "alpine/git:latest",
Commands: toolCheckoutCommands(checkoutPath, commit),
},
}
setStepResourceLimits(p.Steps)
return p
}
func setStepResourceLimits(steps []step) {
// Resource requests are not currently supported by the Drone Kubernetes runner
// we use, so this is a no-op for now. When they are supported, something like
// the following would set them per step:
// for i := range steps {
// step := &steps[i]
// if step.Resources == nil {
// step.Resources = &containerResources{}
// }
// if step.Resources.Requests == nil {
// step.Resources.Requests = &resourceSet{}
// }
// step.Resources.Requests.CPU = 100
// step.Resources.Requests.Memory = (*resourceQuantity)(resource.NewQuantity(100*1024*1024, resource.BinarySI))
// }
}
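// Limits, unlike requests, are modeled by the containerResources type added in
// this commit, so a hypothetical limits-setting variant could look like the
// sketch below (assuming k8s.io/apimachinery/pkg/api/resource is imported here
// as it is in types.go). Illustrative only; not called anywhere:
func exampleSetStepResourceLimits(steps []step) {
	for i := range steps {
		s := &steps[i]
		if s.Resources == nil {
			s.Resources = &containerResources{}
		}
		if s.Resources.Limits == nil {
			s.Resources.Limits = &resourceSet{}
		}
		// Placeholder values; Drone takes cpu as a plain number here
		s.Resources.Limits.CPU = 2
		s.Resources.Limits.Memory = (*resourceQuantity)(resource.NewQuantity(1024*1024*1024, resource.BinarySI)) // 1Gi
	}
}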
// Note that tags are also valid here as a tag refers to a specific commit
func toolCheckoutCommands(checkoutPath, commit string) []string {
commands := []string{
fmt.Sprintf("mkdir -p %q", checkoutPath),
fmt.Sprintf("cd %q", checkoutPath),
`git clone https://github.com/gravitational/${DRONE_REPO_NAME}.git .`,
fmt.Sprintf("git checkout %q", commit),
}
return commands
}
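// For illustration, printing the commands generated for a promote run; the
// ${DRONE_TAG} reference is left for Drone to substitute at runtime. This
// helper is hypothetical and exists only to show the output:
func exampleCheckoutCommands() {
	for _, command := range toolCheckoutCommands("/go/src/github.com/gravitational/teleport", "${DRONE_TAG}") {
		fmt.Println(command)
	}
	// Output:
	// mkdir -p "/go/src/github.com/gravitational/teleport"
	// cd "/go/src/github.com/gravitational/teleport"
	// git clone https://github.com/gravitational/${DRONE_REPO_NAME}.git .
	// git checkout "${DRONE_TAG}"
}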
func (optpb *OsPackageToolPipelineBuilder) getDroneTagVersionSteps(codePath string) []step {
return optpb.getVersionSteps(codePath, "${DRONE_TAG}", true)
}
// Version should start with a 'v' (e.g. v1.2.3 or v9.0.1), or be a Drone
// substitution variable such as ${DRONE_TAG}
func (optpb *OsPackageToolPipelineBuilder) getVersionSteps(codePath, version string, enableParallelism bool) []step {
var bucketFolder string
switch version[0:1] {
// If environment var
case "$":
// Remove the 'v' at runtime as the value isn't known at compile time
// This will change "${SOME_VAR}" to "${SOME_VAR##v}". `version` isn't actually
// an environment variable - it's a Drone substitution variable. See
// https://docs.drone.io/pipeline/environment/substitution/ for details.
bucketFolder = fmt.Sprintf("%s##v}", version[:len(version)-1])
// If static string
case "v":
// Remove the 'v' at compile time as the value is known then
bucketFolder = version[1:]
}
toolSetupCommands := []string{}
if len(optpb.requiredPackages) > 0 {
toolSetupCommands = []string{
"apt update",
fmt.Sprintf("apt install -y %s", strings.Join(optpb.requiredPackages, " ")),
}
}
toolSetupCommands = append(toolSetupCommands, optpb.setupCommands...)
downloadStepName := fmt.Sprintf("Download artifacts for %q", version)
buildStepDependencies := []string{}
if enableParallelism {
buildStepDependencies = append(buildStepDependencies, downloadStepName)
}
return []step{
{
Name: downloadStepName,
Image: "amazon/aws-cli",
Environment: map[string]value{
"AWS_S3_BUCKET": {
fromSecret: "AWS_S3_BUCKET",
},
"AWS_ACCESS_KEY_ID": {
fromSecret: "AWS_ACCESS_KEY_ID",
},
"AWS_SECRET_ACCESS_KEY": {
fromSecret: "AWS_SECRET_ACCESS_KEY",
},
"ARTIFACT_PATH": {
raw: optpb.artifactPath,
},
},
Commands: []string{
"mkdir -pv \"$ARTIFACT_PATH\"",
// Clear out old versions from previous steps; the glob must sit outside the
// quotes or the shell would try to remove a literal file named '*'
"rm -rf \"${ARTIFACT_PATH:?}\"/*",
strings.Join(
[]string{
"aws s3 sync",
"--no-progress",
"--delete",
"--exclude \"*\"",
fmt.Sprintf("--include \"*.%s*\"", optpb.packageType),
fmt.Sprintf("s3://$AWS_S3_BUCKET/teleport/tag/%s/", bucketFolder),
"\"$ARTIFACT_PATH\"",
},
" ",
),
},
},
{
Name: fmt.Sprintf("Publish %ss to %s repos for %q", optpb.packageType, strings.ToUpper(optpb.packageManagerName), version),
Image: "golang:1.18.4-bullseye",
Environment: optpb.environmentVars,
Commands: append(
toolSetupCommands,
[]string{
"mkdir -pv -m0700 \"$GNUPGHOME\"",
"echo \"$GPG_RPM_SIGNING_ARCHIVE\" | base64 -d | tar -xzf - -C \"$GNUPGHOME\"",
"chown -R root:root \"$GNUPGHOME\"",
fmt.Sprintf("cd %q", path.Join(codePath, "build.assets", "tooling")),
fmt.Sprintf("export VERSION=%q", version),
"export RELEASE_CHANNEL=\"stable\"", // The tool supports several release channels, but where channel selection should be configured is still undecided
strings.Join(
append(
[]string{
// This just makes the (long) command a little more readable
"go run ./cmd/build-os-package-repos",
optpb.packageManagerName,
"-bucket \"$REPO_S3_BUCKET\"",
"-local-bucket-path \"$BUCKET_CACHE_PATH\"",
"-artifact-version \"$VERSION\"",
"-release-channel \"$RELEASE_CHANNEL\"",
"-artifact-path \"$ARTIFACT_PATH\"",
"-log-level 4", // Set this to 5 for debug logging
},
optpb.extraArgs...,
),
" ",
),
}...,
),
Volumes: []volumeRef{
{
Name: optpb.volumeName,
Path: optpb.pvcMountPoint,
},
volumeRefTmpfs,
},
DependsOn: buildStepDependencies,
},
}
}
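// To make the 'v'-stripping above concrete: in a promote pipeline the version
// is "${DRONE_TAG}" and Drone strips the leading 'v' at substitution time,
// while in a migration pipeline the version is a literal like "v9.3.7" and
// dronegen strips it at generation time. A hypothetical helper showing the
// resulting S3 source prefixes (illustrative only):
func exampleArtifactPrefixes() {
	// bucketFolder values as derived by getVersionSteps for each case
	for _, bucketFolder := range []string{"${DRONE_TAG##v}", "9.3.7"} {
		fmt.Printf("s3://$AWS_S3_BUCKET/teleport/tag/%s/\n", bucketFolder)
	}
	// Output:
	// s3://$AWS_S3_BUCKET/teleport/tag/${DRONE_TAG##v}/
	// s3://$AWS_S3_BUCKET/teleport/tag/9.3.7/
}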


@@ -17,10 +17,11 @@ package main
 import "fmt"
 func promoteBuildPipelines() []pipeline {
-	aptPipeline := promoteAptPipeline()
-	dockerPipelineECR := buildDockerPromotionPipelineECR()
-	dockerPipelineQuay := buildDockerPromotionPipelineQuay()
-	return []pipeline{aptPipeline, dockerPipelineECR, dockerPipelineQuay}
+	promotePipelines := make([]pipeline, 0)
+	promotePipelines = append(promotePipelines, promoteBuildOsRepoPipelines()...)
+	promotePipelines = append(promotePipelines, buildDockerPromotionPipelineECR(), buildDockerPromotionPipelineQuay())
+	return promotePipelines
 }
 func buildDockerPromotionPipelineECR() pipeline {


@@ -19,6 +19,8 @@ import (
 	"fmt"
 	"runtime"
 	"strings"
+
+	"k8s.io/apimachinery/pkg/api/resource"
 )
// Types to mirror the YAML fields of the drone config.
@@ -159,15 +161,17 @@ type volumeRef struct {
}
 type step struct {
-	Name        string           `yaml:"name"`
-	Image       string           `yaml:"image,omitempty"`
-	Commands    []string         `yaml:"commands,omitempty"`
-	Environment map[string]value `yaml:"environment,omitempty"`
-	Volumes     []volumeRef      `yaml:"volumes,omitempty"`
-	Settings    map[string]value `yaml:"settings,omitempty"`
-	Template    []string         `yaml:"template,omitempty"`
-	When        *condition       `yaml:"when,omitempty"`
-	Failure     string           `yaml:"failure,omitempty"`
+	Name        string              `yaml:"name"`
+	Image       string              `yaml:"image,omitempty"`
+	Commands    []string            `yaml:"commands,omitempty"`
+	Environment map[string]value    `yaml:"environment,omitempty"`
+	Volumes     []volumeRef         `yaml:"volumes,omitempty"`
+	Settings    map[string]value    `yaml:"settings,omitempty"`
+	Template    []string            `yaml:"template,omitempty"`
+	When        *condition          `yaml:"when,omitempty"`
+	Failure     string              `yaml:"failure,omitempty"`
+	Resources   *containerResources `yaml:"resources,omitempty"`
+	DependsOn   []string            `yaml:"depends_on,omitempty"`
 }
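// A quick sketch of how the new DependsOn field serializes (assuming
// gopkg.in/yaml.v2, which this package uses to render the config); the step
// below is hypothetical and exists only to show the output shape:
func exampleStepDependsOn() {
	s := step{
		Name:      "Publish",
		Image:     "golang:1.18.4-bullseye",
		DependsOn: []string{"Check out code", "Download artifacts"},
	}
	if out, err := yaml.Marshal(s); err == nil {
		fmt.Printf("%s", out)
	}
	// Output:
	// name: Publish
	// image: golang:1.18.4-bullseye
	// depends_on:
	// - Check out code
	// - Download artifacts
}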
type condition struct {
@@ -210,3 +214,43 @@ func (v *value) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
return errors.New("can't unmarshal the value as either string or from_secret reference")
}
type containerResources struct {
Limits *resourceSet `yaml:"limits,omitempty"`
// Not currently supported
// Requests *resourceSet `yaml:"requests,omitempty"`
}
type resourceSet struct {
// Drone does not strictly follow the k8s CRD format for resources here
// See link for details:
// https://docs.drone.io/pipeline/kubernetes/syntax/steps/#resources
// CPU *resourceQuantity `yaml:"cpu,omitempty"`
CPU float64 `yaml:"cpu,omitempty"`
Memory *resourceQuantity `yaml:"memory,omitempty"`
}
// This is a workaround to get resource.Quantity to unmarshal correctly
type resourceQuantity resource.Quantity
func (rq *resourceQuantity) MarshalYAML() (interface{}, error) {
return ((*resource.Quantity)(rq)).String(), nil
}
func (rq *resourceQuantity) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var value string
	if err := unmarshal(&value); err != nil {
		return fmt.Errorf("failed to unmarshal the value into a string: %w", err)
	}
	parsedValue, err := resource.ParseQuantity(value)
	if err != nil {
		return fmt.Errorf("failed to parse string %q as a resource quantity: %w", value, err)
	}
	q := (*resource.Quantity)(rq)
	q.Add(parsedValue)
	return nil
}
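// A round-trip sketch for the workaround above (assuming gopkg.in/yaml.v2 is
// the YAML library in use); illustrative only:
func exampleResourceQuantityYAML() {
	rq := (*resourceQuantity)(resource.NewQuantity(100*1024*1024, resource.BinarySI))
	if out, err := yaml.Marshal(resourceSet{Memory: rq}); err == nil {
		fmt.Printf("%s", out) // memory: 100Mi
	}
	var rs resourceSet
	if err := yaml.Unmarshal([]byte("memory: 256Mi"), &rs); err == nil {
		fmt.Println((*resource.Quantity)(rs.Memory).String()) // 256Mi
	}
}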

dronegen/yum.go (new file)

@@ -0,0 +1,62 @@
// Copyright 2022 Gravitational, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"path"
)
// This function calls the build-os-package-repos tool which handles the YUM portion of RFD 0058.
func promoteYumPipeline() pipeline {
return getYumPipelineBuilder().buildPromoteOsPackagePipeline()
}
func migrateYumPipeline(triggerBranch string, migrationVersions []string) pipeline {
return getYumPipelineBuilder().buildMigrateOsPackagePipeline(triggerBranch, migrationVersions)
}
func getYumPipelineBuilder() *OsPackageToolPipelineBuilder {
optpb := NewOsPackageToolPipelineBuilder(
"drone-s3-yumrepo-pvc",
"rpm",
"yum",
NewRepoBucketSecretNames(
"YUM_REPO_NEW_AWS_S3_BUCKET",
"YUM_REPO_NEW_AWS_ACCESS_KEY_ID",
"YUM_REPO_NEW_AWS_SECRET_ACCESS_KEY",
),
)
optpb.environmentVars["CACHE_DIR"] = value{
raw: path.Join(optpb.pvcMountPoint, "createrepo_cache"),
}
optpb.environmentVars["BUCKET_CACHE_PATH"] = value{
raw: path.Join(optpb.pvcMountPoint, "bucket"),
}
optpb.requiredPackages = []string{
"createrepo-c",
}
optpb.setupCommands = []string{
"mkdir -pv \"$CACHE_DIR\"",
}
optpb.extraArgs = []string{
"-cache-dir \"$CACHE_DIR\"",
}
return optpb
}
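// A minimal sketch of how the YUM pipelines join the generated set; the real
// call sites live in promote.go and the migration configuration, so this
// function is illustrative only:
func exampleYumPipelines() []pipeline {
	ps := []pipeline{promoteYumPipeline()}
	// Migration versions are configured per release and were removed again at
	// the end of this PR, so the steady state is the never-trigger placeholder:
	ps = append(ps, migrateYumPipeline("", nil))
	return ps
}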