diff --git a/.github/workflows/check-docs.sh b/.github/workflows/check-docs.sh index b11f0f68..e2746906 100755 --- a/.github/workflows/check-docs.sh +++ b/.github/workflows/check-docs.sh @@ -2,9 +2,9 @@ set -eu make docs -if ! git diff --quiet docs tasks; then - echo "Docs / tasks are not up-to-date! Run 'make docs' to update." +if ! git diff --quiet docs; then + echo "Docs are not up-to-date! Run 'make docs' to update." exit 1 else - echo "Docs /tasks are up-to-date." + echo "Docs are up-to-date." fi diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 9072872d..7100f699 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -11,139 +11,44 @@ env: IMAGE_BASE: ${{ github.repository }} jobs: - build-images: - name: Build ODS images - runs-on: ubuntu-latest - strategy: - fail-fast: true - matrix: - image: ["aqua-scan", "finish", "go-toolset", "gradle-toolset", "helm", "node16-npm-toolset", "node18-npm-toolset", "package-image", "pipeline-manager", "python-toolset", "sonar", "start"] - steps: - - - name: Checkout - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - with: - driver-opts: | - image=moby/buildkit:master - network=host - - - name: Build image - uses: docker/build-push-action@v3 - with: - context: . - push: false - file: build/package/Dockerfile.${{ matrix.image }} - tags: localhost:5000/ods/ods-${{ matrix.image }}:latest - outputs: type=docker,dest=/tmp/image-ods-${{ matrix.image }}.tar - - - name: Upload artifacts - uses: actions/upload-artifact@v3 - with: - name: buildx-image-ods-${{ matrix.image }} - path: /tmp/image-ods-${{ matrix.image }}.tar - retention-days: 1 - pipeline-tests: name: Tests runs-on: ubuntu-latest - needs: build-images - env: - IMAGES: aqua-scan finish go-toolset gradle-toolset helm node16-npm-toolset node18-npm-toolset package-image pipeline-manager python-toolset sonar start steps: - - - name: Download image artifacts - uses: actions/download-artifact@v3 - with: - path: /tmp - name: Checkout uses: actions/checkout@v3 with: fetch-depth: 0 - - name: Setup KinD cluster with internal registry - working-directory: scripts - run: ./kind-with-registry.sh - - - name: Push images to local registry - run: | - images=(${{ env.IMAGES }}) - for image in ${images[*]} - do - echo "::group::Push ods-$image to local registry" - docker load --input /tmp/buildx-image-ods-$image/image-ods-$image.tar - docker push localhost:5000/ods/ods-$image:latest - if [[ "${{ github.event_name }}" == 'pull_request' ]] - then - docker rmi localhost:5000/ods/ods-$image:latest - fi - echo "::endgroup::" - done - - - name: Delete image tarballs - run: | - rm -rf /tmp/buildx-image-* - - - name: Setup kubectl - uses: azure/setup-kubectl@v3 - id: install - - - name: Install Tekton Core Components - run: make install-tekton-pipelines - - - name: Show disk space - run: df -h - - - name: Spin up Bitbucket container - run: make run-bitbucket - - - name: Spin up Nexus container - run: make run-nexus - - - name: Spin up SonarQube container - run: make run-sonarqube - - - name: Show disk space - run: df -h - - - name: Setup Go 1.19 - uses: actions/setup-go@v3 + name: Setup Go + uses: actions/setup-go@v4 with: - go-version: '1.19' + go-version: '1.21' - name: Check if docs are up-to-date run: ./.github/workflows/check-docs.sh - name: Run tests run: | - set -o pipefail - go test -v ./cmd/... 
| sed ''/PASS/s//$(printf "\033[32mPASS\033[0m")/'' | sed ''/FAIL/s//$(printf "\033[31mFAIL\033[0m")/'' - go test -v ./internal/... | sed ''/PASS/s//$(printf "\033[32mPASS\033[0m")/'' | sed ''/FAIL/s//$(printf "\033[31mFAIL\033[0m")/'' - go test -v ./pkg/... | sed ''/PASS/s//$(printf "\033[32mPASS\033[0m")/'' | sed ''/FAIL/s//$(printf "\033[31mFAIL\033[0m")/'' - go test -timeout 45m -v ./test/tasks/... -always-keep-tmp-workspaces | sed ''/PASS/s//$(printf "\033[32mPASS\033[0m")/'' | sed ''/FAIL/s//$(printf "\033[31mFAIL\033[0m")/'' - go test -timeout 10m -v ./test/e2e/... | sed ''/PASS/s//$(printf "\033[32mPASS\033[0m")/'' | sed ''/FAIL/s//$(printf "\033[31mFAIL\033[0m")/'' - - - name: Log into ghcr.io - if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v1 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Push images to ghcr.io - if: ${{ github.event_name != 'pull_request' }} - run: | - images=(${{ env.IMAGES }}) - for image in ${images[*]} - do - echo "::group::Push ods-$image to ghcr.io" - docker tag localhost:5000/ods/ods-$image:latest ghcr.io/${{ env.IMAGE_BASE }}/ods-$image:latest - docker push ghcr.io/${{ env.IMAGE_BASE }}/ods-$image:latest - echo "::endgroup::" - done + make test + # - + # name: Log into ghcr.io + # if: ${{ github.event_name != 'pull_request' }} + # uses: docker/login-action@v1 + # with: + # registry: ghcr.io + # username: ${{ github.actor }} + # password: ${{ secrets.GITHUB_TOKEN }} + # - + # name: Push images to ghcr.io + # if: ${{ github.event_name != 'pull_request' }} + # run: | + # images=(${{ env.IMAGES }}) + # for image in ${images[*]} + # do + # echo "::group::Push ods-$image to ghcr.io" + # docker tag localhost:5000/ods/ods-$image:latest ghcr.io/${{ env.IMAGE_BASE }}/ods-$image:latest + # docker push ghcr.io/${{ env.IMAGE_BASE }}/ods-$image:latest + # echo "::endgroup::" + # done diff --git a/CHANGELOG.md b/CHANGELOG.md index d54d4474..117c049a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,18 +13,13 @@ listed in the changelog. ### Fixed - ods.yaml branch trigger patterns must be lowercase ([#713](https://github.com/opendevstack/ods-pipeline/issues/713)) -- Go module name was incorrectly set to `github.com/opendevstack/pipeline` - -- sonar-scanner invocations stderr not captured ([#719](https://github.com/opendevstack/ods-pipeline/issues/719)) - -- sonar-scanner does not start properly: java is lacking tzdb.dat ([#723](https://github.com/opendevstack/ods-pipeline/issues/723)) -- update sonar-scanner and cnes-report ([#725](https://github.com/opendevstack/ods-pipeline/issues/725)) - -- SonarQube doesn't scan FE-related code ([#716](https://github.com/opendevstack/ods-pipeline/issues/716)) +- Go module name was incorrectly set to `github.com/opendevstack/pipeline` ### Changed +- Move pipeline tasks to separate repositories. This is a huge change with many implications. Instead of providing build, package and deploy tasks as part of the `ods-pipeline` repository, the tasks are now provided by separate repositories, such as `ods-pipeline-go`, `ods-pipeline-sonar`, `ods-pipeline-image`, `ods-pipeline-helm` and so on. The only tasks that are provided by `ods-pipeline` are the start and finish tasks automatically injected into each pipeline. This change allows each task (or set of tasks) to have a different lifecycle. It also benefits maintenance greatly: running the tests for this repository is much faster now (around 10 minutes compared to 35+ minutes earlier).
This repository facilitates task creation, maintenance and testing by providing a few Go packages that can be used by task repositories such as `ods-pipeline-helm`. For more information, see [#722](https://github.com/opendevstack/ods-pipeline/pull/722). + - Build tasks streamlining and avoidance of file copies (#678 fixed by [#710](https://github.com/opendevstack/ods-pipeline/pull/710)). This is an incompatible change. Build tasks were adjusted to (mostly) no longer copy build files to a dedicated location. Instead, one should adjust the Dockerfile (or other downstream tasks) to directly consume the build outputs from their natural locations. In addition, build task skipping now supports parameter `build-extra-inputs`. The package-image task `dockerfile` and `docker-dir` parameters have been changed to assume that the docker context and file are at the repository root. See the PR for further information and the issue for more context. ## [0.13.2] - 2023-07-18 diff --git a/Makefile b/Makefile index 4056a020..22b2af66 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ help: ## Show this help screen. @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z0-9_-]+:.*?##/ { printf " \033[36m%-25s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) .PHONY: help -check-system: ## Check if system meets prerequisites. +check-system: ## Check if system meets prerequisites for development. cd scripts && ./check-system.sh .PHONY: check-system @@ -36,16 +36,26 @@ lint-shell: ## Run shellcheck. shellcheck scripts/*.sh build/package/scripts/* deploy/*.sh .PHONY: lint-shell -##@ Building - -tasks: ## Render tasks. - go run cmd/tasks/main.go -.PHONY: tasks - -docs: tasks ## Render documentation for tasks. - go run cmd/docs/main.go +docs: ## Render documentation for tasks. + renderedStartTask=$(shell mktemp); \ + helm template ods-pipeline deploy/chart --show-only=templates/task-start.yaml > $$renderedStartTask; \ + go run github.com/opendevstack/ods-pipeline/cmd/taskdoc \ + -task $$renderedStartTask \ + -description build/docs/task-start.adoc \ + -destination docs/task-start.adoc; \ + rm $$renderedStartTask + + renderedFinishTask=$(shell mktemp); \ + helm template ods-pipeline deploy/chart --show-only=templates/task-finish.yaml > $$renderedFinishTask; \ + go run github.com/opendevstack/ods-pipeline/cmd/taskdoc \ + -task $$renderedFinishTask \ + -description build/docs/task-finish.adoc \ + -destination docs/task-finish.adoc; \ + rm $$renderedFinishTask .PHONY: docs +##@ Building + build-artifact-download: build-artifact-download-linux build-artifact-download-darwin-amd64 build-artifact-download-darwin-arm64 build-artifact-download-windows ## Build artifact-download binary for each supported OS/arch. .PHONY: build-artifact-download @@ -67,7 +77,7 @@ build-artifact-download-windows: ## Build artifact-download Windows binary. ##@ Testing -test: test-cmd test-internal test-pkg test-tasks test-e2e ## Run complete testsuite. +test: test-cmd test-internal test-pkg test-e2e ## Run complete testsuite. .PHONY: test test-cmd: ## Run testsuite of cmd packages. @@ -82,69 +92,7 @@ test-pkg: ## Run testsuite of public packages. go test -cover ./pkg/... .PHONY: test-pkg -test-tasks: ## Run testsuite of Tekton tasks. - go test -v -count=1 -timeout $${ODS_TESTTIMEOUT:-30m} ./test/tasks/... -.PHONY: test-tasks - -test-e2e: ## Run testsuite of end-to-end pipeline run.
+test-e2e: ## Run testsuite of tasks and full pipeline run. + go test -v -count=1 -timeout 20m -skip ^TestPipelineRun ./test/e2e/... + go test -v -count=1 -timeout 10m -run ^TestPipelineRun ./test/e2e/... .PHONY: test-e2e - -clear-tmp-workspaces: ## Clear temporary workspaces created in testruns. - chmod -R u+w test/testdata/workspaces/workspace-* - rm -rf test/testdata/workspaces/workspace-* -.PHONY: clear-tmp-workspaces - -##@ KinD (local development environment) - -prepare-local-env: create-kind-with-registry build-and-push-images install-tekton-pipelines run-bitbucket run-nexus run-sonarqube ## Prepare local environment from scratch. -.PHONY: prepare-local-env - -create-kind-with-registry: ## Create KinD cluster with local registry. - cd scripts && ./kind-with-registry.sh -.PHONY: create-kind-with-registry - -install-tekton-pipelines: ## Install Tekton pipelines in KinD cluster. - cd scripts && ./install-tekton-pipelines.sh -.PHONY: install-tekton-pipelines - -build-and-push-images: ## Build and push images to local registry. - cd scripts && ./build-and-push-images.sh -.PHONY: build-and-push-images - -run-bitbucket: ## Run Bitbucket server (using timebomb license, in "kind" network). - cd scripts && ./run-bitbucket.sh -.PHONY: run-bitbucket - -restart-bitbucket: ## Restart Bitbucket server (re-activating timebomb license). - cd scripts && ./restart-bitbucket.sh -.PHONY: restart-bitbucket - -run-nexus: ## Run Nexus server (in "kind" network). - cd scripts && ./run-nexus.sh -.PHONY: run-nexus - -run-sonarqube: ## Run SonarQube server (in "kind" network). - cd scripts && ./run-sonarqube.sh -.PHONY: run-sonarqube - -recreate-kind-cluster: ## Recreate KinD cluster including Tekton tasks. - cd scripts && ./kind-with-registry.sh --recreate - cd scripts && ./install-tekton-pipelines.sh -.PHONY: recreate-kind-cluster - -stop-local-env: ## Stop local environment. - cd scripts && ./stop-local-env.sh -.PHONY: stop-local-env - -start-local-env: ## Restart stopped local environment. - cd scripts && ./start-local-env.sh -.PHONY: start-local-env - -deploy: ## Install ODS pipeline resources in namespace. -ifeq ($(strip $(namespace)),) - @echo "Argument 'namespace' is required, e.g. make deploy namespace=foo-cd" - @exit 1 -endif - cd scripts && ./install-inside-kind.sh -n $(namespace) -.PHONY: deploy diff --git a/README.md b/README.md index fae70125..5bd7dd57 100644 --- a/README.md +++ b/README.md @@ -21,10 +21,8 @@ ODS Pipeline is well suited for regulated development (e.g. 
medical device softw ### Technical Reference * [Repository configuration (ods.yaml)](/docs/ods-configuration.adoc) -* Plumbing tasks: [ods-start](/docs/tasks/ods-start.adoc), [ods-finish](/docs/tasks/ods-finish.adoc) -* Build tasks: [ods-build-go](/docs/tasks/ods-build-go.adoc), [ods-build-gradle](/docs/tasks/ods-build-gradle.adoc), [ods-build-npm](/docs/tasks/ods-build-npm.adoc), [ods-build-python](/docs/tasks/ods-build-python.adoc) -* Package tasks: [ods-package-image](/docs/tasks/ods-package-image.adoc) -* Deploy tasks: [ods-deploy-helm](/docs/tasks/ods-deploy-helm.adoc) +* [Start task](/docs/task-start.adoc) +* [Finish task](/docs/task-finish.adoc) ### How-To Guides * [Working with secrets in Helm](/docs/helm-secrets.adoc) diff --git a/docs/tasks/descriptions/ods-finish.adoc b/build/docs/task-finish.adoc similarity index 100% rename from docs/tasks/descriptions/ods-finish.adoc rename to build/docs/task-finish.adoc diff --git a/docs/tasks/descriptions/ods-start.adoc b/build/docs/task-start.adoc similarity index 100% rename from docs/tasks/descriptions/ods-start.adoc rename to build/docs/task-start.adoc diff --git a/build/package/Dockerfile.finish b/build/images/Dockerfile.finish similarity index 100% rename from build/package/Dockerfile.finish rename to build/images/Dockerfile.finish diff --git a/build/package/Dockerfile.pipeline-manager b/build/images/Dockerfile.pipeline-manager similarity index 100% rename from build/package/Dockerfile.pipeline-manager rename to build/images/Dockerfile.pipeline-manager diff --git a/build/package/Dockerfile.start b/build/images/Dockerfile.start similarity index 100% rename from build/package/Dockerfile.start rename to build/images/Dockerfile.start diff --git a/build/package/scripts/cache-build.sh b/build/images/scripts/cache-build.sh similarity index 100% rename from build/package/scripts/cache-build.sh rename to build/images/scripts/cache-build.sh diff --git a/build/package/scripts/configure-truststore.sh b/build/images/scripts/configure-truststore.sh similarity index 100% rename from build/package/scripts/configure-truststore.sh rename to build/images/scripts/configure-truststore.sh diff --git a/build/package/scripts/copy-artifacts.sh b/build/images/scripts/copy-artifacts.sh similarity index 100% rename from build/package/scripts/copy-artifacts.sh rename to build/images/scripts/copy-artifacts.sh diff --git a/build/package/scripts/copy-build-if-cached.sh b/build/images/scripts/copy-build-if-cached.sh similarity index 100% rename from build/package/scripts/copy-build-if-cached.sh rename to build/images/scripts/copy-build-if-cached.sh diff --git a/build/package/Dockerfile.aqua-scan b/build/package/Dockerfile.aqua-scan deleted file mode 100644 index 7dd630fc..00000000 --- a/build/package/Dockerfile.aqua-scan +++ /dev/null @@ -1,27 +0,0 @@ -FROM golang:1.19 as builder - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] -USER root -WORKDIR /usr/src/app - -# Build Go binary. -COPY go.mod . -COPY go.sum . -RUN go mod download -COPY cmd cmd -COPY internal internal -COPY pkg pkg -RUN cd cmd/aqua-scan && CGO_ENABLED=0 go build -o /usr/local/bin/ods-aqua-scan - -# Final image -# ubi-micro cannot be used as it misses the ca-certificates package. 
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.4 - -COPY --from=builder /usr/local/bin/ods-aqua-scan /usr/local/bin/ods-aqua-scan - -# Add scripts -COPY build/package/scripts/download-aqua-scanner.sh /usr/local/bin/download-aqua-scanner - -VOLUME /workspace/source - -USER 1001 diff --git a/build/package/Dockerfile.go-toolset b/build/package/Dockerfile.go-toolset deleted file mode 100644 index e284cc6d..00000000 --- a/build/package/Dockerfile.go-toolset +++ /dev/null @@ -1,34 +0,0 @@ -FROM registry.access.redhat.com/ubi8/go-toolset:1.18 - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] -USER root - -ENV GOLANGCI_LINT_VERSION=v1.45.2 \ - GO_JUNIT_REPORT_VERSION=v2.0.0 \ - GOBIN=/usr/local/bin - -RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/$GOLANGCI_LINT_VERSION/install.sh | sh -s -- -b /usr/local/bin $GOLANGCI_LINT_VERSION - -RUN go install github.com/jstemmer/go-junit-report/v2@$GO_JUNIT_REPORT_VERSION - -# Add scripts -COPY build/package/scripts/cache-build.sh /usr/local/bin/cache-build -COPY build/package/scripts/copy-build-if-cached.sh /usr/local/bin/copy-build-if-cached -COPY build/package/scripts/copy-artifacts.sh /usr/local/bin/copy-artifacts -COPY build/package/scripts/build-go.sh /usr/local/bin/build-go -COPY build/package/scripts/supply-sonar-project-properties-default.sh /usr/local/bin/supply-sonar-project-properties-default -RUN chmod +x /usr/local/bin/build-go && \ - chmod +x /usr/local/bin/cache-build && \ - chmod +x /usr/local/bin/copy-build-if-cached && \ - chmod +x /usr/local/bin/copy-artifacts && \ - chmod +x /usr/local/bin/supply-sonar-project-properties-default - -# Add sonar-project.properties -COPY build/package/sonar-project.properties.d/go.properties /usr/local/default-sonar-project.properties - -VOLUME /workspace/source -# Ensure that file permissions do not prevent Git checkout into workspace. -# See https://git-scm.com/docs/git-config/#Documentation/git-config.txt-safedirectory. 
-RUN git config --system --add safe.directory '/workspace/source' - -USER 1001 diff --git a/build/package/Dockerfile.gradle-toolset b/build/package/Dockerfile.gradle-toolset deleted file mode 100644 index 6bb9a334..00000000 --- a/build/package/Dockerfile.gradle-toolset +++ /dev/null @@ -1,48 +0,0 @@ -FROM registry.access.redhat.com/ubi8/openjdk-17:1.13 - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -ENV GIT_VERSION=2.39 \ - GRADLE_VERSION=7.4.2 \ - GRADLE_USER_HOME=/workspace/source/.ods-cache/deps/gradle - -ARG GRADLE_DOWNLOAD_SHA256=29e49b10984e585d8118b7d0bc452f944e386458df27371b49b4ac1dec4b7fda -ARG GRADLE_WRAPPER_DOWNLOAD_SHA256=29e49b10984e585d8118b7d0bc452f944e386458df27371b49b4ac1dec4b7fda - -USER root - -RUN microdnf install --nodocs git-${GIT_VERSION}* && microdnf clean all - -# Install Gradle -RUN cd /opt && \ - curl -LO https://services.gradle.org/distributions/gradle-${GRADLE_VERSION}-bin.zip && \ - echo "Checking hash of downloaded gradle distribution" && \ - echo "${GRADLE_DOWNLOAD_SHA256} gradle-${GRADLE_VERSION}-bin.zip" | sha256sum -c - && \ - unzip -d /opt/gradle gradle-${GRADLE_VERSION}-bin.zip && \ - ln -s /opt/gradle/gradle-${GRADLE_VERSION}/bin/gradle /usr/local/bin/gradle && \ - rm gradle-${GRADLE_VERSION}-bin.zip && \ - gradle -v && \ - echo "Loading gradle cache with gradlew ${GRADLE_VERSION} distribution" && \ - mkdir -p /tmp/temp-gradle-app && cd /tmp/temp-gradle-app && touch settings.gradle && \ - gradle wrapper --gradle-distribution-sha256-sum ${GRADLE_WRAPPER_DOWNLOAD_SHA256} && ./gradlew -version && \ - chown -R 1001:0 /workspace/source $HOME && \ - chmod -R g=u /workspace/source $HOME - -VOLUME /workspace/source -# Ensure that file permissions do not prevent Git checkout into workspace. -# See https://git-scm.com/docs/git-config/#Documentation/git-config.txt-safedirectory. -RUN git config --system --add safe.directory '/workspace/source' - -# Add scripts -COPY build/package/scripts/cache-build.sh /usr/local/bin/cache-build -COPY build/package/scripts/copy-build-if-cached.sh /usr/local/bin/copy-build-if-cached -COPY build/package/scripts/copy-artifacts.sh /usr/local/bin/copy-artifacts -COPY build/package/scripts/build-gradle.sh /usr/local/bin/build-gradle -COPY build/package/scripts/supply-sonar-project-properties-default.sh /usr/local/bin/supply-sonar-project-properties-default -COPY build/package/scripts/configure-gradle.sh /usr/local/bin/configure-gradle -COPY build/package/scripts/configure-truststore.sh /usr/local/bin/configure-truststore - -# Add sonar-project.properties -COPY build/package/sonar-project.properties.d/gradle.properties /usr/local/default-sonar-project.properties - -USER 1001 diff --git a/build/package/Dockerfile.helm b/build/package/Dockerfile.helm deleted file mode 100644 index 8817f32c..00000000 --- a/build/package/Dockerfile.helm +++ /dev/null @@ -1,73 +0,0 @@ -FROM golang:1.19 as builder - -ARG TARGETARCH - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] -USER root -WORKDIR /usr/src/app - -ENV HELM_VERSION=3.5.2 \ - SOPS_VERSION=3.7.1 \ - AGE_VERSION=1.0.0 \ - GOBIN=/usr/local/bin - -# Install Helm. -RUN mkdir -p /tmp/helm \ - && cd /tmp \ - && curl -LO https://get.helm.sh/helm-v${HELM_VERSION}-linux-${TARGETARCH}.tar.gz \ - && tar -zxvf helm-v${HELM_VERSION}-linux-${TARGETARCH}.tar.gz -C /tmp/helm \ - && mv /tmp/helm/linux-${TARGETARCH}/helm /usr/local/bin/helm \ - && chmod a+x /usr/local/bin/helm \ - && helm version \ - && helm env - -# Install sops. 
-RUN go install go.mozilla.org/sops/v3/cmd/sops@v${SOPS_VERSION} \ - && sops --version - -# Install age. -RUN go install filippo.io/age/cmd/...@v${AGE_VERSION} \ - && age --version - -# Build Go binary. -COPY go.mod . -COPY go.sum . -RUN go mod download -COPY cmd cmd -COPY internal internal -COPY pkg pkg -RUN cd cmd/deploy-helm && CGO_ENABLED=0 go build -o /usr/local/bin/deploy-helm - -# Final image -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.4 - -ENV HELM_PLUGIN_DIFF_VERSION=3.3.2 \ - HELM_PLUGIN_SECRETS_VERSION=3.10.0 \ - HELM_PLUGINS=/usr/local/helm/plugins \ - SKOPEO_VERSION=1.11 \ - TAR_VERSION=1.30 \ - GIT_VERSION=2.39 \ - FINDUTILS_VERSION=4.6 - -# helm-secrets depends on xargs (from GNU findutils) in it's signal handlers, -# c.f. https://github.com/jkroepke/helm-secrets/blob/main/scripts/commands/helm.sh#L34-L36 -RUN microdnf install --nodocs skopeo-${SKOPEO_VERSION}* git-${GIT_VERSION}* tar-${TAR_VERSION}* findutils-${FINDUTILS_VERSION}* && microdnf clean all - -COPY --from=builder /usr/local/bin/deploy-helm /usr/local/bin/deploy-helm -COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm -COPY --from=builder /usr/local/bin/sops /usr/local/bin/sops -COPY --from=builder /usr/local/bin/age /usr/local/bin/age - -RUN mkdir -p $HELM_PLUGINS \ - && HELM_DATA_HOME=${HELM_PLUGINS%/*} helm plugin install https://github.com/databus23/helm-diff --version v${HELM_PLUGIN_DIFF_VERSION} \ - && HELM_DATA_HOME=${HELM_PLUGINS%/*} helm plugin install https://github.com/jkroepke/helm-secrets --version v${HELM_PLUGIN_SECRETS_VERSION} \ - && ls -lah $HELM_PLUGINS \ - && sops --version \ - && age --version - -VOLUME /workspace/source -# Ensure that file permissions do not prevent Git checkout into workspace. -# See https://git-scm.com/docs/git-config/#Documentation/git-config.txt-safedirectory. -RUN git config --system --add safe.directory '/workspace/source' - -USER 1001 diff --git a/build/package/Dockerfile.node16-npm-toolset b/build/package/Dockerfile.node16-npm-toolset deleted file mode 100644 index a38daac9..00000000 --- a/build/package/Dockerfile.node16-npm-toolset +++ /dev/null @@ -1,41 +0,0 @@ -FROM registry.access.redhat.com/ubi8/nodejs-16:1 - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -ENV NPM_CONFIG_PREFIX=$HOME/.npm-global \ - LANG=en_US.UTF-8 \ - LC_ALL=en_US.UTF-8 - -RUN echo node version: $(node --version) && \ - echo npm version: $(npm --version) && \ - echo npx version: $(npx --version) - -WORKDIR /app - -USER root - -RUN mkdir -p /.npm $HOME/.npm-global/lib && \ - chown -R 1001:0 /app /.npm $HOME && \ - chmod -R g=u /app /.npm $HOME - -# Add scripts -COPY build/package/scripts/build-npm.sh /usr/local/bin/build-npm -COPY build/package/scripts/cache-build.sh /usr/local/bin/cache-build -COPY build/package/scripts/copy-build-if-cached.sh /usr/local/bin/copy-build-if-cached -COPY build/package/scripts/copy-artifacts.sh /usr/local/bin/copy-artifacts -COPY build/package/scripts/supply-sonar-project-properties-default.sh /usr/local/bin/supply-sonar-project-properties-default -RUN chmod +x /usr/local/bin/build-npm && \ - chmod +x /usr/local/bin/cache-build && \ - chmod +x /usr/local/bin/copy-build-if-cached && \ - chmod +x /usr/local/bin/copy-artifacts && \ - chmod +x /usr/local/bin/supply-sonar-project-properties-default - -# Add sonar-project.properties -COPY build/package/sonar-project.properties.d/npm.properties /usr/local/default-sonar-project.properties - -VOLUME /workspace/source -# Ensure that file permissions do not prevent Git checkout into workspace. 
-# See https://git-scm.com/docs/git-config/#Documentation/git-config.txt-safedirectory. -RUN git config --system --add safe.directory '/workspace/source' - -USER 1001 diff --git a/build/package/Dockerfile.node18-npm-toolset b/build/package/Dockerfile.node18-npm-toolset deleted file mode 100644 index 767eb4db..00000000 --- a/build/package/Dockerfile.node18-npm-toolset +++ /dev/null @@ -1,41 +0,0 @@ -FROM registry.access.redhat.com/ubi8/nodejs-18:1 - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -ENV NPM_CONFIG_PREFIX=$HOME/.npm-global \ - LANG=en_US.UTF-8 \ - LC_ALL=en_US.UTF-8 - -RUN echo node version: $(node --version) && \ - echo npm version: $(npm --version) && \ - echo npx version: $(npx --version) - -WORKDIR /app - -USER root - -RUN mkdir -p /.npm $HOME/.npm-global/lib && \ - chown -R 1001:0 /app /.npm $HOME && \ - chmod -R g=u /app /.npm $HOME - -# Add scripts -COPY build/package/scripts/build-npm.sh /usr/local/bin/build-npm -COPY build/package/scripts/cache-build.sh /usr/local/bin/cache-build -COPY build/package/scripts/copy-build-if-cached.sh /usr/local/bin/copy-build-if-cached -COPY build/package/scripts/copy-artifacts.sh /usr/local/bin/copy-artifacts -COPY build/package/scripts/supply-sonar-project-properties-default.sh /usr/local/bin/supply-sonar-project-properties-default -RUN chmod +x /usr/local/bin/build-npm && \ - chmod +x /usr/local/bin/cache-build && \ - chmod +x /usr/local/bin/copy-build-if-cached && \ - chmod +x /usr/local/bin/copy-artifacts && \ - chmod +x /usr/local/bin/supply-sonar-project-properties-default - -# Add sonar-project.properties -COPY build/package/sonar-project.properties.d/npm.properties /usr/local/default-sonar-project.properties - -VOLUME /workspace/source -# Ensure that file permissions do not prevent Git checkout into workspace. -# See https://git-scm.com/docs/git-config/#Documentation/git-config.txt-safedirectory. -RUN git config --system --add safe.directory '/workspace/source' - -USER 1001 diff --git a/build/package/Dockerfile.package-image b/build/package/Dockerfile.package-image deleted file mode 100644 index b0806148..00000000 --- a/build/package/Dockerfile.package-image +++ /dev/null @@ -1,49 +0,0 @@ -FROM golang:1.19 as builder - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] -USER root -WORKDIR /usr/src/app - -# Build Go binary. -COPY go.mod . -COPY go.sum . -RUN go mod download -COPY cmd cmd -COPY internal internal -COPY pkg pkg -RUN cd cmd/package-image && CGO_ENABLED=0 go build -o /usr/local/bin/ods-package-image - -# Final image -# Based on https://catalog.redhat.com/software/containers/detail/5dca3d76dd19c71643b226d5?container-tabs=dockerfile. -FROM registry.access.redhat.com/ubi8:8.7 - -ENV BUILDAH_VERSION=1.29 \ - SKOPEO_VERSION=1.11 \ - TRIVY_VERSION=0.36.0 - -COPY --from=builder /usr/local/bin/ods-package-image /usr/local/bin/ods-package-image - -# Don't include container-selinux and remove -# directories used by yum that are just taking -# up space. -RUN useradd build; \ - dnf -y module enable container-tools:rhel8; \ - dnf -y update; dnf -y reinstall shadow-utils; \ - dnf -y install skopeo-${SKOPEO_VERSION}* buildah-${BUILDAH_VERSION}* fuse-overlayfs /etc/containers/storage.conf; \ - rm -rf /var/cache /var/log/dnf* /var/log/yum.* - -# Adjust storage.conf to enable Fuse storage. 
-RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' /etc/containers/storage.conf -RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock - -# Set up environment variables to note that this is -# not starting with usernamespace and default to -# isolate the filesystem with chroot. -ENV _BUILDAH_STARTED_IN_USERNS="" BUILDAH_ISOLATION=chroot - -VOLUME /var/lib/containers -VOLUME /home/build/.local/share/containers -VOLUME /workspace/source - -# Install Trivy -RUN curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin "v${TRIVY_VERSION}" diff --git a/build/package/Dockerfile.python-toolset b/build/package/Dockerfile.python-toolset deleted file mode 100644 index fcc074d6..00000000 --- a/build/package/Dockerfile.python-toolset +++ /dev/null @@ -1,29 +0,0 @@ -FROM registry.access.redhat.com/ubi8/python-39:1 - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -RUN pip3 config set global.cert /etc/ssl/certs/ca-bundle.crt - -USER root - -# Add scripts -COPY build/package/scripts/build-python.sh /usr/local/bin/build-python -COPY build/package/scripts/cache-build.sh /usr/local/bin/cache-build -COPY build/package/scripts/copy-build-if-cached.sh /usr/local/bin/copy-build-if-cached -COPY build/package/scripts/copy-artifacts.sh /usr/local/bin/copy-artifacts -COPY build/package/scripts/supply-sonar-project-properties-default.sh /usr/local/bin/supply-sonar-project-properties-default -RUN chmod +x /usr/local/bin/build-python && \ - chmod +x /usr/local/bin/cache-build && \ - chmod +x /usr/local/bin/copy-build-if-cached && \ - chmod +x /usr/local/bin/copy-artifacts && \ - chmod +x /usr/local/bin/supply-sonar-project-properties-default - -# Add sonar-project.properties -COPY build/package/sonar-project.properties.d/python.properties /usr/local/default-sonar-project.properties - -VOLUME /workspace/source -# Ensure that file permissions do not prevent Git checkout into workspace. -# See https://git-scm.com/docs/git-config/#Documentation/git-config.txt-safedirectory. -RUN git config --system --add safe.directory '/workspace/source' - -USER 1001 diff --git a/build/package/Dockerfile.sonar b/build/package/Dockerfile.sonar deleted file mode 100644 index 6c71ce9a..00000000 --- a/build/package/Dockerfile.sonar +++ /dev/null @@ -1,63 +0,0 @@ -FROM golang:1.19 as builder - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] -USER root -WORKDIR /usr/src/app - -ENV SONAR_SCANNER_VERSION=4.8.0.2856 \ - CNES_REPORT_VERSION=4.2.0 - -# Build Go binary. -COPY go.mod . -COPY go.sum . -RUN go mod download -COPY cmd cmd -COPY internal internal -COPY pkg pkg -RUN cd cmd/sonar && CGO_ENABLED=0 go build -o /usr/local/bin/sonar - -# Install Sonar Scanner. -RUN apt-get update && apt-get install -y unzip \ - && cd /tmp \ - && curl -LO https://repo1.maven.org/maven2/org/sonarsource/scanner/cli/sonar-scanner-cli/${SONAR_SCANNER_VERSION}/sonar-scanner-cli-${SONAR_SCANNER_VERSION}.zip \ - && unzip sonar-scanner-cli-${SONAR_SCANNER_VERSION}.zip \ - && mv sonar-scanner-${SONAR_SCANNER_VERSION} /usr/local/sonar-scanner-cli - -# Install CNES report. 
-RUN cd /tmp \ - && curl -L https://github.com/cnescatlab/sonar-cnes-report/releases/download/${CNES_REPORT_VERSION}/sonar-cnes-report-${CNES_REPORT_VERSION}.jar -o cnesreport.jar \ - && mkdir /usr/local/cnes \ - && mv cnesreport.jar /usr/local/cnes/cnesreport.jar \ - && chmod +x /usr/local/cnes/cnesreport.jar - -# Final image -FROM registry.access.redhat.com/ubi8/nodejs-18:1 - -ENV NPM_CONFIG_PREFIX=$HOME/.npm-global \ - LANG=en_US.UTF-8 \ - LC_ALL=en_US.UTF-8 - -RUN echo id: $(id) && \ - echo node version: $(node --version) && \ - echo npm version: $(npm --version) && \ - echo npx version: $(npx --version) - -ENV SONAR_EDITION="community" \ - JAVA_HOME=/usr/lib/jvm/jre-11 - -USER root -RUN INSTALL_PKGS="java-11-openjdk-headless which" && \ - yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ - rpm -V $INSTALL_PKGS && \ - yum -y clean all --enablerepo='*' - -COPY --from=builder /usr/local/bin/sonar /usr/local/bin/sonar -COPY --from=builder /usr/local/sonar-scanner-cli /usr/local/sonar-scanner-cli -COPY --from=builder /usr/local/cnes/cnesreport.jar /usr/local/cnes/cnesreport.jar -COPY build/package/scripts/configure-truststore.sh /usr/local/bin/configure-truststore - -ENV PATH=/usr/local/sonar-scanner-cli/bin:$PATH - -VOLUME /workspace/source - -USER 1001 diff --git a/build/package/scripts/build-go.sh b/build/package/scripts/build-go.sh deleted file mode 100755 index 4deddf89..00000000 --- a/build/package/scripts/build-go.sh +++ /dev/null @@ -1,134 +0,0 @@ -#!/bin/bash -set -eu - -copyLintReport() { - cat golangci-lint-report.txt - mkdir -p "${tmp_artifacts_dir}/lint-reports" - cp golangci-lint-report.txt "${tmp_artifacts_dir}/lint-reports/${ARTIFACT_PREFIX}report.txt" -} - -ENABLE_CGO="false" -GO_OS="" -GO_ARCH="" -OUTPUT_DIR="docker" -WORKING_DIR="." -ARTIFACT_PREFIX="" -PRE_TEST_SCRIPT="" -DEBUG="${DEBUG:-false}" - -while [ "$#" -gt 0 ]; do - case $1 in - - --working-dir) WORKING_DIR="$2"; shift;; - --working-dir=*) WORKING_DIR="${1#*=}";; - - --enable-cgo) ENABLE_CGO="$2"; shift;; - --enable-cgo=*) ENABLE_CGO="${1#*=}";; - - --go-os) GO_OS="$2"; shift;; - --go-os=*) GO_OS="${1#*=}";; - - --go-arch) GO_ARCH="$2"; shift;; - --go-arch=*) GO_ARCH="${1#*=}";; - - --output-dir) OUTPUT_DIR="$2"; shift;; - --output-dir=*) OUTPUT_DIR="${1#*=}";; - - --pre-test-script) PRE_TEST_SCRIPT="$2"; shift;; - --pre-test-script=*) PRE_TEST_SCRIPT="${1#*=}";; - - --debug) DEBUG="$2"; shift;; - --debug=*) DEBUG="${1#*=}";; - - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -if [ "${DEBUG}" == "true" ]; then - set -x -fi - -ROOT_DIR=$(pwd) -tmp_artifacts_dir="${ROOT_DIR}/.ods/tmp-artifacts" -# tmp_artifacts_dir enables keeping artifacts created by this build -# separate from other builds in the same repo to facilitate caching. -rm -rf "${tmp_artifacts_dir}" -if [ "${WORKING_DIR}" != "." ]; then - cd "${WORKING_DIR}" - ARTIFACT_PREFIX="${WORKING_DIR/\//-}-" -fi - -echo "Working on Go module in $(pwd) ..." - -go version -if [ "${ENABLE_CGO}" = "false" ]; then - export CGO_ENABLED=0 -fi -if [ -n "${GO_OS}" ]; then - export GOOS="${GO_OS}" -fi -if [ -n "${GO_ARCH}" ]; then - export GOARCH="${GO_ARCH}" -fi -export GOMODCACHE="$ROOT_DIR/.ods-cache/deps/gomod" -echo INFO: Using gomodule cache on repo pvc -echo GOMODCACHE="$GOMODCACHE" -df -h "$ROOT_DIR" - -echo "Checking format ..." -# shellcheck disable=SC2046 -unformatted=$(go fmt $(go list ./...)) -if [ -n "${unformatted}" ]; then - echo "Unformatted files:" - echo "${unformatted}" - echo "All files need to be gofmt'd. 
Please run: gofmt -w ." - exit 1 -fi - -echo "Linting ..." -golangci-lint version -set +e -rm golangci-lint-report.txt &>/dev/null -golangci-lint run > golangci-lint-report.txt -exitcode=$? -set -e -if [ $exitcode == 0 ]; then - echo "OK" > golangci-lint-report.txt - copyLintReport -else - copyLintReport - exit $exitcode -fi - -echo "Testing ..." -if [ -n "${PRE_TEST_SCRIPT}" ]; then - echo "Executing pre-test script ..." - ./"${PRE_TEST_SCRIPT}" -fi -GOPKGS=$(go list ./... | grep -v /vendor) -set +e -rm coverage.out test-results.txt report.xml &>/dev/null -go test -v -coverprofile=coverage.out "$GOPKGS" > test-results.txt 2>&1 -exitcode=$? -set -e -df -h "$ROOT_DIR" -if [ -f test-results.txt ]; then - cat test-results.txt - go-junit-report < test-results.txt > report.xml - mkdir -p "${tmp_artifacts_dir}/xunit-reports" - cp report.xml "${tmp_artifacts_dir}/xunit-reports/${ARTIFACT_PREFIX}report.xml" -else - echo "No test results found" - exit 1 -fi -if [ -f coverage.out ]; then - mkdir -p "${tmp_artifacts_dir}/code-coverage" - cp coverage.out "${tmp_artifacts_dir}/code-coverage/${ARTIFACT_PREFIX}coverage.out" -else - echo "No code coverage found" - exit 1 -fi -if [ $exitcode != 0 ]; then - exit $exitcode -fi -echo "Building ..." -go build -gcflags "all=-trimpath=$(pwd)" -o "${OUTPUT_DIR}/app" diff --git a/build/package/scripts/build-gradle.sh b/build/package/scripts/build-gradle.sh deleted file mode 100755 index 65bdb83d..00000000 --- a/build/package/scripts/build-gradle.sh +++ /dev/null @@ -1,105 +0,0 @@ -#!/bin/bash -set -eu - -# the copy commands are based on GNU cp tools -# On a mac `brew install coreutils` gives `g` prefixed cmd line tools such as gcp -# to use these define env variable GNU_CP=gcp before invoking this script. -CP="${GNU_CP:-cp}" - -output_dir="docker" -working_dir="." -artifact_prefix="" -debug="${DEBUG:-false}" -gradle_build_dir="build" -gradle_additional_tasks= -gradle_options= - -while [ "$#" -gt 0 ]; do - case $1 in - - --working-dir) working_dir="$2"; shift;; - --working-dir=*) working_dir="${1#*=}";; - - --output-dir) output_dir="$2"; shift;; - --output-dir=*) output_dir="${1#*=}";; - - --gradle-build-dir) gradle_build_dir="$2"; shift;; - --gradle-build-dir=*) gradle_build_dir="${1#*=}";; - - --gradle-additional-tasks) gradle_additional_tasks="$2"; shift;; - --gradle-additional-tasks=*) gradle_additional_tasks="${1#*=}";; - - # Gradle project properties ref: https://docs.gradle.org/7.4.2/userguide/build_environment.html#sec:gradle_configuration_properties - # Gradle options ref: https://docs.gradle.org/7.4.2/userguide/command_line_interface.html - --gradle-options) gradle_options="$2"; shift;; - --gradle-options=*) gradle_options="${1#*=}";; - - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -root_dir=$(pwd) -tmp_artifacts_dir="${root_dir}/.ods/tmp-artifacts" -# tmp_artifacts_dir enables keeping artifacts created by this build -# separate from other builds in the same repo to facilitate caching. -rm -rf "${tmp_artifacts_dir}" -if [ "${working_dir}" != "." ]; then - cd "${working_dir}" - artifact_prefix="${working_dir/\//-}-" -fi - -if [ "${debug}" == "true" ]; then - set -x -fi - -echo "Using NEXUS_URL=$NEXUS_URL" -echo "Using GRADLE_OPTS=$GRADLE_OPTS" -echo "Using GRADLE_USER_HOME=$GRADLE_USER_HOME" -mkdir -p "${GRADLE_USER_HOME}" - -configure-gradle - -echo -echo "Working on Gradle project in '${working_dir}'..." 
-echo -echo "Gradlew version: " -./gradlew -version -echo -echo "Note on build environment variables available:" -echo -echo " ODS_OUTPUT_DIR: this environment variable points to the folder " -echo " that this build expects generated application artifacts to be copied to." -echo " The project gradle script should read this env var to copy all the " -echo " generated application artifacts." -echo -export ODS_OUTPUT_DIR=${output_dir} -echo "Exported env var 'ODS_OUTPUT_DIR' with value '${output_dir}'" -echo -echo "Building (Compile and Test) ..." -# shellcheck disable=SC2086 -./gradlew clean build ${gradle_additional_tasks} ${gradle_options} -echo - -echo "Verifying unit test report was generated ..." -unit_test_result_dir="${gradle_build_dir}/test-results/test" -if [ -d "${unit_test_result_dir}" ]; then - unit_test_artifacts_dir="${tmp_artifacts_dir}/xunit-reports" - mkdir -p "${unit_test_artifacts_dir}" - # Each test class produces its own report file, but they contain a fully qualified class - # name in their file name. Due to that, we do not need to add an artifact prefix to - # distinguish them with reports from other artifacts of the same repo/pipeline build. - "$CP" "${unit_test_result_dir}/"*.xml "${unit_test_artifacts_dir}" -else - echo "Build failed: no unit test results found in ${unit_test_result_dir}" - exit 1 -fi - -echo "Verifying unit test coverage report was generated ..." -coverage_result_dir="${gradle_build_dir}/reports/jacoco/test" -if [ -d "${coverage_result_dir}" ]; then - code_coverage_artifacts_dir="${tmp_artifacts_dir}/code-coverage" - mkdir -p "${code_coverage_artifacts_dir}" - "$CP" "${coverage_result_dir}/jacocoTestReport.xml" "${code_coverage_artifacts_dir}/${artifact_prefix}coverage.xml" -else - echo "Build failed: no unit test coverage report was found in ${coverage_result_dir}" - exit 1 -fi diff --git a/build/package/scripts/build-npm.sh b/build/package/scripts/build-npm.sh deleted file mode 100755 index eb3db36d..00000000 --- a/build/package/scripts/build-npm.sh +++ /dev/null @@ -1,107 +0,0 @@ -#!/bin/bash -set -eu - -urlencode() { - local LC_COLLATE=C - local length="${#1}" - for (( i = 0; i < length; i++ )); do - local c="${1:$i:1}" - case $c in - [a-zA-Z0-9.~_-]) printf '%s' "$c" ;; - *) printf '%%%02X' "'$c" ;; - esac - done -} - -copyLintReport() { - cat eslint-report.txt - mkdir -p "${tmp_artifacts_dir}/lint-reports" - cp eslint-report.txt "${tmp_artifacts_dir}/lint-reports/${ARTIFACT_PREFIX}report.txt" -} - -WORKING_DIR="." -ARTIFACT_PREFIX="" -DEBUG="${DEBUG:-false}" - -while [ "$#" -gt 0 ]; do - case $1 in - - --working-dir) WORKING_DIR="$2"; shift;; - --working-dir=*) WORKING_DIR="${1#*=}";; - - --debug) DEBUG="$2"; shift;; - --debug=*) DEBUG="${1#*=}";; - - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -if [ "${DEBUG}" == "true" ]; then - set -x -fi - -ROOT_DIR=$(pwd) -tmp_artifacts_dir="${ROOT_DIR}/.ods/tmp-artifacts" -# tmp_artifacts_dir enables keeping artifacts created by this build -# separate from other builds in the same repo to facilitate caching. -rm -rf "${tmp_artifacts_dir}" -if [ "${WORKING_DIR}" != "." ]; then - cd "${WORKING_DIR}" - ARTIFACT_PREFIX="${WORKING_DIR/\//-}-" -fi - -echo "Configuring npm to use Nexus (${NEXUS_URL}) ..." 
-# Remove the protocol segment from NEXUS_URL -NEXUS_HOST=$(echo "${NEXUS_URL}" | sed -E 's/^\s*.*:\/\///g') -if [ -n "${NEXUS_URL}" ] && [ -n "${NEXUS_USERNAME}" ] && [ -n "${NEXUS_PASSWORD}" ]; then - NEXUS_AUTH="$(urlencode "${NEXUS_USERNAME}"):$(urlencode "${NEXUS_PASSWORD}")" - npm config set registry="$NEXUS_URL"/repository/npmjs/ - npm config set "//${NEXUS_HOST}/repository/npmjs/:_auth"="$(echo -n "$NEXUS_AUTH" | base64)" - npm config set email=no-reply@opendevstack.org - if [ -f /etc/ssl/certs/private-cert.pem ]; then - echo "Configuring private cert ..." - npm config set cafile=/etc/ssl/certs/private-cert.pem - fi -fi; - -echo "package-*.json checks ..." -if [ ! -f package.json ]; then - echo "File package.json not found" - exit 1 -fi -if [ ! -f package-lock.json ]; then - echo "File package-lock.json not found" - exit 1 -fi - -echo "Installing dependencies ..." -npm ci --ignore-scripts - -echo "Linting ..." -set +e -npm run lint > eslint-report.txt -exitcode=$? -set -e - -if [ $exitcode == 0 ]; then - echo "OK" > eslint-report.txt - copyLintReport -else - copyLintReport - exit $exitcode -fi - -echo "Building ..." -npm run build - -echo "Testing ..." -npm run test - -mkdir -p "${tmp_artifacts_dir}/xunit-reports" -cp build/test-results/test/report.xml "${tmp_artifacts_dir}/xunit-reports/${ARTIFACT_PREFIX}report.xml" - -mkdir -p "${tmp_artifacts_dir}/code-coverage" -cp build/coverage/clover.xml "${tmp_artifacts_dir}/code-coverage/${ARTIFACT_PREFIX}clover.xml" - -cp build/coverage/coverage-final.json "${tmp_artifacts_dir}/code-coverage/${ARTIFACT_PREFIX}coverage-final.json" - -cp build/coverage/lcov.info "${tmp_artifacts_dir}/code-coverage/${ARTIFACT_PREFIX}lcov.info" diff --git a/build/package/scripts/build-python.sh b/build/package/scripts/build-python.sh deleted file mode 100755 index ef2c12da..00000000 --- a/build/package/scripts/build-python.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash -set -eu - -urlencode() { - local LC_COLLATE=C - local length="${#1}" - for (( i = 0; i < length; i++ )); do - local c="${1:$i:1}" - case $c in - [a-zA-Z0-9.~_-]) printf '%s' "$c" ;; - *) printf '%%%02X' "'$c" ;; - esac - done -} - -MAX_LINE_LENGTH="120" -WORKING_DIR="." -ARTIFACT_PREFIX="" -PRE_TEST_SCRIPT="" -DEBUG="${DEBUG:-false}" - -while [ "$#" -gt 0 ]; do - case $1 in - - --working-dir) WORKING_DIR="$2"; shift;; - --working-dir=*) WORKING_DIR="${1#*=}";; - - --max-line-length) MAX_LINE_LENGTH="$2"; shift;; - --max-line-length=*) MAX_LINE_LENGTH="${1#*=}";; - - --pre-test-script) PRE_TEST_SCRIPT="$2"; shift;; - --pre-test-script=*) PRE_TEST_SCRIPT="${1#*=}";; - - --debug) DEBUG="$2"; shift;; - --debug=*) DEBUG="${1#*=}";; - - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -if [ "${DEBUG}" == "true" ]; then - set -x -fi - -ROOT_DIR=$(pwd) -tmp_artifacts_dir="${ROOT_DIR}/.ods/tmp-artifacts" -# tmp_artifacts_dir enables keeping artifacts created by this build -# separate from other builds in the same repo to facilitate caching. -rm -rf "${tmp_artifacts_dir}" -if [ "${WORKING_DIR}" != "." ]; then - cd "${WORKING_DIR}" - ARTIFACT_PREFIX="${WORKING_DIR/\//-}-" -fi - -echo "Configuring pip to use Nexus (${NEXUS_URL}) ..." 
-# Remove the protocol segment from NEXUS_URL -NEXUS_HOST=$(echo "${NEXUS_URL}" | sed -E 's/^\s*.*:\/\///g') -if [ -n "${NEXUS_HOST}" ] && [ -n "${NEXUS_USERNAME}" ] && [ -n "${NEXUS_PASSWORD}" ]; then - NEXUS_AUTH="$(urlencode "${NEXUS_USERNAME}"):$(urlencode "${NEXUS_PASSWORD}")" - NEXUS_URL_WITH_AUTH="$(echo "${NEXUS_URL}" | sed -E 's/:\/\//:\/\/'"${NEXUS_AUTH}"@'/g')" - pip3 config set global.index-url "${NEXUS_URL_WITH_AUTH}"/repository/pypi-all/simple - pip3 config set global.trusted-host "${NEXUS_HOST}" - pip3 config set global.extra-index-url https://pypi.org/simple -fi; - -echo "Installing test requirements ..." -# shellcheck source=/dev/null -pip install --upgrade pip -pip install -r tests_requirements.txt -pip check - -echo "Linting ..." -mypy src -flake8 --max-line-length="${MAX_LINE_LENGTH}" src - -if [ -n "${PRE_TEST_SCRIPT}" ]; then - echo "Executing pre-test script ..." - ./"${PRE_TEST_SCRIPT}" -fi - -echo "Testing ..." -rm report.xml coverage.xml &>/dev/null || true -PYTHONPATH=src python -m pytest --junitxml=report.xml -o junit_family=xunit2 --cov-report term-missing --cov-report xml:coverage.xml --cov=src -o testpaths=tests - -mkdir -p "${tmp_artifacts_dir}/xunit-reports" -cp report.xml "${tmp_artifacts_dir}/xunit-reports/${ARTIFACT_PREFIX}report.xml" -mkdir -p "${tmp_artifacts_dir}/code-coverage" -cp coverage.xml "${tmp_artifacts_dir}/code-coverage/${ARTIFACT_PREFIX}coverage.xml" diff --git a/build/package/scripts/configure-gradle.sh b/build/package/scripts/configure-gradle.sh deleted file mode 100755 index 83158ef7..00000000 --- a/build/package/scripts/configure-gradle.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash - -# This script checks for env variable HTTP_PROXY and adds them to gradle.properties. -CONTENT="" - -if [ -f /etc/ssl/certs/private-cert.pem ]; then - truststore_location="$(pwd)/.ods-cache/truststore/cacerts" - truststore_pass="changeit" - echo "Configuring Gradle to trust private cert ..." - configure-truststore --dest-store="${truststore_location}" --dest-storepass="${truststore_pass}" - # shellcheck disable=SC2181 - if [ $? -ne 0 ]; then - exit 1 - fi - # Configure Gradle to use the modified trust store. - CONTENT+="systemProp.javax.net.ssl.trustStore=${truststore_location}\n" - CONTENT+="systemProp.javax.net.ssl.trustStorePassword=${truststore_pass}\n" -fi - -if [ "${HTTP_PROXY}" != "" ]; then - echo "Configuring Gradle to honor HTTP_PROXY ..." - proxy=$(echo "$HTTP_PROXY" | sed -e "s|https://||g" | sed -e "s|http://||g") - proxy_hostp=$(echo "$proxy" | cut -d "@" -f2) - - CONTENT+="systemProp.proxySet=\"true\"\n" - - proxy_host=$(echo "$proxy_hostp" | cut -d ":" -f1) - CONTENT+="systemProp.http.proxyHost=${proxy_host}\n" - CONTENT+="systemProp.https.proxyHost=${proxy_host}\n" - - proxy_port=$(echo "$proxy_hostp" | cut -d ":" -f2) - CONTENT+="systemProp.http.proxyPort=${proxy_port}\n" - CONTENT+="systemProp.https.proxyPort=${proxy_port}\n" - - proxy_userp=$(echo "$proxy" | cut -d "@" -f1) - if [[ $proxy_userp != "$proxy_hostp" ]]; - then - proxy_user=$(echo "$proxy_userp" | cut -d ":" -f1) - CONTENT+="systemProp.http.proxyUser=${proxy_user}\n" - CONTENT+="systemProp.https.proxyUser=${proxy_user}\n" - - # shellcheck disable=SC2001 - proxy_pw=$(echo "$proxy_userp" | sed -e "s|$proxy_user:||g") - CONTENT+="systemProp.http.proxyPassword=${proxy_pw}\n" - CONTENT+="systemProp.https.proxyPassword=${proxy_pw}\n" - fi -fi - -if [ "${NO_PROXY}" != "" ]; then - echo "Configuring Gradle to honor NO_PROXY ..." 
- # shellcheck disable=SC2001 - noproxy_host=$(echo "$NO_PROXY" | sed -e 's|\,\.|\,\*\.|g') - # shellcheck disable=SC2001 - noproxy_host=$(echo "$noproxy_host" | sed -e "s/,/|/g") - CONTENT+="systemProp.http.nonProxyHosts=$noproxy_host\n" - CONTENT+="systemProp.https.nonProxyHosts=$noproxy_host\n" -fi - -if [ "${CONTENT}" != "" ]; then - echo -e "$CONTENT" > "${GRADLE_USER_HOME}/gradle.properties" -fi diff --git a/build/package/scripts/download-aqua-scanner.sh b/build/package/scripts/download-aqua-scanner.sh deleted file mode 100755 index 29e81300..00000000 --- a/build/package/scripts/download-aqua-scanner.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -set -eu - -md5_bin="${MD5_BIN:-"md5sum"}" -aqua_scanner_url="" -bin_dir=".ods-cache/bin" - -while [ "$#" -gt 0 ]; do - case $1 in - - --bin-dir) bin_dir="$2"; shift;; - --bin-dir=*) bin_dir="${1#*=}";; - - --aqua-scanner-url) aqua_scanner_url="$2"; shift;; - --aqua-scanner-url=*) aqua_scanner_url="${1#*=}";; - - --debug) set -x;; - - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -aqua_scanner_path="${bin_dir}/aquasec" -md5_aqua_scanner_url_path="${bin_dir}/.md5-aquasec" -mkdir -p "${bin_dir}" - -# Optionally install Aqua scanner. -# If the binary already exists and was downloaded from the -# URL given by aqua_scanner_url, skip download. -if [ -n "${aqua_scanner_url}" ] && [ "${aqua_scanner_url}" != "none" ]; then - md5_aqua_scanner_url=$(printf "%s" "${aqua_scanner_url}" | ${md5_bin} | cut -d- -f1) - if [ ! -f "${md5_aqua_scanner_url_path}" ] || [ "${md5_aqua_scanner_url}" != "$(cat "${md5_aqua_scanner_url_path}")" ]; then - echo 'Installing Aqua scanner...' - curl -sSf -L "${aqua_scanner_url}" -o aquasec - mv aquasec "${aqua_scanner_path}" - chmod +x "${aqua_scanner_path}" - echo "${md5_aqua_scanner_url}" > "${md5_aqua_scanner_url_path}" - echo 'Installed Aqua scanner version:' - version_output=$("${aqua_scanner_path}" version) - if [ "${version_output}" = "" ]; then - echo "Downloaded binary is broken. Re-run the task." - rm -rf "${bin_dir}" - exit 1 - fi - fi -fi diff --git a/build/package/scripts/supply-sonar-project-properties-default.sh b/build/package/scripts/supply-sonar-project-properties-default.sh deleted file mode 100755 index 8f35ef4d..00000000 --- a/build/package/scripts/supply-sonar-project-properties-default.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -eu - -working_dir="." - -while [ "$#" -gt 0 ]; do - case $1 in - --working-dir) working_dir="$2"; shift;; - --working-dir=*) working_dir="${1#*=}";; - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -echo "Checking for sonar-project.properties ..." -if [ ! -f "${working_dir}/sonar-project.properties" ]; then - echo "No sonar-project.properties present, using default:" - cat /usr/local/default-sonar-project.properties - cp /usr/local/default-sonar-project.properties "${working_dir}/sonar-project.properties" -fi diff --git a/build/package/sonar-project.properties.d/go.properties b/build/package/sonar-project.properties.d/go.properties deleted file mode 100644 index 88e2422f..00000000 --- a/build/package/sonar-project.properties.d/go.properties +++ /dev/null @@ -1,7 +0,0 @@ -sonar.sources=. -sonar.sourceEncoding=UTF-8 -sonar.exclusions=**/*_test.go,**/vendor/**,**/.ods-cache/** -sonar.tests=. 
-sonar.test.inclusions=**/*_test.go -sonar.test.exclusions=**/vendor/** -sonar.go.coverage.reportPaths=coverage.out diff --git a/build/package/sonar-project.properties.d/gradle.properties b/build/package/sonar-project.properties.d/gradle.properties deleted file mode 100644 index 024e42a7..00000000 --- a/build/package/sonar-project.properties.d/gradle.properties +++ /dev/null @@ -1,6 +0,0 @@ -sonar.sources=src -sonar.sourceEncoding=UTF-8 -sonar.coverage.jacoco.xmlReportPaths=build/reports/jacoco/test/jacocoTestReport.xml -sonar.java.binaries=build/classes -sonar.java.libraries=docker -sonar.junit.reportPaths=build/test-results/test diff --git a/build/package/sonar-project.properties.d/npm.properties b/build/package/sonar-project.properties.d/npm.properties deleted file mode 100644 index 41292803..00000000 --- a/build/package/sonar-project.properties.d/npm.properties +++ /dev/null @@ -1,5 +0,0 @@ -sonar.sources=src -sonar.sourceEncoding=UTF-8 -sonar.exclusions=**/*.html,**/*.scss,**/*.json,**/*.ico,**/*.svg,**/.ods-cache/** -sonar.coverage.exclusions=**/*.spec.ts,**/*.module.ts,src/environments/** -sonar.javascript.lcov.reportPaths=build/coverage/lcov.info diff --git a/build/package/sonar-project.properties.d/python.properties b/build/package/sonar-project.properties.d/python.properties deleted file mode 100644 index 1178e2b1..00000000 --- a/build/package/sonar-project.properties.d/python.properties +++ /dev/null @@ -1,8 +0,0 @@ -sonar.sources=src -sonar.sourceEncoding=UTF-8 -sonar.exclusions=**/.ods-cache/** -sonar.tests=tests -sonar.test.inclusions=**/*_test.py -sonar.coverage.exclusions=tests/** -sonar.python.xunit.reportPath=report.xml -sonar.python.coverage.reportPaths=coverage.xml diff --git a/cmd/aqua-scan/aqua.go b/cmd/aqua-scan/aqua.go deleted file mode 100644 index 534313b8..00000000 --- a/cmd/aqua-scan/aqua.go +++ /dev/null @@ -1,107 +0,0 @@ -package main - -import ( - "fmt" - "io" - "net/url" - "os" - "path/filepath" - - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/internal/image" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" -) - -const ( - aquasecBin = "./.ods-cache/bin/aquasec" - scanComplianceFailureExitCode = 4 - scanLicenseValidationFailureExitCode = 5 -) - -// aquaScanURL returns an URL to the given aquaImage. -func aquaScanURL(opts options, aquaImage string) (string, error) { - aquaURL, err := url.Parse(opts.aquaURL) - if err != nil { - return "", fmt.Errorf("parse base URL: %w", err) - } - aquaPath := fmt.Sprintf( - "/#/images/%s/%s/vulns", - url.QueryEscape(opts.aquaRegistry), url.QueryEscape(aquaImage), - ) - fullURL, err := aquaURL.Parse(aquaPath) - if err != nil { - return "", fmt.Errorf("parse URL path: %w", err) - } - return fullURL.String(), nil -} - -// aquaScan runs the scan and returns whether there was a policy incompliance or not. -// An error is returned when the scan cannot be started or encounters failures -// unrelated to policy compliance. -func runScan(exe string, args []string, outWriter, errWriter io.Writer) (bool, error) { - // STDERR contains the scan log output, hence we read it before STDOUT. - // STDOUT contains the scan summary (incl. ASCII table). - return command.RunWithSpecialFailureCode( - exe, args, []string{}, outWriter, errWriter, scanComplianceFailureExitCode, - ) -} - -// aquaAssembleScanArgs creates args/flags to pass to the Aqua scanner based on given arguments. 
-func aquaAssembleScanArgs(opts options, image, htmlReportFile, jsonReportFile string) []string { - return []string{ - "scan", - "--dockerless", "--register", "--text", - fmt.Sprintf("--htmlfile=%s", htmlReportFile), - fmt.Sprintf("--jsonfile=%s", jsonReportFile), - "-w", "/tmp", - fmt.Sprintf("--user=%s", opts.aquaUsername), - fmt.Sprintf("--password=%s", opts.aquaPassword), - fmt.Sprintf("--host=%s", opts.aquaURL), - image, - fmt.Sprintf("--registry=%s", opts.aquaRegistry), - } -} - -// htmlReportFilename returns the HTML report filename for given image. -func htmlReportFilename(iid image.Identity) string { - return fmt.Sprintf("%s.html", iid.ImageStream) -} - -// htmlReportFilename returns the JSON report filename for given image. -func jsonReportFilename(iid image.Identity) string { - return fmt.Sprintf("%s.json", iid.ImageStream) -} - -// reportFilenames returns the list of scan report filenames. -func reportFilenames(iid image.Identity) []string { - return []string{htmlReportFilename(iid), jsonReportFilename(iid)} -} - -// aquaReportsExist checks whether the reports associated with the image name -// exist in the given artifacts path. -func aquaReportsExist(artifactsPath string, iid image.Identity) bool { - d := filepath.Join(artifactsPath, pipelinectxt.AquaScansDir) - for _, f := range reportFilenames(iid) { - if _, err := os.Stat(filepath.Join(d, f)); err != nil { - return false - } - } - return true -} - -// copyAquaReportsToArtifacts copies the Aqua scan reports to the artifacts directory. -func copyAquaReportsToArtifacts(htmlReportFile, jsonReportFile string) error { - if _, err := os.Stat(htmlReportFile); err == nil { - err := pipelinectxt.CopyArtifact(htmlReportFile, pipelinectxt.AquaScansPath) - if err != nil { - return fmt.Errorf("copying HTML report to artifacts failed: %w", err) - } - } - if _, err := os.Stat(jsonReportFile); err == nil { - err := pipelinectxt.CopyArtifact(jsonReportFile, pipelinectxt.AquaScansPath) - if err != nil { - return fmt.Errorf("copying JSON report to artifacts failed: %w", err) - } - } - return nil -} diff --git a/cmd/aqua-scan/aqua_test.go b/cmd/aqua-scan/aqua_test.go deleted file mode 100644 index 09e564b5..00000000 --- a/cmd/aqua-scan/aqua_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package main - -import ( - "bytes" - "strconv" - "testing" -) - -func TestAquaScan(t *testing.T) { - tests := map[string]struct { - cmdExitCode int - wantSuccess bool - wantErr bool - }{ - "scan exits with license validation failure exit code": { - cmdExitCode: scanLicenseValidationFailureExitCode, - wantSuccess: false, - wantErr: true, - }, - "scan exits with compliance failure exit code": { - cmdExitCode: scanComplianceFailureExitCode, - wantSuccess: false, - wantErr: false, - }, - "scan passes": { - cmdExitCode: 0, - wantSuccess: true, - wantErr: false, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - var stdout, stderr bytes.Buffer - success, err := runScan( - "../../test/scripts/exit-with-code.sh", - []string{"", "", strconv.Itoa(tc.cmdExitCode)}, - &stdout, &stderr, - ) - if tc.wantErr && err == nil { - t.Fatal("want err, got none") - } - if !tc.wantErr && err != nil { - t.Fatalf("want no err, got %s", err) - } - if tc.wantSuccess != success { - t.Fatalf("want success=%v, got success=%v", tc.wantSuccess, success) - } - }) - } -} - -func TestAquaScanURL(t *testing.T) { - tests := map[string]struct { - aquaURL string - }{ - "base URL without trailing slash": { - aquaURL: "https://console.example.com", - }, - "base URL with trailing 
slash": { - aquaURL: "https://console.example.com/", - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - opts := options{aquaURL: tc.aquaURL, aquaRegistry: "ods"} - u, err := aquaScanURL(opts, "foo") - if err != nil { - t.Fatal(err) - } - want := "https://console.example.com/#/images/ods/foo/vulns" - if u != want { - t.Fatalf("want: %s, got: %s", want, u) - } - }) - } -} diff --git a/cmd/aqua-scan/bitbucket.go b/cmd/aqua-scan/bitbucket.go deleted file mode 100644 index f918a52d..00000000 --- a/cmd/aqua-scan/bitbucket.go +++ /dev/null @@ -1,56 +0,0 @@ -package main - -import ( - "fmt" - "time" - - "github.com/opendevstack/ods-pipeline/pkg/bitbucket" - "github.com/opendevstack/ods-pipeline/pkg/logging" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" -) - -// createBitbucketInsightReport attaches a code insight report to the Git commit -// being built in Bitbucket. The code insight report points to the Aqua security scan. -func createBitbucketInsightReport(opts options, aquaScanUrl string, success bool, ctxt *pipelinectxt.ODSContext) error { - var logger logging.LeveledLoggerInterface - if opts.debug { - logger = &logging.LeveledLogger{Level: logging.LevelDebug} - } - bitbucketClient, err := bitbucket.NewClient(&bitbucket.ClientConfig{ - APIToken: opts.bitbucketAccessToken, - BaseURL: opts.bitbucketURL, - Logger: logger, - }) - if err != nil { - return fmt.Errorf("bitbucket client: %w", err) - } - reportKey := "org.opendevstack.aquasec" - scanResult := bitbucket.InsightReportFail - if success { - scanResult = bitbucket.InsightReportPass - } - _, err = bitbucketClient.InsightReportCreate( - ctxt.Project, - ctxt.Repository, - ctxt.GitCommitSHA, - reportKey, - bitbucket.InsightReportCreatePayload{ - Title: "Aqua Security", - Reporter: "OpenDevStack", - CreatedDate: time.Now().Unix(), - Details: "Please visit the following link to review the Aqua Security scan report:", - Result: scanResult, - Data: []bitbucket.InsightReportData{ - { - Title: "Report", - Type: "LINK", - Value: map[string]string{ - "linktext": "Result in Aqua", - "href": aquaScanUrl, - }, - }, - }, - }, - ) - return err -} diff --git a/cmd/aqua-scan/main.go b/cmd/aqua-scan/main.go deleted file mode 100644 index 4997094d..00000000 --- a/cmd/aqua-scan/main.go +++ /dev/null @@ -1,77 +0,0 @@ -package main - -import ( - "flag" - "os" - - "github.com/opendevstack/ods-pipeline/internal/image" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "golang.org/x/exp/slog" -) - -type options struct { - checkoutDir string - imageStream string - imageNamespace string - bitbucketAccessToken string - bitbucketURL string - aquaUsername string - aquaPassword string - aquaURL string - aquaRegistry string - aquasecGate bool - debug bool -} - -type aquaScan struct { - opts options - ctxt *pipelinectxt.ODSContext - imageId image.Identity -} - -var defaultOptions = options{ - checkoutDir: ".", - imageStream: "", - imageNamespace: "", - bitbucketAccessToken: os.Getenv("BITBUCKET_ACCESS_TOKEN"), - bitbucketURL: os.Getenv("BITBUCKET_URL"), - aquaUsername: os.Getenv("AQUA_USERNAME"), - aquaPassword: os.Getenv("AQUA_PASSWORD"), - aquaURL: os.Getenv("AQUA_URL"), - aquaRegistry: os.Getenv("AQUA_REGISTRY"), - aquasecGate: false, - debug: (os.Getenv("DEBUG") == "true"), -} - -func main() { - opts := options{} - flag.StringVar(&opts.checkoutDir, "checkout-dir", defaultOptions.checkoutDir, "Checkout dir") - flag.StringVar(&opts.imageStream, "image-stream", defaultOptions.imageStream, "Image stream") - 
flag.StringVar(&opts.imageNamespace, "image-namespace", defaultOptions.imageNamespace, "image namespace") - flag.StringVar(&opts.bitbucketAccessToken, "bitbucket-access-token", defaultOptions.bitbucketAccessToken, "bitbucket-access-token") - flag.StringVar(&opts.bitbucketURL, "bitbucket-url", defaultOptions.bitbucketURL, "bitbucket-url") - flag.StringVar(&opts.aquaUsername, "aqua-username", defaultOptions.aquaUsername, "aqua-username") - flag.StringVar(&opts.aquaPassword, "aqua-password", defaultOptions.aquaPassword, "aqua-password") - flag.StringVar(&opts.aquaURL, "aqua-url", defaultOptions.aquaURL, "aqua-url") - flag.StringVar(&opts.aquaRegistry, "aqua-registry", defaultOptions.aquaRegistry, "aqua-registry") - flag.BoolVar(&opts.aquasecGate, "aqua-gate", defaultOptions.aquasecGate, "whether the Aqua security scan needs to pass for the task to succeed") - flag.BoolVar(&opts.debug, "debug", defaultOptions.debug, "debug mode") - flag.Parse() - - logLevel := slog.LevelInfo - if opts.debug { - logLevel = slog.LevelDebug - } - slog.SetDefault(slog.New(slog.HandlerOptions{Level: logLevel}.NewTextHandler(os.Stderr))) - - err := (&aquaScan{opts: opts}).runSteps( - setupContext(), - setImageId(), - skipIfScanArtifactsExist(), - scanImagesWithAqua(), - ) - if err != nil { - slog.Error("step failed", err) - os.Exit(1) - } -} diff --git a/cmd/aqua-scan/skip.go b/cmd/aqua-scan/skip.go deleted file mode 100644 index d404f22e..00000000 --- a/cmd/aqua-scan/skip.go +++ /dev/null @@ -1,11 +0,0 @@ -package main - -// skipRemainingSteps is a pseudo error used to indicate that remaining -// steps should be skipped. -type skipRemainingSteps struct { - msg string -} - -func (e *skipRemainingSteps) Error() string { - return e.msg -} diff --git a/cmd/aqua-scan/steps.go b/cmd/aqua-scan/steps.go deleted file mode 100644 index e72d71dd..00000000 --- a/cmd/aqua-scan/steps.go +++ /dev/null @@ -1,97 +0,0 @@ -package main - -import ( - "errors" - "fmt" - "os" - "path/filepath" - - "github.com/opendevstack/ods-pipeline/internal/image" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "golang.org/x/exp/slog" -) - -type AquaScanStep func(d *aquaScan) (*aquaScan, error) - -func (s *aquaScan) runSteps(steps ...AquaScanStep) error { - var skip *skipRemainingSteps - var err error - for _, step := range steps { - s, err = step(s) - if err != nil { - if errors.As(err, &skip) { - slog.Info(err.Error()) - return nil - } - return err - } - } - return nil -} - -// setupContext creates an ODS context. -func setupContext() AquaScanStep { - return func(s *aquaScan) (*aquaScan, error) { - ctxt := &pipelinectxt.ODSContext{} - err := ctxt.ReadCache(s.opts.checkoutDir) - if err != nil { - return s, fmt.Errorf("read cache: %w", err) - } - s.ctxt = ctxt - - return s, nil - } -} - -func setImageId() AquaScanStep { - return func(p *aquaScan) (*aquaScan, error) { - p.imageId = image.CreateImageIdentity(p.ctxt, p.opts.imageNamespace, p.opts.imageStream) - return p, nil - } -} - -func skipIfScanArtifactsExist() AquaScanStep { - return func(s *aquaScan) (*aquaScan, error) { - if ok := aquaReportsExist(pipelinectxt.AquaScansPath, s.imageId); ok { - return s, &skipRemainingSteps{fmt.Sprintf("aqua scan artifact exists already for %s", s.imageId.ImageStream)} - } - return s, nil - } -}
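All three task binaries in this patch (aqua-scan, deploy-helm, package-image) share this step-runner shape, where `skipRemainingSteps` acts as a sentinel for a graceful early exit rather than a failure. A condensed, self-contained sketch of the pattern (all names here are illustrative, not repository code):

```go
package main

import (
	"errors"
	"log"
)

// skipRemaining mirrors the skipRemainingSteps sentinel: returning it from a
// step ends the chain gracefully instead of failing the task.
type skipRemaining struct{ msg string }

func (e *skipRemaining) Error() string { return e.msg }

type state struct{ artifactExists bool }

type step func(*state) (*state, error)

func run(s *state, steps ...step) error {
	var skip *skipRemaining
	for _, st := range steps {
		var err error
		if s, err = st(s); err != nil {
			if errors.As(err, &skip) {
				log.Println(err) // graceful early exit, not a failure
				return nil
			}
			return err
		}
	}
	return nil
}

func main() {
	err := run(&state{artifactExists: true},
		func(s *state) (*state, error) {
			if s.artifactExists {
				return s, &skipRemaining{"artifact exists, skipping"}
			}
			return s, nil
		},
		func(s *state) (*state, error) { log.Println("never reached"); return s, nil },
	)
	log.Println("err:", err) // err: <nil>
}
```

- -// scanImagesWithAqua runs the Aqua scanner over each image artifact.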
-func scanImagesWithAqua() AquaScanStep { - return func(s *aquaScan) (*aquaScan, error) { - slog.Info("Scanning image with Aqua scanner ...") - aquaImage := s.imageId.NamespaceStreamSha() - htmlReportFile := filepath.Join(s.opts.checkoutDir, htmlReportFilename(s.imageId)) - jsonReportFile := filepath.Join(s.opts.checkoutDir, jsonReportFilename(s.imageId)) - scanArgs := aquaAssembleScanArgs(s.opts, aquaImage, htmlReportFile, jsonReportFile) - scanSuccessful, err := runScan(aquasecBin, scanArgs, os.Stdout, os.Stderr) - if err != nil { - return s, fmt.Errorf("aqua scan: %w", err) - } - - if !scanSuccessful && s.opts.aquasecGate { - return s, errors.New("stopping build as successful Aqua scan is required") - } - - asu, err := aquaScanURL(s.opts, aquaImage) - if err != nil { - return s, fmt.Errorf("aqua scan URL: %w", err) - } - slog.Info("Aqua vulnerability report is at " + asu) - - err = copyAquaReportsToArtifacts(htmlReportFile, jsonReportFile) - if err != nil { - return s, err - } - - slog.Info("Creating Bitbucket code insight report ...") - err = createBitbucketInsightReport(s.opts, asu, scanSuccessful, s.ctxt) - if err != nil { - return s, err - } - return s, nil - } -} diff --git a/cmd/deploy-helm/age.go b/cmd/deploy-helm/age.go deleted file mode 100644 index 67290ba7..00000000 --- a/cmd/deploy-helm/age.go +++ /dev/null @@ -1,25 +0,0 @@ -package main - -import ( - "fmt" - "os" -) - -const ( - // ageKeyFilePath is the path where to store the age-key-secret openshift secret content, - // required by the helm secrets plugin. - ageKeyFilePath = "./key.txt" -) - -func storeAgeKey(ageKeyContent []byte) error { - file, err := os.Create(ageKeyFilePath) - if err != nil { - return fmt.Errorf("create age key file path: %w", err) - } - defer file.Close() - _, err = file.Write(ageKeyContent) - if err != nil { - return fmt.Errorf("write age key: %w", err) - } - return err -} diff --git a/cmd/deploy-helm/helm.go b/cmd/deploy-helm/helm.go deleted file mode 100644 index adc94a6e..00000000 --- a/cmd/deploy-helm/helm.go +++ /dev/null @@ -1,185 +0,0 @@ -package main - -import ( - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "strings" - - "github.com/google/shlex" - "github.com/opendevstack/ods-pipeline/internal/command" - "sigs.k8s.io/yaml" -) - -const ( - // helmDiffDetectedMarker is the message Helm prints when helm-diff is - // configured to exit with a non-zero exit code when drift is detected. - helmDiffDetectedMarker = `Error: identified at least one change, exiting with non-zero exit code (detailed-exitcode parameter enabled)` - - // desiredDiffMessage is the message that should be presented to the user. - desiredDiffMessage = `plugin "diff" identified at least one change` - - // exit code returned from helm-diff when diff is detected. - diffDriftExitCode = 2 - - // exit code returned from helm-diff when there is an error (e.g. invalid resource manifests). - diffGenericExitCode = 1 -) - -type helmChart struct { - Name string `json:"name"` - Version string `json:"version"` -} - -// helmDiff runs the diff and returns whether the Helm release is in sync. -// An error is returned when the diff cannot be started or encounters failures -// unrelated to drift (such as invalid resource manifests). 
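Both helm invocations below pass `SOPS_AGE_KEY_FILE` so that the helm-secrets plugin can decrypt `secrets*.yaml` values files with the age key written by `storeAgeKey`. A hedged sketch of the equivalent raw `os/exec` wiring; whether the internal `command` package inherits the parent environment like this is an assumption:

```go
package main

import (
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("helm", "secrets", "diff", "upgrade", "--detailed-exitcode")
	cmd.Env = append(os.Environ(),
		"SOPS_AGE_KEY_FILE=./key.txt", // written by storeAgeKey above
		// Works around https://github.com/databus23/helm-diff/issues/278:
		"HELM_DIFF_IGNORE_UNKNOWN_FLAGS=true",
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// With --detailed-exitcode, exit code 2 (diffDriftExitCode) signals
	// drift, not failure; helmDiff below maps it to (inSync=false, err=nil).
	_ = cmd.Run()
}
```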
-func (d *deployHelm) helmDiff(args []string, outWriter, errWriter io.Writer) (bool, error) { - return command.RunWithSpecialFailureCode( - d.helmBin, args, []string{ - fmt.Sprintf("SOPS_AGE_KEY_FILE=%s", ageKeyFilePath), - "HELM_DIFF_IGNORE_UNKNOWN_FLAGS=true", // https://github.com/databus23/helm-diff/issues/278 - }, outWriter, errWriter, diffDriftExitCode, - ) -} - -// helmUpgrade runs given Helm command. -func (d *deployHelm) helmUpgrade(args []string, stdout, stderr io.Writer) error { - return command.Run( - d.helmBin, args, []string{fmt.Sprintf("SOPS_AGE_KEY_FILE=%s", ageKeyFilePath)}, stdout, stderr, - ) -} - -// assembleHelmDiffArgs creates a slice of arguments for "helm diff upgrade". -func (d *deployHelm) assembleHelmDiffArgs() ([]string, error) { - helmDiffArgs := []string{ - "--namespace=" + d.releaseNamespace, - "secrets", - "diff", - "upgrade", - "--detailed-exitcode", - "--no-color", - "--normalize-manifests", - } - helmDiffFlags, err := shlex.Split(d.opts.diffFlags) - if err != nil { - return []string{}, fmt.Errorf("parse diff flags (%s): %s", d.opts.diffFlags, err) - } - helmDiffArgs = append(helmDiffArgs, helmDiffFlags...) - commonArgs, err := d.commonHelmUpgradeArgs() - if err != nil { - return []string{}, fmt.Errorf("upgrade args: %w", err) - } - return append(helmDiffArgs, commonArgs...), nil -} - -// assembleHelmUpgradeArgs creates a slice of arguments for "helm upgrade". -func (d *deployHelm) assembleHelmUpgradeArgs() ([]string, error) { - helmUpgradeArgs := []string{ - "--namespace=" + d.releaseNamespace, - "secrets", - "upgrade", - } - commonArgs, err := d.commonHelmUpgradeArgs() - if err != nil { - return []string{}, fmt.Errorf("upgrade args: %w", err) - } - return append(helmUpgradeArgs, commonArgs...), nil -} - -// commonHelmUpgradeArgs returns arguments common to "helm upgrade" and "helm diff upgrade". -func (d *deployHelm) commonHelmUpgradeArgs() ([]string, error) { - args, err := shlex.Split(d.opts.upgradeFlags) - if err != nil { - return []string{}, fmt.Errorf("parse upgrade flags (%s): %s", d.opts.upgradeFlags, err) - } - if d.opts.debug { - args = append([]string{"--debug"}, args...) - } - if d.targetConfig.APIServer != "" { - args = append( - []string{ - fmt.Sprintf("--kube-apiserver=%s", d.targetConfig.APIServer), - fmt.Sprintf("--kube-token=%s", d.targetConfig.APIToken), - }, - args..., - ) - } - for _, vf := range d.valuesFiles { - args = append(args, fmt.Sprintf("--values=%s", vf)) - } - args = append(args, d.cliValues...) - args = append(args, d.releaseName, d.helmArchive) - return args, nil -} - -// getHelmChart reads given filename into a helmChart struct. -func getHelmChart(filename string) (*helmChart, error) { - y, err := os.ReadFile(filename) - if err != nil { - return nil, fmt.Errorf("read chart file: %w", err) - } - - var hc *helmChart - err = yaml.Unmarshal(y, &hc) - if err != nil { - return nil, fmt.Errorf("unmarshal chart: %w", err) - } - return hc, nil -}
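For orientation, `getHelmChart` feeds `packageHelmChart` further down, which appends the Git commit SHA as SemVer build metadata to the chart version. A small self-contained sketch of the same unmarshalling, with a hypothetical chart:

```go
package main

import (
	"fmt"
	"log"

	"sigs.k8s.io/yaml"
)

// helmChart mirrors the struct defined above.
type helmChart struct {
	Name    string `json:"name"`
	Version string `json:"version"`
}

func main() {
	// Hypothetical Chart.yaml content; sigs.k8s.io/yaml converts YAML to
	// JSON first, which is why the struct uses json tags.
	raw := []byte("name: my-service\nversion: 0.1.0\n")
	var hc helmChart
	if err := yaml.Unmarshal(raw, &hc); err != nil {
		log.Fatal(err)
	}
	sha := "cafe0123" // illustrative commit SHA
	// packageHelmChart appends the SHA as SemVer build metadata ("+..."):
	fmt.Printf("%s-%s+%s.tgz\n", hc.Name, hc.Version, sha)
	// Output: my-service-0.1.0+cafe0123.tgz
}
```

- -// cleanHelmDiffOutput removes error messages from the given Helm output. -// Those error messages are confusing, because they do not come from an actual -// error, but from detecting drift between desired and current Helm state.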
-func cleanHelmDiffOutput(out string) string { - if !strings.Contains(out, helmDiffDetectedMarker) { - return out - } - cleanedOut := strings.Replace( - out, helmDiffDetectedMarker, desiredDiffMessage, -1, - ) - r := regexp.MustCompile(`Error: plugin "(diff|secrets)" exited with error[\n]?`) - cleanedOut = r.ReplaceAllString(cleanedOut, "") - r = regexp.MustCompile(`helm.go:81: \[debug\] plugin "(diff|secrets)" exited with error[\n]?`) - cleanedOut = r.ReplaceAllString(cleanedOut, "") - return cleanedOut -} - -// printlnSafeHelmCmd prints all args that do not contain sensitive information. -func printlnSafeHelmCmd(args []string, outWriter io.Writer) { - safeArgs := []string{} - for _, a := range args { - if strings.HasPrefix(a, "--kube-token=") { - safeArgs = append(safeArgs, "--kube-token=***") - } else { - safeArgs = append(safeArgs, a) - } - } - fmt.Fprintln(outWriter, helmBin, strings.Join(safeArgs, " ")) -} - -// packageHelmChart creates a Helm package for given chart. -func packageHelmChart(chartDir, gitCommitSHA string, debug bool) (string, error) { - hc, err := getHelmChart(filepath.Join(chartDir, "Chart.yaml")) - if err != nil { - return "", fmt.Errorf("read chart: %w", err) - } - packageVersion := fmt.Sprintf("%s+%s", hc.Version, gitCommitSHA) - helmPackageArgs := []string{ - "package", - fmt.Sprintf("--app-version=%s", gitCommitSHA), - fmt.Sprintf("--version=%s", packageVersion), - } - if debug { - helmPackageArgs = append(helmPackageArgs, "--debug") - } - err = command.Run(helmBin, append(helmPackageArgs, chartDir), []string{}, os.Stdout, os.Stderr) - if err != nil { - return "", fmt.Errorf("package chart %s: %w", chartDir, err) - } - - helmArchive := fmt.Sprintf("%s-%s.tgz", hc.Name, packageVersion) - return helmArchive, nil -} diff --git a/cmd/deploy-helm/helm_test.go b/cmd/deploy-helm/helm_test.go deleted file mode 100644 index 2a7966ce..00000000 --- a/cmd/deploy-helm/helm_test.go +++ /dev/null @@ -1,299 +0,0 @@ -package main - -import ( - "bytes" - "strconv" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" -) - -func TestHelmDiff(t *testing.T) { - tests := map[string]struct { - cmdExitCode int - wantInSync bool - wantErr bool - }{ - "diff exits with generic exit code": { - cmdExitCode: diffGenericExitCode, - wantInSync: false, - wantErr: true, - }, - "diff exits with drift exit code": { - cmdExitCode: diffDriftExitCode, - wantInSync: false, - wantErr: false, - }, - "diff passes (no drift)": { - cmdExitCode: 0, - wantInSync: true, - wantErr: false, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - var stdout, stderr bytes.Buffer - d := &deployHelm{helmBin: "../../test/scripts/exit-with-code.sh"} - driftDetected, err := d.helmDiff( - []string{"", "", strconv.Itoa(tc.cmdExitCode)}, - &stdout, &stderr, - ) - if tc.wantErr && err == nil { - t.Fatal("want err, got none") - } - if !tc.wantErr && err != nil { - t.Fatalf("want no err, got %s", err) - } - if tc.wantInSync != driftDetected { - t.Fatalf("want success=%v, got success=%v", tc.wantInSync, driftDetected) - } - }) - } -} - -func TestCleanHelmDiffOutput(t *testing.T) { - tests := map[string]struct { - example string - want string - }{ - "diff detected drift": { - example: `Error: identified at least one change, exiting with non-zero exit code (detailed-exitcode parameter enabled) -Error: plugin "diff" exited with error - -[helm-secrets] Removed: ./chart/secrets.dev.yaml.dec -Error: plugin "secrets" exited with error`, - want: `plugin "diff" identified at least one change - 
-[helm-secrets] Removed: ./chart/secrets.dev.yaml.dec -`, - }, - "diff detected drift with debug turned on": { - example: `Error: identified at least one change, exiting with non-zero exit code (detailed-exitcode parameter enabled) -Error: plugin "diff" exited with error -helm.go:81: [debug] plugin "diff" exited with error - -[helm-secrets] Removed: ./chart/secrets.dev.yaml.dec -Error: plugin "secrets" exited with error -helm.go:81: [debug] plugin "secrets" exited with error`, - want: `plugin "diff" identified at least one change - -[helm-secrets] Removed: ./chart/secrets.dev.yaml.dec -`, - }, - "diff encounters another error": { - example: `Error: This command needs 2 arguments: release name, chart path - -Use "diff [command] --help" for more information about a command. - -Error: plugin "diff" exited with error`, - want: `Error: This command needs 2 arguments: release name, chart path - -Use "diff [command] --help" for more information about a command. - -Error: plugin "diff" exited with error`, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got := cleanHelmDiffOutput(tc.example) - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Fatalf("output mismatch (-want +got):\n%s", diff) - } - }) - } -} - -func TestAssembleHelmDiffArgs(t *testing.T) { - tests := map[string]struct { - releaseNamespace string - releaseName string - helmArchive string - opts options - valuesFiles []string - cliValues []string - want []string - }{ - "default": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: "--install", debug: true}, - want: []string{"--namespace=a", "secrets", "diff", "upgrade", - "--detailed-exitcode", "--no-color", "--normalize-manifests", "--three-way-merge", "--debug", "--install", - "b", "c"}, - }, - "with no diff flags": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "", upgradeFlags: "--install"}, - want: []string{"--namespace=a", "secrets", "diff", "upgrade", - "--detailed-exitcode", "--no-color", "--normalize-manifests", "--install", - "b", "c"}, - }, - "with values file": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: "--install"}, - valuesFiles: []string{"values.dev.yaml"}, - want: []string{"--namespace=a", "secrets", "diff", "upgrade", - "--detailed-exitcode", "--no-color", "--normalize-manifests", "--three-way-merge", "--install", "--values=values.dev.yaml", - "b", "c"}, - }, - "with CLI values": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: "--install"}, - cliValues: []string{"--set=image.tag=abcdef"}, - want: []string{"--namespace=a", "secrets", "diff", "upgrade", - "--detailed-exitcode", "--no-color", "--normalize-manifests", "--three-way-merge", "--install", "--set=image.tag=abcdef", - "b", "c"}, - }, - "with multiple args": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{ - diffFlags: "--three-way-merge --no-hooks --include-tests", - upgradeFlags: "--install --wait", - }, - valuesFiles: []string{"secrets.yaml", "values.dev.yaml", "secrets.dev.yaml"}, - cliValues: []string{"--set=image.tag=abcdef", "--set=x=y"}, - want: []string{"--namespace=a", "secrets", "diff", "upgrade", - "--detailed-exitcode", "--no-color", "--normalize-manifests", - "--three-way-merge", "--no-hooks", "--include-tests", - "--install", "--wait", - 
"--values=secrets.yaml", "--values=values.dev.yaml", "--values=secrets.dev.yaml", - "--set=image.tag=abcdef", "--set=x=y", - "b", "c"}, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - d := &deployHelm{ - releaseNamespace: tc.releaseNamespace, - releaseName: tc.releaseName, - helmArchive: tc.helmArchive, - opts: tc.opts, - valuesFiles: tc.valuesFiles, - cliValues: tc.cliValues, - targetConfig: &targetEnvironment{}, - } - got, err := d.assembleHelmDiffArgs() - if err != nil { - t.Fatal(err) - } - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Fatalf("args mismatch (-want +got):\n%s", diff) - } - }) - } -} - -func TestAssembleHelmUpgradeArgs(t *testing.T) { - tests := map[string]struct { - releaseNamespace string - releaseName string - helmArchive string - opts options - valuesFiles []string - cliValues []string - want []string - }{ - "default": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: "--install --wait", debug: true}, - want: []string{"--namespace=a", "secrets", "upgrade", - "--kube-apiserver=https://example.com", "--kube-token=s3cr3t", - "--debug", - "--install", "--wait", - "b", "c"}, - }, - "with no upgrade flags": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: ""}, - want: []string{"--namespace=a", "secrets", "upgrade", - "--kube-apiserver=https://example.com", "--kube-token=s3cr3t", - "b", "c"}, - }, - "with values file": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: "--install --wait"}, - valuesFiles: []string{"values.dev.yaml"}, - want: []string{"--namespace=a", "secrets", "upgrade", - "--kube-apiserver=https://example.com", "--kube-token=s3cr3t", - "--install", "--wait", - "--values=values.dev.yaml", - "b", "c"}, - }, - "with CLI values": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: "--install --wait"}, - cliValues: []string{"--set=image.tag=abcdef"}, - want: []string{"--namespace=a", "secrets", "upgrade", - "--kube-apiserver=https://example.com", "--kube-token=s3cr3t", - "--install", "--wait", - "--set=image.tag=abcdef", - "b", "c"}, - }, - "with multiple args": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: "--install --atomic"}, - valuesFiles: []string{"secrets.yaml", "values.dev.yaml", "secrets.dev.yaml"}, - cliValues: []string{"--set=image.tag=abcdef", "--set=x=y"}, - want: []string{"--namespace=a", "secrets", "upgrade", - "--kube-apiserver=https://example.com", "--kube-token=s3cr3t", - "--install", "--atomic", - "--values=secrets.yaml", "--values=values.dev.yaml", "--values=secrets.dev.yaml", - "--set=image.tag=abcdef", "--set=x=y", - "b", "c"}, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - d := &deployHelm{ - releaseNamespace: tc.releaseNamespace, - releaseName: tc.releaseName, - helmArchive: tc.helmArchive, - opts: tc.opts, - valuesFiles: tc.valuesFiles, - cliValues: tc.cliValues, - targetConfig: &targetEnvironment{ - APIServer: "https://example.com", - APIToken: "s3cr3t", - }, - } - got, err := d.assembleHelmUpgradeArgs() - if err != nil { - t.Fatal(err) - } - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Fatalf("args mismatch (-want +got):\n%s", diff) - } - }) - } -} - -func 
TestPrintlnSafeHelmCmd(t *testing.T) { - var stdout bytes.Buffer - printlnSafeHelmCmd([]string{"diff", "upgrade", "--kube-apiserver=https://example.com", "--kube-token=s3cr3t", "--debug"}, &stdout) - want := "helm diff upgrade --kube-apiserver=https://example.com --kube-token=*** --debug" - got := strings.TrimSpace(stdout.String()) - if got != want { - t.Fatalf("want: '%s', got: '%s'", want, got) - } -} diff --git a/cmd/deploy-helm/main.go b/cmd/deploy-helm/main.go deleted file mode 100644 index 48b2c5f4..00000000 --- a/cmd/deploy-helm/main.go +++ /dev/null @@ -1,136 +0,0 @@ -package main - -import ( - "flag" - "io/fs" - "os" - - "github.com/opendevstack/ods-pipeline/pkg/logging" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "k8s.io/client-go/kubernetes" -) - -const ( - helmBin = "helm" - kubernetesServiceaccountDir = "/var/run/secrets/kubernetes.io/serviceaccount" -) - -type options struct { - // Name of the Secret resource holding the API user credentials. - apiCredentialsSecret string - // API server of the target cluster, including scheme. - apiServer string - // Target K8s namespace (or OpenShift project) to deploy into. - namespace string - // Hostname of the target registry to push images to. - registryHost string - // Location of checkout directory. - checkoutDir string - // Location of Helm chart directory. - chartDir string - // Name of Helm release. - releaseName string - // Flags to pass to `helm diff upgrade` (in addition to default ones and upgrade flags). - diffFlags string - // Flags to pass to `helm upgrade`. - upgradeFlags string - // Name of K8s secret holding the age key. - ageKeySecret string - // Field name within the K8s secret holding the age key. - ageKeySecretField string - // Location of the certificate directory. - certDir string - // Whether to TLS verify the source image registry. - srcRegistryTLSVerify bool - // Whether to perform just a diff without any upgrade. - diffOnly bool - // Whether to enable debug mode. - debug bool -} - -type deployHelm struct { - logger logging.LeveledLoggerInterface - // Name of helm binary. 
- helmBin string - opts options - releaseName string - releaseNamespace string - targetConfig *targetEnvironment - imageDigests []string - cliValues []string - helmArchive string - valuesFiles []string - clientset *kubernetes.Clientset - subrepos []fs.DirEntry - ctxt *pipelinectxt.ODSContext -} - -var defaultOptions = options{ - checkoutDir: ".", - chartDir: "./chart", - ageKeySecretField: "key.txt", - certDir: defaultCertDir(), - srcRegistryTLSVerify: true, - debug: (os.Getenv("DEBUG") == "true"), -} - -type targetEnvironment struct { - APIServer string - APIToken string - RegistryHost string - RegistryTLSVerify *bool - Namespace string -} - -func main() { - opts := options{} - flag.StringVar(&opts.checkoutDir, "checkout-dir", defaultOptions.checkoutDir, "Checkout dir") - flag.StringVar(&opts.chartDir, "chart-dir", defaultOptions.chartDir, "Chart dir") - flag.StringVar(&opts.releaseName, "release-name", defaultOptions.releaseName, "Name of Helm release") - flag.StringVar(&opts.diffFlags, "diff-flags", defaultOptions.diffFlags, "Flags to pass to `helm diff upgrade` (in addition to default ones and upgrade flags)") - flag.StringVar(&opts.upgradeFlags, "upgrade-flags", defaultOptions.upgradeFlags, "Flags to pass to `helm upgrade`") - flag.StringVar(&opts.ageKeySecret, "age-key-secret", defaultOptions.ageKeySecret, "Name of the secret containing the age key to use for helm-secrets") - flag.StringVar(&opts.ageKeySecretField, "age-key-secret-field", defaultOptions.ageKeySecretField, "Name of the field in the secret holding the age private key") - flag.StringVar(&opts.apiServer, "api-server", defaultOptions.apiServer, "API server of the target cluster, including scheme") - flag.StringVar(&opts.apiCredentialsSecret, "api-credentials-secret", defaultOptions.apiCredentialsSecret, "Name of the Secret resource holding the API user credentials") - flag.StringVar(&opts.registryHost, "registry-host", defaultOptions.registryHost, "Hostname of the target registry to push images to") - flag.StringVar(&opts.namespace, "namespace", defaultOptions.namespace, "Target K8s namespace (or OpenShift project) to deploy into") - flag.StringVar(&opts.certDir, "cert-dir", defaultOptions.certDir, "Use certificates at the specified path to access the registry") - flag.BoolVar(&opts.srcRegistryTLSVerify, "src-registry-tls-verify", defaultOptions.srcRegistryTLSVerify, "TLS verify source registry") - flag.BoolVar(&opts.diffOnly, "diff-only", defaultOptions.diffOnly, "Whether to perform only a diff") - flag.BoolVar(&opts.debug, "debug", defaultOptions.debug, "debug mode") - flag.Parse() - - var logger logging.LeveledLoggerInterface - if opts.debug { - logger = &logging.LeveledLogger{Level: logging.LevelDebug} - } else { - logger = &logging.LeveledLogger{Level: logging.LevelInfo} - } - - err := (&deployHelm{helmBin: helmBin, logger: logger, opts: opts}).runSteps( - setupContext(), - skipOnEmptyNamespace(), - setReleaseTarget(), - detectSubrepos(), - listHelmPlugins(), - packageHelmChartWithSubcharts(), - collectValuesFiles(), - importAgeKey(), - diffHelmRelease(), - detectImageDigests(), - copyImagesIntoReleaseNamespace(), - upgradeHelmRelease(), - ) - if err != nil { - logger.Errorf(err.Error()) - os.Exit(1) - } -} - -func defaultCertDir() string { - if _, err := os.Stat(kubernetesServiceaccountDir); err == nil { - return kubernetesServiceaccountDir - } - return "/etc/containers/certs.d" -} diff --git a/cmd/deploy-helm/skip.go b/cmd/deploy-helm/skip.go deleted file mode 100644 index d404f22e..00000000 --- 
a/cmd/deploy-helm/skip.go +++ /dev/null @@ -1,11 +0,0 @@ -package main - -// skipRemainingSteps is a pseudo error used to indicate that remaining -// steps should be skipped. -type skipRemainingSteps struct { - msg string -} - -func (e *skipRemainingSteps) Error() string { - return e.msg -} diff --git a/cmd/deploy-helm/skopeo.go b/cmd/deploy-helm/skopeo.go deleted file mode 100644 index 67becd69..00000000 --- a/cmd/deploy-helm/skopeo.go +++ /dev/null @@ -1,67 +0,0 @@ -package main - -import ( - "fmt" - "io" - "strings" - - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/pkg/artifact" -) - -func (d *deployHelm) copyImage(imageArtifact artifact.Image, destRegistryToken string, outWriter, errWriter io.Writer) error { - imageStream := imageArtifact.Name - d.logger.Infof("Copying image %s ...", imageStream) - srcImageURL := imageArtifact.Ref - // If the source registry should be TLS verified, the destination - // should be verified by default as well. - destRegistryTLSVerify := d.opts.srcRegistryTLSVerify - srcRegistryTLSVerify := d.opts.srcRegistryTLSVerify - // TLS verification of the KinD registry is not possible at the moment as - // requests error out with "server gave HTTP response to HTTPS client". - if strings.HasPrefix(imageArtifact.Registry, "kind-registry.kind") { - srcRegistryTLSVerify = false - destRegistryTLSVerify = false - } - if d.targetConfig.RegistryHost != "" && d.targetConfig.RegistryTLSVerify != nil { - destRegistryTLSVerify = *d.targetConfig.RegistryTLSVerify - } - destImageURL := getImageDestURL(d.targetConfig.RegistryHost, d.releaseNamespace, imageArtifact) - d.logger.Infof("Source image: %s", srcImageURL) - d.logger.Infof("Destination image: %s", destImageURL) - // TODO: for QA/PROD deployments we may want to ensure that the SHA - // recorded in Nexus matches the SHA referenced by the Git commit tag. 
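The source and destination URLs logged above come from `getImageDestURL`, defined at the end of this file. A runnable sketch of its two branches with sample values (the helper name and inputs here are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// destURL mirrors the logic of getImageDestURL below.
func destURL(registryHost, releaseNamespace, ref, repository, name, tag string) string {
	if registryHost != "" {
		return fmt.Sprintf("%s/%s/%s:%s", registryHost, releaseNamespace, name, tag)
	}
	return strings.Replace(ref, "/"+repository+"/", "/"+releaseNamespace+"/", -1)
}

func main() {
	src := "image-registry.openshift-image-registry.svc:5000/foo/bar:baz"
	// Same cluster (no external registry host): only the namespace is swapped.
	fmt.Println(destURL("", "prod", src, "foo", "bar", "baz"))
	// image-registry.openshift-image-registry.svc:5000/prod/bar:baz

	// External cluster: the registry host is replaced wholesale.
	fmt.Println(destURL("registry.example.com", "prod", src, "foo", "bar", "baz"))
	// registry.example.com/prod/bar:baz
}
```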
- args := []string{ - "copy", - fmt.Sprintf("--src-tls-verify=%v", srcRegistryTLSVerify), - fmt.Sprintf("--dest-tls-verify=%v", destRegistryTLSVerify), - } - if srcRegistryTLSVerify { - args = append(args, fmt.Sprintf("--src-cert-dir=%v", d.opts.certDir)) - } - if destRegistryTLSVerify { - args = append(args, fmt.Sprintf("--dest-cert-dir=%v", d.opts.certDir)) - } - if destRegistryToken != "" { - args = append(args, "--dest-registry-token", destRegistryToken) - } - if d.opts.debug { - args = append(args, "--debug") - } - args = append( - args, fmt.Sprintf("docker://%s", srcImageURL), fmt.Sprintf("docker://%s", destImageURL), - ) - err := command.Run("skopeo", args, []string{}, outWriter, errWriter) - if err != nil { - return fmt.Errorf("skopeo copy %s: %w", srcImageURL, err) - } - return nil -} - -func getImageDestURL(registryHost, releaseNamespace string, imageArtifact artifact.Image) string { - if registryHost != "" { - return fmt.Sprintf("%s/%s/%s:%s", registryHost, releaseNamespace, imageArtifact.Name, imageArtifact.Tag) - } else { - return strings.Replace(imageArtifact.Ref, "/"+imageArtifact.Repository+"/", "/"+releaseNamespace+"/", -1) - } -} diff --git a/cmd/deploy-helm/steps.go b/cmd/deploy-helm/steps.go deleted file mode 100644 index da4aa674..00000000 --- a/cmd/deploy-helm/steps.go +++ /dev/null @@ -1,388 +0,0 @@ -package main - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "log" - "os" - "path/filepath" - "regexp" - "strings" - - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/internal/directory" - "github.com/opendevstack/ods-pipeline/internal/file" - k "github.com/opendevstack/ods-pipeline/internal/kubernetes" - "github.com/opendevstack/ods-pipeline/pkg/artifact" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -const ( - tokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" - subchartsDir = "charts" -) - -type DeployStep func(d *deployHelm) (*deployHelm, error) - -func (d *deployHelm) runSteps(steps ...DeployStep) error { - var skip *skipRemainingSteps - var err error - for _, step := range steps { - d, err = step(d) - if err != nil { - if errors.As(err, &skip) { - d.logger.Infof(err.Error()) - return nil - } - return err - } - } - return nil -} - -func setupContext() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - ctxt := &pipelinectxt.ODSContext{} - err := ctxt.ReadCache(d.opts.checkoutDir) - if err != nil { - return d, fmt.Errorf("read cache: %w", err) - } - d.ctxt = ctxt - - clientset, err := k.NewInClusterClientset() - if err != nil { - return d, fmt.Errorf("create Kubernetes clientset: %w", err) - } - d.clientset = clientset - - if d.opts.debug { - if err := directory.ListFiles(d.opts.certDir, os.Stdout); err != nil { - log.Fatal(err) - } - } - return d, nil - } -} - -func skipOnEmptyNamespace() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - if d.opts.namespace == "" { - return d, &skipRemainingSteps{"No namespace given. 
Skipping deployment ..."} - } - return d, nil - } -} - -func setReleaseTarget() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - // Release name - if d.opts.releaseName != "" { - d.releaseName = d.opts.releaseName - } else { - d.releaseName = d.ctxt.Component - } - d.logger.Infof("Release name: %s", d.releaseName) - - // Target environment configuration - targetConfig := &targetEnvironment{ - APIServer: d.opts.apiServer, - Namespace: d.opts.namespace, - RegistryHost: d.opts.registryHost, - } - if targetConfig.APIServer != "" { - token, err := tokenFromSecret(d.clientset, d.ctxt.Namespace, d.opts.apiCredentialsSecret) - if err != nil { - return d, fmt.Errorf("get API token from secret %s: %w", d.opts.apiCredentialsSecret, err) - } - targetConfig.APIToken = token - } - d.targetConfig = targetConfig - - // Release namespace - d.releaseNamespace = targetConfig.Namespace - pattern := "^[a-z][a-z0-9-]{0,61}[a-z]$" - matched, err := regexp.MatchString(pattern, d.releaseNamespace) - if err != nil || !matched { - return d, fmt.Errorf("release namespace: %s must match %s", d.releaseNamespace, pattern) - } - d.logger.Infof("Release namespace: %s", d.releaseNamespace) - - return d, nil - } -} - -func detectSubrepos() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - subrepos, err := pipelinectxt.DetectSubrepos() - if err != nil { - return d, fmt.Errorf("detect subrepos: %w", err) - } - d.subrepos = subrepos - return d, nil - } -} - -func detectImageDigests() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - digests, err := pipelinectxt.ReadArtifactFilesIncludingSubrepos(pipelinectxt.ImageDigestsPath, d.subrepos) - if err != nil { - return d, fmt.Errorf("collect image digests: %w", err) - } - d.imageDigests = digests - return d, nil - } -} - -func copyImagesIntoReleaseNamespace() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - if len(d.imageDigests) == 0 { - return d, nil - } - // Get destination registry token from secret or file in pod. - var destRegistryToken string - if d.targetConfig.APIToken != "" { - destRegistryToken = d.targetConfig.APIToken - } else { - token, err := getTrimmedFileContent(tokenFile) - if err != nil { - return d, fmt.Errorf("get token from file %s: %w", tokenFile, err) - } - destRegistryToken = token - } - - d.logger.Infof("Copying images into release namespace ...") - for _, artifactFile := range d.imageDigests { - imageArtifact, err := artifact.ReadFromFile(artifactFile) - if err != nil { - return d, fmt.Errorf("read image artifact %s: %w", artifactFile, err) - } - err = d.copyImage(*imageArtifact, destRegistryToken, os.Stdout, os.Stderr) - if err != nil { - return d, fmt.Errorf("copy image %s: %w", imageArtifact.Name, err) - } - } - - return d, nil - } -} - -func listHelmPlugins() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - d.logger.Infof("List Helm plugins...") - helmPluginArgs := []string{"plugin", "list"} - if d.opts.debug { - helmPluginArgs = append(helmPluginArgs, "--debug") - } - err := command.Run(d.helmBin, helmPluginArgs, []string{}, os.Stdout, os.Stderr) - if err != nil { - return d, fmt.Errorf("list Helm plugins: %w", err) - } - return d, nil - } -} - -func packageHelmChartWithSubcharts() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - // Collect values to be set via the CLI. 
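As a concrete illustration of what this step assembles, consider a hypothetical component "app" at commit def5678 with one subrepo whose chart is named "svc" and was last built at commit abc1234; the CLI values then end up shaped as in this sketch:

```go
package main

import "fmt"

func main() {
	cliValues := []string{
		"--set=image.tag=def5678",        // umbrella chart, pipeline commit SHA
		"--set=svc.image.tag=abc1234",    // subchart pinned to its subrepo SHA
		"--set=svc.fullnameOverride=svc", // only when releaseName == component
	}
	fmt.Println(cliValues)
}
```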
- d.cliValues = []string{ - fmt.Sprintf("--set=image.tag=%s", d.ctxt.GitCommitSHA), - } - - d.logger.Infof("Adding dependencies from subrepos into the %s/ directory ...", subchartsDir) - // Find subcharts - chartsDir := filepath.Join(d.opts.chartDir, subchartsDir) - if _, err := os.Stat(chartsDir); os.IsNotExist(err) { - err = os.Mkdir(chartsDir, 0755) - if err != nil { - return d, fmt.Errorf("create %s: %s", chartsDir, err) - } - } - for _, r := range d.subrepos { - subrepo := filepath.Join(pipelinectxt.SubreposPath, r.Name()) - subchart := filepath.Join(subrepo, d.opts.chartDir) - if _, err := os.Stat(subchart); os.IsNotExist(err) { - d.logger.Infof("no chart in %s", r.Name()) - continue - } - gitCommitSHA, err := getTrimmedFileContent(filepath.Join(subrepo, ".ods", "git-commit-sha")) - if err != nil { - return d, fmt.Errorf("get commit SHA of %s: %w", subrepo, err) - } - hc, err := getHelmChart(filepath.Join(subchart, "Chart.yaml")) - if err != nil { - return d, fmt.Errorf("get Helm chart of %s: %w", subrepo, err) - } - d.cliValues = append(d.cliValues, fmt.Sprintf("--set=%s.image.tag=%s", hc.Name, gitCommitSHA)) - if d.releaseName == d.ctxt.Component { - d.cliValues = append(d.cliValues, fmt.Sprintf("--set=%s.fullnameOverride=%s", hc.Name, hc.Name)) - } - helmArchive, err := packageHelmChart(subchart, gitCommitSHA, d.opts.debug) - if err != nil { - return d, fmt.Errorf("package Helm chart of %s: %w", subrepo, err) - } - helmArchiveName := filepath.Base(helmArchive) - d.logger.Infof("copying %s into %s", helmArchiveName, chartsDir) - err = file.Copy(helmArchive, filepath.Join(chartsDir, helmArchiveName)) - if err != nil { - return d, fmt.Errorf("copy Helm archive of %s: %w", subrepo, err) - } - } - - subcharts, err := os.ReadDir(chartsDir) - if err != nil { - return d, fmt.Errorf("read %s: %w", chartsDir, err) - } - if len(subcharts) > 0 { - d.logger.Infof("Subcharts in %s:", chartsDir) - for _, sc := range subcharts { - d.logger.Infof(sc.Name()) - } - } - - d.logger.Infof("Packaging Helm chart ...") - helmArchive, err := packageHelmChart(d.opts.chartDir, d.ctxt.GitCommitSHA, d.opts.debug) - if err != nil { - return d, fmt.Errorf("package Helm chart: %w", err) - } - d.helmArchive = helmArchive - return d, nil - } -} - -func collectValuesFiles() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - d.logger.Infof("Collecting Helm values files ...") - d.valuesFiles = []string{} - valuesFilesCandidates := []string{ - fmt.Sprintf("%s/secrets.yaml", d.opts.chartDir), // equivalent values.yaml is added automatically by Helm - fmt.Sprintf("%s/values.%s.yaml", d.opts.chartDir, d.targetConfig.Namespace), - fmt.Sprintf("%s/secrets.%s.yaml", d.opts.chartDir, d.targetConfig.Namespace), - } - for _, vfc := range valuesFilesCandidates { - if _, err := os.Stat(vfc); os.IsNotExist(err) { - d.logger.Infof("%s is not present, skipping.", vfc) - } else { - d.logger.Infof("%s is present, adding.", vfc) - d.valuesFiles = append(d.valuesFiles, vfc) - } - } - return d, nil - } -} - -func importAgeKey() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - if len(d.opts.ageKeySecret) == 0 { - d.logger.Infof("Skipping import of age key for helm-secrets as parameter is not set ...") - return d, nil - } - d.logger.Infof("Storing age key for helm-secrets ...") - secret, err := d.clientset.CoreV1().Secrets(d.ctxt.Namespace).Get( - context.TODO(), d.opts.ageKeySecret, metav1.GetOptions{}, - ) - if err != nil { - d.logger.Infof("No secret %s found, skipping.", d.opts.ageKeySecret) - return 
d, nil - } - err = storeAgeKey(secret.Data[d.opts.ageKeySecretField]) - if err != nil { - return d, fmt.Errorf("store age key: %w", err) - } - d.logger.Infof("Age key secret %s stored.", d.opts.ageKeySecret) - return d, nil - } -} - -func diffHelmRelease() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - d.logger.Infof("Diffing Helm release against %s...", d.helmArchive) - helmDiffArgs, err := d.assembleHelmDiffArgs() - if err != nil { - return d, fmt.Errorf("assemble helm diff args: %w", err) - } - printlnSafeHelmCmd(helmDiffArgs, os.Stdout) - // helm-diff stderr contains confusing text about "errors" when drift is - // detected, therefore we want to collect and polish it before we print it. - // helm-diff stdout needs to be written into a buffer so that we can both - // print it and store it later as a deployment artifact. - var diffStdoutBuf, diffStderrBuf bytes.Buffer - diffStdoutWriter := io.MultiWriter(os.Stdout, &diffStdoutBuf) - inSync, err := d.helmDiff(helmDiffArgs, diffStdoutWriter, &diffStderrBuf) - fmt.Print(cleanHelmDiffOutput(diffStderrBuf.String())) - if err != nil { - return d, fmt.Errorf("helm diff: %w", err) - } - if d.opts.diffOnly { - return d, &skipRemainingSteps{"Only diff was requested, skipping helm upgrade."} - } - if inSync { - return d, &skipRemainingSteps{"No diff detected, skipping helm upgrade."} - } - - err = writeDeploymentArtifact(diffStdoutBuf.Bytes(), "diff", d.opts.chartDir, d.targetConfig.Namespace) - if err != nil { - return d, fmt.Errorf("write diff artifact: %w", err) - } - return d, nil - } -} - -func upgradeHelmRelease() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - d.logger.Infof("Upgrading Helm release to %s...", d.helmArchive) - helmUpgradeArgs, err := d.assembleHelmUpgradeArgs() - if err != nil { - return d, fmt.Errorf("assemble helm upgrade args: %w", err) - } - printlnSafeHelmCmd(helmUpgradeArgs, os.Stdout) - var upgradeStdoutBuf bytes.Buffer - upgradeStdoutWriter := io.MultiWriter(os.Stdout, &upgradeStdoutBuf) - err = d.helmUpgrade(helmUpgradeArgs, upgradeStdoutWriter, os.Stderr) - if err != nil { - return d, fmt.Errorf("helm upgrade: %w", err) - } - err = writeDeploymentArtifact(upgradeStdoutBuf.Bytes(), "release", d.opts.chartDir, d.targetConfig.Namespace) - if err != nil { - return d, fmt.Errorf("write release artifact: %w", err) - } - return d, nil - } -} - -func getTrimmedFileContent(filename string) (string, error) { - content, err := os.ReadFile(filename) - if err != nil { - return "", err - } - return strings.TrimSpace(string(content)), nil -} - -func tokenFromSecret(clientset *kubernetes.Clientset, namespace, name string) (string, error) { - secret, err := clientset.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return "", err - } - return string(secret.Data["token"]), nil -} - -func writeDeploymentArtifact(content []byte, filename, chartDir, targetEnv string) error { - err := os.MkdirAll(pipelinectxt.DeploymentsPath, 0755) - if err != nil { - return err - } - f := artifactFilename(filename, chartDir, targetEnv) + ".txt" - return os.WriteFile(filepath.Join(pipelinectxt.DeploymentsPath, f), content, 0644) -} - -func artifactFilename(filename, chartDir, targetEnv string) string { - trimmedChartDir := strings.TrimPrefix(chartDir, "./") - if trimmedChartDir != "chart" { - filename = fmt.Sprintf("%s-%s", strings.Replace(trimmedChartDir, "/", "-", -1), filename) - } - return fmt.Sprintf("%s-%s", filename, targetEnv) -} diff --git
a/cmd/deploy-helm/steps_test.go b/cmd/deploy-helm/steps_test.go deleted file mode 100644 index 430fbfe5..00000000 --- a/cmd/deploy-helm/steps_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package main - -import ( - "fmt" - "testing" - - "github.com/opendevstack/ods-pipeline/pkg/artifact" -) - -func TestArtifactFilename(t *testing.T) { - tests := map[string]struct { - filename string - chartDir string - targetEnv string - want string - }{ - "default chart dir": { - filename: "diff", - chartDir: "./chart", - targetEnv: "foo-dev", - want: "diff-foo-dev", - }, - "default chart dir without prefix": { - filename: "diff", - chartDir: "chart", - targetEnv: "dev", - want: "diff-dev", - }, - "other chart dir": { - filename: "diff", - chartDir: "./foo-chart", - targetEnv: "qa", - want: "foo-chart-diff-qa", - }, - "other chart dir without prefix": { - filename: "diff", - chartDir: "bar-chart", - targetEnv: "foo-qa", - want: "bar-chart-diff-foo-qa", - }, - "nested chart dir": { - filename: "diff", - chartDir: "./some/path/chart", - targetEnv: "prod", - want: "some-path-chart-diff-prod", - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got := artifactFilename(tc.filename, tc.chartDir, tc.targetEnv) - if got != tc.want { - t.Fatalf("want: %s, got: %s", tc.want, got) - } - }) - } -} - -func TestGetImageURLs(t *testing.T) { - srcHost := "image-registry.openshift-image-registry.svc:5000" - destHost := "default-route-openshift-image-registry.apps.example.com" - imgArtifact := artifact.Image{ - Ref: fmt.Sprintf("%s/foo/bar:baz", srcHost), - Repository: "foo", Name: "bar", Tag: "baz", - } - tests := map[string]struct { - registryHost string - releaseNamespace string - want string - }{ - "same cluster, same namespace": { - registryHost: "", - releaseNamespace: "foo", - want: fmt.Sprintf("%s/foo/bar:baz", srcHost), - }, - "same cluster, different namespace": { - registryHost: "", - releaseNamespace: "other", - want: fmt.Sprintf("%s/other/bar:baz", srcHost), - }, - "different cluster, same namespace": { - registryHost: destHost, - releaseNamespace: "foo", - want: fmt.Sprintf("%s/foo/bar:baz", destHost), - }, - "different cluster, different namespace": { - registryHost: destHost, - releaseNamespace: "other", - want: fmt.Sprintf("%s/other/bar:baz", destHost), - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got := getImageDestURL(tc.registryHost, tc.releaseNamespace, imgArtifact) - if got != tc.want { - t.Fatalf("want: %s, got: %s", tc.want, got) - } - }) - } -} diff --git a/cmd/docs/main.go b/cmd/docs/main.go deleted file mode 100644 index 20d17847..00000000 --- a/cmd/docs/main.go +++ /dev/null @@ -1,20 +0,0 @@ -package main - -import ( - "log" - "path/filepath" - - "github.com/opendevstack/ods-pipeline/internal/docs" - "github.com/opendevstack/ods-pipeline/internal/projectpath" -) - -func main() { - err := docs.RenderTasks( - filepath.Join(projectpath.Root, "deploy/ods-pipeline/charts/tasks"), - filepath.Join(projectpath.Root, "docs/tasks/descriptions"), - filepath.Join(projectpath.Root, "docs/tasks"), - ) - if err != nil { - log.Fatal(err) - } -} diff --git a/cmd/package-image/buildah.go b/cmd/package-image/buildah.go deleted file mode 100644 index a74cb1bd..00000000 --- a/cmd/package-image/buildah.go +++ /dev/null @@ -1,157 +0,0 @@ -package main - -import ( - "errors" - "fmt" - "io" - "log" - "net/url" - "path/filepath" - "strings" - - "github.com/google/shlex" - "github.com/opendevstack/ods-pipeline/internal/command" -) - -const ( - buildahBin = 
"buildah" - buildahWorkdir = "/tmp" -) - -// buildahBuild builds a local image using the Dockerfile and context directory -// given in opts, tagging the resulting image with given tag. -func (p *packageImage) buildahBuild(outWriter, errWriter io.Writer) error { - args, err := p.buildahBuildArgs(p.imageRef()) - if err != nil { - return fmt.Errorf("assemble build args: %w", err) - } - return command.RunInDir(buildahBin, args, []string{}, buildahWorkdir, outWriter, errWriter) -} - -// buildahPush pushes a local image to a OCI formatted directory for trivy image scans. -func (p *packageImage) buildahPushTar(outWriter, errWriter io.Writer) error { - args := []string{ - fmt.Sprintf("--storage-driver=%s", p.opts.storageDriver), - "push", - fmt.Sprintf("--digestfile=%s", tektonResultsImageDigestFile), - } - if p.opts.debug { - args = append(args, "--log-level=debug") - } - args = append(args, p.imageRef(), fmt.Sprintf("oci:%s", filepath.Join(p.opts.checkoutDir, p.imageName()))) - return command.RunInDir(buildahBin, args, []string{}, buildahWorkdir, outWriter, errWriter) -} - -// buildahPush pushes a local image to the given imageRef. -func (p *packageImage) buildahPush(outWriter, errWriter io.Writer) error { - opts := p.opts - extraArgs, err := shlex.Split(opts.buildahPushExtraArgs) - if err != nil { - log.Printf("could not parse extra args (%s): %s", opts.buildahPushExtraArgs, err) - } - args := []string{ - fmt.Sprintf("--storage-driver=%s", opts.storageDriver), - "push", - fmt.Sprintf("--tls-verify=%v", opts.tlsVerify), - fmt.Sprintf("--cert-dir=%s", opts.certDir), - } - args = append(args, extraArgs...) - if opts.debug { - args = append(args, "--log-level=debug") - } - - source := p.imageId.ImageRefWithSha(opts.registry) - destination := fmt.Sprintf("docker://%s", source) - log.Printf("buildah push %s %s", source, destination) - args = append(args, source, destination) - return command.RunInDir(buildahBin, args, []string{}, buildahWorkdir, outWriter, errWriter) -} - -// buildahBuildArgs assembles the args to be passed to buildah based on -// given options and tag. -func (p *packageImage) buildahBuildArgs(tag string) ([]string, error) { - if tag == "" { - return nil, errors.New("tag must not be empty") - } - opts := p.opts - extraArgs, err := shlex.Split(opts.buildahBuildExtraArgs) - if err != nil { - return nil, fmt.Errorf("parse extra args (%s): %w", opts.buildahBuildExtraArgs, err) - } - - absDir, err := filepath.Abs(opts.checkoutDir) - if err != nil { - return nil, fmt.Errorf("abs dir: %w", err) - } - - args := []string{ - fmt.Sprintf("--storage-driver=%s", opts.storageDriver), - "bud", - fmt.Sprintf("--format=%s", opts.format), - fmt.Sprintf("--tls-verify=%v", opts.tlsVerify), - fmt.Sprintf("--cert-dir=%s", opts.certDir), - "--no-cache", - fmt.Sprintf("--file=%s", opts.dockerfile), - fmt.Sprintf("--tag=%s", tag), - } - args = append(args, extraArgs...) - nexusArgs, err := p.nexusBuildArgs() - if err != nil { - return nil, fmt.Errorf("add nexus build args: %w", err) - } - args = append(args, nexusArgs...) - - if opts.debug { - args = append(args, "--log-level=debug") - } - return append(args, filepath.Join(absDir, opts.contextDir)), nil -} - -// nexusBuildArgs computes --build-arg parameters so that the Dockerfile -// can access nexus as determined by the options nexus related -// parameters. 
-func (p *packageImage) nexusBuildArgs() ([]string, error) { - args := []string{} - opts := p.opts - if strings.TrimSpace(opts.nexusURL) != "" { - nexusUrl, err := url.Parse(opts.nexusURL) - if err != nil { - return nil, fmt.Errorf("could not parse nexus url (%s): %w", opts.nexusURL, err) - } - if nexusUrl.Host == "" { - return nil, fmt.Errorf("could not get host in nexus url (%s)", opts.nexusURL) - } - if opts.nexusUsername != "" { - if opts.nexusPassword == "" { - nexusUrl.User = url.User(opts.nexusUsername) - } else { - nexusUrl.User = url.UserPassword(opts.nexusUsername, opts.nexusPassword) - } - } - nexusAuth := nexusUrl.User.String() // this is encoded as needed. - a := strings.SplitN(nexusAuth, ":", 2) - unEscaped := "" - pwEscaped := "" - if len(a) > 0 { - unEscaped = a[0] - } - if len(a) > 1 { - pwEscaped = a[1] - } - args = []string{ - fmt.Sprintf("--build-arg=nexusUrl=%s", opts.nexusURL), - fmt.Sprintf("--build-arg=nexusUsername=%s", unEscaped), - fmt.Sprintf("--build-arg=nexusPassword=%s", pwEscaped), - fmt.Sprintf("--build-arg=nexusHost=%s", nexusUrl.Host), - } - args = append(args, fmt.Sprintf("--build-arg=nexusAuth=%s", nexusAuth)) - if nexusAuth != "" { - args = append(args, - fmt.Sprintf("--build-arg=nexusUrlWithAuth=%s://%s@%s", nexusUrl.Scheme, nexusAuth, nexusUrl.Host)) - } else { - args = append(args, - fmt.Sprintf("--build-arg=nexusUrlWithAuth=%s", opts.nexusURL)) - } - } - return args, nil -} diff --git a/cmd/package-image/buildah_test.go b/cmd/package-image/buildah_test.go deleted file mode 100644 index 2a626e7f..00000000 --- a/cmd/package-image/buildah_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package main - -import ( - "fmt" - "os" - "path/filepath" - "testing" - - "github.com/google/go-cmp/cmp" -) - -func TestBuildahBuildArgs(t *testing.T) { - basePath, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - dockerDir := filepath.Join(basePath, "docker") - tests := map[string]struct { - opts options - tag string - wantArgs []string - wantErr string - }{ - "with default options": { - opts: defaultOptions, - tag: "foo", - wantArgs: []string{ - "--storage-driver=vfs", "bud", "--format=oci", - "--tls-verify=true", "--cert-dir=/etc/containers/certs.d", - "--no-cache", - "--file=./Dockerfile", "--tag=foo", dockerDir, - }, - }, - "with blank tag": { - opts: defaultOptions, - tag: "", - wantErr: "tag must not be empty", - }, - "with incorrect buildah extra args": { - opts: func(o options) options { o.buildahBuildExtraArgs = "\\"; return o }(defaultOptions), - tag: "foo", - wantErr: "parse extra args (\\): EOF found after escape character", - }, - "with Nexus args": { - opts: func(o options) options { - o.nexusURL = "http://nexus.example.com" - o.nexusUsername = "developer" - o.nexusPassword = "s3cr3t" - return o - }(defaultOptions), - tag: "foo", - wantArgs: []string{ - "--storage-driver=vfs", "bud", "--format=oci", - "--tls-verify=true", "--cert-dir=/etc/containers/certs.d", - "--no-cache", - "--file=./Dockerfile", "--tag=foo", - "--build-arg=nexusUrl=http://nexus.example.com", - "--build-arg=nexusUsername=developer", - "--build-arg=nexusPassword=s3cr3t", - "--build-arg=nexusHost=nexus.example.com", - "--build-arg=nexusAuth=developer:s3cr3t", - "--build-arg=nexusUrlWithAuth=http://developer:s3cr3t@nexus.example.com", - dockerDir, - }, - }, - "with debug on": { - opts: func(o options) options { o.debug = true; return o }(defaultOptions), - tag: "foo", - wantArgs: []string{ - "--storage-driver=vfs", "bud", "--format=oci", - "--tls-verify=true", 
"--cert-dir=/etc/containers/certs.d", - "--no-cache", - "--file=./Dockerfile", "--tag=foo", "--log-level=debug", dockerDir, - }, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - p := packageImage{opts: tc.opts} - got, err := p.buildahBuildArgs(tc.tag) - if err != nil { - if tc.wantErr != err.Error() { - t.Fatalf("want err: '%s', got err: %s", tc.wantErr, err) - } - } - if diff := cmp.Diff(tc.wantArgs, got); diff != "" { - t.Fatalf("args mismatch (-want +got):\n%s", diff) - } - }) - } -} - -func TestNexusBuildArgs(t *testing.T) { - tests := map[string]struct { - nexusUrl string - nexusUsername string - nexusPassword string - baNexusUsername string - baNexusPassword string - baNexusHost string - baNexusAuth string - baNexusUrlWithAuth string - }{ - "simple-password": { - nexusUrl: "https://nexus-ods.example.openshiftapps.com", - nexusUsername: "un", - nexusPassword: "pw", - baNexusUsername: "un", - baNexusPassword: "pw", - baNexusHost: "nexus-ods.example.openshiftapps.com", - baNexusAuth: "un:pw", - baNexusUrlWithAuth: "https://un:pw@nexus-ods.example.openshiftapps.com", - }, - "simple-username-only": { - nexusUrl: "https://nexus-ods.example.openshiftapps.com", - nexusUsername: "un", - nexusPassword: "", - baNexusUsername: "un", - baNexusPassword: "", - baNexusHost: "nexus-ods.example.openshiftapps.com", - baNexusAuth: "un", - baNexusUrlWithAuth: "https://un@nexus-ods.example.openshiftapps.com", - }, - "simple-no-auth": { - nexusUrl: "https://nexus-ods.example.openshiftapps.com", - nexusUsername: "", - nexusPassword: "", - baNexusUsername: "", - baNexusPassword: "", - baNexusHost: "nexus-ods.example.openshiftapps.com", - baNexusAuth: "", - baNexusUrlWithAuth: "https://nexus-ods.example.openshiftapps.com", - }, - "complex-password": { - nexusUrl: "https://nexus-ods.example.openshiftapps.com", - nexusUsername: "user: mypw-to-follow", - nexusPassword: "a secret", - baNexusUsername: "user%3A%20mypw-to-follow", - baNexusPassword: "a%20secret", - baNexusHost: "nexus-ods.example.openshiftapps.com", - baNexusAuth: "user%3A%20mypw-to-follow:a%20secret", - baNexusUrlWithAuth: "https://user%3A%20mypw-to-follow:a%20secret@nexus-ods.example.openshiftapps.com", - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - opts := options{ - nexusURL: tc.nexusUrl, - nexusUsername: tc.nexusUsername, - nexusPassword: tc.nexusPassword, - } - p := packageImage{opts: opts} - args, err := p.nexusBuildArgs() - if err != nil { - t.Fatal(err) - } - - expected := []string{ - fmt.Sprintf("--build-arg=nexusUrl=%s", tc.nexusUrl), - fmt.Sprintf("--build-arg=nexusUsername=%s", tc.baNexusUsername), - fmt.Sprintf("--build-arg=nexusPassword=%s", tc.baNexusPassword), - fmt.Sprintf("--build-arg=nexusHost=%s", tc.baNexusHost), - fmt.Sprintf("--build-arg=nexusAuth=%s", tc.baNexusAuth), - fmt.Sprintf("--build-arg=nexusUrlWithAuth=%s", tc.baNexusUrlWithAuth), - } - if diff := cmp.Diff(expected, args); diff != "" { - t.Fatalf("expected (-want +got):\n%s", diff) - } - }) - } -} diff --git a/cmd/package-image/main.go b/cmd/package-image/main.go deleted file mode 100644 index caf3e49a..00000000 --- a/cmd/package-image/main.go +++ /dev/null @@ -1,167 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/opendevstack/ods-pipeline/internal/image" - "github.com/opendevstack/ods-pipeline/pkg/artifact" - "github.com/opendevstack/ods-pipeline/pkg/logging" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" -) - -const ( - 
kubernetesServiceaccountDir = "/var/run/secrets/kubernetes.io/serviceaccount" - tektonResultsImageDigestFile = "/tekton/results/image-digest" -) - -type options struct { - checkoutDir string - imageStream string - extraTags string - registry string - certDir string - imageNamespace string - tlsVerify bool - storageDriver string - format string - dockerfile string - contextDir string - nexusURL string - nexusUsername string - nexusPassword string - buildahBuildExtraArgs string - buildahPushExtraArgs string - trivySBOMExtraArgs string - debug bool -} - -type packageImage struct { - logger logging.LeveledLoggerInterface - opts options - parsedExtraTags []string - ctxt *pipelinectxt.ODSContext - imageId image.Identity - imageDigest string - sbomFile string -} - -func (p *packageImage) imageName() string { - return p.imageId.StreamSha() -} - -func (p *packageImage) imageNameNoSha() string { - return p.imageId.ImageStream -} - -func (p *packageImage) imageRef() string { - return p.imageId.ImageRefWithSha(p.opts.registry) -} - -func (p *packageImage) artifactImage() artifact.Image { - return p.imageId.ArtifactImage(p.opts.registry, p.imageDigest) -} - -func (p *packageImage) artifactImageForTag(tag string) artifact.Image { - imageExtraTag := p.imageId.Tag(tag) - return imageExtraTag.ArtifactImage(p.opts.registry, p.imageDigest) -} - -var defaultOptions = options{ - checkoutDir: ".", - imageStream: "", - extraTags: "", - registry: "image-registry.openshift-image-registry.svc:5000", - certDir: defaultCertDir(), - imageNamespace: "", - tlsVerify: true, - storageDriver: "vfs", - format: "oci", - dockerfile: "./Dockerfile", - contextDir: "docker", - nexusURL: os.Getenv("NEXUS_URL"), - nexusUsername: os.Getenv("NEXUS_USERNAME"), - nexusPassword: os.Getenv("NEXUS_PASSWORD"), - buildahBuildExtraArgs: "", - buildahPushExtraArgs: "", - trivySBOMExtraArgs: "", - debug: (os.Getenv("DEBUG") == "true"), -} - -func main() { - opts := options{} - flag.StringVar(&opts.checkoutDir, "checkout-dir", defaultOptions.checkoutDir, "Checkout dir") - flag.StringVar(&opts.imageStream, "image-stream", defaultOptions.imageStream, "Image stream") - flag.StringVar(&opts.extraTags, "extra-tags", defaultOptions.extraTags, "Extra tags") - flag.StringVar(&opts.registry, "registry", defaultOptions.registry, "Registry") - flag.StringVar(&opts.certDir, "cert-dir", defaultOptions.certDir, "Use certificates at the specified path to access the registry") - flag.StringVar(&opts.imageNamespace, "image-namespace", defaultOptions.imageNamespace, "image namespace") - flag.BoolVar(&opts.tlsVerify, "tls-verify", defaultOptions.tlsVerify, "TLS verify") - flag.StringVar(&opts.storageDriver, "storage-driver", defaultOptions.storageDriver, "storage driver") - flag.StringVar(&opts.format, "format", defaultOptions.format, "format of the built container, oci or docker") - flag.StringVar(&opts.dockerfile, "dockerfile", defaultOptions.dockerfile, "dockerfile") - flag.StringVar(&opts.contextDir, "context-dir", defaultOptions.contextDir, "contextDir") - flag.StringVar(&opts.nexusURL, "nexus-url", defaultOptions.nexusURL, "Nexus URL") - flag.StringVar(&opts.nexusUsername, "nexus-username", defaultOptions.nexusUsername, "Nexus username") - flag.StringVar(&opts.nexusPassword, "nexus-password", defaultOptions.nexusPassword, "Nexus password") - flag.StringVar(&opts.buildahBuildExtraArgs, "buildah-build-extra-args", defaultOptions.buildahBuildExtraArgs, "extra parameters passed for the build command when building images") - 
flag.StringVar(&opts.buildahPushExtraArgs, "buildah-push-extra-args", defaultOptions.buildahPushExtraArgs, "extra parameters passed for the push command when pushing images") - flag.StringVar(&opts.trivySBOMExtraArgs, "trivy-sbom-extra-args", defaultOptions.trivySBOMExtraArgs, "extra parameters passed for the trivy command to generate an SBOM") - flag.BoolVar(&opts.debug, "debug", defaultOptions.debug, "debug mode") - flag.Parse() - var logger logging.LeveledLoggerInterface - if opts.debug { - logger = &logging.LeveledLogger{Level: logging.LevelDebug} - } else { - logger = &logging.LeveledLogger{Level: logging.LevelInfo} - } - p := packageImage{logger: logger, opts: opts} - err := (&p).runSteps( - setExtraTags(), - setupContext(), - setImageId(), - skipIfImageArtifactExists(), - buildImageAndGenerateTar(), - generateSBOM(), - pushImage(), - storeArtifact(), - ) - if err != nil { - logger.Errorf(err.Error()) - os.Exit(1) - } - // If skipIfImageArtifactExists skips the remaining runSteps, extra-tags - // still should be processed if their related artifact has not been set. - err = (&p).runSteps(processExtraTags()) - if err != nil { - logger.Errorf(err.Error()) - os.Exit(1) - } -} - -func defaultCertDir() string { - if _, err := os.Stat(kubernetesServiceaccountDir); err == nil { - return kubernetesServiceaccountDir - } - return "/etc/containers/certs.d" -} - -// getImageDigestFromFile reads the image digest from the file written to by buildah. -func getImageDigestFromFile(workingDir string) (string, error) { - content, err := os.ReadFile(tektonResultsImageDigestFile) - if err != nil { - return "", err - } - return strings.TrimSpace(string(content)), nil -} - -// imageArtifactExists checks if image artifact JSON file exists in its artifacts path -func imageArtifactExists(p *packageImage) error { - imageArtifactsDir := filepath.Join(p.opts.checkoutDir, pipelinectxt.ImageDigestsPath) - imageArtifactFilename := fmt.Sprintf("%s.json", p.ctxt.Component) - _, err := os.Stat(filepath.Join(imageArtifactsDir, imageArtifactFilename)) - return err -} diff --git a/cmd/package-image/skip.go b/cmd/package-image/skip.go deleted file mode 100644 index d404f22e..00000000 --- a/cmd/package-image/skip.go +++ /dev/null @@ -1,11 +0,0 @@ -package main - -// skipRemainingSteps is a pseudo error used to indicate that remaining -// steps should be skipped. -type skipRemainingSteps struct { - msg string -} - -func (e *skipRemainingSteps) Error() string { - return e.msg -} diff --git a/cmd/package-image/skopeo_tag.go b/cmd/package-image/skopeo_tag.go deleted file mode 100644 index 2e44fea3..00000000 --- a/cmd/package-image/skopeo_tag.go +++ /dev/null @@ -1,45 +0,0 @@ -package main - -import ( - "fmt" - "io" - "strings" - - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/internal/image" -) - -func (p *packageImage) skopeoTag(idt *image.IdentityWithTag, outWriter, errWriter io.Writer) error { - imageRef := idt.ImageRefWithSha(p.opts.registry) - p.logger.Infof("Tagging image %s with %s", imageRef, idt.Tag) - tlsVerify := p.opts.tlsVerify - // TLS verification of the KinD registry is not possible at the moment as - // requests error out with "server gave HTTP response to HTTPS client". 
- if strings.HasPrefix(p.opts.registry, "kind-registry.kind") { - tlsVerify = false - } - args := []string{ - "copy", - fmt.Sprintf("--src-tls-verify=%v", tlsVerify), - fmt.Sprintf("--dest-tls-verify=%v", tlsVerify), - } - if tlsVerify { - args = append(args, - fmt.Sprintf("--src-cert-dir=%v", p.opts.certDir), - fmt.Sprintf("--dest-cert-dir=%v", p.opts.certDir)) - } - if p.opts.debug { - args = append(args, "--debug") - } - source := fmt.Sprintf("docker://%s", imageRef) - destination := fmt.Sprintf("docker://%s", idt.ImageRef(p.opts.registry)) - - args = append(args, source, destination) - - p.logger.Infof("skopeo copy %s %s", source, destination) - err := command.Run("skopeo", args, []string{}, outWriter, errWriter) - if err != nil { - return fmt.Errorf("skopeo copy %s to %s: %w", source, destination, err) - } - return nil -} diff --git a/cmd/package-image/steps.go b/cmd/package-image/steps.go deleted file mode 100644 index e85309c9..00000000 --- a/cmd/package-image/steps.go +++ /dev/null @@ -1,187 +0,0 @@ -package main - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/google/shlex" - "github.com/opendevstack/ods-pipeline/internal/directory" - "github.com/opendevstack/ods-pipeline/internal/image" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" -) - -type PackageStep func(d *packageImage) (*packageImage, error) - -func (d *packageImage) runSteps(steps ...PackageStep) error { - var skip *skipRemainingSteps - var err error - for _, step := range steps { - d, err = step(d) - if err != nil { - if errors.As(err, &skip) { - d.logger.Infof(err.Error()) - return nil - } - return err - } - } - return nil -} - -func setupContext() PackageStep { - return func(p *packageImage) (*packageImage, error) { - ctxt := &pipelinectxt.ODSContext{} - err := ctxt.ReadCache(p.opts.checkoutDir) - if err != nil { - return p, fmt.Errorf("read cache: %w", err) - } - p.ctxt = ctxt - - if p.opts.debug { - if err := directory.ListFiles(p.opts.certDir, os.Stdout); err != nil { - p.logger.Errorf(err.Error()) - } - } - - // TLS verification of the KinD registry is not possible at the moment as - // requests error out with "server gave HTTP response to HTTPS client". - if strings.HasPrefix(p.opts.registry, "kind-registry.kind") { - p.opts.tlsVerify = false - } - - return p, nil - } -} - -func setExtraTags() PackageStep { - return func(p *packageImage) (*packageImage, error) { - extraTagsSpecified, err := shlex.Split(p.opts.extraTags) - if err != nil { - return p, fmt.Errorf("parse extra tags (%s): %w", p.opts.extraTags, err) - } - p.parsedExtraTags = extraTagsSpecified - return p, nil - } -} - -func setImageId() PackageStep { - return func(p *packageImage) (*packageImage, error) { - p.imageId = image.CreateImageIdentity(p.ctxt, p.opts.imageNamespace, p.opts.imageStream) - return p, nil - } -} - -// skipIfImageArtifactExists informs to skip next steps if ODS image artifact is already in place. -// In future we might want to check all the expected artifacts, that must exist to do skip properly. 
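The removed step runner above threads a single context struct through a list of step functions and short-circuits on a sentinel error, which the skipIfImageArtifactExists step below uses to bail out without failing the run. A minimal, runnable sketch of that pattern, using hypothetical names rather than the removed implementation:

    package main

    import (
        "errors"
        "fmt"
    )

    // skipRemaining is a sentinel error type: returning it from a step
    // ends the chain early without failing the overall run.
    type skipRemaining struct{ msg string }

    func (e *skipRemaining) Error() string { return e.msg }

    type ctx struct{ artifactExists bool }

    type step func(*ctx) error

    // run executes steps in order; a *skipRemaining error stops the chain
    // cleanly, any other error is propagated to the caller.
    func run(c *ctx, steps ...step) error {
        var skip *skipRemaining
        for _, s := range steps {
            if err := s(c); err != nil {
                if errors.As(err, &skip) {
                    fmt.Println("skipping remaining steps:", err)
                    return nil
                }
                return err
            }
        }
        return nil
    }

    func main() {
        c := &ctx{artifactExists: true}
        err := run(c,
            func(c *ctx) error {
                if c.artifactExists {
                    return &skipRemaining{msg: "image artifact exists already"}
                }
                return nil
            },
            func(c *ctx) error { fmt.Println("building image"); return nil },
        )
        if err != nil {
            fmt.Println("error:", err)
        }
    }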
-func skipIfImageArtifactExists() PackageStep { - return func(p *packageImage) (*packageImage, error) { - fmt.Printf("Checking if image artifact for %s exists already ...\n", p.imageName()) - err := imageArtifactExists(p) - if err == nil { - return p, &skipRemainingSteps{"image artifact exists already"} - } - return p, nil - } -} - -func buildImageAndGenerateTar() PackageStep { - return func(p *packageImage) (*packageImage, error) { - fmt.Printf("Building image %s ...\n", p.imageName()) - err := p.buildahBuild(os.Stdout, os.Stderr) - if err != nil { - return p, fmt.Errorf("buildah bud: %w", err) - } - fmt.Printf("Creating local tar folder for image %s ...\n", p.imageName()) - err = p.buildahPushTar(os.Stdout, os.Stderr) - if err != nil { - return p, fmt.Errorf("buildah push tar: %w", err) - } - d, err := getImageDigestFromFile(p.opts.checkoutDir) - if err != nil { - return p, err - } - p.imageDigest = d - return p, nil - } -} - -func generateSBOM() PackageStep { - return func(p *packageImage) (*packageImage, error) { - fmt.Println("Generating image SBOM with trivy scanner ...") - err := p.generateImageSBOM() - if err != nil { - return p, fmt.Errorf("generate SBOM: %w", err) - } - return p, nil - } -} - -func pushImage() PackageStep { - return func(p *packageImage) (*packageImage, error) { - fmt.Printf("Pushing image %s ...\n", p.imageName()) - err := p.buildahPush(os.Stdout, os.Stderr) - if err != nil { - return p, fmt.Errorf("buildah push: %w", err) - } - return p, nil - } -} - -func storeArtifact() PackageStep { - return func(p *packageImage) (*packageImage, error) { - fmt.Println("Writing image artifact ...") - imageArtifactFilename := fmt.Sprintf("%s.json", p.imageNameNoSha()) - err := pipelinectxt.WriteJsonArtifact(p.artifactImage(), pipelinectxt.ImageDigestsPath, imageArtifactFilename) - if err != nil { - return p, err - } - - fmt.Println("Writing SBOM artifact ...") - err = pipelinectxt.CopyArtifact(p.sbomFile, pipelinectxt.SBOMsPath) - if err != nil { - return p, fmt.Errorf("copy SBOM report to artifacts: %w", err) - } - - return p, nil - } -} - -func processExtraTags() PackageStep { - return func(p *packageImage) (*packageImage, error) { - if len(p.parsedExtraTags) > 0 { - p.logger.Infof("Processing extra tags: %+q", p.parsedExtraTags) - for _, extraTag := range p.parsedExtraTags { - err := imageTagArtifactExists(p, extraTag) - if err == nil { - p.logger.Infof("Artifact exists for tag: %s", extraTag) - continue - } - p.logger.Infof("pushing extra tag: %s", extraTag) - imageExtraTag := p.imageId.Tag(extraTag) - err = p.skopeoTag(&imageExtraTag, os.Stdout, os.Stderr) - if err != nil { - return p, fmt.Errorf("skopeo push failed: %w", err) - } - - p.logger.Infof("Writing image artifact for tag: %s", extraTag) - image := p.artifactImageForTag(extraTag) - filename := fmt.Sprintf("%s-%s.json", p.imageId.ImageStream, extraTag) - err = pipelinectxt.WriteJsonArtifact(image, pipelinectxt.ImageDigestsPath, filename) - if err != nil { - return p, err - } - } - } - return p, nil - } -} - -func imageTagArtifactExists(p *packageImage, tag string) error { - imageArtifactsDir := filepath.Join(p.opts.checkoutDir, pipelinectxt.ImageDigestsPath) - filename := fmt.Sprintf("%s-%s.json", p.imageId.ImageStream, tag) - _, err := os.Stat(filepath.Join(imageArtifactsDir, filename)) - return err -} diff --git a/cmd/package-image/trivy.go b/cmd/package-image/trivy.go deleted file mode 100644 index 5a0a24dd..00000000 --- a/cmd/package-image/trivy.go +++ /dev/null @@ -1,39 +0,0 @@ -package main - -import ( - 
"fmt" - "os" - "path/filepath" - - "github.com/google/shlex" - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" -) - -const ( - trivyBin = "trivy" - trivyWorkdir = "/tmp" -) - -func (p *packageImage) generateImageSBOM() error { - // settle for one format and name until we have use cases for multiple formats (we use spdx format). - // trivy support --formats: table, json, sarif, template, cyclonedx, spdx, spdx-json, github, cosign-vuln (default "table") - // more args for experimentation via extra args - extraArgs, err := shlex.Split(p.opts.trivySBOMExtraArgs) - if err != nil { - p.logger.Errorf("could not parse extra args (%s): %s", p.opts.trivySBOMExtraArgs, err) - } - sbomFilename := fmt.Sprintf("%s.%s", p.imageNameNoSha(), pipelinectxt.SBOMsFormat) - p.sbomFile = filepath.Join(trivyWorkdir, sbomFilename) - args := []string{ - "image", - fmt.Sprintf("--format=%s", pipelinectxt.SBOMsFormat), - fmt.Sprintf("--input=%s", filepath.Join(buildahWorkdir, p.imageNameNoSha())), - fmt.Sprintf("--output=%s", p.sbomFile), - } - if p.opts.debug { - args = append(args, "--debug=true") - } - args = append(args, extraArgs...) - return command.RunInDir(trivyBin, args, []string{}, trivyWorkdir, os.Stdout, os.Stderr) -} diff --git a/cmd/pipeline-manager/main.go b/cmd/pipeline-manager/main.go index 92949082..8699d60a 100644 --- a/cmd/pipeline-manager/main.go +++ b/cmd/pipeline-manager/main.go @@ -16,7 +16,6 @@ import ( tektonClient "github.com/opendevstack/ods-pipeline/internal/tekton" "github.com/opendevstack/ods-pipeline/pkg/bitbucket" "github.com/opendevstack/ods-pipeline/pkg/logging" - tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" ) const ( @@ -25,9 +24,6 @@ const ( repoBaseEnvVar = "REPO_BASE" tokenEnvVar = "ACCESS_TOKEN" webhookSecretEnvVar = "WEBHOOK_SECRET" - taskKindEnvVar = "ODS_TASK_KIND" - taskKindDefault = "Task" - taskSuffixEnvVar = "ODS_TASK_SUFFIX" storageProvisionerEnvVar = "ODS_STORAGE_PROVISIONER" storageClassNameEnvVar = "ODS_STORAGE_CLASS_NAME" storageClassNameDefault = "standard" @@ -77,10 +73,6 @@ func serve() error { return fmt.Errorf("%s must be set", webhookSecretEnvVar) } - taskKind := readStringFromEnvVar(taskKindEnvVar, taskKindDefault) - - taskSuffix := readStringFromEnvVar(taskSuffixEnvVar, "") - storageProvisioner := readStringFromEnvVar(storageProvisionerEnvVar, "") storageClassName := readStringFromEnvVar(storageClassNameEnvVar, storageClassNameDefault) @@ -152,8 +144,6 @@ func serve() error { TektonClient: tClient, KubernetesClient: kClient, Logger: logger.WithTag("scheduler"), - TaskKind: tekton.TaskKind(taskKind), - TaskSuffix: taskSuffix, StorageConfig: manager.StorageConfig{ Provisioner: storageProvisioner, ClassName: storageClassName, diff --git a/cmd/sonar/main.go b/cmd/sonar/main.go deleted file mode 100644 index 11e2271c..00000000 --- a/cmd/sonar/main.go +++ /dev/null @@ -1,219 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - "path/filepath" - "strings" - "time" - - "github.com/opendevstack/ods-pipeline/pkg/logging" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/sonar" -) - -type options struct { - sonarAuthToken string - sonarURL string - sonarEdition string - workingDir string - rootPath string - qualityGate bool - trustStore string - trustStorePassword string - debug bool -} - -var defaultOptions = options{ - sonarAuthToken: os.Getenv("SONAR_AUTH_TOKEN"), - sonarURL: os.Getenv("SONAR_URL"), - sonarEdition: 
os.Getenv("SONAR_EDITION"), - workingDir: ".", - qualityGate: false, - trustStore: "${JAVA_HOME}/lib/security/cacerts", - trustStorePassword: "changeit", - debug: (os.Getenv("DEBUG") == "true"), -} - -func main() { - rootPath, err := filepath.Abs(".") - if err != nil { - log.Fatal(err) - } - - opts := options{rootPath: rootPath} - flag.StringVar(&opts.sonarAuthToken, "sonar-auth-token", defaultOptions.sonarAuthToken, "sonar-auth-token") - flag.StringVar(&opts.sonarURL, "sonar-url", defaultOptions.sonarURL, "sonar-url") - flag.StringVar(&opts.sonarEdition, "sonar-edition", defaultOptions.sonarEdition, "sonar-edition") - flag.StringVar(&opts.workingDir, "working-dir", defaultOptions.workingDir, "working directory") - flag.BoolVar(&opts.qualityGate, "quality-gate", defaultOptions.qualityGate, "require quality gate pass") - flag.StringVar(&opts.trustStore, "truststore", defaultOptions.trustStore, "JKS truststore") - flag.StringVar(&opts.trustStorePassword, "truststore-pass", defaultOptions.trustStorePassword, "JKS truststore password") - flag.BoolVar(&opts.debug, "debug", defaultOptions.debug, "debug mode") - flag.Parse() - - var logger logging.LeveledLoggerInterface - if opts.debug { - logger = &logging.LeveledLogger{Level: logging.LevelDebug} - } else { - logger = &logging.LeveledLogger{Level: logging.LevelInfo} - } - - ctxt := &pipelinectxt.ODSContext{} - err = ctxt.ReadCache(".") - if err != nil { - log.Fatal(err) - } - - err = os.Chdir(opts.workingDir) - if err != nil { - log.Fatal(err) - } - - sonarClient, err := sonar.NewClient(&sonar.ClientConfig{ - APIToken: opts.sonarAuthToken, - BaseURL: opts.sonarURL, - ServerEdition: opts.sonarEdition, - TrustStore: opts.trustStore, - TrustStorePassword: opts.trustStorePassword, - Debug: opts.debug, - Logger: logger, - }) - if err != nil { - log.Fatal("sonar client:", err) - } - - err = sonarScan(logger, opts, ctxt, sonarClient) - if err != nil { - log.Fatal(err) - } -} - -func sonarScan( - logger logging.LeveledLoggerInterface, - opts options, - ctxt *pipelinectxt.ODSContext, - sonarClient sonar.ClientInterface) error { - artifactPrefix := "" - if opts.workingDir != "." 
{ - artifactPrefix = strings.Replace(opts.workingDir, "/", "-", -1) + "-" - } - - sonarProject := sonar.ProjectKey(ctxt, artifactPrefix) - - logger.Infof("Scanning with sonar-scanner ...") - var prInfo *sonar.PullRequest - if len(ctxt.PullRequestKey) > 0 && ctxt.PullRequestKey != "0" && len(ctxt.PullRequestBase) > 0 { - logger.Infof("Pull request (ID %s) detected.", ctxt.PullRequestKey) - prInfo = &sonar.PullRequest{ - Key: ctxt.PullRequestKey, - Branch: ctxt.GitRef, - Base: ctxt.PullRequestBase, - } - } - err := sonarClient.Scan( - sonarProject, - ctxt.GitRef, - ctxt.GitCommitSHA, - prInfo, - os.Stdout, - os.Stderr, - ) - if err != nil { - return fmt.Errorf("scan failed: %w", err) - } - - logger.Infof("Wait until compute engine task finishes ...") - err = waitUntilComputeEngineTaskIsSuccessful(logger, sonarClient) - if err != nil { - return fmt.Errorf("background task did not finish successfully: %w", err) - } - - if prInfo == nil { - logger.Infof("Generating reports ...") - err := sonarClient.GenerateReports( - sonarProject, - "OpenDevStack", - ctxt.GitRef, - opts.rootPath, - artifactPrefix, - ) - if err != nil { - logger.Errorf(err.Error()) - os.Exit(1) - } - } else { - logger.Infof("No reports are generated for pull request scans.") - } - - if opts.qualityGate { - logger.Infof("Checking quality gate ...") - qualityGateResult, err := sonarClient.QualityGateGet(sonar.QualityGateGetParams{ - ProjectKey: sonarProject, - Branch: ctxt.GitRef, - PullRequest: ctxt.PullRequestKey, - }) - if err != nil { - return fmt.Errorf("quality gate could not be retrieved: %w", err) - } - err = pipelinectxt.WriteJsonArtifact( - qualityGateResult, - filepath.Join(opts.rootPath, pipelinectxt.SonarAnalysisPath), - fmt.Sprintf("%squality-gate.json", artifactPrefix), - ) - if err != nil { - return fmt.Errorf("quality gate status could not be stored as an artifact: %w", err) - } - actualStatus := qualityGateResult.ProjectStatus.Status - if actualStatus != sonar.QualityGateStatusOk { - return fmt.Errorf( - "quality gate status is '%s', not '%s'", - actualStatus, sonar.QualityGateStatusOk, - ) - } else { - logger.Infof("Quality gate passed.") - } - } - - return nil -} - -// waitUntilComputeEngineTaskIsSuccessful reads the scanner report file and -// extracts the task ID. It then waits until the corresponding background task -// in SonarQube succeeds. If the tasks fails or the timeout is reached, an -// error is returned. 
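The doc comment above describes a capped exponential backoff: the removed function that follows sleeps 1s, 2s, 4s and so on for up to 8 attempts, which allows a bit over four minutes of task runtime (1+2+...+128 = 255 seconds). A generic, runnable sketch of the same polling scheme, with hypothetical names:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // pollWithBackoff calls check up to attempts times, doubling the wait
    // before each call. Errors from check are treated as transient here;
    // a real implementation may want to distinguish failure states.
    func pollWithBackoff(attempts int, check func() (done bool, err error)) error {
        sleep := time.Second
        for i := 0; i < attempts; i++ {
            time.Sleep(sleep)
            sleep *= 2
            done, err := check()
            if err != nil {
                fmt.Println("transient error:", err)
                continue
            }
            if done {
                return nil
            }
        }
        return errors.New("did not succeed within timeout")
    }

    func main() {
        start := time.Now()
        err := pollWithBackoff(3, func() (bool, error) {
            fmt.Println("checking after", time.Since(start).Round(time.Second))
            return time.Since(start) > 5*time.Second, nil
        })
        fmt.Println("result:", err)
    }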
-func waitUntilComputeEngineTaskIsSuccessful(logger logging.LeveledLoggerInterface, sonarClient sonar.ClientInterface) error { - reportTaskID, err := sonarClient.ExtractComputeEngineTaskID(sonar.ReportTaskFile) - if err != nil { - return fmt.Errorf("cannot read task ID: %w", err) - } - params := sonar.ComputeEngineTaskGetParams{ID: reportTaskID} - attempts := 8 // allows for over 4min task runtime - sleep := time.Second - for i := 0; i < attempts; i++ { - logger.Infof("Waiting %s before checking task status ...", sleep) - time.Sleep(sleep) - sleep *= 2 - task, err := sonarClient.ComputeEngineTaskGet(params) - if err != nil { - logger.Infof("cannot get status of task: %s", err) - continue - } - switch task.Status { - case sonar.TaskStatusInProgress: - logger.Infof("Background task %s has not finished yet", reportTaskID) - case sonar.TaskStatusPending: - logger.Infof("Background task %s has not started yet", reportTaskID) - case sonar.TaskStatusFailed: - return fmt.Errorf("background task %s has failed", reportTaskID) - case sonar.TaskStatusSuccess: - logger.Infof("Background task %s has finished successfully", reportTaskID) - return nil - default: - logger.Infof("Background task %s has unknown status %s", reportTaskID, task.Status) - } - } - return fmt.Errorf("background task %s did not succeed within timeout", reportTaskID) -} diff --git a/cmd/sonar/main_test.go b/cmd/sonar/main_test.go deleted file mode 100644 index e9ad9d56..00000000 --- a/cmd/sonar/main_test.go +++ /dev/null @@ -1,161 +0,0 @@ -package main - -import ( - "io" - "os" - "strings" - "testing" - - "github.com/opendevstack/ods-pipeline/pkg/logging" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/sonar" -) - -type fakeClient struct { - scanPerformed bool - passQualityGate bool - qualityGateRetrieved bool - reportGenerated bool -} - -func (c *fakeClient) Scan(sonarProject, branch, commit string, pr *sonar.PullRequest, outWriter, errWriter io.Writer) error { - c.scanPerformed = true - return nil -} - -func (c *fakeClient) QualityGateGet(p sonar.QualityGateGetParams) (*sonar.QualityGate, error) { - c.qualityGateRetrieved = true - status := sonar.QualityGateStatusError - if c.passQualityGate { - status = sonar.QualityGateStatusOk - } - return &sonar.QualityGate{ProjectStatus: sonar.QualityGateProjectStatus{Status: status}}, nil -} - -func (c *fakeClient) GenerateReports(sonarProject, author, branch, rootPath, artifactPrefix string) error { - c.reportGenerated = true - return nil -} - -func (c *fakeClient) ExtractComputeEngineTaskID(filename string) (string, error) { - return "abc123", nil -} - -func (c *fakeClient) ComputeEngineTaskGet(params sonar.ComputeEngineTaskGetParams) (*sonar.ComputeEngineTask, error) { - return &sonar.ComputeEngineTask{Status: sonar.TaskStatusSuccess}, nil -} - -func TestSonarScan(t *testing.T) { - logger := &logging.LeveledLogger{Level: logging.LevelDebug} - - tests := map[string]struct { - // which SQ edition is in use - optSonarEdition string - // whether quality gate is required to pass - optQualityGate bool - - // PR key - ctxtPrKey string - // PR base - ctxtPrBase string - - // whether the quality gate in SQ passes (faked) - passQualityGate bool - - // whether scan should have been performed - wantScanPerformed bool - // whether report should have been generated - wantReportGenerated bool - // whether quality gate should have been retrieved - wantQualityGateRetrieved bool - // whether scanning should fail - if not empty, the actual error message 
- // will be checked to contain wantErr. - wantErr string - }{ - "developer edition generates report when no PR is present": { - optSonarEdition: "developer", - optQualityGate: true, - ctxtPrKey: "", - ctxtPrBase: "", - passQualityGate: true, - wantScanPerformed: true, - wantReportGenerated: true, - wantQualityGateRetrieved: true, - }, - "developer edition does not generate report when PR is present": { - optSonarEdition: "developer", - optQualityGate: true, - ctxtPrKey: "3", - ctxtPrBase: "master", - passQualityGate: true, - wantScanPerformed: true, - wantReportGenerated: false, - wantQualityGateRetrieved: true, - }, - "community edition generates report": { - optSonarEdition: "community", - optQualityGate: true, - ctxtPrKey: "", - ctxtPrBase: "", - passQualityGate: true, - wantScanPerformed: true, - wantReportGenerated: true, - wantQualityGateRetrieved: true, - }, - "does not check quality gate if disabled": { - optSonarEdition: "community", - optQualityGate: false, - ctxtPrKey: "", - ctxtPrBase: "", - passQualityGate: true, - wantScanPerformed: true, - wantReportGenerated: true, - wantQualityGateRetrieved: false, - }, - "fails if quality gate does not pass": { - optSonarEdition: "community", - optQualityGate: true, - ctxtPrKey: "", - ctxtPrBase: "", - passQualityGate: false, - wantScanPerformed: true, - wantReportGenerated: true, - wantQualityGateRetrieved: true, - wantErr: "quality gate status is 'ERROR', not 'OK'", - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - tempDir, err := os.MkdirTemp(".", "test-cmd-sonar-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - opts := options{ - sonarEdition: tc.optSonarEdition, - qualityGate: tc.optQualityGate, - workingDir: ".", - rootPath: tempDir, - } - ctxt := &pipelinectxt.ODSContext{PullRequestKey: tc.ctxtPrKey, PullRequestBase: tc.ctxtPrBase} - sonarClient := &fakeClient{passQualityGate: tc.passQualityGate} - err = sonarScan(logger, opts, ctxt, sonarClient) - if err != nil { - if tc.wantErr == "" || !strings.Contains(err.Error(), tc.wantErr) { - t.Fatalf("want err to contain: %s, got err: %s", tc.wantErr, err) - } - } - if sonarClient.scanPerformed != tc.wantScanPerformed { - t.Fatalf("want scan performed to be %v, got %v", tc.wantScanPerformed, sonarClient.scanPerformed) - } - if sonarClient.reportGenerated != tc.wantReportGenerated { - t.Fatalf("want report generated to be %v, got %v", tc.wantReportGenerated, sonarClient.reportGenerated) - } - if sonarClient.qualityGateRetrieved != tc.wantQualityGateRetrieved { - t.Fatalf("want quality gate retrieved to be %v, got %v", tc.wantQualityGateRetrieved, sonarClient.qualityGateRetrieved) - } - }) - } - -} diff --git a/cmd/taskdoc/main.go b/cmd/taskdoc/main.go new file mode 100644 index 00000000..1550385b --- /dev/null +++ b/cmd/taskdoc/main.go @@ -0,0 +1,69 @@ +// Package taskdoc implements documentation rendering for tasks. +// It is intended to be run via `go run`, passing a task YAML manifest +// and a description in Asciidoctor format. The combined result will be +// written to the specified destination. +// +// Example invocation: +// +// go run github.com/opendevstack/ods-pipeline/cmd/taskdoc \ +// -task tasks/my-task.yaml \ +// -description build/docs/my-task.adoc \ +// -destination docs/my-task.adoc +// +// By default, taskdoc will use the template located at +// docs/tasks/template.adoc.tmpl to produce the resulting file. 
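A template compatible with this flow might look roughly like the following Go text/template snippet rendering Asciidoctor output. The field names (.Name, .Description, .Params) are assumptions for illustration only, not the documented contract of pkg/taskdoc:

    = {{.Name}}

    {{.Description}}

    == Parameters

    {{range .Params}}
    * `{{.Name}}` (default: `{{.Default}}`): {{.Description}}
    {{end}}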
+// Another template can be specified via -template:
+//
+//    go run github.com/opendevstack/ods-pipeline/cmd/taskdoc \
+//      -task tasks/my-task.yaml \
+//      -description build/docs/my-task.adoc \
+//      -template /path/to/my-custom-template.adoc.tmpl \
+//      -destination docs/my-task.adoc
+package main
+
+import (
+    "flag"
+    "log"
+    "os"
+    "text/template"
+
+    "github.com/opendevstack/ods-pipeline/internal/projectpath"
+    "github.com/opendevstack/ods-pipeline/pkg/taskdoc"
+)
+
+func main() {
+    taskFile := flag.String("task", "", "Task manifest")
+    descriptionFile := flag.String("description", "", "Description snippet")
+    templateFile := flag.String("template", projectpath.RootedPath("docs/tasks/template.adoc.tmpl"), "Template file")
+    destinationFile := flag.String("destination", "", "Destination file")
+    flag.Parse()
+    if err := render(*taskFile, *descriptionFile, *templateFile, *destinationFile); err != nil {
+        log.Fatal(err)
+    }
+}
+
+func render(taskFile, descriptionFile, templateFile, destinationFile string) error {
+    t, err := os.ReadFile(taskFile)
+    if err != nil {
+        return err
+    }
+    d, err := os.ReadFile(descriptionFile)
+    if err != nil {
+        return err
+    }
+    tmpl, err := template.ParseFiles(templateFile)
+    if err != nil {
+        return err
+    }
+
+    task, err := taskdoc.ParseTask(t, d)
+    if err != nil {
+        return err
+    }
+
+    w, err := os.Create(destinationFile)
+    if err != nil {
+        return err
+    }
+    return taskdoc.RenderTaskDocumentation(w, tmpl, task)
+}
diff --git a/cmd/taskmanifest/main.go b/cmd/taskmanifest/main.go
new file mode 100644
index 00000000..e36ebb91
--- /dev/null
+++ b/cmd/taskmanifest/main.go
@@ -0,0 +1,69 @@
+// Package taskmanifest implements manifest rendering for tasks.
+// It is intended to be run via `go run`, passing a task YAML template
+// and data to be rendered. The rendered result will be
+// written to the specified destination. The -data flag can be passed
+// multiple times and may specify any key-value combination, which can then
+// be consumed in the template through Go's text/template package. E.g.
+// passing -data Foo=bar will replace {{.Foo}} in the template with bar.
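The -data flag described above feeds a plain Go text/template execution over a string map. A self-contained illustration of that substitution (the repository and version values are made up):

    package main

    import (
        "os"
        "text/template"
    )

    func main() {
        // Equivalent of passing -data ImageRepository=ghcr.io/my-org/my-repo
        // and -data Version=latest on the command line.
        data := map[string]string{
            "ImageRepository": "ghcr.io/my-org/my-repo",
            "Version":         "latest",
        }
        tmpl := template.Must(template.New("task").Parse(
            "image: '{{.ImageRepository}}/my-task:{{.Version}}'\n"))
        // Prints: image: 'ghcr.io/my-org/my-repo/my-task:latest'
        if err := tmpl.Execute(os.Stdout, data); err != nil {
            panic(err)
        }
    }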
+// +// Example invocation: +// +// go run github.com/opendevstack/ods-pipeline/cmd/taskmanifest \ +// -data ImageRepository=ghcr.io/my-org/my-repo \ +// -data Version=latest \ +// -template build/tasks/my-task.yaml \ +// -destination tasks/my-task.yaml +package main + +import ( + "flag" + "fmt" + "log" + "os" + "strings" + "text/template" + + "github.com/opendevstack/ods-pipeline/pkg/taskmanifest" + "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun" +) + +func main() { + templateFile := flag.String("template", "", "Template file") + destinationFile := flag.String("destination", "", "Destination file") + cc := tektontaskrun.NewClusterConfig() + mf := &MapFlag{v: cc.DefaultTaskTemplateData()} + flag.Var(mf, "data", "Key-value pairs") + flag.Parse() + if err := render(*templateFile, *destinationFile, mf.v); err != nil { + log.Fatal(err) + } +} + +func render(templateFile, destinationFile string, data map[string]string) error { + tmpl, err := template.ParseFiles(templateFile) + if err != nil { + return err + } + + w, err := os.Create(destinationFile) + if err != nil { + return err + } + return taskmanifest.RenderTask(w, tmpl, data) +} + +type MapFlag struct { + v map[string]string +} + +func (mf *MapFlag) String() string { + return fmt.Sprintf("%v", mf.v) +} +func (mf *MapFlag) Set(v string) error { + key, value, ok := strings.Cut(v, "=") + if !ok { + return fmt.Errorf("must have = sign") + } + mf.v[key] = value + return nil +} diff --git a/cmd/tasks/main.go b/cmd/tasks/main.go deleted file mode 100644 index f974f692..00000000 --- a/cmd/tasks/main.go +++ /dev/null @@ -1,19 +0,0 @@ -package main - -import ( - "log" - "path/filepath" - - "github.com/opendevstack/ods-pipeline/internal/projectpath" - "github.com/opendevstack/ods-pipeline/internal/tasks" -) - -func main() { - err := tasks.Render( - filepath.Join(projectpath.Root, "deploy/ods-pipeline/charts/tasks"), - filepath.Join(projectpath.Root, "tasks"), - ) - if err != nil { - log.Fatal(err) - } -} diff --git a/deploy/ods-pipeline/.gitignore b/deploy/chart/.gitignore similarity index 100% rename from deploy/ods-pipeline/.gitignore rename to deploy/chart/.gitignore diff --git a/deploy/ods-pipeline/charts/tasks/Chart.yaml b/deploy/chart/Chart.yaml similarity index 92% rename from deploy/ods-pipeline/charts/tasks/Chart.yaml rename to deploy/chart/Chart.yaml index 5fbe09cb..e8197bba 100644 --- a/deploy/ods-pipeline/charts/tasks/Chart.yaml +++ b/deploy/chart/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 -name: tasks -description: A Helm chart to setup ODS pipeline tasks +name: ods-pipeline +description: ODS Pipeline # A chart can be either an 'application' or a 'library' chart. # @@ -20,4 +20,5 @@ version: 0.13.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
appVersion: "0.13.2" diff --git a/deploy/ods-pipeline/charts/setup/templates/_helpers.tpl b/deploy/chart/templates/_helpers.tpl similarity index 84% rename from deploy/ods-pipeline/charts/setup/templates/_helpers.tpl rename to deploy/chart/templates/_helpers.tpl index b1c875bc..7ba5edc2 100644 --- a/deploy/ods-pipeline/charts/setup/templates/_helpers.tpl +++ b/deploy/chart/templates/_helpers.tpl @@ -60,15 +60,3 @@ Create the name of the service account to use {{- default "default" .Values.serviceAccount.name }} {{- end }} {{- end }} - -{{/* -Create the task suffix. -See https://github.com/Masterminds/sprig/issues/53#issuecomment-483414063. -*/}} -{{- define "taskSuffix" -}} -{{- if kindIs "invalid" .Values.global.taskSuffix }} -{{- printf "-v%s" (.Chart.AppVersion | replace "." "-") }} -{{- else }} -{{- .Values.global.taskSuffix }} -{{- end }} -{{- end }} diff --git a/deploy/ods-pipeline/charts/setup/templates/configmap-bitbucket.yaml b/deploy/chart/templates/configmap-bitbucket.yaml similarity index 100% rename from deploy/ods-pipeline/charts/setup/templates/configmap-bitbucket.yaml rename to deploy/chart/templates/configmap-bitbucket.yaml diff --git a/deploy/ods-pipeline/charts/setup/templates/configmap-cluster.yaml b/deploy/chart/templates/configmap-cluster.yaml similarity index 100% rename from deploy/ods-pipeline/charts/setup/templates/configmap-cluster.yaml rename to deploy/chart/templates/configmap-cluster.yaml diff --git a/deploy/ods-pipeline/charts/setup/templates/configmap-nexus.yaml b/deploy/chart/templates/configmap-nexus.yaml similarity index 100% rename from deploy/ods-pipeline/charts/setup/templates/configmap-nexus.yaml rename to deploy/chart/templates/configmap-nexus.yaml diff --git a/deploy/ods-pipeline/charts/setup/templates/configmap-notifications.yaml b/deploy/chart/templates/configmap-notifications.yaml similarity index 100% rename from deploy/ods-pipeline/charts/setup/templates/configmap-notifications.yaml rename to deploy/chart/templates/configmap-notifications.yaml diff --git a/deploy/ods-pipeline/charts/setup/templates/configmap-pipeline.yaml b/deploy/chart/templates/configmap-pipeline.yaml similarity index 100% rename from deploy/ods-pipeline/charts/setup/templates/configmap-pipeline.yaml rename to deploy/chart/templates/configmap-pipeline.yaml diff --git a/deploy/ods-pipeline/charts/setup/templates/deployment.yaml b/deploy/chart/templates/deployment.yaml similarity index 87% rename from deploy/ods-pipeline/charts/setup/templates/deployment.yaml rename to deploy/chart/templates/deployment.yaml index b82f39e7..482126c0 100644 --- a/deploy/ods-pipeline/charts/setup/templates/deployment.yaml +++ b/deploy/chart/templates/deployment.yaml @@ -17,7 +17,7 @@ spec: containers: - name: pipeline-manager securityContext: {} - image: "{{.Values.pipelineManager.imageRepository}}/ods-pipeline-manager:{{.Values.pipelineManager.imageTag | default .Chart.AppVersion}}" + image: "{{.Values.imageRepository}}/pipeline-manager:{{.Values.imageTag | default .Chart.AppVersion}}" ports: - name: http containerPort: 8080 @@ -53,10 +53,6 @@ spec: value: '{{int .Values.pipelineRunMinKeepHours}}' - name: ODS_PRUNE_MAX_KEEP_RUNS value: '{{int .Values.pipelineRunMaxKeepRuns}}' - - name: ODS_TASK_KIND - value: '{{default "Task" .Values.global.taskKind}}' - - name: ODS_TASK_SUFFIX - value: '{{- include "taskSuffix" .}}' readinessProbe: httpGet: path: /health diff --git a/deploy/ods-pipeline/charts/setup/templates/service.yaml b/deploy/chart/templates/service.yaml similarity index 100% rename from 
deploy/ods-pipeline/charts/setup/templates/service.yaml rename to deploy/chart/templates/service.yaml diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-finish.yaml b/deploy/chart/templates/task-finish.yaml similarity index 88% rename from deploy/ods-pipeline/charts/tasks/templates/task-ods-finish.yaml rename to deploy/chart/templates/task-finish.yaml index 251e9500..d8e06aea 100644 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-finish.yaml +++ b/deploy/chart/templates/task-finish.yaml @@ -1,9 +1,7 @@ apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' +kind: Task metadata: - name: '{{default "ods" .Values.taskPrefix}}-finish{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep + name: ods-pipeline-finish spec: description: | Finishes the pipeline run. @@ -20,9 +18,9 @@ spec: description: Artifact target respository default: '' steps: - - name: ods-finish + - name: finish # Image is built from build/package/Dockerfile.finish. - image: '{{.Values.imageRepository}}/ods-finish:{{.Values.global.imageTag | default .Chart.AppVersion}}' + image: '{{.Values.imageRepository}}/finish:{{.Values.imageTag | default .Chart.AppVersion}}' env: - name: HOME value: '/tekton/home' diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-start.yaml b/deploy/chart/templates/task-start.yaml similarity index 94% rename from deploy/ods-pipeline/charts/tasks/templates/task-ods-start.yaml rename to deploy/chart/templates/task-start.yaml index 1faf9d42..4fbe25ea 100644 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-start.yaml +++ b/deploy/chart/templates/task-start.yaml @@ -1,9 +1,7 @@ apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' +kind: Task metadata: - name: '{{default "ods" .Values.taskPrefix}}-start{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep + name: ods-pipeline-start spec: description: | Starts the pipeline run. @@ -76,9 +74,9 @@ spec: - description: The URL that was fetched by this task. name: url steps: - - name: ods-start + - name: start # Image is built from build/package/Dockerfile.start. - image: '{{.Values.imageRepository}}/ods-start:{{.Values.global.imageTag | default .Chart.AppVersion}}' + image: '{{.Values.imageRepository}}/start:{{.Values.imageTag | default .Chart.AppVersion}}' env: - name: HOME value: '/tekton/home' diff --git a/deploy/chart/values.kind.yaml b/deploy/chart/values.kind.yaml new file mode 100644 index 00000000..260bc492 --- /dev/null +++ b/deploy/chart/values.kind.yaml @@ -0,0 +1,14 @@ +imageTag: latest + +# Cluster +consoleUrl: 'http://example.com' +# Pipeline Manager +pipelineManager: + storageProvisioner: '' + storageClassName: 'standard' + storageSize: '2Gi' + +# Image repository to pull task images from. +# To test with the latest public ods-pipeline images, set +# global.imageTag to 'latest' and use: 'ghcr.io/opendevstack/ods-pipeline'. +imageRepository: localhost:5000/ods-pipeline diff --git a/deploy/chart/values.yaml b/deploy/chart/values.yaml new file mode 100644 index 00000000..33abac1c --- /dev/null +++ b/deploy/chart/values.yaml @@ -0,0 +1,113 @@ +# ----------------------- Installation hint ----------------------- +# !!! Important !!! +# This is the default values file - if you're editing this as +# part of the ODS pipeline installation you're in the wrong file! +# +# Please open ../values.yaml (the file you have created by making +# a copy of ../values.yaml.tmpl) and do your changes there. 
+# ----------------------- Installation hint -----------------------
+
+# General
+# Serviceaccount name to use for pipeline resources.
+serviceAccountName: 'pipeline'
+# Whether to enable debug mode
+debug: 'false'
+
+# Bitbucket
+# Bitbucket URL (including scheme, without trailing slash).
+# Example: https://bitbucket.example.com.
+bitbucketUrl: ''
+# Bitbucket username. Example: cd_user.
+bitbucketUsername: ''
+
+# Nexus
+# Nexus URL (including scheme, without trailing slash).
+# Example: https://nexus.example.com.
+nexusUrl: ''
+# Nexus username. Example: developer.
+nexusUsername: ''
+
+# Cluster
+# URL (including scheme, without trailing slash) of the OpenShift Web Console.
+consoleUrl: 'http://example.com'
+
+# Notification Webhook
+notification:
+  # Whether notifications should be sent to the URL specified below or not.
+  enabled: false
+  # URL of the configured webhook
+  url: 'http://example.com'
+  # The HTTP method to be used
+  method: 'POST'
+  # The HTTP content type header
+  contentType: 'application/json'
+  # Specify the outcomes you want to be notified of (allowed values: c.f.
+  # https://tekton.dev/docs/pipelines/pipelines/#using-aggregate-execution-status-of-all-tasks)
+  notifyOnStatus:
+    - 'Failed'
+  # Template to be processed and accepted by the configured webhook in use.
+  # The example below might work for Microsoft Teams.
+  requestTemplate: |-
+    {
+      "@type": "MessageCard",
+      "@context": "http://schema.org/extensions",
+      "themeColor": {{if eq .OverallStatus "Succeeded"}}"237b4b"{{else}}"c4314b"{{ end }},
+      "summary": "{{.ODSContext.Project}} - ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}",
+      "sections": [
+        {
+          "activityTitle": "ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}",
+          "activitySubtitle": "On Project {{.ODSContext.Project}}",
+          "activityImage": "https://avatars.githubusercontent.com/u/38974438?s=200&v=4",
+          "facts": [
+            {
+              "name": "GitRef",
+              "value": "{{.ODSContext.GitRef}}"
+            }
+          ],
+          "markdown": true
+        }
+      ],
+      "potentialAction": [
+        {
+          "@type": "OpenUri",
+          "name": "Go to PipelineRun",
+          "targets": [
+            {
+              "os": "default",
+              "uri": "{{.PipelineRunURL}}"
+            }
+          ]
+        }
+      ]
+    }
+
+# Pipeline(Run) Pruning
+# Minimum hours to keep a pipeline run. Has precedence over pipelineRunMaxKeepRuns.
+# Must be at least 1.
+pipelineRunMinKeepHours: '48'
+# Maximum number of pipeline runs to keep.
+# Must be at least 1.
+pipelineRunMaxKeepRuns: '20'
+
+# Pipeline Manager
+pipelineManager:
+  # PVC (used for the pipeline workspace)
+  # Storage provisioner. On AWS backed clusters, use 'kubernetes.io/aws-ebs'.
+  storageProvisioner: 'kubernetes.io/aws-ebs'
+  # Storage class. On AWS backed clusters, use 'gp2'.
+  storageClassName: 'gp2'
+  # Storage size. Defaults to 2Gi unless set explicitly here.
+  storageSize: '5Gi'
+  # Number of replicas to run for the pipeline manager.
+  replicaCount: 1
+  # Deployment pod resources. Typically these settings should not need to change.
+  resources:
+    limits:
+      cpu: 100m
+      memory: 128Mi
+    requests:
+      cpu: 100m
+      memory: 128Mi
+
+# Image repository to pull start/finish task images from.
+imageRepository: ghcr.io/opendevstack/ods-pipeline diff --git a/deploy/install.sh b/deploy/install.sh index aa10e473..7137c993 100755 --- a/deploy/install.sh +++ b/deploy/install.sh @@ -10,15 +10,12 @@ NAMESPACE="" RELEASE_NAME="ods-pipeline" SERVICEACCOUNT="pipeline" VALUES_FILE="values.yaml" -CHART_DIR="./ods-pipeline" +CHART_DIR="./chart" # Secrets AUTH_SEPARATOR=":" -AQUA_AUTH="" -AQUA_SCANNER_URL="" BITBUCKET_AUTH="" BITBUCKET_WEBHOOK_SECRET="" NEXUS_AUTH="" -SONAR_AUTH="" PRIVATE_CERT="" # Check prerequisites. @@ -47,20 +44,15 @@ function usage { printf "\t--no-diff\t\t\tDo not run Helm diff before running Helm upgrade.\n" printf "\t--dry-run\t\t\tDo not apply any changes, instead just print what the script would do.\n" printf "\t--auth-separator\t\tCharacter to use as a separator for basic auth flags (defaults to '%s')\n" "$AUTH_SEPARATOR" - printf "\t--aqua-auth\t\t\tUsername and password (separated by '%s') of an Aqua user (if not given, script will prompt for this).\n" "$AUTH_SEPARATOR" - printf "\t--aqua-scanner-url\t\t\tURL from which to download Aqua scanner (if not given, script will prompt for this).\n" printf "\t--bitbucket-auth\t\tAccess token of a Bitbucket user (if not given, script will prompt for this).\n" printf "\t--bitbucket-webhook-secret\tSecret to protect webhook endpoint with (if not given, script will generate this).\n" printf "\t--nexus-auth\t\t\tUsername and password (separated by '%s') of a Nexus user (if not given, script will prompt for this).\n" "$AUTH_SEPARATOR" - printf "\t--sonar-auth\t\t\tAuth token of a SonarQube user (if not given, script will prompt for this).\n" printf "\t--private-cert\t\t\tHost from which to download private certificate (if not given, script will skip this).\n" printf "\nExample:\n\n" printf "\t%s \ \ \n\t\t--namespace foo \ \ - \n\t\t--aqua-auth 'user:password' \ \ \n\t\t--bitbucket-auth 'personal-access-token' \ \ - \n\t\t--nexus-auth 'user:password' \ \ - \n\t\t--sonar-auth 'auth-token' \n\n" "$0" + \n\t\t--nexus-auth 'user:password' \n\n" "$0" } while [ "$#" -gt 0 ]; do @@ -87,12 +79,6 @@ while [ "$#" -gt 0 ]; do --auth-separator) AUTH_SEPARATOR="$2"; shift;; --auth-separator=*) AUTH_SEPARATOR="${1#*=}";; - --aqua-auth) AQUA_AUTH="$2"; shift;; - --aqua-auth=*) AQUA_AUTH="${1#*=}";; - - --aqua-scanner-url) AQUA_SCANNER_URL="$2"; shift;; - --aqua-scanner-url=*) AQUA_SCANNER_URL="${1#*=}";; - --bitbucket-auth) BITBUCKET_AUTH="$2"; shift;; --bitbucket-auth=*) BITBUCKET_AUTH="${1#*=}";; @@ -102,9 +88,6 @@ while [ "$#" -gt 0 ]; do --nexus-auth) NEXUS_AUTH="$2"; shift;; --nexus-auth=*) NEXUS_AUTH="${1#*=}";; - --sonar-auth) SONAR_AUTH="$2"; shift;; - --sonar-auth=*) SONAR_AUTH="${1#*=}";; - --private-cert) PRIVATE_CERT="$2"; shift;; --private-cert=*) PRIVATE_CERT="${1#*=}";; @@ -231,19 +214,6 @@ echo "Installing secrets ..." if [ "${DRY_RUN}" == "true" ]; then echo "(skipping in dry-run)" else - installSecret "ods-aqua-auth" \ - "basic-auth-secret.yaml.tmpl" \ - "${AQUA_AUTH}" \ - "Please enter the username of an Aqua user with scan permissions. If you do not want to use Aqua, leave this empty:" \ - "Please enter the password of this Aqua user (input will be hidden). If you do not want to use Aqua, leave this empty:" - - # Aqua scanner URL is a single value. - installSecret "ods-aqua-scanner-url" \ - "opaque-secret.yaml.tmpl" \ - "${AQUA_SCANNER_URL}" \ - "" \ - "Please enter the URL from which to download the Aqua scanner binary. The URL may need to contain basic authentication - if so, ensure username/password are URL-encoded. 
Further, ensure that the version matches your Aqua server version. If you do not want to use Aqua, leave this empty:" - # Bitbucket username is not required as PAT alone is enough. installSecret "ods-bitbucket-auth" \ "basic-auth-secret.yaml.tmpl" \ @@ -264,13 +234,6 @@ else "Please enter the username of a Nexus user with write permission:" \ "Please enter the password of this Nexus user (input will be hidden):" - # SonarQube username is not required as auth token alone is enough. - installSecret "ods-sonar-auth" \ - "basic-auth-secret.yaml.tmpl" \ - "${SONAR_AUTH}" \ - "" \ - "Please enter an auth token of a SonarQube user with scan permissions (input will be hidden):" - installTLSSecret "ods-private-cert" "${PRIVATE_CERT}" fi diff --git a/deploy/ods-pipeline/Chart.yaml b/deploy/ods-pipeline/Chart.yaml deleted file mode 100644 index dae63cea..00000000 --- a/deploy/ods-pipeline/Chart.yaml +++ /dev/null @@ -1,32 +0,0 @@ -apiVersion: v2 -name: ods-pipeline -description: Umbrella chart for ods-pipeline - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.13.2 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "0.13.2" - -dependencies: - - name: setup - version: 0.13.2 - condition: setup.enabled - - name: tasks - version: 0.13.2 - condition: tasks.enabled diff --git a/deploy/ods-pipeline/charts/setup/Chart.yaml b/deploy/ods-pipeline/charts/setup/Chart.yaml deleted file mode 100644 index af91c0cf..00000000 --- a/deploy/ods-pipeline/charts/setup/Chart.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: v2 -name: setup -description: A Helm chart to setup ODS pipelines - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.13.2 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. 
They should reflect the version the application is using. -appVersion: "0.13.2" diff --git a/deploy/ods-pipeline/charts/setup/templates/configmap-aqua.yaml b/deploy/ods-pipeline/charts/setup/templates/configmap-aqua.yaml deleted file mode 100644 index eea1b6b0..00000000 --- a/deploy/ods-pipeline/charts/setup/templates/configmap-aqua.yaml +++ /dev/null @@ -1,9 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: ods-aqua - labels: - {{- include "chart.labels" . | nindent 4}} -data: - url: '{{.Values.aquaUrl | trimSuffix "/"}}' - registry: '{{.Values.aquaRegistry}}' diff --git a/deploy/ods-pipeline/charts/setup/templates/configmap-sonar.yaml b/deploy/ods-pipeline/charts/setup/templates/configmap-sonar.yaml deleted file mode 100644 index 5e4b35d6..00000000 --- a/deploy/ods-pipeline/charts/setup/templates/configmap-sonar.yaml +++ /dev/null @@ -1,9 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: ods-sonar - labels: - {{- include "chart.labels" . | nindent 4}} -data: - url: '{{.Values.sonarUrl | trimSuffix "/"}}' - edition: '{{.Values.sonarEdition | default "community" }}' diff --git a/deploy/ods-pipeline/charts/setup/values.yaml b/deploy/ods-pipeline/charts/setup/values.yaml deleted file mode 100644 index e8d0e03d..00000000 --- a/deploy/ods-pipeline/charts/setup/values.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# override name to be consistent with previous, separate chart naming convention(s) -nameOverride: ods-pipeline diff --git a/deploy/ods-pipeline/charts/tasks/templates/_helpers.tpl b/deploy/ods-pipeline/charts/tasks/templates/_helpers.tpl deleted file mode 100644 index b1c875bc..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/_helpers.tpl +++ /dev/null @@ -1,74 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "chart.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "chart.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "chart.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "chart.labels" -}} -helm.sh/chart: {{ include "chart.chart" . }} -{{ include "chart.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "chart.selectorLabels" -}} -app.kubernetes.io/name: {{ include "chart.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "chart.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "chart.fullname" .) 
.Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} - -{{/* -Create the task suffix. -See https://github.com/Masterminds/sprig/issues/53#issuecomment-483414063. -*/}} -{{- define "taskSuffix" -}} -{{- if kindIs "invalid" .Values.global.taskSuffix }} -{{- printf "-v%s" (.Chart.AppVersion | replace "." "-") }} -{{- else }} -{{- .Values.global.taskSuffix }} -{{- end }} -{{- end }} diff --git a/deploy/ods-pipeline/charts/tasks/templates/_sonar-step.tpl b/deploy/ods-pipeline/charts/tasks/templates/_sonar-step.tpl deleted file mode 100644 index 93c5c925..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/_sonar-step.tpl +++ /dev/null @@ -1,52 +0,0 @@ -{{- define "sonar-step"}} -- name: scan-with-sonar - # Image is built from build/package/Dockerfile.sonar. - image: '{{.Values.imageRepository}}/ods-sonar:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: HOME - value: '/tekton/home' - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - - truststore="${JAVA_HOME}/lib/security/cacerts" - if [ -f /etc/ssl/certs/private-cert.pem ]; then - truststore="$(pwd)/.ods-cache/truststore/cacerts" - configure-truststore --dest-store "${truststore}" - fi - # sonar is built from cmd/sonar/main.go. - sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) \ - -truststore "${truststore}" - fi - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) -{{- end}} diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-go.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-go.yaml deleted file mode 100644 index 5216344c..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-go.yaml +++ /dev/null @@ -1,148 +0,0 @@ -{{if .Values.global.enabledTasks.buildGo }} -apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' -metadata: - name: '{{default "ods" .Values.taskPrefix}}-build-go{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Go (module) applications. - - See https://github.com/opendevstack/ods-pipeline/blob/v{{.Chart.AppVersion}}/docs/tasks/ods-build-go.adoc - params: - - name: working-dir - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - type: string - default: "." - - name: enable-cgo - description: Whether to enable CGO. When not enabled the build will set `CGO_ENABLED=0`. - type: string - default: "false" - - name: go-os - description: "`GOOS` variable (the execution operating system such as `linux`, `windows`)." - type: string - default: "linux" - - name: go-arch - description: "`GOARCH` variable (the execution architecture such as `arm`, `amd64`)." 
- type: string - default: "amd64" - - name: output-dir - description: >- - Path to the directory into which the resulting Go binary should be copied, relative to `working-dir`. - This directory may then later be used as Docker context for example. - type: string - default: docker - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. - You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - type: string - default: "" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-go.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. See the task definition - for details how the build script is invoked. - type: string - default: "/usr/local/bin/build-go" - - name: pre-test-script - description: Script to execute before running tests, relative to the working directory. - type: string - default: "" - - name: sonar-quality-gate - description: Whether the SonarQube quality gate needs to pass for the task to succeed. - type: string - default: "false" - - name: sonar-skip - description: Whether to skip SonarQube analysis or not. - type: string - default: "false" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - {{- with ((.Values.go).sidecars) }} - sidecars: - {{- toYaml . | nindent 4 }} - {{- end }} - steps: - - name: build-go-binary - # Image is built from build/package/Dockerfile.go-toolset. - image: '{{.Values.imageRepository}}/ods-go-toolset:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: - {{- (.Values.go).resources | default dict | toYaml | nindent 8 }} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=go-$(params.go-os)-$(params.go-arch) - if copy-build-if-cached \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.output-dir) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 - fi - # Default build script is build/package/scripts/build-go.sh. - set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --enable-cgo=$(params.enable-cgo) \ - --go-os=$(params.go-os) \ - --go-arch=$(params.go-arch) \ - --pre-test-script=$(params.pre-test-script) \ - --output-dir=$(params.output-dir) \ - --debug=${DEBUG} - build_exit=$? 
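- # Note: errexit is disabled above so that a failing build script does not abort the step; artifacts (e.g. lint and test reports) are still copied below before the captured exit code is propagated.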
- set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.output-dir) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - {{- include "sonar-step" . | indent 4}} - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source -{{end}} diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-gradle.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-gradle.yaml deleted file mode 100644 index 25e53afb..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-gradle.yaml +++ /dev/null @@ -1,177 +0,0 @@ -{{if .Values.global.enabledTasks.buildGradle }} -apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' -metadata: - name: '{{default "ods" .Values.taskPrefix}}-build-gradle{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Gradle applications. - - See https://github.com/opendevstack/ods-pipeline/blob/v{{.Chart.AppVersion}}/docs/tasks/ods-build-gradle.adoc - params: - - name: working-dir - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - type: string - default: "." - - name: gradle-additional-tasks - description: >- - Additional gradle tasks to be passed to the gradle build. (default tasks called are `clean` and `build`). - type: string - default: "" - - name: gradle-options - description: >- - Options to be passed to the gradle build. - (See ref: https://docs.gradle.org/7.4.2/userguide/command_line_interface.html#sec:command_line_debugging) - type: string - default: "--no-daemon --stacktrace" - - name: gradle-opts-env - description: >- - Will be exposed to the build via `GRADLE_OPTS` environment variable. - Specifies JVM arguments to use when starting the Gradle client VM. The client VM only handles command line input/output, so it is rare that one would need to change its VM options. - You can still use this to change the settings for the Gradle daemon which runs the actual build by setting the according Gradle properties by `-D`. - If you want to set the JVM arguments for the actual build you would do this via `-Dorg.gradle.jvmargs=-Xmx1024M` - (See ref: https://docs.gradle.org/7.4.2/userguide/build_environment.html#sec:gradle_configuration_properties). - type: string - default: "-Dorg.gradle.jvmargs=-Xmx512M" - - name: output-dir - description: >- - Path to the directory into which the resulting Java application jar should be copied, relative to `working-dir`. - This directory may then later be used as Docker context for example. - type: string - default: docker - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. - You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. 
- type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - type: string - default: "" - - name: cached-outputs - description: >- - List of build output directories (as colon separated string) to be cached. - These directories are relative to `working-dir`. - type: string - default: "docker" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-gradle.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. See the task definition - for details how the build script is invoked. - type: string - default: "/usr/local/bin/build-gradle" - - name: gradle-build-dir - description: >- - Path to the directory into which Gradle publishes its build. - type: string - default: build - - name: sonar-quality-gate - description: Whether the SonarQube quality gate needs to pass for the task to succeed. - type: string - default: "false" - - name: sonar-skip - description: Whether to skip SonarQube analysis or not. - type: string - default: "false" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - {{- with ((.Values.gradle).sidecars) }} - sidecars: - {{- toYaml . | nindent 4 }} - {{- end }} - steps: - - name: build-gradle-binary - # Image is built from build/package/Dockerfile.gradle-toolset. - image: '{{.Values.imageRepository}}/ods-gradle-toolset:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: GRADLE_OPTS - value: "$(params.gradle-opts-env)" - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - resources: - {{- (.Values.gradle).resources | default dict | toYaml | nindent 8 }} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=gradle - if copy-build-if-cached \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 - fi - # Default build script is build/package/scripts/build-gradle.sh. - set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --output-dir=$(params.output-dir) \ - --gradle-build-dir=$(params.gradle-build-dir) \ - --gradle-additional-tasks="$(params.gradle-additional-tasks)" \ - --gradle-options="$(params.gradle-options)" - build_exit=$? 
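- # Note: as in the Go task, artifacts (e.g. test reports) are copied below even when the Gradle build failed; the captured exit code is propagated afterwards.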
- set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - {{- include "sonar-step" . | indent 4}} - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source -{{end}} diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-npm.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-npm.yaml deleted file mode 100644 index 279be5d0..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-npm.yaml +++ /dev/null @@ -1,148 +0,0 @@ -{{if .Values.global.enabledTasks.buildNPM }} -apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' -metadata: - name: '{{default "ods" .Values.taskPrefix}}-build-npm{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Node.js applications using npm. - - See https://github.com/opendevstack/ods-pipeline/blob/v{{.Chart.AppVersion}}/docs/tasks/ods-build-npm.adoc - params: - - name: working-dir - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - type: string - default: "." - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. - You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - type: string - default: "" - - name: cached-outputs - description: >- - List of build output directories (as colon separated string) to be cached. - These directories are relative to the `working-dir` parameter. - Common build directories are `dist` (default), `build` and `public`. - If empty this could mean that the original sources are being used as build output and no caching of built files is needed. Nonetheless build skipping can still remain enabled. - type: string - default: "dist" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-npm.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. See the task definition - for details how the build script is invoked. - type: string - default: "/usr/local/bin/build-npm" - - name: sonar-quality-gate - description: Whether quality gate needs to pass.
- type: string - default: "false" - - name: sonar-skip - description: Whether to skip the SonarQube analysis or not. - type: string - default: "false" - - name: node-version - description: "Node.js version to use - supported versions: 16, 18" - type: string - default: "18" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - {{- with ((.Values.npm).sidecars) }} - sidecars: - {{- toYaml . | nindent 4 }} - {{- end }} - steps: - - name: build-npm - # Image is built from build/package/Dockerfile.node-npm-toolset. - image: '{{.Values.imageRepository}}/ods-node$(params.node-version)-npm-toolset:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: - {{- (.Values.npm).resources | default dict | toYaml | nindent 8 }} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=npm - if copy-build-if-cached \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 - fi - # Default build script is build/package/scripts/build-npm.sh. - set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - build_exit=$? - set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - {{- include "sonar-step" . | indent 4}} - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source -{{end}} diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-python.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-python.yaml deleted file mode 100644 index cac14b0a..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-python.yaml +++ /dev/null @@ -1,144 +0,0 @@ -{{if .Values.global.enabledTasks.buildPython }} -apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' -metadata: - name: '{{default "ods" .Values.taskPrefix}}-build-python{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Python applications. 
- - See https://github.com/opendevstack/ods-pipeline/blob/v{{.Chart.AppVersion}}/docs/tasks/ods-build-python.adoc - params: - - name: working-dir - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - type: string - default: "." - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. - You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - type: string - default: "" - - name: max-line-length - description: Maximum line length. - type: string - default: "120" - - name: pre-test-script - description: Script to execute before running tests, relative to the working directory. - type: string - default: "" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-python.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. See the task definition - for details how the build script is invoked. - type: string - default: "/usr/local/bin/build-python" - - name: sonar-quality-gate - description: Whether quality gate needs to pass. - type: string - default: "false" - - name: sonar-skip - description: Whether to skip the SonarQube analysis or not. - type: string - default: "false" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - {{- with ((.Values.python).sidecars) }} - sidecars: - {{- toYaml . | nindent 4 }} - {{- end }} - steps: - - name: build-python - # Image is built from build/package/Dockerfile.python-toolset. - image: '{{.Values.imageRepository}}/ods-python-toolset:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: - {{- (.Values.python).resources | default dict | toYaml | nindent 8 }} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=python - if copy-build-if-cached \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 - fi - # Default build script is build/package/scripts/build-python.sh. 
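- # Note: errexit is disabled below so that the build script's exit code can be captured and artifacts still be copied when linting or tests fail.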
- set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --max-line-length=$(params.max-line-length) \ - --pre-test-script=$(params.pre-test-script) \ - --debug=${DEBUG} - build_exit=$? - set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - {{- include "sonar-step" . | indent 4}} - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source -{{end}} diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-deploy-helm.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-deploy-helm.yaml deleted file mode 100644 index c199bccf..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-deploy-helm.yaml +++ /dev/null @@ -1,115 +0,0 @@ -{{if .Values.global.enabledTasks.deployHelm }} -apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' -metadata: - name: '{{default "ods" .Values.taskPrefix}}-deploy-helm{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Deploy Helm charts. - - See https://github.com/opendevstack/ods-pipeline/blob/v{{.Chart.AppVersion}}/docs/tasks/ods-deploy-helm.adoc - params: - - name: chart-dir - description: Helm chart directory that will be deployed - type: string - default: ./chart - - name: release-name - description: | - The Helm release name. If empty, the release name is simply the name of the chart. - - When this task is used in a repository which defines subcharts, and the parameter is not set, - then the task sets `.fullnameOverride` equal to the respective - subcomponent to avoid resources being prefixed with the umbrella repository - component name (assuming your resources are named using the `chart.fullname` - helper). However, if the parameter is specified, `.fullnameOverride` is not set. - As a result the `chart.fullname` helper prefixes resources with the specified - `release-name` unless the chart's name contains the `release-name`. - type: string - default: '' - - name: diff-flags - description: Flags to pass to `helm diff upgrade` in addition to the ones specified via the `upgrade-flags` parameter. Note that the flags `--detailed-exitcode` and `--no-color` are automatically set and cannot be removed. If flags unknown to `helm diff` are passed, they are ignored. - type: string - default: '--three-way-merge' - - name: upgrade-flags - description: Flags to pass to `helm upgrade`. - type: string - default: '--install --wait' - - name: age-key-secret - description: | - Name of the secret containing the age key to use for helm-secrets. - If the secret exists, it is expected to have a field named `key.txt` with the age secret key in its content. - type: string - default: 'helm-secrets-age-key' - - name: api-server - description: | - API server of the target cluster, including scheme. - Only required if the target namespace is outside the cluster in which - the pipeline runs.
- type: string - default: '' - - name: api-credentials-secret - description: | - Name of the Secret resource holding the token of a serviceaccount (in field `token`). - Only required when `api-server` is set. - type: string - default: '' - - name: namespace - description: | - Target K8s namespace (or OpenShift project) to deploy into. - If empty, the task will be a no-op. - type: string - default: '' - - name: registry-host - description: | - Hostname of the target registry to push images to. - If not given, the registry host of the source image is used. - type: string - default: '' - - name: diff-only - description: | - If set to true, the task will only perform a diff, and then stop. - No images will be promoted or upgrades attempted. - type: string - default: 'false' - steps: - - name: helm-upgrade-from-repo - # Image is built from build/package/Dockerfile.helm. - image: '{{.Values.imageRepository}}/ods-helm:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - - name: HOME - value: '/tekton/home' - resources: {} - script: | - # deploy-helm is built from /cmd/deploy-helm/main.go. - deploy-helm \ - -chart-dir=$(params.chart-dir) \ - -namespace=$(params.namespace) \ - -release-name=$(params.release-name) \ - -diff-flags="$(params.diff-flags)" \ - -upgrade-flags="$(params.upgrade-flags)" \ - -age-key-secret=$(params.age-key-secret) \ - -api-server=$(params.api-server) \ - -api-credentials-secret=$(params.api-credentials-secret) \ - -registry-host=$(params.registry-host) \ - -diff-only=$(params.diff-only) - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source -{{end}} diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-package-image.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-package-image.yaml deleted file mode 100644 index 6bbab6a6..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-package-image.yaml +++ /dev/null @@ -1,192 +0,0 @@ -{{if .Values.global.enabledTasks.packageImage }} -apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' -metadata: - name: '{{default "ods" .Values.taskPrefix}}-package-image{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Packages applications into container images using buildah. - - See https://github.com/opendevstack/ods-pipeline/blob/v{{.Chart.AppVersion}}/docs/tasks/ods-package-image.adoc - params: - - name: registry - description: Image registry to push image to. - type: string - default: '{{default .Values.pushRegistry}}' - - name: image-stream - description: Reference of the image stream buildah will produce. If not set, the value of `.ods/component` is used. - type: string - default: '' - - name: extra-tags - description: Additional image tags (e.g. 'latest dev') for pushed images. The primary tag is based on the commit sha. Only tags currently missing from the image will be added. - type: string # Wanted to use an array but ran into [Cannot refer array params in script #4912](https://github.com/tektoncd/pipeline/issues/4912) - default: '' - - name: storage-driver - description: Set buildah storage driver.
- type: string - default: vfs - - name: dockerfile - description: Path to the Dockerfile to build (relative to `docker-dir`). - type: string - default: ./Dockerfile - - name: docker-dir - description: Path to the directory to use as Docker context. - type: string - default: '.' - - name: format - description: 'The format of the built container, `oci` or `docker`.' - type: string - default: oci - - name: buildah-build-extra-args - description: Extra parameters passed for the build command when building images (e.g. '--build-arg=firstArg=one --build-arg=secondArg=two'). - type: string - default: '' - - name: buildah-push-extra-args - description: Extra parameters passed for the push command when pushing images. - type: string - default: '' - - name: trivy-sbom-extra-args - description: Extra parameters passed for the trivy command to generate an SBOM. - type: string - default: '' - - name: aqua-gate - description: Whether the Aqua security scan needs to pass for the task to succeed. - type: string - default: "false" - results: - - description: Digest of the image just built. - name: image-digest - steps: - - name: package-image - # Image is built from build/package/Dockerfile.package-image. - image: '{{.Values.imageRepository}}/ods-package-image:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - - # ods-package-image is built from cmd/package-image/main.go. - ods-package-image \ - -image-stream=$(params.image-stream) \ - -extra-tags=$(params.extra-tags) \ - -registry=$(params.registry) \ - -storage-driver=$(params.storage-driver) \ - -format=$(params.format) \ - -dockerfile=$(params.dockerfile) \ - -context-dir=$(params.docker-dir) \ - -buildah-build-extra-args=$(params.buildah-build-extra-args) \ - -buildah-push-extra-args=$(params.buildah-push-extra-args) \ - -trivy-sbom-extra-args=$(params.trivy-sbom-extra-args) - - # As this task does not run under uid 1001, chown created artifacts - # to make them deletable by ods-start's cleanup procedure. - chown -R 1001:0 .ods/artifacts/image-digests .ods/artifacts/sboms - securityContext: - capabilities: - add: - - SETFCAP - volumeMounts: - - mountPath: /var/lib/containers - name: varlibcontainers - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - - name: aqua-scan - # Image is built from build/package/Dockerfile.aqua-scan.
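- # Note: this step skips itself at runtime unless AQUA_SCANNER_URL starts with 'http' (see the script below).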
- image: '{{.Values.imageRepository}}/ods-aqua-scan:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: HOME - value: '/tekton/home' - - name: BITBUCKET_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-bitbucket - - name: BITBUCKET_ACCESS_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-bitbucket-auth - - name: AQUA_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-aqua - - name: AQUA_REGISTRY - valueFrom: - configMapKeyRef: - key: registry - name: ods-aqua - - name: AQUA_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-aqua-auth - - name: AQUA_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-aqua-auth - - name: AQUA_SCANNER_URL - valueFrom: - secretKeyRef: - key: secret - name: ods-aqua-scanner-url - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "${AQUA_SCANNER_URL:0:4}" != "http" ]; then - echo "Skipping Aqua scan" - else - download-aqua-scanner \ - --aqua-scanner-url=${AQUA_SCANNER_URL} \ - $(case ${DEBUG} in (true) printf -- '--debug'; esac) - - # ods-aqua-scan is built from cmd/aqua-scan/main.go. - ods-aqua-scan \ - -image-stream=$(params.image-stream) \ - -aqua-gate=$(params.aqua-gate) - fi - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - emptyDir: {} - name: varlibcontainers - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source -{{end}} diff --git a/deploy/ods-pipeline/charts/tasks/values.docs.yaml b/deploy/ods-pipeline/charts/tasks/values.docs.yaml deleted file mode 100644 index 1640798d..00000000 --- a/deploy/ods-pipeline/charts/tasks/values.docs.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. -global: - taskSuffix: '' - enabledTasks: - buildGo: true - buildGradle: true - buildPython: true - buildNPM: true - packageImage: true - deployHelm: true diff --git a/deploy/ods-pipeline/charts/tasks/values.yaml b/deploy/ods-pipeline/charts/tasks/values.yaml deleted file mode 100644 index 47b0c55b..00000000 --- a/deploy/ods-pipeline/charts/tasks/values.yaml +++ /dev/null @@ -1,5 +0,0 @@ -# override name to be consistent with previous, separate chart naming convention(s) -nameOverride: ods-pipeline - -imageRepository: ghcr.io/opendevstack/ods-pipeline -pushRegistry: image-registry.openshift-image-registry.svc:5000 diff --git a/deploy/ods-pipeline/values.kind.yaml b/deploy/ods-pipeline/values.kind.yaml deleted file mode 100644 index 80011275..00000000 --- a/deploy/ods-pipeline/values.kind.yaml +++ /dev/null @@ -1,22 +0,0 @@ -global: - imageTag: latest - taskSuffix: '' - -setup: - # Cluster - consoleUrl: 'http://example.com' - # Pipeline Manager - pipelineManager: - storageProvisioner: '' - storageClassName: 'standard' - storageSize: '2Gi' - imageRepository: localhost:5000/ods - imageTag: 'latest' - -tasks: - # Image repository to pull task images from. - # To test with the latest public ods-pipeline images, set - # global.imageTag to 'latest' and use: 'ghcr.io/opendevstack/ods-pipeline'. 
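- # For example: imageRepository: ghcr.io/opendevstack/ods-pipeline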
- imageRepository: localhost:5000/ods - - pushRegistry: kind-registry.kind:5000 diff --git a/deploy/ods-pipeline/values.yaml b/deploy/ods-pipeline/values.yaml deleted file mode 100644 index a865efa8..00000000 --- a/deploy/ods-pipeline/values.yaml +++ /dev/null @@ -1,204 +0,0 @@ -# ----------------------- Installation hint ----------------------- -# !!! Important !!! -# This is the default values file - if you're editing this as -# part of the ODS pipeline installation you're in the wrong file! -# -# Please open ../values.yaml (the file you have created by making -# a copy of ../values.yaml.tmpl) and do your changes there. -# ----------------------- Installation hint ----------------------- - -# ####################################### # -# UMBRELLA # -# ####################################### # -global: - # Image tag to use for images referenced by tasks (defaults to the chart appVersion). - # imageTag: '' - # Suffix to append to the task name. If not set, the suffix will be computed - # from the chart appVersion in the form "-vMAJOR-MINOR-PATCH". - # taskSuffix: -latest - # Custom task kind (defaults to "Task") - # taskKind: "ClusterTask" - # enabledTasks controls which tasks will be installed. Set the tasks you do - # not want to install to false. - enabledTasks: - buildGo: true - buildGradle: true - buildPython: true - buildNPM: true - packageImage: true - deployHelm: true - - - -# ####################################### # -# SETUP CHART CONFIG # -# ####################################### # -setup: - # enable configuration and management chart - enabled: true - - # General - # Serviceaccount name to use for pipeline resources. - serviceAccountName: 'pipeline' - # Whether to enable debug mode - debug: 'false' - - # Bitbucket - # Bitbucket URL (including scheme, without trailing slash). - # Example: https://bitbucket.example.com. - bitbucketUrl: '' - # Bitbucket username. Example: cd_user. - bitbucketUsername: '' - - # Nexus - # Nexus URL (including scheme, without trailing slash). - # Example: https://nexus.example.com. - nexusUrl: '' - # Nexus username. Example: developer. - nexusUsername: '' - - # Sonar - # SonarQube URL (including scheme, without trailing slash). - # Example: https://sonarqube.example.com. - sonarUrl: '' - # SonarQube edition. Valid options: 'community', 'developer', 'enterprise' or 'datacenter' - sonarEdition: 'community' - - # Aqua - # Aqua URL (including scheme, without trailing slash). - # Example: https://aqua.example.com. - # Leave empty when not using Aqua. - aquaUrl: '' - # Aqua registry name. - # Leave empty when not using Aqua. - aquaRegistry: '' - # Aqua username. Example: developer. - # Leave empty when not using Aqua. - aquaUsername: '' - - # Cluster - # URL (including scheme, without trailing slash) of the OpenShift Web Console. - consoleUrl: 'http://example.com' - - # Notification Webhook - notification: - # Whether notifications should be sent to the URL specified below or not. - enabled: false - # URL of the configured webhook - url: 'http://example.com' - # The HTTP method to be used - method: 'POST' - # The HTTP content type header - contentType: 'application/json' - # Specify the outcomes you want to be notified of (allowed values: c.f.
# https://tekton.dev/docs/pipelines/pipelines/#using-aggregate-execution-status-of-all-tasks) - notifyOnStatus: - - 'Failed' - # Template to be processed and accepted by the configured webhook in use - # Below example might work for Microsoft Teams - requestTemplate: |- - { - "@type": "MessageCard", - "@context": "http://schema.org/extensions", - "themeColor": {{if eq .OverallStatus "Succeeded"}}"237b4b"{{else}}"c4314b"{{ end }}, - "summary": "{{.ODSContext.Project}} - ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}", - "sections": [ - { - "activityTitle": "ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}", - "activitySubtitle": "On Project {{.ODSContext.Project}}", - "activityImage": "https://avatars.githubusercontent.com/u/38974438?s=200&v=4", - "facts": [ - { - "name": "GitRef", - "value": "{{.ODSContext.GitRef}}" - } - ], - "markdown": true - } - ], - "potentialAction": [ - { - "@type": "OpenUri", - "name": "Go to PipelineRun", - "targets": [ - { - "os": "default", - "uri": "{{.PipelineRunURL}}" - } - ] - } - ] - } - - # Pipeline(Run) Pruning - # Minimum hours to keep a pipeline run. Has precedence over pipelineRunMaxKeepRuns. - # Must be at least 1. - pipelineRunMinKeepHours: '48' - # Maximum number of pipeline runs to keep. - # Must be at least 1. - pipelineRunMaxKeepRuns: '20' - - # Pipeline Manager - pipelineManager: - # PVC (used for the pipeline workspace) - # Storage provisioner. On AWS backed clusters, use 'kubernetes.io/aws-ebs'. - storageProvisioner: 'kubernetes.io/aws-ebs' - # Storage class. On AWS backed clusters, use 'gp2'. - storageClassName: 'gp2' - # Storage size. Defaults to 2Gi unless set explicitly here. - storageSize: '5Gi' - # Number of replicas to run for the pipeline manager. - replicaCount: 1 - # Repository from which to pull the pipeline manager container image. - imageRepository: ghcr.io/opendevstack/ods-pipeline - # Deployment pod resources. Typically these settings should not need to change. - resources: - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi - - - -# ####################################### # -# TASK CHART CONFIG # -# ####################################### # -tasks: - # enable task definition chart - enabled: true - - # Image repository to pull task images from. - imageRepository: ghcr.io/opendevstack/ods-pipeline - - # Custom task prefix (defaults to "ods") - # taskPrefix: "foo" - - # Registry to push images to from ods-package-image task. - pushRegistry: image-registry.openshift-image-registry.svc:5000 - - # To define build task specific sidecars and quotas, add resources/sidecar section(s) per task, - # e.g. - # - # go: - # # define custom resource quotas for the go build task - # resources: - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - # sidecars: - # # sidecars added to go build task - # - workspaces: null - # image: postgres - # name: postgres-sidecar - # resources: - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi diff --git a/deploy/values.yaml.tmpl b/deploy/values.yaml.tmpl index bbc1e4b6..a85ee168 100644 --- a/deploy/values.yaml.tmpl +++ b/deploy/values.yaml.tmpl @@ -6,48 +6,22 @@ # All of these configuration options are set to a default value in ods-pipeline/values.yaml. # If you want to override something there, copy the field here and configure as needed. -global: - # Configure which tasks should be installed.
Tasks set to "false" will - # not be part of the installation and cannot be referenced from ods.yaml. - enabledTasks: - buildGo: false - buildGradle: false - buildPython: false - buildNPM: false - packageImage: true - deployHelm: true +# Bitbucket URL (including scheme, without trailing slash). +# Example: https://bitbucket.example.com. +bitbucketUrl: '' -setup: - # Bitbucket URL (including scheme, without trailing slash). - # Example: https://bitbucket.example.com. - bitbucketUrl: '' +# Nexus URL (including scheme, without trailing slash). +# Example: https://nexus.example.com. +nexusUrl: '' - # Nexus URL (including scheme, without trailing slash). - # Example: https://nexus.example.com. - nexusUrl: '' +# OpenShift Web Console URL (including scheme, without trailing slash). +# Example: https://console-openshift-console.apps.foo.tftp.p1.openshiftapps.com. +consoleUrl: '' - # SonarQube URL (including scheme, without trailing slash). - # Example: https://sonarqube.example.com. - sonarUrl: '' - # SonarQube edition. Valid options: 'community', 'developer', 'enterprise' or 'datacenter' - sonarEdition: 'community' - - # Aqua URL (including scheme, without trailing slash). - # Example: https://aqua.example.com. - # Leave empty when not using Aqua. - aquaUrl: '' - # Aqua registry name. - # Leave empty when not using Aqua. - aquaRegistry: '' - - # OpenShift Web Console URL (including scheme, without trailing slash). - # Example: https://console-openshift-console.apps.foo.tftp.p1.openshiftapps.com. - consoleUrl: '' - - # Pipeline Manager - pipelineManager: - # PVC (used for the pipeline workspace) - # Storage provisioner. On AWS backed clusters, use 'kubernetes.io/aws-ebs'. - storageProvisioner: 'kubernetes.io/aws-ebs' - # Storage class. On AWS backed clusters, use 'gp2'. - storageClassName: 'gp2' +# Pipeline Manager +pipelineManager: + # PVC (used for the pipeline workspace) + # Storage provisioner. On AWS backed clusters, use 'kubernetes.io/aws-ebs'. + storageProvisioner: 'kubernetes.io/aws-ebs' + # Storage class. On AWS backed clusters, use 'gp2'. + storageClassName: 'gp2' diff --git a/docs/add-to-repository.adoc b/docs/add-to-repository.adoc index 36e600f8..7031c0cd 100644 --- a/docs/add-to-repository.adoc +++ b/docs/add-to-repository.adoc @@ -22,18 +22,21 @@ pipeline: - tasks: - name: build taskRef: - kind: Task - name: ods-build-go-v0-13-2 + resolver: git + params: + - { name: url, value: https://github.com/opendevstack/ods-pipeline-go.git } + - { name: revision, value: v0.13.2 } + - { name: pathInRepo, value: tasks/build.yaml } workspaces: - name: source workspace: shared-workspace ---- -`ods-build-go-v0-13-2` runs tests and produces a Go binary. If you use another technology, e.g. a Java project using Gradle, exchange the task with `ods-build-gradle-v0-13-2`. See the link:tasks/[tasks reference] for available tasks. +The task defined in `tasks/build.yaml` in the `ods-pipeline-go` repository runs tests and produces a Go binary. If you use another technology, e.g. a Java project using Gradle, exchange the task with one fitting your needs.
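For instance, a hypothetical `ods-pipeline-gradle` repository could be referenced in the same way (the repository name and task path below are assumptions for illustration, not defined by this document):

[source,yaml]
----
- name: build
  taskRef:
    resolver: git
    params:
      - { name: url, value: https://github.com/opendevstack/ods-pipeline-gradle.git }
      - { name: revision, value: v0.13.2 }
      - { name: pathInRepo, value: tasks/build.yaml }
  workspaces:
    - name: source
      workspace: shared-workspace
----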
=== (Optionally) Create `Dockerfile` -If you want to create a container image with the produced Go binary, you can add the `ods-package-image-v0-13-2` task to the `tasks` list, like this: +If you want to create a container image with the produced Go binary, you can add the `ods-pipeline-buildah-package` task to the `tasks` list, like this: .ods.yaml [source,yaml] @@ -42,15 +45,21 @@ pipeline: - tasks: - name: build taskRef: - kind: Task - name: ods-build-go-v0-13-2 + resolver: git + params: + - { name: url, value: https://github.com/opendevstack/ods-pipeline-go.git } + - { name: revision, value: v0.13.2 } + - { name: pathInRepo, value: tasks/build.yaml } workspaces: - name: source workspace: shared-workspace - name: package taskRef: - kind: Task - name: ods-package-image-v0-13-2 + resolver: git + params: + - { name: url, value: https://github.com/opendevstack/ods-pipeline-buildah.git } + - { name: revision, value: v0.13.2 } + - { name: pathInRepo, value: tasks/package.yaml } runAfter: - build workspaces: @@ -72,13 +81,13 @@ EXPOSE 8080 CMD ["./app"] ---- -NOTE: `ods-build-go-v0-13-2` produces a binary called `app` and places it at `docker/app` so that it can be referenced in the `Dockerfile` and copied into the image by the `ods-package-image-v0-13-2` task. +NOTE: `ods-pipeline-go-build` produces a binary called `app` and places it at `docker/app` so that it can be referenced in the `Dockerfile` and copied into the image by the `ods-pipeline-buildah-package` task. NOTE: Pay attention to the `runAfter` configuration in the task list: it ensures that the tasks run sequentially and can use the outputs from the previous task(s). === (Optionally) Create Helm Chart -If you want to deploy the created image, you can add the `ods-deploy-helm-v0-13-2` task to the `tasks` list and configure which Kubernetes namespaces to deploy into, like this: +If you want to deploy the created image, you can add the `ods-pipeline-helm-deploy` task to the `tasks` list and configure which Kubernetes namespaces to deploy into, like this: .ods.yaml [source,yaml] @@ -88,15 +97,21 @@ pipeline: tasks: - name: build taskRef: - kind: Task - name: ods-build-go-v0-13-2 + resolver: git + params: + - { name: url, value: https://github.com/opendevstack/ods-pipeline-go.git } + - { name: revision, value: v0.13.2 } + - { name: pathInRepo, value: tasks/build.yaml } workspaces: - name: source workspace: shared-workspace - name: package taskRef: - kind: Task - name: ods-package-image-v0-13-2 + resolver: git + params: + - { name: url, value: https://github.com/opendevstack/ods-pipeline-buildah.git } + - { name: revision, value: v0.13.2 } + - { name: pathInRepo, value: tasks/package.yaml } runAfter: - build workspaces: @@ -104,8 +119,11 @@ pipeline: workspace: shared-workspace - name: deploy taskRef: - kind: Task - name: ods-deploy-helm-v0-13-2 + resolver: git + params: + - { name: url, value: https://github.com/opendevstack/ods-pipeline-helm.git } + - { name: revision, value: v0.13.2 } + - { name: pathInRepo, value: tasks/deploy.yaml } params: - name: namespace value: foo-dev diff --git a/docs/architecture/component-namespaced-installation.puml b/docs/architecture/component-namespaced-installation.puml index 21667d96..324f203a 100644 --- a/docs/architecture/component-namespaced-installation.puml +++ b/docs/architecture/component-namespaced-installation.puml @@ -16,27 +16,13 @@ Container_Boundary(c1, "Namespaced ODS Pipeline Installation"){ } Boundary(tasks, "Tasks") { - Component(task_build_go, "ods-build-go", "Task", "Builds Go 
(module) applications") - Component(task_build_npm, "ods-build-npm", "Task", "Builds Node.js based applications using npm") - Component(task_build_python, "ods-build-python", "Task", "Builds Python applications") - Component(task_build_gradle, "ods-build-gradle", "Task", "Builds JDK based applications using Gradle") - Component(task_package_image, "ods-package-image", "Task", "Packages container images") - Component(task_deploy_helm, "ods-deploy-helm", "Task", "Deploys Helm charts") - Component(task_start, "ods-start", "Task", "Starts pipeline run (checkout repository, set build status, download artifacts, ...)") - Component(task_finish, "ods-finish", "Task", "Finishes pipeline run (set build status, upload artifacts, ...)") + Component(task_start, "ods-pipeline-start", "Task", "Starts pipeline run (checkout repository, set build status, download artifacts, ...)") + Component(task_finish, "ods-pipeline-finish", "Task", "Finishes pipeline run (set build status, upload artifacts, ...)") } Boundary(images, "Images") { - Component(image_go_toolset, "ods/go-toolset", "Container Image", "Go, golangci-lint, build script") - Component(image_node16_npm_toolset, "ods/node16-npm-toolset", "Container Image", "Node.js 16, npm, build script") - Component(image_node18_npm_toolset, "ods/node18-npm-toolset", "Container Image", "Node.js 18, npm, build script") - Component(image_python_toolset, "ods/python-toolset", "Container Image", "Python, build script") - Component(image_gradle_toolset, "ods/gradle-toolset", "Container Image", "JDK, Gradle, build script") - Component(image_buildah, "ods/buildah", "Container Image", "Buildah, Skopeo, Aqua scanner") - Component(image_helm, "ods/helm", "Container Image", "Helm, Skopeo, deploy script") - Component(image_start, "ods/start", "Container Image", "Git, start script") - Component(image_finish, "ods/finish", "Container Image", "Finish script") - Component(image_sonar, "ods/sonar", "Container Image", "sonar-scanner") + Component(image_start, "ods-pipeline/start", "Container Image", "Git, start script") + Component(image_finish, "ods-pipeline/finish", "Container Image", "Finish script") Component(image_pipeline_manager, "ods/pipeline-manager", "Container Image", "Webhook receiver and pipeline manager") } diff --git a/docs/authoring-tasks.adoc b/docs/authoring-tasks.adoc index 0ad602a0..f825831c 100644 --- a/docs/authoring-tasks.adoc +++ b/docs/authoring-tasks.adoc @@ -102,9 +102,7 @@ In theory you can use pretty much any image that works in OpenShift (e.g. the im === How do I create my own container image to use in a task? -In OpenShift, the easiest way is by creating an `ImageStream` and a `BuildConfig`. See the link:https://docs.openshift.com/container-platform/latest/cicd/builds/understanding-image-builds.html[OpenShift documentation on builds] for more information. You may also use the YAML definitions in `deploy/ods-pipeline/charts/images` as an example. - -Occasionally, you might want to extend the images used in an official tasks, e.g. to deploy additional CA certificates, configure proxy settings, etc. The `images` subchart of `ods-pipeline` provides build configurations that allow you to create images that are based on the official `ods-pipeline` images from ghcr.io. The build configurations include inline Dockerfiles that you can adjust to suit your specific needs. +In OpenShift, the easiest way is by creating an `ImageStream` and a `BuildConfig`. 
See the link:https://docs.openshift.com/container-platform/latest/cicd/builds/understanding-image-builds.html[OpenShift documentation on builds] for more information. === How can I test my tasks? diff --git a/docs/design/software-architecture.adoc b/docs/design/software-architecture.adoc index b9c7ea9b..c0bbf23b 100644 --- a/docs/design/software-architecture.adoc +++ b/docs/design/software-architecture.adoc @@ -49,79 +49,37 @@ image::http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.githubuse | ODS Pipeline Installation | Endpoint is an exposed event listener. -| Task `ods-start` +| Task `ods-pipeline-start` | Checkout repository | Git | Bitbucket | -| Task `ods-start` +| Task `ods-pipeline-start` | Set build status | HTTP / JSON API | Bitbucket | -| Task `ods-start` +| Task `ods-pipeline-start` | Download existing artifacts | HTTP / JSON API | Nexus | -| Task `ods-build-go`, task `ods-build-python`, task `ods-build-npm`, task `ods-build-gradle` -| Analyze source code -| HTTP / JSON API -| SonarQube -| - -| Task `ods-build-python`, task `ods-build-npm`, task `ods-build-gradle` -| Download dependencies -| HTTP -| Nexus -| Nexus serves as a proxy for third party dependencies (e.g. hosted on npm), and serves private dependencies hosted in Nexus as well. - -| Task `ods-package-image` -| Scan for vulnerabilities -| HTTP / JSON API -| Aqua -| - -| Task `ods-package-image` -| Push image -| HTTP -| Container registry (e.g. OpenShift ImageStream) -| - -| Task `ods-package-image` -| Tag image -| HTTP -| Container registry (e.g. OpenShift ImageStream) -| - -| Task `ods-deploy-helm` -| Copy image -| HTTP -| Container registry (e.g. OpenShift ImageStream) -| - -| Task `ods-deploy-helm` -| Install/Upgrade release -| HTTP / JSON API -| OpenShift project -| - -| Task `ods-finish` +| Task `ods-pipeline-finish` | Set build status | HTTP / JSON API | Bitbucket | -| Task `ods-finish` +| Task `ods-pipeline-finish` | Upload artifacts | HTTP / JSON API | Nexus | -| Task `ods-finish` +| Task `ods-pipeline-finish` | Send notifications | HTTP / API | Configured notification service @@ -156,7 +114,7 @@ ODS Pipeline runs on Red Hat OpenShift Container Platform, and is embedded into a There is one major deployment (typically in the project's cd-namespace) of the system managed by project administrators. -The installation provides the tasks that projects can consume as `Task` resources. It allows triggering pipelines in response to Bitbucket events. +The installation provides a pipeline manager, which allows triggering pipelines in response to Bitbucket events, and start and finish tasks that do some plumbing for those pipelines, such as setting the Bitbucket build status. The deployment is described in a Helm chart. For details, see the link:../installation.adoc[Installation Guide]. diff --git a/docs/design/software-design-specification.adoc b/docs/design/software-design-specification.adoc index de7a45fe..91188bdf 100644 --- a/docs/design/software-design-specification.adoc +++ b/docs/design/software-design-specification.adoc @@ -22,31 +22,12 @@ As described in the architecture, the system is installed into local namespaces. ==== ODS Pipeline Installation -===== Shared `ods-sonar` image +===== Build skipping / cache scripts -[cols="1,1,3"] -|=== -| SDS-SHARED-1 -| `ods-sonar` container image -| Container image for SQ scanning. Based on `ubi8/nodejs-18` (SDS-EXT-32), includes software to analyze source code statically (SDS-SHARED-2, SDS-EXT-7, SDS-EXT-8 and SDS-EXT-30).
- -| SDS-SHARED-2 -| `sonar` binary -a| Logic of SQ scanning. It runs `sonar-scanner` (SDS-EXT-7) on the sources, communicating with the SonarQube server specified by the `ods-sonar` config map and the `ods-sonar-auth` secret. After scanning, reports are generated using `cnes-report` (SDS-EXT-8) unless the scan is against a pull request. `cnes-report` is not compatible with PR scans, and reports are not needed for pull requests anyway as the evidence they provide is only needed for long-lived branches. - -The project name is fixed to `-`. - -If the server edition supports it, the branch parameter shall be set, unless the branch being built belongs to an open PR, in which case PR analysis parameter shall be sent instead. -|=== - -===== Shared scripts +These scripts may be used by build tasks to skip doing work when no changes have been made compared to a previous task run. [cols="1,1,3"] |=== -| SDS-SHARED-3 -| `supply-sonar-project-properties-default.sh` shell script -| Checks for `sonar-project.properties` file in the working directory. If that does not exist, the default properties file supplied in the container image is copied into the working directory. - | SDS-SHARED-4 | `cache-build.sh` shell script a| Caches a build's outputs and ods artifacts to the `build-task` cache area. @@ -108,119 +89,18 @@ Input parameters: |=== - -===== `ods-build-go` task - -[cols="1,1,3"] -|=== -| SDS-TASK-1 -| `ods-build-go` Task resource -a| The task defines two steps: - -. Build Go (module) applications (referencing SDS-TASK-2 and executing SDS-TASK-3). - This step supports build skipping (executing SDS-SHARED-5 and/or SDS-SHARED-4 if enabled with parameter `cache-build`) -. Analyze source code (referencing SDS-SHARED-1 and executing SDS-SHARED-2) - -Input parameters: - -* `working-dir`: allows customizing which directory is used as the Go module root. If set, artifacts are prefixed with `-`, and the SQ project is suffixed with `-`. -* `enable-cgo`: allows enabling `CGO` -* `go-os`: sets target operating system (`GOOS`) -* `go-arch`: sets target architecture (`GOARCH`) -* `output-dir`: sets destination directory of built binary -* `pre-test-script`: specifies script to run prior to tests -* `build-script`: specifies path to build script -* `sonar-quality-gate`: enables quality gate check -* `sonar-skip`: skips SonarQube analysis -* `cache-build`: if 'true' build skipping is enabled. -* `build-extra-inputs`: list of build source directories (as colon separated string) which influence the build in addition to the files in `working-dir`. - These directories are relative to the repository root. - -| SDS-TASK-2 -| `ods-go-toolset` container image -| Container image for building Go applications. Based on `ubi8/go-toolset` (SDS-EXT-25), includes SDS-EXT-4, SDS-EXT-5, SDS-SHARED-3, SDS-TASK-3 and SDS-TASK-25. - -| SDS-TASK-3 -| `build-go.sh` shell script -a| The go module cache is configured to be on the cache location of the PVC by setting environment variable `GOMODCACHE` to `.ods-cache/deps/gomod` (see https://go.dev/ref/mod#module-cache) - -Runs `gofmt` (SDS-EXT-3) to check all Go files are formatted. - -Runs `golangci-lint` (SDS-EXT-4) to check if there are any lint errors. A report is placed into `.ods/artifacts/lint-reports`. - -If the `pre-test-script` is set, it executes the given script before running tests. - -Runs `go test`, excluding the `vendor` directory, creating code coverage and xUnit report (using SDS-EXT-5).
The artifacts are placed in the working directory and in `.ods/artifacts/code-coverage` and `.ods/artifacts/xunit-reports`, respectively. If the artifacts are already found in `.ods/artifacts`, then testing is skipped and the artifacts are copied to the working directory to expose them to SonarQube. - -Builds Go application (using SDS-EXT-3, optionally SDS-EXT-6) into specified output directory. - -Supplies default SonarQube project properties file if required (SDS-SHARED-3). - -| SDS-TASK-25 -| `go.properties` properties file -| Default configuration for Go SonarQube project. -|=== - -===== `ods-build-gradle` task - -[cols="1,1,3"] -|=== -| SDS-TASK-4 -| `ods-build-gradle` Task resource -a| The task defines two steps: - -. Build Gradle module (referencing SDS-TASK-5 and executing SDS-TASK-6) -. Analyze source code (referencing SDS-SHARED-1 and executing SDS-SHARED-2) - -Input parameters: - -* `working-dir`: allows customizing which directory is used as the Gradle module root. If set, artifacts are prefixed with `-`, and the SQ project is suffixed with `-`. -* `gradle-additional-tasks`: additional gradle tasks to be passed to the gradle build -* `gradle-options`: options to be passed to the gradle build -* `output-dir`: sets destination directory of built binary -* `build-script`: specifies path to build script -* `sonar-quality-gate`: enables quality gate check -* `sonar-skip`: skips SonarQube analysis -* `cache-build`: if 'true' build skipping is enabled. -* `build-extra-inputs`: list of build source directories (as colon separated string) which influence the build in addition to the files in `working-dir`. - These directories are relative to the repository root. -* `cached-outputs`: specifies directories to be cached in case the builds sources don't change. These directories are relative to `working-dir`. - -| SDS-TASK-5 -| `ods-gradle-toolset` container image -| Container image for building Gradle modules. Based on `ubi8/openjdk-17` (SDS-EXT-11), includes SDS-EXT-12, SDS-EXT-30, SDS-SHARED-3, SDS-TASK-6 and SDS-TASK-26. - -| SDS-TASK-6 -| `build-gradle.sh` shell script -a| Builds a Gradle module that provides a gradle build script into `docker/app.jar`. - -The destination directory can be changed by exporting the environment variable `ODS_OUTPUT_DIR`. - -Runs `gradlew clean build` to build the Gradle module, using options and additional tasks as passed from SDS-TASK-4. - -Generated unit test reports are placed in the working directory (for SonarQube to pick them up) and copied into `.ods/artifacts/xunit-reports`. - -Generated unit test coverage report are placed in the working directory (for SonarQube to pick them up) and copied into `.ods/artifacts/code-coverage`. - -Supplies default SonarQube project properties file if required (SDS-SHARED-3). - -| SDS-TASK-26 -| `gradle.properties` properties file -| Default configuration for Gradle SonarQube project. -|=== - -===== `ods-start` task +===== `ods-pipeline-start` task [cols="1,1,3"] |=== | SDS-TASK-7 -| `ods-start` Task resource +| `ods-pipeline-start` Task resource a| Task to start pipeline. References SDS-TASK-8 and executes SDS-TASK-9. Input parameters: TODO | SDS-TASK-8 -| `ods-start` container image +| `ods-pipeline-start` container image | Container image to start a pipeline. Based on `ubi8/ubi-minimal` (SDS-EXT-2), includes SDS-EXT-9, SDS-EXT-13, SDS-EXT-27, SDS-EXT-30 and SDS-TASK-9. 
| SDS-TASK-9
@@ -239,18 +119,18 @@ If the `artifact-source` parameter is given, any artifacts in the referenced Nex

The Bitbucket build status of the commit being built is set to "in progress". The build status links back to the pipeline run.
|===

-===== `ods-finish` task
+===== `ods-pipeline-finish` task

[cols="1,1,3"]
|===
| SDS-TASK-10
-| `ods-finish` Task resource
+| `ods-pipeline-finish` Task resource
a| Task to finish pipeline. References SDS-TASK-11 and executes SDS-TASK-12.

Input parameters: TODO

| SDS-TASK-11
-| `ods-finish` container image
+| `ods-pipeline-finish` container image
-| Container image to start a pipeline. Based on `ubi8/ubi-minimal` (SDS-EXT-2), includes SDS-EXT-30 and SDS-TASK-12.
+| Container image to finish a pipeline. Based on `ubi8/ubi-minimal` (SDS-EXT-2), includes SDS-EXT-30 and SDS-TASK-12.

| SDS-TASK-12
@@ -269,175 +149,6 @@ Status notification message, webhook URL, content type, HTTP method, and trigger

|===

-===== `ods-build-python` task
-
-[cols="1,1,3"]
-|===
-| SDS-TASK-13
-| `ods-build-python` Task resource
-a| The task defines two steps:
-
-. Build Python applications (referencing SDS-TASK-14 and executing SDS-TASK-15). This step supports build skipping (executing SDS-SHARED-5 and/or SDS-SHARED-4 if enabled with parameter `cache-build`)
-. Analyze source code (referencing SDS-SHARED-1 and executing SDS-SHARED-2)
-
-Input parameters:
-
-* `build-script`: specifies path to build script
-
-| SDS-TASK-14
-| `ods-python-toolset` container image
-| Container image to build Python applications. Based on `ubi8/python-39` (SDS-EXT-28), includes SDS-SHARED-3, SDS-TASK-15 and SDS-TASK-27.
-
-| SDS-TASK-15
-| `build-python.sh` shell script
-a| Runs `mypy` and `flake8` to lint source code and fails if there are any findings. The maximum allowed line length defaults to 120 can be set by the `max-line-length` task parameter.
-
-If the `pre-test-script` is set, it executes the given script before running tests.
-
-Runs `pytest`, creating code coverage and xUnit reports. The artifacts are placed in the working directory and in `.ods/artifacts/code-coverage` and `.ods/artifacts/xunit-reports`, respectively.
-
-Supplies default SonarQube project properties file if required (SDS-SHARED-3).
-
-| SDS-TASK-27
-| `python.properties` properties file
-| Default configuration for Python SonarQube project.
-|===
-
-===== `ods-build-npm` task
-
-[cols="1,1,3"]
-|===
-| SDS-TASK-16
-| `ods-build-npm` Task resource
-a| The task defines two steps:
-
-. Build Node.js applications using npm (referencing SDS-TASK-17 or SDS-TASK-30 and executing SDS-TASK-18). This step supports build skipping (executing SDS-SHARED-5 and/or SDS-SHARED-4 if enabled with parameter `cache-build`)
-. Analyze source code (referencing SDS-SHARED-1 and executing SDS-SHARED-2)
-
-Input parameters:
-
-* `working-dir`: allows customizing which directory is used as the Node.js module root. If set, artifacts are prefixed with `-`, and the SQ project is suffixed with `-`.
-* `max-lint-warnings`: maximum of allowed linting warnings after which eslint will exit with an error
-* `lint-file-ext`: file extensions to lint
-* `build-script`: specifies path to build script
-* `sonar-quality-gate`: enables quality gate check
-* `sonar-skip`: skips SonarQube analysis
-* `cache-build`: if 'true' build skipping is enabled.
-* `build-extra-inputs`: list of build source directories (as colon separated string) which influence the build in addition to the files in `working-dir`. These directories are relative to the repository root.
-* `cached-outputs`: specifies directories to be cached in case the builds sources don't change.
These directories are relative to `working-dir`. Defaults to 'dist' - -| SDS-TASK-17 -| `ods-node16-npm-toolset` container image -| Container image to build Node.js applications using npm. Based on `ubi8/nodejs-16` (SDS-EXT-26), includes SDS-SHARED-3, SDS-TASK-18 and SDS-TASK-28. - -| SDS-TASK-18 -| `build-npm.sh` shell script -a| Checks that package.json and package-lock.json exist to require best practice of using lock files. See also https://github.com/opendevstack/ods-pipeline/discussions/411 - -Runs `npm run lint` to lint the source code. If there are any errors or warnings, the script should exit with a non-zero exit code. - -Runs `npm run test`, creating code coverage and xUnit reports. The artifacts are placed in the working directory and in `.ods/artifacts/code-coverage` and `.ods/artifacts/xunit-reports`, respectively. - -Supplies default SonarQube project properties file if required (SDS-SHARED-3). - -| SDS-TASK-28 -| `npm.properties` properties file -| Default configuration for npm SonarQube project. - -| SDS-TASK-30 -| `ods-node18-npm-toolset` container image -| Container image to build Node.js applications using npm. Based on `ubi8/nodejs-18` (SDS-EXT-32), includes SDS-SHARED-3, SDS-TASK-18 and SDS-TASK-28. -|=== - -==== `ods-package-image` task - -[cols="1,1,3"] -|=== -| SDS-TASK-19 -| `ods-package-image` Task resource -| Builds and scans a container image, then pushes it to a registry. References SDS-TASK-20 and SDS-TASK-31, and executes SDS-TASK-32 and SDS-TASK-21. - -| SDS-TASK-20 -| `ods-package-image` container image -| Container image to build, scan, push and tag images. Based on `ubi8` (SDS-EXT-1), includes SDS-EXT-17, SDS-EXT-18, SDS-EXT-31 and SDS-TASK-21. - -| SDS-TASK-31 -| `ods-aqua-scan` container image -| Container image to scan images. Based on `ubi8-minimal` (SDS-EXT-2). Includes SDS-TASK-33. - -| SDS-TASK-21 -| `package-image` binary -a| Checks if an image with the tag to built exist already in the target registry, and if so, skips the build and continues with processing tags. - -Builds a container image using SDS-EXT-18: - -* The Docker context directory defaults to `.` and can be overwritten by the `docker-dir` parameter. -* The Dockerfile defaults to `Dockerfile`, and can be overwritten by `dockerfile` parameter. The location is relative to the Docker context directory. -* The resulting image name and SHA is placed into `.ods/artifacts`. - -Pushes the image to the target registry (defaulting to an image stream in the namespace of the pipeline run) using SDS-EXT-18. - -Generates the SBOM report of the image using SDS-EXT-31. The resulting report is placed in `.ods/artifacts/sboms/.spdx` in link:https://spdx.dev/[SPDX] format. - -If the Aqua scanner is installed in the base image, the pushed image shall be scanned. The resulting report is placed in `.ods/artifacts` and attached as a code insight to Bitbucket. - -Processes tags specified in the `extra-tags` parameter to add tags to the image in the target registry: - -* If an artifact for the tagged image exists, the tag is not further processed. -* If there is no artifact for the tagged image: - * Adds the tag using skopeo (SDS-EXT-17) to the image in the target registry. - * The resulting tagged image artifact is placed into `.ods/artifacts`. - -| SDS-TASK-32 -| `download-aqua-scanner.sh` shell script -a| If a download URL is specified in the `ods-aqua-scanner-url` secret, the binary is downloaded from the given URL and placed onto the workspace PVC under `.ods-cache/bin`. 
Subsequent task runs check if the (same version of the) scanner is already present before downloading.
-
-| SDS-TASK-33
-| `aqua-scan` binary
-a| Checks if a scan artifact exists already, and if so, skips the scan.
-
-If the Aqua scanner is installed via SDS-TASK-32, the pushed image shall be scanned. The resulting report is placed in `.ods/artifacts` and attached as a code insight to Bitbucket.
-|===
-
-==== `ods-deploy-helm` task
-
-[cols="1,1,3"]
-|===
-| SDS-TASK-22
-| `ods-deploy-helm` Task resource
-| Deploys a Helm chart and promotes images. References SDS-TASK-23 and executes SDS-TASK-24.
-
-| SDS-TASK-23
-| `ods-helm` container image
-| Container image to promote images and deploy Helm charts. Based on `ubi8/ubi-minimal` (SDS-EXT-2), includes SDS-EXT-9, SDS-EXT-15, SDS-EXT-17, SDS-EXT-19, SDS-EXT-20, SDS-EXT-21, SDS-EXT-23, SDS-EXT-24, SDS-EXT-30 and SDS-TASK-24.
-
-| SDS-TASK-24
-| `deploy-helm` binary
-a| Skips when no `namespace` parameter is given.
-
-Pushes images into the target namespace.
-
-* The images that are pushed are determined by the artifacts in `.ods/artifacts/image-digests`. Each artifact contains information from which registry / image stream to get the images.
-* The target namespace is given through the `namespace` parameter.
-* The target registry may also be external to the cluster in which the pipeline runs. The registry is identified by the `registry-host` parameter, and the credential token of `api-credentials-secret` is used to authenticate.
-
-Upgrades (or installs) a Helm chart.
-
-* The Helm chart is expected at the location identified by the `chart-dir` parameter (defaulting to `chart`).
-* The task errors if no chart can be found.
-* A diff is performed before the upgrade/install, using any flags provided by the `diff-flags` parameter. If there are no differences, upgrade/install is skipped.
-* The upgrade/install waits until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful.
-* Any values and secrets files corresponding to the namespace are respected (`values.yaml`, `secrets.yaml`, `values..yaml`, `secrets..yaml`; in that order of specificity).
-* A values file containing the Git commit SHA is auto-generated and added to the Helm diff/upgrade invocation.
-* Any encrypted secrets files are decrypted on the fly, using the age key provided by the `Secret` identified by the `age-key-secret` parameter (defaulting to `helm-secrets-age-key`). The secret is expected to expose the age key under the `key.txt` field.
-* The "app version" is set to the checked out Git commit SHA.
-* Charts in any of the child repositories configured in `ods.y(a)ml` are packaged according to the same rules and added as subcharts.
-* The target namespace may also be external to the cluster in which the pipeline runs. The API server is identified by the `api-server` parameter, and the credential token of `api-credentials-secret` is used to authenticate.
-|===
-
===== Pipeline Manager

[cols="1,1,3"]
@@ -499,10 +210,7 @@ For all repositories in scope, the artifacts in the corresponding groups in Nexu
|===
| SDS-SETUP-1
| Helm chart `ods-pipeline`
-a| The Helm chart consists of two subcharts:
-
-* `tasks`: Contains `Task` resources
-* `setup`: Contains resources related to the pipeline manager and config maps / secrets supporting pipeline runs
+a| The Helm chart contains resources related to the pipeline manager, as well as config maps, secrets and tasks supporting pipeline runs.

| SDS-SETUP-2
| `web-terminal-install.sh` script
@@ -510,12 +218,12 @@ a| The script is supposed to be downloaded and piped into bash. The script insta

| SDS-SETUP-3
| `install.sh` script
-a| The script installs the Helm chart located in `deploy/ods-pipeline`. Further, it:
+a| The script installs the Helm chart located in `deploy/chart`. Further, it:
* creates the `pipeline` serviceaccount if it does not exist already
* creates secrets holding relevant credentials (e.g. Bitbucket access token), either by prompting the user for values, or taking input from command line flags
* in case of an update, modifies existing secrets when command line flags are given
-* adds the `tekton.dev/git-0` annotation to the `ods-bitbucket-auth` secret (pointing to the Bitbucket URL) and associate the secret with the `pipeline` serviceaccount to enable `git clone`` in the `ods-start` task
+* adds the `tekton.dev/git-0` annotation to the `ods-bitbucket-auth` secret (pointing to the Bitbucket URL) and associates the secret with the `pipeline` serviceaccount to enable `git clone` in the `ods-pipeline-start` task (a sketch of this step follows the table)

|===
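+
+A minimal sketch of the annotation step (assuming a `foo-cd` namespace and a Bitbucket URL of `https://bitbucket.example.com`; the actual script derives these values from its flags or prompts):
+
+[source,bash]
+----
+# Let Tekton pick up the credentials when cloning from the Bitbucket host.
+kubectl -n foo-cd annotate secret ods-bitbucket-auth \
+  tekton.dev/git-0=https://bitbucket.example.com --overwrite
+# Associate the secret with the serviceaccount used by pipeline runs.
+kubectl -n foo-cd patch serviceaccount pipeline \
+  -p '{"secrets": [{"name": "ods-bitbucket-auth"}]}'
+----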
@@ -525,168 +233,30 @@ a| The script installs the Helm chart located in `deploy/ods-pipeline`. Further,

|===
|ID |Name |Version |Description |Link

-| SDS-EXT-1
-| Red Hat Universal Base Image 8
-| 8.7
-| Universal Base Image is designed and engineered to be the base layer for a wide range of applications, middleware and utilities. It is maintained by Red Hat and updated regularly.
-| https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e
-
| SDS-EXT-2
| Red Hat Universal Base Image 8 Minimal
| 8.4
| Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. It is maintained by Red Hat and updated regularly.
| https://catalog.redhat.com/software/containers/ubi8/ubi-minimal/5c359a62bed8bd75a2c3fba8

-| SDS-EXT-3
-| Go
-| 1.16
-| Go toolchain.
-| https://golang.org
-
-| SDS-EXT-4
-| golangci-lint
-| 1.45
-| golangci-lint is a Go linters aggregator.
-| https://golangci-lint.run
-
-| SDS-EXT-5
-| junit-report
-| 2.0
-| Converts go test output to an xml report, suitable for applications that expect junit xml reports.
-| https://github.com/jstemmer/go-junit-report
-
-| SDS-EXT-6
-| gcc/gcc-c++
-| 8.5
-| Optimizing compiler supporting various programming languages, required for CGO.
-| https://gcc.gnu.org
-
-| SDS-EXT-7
-| sonar-scanner
-| 4.8
-| General purpose SonarQube scanner
-| https://github.com/SonarSource/sonar-scanner-cli
-
-| SDS-EXT-8
-| cnes-report
-| 4.2
-| Exports code analysis from a SonarQube server in various file formats.
-| https://github.com/cnescatlab/sonar-cnes-report
-
| SDS-EXT-9
| Git
| 2.39
| Distributed version control system.
| https://git-scm.com

-| SDS-EXT-11
-| Red Hat OpenJDK 17 Image
-| 1.13
-| OpenJDK 17 container is a base platform for building and running plain Java 17 applications, e.g. fat-jar and flat classpath.
-| https://catalog.redhat.com/software/containers/ubi8/openjdk-17/618bdbf34ae3739687568813 - -| SDS-EXT-12 -| Gradle -| 7.4.2 -| Build automation tool for multi-language software development. -| https://gradle.org - | SDS-EXT-13 | openssh-clients | 8.0 | Clients necessary to make encrypted connections to SSH servers. | https://www.openssh.com -| SDS-EXT-15 -| Tar -| 1.30 -| Used to create and extract archive files. -| https://www.gnu.org/software/tar/ - -| SDS-EXT-17 -| Skopeo -| 1.11 -| Tool for moving container images between different types of container storages. -| https://github.com/containers/skopeo - -| SDS-EXT-18 -| Buildah -| 1.29 -| Tool that facilitates building OCI images. -| https://github.com/containers/buildah - -| SDS-EXT-19 -| Helm -| 3.5 -| Package manager for Kubernetes. -| https://helm.sh - -| SDS-EXT-20 -| Helm Diff plugin -| 3.3 -| Shows a diff explaining what a helm upgrade would change. -| https://github.com/databus23/helm-diff - -| SDS-EXT-21 -| Helm Secrets plugin -| 3.10 -| Manages secrets with Git workflow. -| https://github.com/jkroepke/helm-secrets - -| SDS-EXT-23 -| Sops -| 3.7 -| Encrypted files management tool. -| https://github.com/mozilla/sops - -| SDS-EXT-24 -| Age -| 1.0 -| File encryption tool, format and Go library with small explicit keys. -| https://github.com/FiloSottile/age - -| SDS-EXT-25 -| Go Toolset for UBI 8 -| 1.18 -| go-toolset available as a container is a base platform for building and running various Go applications and frameworks. It is maintained by Red Hat and updated regularly. -| https://catalog.redhat.com/software/containers/ubi8/go-toolset/5ce8713aac3db925c03774d1 - -| SDS-EXT-26 -| NodeJS 16 for UBI 8 -| 1 -| Node.js 16 available as container is a base platform for building and running various Node.js 16 applications and frameworks. It is maintained by Red Hat and updated regularly. -| https://catalog.redhat.com/software/containers/ubi8/nodejs-16/615aee9fc739c0a4123a87e1 - | SDS-EXT-27 | Git LFS | 3.0.2 | Git Large File Storage extension for versioning large files. | https://git-lfs.github.com/ -| SDS-EXT-28 -| Python 3.9 for UBI 8 -| 1 -| Python 3.9 available as container is a base platform for building and running various Python applications and frameworks. It is maintained by Red Hat and updated regularly. -| https://catalog.redhat.com/software/containers/ubi8/python-39/6065b24eb92fbda3a4c65d8f - -| SDS-EXT-29 -| GNU findutils -| 4.6 -| Basic directory searching utilities, included due to the dependency of `helm-secrets` on `xargs` -| https://www.gnu.org/software/findutils/ - -| SDS-EXT-31 -| Trivy -| 0.36.0 -| Security scanner CLI. -| https://www.trivy.dev/ - -| SDS-EXT-32 -| NodeJS 18 for UBI 8 -| 1 -| Node.js 18 available as container is a base platform for building and running various Node.js 18 applications and frameworks. It is maintained by Red Hat and updated regularly. -| https://catalog.redhat.com/software/containers/ubi8/nodejs-18/6278e5c078709f5277f26998 - |=== == Appendix diff --git a/docs/design/software-requirements-specification.adoc b/docs/design/software-requirements-specification.adoc index 8f7071b1..2c0ce2b7 100644 --- a/docs/design/software-requirements-specification.adoc +++ b/docs/design/software-requirements-specification.adoc @@ -35,14 +35,7 @@ N/A === Software Description -ODS Pipeline shall provide CI/CD tooling within a Kubernetes cluster such as OpenShift. It shall interface with an ODS core installation (specifically, the Atlassian suite, Nexus, SonarQube and Aqua). 
ODS Pipeline shall provide a library of tasks which work on one or more Git repositories and:
-
-* test applications,
-* build applications,
-* package applications and
-* deploy applications.
-
-The tasks shall create artifacts of their work. Those artifacts shall be stored in Nexus and are intended to be used as evidence in technical documentation.
+ODS Pipeline shall provide CI/CD tooling within a Kubernetes cluster such as OpenShift. It shall interface with an ODS core installation (specifically, Bitbucket and Nexus). Tasks may create artifacts of their work, which shall be stored in Nexus and are intended to be used as evidence in technical documentation.

=== General Requirements

@@ -92,7 +85,7 @@ The tasks shall create artifacts of their work. Those artifacts shall be stored

=== Tasks Requirements

-==== Task `ods-start`
+==== Task `ods-pipeline-start`

[cols="1,3"]
|===
@@ -120,7 +113,7 @@ a| The task shall store context information for each checked out repository (suc

-| The task shall support caching. Caching shall enable persisting build dependencies. Caching shall enable to reuse build outputs and artifacts if a build tasks runs when the associated working directory has not changed.
+| The task shall support caching. Caching shall enable persisting build dependencies. Caching shall enable reusing build outputs and artifacts if a build task runs when the associated working directory has not changed.
|===

-==== Task `ods-finish`
+==== Task `ods-pipeline-finish`

[cols="1,3"]
|===
@@ -137,204 +130,12 @@ a| The task shall store context information for each checked out repository (suc

| The task shall send a notification to a configured webhook about the pipeline run status.
|===

-==== Task `ods-build-go`
-
-[cols="1,3"]
-|===
-| SRS-TASK-BUILD-GO-1
-| The task shall ensure that all Go files are formatted.
-
-| SRS-TASK-BUILD-GO-2
-| The task shall lint all Go files.
-
-| SRS-TASK-BUILD-GO-3
-a| The task shall run Go tests, creating code coverage and xUnit report.
-
-* The user shall be able to run a script prior to tests.
-* Vendored packages shall be excluded from the tests.
-* Artifacts shall be made available to SonarQube and designated for upload to Nexus.
-
-| SRS-TASK-BUILD-GO-4
-a| The task shall build a Go module based Go binary.
-
-* Destination directory shall be customizable
-* Paths in stack traces shall be trimmed
-* Target operating system and architecture shall be customizable
-* CGO shall be disabled by default but possible to enable.
-
-| SRS-TASK-BUILD-GO-5
-| See SRS-TASK-SHARED-1.
-
-| SRS-TASK-BUILD-GO-6
-| See SRS-TASK-SHARED-2.
-
-| SRS-TASK-BUILD-GO-7
-| See SRS-TASK-SHARED-3.
-
-| SRS-TASK-BUILD-GO-8
-| See SRS-TASK-SHARED-4.
-
-|===
-
-==== Task `ods-build-gradle`
+==== Cache / Build skipping Support Requirements

[cols="1,3"]
|===
-| SRS-TASK-BUILD-GRADLE-1
-a| The task shall build a Gradle module.
-
-* Destination directory shall be customizable
-* Gradle options shall be customizable
-* Additional Gradle tasks shall be configurable
-
-| SRS-TASK-BUILD-GRADLE-2
-a| The task shall run tests, creating code coverage and xUnit report.
-
-* Artifacts shall be made available to SonarQube and designated for upload to Nexus.
-
-| SRS-TASK-BUILD-GRADLE-3
-| See SRS-TASK-SHARED-1.
-
-| SRS-TASK-BUILD-GRADLE-4
-| See SRS-TASK-SHARED-2.
-
-| SRS-TASK-BUILD-GRADLE-5
-| See SRS-TASK-SHARED-4.
-
-|===
-
-==== Task `ods-build-python`
-
-[cols="1,3"]
-|===
-| SRS-TASK-BUILD-PYTHON-1
-| The task shall lint all Python files.
-
-| SRS-TASK-BUILD-PYTHON-2
-a| The task shall run test, creating code coverage and xUnit reports.
-
-* The user shall be able to run a script prior to tests.
-* Artifacts shall be made available to SonarQube and designated for upload to Nexus.
- -| SRS-TASK-BUILD-PYTHON-3 -a| The task shall build a Python application. - -* Destination directory shall be customizable - -| SRS-TASK-BUILD-PYTHON-4 -| See SRS-TASK-SHARED-1. - -| SRS-TASK-BUILD-PYTHON-5 -| See SRS-TASK-SHARED-2. - -| SRS-TASK-BUILD-PYTHON-6 -| See SRS-TASK-SHARED-3. - -| SRS-TASK-BUILD-PYTHON-7 -| See SRS-TASK-SHARED-4. - -|=== - -==== Task `ods-build-npm` - -[cols="1,3"] -|=== -| SRS-TASK-BUILD-NPM-1 -| The task shall run the npm lint script. - -| SRS-TASK-BUILD-NPM-2 -a| The task shall run the npm test script, creating code coverage and xUnit reports. - -* Artifacts shall be made available to SonarQube and designated for upload to Nexus. - -| SRS-TASK-BUILD-NPM-3 -a| The task shall build a Node.JS application using npm. - -* Destination directory shall be customizable - -| SRS-TASK-BUILD-NPM-4 -| See SRS-TASK-SHARED-1. - -| SRS-TASK-BUILD-NPM-5 -| See SRS-TASK-SHARED-2. - -| SRS-TASK-BUILD-NPM-6 -| See SRS-TASK-SHARED-3. - -| SRS-TASK-BUILD-NPM-7 -| See SRS-TASK-SHARED-4. - -|=== - - -==== Task `ods-package-image` - -[cols="1,3a"] -|=== -| SRS-TASK-PACKAGE-IMAGE-1 -| The task shall build a container image unless the image digest already exists. - -* The Docker context directory and Dockerfile shall be customizable. -* The resulting image name and SHA shall be recorded in an artifact. -* The image build shall have access to Nexus for installing dependencies. This should only be used if one cannot easily copy artifacts created by the build task. - -| SRS-TASK-PACKAGE-IMAGE-2 -| The task shall push the image to the target registry. - -| SRS-TASK-PACKAGE-IMAGE-3 -| If an Aqua scanner download URL is provided, the pushed image shall be scanned with the Aqua scanner binary. - -| SRS-TASK-PACKAGE-IMAGE-4 -| The task shall generate the SBOM report of the image. -|=== - -==== Task `ods-deploy-helm` - -[cols="1,3"] -|=== -| SRS-TASK-DEPLOY-HELM-1 -| The task shall skip when no namespace parameter is given. - -| SRS-TASK-DEPLOY-HELM-2 -| The task shall push images built for the checked out commit into the target namespace, which may also be external to the cluster in which the pipeline runs. - -| SRS-TASK-DEPLOY-HELM-3 -a| The task shall upgrade (or install) a Helm chart. - -* The location of the chart shall be customizable. -* Changes (diff) shall be reported in the log output. -* Value files corresponding to the target namespace shall be applied. -* Encrypted secret files shall be decrypted on the fly and applied. -* The "app version" shall be set to the checked out Git commit SHA. -* Charts in any subrepositories shall be added as a subchart. -* The target namespace may also be external to the cluster in which the pipeline runs. -|=== - -==== Shared Requirements - -Tasks above may refer to these shared requirements. - -[cols="1,3"] -|=== -| SRS-TASK-SHARED-1 -a| The task shall analyze the source code statically using SonarQube. - -* The SQ project name shall be fixed by the task to avoid name clashes between projects. -* Default `sonar-project.properties` shall be supplied if that file is not present in the repository. -* Branch and pull request analysis shall be performed if the server edition supports it. -* Report artifacts shall be generated unless the scan is against a pull request. -* Optionally, it shall be checked if the Quality Gate passes. In this case, the quality gate status shall be stored as an artifact. If the gate fails, the task shall be terminated. - -| SRS-TASK-SHARED-2 -a| The task shall be able to run in a subdirectory of the checked out repository. 
-
-* Artifacts and SonarQube project names shall reflect the subdirectory to avoid name clashes.
-
| SRS-TASK-SHARED-3
-a| The task shall be able to optionally use a build output and report cache so that future execution of build scripts can be skipped if the subdirectory of the checked out repository did not change.
-
-| SRS-TASK-SHARED-4
-a| The task shall be configurable to use a build script located in the workspace instead of the built-in build script.
+a| The task shall be able to optionally use a build output and report cache so that future execution of build scripts can be skipped if the subdirectory of the checked out repository did not change.
|===

diff --git a/docs/design/stakeholder-requirements.adoc b/docs/design/stakeholder-requirements.adoc
index e145dcd6..8f8d6d4f 100644
--- a/docs/design/stakeholder-requirements.adoc
+++ b/docs/design/stakeholder-requirements.adoc
@@ -37,45 +37,6 @@ Stakeholder requirements describe what the tool shall be able to accomplish and

[cols="1,3a"]
|===
-| SHR-1
-| ODS Pipeline shall provide a task supporting to build Go applications:
-
-- lint Go source code files
-- statically analyse Go source code files
-- run the Go test tool
-- build executable Go binaries
-
-
-| SHR-2
-| ODS Pipeline shall provide a task supporting to build JDK-based applications (such as Java, Kotlin) using Gradle:
-
-- statically analyse Java source code files
-- run the Gradle build task (running tests and producing executable JAR files)
-
-
-| SHR-3
-| ODS Pipeline shall provide a task supporting to build Python applications:
-
-- lint Python source code files
-- statically analyse Python source code files
-- run pytest
-
-
-| SHR-4
-| ODS Pipeline shall provide a task supporting to build Node.JS applications using npm:
-
-- lint JavaScript/TypeScript source code files
-- statically analyse JavaScript/TypeScript source code files
-- run the npm test script
-- run the npm build task
-
-
-| SHR-5
-| ODS Pipeline shall provide functionality to build container images, create an SBOM file describing the contents of those images, and to scan those images for known vulnerabilities.
-
-| SHR-6
-| ODS Pipeline shall provide a task deploying Helm charts. The release namespace may be in the same cluster or an external cluster.
-
| SHR-7
| ODS Pipeline shall retain immutable artifacts of activities performed by its tasks in an artifact repository. Artifacts shall include, but are not limited to, test results (xUnit), linting results, static analysis results, image digests and deployment configuration changes.

diff --git a/docs/development.adoc b/docs/development.adoc
index 40bd8db6..aa482931 100644
--- a/docs/development.adoc
+++ b/docs/development.adoc
@@ -5,17 +5,12 @@ First, check if your system meets the prerequisites:
```
make check-system
```

-Then, launch a KinD cluster, install Tekton, build & push images and run services:
-```
-make prepare-local-env
-```
-
-Finally, run all tests:
+Then, run all tests:
```
make test
```

-More fine-grained make targets are available, see:
+To see all available `make` targets:
```
make help
```
@@ -27,53 +22,8 @@ As mentioned above, `make test` will run all tests.
You may also run only a subset of tests:

* `make test-cmd` for the packages under `cmd`
* `make test-pkg` for the packages under `pkg`
* `make test-internal` for the packages under `internal`
-* `make test-tasks` for the Tekton tasks
-* `make test-e2e` for the end-to-end tasks
-
-Individual task test can be executed like this:
-```
-go test -run ^TestTaskODSBuildImage github.com/opendevstack/ods-pipeline/test/tasks -v -count=1
-```
-
-Further, if you only want to run one test case from the map of test cases, this can be done like this:
-```
-go test -run ^TestTaskODSBuildNPM/build_backend_javascript_app$ github.com/opendevstack/ods-pipeline/test/tasks -v -count=1
-```
-
-Be aware that depending on the tested task, some local services (e.g. Bitbucket) need to run for the test to succeed. These are all started via `make prepare-local-env`, but more fine-grained control is possible too.
-These dependencies are explicitly set for each test suite and at the beginning of each test suite it will be checked if all required services are running. The tests will fail if at least one service is not running.
-
-TIP: If you want to test build tasks without using SonarQube, pass `-skip-sonar` to `go test`.
+* `make test-e2e` for the task tests and the pipeline run tests
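+
+For example, to run the end-to-end tests directly with `go test`, reusing previously built images (a sketch; the `-ods-reuse-images` flag is explained at the end of this page):
+```
+go test -v ./test/e2e/... -ods-reuse-images
+```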
-Tests can be run both without a private certificate (which is the default) or using a private certificate to test how the tasks perform when the services use a certificate which is not part of the OS default trusted certs. If you want to use a private certificate in the task tests, pass `-private-cert` to `go test`.
+Tests can be run either without a private certificate (the default) or with a private certificate, to test how the tasks perform when the services use a certificate that is not part of the OS default trusted certs. If you want to use a private certificate in the task tests, pass `-private-cert` to `go test`.

-Particularly the task and e2e tests might consume some time and might run into a timeout. To modify the standard timeout (by default in sync with the timeout predefined for Github actions), set the environment variable `ODS_TESTTIMEOUT` (e.g. to `45m`).
-
-Also, if you make changes to the images backing the tasks (be it by changing the `Dockerfile` or by changing the scripts/commands installed there), make sure to rebuild and push the affected images to the KinD registry for your changes to take effect. You can do this e.g. through `./scripts/build-and-push-images.sh --image finish` (the name of the image flag is the suffix of the respective Dockerfile).
-
-=== Testing deployment to external cluster
-
-The `ods-deploy-helm` task is able to deploy to external clusters. This functionality is covered by tests as well, but they are hidden behind the `external` build flag by default. To run those tests, you must run `go test` with `--tags=external`, and provide information about the external cluster to use as the test does not setup the external cluster automatically.
-
-First, you need to create a ODS configuration file containing one environment describing the external cluster you want to use, e.g.:
-
-.ods.external.yaml
-[source,yaml]
-----
-environments:
-- name: dev
-  stage: dev
-  namespace: foo-dev
-  apiServer: https://api.example.openshiftapps.com:6443
-  registryHost: default-route-openshift-image-registry.apps.example.openshiftapps.com
-----
-
-If you place this file in the root of the `ods-pipeline` repository, it will automtically be ignored by Git. Note that the `namespace` specified will be used to deploy the Helm release into. As the Helm release name is set to the random workspace name, clashes with existing resources is unlikely. Nonetheless, it is always recommended to use an empty namespace setup solely for the purpose of testing.
- -Finally, you need to run the tests, passing the configuration created earlier and the token of a serviceaccount with enough permissions in the target namespace: - -``` -go test --tags=external -run ^TestTaskODSDeployHelmExternal$ github.com/opendevstack/ods-pipeline/test/tasks -count=1 -v -external-cluster-token=*** -external-cluster-config=ods.external.yaml -``` - -The above command runs only the external deployment test, but you may also remove this limitation (by removing `-run ^TestTaskODSDeployHelmExternal$`) and run the whole test suite including the external deployment test. +Images used in tasks are rebuilt automatically before executing tests. This provides the best accuracy but it can slow down testing considerably. If you did not make changes since the last test run that would affect the images, you can pass `-ods-reuse-images` to `go test`. diff --git a/docs/example-project.adoc b/docs/example-project.adoc index 5f9375b3..0d06b459 100644 --- a/docs/example-project.adoc +++ b/docs/example-project.adoc @@ -31,15 +31,21 @@ pipelines: tasks: - name: build taskRef: - kind: Task - name: ods-build-go-v0-13-2 + resolver: git + params: + - { name: url, value: https://github.com/opendevstack/ods-pipeline-go.git } + - { name: revision, value: v0.13.2 } + - { name: pathInRepo, value: tasks/build.yaml } workspaces: - name: source workspace: shared-workspace - name: package taskRef: - kind: Task - name: ods-package-image-v0-13-2 + resolver: git + params: + - { name: url, value: https://github.com/opendevstack/ods-pipeline-buildah.git } + - { name: revision, value: v0.13.2 } + - { name: pathInRepo, value: tasks/package.yaml } runAfter: - build workspaces: @@ -47,8 +53,11 @@ pipelines: workspace: shared-workspace - name: deploy taskRef: - kind: Task - name: ods-deploy-helm-v0-13-2 + resolver: git + params: + - { name: url, value: https://github.com/opendevstack/ods-pipeline-helm.git } + - { name: revision, value: v0.13.2 } + - { name: pathInRepo, value: tasks/deploy.yaml } runAfter: - package workspaces: @@ -71,15 +80,21 @@ pipelines: tasks: - name: build taskRef: - kind: Task - name: ods-build-npm-v0-13-2 + resolver: git + params: + - { name: url, value: https://github.com/opendevstack/ods-pipeline-npm.git } + - { name: revision, value: v0.13.2 } + - { name: pathInRepo, value: tasks/build.yaml } workspaces: - name: source workspace: shared-workspace - name: package taskRef: - kind: Task - name: ods-package-image-v0-13-2 + resolver: git + params: + - { name: url, value: https://github.com/opendevstack/ods-pipeline-buildah.git } + - { name: revision, value: v0.13.2 } + - { name: pathInRepo, value: tasks/package.yaml } runAfter: - build workspaces: @@ -87,8 +102,11 @@ pipelines: workspace: shared-workspace - name: deploy taskRef: - kind: Task - name: ods-deploy-helm-v0-13-2 + resolver: git + params: + - { name: url, value: https://github.com/opendevstack/ods-pipeline-helm.git } + - { name: revision, value: v0.13.2 } + - { name: pathInRepo, value: tasks/deploy.yaml } runAfter: - package workspaces: @@ -140,8 +158,11 @@ pipelines: tasks: - name: deploy taskRef: - kind: Task - name: ods-deploy-helm-v0-13-2 + resolver: git + params: + - { name: url, value: https://github.com/opendevstack/ods-pipeline-helm.git } + - { name: revision, value: v0.13.2 } + - { name: pathInRepo, value: tasks/deploy.yaml } workspaces: - name: source workspace: shared-workspace diff --git a/docs/installation.adoc b/docs/installation.adoc index 7a778e75..4d9ab43a 100644 --- a/docs/installation.adoc +++ 
b/docs/installation.adoc
@@ -145,4 +145,4 @@ TIP: The credentials stored in the K8s secrets will not be updated. If you need

==== Finishing the update

-Once the resources in your namespace are updated, you likely have to update the `ods.yaml` files in your repository to point to the new tasks, e.g. changing `ods-build-go-v0-12-0` to `ods-build-go-v0-13-2`. Whether or not you have to update the `ods.yaml` file depends whether the task suffix (controlled by the value `taskSuffix`) has changed due to the update.
+Once the resources in your namespace are updated, you likely have to update the `ods.yaml` files in your repository to point to the new tasks, e.g. changing `ods-build-go-v0-12-0` to `ods-build-go-v0-13-2`.

diff --git a/docs/ods-configuration.adoc b/docs/ods-configuration.adoc
index 2cab231e..bfd1ff0f 100644
--- a/docs/ods-configuration.adoc
+++ b/docs/ods-configuration.adoc
@@ -84,15 +84,12 @@ pipelines:
      taskRef:
        resolver: git
        params:
-        - name: url
-          value: https://github.com/opendevstack/ods-pipeline.git
-        - name: revision
-          value: v0.13.2
-        - name: pathInRepo
-          value: tasks/ods-build-go.yaml
-      workspaces:
-      - name: source
-        workspace: shared-workspace
+        - { name: url, value: https://github.com/opendevstack/ods-pipeline-go.git }
+        - { name: revision, value: v0.13.2 }
+        - { name: pathInRepo, value: tasks/build.yaml }
+      workspaces:
+      - name: source
+        workspace: shared-workspace
----

=== Configuring final tasks

diff --git a/docs/releasing.adoc b/docs/releasing.adoc
index 0ca71552..6e6f60c7 100644
--- a/docs/releasing.adoc
+++ b/docs/releasing.adoc
@@ -13,7 +13,7 @@ ODS pipeline follows https://semver.org[Semantic Versioning 2.0.0]. This means b
Ensure that all issues with the milestone of the release are resolved (or re-assign the issues to a future release). Further, run `govulncheck ./...` locally to check if there are any vulnerabilities that need to be fixed before the release. For more information on `govulncheck`, see https://go.dev/blog/vuln.

=== Preparing the release
-The current version is hardcoded in a few places across the repository. All of these places must be updated for a release (search for `X.Y.Z` and `X-Y-Z`, ignoring the `test` and `tasks` directory). After the version numbers have been updated, run `make tasks` to update the rendered tasks, then mark the new version as released in the changelog and update the readme file. Commit the changes and push to the `master` branch.
+The current version is hardcoded in a few places across the repository. All of these places must be updated for a release (search for `X.Y.Z`, ignoring the `test` and `tasks` directories). After the version numbers have been updated, run `make tasks` to update the rendered tasks, then mark the new version as released in the changelog and update the readme file. Commit the changes and push to the `master` branch.

diff --git a/docs/repository-layout.adoc b/docs/repository-layout.adoc
index 7551d114..d74a4781 100644
--- a/docs/repository-layout.adoc
+++ b/docs/repository-layout.adoc
@@ -4,10 +4,10 @@ The repo follows the https://github.com/golang-standards/project-layout[Standard

The most important pieces are:

-* **build/package**: `Dockerfile`s for the various container images in use. These images back Tekton tasks or the pipeline manager.
+* **build/images**: `Dockerfile`s for the various container images in use. These images back Tekton tasks or the pipeline manager.
* **cmd**: Main executables. These are installed (in different combinations) into the container images.

-* **deploy**: OpenShift/K8S resource definitions, such as `Task` resources. The tasks typically make use of the images built via `build/package` and released to ghcr.io. Their `script` calls one or more executables built from the `cmd` folder.
+* **deploy**: OpenShift/K8S resource definitions, such as `Task` resources. The tasks make use of the images built via `build/images` and released to ghcr.io. Their `script` calls an executable built from the `cmd` folder.

* **docs**: Design and user documents
* **internal/manager**: Implementation of the webhook receiver and pipeline manager - it creates and modifies the actual Tekton pipelines on the fly based on the config found in the repository triggering the webhook request.
-* **pkg**: Packages shared by the various main executables and the pipeline manager. These packages are the public interface and may be used outside this repo (e.g. by custom tasks). Example of packages are `bitbucket` (a Bitbucket Server API v1.0 client), `sonar` (a SonarQube client exposing API endpoints, scanner CLI and report CLI in one unified interface), `nexus` (a Nexus client for uploading, downloading and searching for assets) and `config` (the ODS configuration specification).
+* **pkg**: Packages shared by the various main executables and the pipeline manager. These packages are the public interface and may be used outside this repo (e.g. by custom tasks). Examples of packages are `bitbucket` (a Bitbucket Server API v1.0 client), `sonar` (a SonarQube client exposing API endpoints, scanner CLI and report CLI in one unified interface), `nexus` (a Nexus client for uploading, downloading and searching for assets) and `config` (the ODS configuration specification).

-* **test**: Test scripts and test data. The actual tests are located in the `tasks` and `e2e` subfolders. Those tests make use files located in the `testdata` subfolder, in particular those in `testdata/workspaces`, which provide sample workspaces used by the Tekton task runs.
+* **test**: Test scripts and test data. The actual tests are located in the `e2e` subfolder. Those tests make use of files located in the `testdata` subfolder, in particular those in `testdata/workspaces`, which provide sample workspaces used by the Tekton task runs.

diff --git a/docs/tasks/ods-finish.adoc b/docs/task-finish.adoc
similarity index 92%
rename from docs/tasks/ods-finish.adoc
rename to docs/task-finish.adoc
index 709f070c..5ffb150c 100644
--- a/docs/tasks/ods-finish.adoc
+++ b/docs/task-finish.adoc
@@ -1,6 +1,6 @@
-// Document generated by internal/documentation/tasks.go from template.adoc.tmpl; DO NOT EDIT.
+// File is generated; DO NOT EDIT.

-= ods-finish
+= ods-pipeline-finish

Finishes the pipeline run.

diff --git a/docs/tasks/ods-start.adoc b/docs/task-start.adoc
similarity index 95%
rename from docs/tasks/ods-start.adoc
rename to docs/task-start.adoc
index 64787828..190a9bb0 100644
--- a/docs/tasks/ods-start.adoc
+++ b/docs/task-start.adoc
@@ -1,6 +1,6 @@
-// Document generated by internal/documentation/tasks.go from template.adoc.tmpl; DO NOT EDIT.
+// File is generated; DO NOT EDIT.

-= ods-start
+= ods-pipeline-start

Starts the pipeline run.

diff --git a/docs/tasks/descriptions/ods-build-go.adoc b/docs/tasks/descriptions/ods-build-go.adoc
deleted file mode 100644
index 9479ea3f..00000000
--- a/docs/tasks/descriptions/ods-build-go.adoc
+++ /dev/null
@@ -1,47 +0,0 @@
-Builds Go (module) applications.
-
-The exact build recipe can be found at
-link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-go.sh[build/package/scripts/build-go.sh].
-
-The following provides an overview of the performed steps:
-
-- Source files are checked to be formatted with `gofmt`.
-- The go module cache is configured to be on the cache location of the PVC by setting environment variable `GOMODCACHE` to `.ods-cache/deps/gomod` (see link:https://go.dev/ref/mod#module-cache[go module cache]). -- `golanci-lint` is run. The linter can be configured via a - config file as described in the - link:https://golangci-lint.run/usage/configuration/[configuration documentation]. -- Tests are executed. A potential `vendor` directory is excluded. Test - results are converted into xUnit format. -- Application binary (named `app`) is built and placed into the directory - specified by `output-dir`. - -Finally, the application source code is scanned by SonarQube. -Default SonarQube project properties are provided unless `sonar-project.properties` -is present. -When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate -is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`. -The SonarQube scan will include parameters to perform a pull request analysis if -there is an open pull request for the branch being built. If the -link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration] -is setup properly, pull request decoration in Bitbucket is done automatically. - -The following artifacts are generated by the build task and placed into `.ods/artifacts/` - -* `code-coverage/` - ** `coverage.out` -* `lint-reports/` - ** `report.txt` -* `sonarqube-analysis/` - ** `analysis-report.md` - ** `issues-report.csv` - ** `quality-gate.json` -* `xunit-reports/` - ** `report.xml` - -Instead of the built-in script, one can also specify a build script located -in the Git repository using the `build-script` task parameter. This allows -full control of building and testing, including any generation of artifacts. -Note that some other task parameters have no effect when a custom build -script is used, unless they are handled properly in the script. At a -minimum, the custom script should place its outputs in the directory -identified by `output-dir`. diff --git a/docs/tasks/descriptions/ods-build-gradle.adoc b/docs/tasks/descriptions/ods-build-gradle.adoc deleted file mode 100644 index ced9e532..00000000 --- a/docs/tasks/descriptions/ods-build-gradle.adoc +++ /dev/null @@ -1,102 +0,0 @@ -Builds Gradle applications. - -The gradle build by default caches the downloaded version of the gradle wrapper and dependencies in the cache location of -the PVC by setting the environment variable `GRADLE_USER_HOME` to `/workspace/source/.ods-cache/deps/gradle`. - -The following steps are executed: - -- build gradle application, using `gradlew clean build`, which includes tests execution and coverage report generation -- SonarQube quality scan - -Notes: - -- tests exclude the vendor directory. -- test results are converted into xUnit format. - -Available environment variables: - -- `ODS_OUTPUT_DIR`: this environment variable points to the folder -that this build expects generated application artifacts to be copied to. -The gradle script should read it and copy there the generated artifacts. -- `NEXUS_*` env vars: `NEXUS_URL`, `NEXUS_USERNAME` and `NEXUS_PASSWORD` -are available and should be read by the gradle script. 
- -To enable the gradle script to copy the generated application artifacts script follow these steps: - -- read the environment variable `ODS_OUTPUT_DIR` in the buildscript section of the gradle script: -``` -buildscript { - ext { - outputDir = System.getenv('ODS_OUTPUT_DIR') - } -} -``` -- customize the jar tasks to set the destination directory -``` -jar { - println("Set application jar name to 'app'") - archiveBaseName = 'app' - if (outputDir != null) { - println("Set destinationDirectory to '${projectDir}/${outputDir}'") - destinationDirectory = file("${projectDir}/${outputDir}") - } -} -``` - -To create a coverage report be sure that you add to `gradle.properties` the required -configuration. For example to enable Jacoco coverage repot you will need to: - -- add `jacoco` plugin: -``` -plugins { - id 'application' - id 'jacoco' -} -``` -- add task `jacocoTestReport`: -``` -jacocoTestReport { - reports { - xml.required = true - } -} -``` -- add `finalizedBy jacocoTestReport` to the task `test`: -``` -tasks.named('test') { - useJUnitPlatform() - finalizedBy jacocoTestReport -} -``` - -The exact build recipe can be found at -link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-gradle.sh[build/package/scripts/build-gradle.sh]. - -After tests ran successfully, the application source code is scanned by SonarQube. -Default SonarQube project properties are provided unless `sonar-project.properties` -is present. -When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate -is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`. -The SonarQube scan will include parameters to perform a pull request analysis if -there is an open pull request for the branch being built. If the -link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration] -is setup properly, pull request decoration in Bitbucket is done automatically. - -The following artifacts are generated by the build task and placed into `.ods/artifacts/` - -* `code-coverage/` - ** `coverage.xml` -* `sonarqube-analysis/` - ** `analysis-report.md` - ** `issues-report.csv` - ** `quality-gate.json` -* `xunit-reports/` - ** `report.xml` - -Instead of the built-in script, one can also specify a build script located -in the Git repository using the `build-script` task parameter. This allows -full control of building and testing, including any generation of artifacts. -Note that some other task parameters have no effect when a custom build -script is used, unless they are handled properly in the script. At a -minimum, the custom script should place its outputs in the directory -identified by `output-dir`. diff --git a/docs/tasks/descriptions/ods-build-npm.adoc b/docs/tasks/descriptions/ods-build-npm.adoc deleted file mode 100644 index b3c2ea34..00000000 --- a/docs/tasks/descriptions/ods-build-npm.adoc +++ /dev/null @@ -1,60 +0,0 @@ -Builds Node.js applications using npm. - -The built-in script executes the following steps: - -- check that package.json and package-lock.json exist to require best practice of using lock files. 
See also link:https://github.com/opendevstack/ods-pipeline/discussions/411[discussion 411] -- linting using `npm run lint` -- build application, using `npm run build` -- test execution, using `npm run test` -- SonarQube quality scan - -For linting to work there needs to be a `lint` task in the `package.json` file, -for example `npx eslint src --format compact`, together with a config file -(`eslintrc.json` or similar) at the root of the working directory. This can -be done by running `eslint --init` or by following the -link:https://eslint.org/docs/user-guide/getting-started[official documentation]. - -The exact build recipe can be found at -link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-npm.sh[build/package/scripts/build-npm.sh]. -In particular, `npm run build` is expected to place outputs into `dist` and -`npm run test` is expected to create `build/test-results/test/report.xml` -and `build/coverage/{clover.xml,coverage-final.json,lcov.info}`. - -An example configuration for the test script is: - -``` -JEST_JUNIT_OUTPUT_DIR='build/test-results/test' JEST_JUNIT_OUTPUT_NAME='report.xml' npx jest --reporters=default --reporters=jest-junit --coverage --coverageDirectory=build/coverage --forceExit ./dist -``` - -After tests ran successfully, the application source code is scanned by SonarQube. -Default SonarQube project properties are provided unless `sonar-project.properties` -is present. -When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate -is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`. -The SonarQube scan will include parameters to perform a pull request analysis if -there is an open pull request for the branch being built. If the -link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration] -is setup properly, pull request decoration in Bitbucket is done automatically. - -The following artifacts are generated by the build task and placed into `.ods/artifacts/` - -* `code-coverage/` - ** `clover.xml` - ** `coverage-final.json` - ** `lcov.info` -* `lint-reports` - ** `report.txt` -* `sonarqube-analysis/` - ** `analysis-report.md` - ** `issues-report.csv` - ** `quality-gate.json` -* `xunit-reports/` - ** `report.xml` - -Instead of the built-in script, one can also specify a build script located -in the Git repository using the `build-script` task parameter. This allows -full control of building and testing, including any generation of artifacts. -Note that some other task parameters have no effect when a custom build -script is used, unless they are handled properly in the script. At a -minimum, the custom script should place its outputs in the directory -identified by `output-dir`. diff --git a/docs/tasks/descriptions/ods-build-python.adoc b/docs/tasks/descriptions/ods-build-python.adoc deleted file mode 100644 index c8dffd53..00000000 --- a/docs/tasks/descriptions/ods-build-python.adoc +++ /dev/null @@ -1,34 +0,0 @@ -Builds Python applications. - -The exact build recipe can be found at -link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-python.sh[build/package/scripts/build-python.sh]. -In particular, the Python source files are expected to be located in `src`. - -After tests ran successfully, the application source code is scanned by SonarQube. -Default SonarQube project properties are provided unless `sonar-project.properties` -is present. 
-When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate -is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`. -The SonarQube scan will include parameters to perform a pull request analysis if -there is an open pull request for the branch being built. If the -link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration] -is setup properly, pull request decoration in Bitbucket is done automatically. - -The following artifacts are generated by the build task and placed into `.ods/artifacts/` - -* `code-coverage/` - ** `coverage.xml` -* `sonarqube-analysis/` - ** `analysis-report.md` - ** `issues-report.csv` - ** `quality-gate.json` -* `xunit-reports/` - ** `report.xml` - -Instead of the built-in script, one can also specify a build script located -in the Git repository using the `build-script` task parameter. This allows -full control of building and testing, including any generation of artifacts. -Note that some other task parameters have no effect when a custom build -script is used, unless they are handled properly in the script. At a -minimum, the custom script should place its outputs in the directory -identified by `output-dir`. diff --git a/docs/tasks/descriptions/ods-deploy-helm.adoc b/docs/tasks/descriptions/ods-deploy-helm.adoc deleted file mode 100644 index b20d918c..00000000 --- a/docs/tasks/descriptions/ods-deploy-helm.adoc +++ /dev/null @@ -1,56 +0,0 @@ -Deploy Helm charts. - -This tasks will install / upgrade a Helm chart into your Kubernetes / -OpenShift cluster using Helm. - -Helm has the plugins `helm-diff` and `helm-secrets` installed. A diff is -performed before an upgrade is attempted. `helm-secrets` can be used to -encrypt sensitive values in the underlying Git repository using -https://age-encryption.org[age]. Secrets are decrypted on the fly if the -secret identified by the `age-key-secret` parameter exists and contains an -age secret key which corresponding public key was used as one of the -recipients to encrypt. - -Based on the target environment, some values files are added automatically -to the invocation of the `helm` command if they are present in the chart -directory: - -- `values.yaml`: the values file (automatically considered by Helm). -- `secrets.yaml`: a secrets file. -- `values..yaml`: a values file named after the target namespace. -- `secrets..yaml`: a secrets file named after the target namespace. - -Further, the task automatically sets the `image.tag` value on the CLI which -equals the Git commit SHA being built. This value can be used in your Helm -templates to refer to images built via `ods-package-image`. - -Before the Helm chart is applied, it is packaged, setting the `appVersion` -to the checked out Git commit SHA. - -If the pipeline runs for a repository defining subrepos in its `ods.y(a)ml` -file, then any charts in those subrepos are packaged as well, and added as -dependencies to the top-most chart under `charts/`. Note that values and -secrets files are only collected from the repository for which the pipeline -runs. Therefore, if you use an umbrella repository to promote an -application consisting of multiple repositories, the umbrella repository -needs to define the environment specific values for the subcomponents -for instead of having those files in the subrepo. - -In order to produce correct `image.tag` values for subcomponents, the task -automatically sets `.image.tag` equal to the Git commit SHA of -the subcomponent. 
-If you do not have an existing Helm chart yet, you can use the provided
-link:https://github.com/opendevstack/ods-pipeline/tree/sample-helm-chart[sample chart]
-as a starting point. It is set up in a way that works with this task out of
-the box.
-
-The following artifacts are generated by the task and placed into `.ods/artifacts/`
-
-* `deployments/`
- ** `diff-.txt`
- ** `release-.txt`
diff --git a/docs/tasks/descriptions/ods-package-image.adoc b/docs/tasks/descriptions/ods-package-image.adoc
deleted file mode 100644
index c032d907..00000000
--- a/docs/tasks/descriptions/ods-package-image.adoc
+++ /dev/null
@@ -1,41 +0,0 @@
-Packages applications into container images using
-link:https://buildah.io[buildah].
-
-buildah builds a container image from the `docker-dir` directory using the
-provided `dockerfile`.
-The following `--build-arg` parameters are provided to enable convenient access
-to Nexus:
-
-* nexusUrl=${NEXUS_URL}
-* nexusUsername=Escaped(${NEXUS_USERNAME})
-* nexusPassword=Escaped(${NEXUS_PASSWORD})
-* nexusHost=Host(${NEXUS_URL})
-* nexusAuth=${nexusUsername}:${nexusPassword}
-* nexusUrlWithAuth=${nexusUrl.withAuth($nexusAuth)}
-
-The above is ad-hoc notation meant to be clearer than a lengthy description.
-If no nexusUsername/nexusPassword are defined, nexusAuth will be empty and
-nexusUrlWithAuth is equal to nexusUrl.
-
-By default, the image is named after the component and pushed into the image
-stream located in the namespace of the pipeline run.
-
-If link:https://www.aquasec.com/products/container-security/[Aqua security scanning]
-is enabled in the cluster, images are scanned and registered in Aqua after
-they are pushed to the image stream. JSON and HTML report artifacts are
-generated. Further, if there is an open pull request on Bitbucket for the
-built branch, a code insight report is attached to the Git commit.
-
-Processes tags specified in the `extra-tags` parameter and adds missing tags to
-the image stream in the namespace of the pipeline run.
-
-The following artifacts are generated by the task and placed into `.ods/artifacts/`
-
-* `aquasec-scans/`
- ** `report.html`
- ** `report.json`
-* `image-digests/`
- ** `.json`
- ** `-.json` for each extra-tag
-* `sboms/`
- ** `.spdx`
diff --git a/docs/tasks/ods-build-go.adoc b/docs/tasks/ods-build-go.adoc
deleted file mode 100644
index c4ed74ba..00000000
--- a/docs/tasks/ods-build-go.adoc
+++ /dev/null
@@ -1,127 +0,0 @@
-// Document generated by internal/documentation/tasks.go from template.adoc.tmpl; DO NOT EDIT.
-
-= ods-build-go
-
-Builds Go (module) applications.
-
-The exact build recipe can be found at
-link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-go.sh[build/package/scripts/build-go.sh].
-
-The following provides an overview of the performed steps:
-
-- Source files are checked to be formatted with `gofmt`.
-- The go module cache is configured to be on the cache location of the PVC by setting environment variable `GOMODCACHE` to `.ods-cache/deps/gomod` (see link:https://go.dev/ref/mod#module-cache[go module cache]).
-- `golangci-lint` is run.
The linter can be configured via a - config file as described in the - link:https://golangci-lint.run/usage/configuration/[configuration documentation]. -- Tests are executed. A potential `vendor` directory is excluded. Test - results are converted into xUnit format. -- Application binary (named `app`) is built and placed into the directory - specified by `output-dir`. - -Finally, the application source code is scanned by SonarQube. -Default SonarQube project properties are provided unless `sonar-project.properties` -is present. -When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate -is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`. -The SonarQube scan will include parameters to perform a pull request analysis if -there is an open pull request for the branch being built. If the -link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration] -is setup properly, pull request decoration in Bitbucket is done automatically. - -The following artifacts are generated by the build task and placed into `.ods/artifacts/` - -* `code-coverage/` - ** `coverage.out` -* `lint-reports/` - ** `report.txt` -* `sonarqube-analysis/` - ** `analysis-report.md` - ** `issues-report.csv` - ** `quality-gate.json` -* `xunit-reports/` - ** `report.xml` - -Instead of the built-in script, one can also specify a build script located -in the Git repository using the `build-script` task parameter. This allows -full control of building and testing, including any generation of artifacts. -Note that some other task parameters have no effect when a custom build -script is used, unless they are handled properly in the script. At a -minimum, the custom script should place its outputs in the directory -identified by `output-dir`. - - -== Parameters - -[cols="1,1,2"] -|=== -| Parameter | Default | Description - -| working-dir -| . -| Working directory. The path must be relative to the root of the repository, -without leading `./` and trailing `/`. - - - -| enable-cgo -| false -| Whether to enable CGO. When not enabled the build will set `CGO_ENABLED=0`. - - -| go-os -| linux -| `GOOS` variable (the execution operating system such as `linux`, `windows`). - - -| go-arch -| amd64 -| `GOARCH` variable (the execution architecture such as `arm`, `amd64`). - - -| output-dir -| docker -| Path to the directory into which the resulting Go binary should be copied, relative to `working-dir`. This directory may then later be used as Docker context for example. - - -| cache-build -| true -| If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that a build can be skipped if the `working-dir` contents did not change. You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - - -| build-extra-inputs -| -| List of build source directories (as colon separated string) which in addition working-dir influence the build. These directories are relative to the repository root. If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - - -| build-script -| /usr/local/bin/build-go -| Build script to execute. The link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-go.sh[default script] is located in the container image. If you specify a relative path instead, it will be resolved from the workspace. 
See the task definition for details on how the build script is invoked.
-
-
-| pre-test-script
-|
-| Script to execute before running tests, relative to the working directory.
-
-
-| sonar-quality-gate
-| false
-| Whether the SonarQube quality gate needs to pass for the task to succeed.
-
-
-| sonar-skip
-| false
-| Whether to skip SonarQube analysis or not.
-
-|===
-
-== Results
-
-[cols="1,3"]
-|===
-| Name | Description
-
-| build-reused-from-location
-| The cache location that the build task used. If caching is not enabled this will be an empty string.
-
-|===
diff --git a/docs/tasks/ods-build-gradle.adoc b/docs/tasks/ods-build-gradle.adoc
deleted file mode 100644
index 8d5dfdb2..00000000
--- a/docs/tasks/ods-build-gradle.adoc
+++ /dev/null
@@ -1,187 +0,0 @@
-// Document generated by internal/documentation/tasks.go from template.adoc.tmpl; DO NOT EDIT.
-
-= ods-build-gradle
-
-Builds Gradle applications.
-
-The Gradle build by default caches the downloaded version of the Gradle wrapper and dependencies in the cache location of
-the PVC by setting the environment variable `GRADLE_USER_HOME` to `/workspace/source/.ods-cache/deps/gradle`.
-
-The following steps are executed:
-
-- build the Gradle application, using `gradlew clean build`, which includes test execution and coverage report generation
-- SonarQube quality scan
-
-Notes:
-
-- tests exclude the vendor directory.
-- test results are converted into xUnit format.
-
-Available environment variables:
-
-- `ODS_OUTPUT_DIR`: this environment variable points to the folder
-that this build expects generated application artifacts to be copied to.
-The Gradle script should read it and copy the generated artifacts there.
-- `NEXUS_*` env vars: `NEXUS_URL`, `NEXUS_USERNAME` and `NEXUS_PASSWORD`
-are available and should be read by the Gradle script.
-
-To enable the Gradle script to copy the generated application artifacts, follow these steps:
-
-- read the environment variable `ODS_OUTPUT_DIR` in the buildscript section of the Gradle script:
-```
-buildscript {
-    ext {
-        outputDir = System.getenv('ODS_OUTPUT_DIR')
-    }
-}
-```
-- customize the `jar` task to set the destination directory:
-```
-jar {
-    println("Set application jar name to 'app'")
-    archiveBaseName = 'app'
-    if (outputDir != null) {
-        println("Set destinationDirectory to '${projectDir}/${outputDir}'")
-        destinationDirectory = file("${projectDir}/${outputDir}")
-    }
-}
-```
-
-To create a coverage report, be sure that you add the required configuration
-to `gradle.properties`. For example, to enable JaCoCo coverage reports you will need to:
-
-- add the `jacoco` plugin:
-```
-plugins {
-    id 'application'
-    id 'jacoco'
-}
-```
-- add the task `jacocoTestReport`:
-```
-jacocoTestReport {
-    reports {
-        xml.required = true
-    }
-}
-```
-- add `finalizedBy jacocoTestReport` to the task `test`:
-```
-tasks.named('test') {
-    useJUnitPlatform()
-    finalizedBy jacocoTestReport
-}
-```
-
-The exact build recipe can be found at
-link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-gradle.sh[build/package/scripts/build-gradle.sh].
-
-After tests run successfully, the application source code is scanned by SonarQube.
-Default SonarQube project properties are provided unless `sonar-project.properties`
-is present.
-When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate
-is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`.
-The SonarQube scan will include parameters to perform a pull request analysis if -there is an open pull request for the branch being built. If the -link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration] -is setup properly, pull request decoration in Bitbucket is done automatically. - -The following artifacts are generated by the build task and placed into `.ods/artifacts/` - -* `code-coverage/` - ** `coverage.xml` -* `sonarqube-analysis/` - ** `analysis-report.md` - ** `issues-report.csv` - ** `quality-gate.json` -* `xunit-reports/` - ** `report.xml` - -Instead of the built-in script, one can also specify a build script located -in the Git repository using the `build-script` task parameter. This allows -full control of building and testing, including any generation of artifacts. -Note that some other task parameters have no effect when a custom build -script is used, unless they are handled properly in the script. At a -minimum, the custom script should place its outputs in the directory -identified by `output-dir`. - - -== Parameters - -[cols="1,1,2"] -|=== -| Parameter | Default | Description - -| working-dir -| . -| Working directory. The path must be relative to the root of the repository, -without leading `./` and trailing `/`. - - - -| gradle-additional-tasks -| -| Additional gradle tasks to be passed to the gradle build. (default tasks called are `clean` and `build`). - - -| gradle-options -| --no-daemon --stacktrace -| Options to be passed to the gradle build. (See ref: https://docs.gradle.org/7.4.2/userguide/command_line_interface.html#sec:command_line_debugging) - - -| gradle-opts-env -| -Dorg.gradle.jvmargs=-Xmx512M -| Will be exposed to the build via `GRADLE_OPTS` environment variable. Specifies JVM arguments to use when starting the Gradle client VM. The client VM only handles command line input/output, so it is rare that one would need to change its VM options. You can still use this to change the settings for the Gradle daemon which runs the actual build by setting the according Gradle properties by `-D`. If you want to set the JVM arguments for the actual build you would do this via `-Dorg.gradle.jvmargs=-Xmx1024M` (See ref: https://docs.gradle.org/7.4.2/userguide/build_environment.html#sec:gradle_configuration_properties). - - -| output-dir -| docker -| Path to the directory into which the resulting Java application jar should be copied, relative to `working-dir`. This directory may then later be used as Docker context for example. - - -| cache-build -| true -| If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that a build can be skipped if the `working-dir` contents did not change. You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - - -| build-extra-inputs -| -| List of build source directories (as colon separated string) which in addition working-dir influence the build. These directories are relative to the repository root. If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - - -| cached-outputs -| docker -| List of build output directories (as colon separated string) to be cached. These directories are relative to `working-dir`. - - -| build-script -| /usr/local/bin/build-gradle -| Build script to execute. 
The link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-gradle.sh[default script] is located in the container image. If you specify a relative path instead, it will be resolved from the workspace. See the task definition for details how the build script is invoked. - - -| gradle-build-dir -| build -| Path to the directory into which Gradle publishes its build. - - -| sonar-quality-gate -| false -| Whether the SonarQube quality gate needs to pass for the task to succeed. - - -| sonar-skip -| false -| Whether to skip SonarQube analysis or not. - -|=== - -== Results - -[cols="1,3"] -|=== -| Name | Description - -| build-reused-from-location -| The cache location that the build task used. If caching is not enabled this will be an empty string. - -|=== diff --git a/docs/tasks/ods-build-npm.adoc b/docs/tasks/ods-build-npm.adoc deleted file mode 100644 index cecd1e19..00000000 --- a/docs/tasks/ods-build-npm.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// Document generated by internal/documentation/tasks.go from template.adoc.tmpl; DO NOT EDIT. - -= ods-build-npm - -Builds Node.js applications using npm. - -The built-in script executes the following steps: - -- check that package.json and package-lock.json exist to require best practice of using lock files. See also link:https://github.com/opendevstack/ods-pipeline/discussions/411[discussion 411] -- linting using `npm run lint` -- build application, using `npm run build` -- test execution, using `npm run test` -- SonarQube quality scan - -For linting to work there needs to be a `lint` task in the `package.json` file, -for example `npx eslint src --format compact`, together with a config file -(`eslintrc.json` or similar) at the root of the working directory. This can -be done by running `eslint --init` or by following the -link:https://eslint.org/docs/user-guide/getting-started[official documentation]. - -The exact build recipe can be found at -link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-npm.sh[build/package/scripts/build-npm.sh]. -In particular, `npm run build` is expected to place outputs into `dist` and -`npm run test` is expected to create `build/test-results/test/report.xml` -and `build/coverage/{clover.xml,coverage-final.json,lcov.info}`. - -An example configuration for the test script is: - -``` -JEST_JUNIT_OUTPUT_DIR='build/test-results/test' JEST_JUNIT_OUTPUT_NAME='report.xml' npx jest --reporters=default --reporters=jest-junit --coverage --coverageDirectory=build/coverage --forceExit ./dist -``` - -After tests ran successfully, the application source code is scanned by SonarQube. -Default SonarQube project properties are provided unless `sonar-project.properties` -is present. -When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate -is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`. -The SonarQube scan will include parameters to perform a pull request analysis if -there is an open pull request for the branch being built. If the -link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration] -is setup properly, pull request decoration in Bitbucket is done automatically. 
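As an illustration of the pull request analysis mentioned above, the scanner is passed standard `sonar.pullrequest.*` parameters derived from the open pull request, roughly like this (a sketch; the values are hypothetical):

```
sonar-scanner \
  -Dsonar.pullrequest.key=42 \
  -Dsonar.pullrequest.branch=feature/add-login \
  -Dsonar.pullrequest.base=master
```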
-
-The following artifacts are generated by the build task and placed into `.ods/artifacts/`
-
-* `code-coverage/`
- ** `clover.xml`
- ** `coverage-final.json`
- ** `lcov.info`
-* `lint-reports/`
- ** `report.txt`
-* `sonarqube-analysis/`
- ** `analysis-report.md`
- ** `issues-report.csv`
- ** `quality-gate.json`
-* `xunit-reports/`
- ** `report.xml`
-
-Instead of the built-in script, one can also specify a build script located
-in the Git repository using the `build-script` task parameter. This allows
-full control of building and testing, including any generation of artifacts.
-Note that some other task parameters have no effect when a custom build
-script is used, unless they are handled properly in the script. At a
-minimum, the custom script should place its outputs in the directory
-identified by `output-dir`.
-
-
-== Parameters
-
-[cols="1,1,2"]
-|===
-| Parameter | Default | Description
-
-| working-dir
-| .
-| Working directory. The path must be relative to the root of the repository,
-without leading `./` and trailing `/`.
-
-
-
-| cache-build
-| true
-| If enabled, the task uses or populates a cache with the output dir contents (and artifacts) so that a build can be skipped if the `working-dir` contents did not change. You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds.
-
-
-| build-extra-inputs
-|
-| List of build source directories (as colon separated string) which, in addition to `working-dir`, influence the build. These directories are relative to the repository root. If the contents in these directories change, the cache is invalidated so that the build task will rebuild from scratch.
-
-
-| cached-outputs
-| dist
-| List of build output directories (as colon separated string) to be cached. These directories are relative to the `working-dir` parameter. Common build directories are `dist` (default), `build` and `public`. If empty, this could mean that the original sources are being used as build output and no caching of built files is needed. Nonetheless, build skipping can still remain enabled.
-
-
-| build-script
-| /usr/local/bin/build-npm
-| Build script to execute. The link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-npm.sh[default script] is located in the container image. If you specify a relative path instead, it will be resolved from the workspace. See the task definition for details on how the build script is invoked.
-
-
-| sonar-quality-gate
-| false
-| Whether the quality gate needs to pass.
-
-
-| sonar-skip
-| false
-| Whether to skip the SonarQube analysis or not.
-
-
-| node-version
-| 18
-| Node.js version to use. Supported versions: 16, 18.
-
-|===
-
-== Results
-
-[cols="1,3"]
-|===
-| Name | Description
-
-| build-reused-from-location
-| The cache location that the build task used. If caching is not enabled this will be an empty string.
-
-|===
diff --git a/docs/tasks/ods-build-python.adoc b/docs/tasks/ods-build-python.adoc
deleted file mode 100644
index cb987590..00000000
--- a/docs/tasks/ods-build-python.adoc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Document generated by internal/documentation/tasks.go from template.adoc.tmpl; DO NOT EDIT.
-
-= ods-build-python
-
-Builds Python applications.
-
-The exact build recipe can be found at
-link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-python.sh[build/package/scripts/build-python.sh].
-In particular, the Python source files are expected to be located in `src`.
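To give a feel for the expected inputs and outputs, the recipe is in the spirit of the following commands (a sketch only: the concrete linter and flags are assumptions based on the task's `max-line-length` parameter and the artifact names below; the linked script is authoritative):

```
# Lint the sources (the limit comes from the max-line-length parameter).
flake8 --max-line-length=120 src

# Run tests, writing xUnit results and a coverage report.
python -m pytest --junitxml=report.xml --cov=src --cov-report=xml:coverage.xml
```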
-
-After tests run successfully, the application source code is scanned by SonarQube.
-Default SonarQube project properties are provided unless `sonar-project.properties`
-is present.
-When `sonar-quality-gate` is set to `true`, the task will fail if the quality gate
-is not passed. If SonarQube is not desired, it can be disabled via `sonar-skip`.
-The SonarQube scan will include parameters to perform a pull request analysis if
-there is an open pull request for the branch being built. If the
-link:https://docs.sonarqube.org/latest/analysis/bitbucket-integration/[ALM integration]
-is set up properly, pull request decoration in Bitbucket is done automatically.
-
-The following artifacts are generated by the build task and placed into `.ods/artifacts/`
-
-* `code-coverage/`
- ** `coverage.xml`
-* `sonarqube-analysis/`
- ** `analysis-report.md`
- ** `issues-report.csv`
- ** `quality-gate.json`
-* `xunit-reports/`
- ** `report.xml`
-
-Instead of the built-in script, one can also specify a build script located
-in the Git repository using the `build-script` task parameter. This allows
-full control of building and testing, including any generation of artifacts.
-Note that some other task parameters have no effect when a custom build
-script is used, unless they are handled properly in the script. At a
-minimum, the custom script should place its outputs in the directory
-identified by `output-dir`.
-
-
-== Parameters
-
-[cols="1,1,2"]
-|===
-| Parameter | Default | Description
-
-| working-dir
-| .
-| Working directory. The path must be relative to the root of the repository,
-without leading `./` and trailing `/`.
-
-
-
-| cache-build
-| true
-| If enabled, the task uses or populates a cache with the output dir contents (and artifacts) so that a build can be skipped if the `working-dir` contents did not change. You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds.
-
-
-| build-extra-inputs
-|
-| List of build source directories (as colon separated string) which, in addition to `working-dir`, influence the build. These directories are relative to the repository root. If the contents in these directories change, the cache is invalidated so that the build task will rebuild from scratch.
-
-
-| max-line-length
-| 120
-| Maximum line length.
-
-
-| pre-test-script
-|
-| Script to execute before running tests, relative to the working directory.
-
-
-| build-script
-| /usr/local/bin/build-python
-| Build script to execute. The link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-python.sh[default script] is located in the container image. If you specify a relative path instead, it will be resolved from the workspace. See the task definition for details on how the build script is invoked.
-
-
-| sonar-quality-gate
-| false
-| Whether the quality gate needs to pass.
-
-
-| sonar-skip
-| false
-| Whether to skip the SonarQube analysis or not.
-
-|===
-
-== Results
-
-[cols="1,3"]
-|===
-| Name | Description
-
-| build-reused-from-location
-| The cache location that the build task used. If caching is not enabled this will be an empty string.
-
-|===
diff --git a/docs/tasks/ods-deploy-helm.adoc b/docs/tasks/ods-deploy-helm.adoc
deleted file mode 100644
index c1f1a976..00000000
--- a/docs/tasks/ods-deploy-helm.adoc
+++ /dev/null
@@ -1,144 +0,0 @@
-// Document generated by internal/documentation/tasks.go from template.adoc.tmpl; DO NOT EDIT.
-
-= ods-deploy-helm
-
-Deploy Helm charts.
-
-This task will install or upgrade a Helm chart into your Kubernetes /
-OpenShift cluster using Helm.
-
-Helm has the plugins `helm-diff` and `helm-secrets` installed. A diff is
-performed before an upgrade is attempted. `helm-secrets` can be used to
-encrypt sensitive values in the underlying Git repository using
-https://age-encryption.org[age]. Secrets are decrypted on the fly if the
-secret identified by the `age-key-secret` parameter exists and contains an
-age secret key whose corresponding public key was used as one of the
-recipients to encrypt.
-
-Based on the target environment, some values files are added automatically
-to the invocation of the `helm` command if they are present in the chart
-directory:
-
-- `values.yaml`: the values file (automatically considered by Helm).
-- `secrets.yaml`: a secrets file.
-- `values.<namespace>.yaml`: a values file named after the target namespace.
-- `secrets.<namespace>.yaml`: a secrets file named after the target namespace.
-
-Further, the task automatically sets the `image.tag` value on the CLI which
-equals the Git commit SHA being built. This value can be used in your Helm
-templates to refer to images built via `ods-package-image`.
-
-Before the Helm chart is applied, it is packaged, setting the `appVersion`
-to the checked out Git commit SHA.
-
-If the pipeline runs for a repository defining subrepos in its `ods.y(a)ml`
-file, then any charts in those subrepos are packaged as well, and added as
-dependencies to the top-most chart under `charts/`. Note that values and
-secrets files are only collected from the repository for which the pipeline
-runs. Therefore, if you use an umbrella repository to promote an
-application consisting of multiple repositories, the umbrella repository
-needs to define the environment-specific values for the subcomponents
-instead of having those files in the subrepo.
-
-In order to produce correct `image.tag` values for subcomponents, the task
-automatically sets `<subcomponent>.image.tag` equal to the Git commit SHA of
-the subcomponent. Further, if no release name is explicitly configured, the
-task also sets `<subcomponent>.fullnameOverride` equal to the respective
-subcomponent to avoid resources being prefixed with the umbrella repository
-component name (assuming your resources are named using the `chart.fullname`
-helper).
-
-If you do not have an existing Helm chart yet, you can use the provided
-link:https://github.com/opendevstack/ods-pipeline/tree/sample-helm-chart[sample chart]
-as a starting point. It is set up in a way that works with this task out of
-the box.
-
-The following artifacts are generated by the task and placed into `.ods/artifacts/`
-
-* `deployments/`
- ** `diff-.txt`
- ** `release-.txt`
-
-
-== Parameters
-
-[cols="1,1,2"]
-|===
-| Parameter | Default | Description
-
-| chart-dir
-| ./chart
-| Helm chart directory that will be deployed
-
-
-| release-name
-|
-| The Helm release name. If empty, the release name is simply the name of the chart.
-
-When this task is used in a repository which defines subcharts, and the parameter is not set,
-then the task sets `<subcomponent>.fullnameOverride` equal to the respective
-subcomponent to avoid resources being prefixed with the umbrella repository
-component name (assuming your resources are named using the `chart.fullname`
-helper). However, if the parameter is specified, `<subcomponent>.fullnameOverride` is not set.
-As a result, the `chart.fullname` helper prefixes resources with the specified
-`release-name` unless the chart's name contains the `release-name`.
-
-
-
-| diff-flags
-| --three-way-merge
-| Flags to pass to `helm diff upgrade` in addition to the ones specified via the `upgrade-flags` parameter. Note that the flags `--detailed-exitcode` and `--no-color` are automatically set and cannot be removed. If flags unknown to `helm diff` are passed, they are ignored.
-
-
-| upgrade-flags
-| --install --wait
-| Flags to pass to `helm upgrade`.
-
-
-| age-key-secret
-| helm-secrets-age-key
-| Name of the secret containing the age key to use for helm-secrets.
-If the secret exists, it is expected to have a field named `key.txt` with the age secret key in its content.
-
-
-
-| api-server
-|
-| API server of the target cluster, including scheme.
-Only required if the target namespace is outside the cluster in which
-the pipeline runs.
-
-
-
-| api-credentials-secret
-|
-| Name of the Secret resource holding the token of a serviceaccount (in field `token`).
-Only required when `api-server` is set.
-
-
-
-| namespace
-|
-| Target K8s namespace (or OpenShift project) to deploy into.
-If empty, the task will be a no-op.
-
-
-
-| registry-host
-|
-| Hostname of the target registry to push images to.
-If not given, the registry host of the source image is used.
-
-
-
-| diff-only
-| false
-| If set to true, the task will only perform a diff, and then stop.
-No images will be promoted or upgrades attempted.
-
-
-|===
-
-== Results
-
-N/A
diff --git a/docs/tasks/ods-package-image.adoc b/docs/tasks/ods-package-image.adoc
deleted file mode 100644
index da1ec131..00000000
--- a/docs/tasks/ods-package-image.adoc
+++ /dev/null
@@ -1,119 +0,0 @@
-// Document generated by internal/documentation/tasks.go from template.adoc.tmpl; DO NOT EDIT.
-
-= ods-package-image
-
-Packages applications into container images using
-link:https://buildah.io[buildah].
-
-buildah builds a container image from the `docker-dir` directory using the
-provided `dockerfile`.
-The following `--build-arg` parameters are provided to enable convenient access
-to Nexus:
-
-* nexusUrl=${NEXUS_URL}
-* nexusUsername=Escaped(${NEXUS_USERNAME})
-* nexusPassword=Escaped(${NEXUS_PASSWORD})
-* nexusHost=Host(${NEXUS_URL})
-* nexusAuth=${nexusUsername}:${nexusPassword}
-* nexusUrlWithAuth=${nexusUrl.withAuth($nexusAuth)}
-
-The above is ad-hoc notation meant to be clearer than a lengthy description.
-If no nexusUsername/nexusPassword are defined, nexusAuth will be empty and
-nexusUrlWithAuth is equal to nexusUrl.
-
-By default, the image is named after the component and pushed into the image
-stream located in the namespace of the pipeline run.
-
-If link:https://www.aquasec.com/products/container-security/[Aqua security scanning]
-is enabled in the cluster, images are scanned and registered in Aqua after
-they are pushed to the image stream. JSON and HTML report artifacts are
-generated. Further, if there is an open pull request on Bitbucket for the
-built branch, a code insight report is attached to the Git commit.
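To make the build-arg wiring described above concrete, the buildah invocation is roughly of the following shape (a sketch: registry, image name and tag are hypothetical, and the real script derives them from the task parameters and the `.ods` context):

```
buildah bud --format=oci --storage-driver=vfs \
  --build-arg=nexusUrl=$NEXUS_URL \
  --build-arg=nexusAuth=$NEXUS_USERNAME:$NEXUS_PASSWORD \
  -f ./Dockerfile -t my-registry/my-namespace/my-component:$GIT_COMMIT_SHA .

buildah push my-registry/my-namespace/my-component:$GIT_COMMIT_SHA
```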
- -Processes tags specified in the `extra-tags` parameter and adds missing tags to -the images stream in the namespace of the pipeline run. - -The following artifacts are generated by the task and placed into `.ods/artifacts/` - -* `aquasec-scans/` - ** `report.html` - ** `report.json` -* `image-digests/` - ** `.json` - ** `-.json` for each extra-tag -* `sboms/` - ** `.spdx` - - -== Parameters - -[cols="1,1,2"] -|=== -| Parameter | Default | Description - -| registry -| image-registry.openshift-image-registry.svc:5000 -| Image registry to push image to. - - -| image-stream -| -| Reference of the image stream buildah will produce. If not set, the value of `.ods/component` is used. - - -| extra-tags -| -| Additional image tags (e.g. 'latest dev') for pushed images. The primary tag is based on the commit sha. Only tags currently missing from the image will be added. - - -| storage-driver -| vfs -| Set buildah storage driver. - - -| dockerfile -| ./Dockerfile -| Path to the Dockerfile to build (relative to `docker-dir`). - - -| docker-dir -| . -| Path to the directory to use as Docker context. - - -| format -| oci -| The format of the built container, `oci` or `docker`. - - -| buildah-build-extra-args -| -| Extra parameters passed for the build command when building images (e.g. '--build-arg=firstArg=one --build-arg=secondArg=two'). - - -| buildah-push-extra-args -| -| Extra parameters passed for the push command when pushing images. - - -| trivy-sbom-extra-args -| -| Extra parameters passed for the trivy command to generate an SBOM. - - -| aqua-gate -| false -| Whether the Aqua security scan needs to pass for the task to succeed. - -|=== - -== Results - -[cols="1,3"] -|=== -| Name | Description - -| image-digest -| Digest of the image just built. - -|=== diff --git a/go.mod b/go.mod index db782c80..b7097230 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,6 @@ go 1.19 require ( github.com/google/go-cmp v0.5.9 github.com/google/go-github/v42 v42.0.0 - github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/sonatype-nexus-community/gonexus v0.59.0 github.com/tektoncd/pipeline v0.41.1 golang.org/x/net v0.7.0 @@ -16,6 +15,8 @@ require ( sigs.k8s.io/yaml v1.3.0 ) +require golang.org/x/tools v0.2.0 // indirect + require ( contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect @@ -63,7 +64,6 @@ require ( go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.23.0 // indirect golang.org/x/crypto v0.1.0 // indirect - golang.org/x/exp v0.0.0-20230111222715-75897c7a292a golang.org/x/oauth2 v0.1.0 // indirect golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.5.0 // indirect diff --git a/go.sum b/go.sum index ce077f5d..d9fe6c23 100644 --- a/go.sum +++ b/go.sum @@ -172,8 +172,6 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -336,8 +334,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230111222715-75897c7a292a h1:/YWeLOBWYV5WAQORVPkZF3Pq9IppkcT72GKnWjNf5W8= -golang.org/x/exp v0.0.0-20230111222715-75897c7a292a/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -518,6 +514,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/internal/docs/tasks.go b/internal/docs/tasks.go deleted file mode 100644 index 9189b480..00000000 --- a/internal/docs/tasks.go +++ /dev/null @@ -1,133 +0,0 @@ -package docs - -import ( - "bytes" - "fmt" - "log" - "os" - "path/filepath" - "strings" - "text/template" - - "github.com/opendevstack/ods-pipeline/internal/command" - tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "sigs.k8s.io/yaml" -) - -type Param struct { - Name string - Default string - Description string -} - -type Result struct { - Name string - Description string -} - -type Task struct { - Name string - Description string - Params []Param - Results []Result -} - -func renderTemplate(targetDir, targetFilename string, data Task) error { - targetFile, err := os.Create(targetFilename) - if err != nil { - return err - } - templateFilename := filepath.Join(targetDir, "template.adoc.tmpl") - templateFileParts := strings.Split(templateFilename, "/") - templateDisplayname := templateFileParts[len(templateFileParts)-1] - _, err = targetFile.WriteString( - "// Document generated by internal/documentation/tasks.go from " + templateDisplayname + "; DO NOT EDIT.\n\n", - ) - if err != nil { - return err - } - tmpl, err := template.ParseFiles(templateFilename) - if err != nil { - return err - } - return tmpl.Execute(targetFile, data) -} - -func parseTasks(helmTemplateOutput []byte) ([]*tekton.Task, error) { - var tasks []*tekton.Task - - tasksBytes := bytes.Split(helmTemplateOutput, []byte("---")) - - for _, taskBytes := range tasksBytes { - var t tekton.Task - err := yaml.Unmarshal(taskBytes, &t) - if err != 
nil { - return nil, err - } - if len(t.Name) > 0 { - tasks = append(tasks, &t) - } - } - - return tasks, nil -} - -// RenderTasks extracts the task information into a struct, and -// executes the Asciidoctor template belonging to it. -func RenderTasks(tasksSourceDir, descriptionsSourceDir, targetDir string) error { - if _, err := os.Stat(tasksSourceDir); os.IsNotExist(err) { - return err - } - if _, err := os.Stat(descriptionsSourceDir); os.IsNotExist(err) { - return err - } - stdout, stderr, err := command.RunBufferedInDir( - "helm", - []string{"template", "--values=values.docs.yaml", "."}, - tasksSourceDir, - ) - if err != nil { - fmt.Println(string(stderr)) - log.Fatal(err) - } - - tasks, err := parseTasks(stdout) - if err != nil { - return err - } - for _, t := range tasks { - desc, err := os.ReadFile(filepath.Join(descriptionsSourceDir, fmt.Sprintf("%s.adoc", t.Name))) - if err != nil { - return err - } - task := Task{ - Name: t.Name, - Description: string(desc), - Params: []Param{}, - } - for _, p := range t.Spec.Params { - defaultValue := "" - if p.Default != nil { - defaultValue = p.Default.StringVal - } - task.Params = append(task.Params, Param{ - Name: p.Name, - Default: defaultValue, - Description: p.Description, - }) - } - for _, r := range t.Spec.Results { - task.Results = append(task.Results, Result{ - Name: r.Name, - Description: r.Description, - }) - } - targetFilename := fmt.Sprintf("%s.adoc", t.Name) - target := filepath.Join(targetDir, targetFilename) - err = renderTemplate(targetDir, target, task) - if err != nil { - log.Fatal(err) - } - } - return nil -} diff --git a/internal/image/identity.go b/internal/image/identity.go deleted file mode 100644 index 248473f7..00000000 --- a/internal/image/identity.go +++ /dev/null @@ -1,104 +0,0 @@ -package image - -import ( - "fmt" - - "github.com/opendevstack/ods-pipeline/pkg/artifact" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" -) - -// registry/imageNamespace/imageStream: -type Identity struct { - ImageNamespace string - ImageStream string - GitCommitSHA string // our Digest not docker digest. 
-} - -// StreamSha renders ImageStream:GitCommitSHA -func (iid *Identity) StreamSha() string { - return fmt.Sprintf("%s:%s", iid.ImageStream, iid.GitCommitSHA) -} - -// NamespaceStreamSha renders ImageNamespace/ImageStream:GitCommitSHA -func (iid *Identity) NamespaceStreamSha() string { - return fmt.Sprintf("%s:%s", iid.NamespaceStream(), iid.GitCommitSHA) -} - -// NamespaceStream renders ImageNamespace/ImageStream aka Repository in docker terms -func (iid *Identity) NamespaceStream() string { - return fmt.Sprintf("%s/%s", iid.ImageNamespace, iid.ImageStream) -} - -func CreateImageIdentity(ctxt *pipelinectxt.ODSContext, imageNamespace, imageStream string) Identity { - n := imageNamespace - if len(n) == 0 { - n = ctxt.Namespace - } - s := imageStream - if len(s) == 0 { - s = ctxt.Component - } - return Identity{ - ImageNamespace: n, - ImageStream: s, - GitCommitSHA: ctxt.GitCommitSHA, - } -} - -type IdentityWithTag struct { - ImageIdentity *Identity - Tag string -} - -// NamespaceStreamTag renders ImageNamespace/ImageStream:Tag -func (idt *IdentityWithTag) NamespaceStreamTag() string { - return fmt.Sprintf("%s:%s", idt.ImageIdentity.NamespaceStream(), idt.Tag) -} - -// NamespaceStreamSha renders ImageNamespace/ImageStream:GitCommitSHA -func (idt *IdentityWithTag) NamespaceStreamSha() string { - return idt.ImageIdentity.NamespaceStreamSha() -} - -func (iid *Identity) Tag(tag string) IdentityWithTag { - return IdentityWithTag{ - ImageIdentity: iid, - Tag: tag, - } -} - -func (iid *Identity) ImageRefWithSha(registry string) string { - return fmt.Sprintf("%s/%s", registry, iid.NamespaceStreamSha()) -} - -func (iid *Identity) ArtifactImage(registry string, imageDigest string) artifact.Image { - return artifact.Image{ - Ref: iid.ImageRefWithSha(registry), - Registry: registry, - Repository: iid.ImageNamespace, - Name: iid.ImageStream, - Tag: iid.GitCommitSHA, - Digest: imageDigest, - } -} - -// ImageRef renders Registry/ImageNamespace/ImageStream:Tag -func (idt *IdentityWithTag) ImageRef(registry string) string { - return fmt.Sprintf("%s/%s", registry, idt.NamespaceStreamTag()) -} - -// imageRef renders Registry/ImageNamespace/ImageStream:GitCommitSHA -func (idt *IdentityWithTag) ImageRefWithSha(registry string) string { - return fmt.Sprintf("%s/%s", registry, idt.NamespaceStreamSha()) -} - -func (idt *IdentityWithTag) ArtifactImage(registry string, imageDigest string) artifact.Image { - return artifact.Image{ - Ref: idt.ImageRef(registry), - Registry: registry, - Repository: idt.ImageIdentity.ImageNamespace, - Name: idt.ImageIdentity.ImageStream, - Tag: idt.Tag, - Digest: imageDigest, - } -} diff --git a/internal/installation/bitbucket.go b/internal/installation/bitbucket.go index 07ffe203..a921384c 100644 --- a/internal/installation/bitbucket.go +++ b/internal/installation/bitbucket.go @@ -23,7 +23,7 @@ const ( // NewBitbucketClientConfig returns a *bitbucket.ClientConfig which is derived // from the information about Bitbucket located in the given Kubernetes namespace. -func NewBitbucketClientConfig(c *kclient.Clientset, namespace string, logger logging.LeveledLoggerInterface, privateCert string) (*bitbucket.ClientConfig, error) { +func NewBitbucketClientConfig(c kclient.Interface, namespace string, logger logging.LeveledLoggerInterface, privateCert string) (*bitbucket.ClientConfig, error) { bitbucketSecret, err := c.CoreV1().Secrets(namespace). 
Get(context.TODO(), BitbucketSecretName, metav1.GetOptions{}) if err != nil { diff --git a/internal/installation/nexus.go b/internal/installation/nexus.go index 3da24178..e6841e87 100644 --- a/internal/installation/nexus.go +++ b/internal/installation/nexus.go @@ -20,7 +20,7 @@ const ( // NewNexusClientConfig returns a *nexus.ClientConfig which is derived // from the information about Nexus located in the given Kubernetes namespace. -func NewNexusClientConfig(c *kclient.Clientset, namespace string, logger logging.LeveledLoggerInterface) (*nexus.ClientConfig, error) { +func NewNexusClientConfig(c kclient.Interface, namespace string, logger logging.LeveledLoggerInterface) (*nexus.ClientConfig, error) { nexusSecret, err := c.CoreV1().Secrets(namespace). Get(context.TODO(), NexusSecretName, metav1.GetOptions{}) if err != nil { diff --git a/internal/kubernetes/namespaces.go b/internal/kubernetes/namespaces.go index 2728dfec..eaddd6e7 100644 --- a/internal/kubernetes/namespaces.go +++ b/internal/kubernetes/namespaces.go @@ -10,7 +10,7 @@ import ( ) // TODO: return error -func CreateNamespace(clientset *kubernetes.Clientset, namespace string) { +func CreateNamespace(clientset kubernetes.Interface, namespace string) { log.Printf("Create namespace %s to deploy to", namespace) if _, err := clientset.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ diff --git a/internal/kubernetes/secrets.go b/internal/kubernetes/secrets.go index 81b0ea10..4173372f 100644 --- a/internal/kubernetes/secrets.go +++ b/internal/kubernetes/secrets.go @@ -10,7 +10,7 @@ import ( "k8s.io/client-go/kubernetes" ) -func CreateSecret(clientset *kubernetes.Clientset, namespace string, secret *corev1.Secret) (*corev1.Secret, error) { +func CreateSecret(clientset kubernetes.Interface, namespace string, secret *corev1.Secret) (*corev1.Secret, error) { log.Printf("Create secret %s", secret.Name) @@ -21,7 +21,7 @@ func CreateSecret(clientset *kubernetes.Clientset, namespace string, secret *cor return secret, err } -func GetSecret(clientset *kubernetes.Clientset, namespace string, secretName string) (*corev1.Secret, error) { +func GetSecret(clientset kubernetes.Interface, namespace string, secretName string) (*corev1.Secret, error) { log.Printf("Get secret %s", secretName) @@ -32,7 +32,7 @@ func GetSecret(clientset *kubernetes.Clientset, namespace string, secretName str return secret, err } -func GetSecretKey(clientset *kubernetes.Clientset, namespace, secretName, key string) (string, error) { +func GetSecretKey(clientset kubernetes.Interface, namespace, secretName, key string) (string, error) { log.Printf("Get secret %s", secretName) diff --git a/internal/kubernetes/services.go b/internal/kubernetes/services.go deleted file mode 100644 index 449138ef..00000000 --- a/internal/kubernetes/services.go +++ /dev/null @@ -1,82 +0,0 @@ -package kubernetes - -import ( - "context" - "fmt" - "log" - "strings" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes" -) - -func CreateNodePortService(clientset *kubernetes.Clientset, name string, selectors map[string]string, port, targetPort int32, namespace string) (*v1.Service, error) { - - log.Printf("Create node port service %s", name) - svc, err := clientset.CoreV1().Services(namespace).Create(context.TODO(), - &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"app.kubernetes.io/managed-by": "ods-pipeline"}, - }, - Spec: v1.ServiceSpec{ - 
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeCluster, - Ports: []v1.ServicePort{ - { - Name: fmt.Sprintf("%d-%d", port, targetPort), - NodePort: port, - Port: port, - Protocol: v1.ProtocolTCP, - TargetPort: intstr.FromInt(int(targetPort)), - }, - }, - Selector: selectors, - SessionAffinity: v1.ServiceAffinityNone, - Type: v1.ServiceTypeNodePort, - }, - }, metav1.CreateOptions{}) - - return svc, err -} - -// ServiceHasReadyPods returns false if no pod is assigned to given service -// or if one or more pods are not "Running" -// or one or more of any pods containers are not "ready". -func ServiceHasReadyPods(clientset *kubernetes.Clientset, svc *v1.Service) (bool, string, error) { - podList, err := servicePods(clientset, svc) - if err != nil { - return false, "error", err - } - for _, pod := range podList.Items { - phase := pod.Status.Phase - if phase != "Running" { - return false, fmt.Sprintf("pod %s is in phase %+v", pod.Name, phase), nil - } - for _, containerStatus := range pod.Status.ContainerStatuses { - if !containerStatus.Ready { - return false, fmt.Sprintf("container %s in pod %s is not ready", containerStatus.Name, pod.Name), nil - } - } - } - return true, "ok", nil -} - -func servicePods(clientset *kubernetes.Clientset, svc *v1.Service) (*v1.PodList, error) { - podClient := clientset.CoreV1().Pods(svc.Namespace) - selector := []string{} - for key, value := range svc.Spec.Selector { - selector = append(selector, fmt.Sprintf("%s=%s", key, value)) - } - pods, err := podClient.List( - context.TODO(), - metav1.ListOptions{ - LabelSelector: strings.Join(selector, ","), - }, - ) - if err != nil { - return nil, err - } - return pods.DeepCopy(), nil -} diff --git a/internal/kubernetes/volumes.go b/internal/kubernetes/volumes.go index 0392d128..754f72bd 100644 --- a/internal/kubernetes/volumes.go +++ b/internal/kubernetes/volumes.go @@ -10,7 +10,7 @@ import ( "k8s.io/client-go/kubernetes" ) -func CreatePersistentVolume(clientset *kubernetes.Clientset, pvName string, capacity string, hostPath string, storageClassName string) (*v1.PersistentVolume, error) { +func CreatePersistentVolume(clientset kubernetes.Interface, pvName string, capacity string, hostPath string, storageClassName string) (*v1.PersistentVolume, error) { log.Printf("Create persistent volume %s", pvName) @@ -34,7 +34,7 @@ func CreatePersistentVolume(clientset *kubernetes.Clientset, pvName string, capa return pv, err } -func CreatePersistentVolumeClaim(clientset *kubernetes.Clientset, capacity string, storageClassName string, namespace string) (*v1.PersistentVolumeClaim, error) { +func CreatePersistentVolumeClaim(clientset kubernetes.Interface, capacity string, storageClassName string, namespace string) (*v1.PersistentVolumeClaim, error) { pvcName := "task-pv-claim" log.Printf("Create persistent volume claim %s", pvcName) diff --git a/internal/manager/pipeline.go b/internal/manager/pipeline.go index 37f2f595..482c9560 100644 --- a/internal/manager/pipeline.go +++ b/internal/manager/pipeline.go @@ -44,8 +44,6 @@ func createPipelineRun( tektonClient tektonClient.ClientPipelineRunInterface, ctxt context.Context, cfg PipelineConfig, - taskKind tekton.TaskKind, - taskSuffix string, needQueueing bool) (*tekton.PipelineRun, error) { pr := &tekton.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ @@ -57,7 +55,7 @@ func createPipelineRun( Kind: "PipelineRun", }, Spec: tekton.PipelineRunSpec{ - PipelineSpec: assemblePipelineSpec(cfg, taskKind, taskSuffix), + PipelineSpec: assemblePipelineSpec(cfg), Params: 
extractPipelineParams(cfg.Params), ServiceAccountName: "pipeline", // TODO PodTemplate: cfg.PipelineSpec.PodTemplate, @@ -127,11 +125,11 @@ func pipelineLabels(data PipelineConfig) map[string]string { } // assemblePipelineSpec returns a Tekton pipeline based on given PipelineConfig. -func assemblePipelineSpec(cfg PipelineConfig, taskKind tekton.TaskKind, taskSuffix string) *tekton.PipelineSpec { +func assemblePipelineSpec(cfg PipelineConfig) *tekton.PipelineSpec { var tasks []tekton.PipelineTask tasks = append(tasks, tekton.PipelineTask{ Name: "start", - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-start" + taskSuffix}, + TaskRef: &tekton.TaskRef{Kind: tekton.NamespacedTaskKind, Name: "ods-pipeline-start"}, Params: startTaskParams(), Workspaces: tektonDefaultWorkspaceBindings(), }) @@ -151,7 +149,7 @@ func assemblePipelineSpec(cfg PipelineConfig, taskKind tekton.TaskKind, taskSuff finallyTasks := append([]tekton.PipelineTask{}, cfg.PipelineSpec.Finally...) finallyTasks = append(finallyTasks, tekton.PipelineTask{ Name: "finish", - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-finish" + taskSuffix}, + TaskRef: &tekton.TaskRef{Kind: tekton.NamespacedTaskKind, Name: "ods-pipeline-finish"}, Workspaces: tektonDefaultWorkspaceBindings(), Params: finishTaskParams(), }) diff --git a/internal/manager/pipeline_test.go b/internal/manager/pipeline_test.go index b34e515c..3345114e 100644 --- a/internal/manager/pipeline_test.go +++ b/internal/manager/pipeline_test.go @@ -67,7 +67,7 @@ func TestCreatePipelineRun(t *testing.T) { PVC: "pvc", } t.Run("non-queued PR", func(t *testing.T) { - pr, err := createPipelineRun(tc, ctxt, pData, tekton.NamespacedTaskKind, "", false) + pr, err := createPipelineRun(tc, ctxt, pData, false) if err != nil { t.Fatal(err) } @@ -96,7 +96,7 @@ func TestCreatePipelineRun(t *testing.T) { }) t.Run("pending PR", func(t *testing.T) { - pr, err := createPipelineRun(tc, ctxt, pData, tekton.NamespacedTaskKind, "", true) + pr, err := createPipelineRun(tc, ctxt, pData, true) if err != nil { t.Fatal(err) } @@ -124,7 +124,7 @@ func TestCreatePipelineRun(t *testing.T) { }, }, } - pr, err := createPipelineRun(tc, ctxt, pData, tekton.NamespacedTaskKind, "", false) + pr, err := createPipelineRun(tc, ctxt, pData, false) if err != nil { t.Fatal(err) } @@ -137,7 +137,7 @@ func TestCreatePipelineRun(t *testing.T) { wantTasks := []tekton.PipelineTask{ { Name: "start", - TaskRef: &tekton.TaskRef{Kind: "Task", Name: "ods-start"}, + TaskRef: &tekton.TaskRef{Kind: "Task", Name: "ods-pipeline-start"}, Params: append(startTaskParams(), tektonStringParam("clone-depth", "5")), Workspaces: tektonDefaultWorkspaceBindings(), }, @@ -157,7 +157,7 @@ func TestCreatePipelineRun(t *testing.T) { wantFinallyTasks := []tekton.PipelineTask{ { Name: "finish", - TaskRef: &tekton.TaskRef{Kind: "Task", Name: "ods-finish"}, + TaskRef: &tekton.TaskRef{Kind: "Task", Name: "ods-pipeline-finish"}, Params: []tekton.Param{ tektonStringParam("pipeline-run-name", "$(context.pipelineRun.name)"), tektonStringParam("aggregate-tasks-status", "overriden"), @@ -173,7 +173,6 @@ func TestCreatePipelineRun(t *testing.T) { func TestAssemblePipeline(t *testing.T) { taskKind := tekton.NamespacedTaskKind - taskSuffix := "-latest" cfg := PipelineConfig{ PipelineInfo: PipelineInfo{ Project: "project", @@ -195,7 +194,7 @@ func TestAssemblePipeline(t *testing.T) { Tasks: []tekton.PipelineTask{ { Name: "build", - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-build-go" + taskSuffix}, + TaskRef: &tekton.TaskRef{Kind: taskKind, 
Name: "ods-pipeline-go-build"}, Workspaces: []tekton.WorkspacePipelineTaskBinding{ {Name: "source", Workspace: sharedWorkspaceName}, }, @@ -204,12 +203,12 @@ func TestAssemblePipeline(t *testing.T) { Finally: []tekton.PipelineTask{ { Name: "final", - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "final" + taskSuffix}, + TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "final"}, }, }, }, } - got := assemblePipelineSpec(cfg, taskKind, taskSuffix) + got := assemblePipelineSpec(cfg) want := &tekton.PipelineSpec{ Description: "", Params: []tekton.ParamSpec{ @@ -224,7 +223,7 @@ func TestAssemblePipeline(t *testing.T) { Tasks: []tekton.PipelineTask{ { Name: "start", - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-start-latest"}, + TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-pipeline-start"}, Params: []tekton.Param{ tektonStringParam("url", "$(params.git-repo-url)"), tektonStringParam("git-full-ref", "$(params.git-full-ref)"), @@ -239,7 +238,7 @@ func TestAssemblePipeline(t *testing.T) { { Name: "build", RunAfter: []string{"start"}, - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-build-go-latest"}, + TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-pipeline-go-build"}, Params: nil, Workspaces: tektonDefaultWorkspaceBindings(), }, @@ -247,12 +246,12 @@ func TestAssemblePipeline(t *testing.T) { Finally: []tekton.PipelineTask{ { Name: "final", - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "final-latest"}, + TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "final"}, Params: nil, }, { Name: "finish", - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-finish-latest"}, + TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-pipeline-finish"}, Params: []tekton.Param{ tektonStringParam("pipeline-run-name", "$(context.pipelineRun.name)"), tektonStringParam("aggregate-tasks-status", "$(tasks.status)"), @@ -320,7 +319,7 @@ func TestTasksRunAfterInjection(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { cfg := PipelineConfig{PipelineSpec: config.Pipeline{Tasks: tc.cfgTasks}} - got := assemblePipelineSpec(cfg, tekton.NamespacedTaskKind, "") + got := assemblePipelineSpec(cfg) wantRunAfter := [][]string{} for _, task := range tc.want { wantRunAfter = append(wantRunAfter, task.RunAfter) diff --git a/internal/manager/schedule.go b/internal/manager/schedule.go index e8c8fc6f..611434a0 100644 --- a/internal/manager/schedule.go +++ b/internal/manager/schedule.go @@ -30,11 +30,6 @@ type Scheduler struct { TektonClient tektonClient.ClientInterface KubernetesClient kubernetesClient.ClientInterface Logger logging.LeveledLoggerInterface - // TaskKind is the Tekton resource kind for tasks. - // Either "ClusterTask" or "Task". - TaskKind tekton.TaskKind - // TaskSuffic is the suffix applied to tasks (version information). 
- TaskSuffix string StorageConfig StorageConfig } @@ -74,7 +69,7 @@ func (s *Scheduler) schedule(ctx context.Context, pData PipelineConfig) bool { s.Logger.Debugf("Found %d pipeline runs related to repository %s.", len(pipelineRuns.Items), pData.Repository) needQueueing := needsQueueing(pipelineRuns) s.Logger.Debugf("Creating run for pipeline %s (queued=%v) ...", pData.Component, needQueueing) - _, err = createPipelineRun(s.TektonClient, ctxt, pData, s.TaskKind, s.TaskSuffix, needQueueing) + _, err = createPipelineRun(s.TektonClient, ctxt, pData, needQueueing) if err != nil { s.Logger.Errorf(err.Error()) return false diff --git a/internal/projectpath/root.go b/internal/projectpath/root.go index 96357e4e..adc3cbd9 100644 --- a/internal/projectpath/root.go +++ b/internal/projectpath/root.go @@ -11,3 +11,7 @@ var ( // Root folder of this project Root = filepath.Join(filepath.Dir(b), "../..") ) + +func RootedPath(path string) string { + return filepath.Join(Root, path) +} diff --git a/internal/tasks/tasks.go b/internal/tasks/tasks.go deleted file mode 100644 index 03469ef3..00000000 --- a/internal/tasks/tasks.go +++ /dev/null @@ -1,66 +0,0 @@ -package tasks - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - - "github.com/opendevstack/ods-pipeline/internal/command" - tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "sigs.k8s.io/yaml" -) - -type Task struct { - Name string -} - -func parseTasks(helmTemplateOutput []byte) (map[string][]byte, error) { - tasks := make(map[string][]byte) - - tasksBytes := bytes.Split(helmTemplateOutput, []byte("---")) - - for _, taskBytes := range tasksBytes { - var t tekton.Task - err := yaml.Unmarshal(taskBytes, &t) - if err != nil { - return nil, err - } - if len(t.Name) > 0 { - tasks[t.Name] = taskBytes - } - } - - return tasks, nil -} - -// Render extracts the task information into a struct, and -// executes the Asciidoctor template belonging to it. -func Render(sourceDir, targetDir string) error { - if _, err := os.Stat(sourceDir); os.IsNotExist(err) { - return err - } - stdout, stderr, err := command.RunBufferedInDir( - "helm", - []string{"template", "--values=values.docs.yaml", "."}, - sourceDir, - ) - if err != nil { - fmt.Println(string(stderr)) - return err - } - - tasks, err := parseTasks(stdout) - if err != nil { - return err - } - for name, t := range tasks { - targetFilename := fmt.Sprintf("%s.yaml", name) - target := filepath.Join(targetDir, targetFilename) - err := os.WriteFile(target, t, 0644) - if err != nil { - return err - } - } - return nil -} diff --git a/pkg/tasktesting/bitbucket.go b/internal/tasktesting/bitbucket.go similarity index 91% rename from pkg/tasktesting/bitbucket.go rename to internal/tasktesting/bitbucket.go index d1f23839..5bc900e0 100644 --- a/pkg/tasktesting/bitbucket.go +++ b/internal/tasktesting/bitbucket.go @@ -19,7 +19,7 @@ const ( ) // BitbucketClientOrFatal returns a Bitbucket client, configured based on ConfigMap/Secret in the given namespace. 
-func BitbucketClientOrFatal(t *testing.T, c *kclient.Clientset, namespace string, privateCert bool) *bitbucket.Client {
+func BitbucketClientOrFatal(t *testing.T, c kclient.Interface, namespace string, privateCert bool) *bitbucket.Client {
 	var privateCertPath string
 	if privateCert {
 		privateCertPath = filepath.Join(projectpath.Root, PrivateCertFile)
diff --git a/pkg/tasktesting/git.go b/internal/tasktesting/git.go
similarity index 98%
rename from pkg/tasktesting/git.go
rename to internal/tasktesting/git.go
index 25ec1179..0ced4cf5 100644
--- a/pkg/tasktesting/git.go
+++ b/internal/tasktesting/git.go
@@ -74,7 +74,7 @@ func RemoveAll(t *testing.T, path ...string) {
 }
 
 // SetupBitbucketRepo initializes a Git repo, commits, pushes to Bitbucket and writes the result to the .ods cache.
-func SetupBitbucketRepo(t *testing.T, c *kclient.Clientset, ns, wsDir, projectKey string, privateCert bool) *pipelinectxt.ODSContext {
+func SetupBitbucketRepo(t *testing.T, c kclient.Interface, ns, wsDir, projectKey string, privateCert bool) *pipelinectxt.ODSContext {
 	initAndCommitOrFatal(t, wsDir)
 	originURL := pushToBitbucketOrFatal(t, c, ns, wsDir, projectKey, privateCert)
 
@@ -150,7 +150,7 @@ func PushFileToBitbucketOrFatal(t *testing.T, c *kclient.Clientset, ns, wsDir, b
 	}
 }
 
-func pushToBitbucketOrFatal(t *testing.T, c *kclient.Clientset, ns, wsDir, projectKey string, privateCert bool) string {
+func pushToBitbucketOrFatal(t *testing.T, c kclient.Interface, ns, wsDir, projectKey string, privateCert bool) string {
 	repoName := filepath.Base(wsDir)
 	bbURL := "http://localhost:7990"
 	bbToken, err := kubernetes.GetSecretKey(c, ns, "ods-bitbucket-auth", "password")
diff --git a/internal/tasktesting/helper.go b/internal/tasktesting/helper.go
new file mode 100644
index 00000000..eeb8d532
--- /dev/null
+++ b/internal/tasktesting/helper.go
@@ -0,0 +1,5 @@
+package tasktesting
+
+const (
+	PrivateCertFile = "test/testdata/private-cert/tls.crt"
+)
diff --git a/pkg/tasktesting/nexus.go b/internal/tasktesting/nexus.go
similarity index 90%
rename from pkg/tasktesting/nexus.go
rename to internal/tasktesting/nexus.go
index 502c9d83..99a65556 100644
--- a/pkg/tasktesting/nexus.go
+++ b/internal/tasktesting/nexus.go
@@ -18,7 +18,7 @@ const (
 )
 
 // NexusClientOrFatal returns a Nexus client, configured based on ConfigMap/Secret in the given namespace.
-func NexusClientOrFatal(t *testing.T, c *kclient.Clientset, namespace string, privateCert bool) *nexus.Client {
+func NexusClientOrFatal(t *testing.T, c kclient.Interface, namespace string, privateCert bool) *nexus.Client {
 	ncc, err := installation.NewNexusClientConfig(
 		c, namespace, &logging.LeveledLogger{Level: logging.LevelDebug},
 	)
diff --git a/pkg/exchange/image.go b/pkg/exchange/image.go
deleted file mode 100644
index 86f77024..00000000
--- a/pkg/exchange/image.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package exchange
-
-// ImageDigest represants an image
-type ImageDigest struct {
-	Name string
-	Tag  string
-	Sha  string
-}
diff --git a/pkg/exchange/test.go b/pkg/exchange/test.go
deleted file mode 100644
index 0a302bdb..00000000
--- a/pkg/exchange/test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package exchange
-
-import "time"
-
-type PipelineInfo struct {
-	Time time.Time
-	URL  string
-}
-
-type ItemInfo struct {
-	Name        string
-	Description string
-}
-
-type TestReport struct {
-	Name        string
-	Description string
-	Executor    string
-	Time        time.Time
-	Suites      []TestSuite
-	Properties  map[string]string // extension mechanims
-	// extra prop for linking to e.q. requirement(s)?
-}
-
-type TestSuite struct {
-	Name        string
-	Description string
-	Cases       []TestCase
-	Duration    time.Duration
-	Properties  map[string]string // extension mechanims
-	// extra prop for linking to e.q. requirement(s)?
-}
-
-type TestCase struct {
-	Name        string
-	Description string
-	Result      string // should be enum: Passed / Failed / Skipped
-	Message     string
-	Duration    time.Duration
-	Properties  map[string]string // extension mechanims
-	// extra prop for linking to e.q. requirement(s)?
-}
diff --git a/pkg/odstasktest/assertions.go b/pkg/odstasktest/assertions.go
new file mode 100644
index 00000000..ed2d707b
--- /dev/null
+++ b/pkg/odstasktest/assertions.go
@@ -0,0 +1,55 @@
+package odstasktest
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+)
+
+// AssertFilesExist checks that all files named by wantFiles exist in wsDir.
+// A test error is reported for each file that does not exist.
+func AssertFilesExist(t *testing.T, wsDir string, wantFiles ...string) {
+	for _, wf := range wantFiles {
+		filename := filepath.Join(wsDir, wf)
+		if _, err := os.Stat(filename); os.IsNotExist(err) {
+			t.Errorf("Want %s, but got nothing", filename)
+		}
+	}
+}
+
+// AssertFileContent checks that the file named by filename in the directory
+// wsDir has the exact content specified by want (leading and trailing
+// whitespace is trimmed before comparison).
+func AssertFileContent(t *testing.T, wsDir, filename, want string) {
+	got, err := getTrimmedFileContent(filepath.Join(wsDir, filename))
+	if err != nil {
+		t.Errorf("get content of %s: %s", filename, err)
+		return
+	}
+	if got != want {
+		t.Errorf("got '%s', want '%s' in file %s", got, want, filename)
+	}
+}
+
+// AssertFileContentContains checks that the file named by filename in the directory
+// wsDir contains all of wantContains.
+func AssertFileContentContains(t *testing.T, wsDir, filename string, wantContains ...string) {
+	content, err := os.ReadFile(filepath.Join(wsDir, filename))
+	if err != nil {
+		t.Fatalf("could not read %s: %s", filename, err)
+	}
+	got := string(content)
+	for _, w := range wantContains {
+		if !strings.Contains(got, w) {
+			t.Fatalf("got '%s', want '%s' contained in file %s", got, w, filename)
+		}
+	}
+}
+
+func getTrimmedFileContent(filename string) (string, error) {
+	content, err := os.ReadFile(filename)
+	if err != nil {
+		return "", err
+	}
+	return strings.TrimSpace(string(content)), nil
+}
diff --git a/pkg/odstasktest/doc.go b/pkg/odstasktest/doc.go
new file mode 100644
index 00000000..1432e3b0
--- /dev/null
+++ b/pkg/odstasktest/doc.go
@@ -0,0 +1,77 @@
+/*
+Package odstasktest implements ODS Pipeline specific functionality to run
+Tekton tasks in a KinD cluster on top of package tektontaskrun.
+
+odstasktest is intended to be used as a library for testing ODS Pipeline
+tasks using Go.
+
+Example usage:
+
+	package test
+
+	import (
+		"log"
+		"os"
+		"path/filepath"
+		"runtime"
+		"testing"
+
+		ott "github.com/opendevstack/ods-pipeline/pkg/odstasktest"
+		ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun"
+		tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+	)
+
+	var (
+		namespaceConfig *ttr.NamespaceConfig
+		rootPath        = "../.."
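+		// assumed layout: rootPath points at the repository root, relative
+		// to this test package; adjust it to match your module layout.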
+	)
+
+	func TestMain(m *testing.M) {
+		cc, err := ttr.StartKinDCluster(
+			ttr.LoadImage(ttr.ImageBuildConfig{
+				Dockerfile: "build/images/Dockerfile.my-task",
+				ContextDir: rootPath,
+			}),
+		)
+		if err != nil {
+			log.Fatal("Could not start KinD cluster: ", err)
+		}
+		nc, cleanup, err := ttr.SetupTempNamespace(
+			cc,
+			ott.StartNexus(),
+			ott.InstallODSPipeline(nil),
+			ttr.InstallTaskFromPath(
+				filepath.Join(rootPath, "build/tasks/my-task.yaml"),
+				nil,
+			),
+		)
+		if err != nil {
+			log.Fatal("Could not set up temporary namespace: ", err)
+		}
+		defer cleanup()
+		namespaceConfig = nc
+		os.Exit(m.Run())
+	}
+
+	func TestMyTask(t *testing.T) {
+		if err := ttr.RunTask(
+			ttr.InNamespace(namespaceConfig.Name),
+			ttr.UsingTask("my-task"),
+			ttr.WithStringParams(map[string]string{
+				"go-os":   runtime.GOOS,
+				"go-arch": runtime.GOARCH,
+			}),
+			ott.WithGitSourceWorkspace(t, "../testdata/workspaces/go-sample-app", namespaceConfig.Name),
+			ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun) {
+				ott.AssertFilesExist(
+					t, config.WorkspaceConfigs["source"].Dir,
+					"docker/Dockerfile",
+					"docker/app",
+				)
+			}),
+		); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// further tests here ...
+*/
+package odstasktest
diff --git a/pkg/odstasktest/install.go b/pkg/odstasktest/install.go
new file mode 100644
index 00000000..0463d6ca
--- /dev/null
+++ b/pkg/odstasktest/install.go
@@ -0,0 +1,51 @@
+package odstasktest
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/opendevstack/ods-pipeline/internal/command"
+	"github.com/opendevstack/ods-pipeline/internal/projectpath"
+	ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun"
+)
+
+// InstallOptions configure the installation of ODS Pipeline.
+type InstallOptions struct {
+	// PrivateCert specifies if services should be accessed through TLS
+	// with a private certificate.
+	PrivateCert bool
+}
+
+// InstallODSPipeline installs the ODS Pipeline Helm chart in the namespace
+// given in NamespaceConfig.
+func InstallODSPipeline(opts *InstallOptions) ttr.NamespaceOpt {
+	if opts == nil {
+		opts = &InstallOptions{PrivateCert: false}
+	}
+	return func(cc *ttr.ClusterConfig, nc *ttr.NamespaceConfig) error {
+		return installCDNamespaceResources(nc.Name, "pipeline", opts.PrivateCert)
+	}
+}
+
+func installCDNamespaceResources(ns, serviceaccount string, privateCert bool) error {
+	scriptArgs := []string{"-n", ns, "-s", serviceaccount, "--no-diff"}
+	// if testing.Verbose() {
+	// 	scriptArgs = append(scriptArgs, "-v")
+	// }
+	if privateCert {
+		// Insert as first flag because install-inside-kind.sh won't recognize it otherwise.
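+		// Resulting order (illustration): ["--private-cert=<path>", "-n", ns, "-s", serviceaccount, "--no-diff"]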
+		scriptArgs = append(
+			[]string{fmt.Sprintf("--private-cert=%s", filepath.Join(projectpath.Root, "test/testdata/private-cert/tls.crt"))},
+			scriptArgs...,
+		)
+	}
+
+	return command.Run(
+		"bash",
+		append([]string{filepath.Join(projectpath.Root, "scripts/install-inside-kind.sh")}, scriptArgs...),
+		[]string{},
+		os.Stdout,
+		os.Stderr,
+	)
+}
diff --git a/pkg/odstasktest/services.go b/pkg/odstasktest/services.go
new file mode 100644
index 00000000..d2e375db
--- /dev/null
+++ b/pkg/odstasktest/services.go
@@ -0,0 +1,48 @@
+package odstasktest
+
+import (
+	"flag"
+	"os"
+
+	"github.com/opendevstack/ods-pipeline/internal/command"
+	"github.com/opendevstack/ods-pipeline/internal/projectpath"
+	ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun"
+)
+
+var restartNexusFlag = flag.Bool("ods-restart-nexus", false, "Whether to force a restart of Nexus")
+var restartSonarQubeFlag = flag.Bool("ods-restart-sonarqube", false, "Whether to force a restart of SonarQube")
+var restartBitbucketFlag = flag.Bool("ods-restart-bitbucket", false, "Whether to force a restart of Bitbucket")
+
+// StartNexus starts a Nexus instance in a Docker container (named
+// ods-test-nexus). If a container of the same name already exists, it will be
+// reused unless -ods-restart-nexus is passed.
+func StartNexus() ttr.NamespaceOpt {
+	flag.Parse()
+	return runService("run-nexus.sh", *restartNexusFlag)
+}
+
+// StartSonarQube starts a SonarQube instance in a Docker container (named
+// ods-test-sonarqube). If a container of the same name already exists, it will
+// be reused unless -ods-restart-sonarqube is passed.
+func StartSonarQube() ttr.NamespaceOpt {
+	flag.Parse()
+	return runService("run-sonarqube.sh", *restartSonarQubeFlag)
+}
+
+// StartBitbucket starts a Bitbucket instance in a Docker container (named
+// ods-test-bitbucket-server). If a container of the same name already exists,
+// it will be reused unless -ods-restart-bitbucket is passed.
+func StartBitbucket() ttr.NamespaceOpt {
+	flag.Parse()
+	return runService("run-bitbucket.sh", *restartBitbucketFlag)
+}
+
+func runService(script string, restart bool) ttr.NamespaceOpt {
+	return func(cc *ttr.ClusterConfig, nc *ttr.NamespaceConfig) error {
+		args := []string{projectpath.RootedPath("scripts/" + script)}
+		if !restart {
+			args = append(args, "--reuse")
+		}
+		return command.Run("bash", args, []string{}, os.Stdout, os.Stderr)
+	}
+}
diff --git a/pkg/odstasktest/workspace.go b/pkg/odstasktest/workspace.go
new file mode 100644
index 00000000..d3e6c378
--- /dev/null
+++ b/pkg/odstasktest/workspace.go
@@ -0,0 +1,45 @@
+package odstasktest
+
+import (
+	"testing"
+
+	"github.com/opendevstack/ods-pipeline/internal/tasktesting"
+	"github.com/opendevstack/ods-pipeline/pkg/pipelinectxt"
+	ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun"
+)
+
+// GetSourceWorkspaceContext reads the ODS context from the source workspace.
+func GetSourceWorkspaceContext(t *testing.T, config *ttr.TaskRunConfig) (dir string, ctxt *pipelinectxt.ODSContext) {
+	dir = config.WorkspaceConfigs["source"].Dir
+	ctxt, err := pipelinectxt.NewFromCache(dir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return
+}
+
+// InitGitRepo initialises a Git repository inside the given workspace.
+// The workspace will also be set up with an ODS context directory in .ods
+// with the given namespace.
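+//
+// A usage sketch (directory name assumed), passing it as a workspace option:
+//
+//	WithSourceWorkspace(t, "../testdata/workspaces/sample-app",
+//		InitGitRepo(t, namespaceConfig.Name))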
+func InitGitRepo(t *testing.T, namespace string) ttr.WorkspaceOpt {
+	return func(c *ttr.WorkspaceConfig) error {
+		_ = tasktesting.SetupGitRepo(t, namespace, c.Dir)
+		return nil
+	}
+}
+
+// WithGitSourceWorkspace configures the task run with a workspace named
+// "source", mapped to the directory sourced from sourceDir. The directory is
+// initialised as a Git repository with an ODS context with the given namespace.
+func WithGitSourceWorkspace(t *testing.T, sourceDir, namespace string, opts ...ttr.WorkspaceOpt) ttr.TaskRunOpt {
+	return WithSourceWorkspace(
+		t, sourceDir,
+		append([]ttr.WorkspaceOpt{InitGitRepo(t, namespace)}, opts...)...,
+	)
+}
+
+// WithSourceWorkspace configures the task run with a workspace named
+// "source", mapped to the directory sourced from sourceDir.
+func WithSourceWorkspace(t *testing.T, sourceDir string, opts ...ttr.WorkspaceOpt) ttr.TaskRunOpt {
+	return ttr.WithWorkspace("source", sourceDir, opts...)
+}
diff --git a/pkg/pipelinectxt/context.go b/pkg/pipelinectxt/context.go
index 14c0df0c..1d11a088 100644
--- a/pkg/pipelinectxt/context.go
+++ b/pkg/pipelinectxt/context.go
@@ -60,6 +60,12 @@ func (o *ODSContext) WriteCache(wsDir string) error {
 	return nil
 }
 
+// NewFromCache returns an ODS context read from the .ods cache in wsDir.
+func NewFromCache(wsDir string) (o *ODSContext, err error) {
+	o = &ODSContext{}
+	err = o.ReadCache(wsDir)
+	return
+}
+
 // ReadCache reads ODS context from .ods
 // TODO: test that this works
 func (o *ODSContext) ReadCache(wsDir string) error {
diff --git a/pkg/sonar/client.go b/pkg/sonar/client.go
deleted file mode 100644
index 0eb8421b..00000000
--- a/pkg/sonar/client.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package sonar
-
-import (
-	b64 "encoding/base64"
-	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"strings"
-	"time"
-
-	"github.com/opendevstack/ods-pipeline/pkg/logging"
-	"github.com/opendevstack/ods-pipeline/pkg/pipelinectxt"
-)
-
-type ClientInterface interface {
-	Scan(sonarProject, branch, commit string, pr *PullRequest, outWriter, errWriter io.Writer) error
-	QualityGateGet(p QualityGateGetParams) (*QualityGate, error)
-	GenerateReports(sonarProject, author, branch, rootPath, artifactPrefix string) error
-	ExtractComputeEngineTaskID(filename string) (string, error)
-	ComputeEngineTaskGet(p ComputeEngineTaskGetParams) (*ComputeEngineTask, error)
-}
-
-// Loosely based on https://github.com/brandur/wanikaniapi.
-type Client struct {
-	httpClient   *http.Client
-	clientConfig *ClientConfig
-	baseURL      *url.URL
-}
-
-type ClientConfig struct {
-	Timeout            time.Duration
-	APIToken           string
-	HTTPClient         *http.Client
-	MaxRetries         int
-	BaseURL            string
-	ServerEdition      string
-	TrustStore         string
-	TrustStorePassword string
-	Debug              bool
-	// Logger is the logger to send logging messages to.
-	Logger logging.LeveledLoggerInterface
-}
-
-func NewClient(clientConfig *ClientConfig) (*Client, error) {
-	httpClient := clientConfig.HTTPClient
-	if httpClient == nil {
-		httpClient = &http.Client{}
-	}
-	if clientConfig.Timeout > 0 {
-		httpClient.Timeout = clientConfig.Timeout
-	} else {
-		httpClient.Timeout = 20 * time.Second
-	}
-	if clientConfig.Logger == nil {
-		clientConfig.Logger = &logging.LeveledLogger{Level: logging.LevelError}
-	}
-	if clientConfig.ServerEdition == "" {
-		clientConfig.ServerEdition = "community"
-	}
-	baseURL, err := url.Parse(clientConfig.BaseURL)
-	if err != nil {
-		return nil, fmt.Errorf("parse base URL: %w", err)
-	}
-	return &Client{
-		httpClient:   httpClient,
-		clientConfig: clientConfig,
-		baseURL:      baseURL,
-	}, nil
-}
-
-// ProjectKey returns the SonarQube project key for given context and artifact prefix.
-// Monorepo support: separate projects in SonarQube.
-// See https://community.sonarsource.com/t/monorepo-and-sonarqube/37990/3.
-func ProjectKey(ctxt *pipelinectxt.ODSContext, artifactPrefix string) string {
-	sonarProject := fmt.Sprintf("%s-%s", ctxt.Project, ctxt.Component)
-	if len(artifactPrefix) > 0 {
-		sonarProject = fmt.Sprintf("%s-%s", sonarProject, strings.TrimSuffix(artifactPrefix, "-"))
-	}
-	return sonarProject
-}
-
-func (c *Client) logger() logging.LeveledLoggerInterface {
-	return c.clientConfig.Logger
-}
-
-func (c *Client) javaSystemProperties() []string {
-	return []string{
-		fmt.Sprintf("-Djavax.net.ssl.trustStore=%s", c.clientConfig.TrustStore),
-		fmt.Sprintf("-Djavax.net.ssl.trustStorePassword=%s", c.clientConfig.TrustStorePassword),
-	}
-}
-
-func (c *Client) get(urlPath string) (int, []byte, error) {
-	u, err := c.baseURL.Parse(urlPath)
-	if err != nil {
-		return 0, nil, fmt.Errorf("parse URL path: %w", err)
-	}
-	c.logger().Debugf("GET %s", u)
-	req, err := http.NewRequest("GET", u.String(), nil)
-	if err != nil {
-		return 0, nil, fmt.Errorf("could not create request: %s", err)
-	}
-
-	res, err := c.do(req)
-	if err != nil {
-		return 500, nil, fmt.Errorf("got error %s", err)
-	}
-	defer res.Body.Close()
-
-	body, err := io.ReadAll(res.Body)
-	return res.StatusCode, body, err
-}
-
-func (c *Client) do(req *http.Request) (*http.Response, error) {
-	// The user token is sent via the login field of HTTP basic authentication,
-	// without any password. See https://docs.sonarqube.org/latest/extend/web-api/.
-	credentials := fmt.Sprintf("%s:", c.clientConfig.APIToken)
-	basicAuth := b64.StdEncoding.EncodeToString([]byte(credentials))
-	req.Header.Set("Content-Type", "application/json")
-	req.Header.Set("Authorization", fmt.Sprintf("Basic %s", basicAuth))
-	return c.httpClient.Do(req)
-}
diff --git a/pkg/sonar/client_test.go b/pkg/sonar/client_test.go
deleted file mode 100644
index 936ed968..00000000
--- a/pkg/sonar/client_test.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package sonar
-
-import (
-	"fmt"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-)
-
-func testClient(t *testing.T, baseURL string) *Client {
-	c, err := NewClient(&ClientConfig{BaseURL: baseURL})
-	if err != nil {
-		t.Fatal(err)
-	}
-	return c
-}
-
-func TestGetRequest(t *testing.T) {
-	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		fmt.Fprint(w, r.URL.Path)
-	}))
-	defer srv.Close()
-	tests := map[string]struct {
-		baseURL string
-	}{
-		"base URL without trailing slash": {
-			baseURL: srv.URL,
-		},
-		"base URL with trailing slash": {
-			baseURL: srv.URL + "/",
-		},
-	}
-	for name, tc := range tests {
-		t.Run(name, func(t *testing.T) {
-			bitbucketClient := testClient(t, tc.baseURL)
-			requestPath := "/foo"
-			code, out, err := bitbucketClient.get(requestPath)
-			if err != nil {
-				t.Fatal(err)
-			}
-			if code != 200 {
-				t.Fatal("expected 200")
-			}
-			if string(out) != requestPath {
-				t.Fatalf("expected %s, got: %s", requestPath, string(out))
-			}
-		})
-	}
-}
diff --git a/pkg/sonar/compute_engine.go b/pkg/sonar/compute_engine.go
deleted file mode 100644
index bfc8d7a8..00000000
--- a/pkg/sonar/compute_engine.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package sonar
-
-import (
-	"encoding/json"
-	"fmt"
-)
-
-const (
-	TaskStatusInProgress = "IN_PROGRESS"
-	TaskStatusPending    = "PENDING"
-	TaskStatusSuccess    = "SUCCESS"
-	TaskStatusFailed     = "FAILED"
-)
-
-type ComputeEngineTask struct {
-	Organization       string `json:"organization"`
-	ID                 string `json:"id"`
-	Type               string `json:"type"`
-	ComponentID        string `json:"componentId"`
-	ComponentKey       string `json:"componentKey"`
-	ComponentName      string `json:"componentName"`
-	ComponentQualifier string `json:"componentQualifier"`
-	AnalysisID         string `json:"analysisId"`
-	Status             string `json:"status"`
-	SubmittedAt        string `json:"submittedAt"`
-	StartedAt          string `json:"startedAt"`
-	ExecutedAt         string `json:"executedAt"`
-	ExecutionTimeMs    int    `json:"executionTimeMs"`
-	ErrorMessage       string `json:"errorMessage"`
-	Logs               bool   `json:"logs"`
-	HasErrorStacktrace bool   `json:"hasErrorStacktrace"`
-	ErrorStacktrace    string `json:"errorStacktrace"`
-	ScannerContext     string `json:"scannerContext"`
-	HasScannerContext  bool   `json:"hasScannerContext"`
-}
-
-type computeEngineTaskResponse struct {
-	Task *ComputeEngineTask `json:"task"`
-}
-
-type ComputeEngineTaskGetParams struct {
-	AdditionalFields string `json:"additionalFields"`
-	ID               string `json:"id"`
-}
-
-func (c *Client) ComputeEngineTaskGet(p ComputeEngineTaskGetParams) (*ComputeEngineTask, error) {
-	urlPath := fmt.Sprintf("/api/ce/task?id=%s", p.ID)
-	statusCode, response, err := c.get(urlPath)
-	if err != nil {
-		return nil, fmt.Errorf("request returned err: %w", err)
-	}
-	if statusCode != 200 {
-		return nil, fmt.Errorf("request returned unexpected response code: %d, body: %s", statusCode, string(response))
-	}
-	var cetr *computeEngineTaskResponse
-	err = json.Unmarshal(response, &cetr)
-	if err != nil {
-		return nil, fmt.Errorf("could not unmarshal response: %w", err)
-	}
-	return cetr.Task, nil
-}
diff --git a/pkg/sonar/compute_engine_test.go b/pkg/sonar/compute_engine_test.go
deleted file mode 100644
index 838828df..00000000
--- a/pkg/sonar/compute_engine_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package sonar
-
-import (
-	"testing"
-
-	"github.com/opendevstack/ods-pipeline/test/testserver"
-)
-
-func TestComputeEngineTaskGet(t *testing.T) {
-
-	srv, cleanup := testserver.NewTestServer(t)
-	defer cleanup()
-	c := testClient(t, srv.Server.URL)
-
-	tests := map[string]struct {
-		Fixture    string
-		WantStatus string
-	}{
-		"FAILED status": {
-			Fixture:    "sonar/task_failed.json",
-			WantStatus: TaskStatusFailed,
-		},
-		"SUCCESS status": {
-			Fixture:    "sonar/task_success.json",
-			WantStatus: TaskStatusSuccess,
-		},
-	}
-
-	for name, tc := range tests {
-		t.Run(name, func(t *testing.T) {
-			srv.EnqueueResponse(
-				t, "/api/ce/task",
-				200, tc.Fixture,
-			)
-			taskID := "AVAn5RKqYwETbXvgas-I"
-			got, err := c.ComputeEngineTaskGet(ComputeEngineTaskGetParams{ID: taskID})
-			if err != nil {
-				t.Fatalf("Unexpected error on request: %s", err)
-			}
-
-			// check extracted status matches
-			if got.Status != tc.WantStatus {
-				t.Fatalf("want %s, got %s", tc.WantStatus, got.Status)
-			}
-
-			// check sent task ID matches
-			lr, err := srv.LastRequest()
-			if err != nil {
-				t.Fatal(err)
-			}
-			q := lr.URL.Query()
-			if q.Get("id") != taskID {
-				t.Fatalf("want %s, got %s", taskID, q.Get("id"))
-			}
-		})
-	}
-}
diff --git a/pkg/sonar/quality_gate.go b/pkg/sonar/quality_gate.go
deleted file mode 100644
index 575a7309..00000000
--- a/pkg/sonar/quality_gate.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package sonar
-
-import (
-	"encoding/json"
-	"fmt"
-)
-
-const (
-	QualityGateStatusOk    = "OK"
-	QualityGateStatusWarn  = "WARN"
-	QualityGateStatusError = "ERROR"
-	QualityGateStatusNone  = "NONE"
-)
-
-type QualityGate struct {
-	ProjectStatus QualityGateProjectStatus `json:"projectStatus"`
-}
-
-type QualityGateProjectStatus struct {
-	Status            string                 `json:"status"`
-	IgnoredConditions bool                   `json:"ignoredConditions"`
-	Conditions        []QualityGateCondition `json:"conditions"`
-	Periods           []QualityGatePeriod    `json:"periods"`
-}
-
-type QualityGateCondition struct {
-	Status         string `json:"status"`
-	MetricKey      string `json:"metricKey"`
-	Comparator     string `json:"comparator"`
-	PeriodIndex    int    `json:"periodIndex"`
-	ErrorThreshold string `json:"errorThreshold,omitempty"`
-	ActualValue    string `json:"actualValue"`
-}
-
-type QualityGatePeriod struct {
-	Index     int    `json:"index"`
-	Mode      string `json:"mode"`
-	Date      string `json:"date"`
-	Parameter string `json:"parameter"`
-}
-
-type QualityGateGetParams struct {
-	ProjectKey  string
-	Branch      string
-	PullRequest string
-}
-
-func (c *Client) QualityGateGet(p QualityGateGetParams) (*QualityGate, error) {
-	urlPath := "/api/qualitygates/project_status?projectKey=" + p.ProjectKey
-	if p.PullRequest != "" && p.PullRequest != "0" {
-		urlPath = urlPath + "&pullRequest=" + p.PullRequest
-	} else if p.Branch != "" {
-		urlPath = urlPath + "&branch=" + p.Branch
-	}
-	statusCode, response, err := c.get(urlPath)
-	if err != nil {
-		return &QualityGate{ProjectStatus: QualityGateProjectStatus{Status: QualityGateStatusNone}}, nil
-	}
-	if statusCode != 200 {
-		return nil, fmt.Errorf("request returned unexpected response code: %d, body: %s", statusCode, string(response))
-	}
-	var qg *QualityGate
-	err = json.Unmarshal(response, &qg)
-	if err != nil {
-		return qg, err
-	}
-	return qg, nil
-}
diff --git a/pkg/sonar/quality_gate_test.go b/pkg/sonar/quality_gate_test.go
deleted file mode 100644
index 1c35398d..00000000
--- a/pkg/sonar/quality_gate_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package sonar
-
-import (
-	"testing"
-
-	"github.com/opendevstack/ods-pipeline/test/testserver"
-)
-
-func TestQualityGateGet(t *testing.T) {
-
-	srv, cleanup := testserver.NewTestServer(t)
-	defer cleanup()
-	c := testClient(t, srv.Server.URL)
-
-	tests := map[string]struct {
-		responseFixture string
-		params          QualityGateGetParams
-		wantRequestURI  string
-		wantStatus      string
-	}{
-		"ERROR status": {
-			params:          QualityGateGetParams{ProjectKey: "foo"},
-			responseFixture: "sonar/project_status_error.json",
-			wantRequestURI:  "/api/qualitygates/project_status?projectKey=foo",
-			wantStatus:      "ERROR",
-		},
-		"OK status": {
-			params:          QualityGateGetParams{ProjectKey: "foo"},
-			responseFixture: "sonar/project_status_ok.json",
-			wantRequestURI:  "/api/qualitygates/project_status?projectKey=foo",
-			wantStatus:      "OK",
-		},
-		"OK status for branch": {
-			params:          QualityGateGetParams{ProjectKey: "foo", Branch: "bar"},
-			responseFixture: "sonar/project_status_ok.json",
-			wantRequestURI:  "/api/qualitygates/project_status?projectKey=foo&branch=bar",
-			wantStatus:      "OK",
-		},
-		"OK status for PR": {
-			params:          QualityGateGetParams{ProjectKey: "foo", PullRequest: "123"},
-			responseFixture: "sonar/project_status_ok.json",
-			wantRequestURI:  "/api/qualitygates/project_status?projectKey=foo&pullRequest=123",
-			wantStatus:      "OK",
-		},
-	}
-
-	for name, tc := range tests {
-		t.Run(name, func(t *testing.T) {
-			srv.EnqueueResponse(
-				t, "/api/qualitygates/project_status",
-				200, tc.responseFixture,
-			)
-			got, err := c.QualityGateGet(tc.params)
-			if err != nil {
-				t.Fatalf("Unexpected error on request: %s", err)
-			}
-			if got.ProjectStatus.Status != tc.wantStatus {
-				t.Fatalf("want %s, got %s", tc.wantStatus, got.ProjectStatus.Status)
-			}
-			req, err := srv.LastRequest()
-			if err != nil {
-				t.Fatal(err)
-			}
-			if req.URL.RequestURI() != tc.wantRequestURI {
-				t.Fatalf("want request URI %s, got %s", tc.wantRequestURI, req.URL.RequestURI())
-			}
-		})
-	}
-}
diff --git a/pkg/sonar/report.go b/pkg/sonar/report.go
deleted file mode 100644
index 5328b74a..00000000
--- a/pkg/sonar/report.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package sonar
-
-import (
-	"fmt"
-	"path/filepath"
-	"time"
-
-	"github.com/opendevstack/ods-pipeline/internal/command"
-	"github.com/opendevstack/ods-pipeline/internal/file"
-	"github.com/opendevstack/ods-pipeline/pkg/pipelinectxt"
-)
-
-// GenerateReports generates SonarQube reports using cnesreport.
-// See https://github.com/cnescatlab/sonar-cnes-report.
-func (c *Client) GenerateReports(sonarProject, author, branch, rootPath, artifactPrefix string) error {
-	reportParams := append(
-		c.javaSystemProperties(),
-		"-jar", "/usr/local/cnes/cnesreport.jar",
-		"-s", c.clientConfig.BaseURL,
-		"-t", c.clientConfig.APIToken,
-		"-p", sonarProject,
-		"-a", author,
-		branch,
-	)
-	stdout, stderr, err := command.RunBuffered("java", reportParams)
-	if err != nil {
-		return fmt.Errorf(
-			"report generation failed: %w, stderr: %s, stdout: %s",
-			err, string(stderr), string(stdout),
-		)
-	}
-
-	artifactsPath := filepath.Join(rootPath, pipelinectxt.SonarAnalysisPath)
-	err = copyReportFiles(sonarProject, artifactsPath, artifactPrefix)
-	if err != nil {
-		return fmt.Errorf("copying report to artifacts failed: %w", err)
-	}
-
-	return nil
-}
-
-func copyReportFiles(project, destinationDir, artifactPrefix string) error {
-	analysisReportFile := fmt.Sprintf(
-		"%s-%s-analysis-report.md",
-		currentDate(),
-		project,
-	)
-	err := file.Copy(
-		analysisReportFile,
-		filepath.Join(destinationDir, artifactPrefix+"analysis-report.md"),
-	)
-	if err != nil {
-		return fmt.Errorf("copying %s failed: %w", analysisReportFile, err)
-	}
-
-	issuesReportFile := fmt.Sprintf(
-		"%s-%s-issues-report.csv",
-		currentDate(),
-		project,
-	)
-	err = file.Copy(
-		issuesReportFile,
-		filepath.Join(destinationDir, artifactPrefix+"issues-report.csv"),
-	)
-	if err != nil {
-		return fmt.Errorf("copying %s failed: %w", issuesReportFile, err)
-	}
-	return nil
-}
-
-// currentDate returns the current date as YYYY-MM-DD
-func currentDate() string {
-	currentTime := time.Now()
-	return currentTime.Format("2006-01-02")
-}
diff --git a/pkg/sonar/scan.go b/pkg/sonar/scan.go
deleted file mode 100644
index 988de1ee..00000000
--- a/pkg/sonar/scan.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package sonar
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"os"
-	"strings"
-
-	"github.com/opendevstack/ods-pipeline/internal/command"
-)
-
-type PullRequest struct {
-	Key    string
-	Branch string
-	Base   string
-}
-
-// Scan report
-type ReportTask struct {
-	ProjectKey    string
-	ServerUrl     string
-	ServerVersion string
-	Branch        string
-	DashboardUrl  string
-	CeTaskId      string
-	CeTaskUrl     string
-}
-
-const (
-	ScannerworkDir     = ".scannerwork"
-	ReportTaskFilename = "report-task.txt"
-	ReportTaskFile     = ScannerworkDir + "/" + ReportTaskFilename
-)
-
-// Scan scans the source code and uploads the analysis to given SonarQube project.
-// If pr is non-nil, information for pull request decoration is sent.
-func (c *Client) Scan(sonarProject, branch, commit string, pr *PullRequest, outWriter, errWriter io.Writer) error {
-	scannerParams := []string{
-		fmt.Sprintf("-Dsonar.host.url=%s", c.clientConfig.BaseURL),
-		"-Dsonar.scm.provider=git",
-		fmt.Sprintf("-Dsonar.projectKey=%s", sonarProject),
-		fmt.Sprintf("-Dsonar.projectName=%s", sonarProject),
-		fmt.Sprintf("-Dsonar.projectVersion=%s", commit),
-	}
-	if c.clientConfig.Debug {
-		scannerParams = append(scannerParams, "-X")
-	}
-	// Both Branch Analysis and Pull Request Analysis are only available
-	// starting in Developer Edition, see
-	// https://docs.sonarqube.org/latest/branches/overview/ and
-	// https://docs.sonarqube.org/latest/analysis/pull-request/.
-	if c.clientConfig.ServerEdition != "community" {
-		if pr != nil {
-			scannerParams = append(
-				scannerParams,
-				fmt.Sprintf("-Dsonar.pullrequest.key=%s", pr.Key),
-				fmt.Sprintf("-Dsonar.pullrequest.branch=%s", pr.Branch),
-				fmt.Sprintf("-Dsonar.pullrequest.base=%s", pr.Base),
-			)
-		} else {
-			scannerParams = append(scannerParams, fmt.Sprintf("-Dsonar.branch.name=%s", branch))
-		}
-	}
-
-	c.logger().Debugf("Scan params: %v", scannerParams)
-	// The authentication token of a SonarQube user with "Execute Analysis"
-	// permission on the project is passed as "sonar.login" for authentication,
-	// see https://docs.sonarqube.org/latest/analysis/analysis-parameters/.
-	scannerParams = append(scannerParams, fmt.Sprintf("-Dsonar.login=%s", c.clientConfig.APIToken))
-
-	return command.Run(
-		"sonar-scanner", scannerParams,
-		[]string{fmt.Sprintf("SONAR_SCANNER_OPTS=%s", strings.Join(c.javaSystemProperties(), " "))},
-		outWriter, errWriter,
-	)
-}
-
-/*
-Example of the file located in .scannerwork/report-task.txt:
-
-	projectKey=XXXX-python
-	serverUrl=https://sonarqube-ods.XXXX.com
-	serverVersion=8.2.0.32929
-	branch=dummy
-	dashboardUrl=https://sonarqube-ods.XXXX.com/dashboard?id=XXXX-python&branch=dummy
-	ceTaskId=AXxaAoUSsjAMlIY9kNmn
-	ceTaskUrl=https://sonarqube-ods.XXXX.com/api/ce/task?id=AXxaAoUSsjAMlIY9kNmn
-*/
-func (c *Client) ExtractComputeEngineTaskID(filename string) (string, error) {
-	file, err := os.Open(filename)
-	if err != nil {
-		return "", err
-	}
-	defer file.Close()
-
-	taskIDPrefix := "ceTaskId="
-	scanner := bufio.NewScanner(file)
-	for scanner.Scan() {
-		line := strings.TrimSpace(scanner.Text())
-		if strings.HasPrefix(line, taskIDPrefix) {
-			return strings.TrimPrefix(line, taskIDPrefix), nil
-		}
-	}
-
-	if err := scanner.Err(); err != nil {
-		return "", err
-	}
-
-	return "", fmt.Errorf("properties file %s does not contain %s", filename, taskIDPrefix)
-}
diff --git a/pkg/sonar/scan_test.go b/pkg/sonar/scan_test.go
deleted file mode 100644
index 629f543f..00000000
--- a/pkg/sonar/scan_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package sonar
-
-import (
-	"path/filepath"
-	"testing"
-
-	"github.com/opendevstack/ods-pipeline/internal/projectpath"
-)
-
-func TestExtractComputeEngineTaskID(t *testing.T) {
-
-	c := testClient(t, "")
-	want := "AVAn5RKqYwETbXvgas-I"
-	fixture := filepath.Join(projectpath.Root, "test/testdata/fixtures/sonar", ReportTaskFilename)
-	got, err := c.ExtractComputeEngineTaskID(fixture)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// check extracted status matches
-	if got != want {
-		t.Fatalf("want %s, got %s", want, got)
-	}
-}
diff --git a/pkg/taskdoc/taskdoc.go b/pkg/taskdoc/taskdoc.go
new file mode 100644
index 00000000..2c06cadd
--- /dev/null
+++ b/pkg/taskdoc/taskdoc.go
@@ -0,0 +1,78 @@
+package taskdoc
+
+import (
+	"errors"
+	"io"
+	"text/template"
+
+	tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+	"sigs.k8s.io/yaml"
+)
+
+type Param struct {
+	Name        string
+	Default     string
+	Description string
+}
+
+type Result struct {
+	Name        string
+	Description string
+}
+
+type Task struct {
+	Name        string
+	Description string
+	Params      []Param
+	Results     []Result
+}
+
+// ParseTask reads a Tekton task from given bytes f,
+// and assembles a new Task with the name, params and
+// results from the parsed Tekton task, as well as the
+// given description.
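+//
+// A minimal usage sketch (file paths assumed):
+//
+//	f, _ := os.ReadFile("tasks/my-task.yaml")
+//	desc, _ := os.ReadFile("docs/my-task.adoc")
+//	task, err := ParseTask(f, desc)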
+func ParseTask(f []byte, desc []byte) (*Task, error) {
+	var t tekton.Task
+	err := yaml.Unmarshal(f, &t)
+	if err != nil {
+		return nil, err
+	}
+	if t.Name == "" {
+		return nil, errors.New("encountered empty name, something is wrong with the task")
+	}
+	task := &Task{
+		Name:        t.Name,
+		Description: string(desc),
+		Params:      []Param{},
+		Results:     []Result{},
+	}
+	for _, p := range t.Spec.Params {
+		defaultValue := ""
+		if p.Default != nil {
+			defaultValue = p.Default.StringVal
+		}
+		task.Params = append(task.Params, Param{
+			Name:        p.Name,
+			Default:     defaultValue,
+			Description: p.Description,
+		})
+	}
+	for _, r := range t.Spec.Results {
+		task.Results = append(task.Results, Result{
+			Name:        r.Name,
+			Description: r.Description,
+		})
+	}
+	return task, nil
+}
+
+// RenderTaskDocumentation renders the given template with the task data,
+// writing the result to w.
+func RenderTaskDocumentation(w io.Writer, tmpl *template.Template, task *Task) error {
+	if _, err := w.Write(
+		[]byte("// File is generated; DO NOT EDIT.\n\n"),
+	); err != nil {
+		return err
+	}
+	return tmpl.Execute(w, task)
+}
diff --git a/pkg/taskmanifest/taskmanifest.go b/pkg/taskmanifest/taskmanifest.go
new file mode 100644
index 00000000..5ad95cc2
--- /dev/null
+++ b/pkg/taskmanifest/taskmanifest.go
@@ -0,0 +1,17 @@
+package taskmanifest
+
+import (
+	"io"
+	"text/template"
+)
+
+// RenderTask renders the given template with the passed data,
+// writing the result to w.
+func RenderTask(w io.Writer, tmpl *template.Template, data map[string]string) error {
+	if _, err := w.Write(
+		[]byte("# File is generated; DO NOT EDIT.\n\n"),
+	); err != nil {
+		return err
+	}
+	return tmpl.Execute(w, data)
+}
diff --git a/pkg/tasktesting/check.go b/pkg/tasktesting/check.go
deleted file mode 100644
index 8a8acba9..00000000
--- a/pkg/tasktesting/check.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package tasktesting
-
-import (
-	"fmt"
-	"net/http"
-	"strings"
-	"testing"
-
-	"github.com/opendevstack/ods-pipeline/internal/command"
-)
-
-type Service string
-
-const (
-	Bitbucket Service = "7990"
-	Nexus     Service = "8081"
-	SonarQube Service = "9000"
-)
-
-var serviceMapping = map[Service]string{
-	Bitbucket: "Bitbucket",
-	Nexus:     "Nexus",
-	SonarQube: "SonarQube",
-}
-
-// Safeguard against running outside KinD
-func CheckCluster(t *testing.T, outsideKindAllowed bool) {
-	if !outsideKindAllowed {
-		stdout, stderr, err := command.RunBuffered("kubectl", []string{"config", "current-context"})
-		if err != nil {
-			t.Fatalf("could not check current Kube context: %s, err: %s", string(stderr), err)
-		}
-		gotContext := strings.TrimSpace(string(stdout))
-		wantContext := "kind-kind"
-		if gotContext != wantContext {
-			t.Fatalf("Not running tests outside KinD cluster ('%s') without -outside-kind! Current context: %s", wantContext, gotContext)
-		}
-	}
-}
-
-func CheckServices(t *testing.T, requiredServices []Service) {
-	t.Logf("Trying to reach the required services...")
-	for _, port := range requiredServices {
-		service := serviceMapping[port]
-		resp, err := http.Get(fmt.Sprintf("http://localhost:%s", port))
-		if err != nil {
-			t.Fatalf("%s needs to run for this test to be executable, but it could not be reached: %s", service, err)
-		}
-		t.Logf("%s reached successfully.", service)
-		defer resp.Body.Close()
-	}
-}
diff --git a/pkg/tasktesting/helper.go b/pkg/tasktesting/helper.go
deleted file mode 100644
index bd047c3f..00000000
--- a/pkg/tasktesting/helper.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package tasktesting
-
-import (
-	"context"
-	"fmt"
-	"os"
-	"os/signal"
-	"path/filepath"
-	"strings"
-	"testing"
-
-	"github.com/opendevstack/ods-pipeline/internal/command"
-	k "github.com/opendevstack/ods-pipeline/internal/kubernetes"
-	"github.com/opendevstack/ods-pipeline/internal/projectpath"
-	"github.com/opendevstack/ods-pipeline/internal/random"
-	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"knative.dev/pkg/apis"
-	"knative.dev/pkg/test/logging"
-)
-
-const (
-	StorageClassName = "standard" // if using KinD, set it to "standard"
-	StorageCapacity  = "1Gi"
-	StorageSourceDir = "/files" // this is the dir *within* the KinD container that mounts to ${ODS_PIPELINE_DIR}/test
-	PrivateCertFile  = "test/testdata/private-cert/tls.crt"
-)
-
-type SetupOpts struct {
-	SourceDir        string
-	StorageCapacity  string
-	StorageClassName string
-	PrivateCert      bool
-}
-
-func Setup(t *testing.T, opts SetupOpts) (*k.Clients, string) {
-	t.Helper()
-
-	namespace := random.PseudoString()
-	clients := k.NewClients()
-
-	k.CreateNamespace(clients.KubernetesClientSet, namespace)
-
-	_, err := k.CreatePersistentVolume(clients.KubernetesClientSet, namespace, opts.StorageCapacity, opts.SourceDir, opts.StorageClassName)
-	if err != nil {
-		t.Error(err)
-	}
-
-	_, err = k.CreatePersistentVolumeClaim(clients.KubernetesClientSet, opts.StorageCapacity, opts.StorageClassName, namespace)
-	if err != nil {
-		t.Error(err)
-	}
-
-	installCDNamespaceResources(t, namespace, "pipeline", opts.PrivateCert)
-
-	return clients, namespace
-}
-
-func installCDNamespaceResources(t *testing.T, ns, serviceaccount string, privateCert bool) {
-
-	scriptArgs := []string{"-n", ns, "-s", serviceaccount, "--no-diff"}
-	if testing.Verbose() {
-		scriptArgs = append(scriptArgs, "-v")
-	}
-	if privateCert {
-		// Insert as first flag because install-inside-kind.sh won't recognize it otherwise.
-		scriptArgs = append(
-			[]string{fmt.Sprintf(
-				"--private-cert=%s",
-				filepath.Join(projectpath.Root, PrivateCertFile))},
-			scriptArgs...,
-		)
-	}
-
-	stdout, stderr, err := command.RunBuffered(
-		filepath.Join(projectpath.Root, "scripts/install-inside-kind.sh"),
-		scriptArgs,
-	)
-
-	t.Logf(string(stdout))
-	if err != nil {
-		t.Logf(string(stderr))
-		t.Fatal(err)
-	}
-}
-
-func Header(logf logging.FormatLogger, text string) {
-	left := "### "
-	right := " ###"
-	txt := left + text + right
-	bar := strings.Repeat("#", len(txt))
-	logf(bar)
-	logf(txt)
-	logf(bar)
-}
-
-// CleanupOnInterrupt will execute the function cleanup if an interrupt signal is caught
-func CleanupOnInterrupt(cleanup func(), logf logging.FormatLogger) {
-	c := make(chan os.Signal, 1)
-	signal.Notify(c, os.Interrupt)
-	go func() {
-		for range c {
-			logf("Test interrupted, cleaning up.")
-			cleanup()
-			os.Exit(1)
-		}
-	}()
-}
-
-func TearDown(t *testing.T, cs *k.Clients, namespace string) {
-	t.Helper()
-	if cs.KubernetesClientSet == nil {
-		return
-	}
-
-	t.Logf("Deleting namespace %s", namespace)
-	if err := cs.KubernetesClientSet.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{}); err != nil {
-		t.Errorf("Failed to delete namespace %s: %s", namespace, err)
-	}
-
-	// For simplicity and traceability, we use for the PV the same name as the namespace
-	pvName := namespace
-	t.Logf("Deleting persistent volume with name %s", pvName)
-	if err := cs.KubernetesClientSet.CoreV1().PersistentVolumes().Delete(context.Background(), pvName, metav1.DeleteOptions{}); err != nil {
-		t.Errorf("Failed to delete persistent volume %s: %s", pvName, err)
-	}
-
-}
-
-func CollectTaskResultInfo(tr *v1beta1.TaskRun, logf logging.FormatLogger) {
-	if tr == nil {
-		logf("error: no taskrun")
-		return
-	}
-	logf("Status: %s\n", tr.Status.GetCondition(apis.ConditionSucceeded).Status)
-	logf("Reason: %s\n", tr.Status.GetCondition(apis.ConditionSucceeded).GetReason())
-	logf("Message: %s\n", tr.Status.GetCondition(apis.ConditionSucceeded).GetMessage())
-}
diff --git a/pkg/tasktesting/run.go b/pkg/tasktesting/run.go
deleted file mode 100644
index 78e05b16..00000000
--- a/pkg/tasktesting/run.go
+++ /dev/null
@@ -1,253 +0,0 @@
-package tasktesting
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"os"
-	"path/filepath"
-	"testing"
-	"time"
-
-	"github.com/opendevstack/ods-pipeline/internal/directory"
-	"github.com/opendevstack/ods-pipeline/internal/kubernetes"
-	"github.com/opendevstack/ods-pipeline/internal/projectpath"
-	"github.com/opendevstack/ods-pipeline/pkg/pipelinectxt"
-	tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
-	v1 "k8s.io/api/core/v1"
-)
-
-type TestOpts struct {
-	TaskKindRef             string
-	TaskName                string
-	Clients                 *kubernetes.Clients
-	Namespace               string
-	Timeout                 time.Duration
-	AlwaysKeepTmpWorkspaces bool
-}
-
-type TaskRunCase struct {
-	WantRunSuccess bool
-	WantSetupFail  bool
-	PreRunFunc     func(t *testing.T, ctxt *TaskRunContext)
-	PostRunFunc    func(t *testing.T, ctxt *TaskRunContext)
-}
-
-type TestCase struct {
-	// Map workspace name of task to local directory under test/testdata/workspaces.
-	WorkspaceDirMapping map[string]string
-	TaskParamsMapping   map[string]string
-	// The fields until AdditionalRuns are the same as for AdditionalRuns TaskRunCase so that single task tests do not require another level. The goal is to only require the extra level if needed. Could this be avoided?
-	WantRunSuccess bool
-	WantSetupFail  bool
-	PreRunFunc     func(t *testing.T, ctxt *TaskRunContext)
-	PostRunFunc    func(t *testing.T, ctxt *TaskRunContext)
-	CleanupFunc    func(t *testing.T, ctxt *TaskRunContext)
-	AdditionalRuns []TaskRunCase
-	Timeout        time.Duration
-}
-
-type TaskRunContext struct {
-	Namespace     string
-	Clients       *kubernetes.Clients
-	Workspaces    map[string]string
-	Params        map[string]string
-	ODS           *pipelinectxt.ODSContext
-	Cleanup       func()
-	CollectedLogs []byte
-}
-
-func (tc *TaskRunContext) toTektonParams() []tekton.Param {
-	var tektonParams []tekton.Param
-
-	// When tekton supports array usage their usage could be supported here.
-	// (see [Cannot refer array params in script #4912](https://github.com/tektoncd/pipeline/issues/4912))
-	for key, value := range tc.Params {
-		tektonParams = append(tektonParams, tekton.Param{
-			Name:  key,
-			Value: *tekton.NewArrayOrString(value),
-		})
-	}
-	return tektonParams
-}
-
-func runTask(t *testing.T, testOpts TestOpts, taskWorkspaces map[string]string, testCaseContext *TaskRunContext, tc TaskRunCase) {
-	if tc.PreRunFunc != nil {
-		tc.PreRunFunc(t, testCaseContext)
-	}
-
-	if testCaseContext.Cleanup != nil {
-		defer testCaseContext.Cleanup()
-	}
-
-	tr, err := CreateTaskRunWithParams(
-		testOpts.Clients.TektonClientSet,
-		testOpts.TaskKindRef,
-		testOpts.TaskName,
-		testCaseContext.toTektonParams(),
-		taskWorkspaces,
-		testOpts.Namespace,
-	)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	taskRun, collectedLogsBuffer, err := WatchTaskRunUntilDone(t, testOpts, tr)
-
-	// Check if task setup was successful
-	if err != nil {
-		if tc.WantSetupFail {
-			return
-		} else {
-			t.Fatalf("Task setup failed: %s", err)
-		}
-	}
-
-	if tc.WantSetupFail {
-		t.Fatal("Task setup was successful, but was expected to fail.")
-	}
-
-	if collectedLogsBuffer.Len() > 0 {
-		testCaseContext.CollectedLogs = collectedLogsBuffer.Bytes()
-	}
-
-	// Show info from Task result
-	CollectTaskResultInfo(taskRun, t.Logf)
-
-	// Check if task was successful
-	if taskRun.IsSuccessful() != tc.WantRunSuccess {
-		t.Fatalf("Got: %+v, want: %+v.", taskRun.IsSuccessful(), tc.WantRunSuccess)
-	}
-
-	// Check local folder and evaluate output of task if needed
-	if tc.PostRunFunc != nil {
-		tc.PostRunFunc(t, testCaseContext)
-	}
-}
-
-func Run(t *testing.T, tc TestCase, testOpts TestOpts) {
-
-	// Set default timeout for running the test
-	if testOpts.Timeout == 0 {
-		testOpts.Timeout = 120 * time.Second
-	}
-
-	taskWorkspaces := map[string]string{}
-	for wn, wd := range tc.WorkspaceDirMapping {
-		tempDir, err := InitWorkspace(wn, wd)
-		if err != nil {
-			t.Fatal(err)
-		}
-		t.Logf("Workspace is in %s", tempDir)
-		taskWorkspaces[wn] = tempDir
-	}
-
-	testCaseContext := &TaskRunContext{
-		Namespace:  testOpts.Namespace,
-		Clients:    testOpts.Clients,
-		Workspaces: taskWorkspaces,
-		Params:     tc.TaskParamsMapping,
-	}
-
-	if tc.CleanupFunc != nil {
-		defer tc.CleanupFunc(t, testCaseContext)
-	}
-
-	tasks := []TaskRunCase{}
-	tasks = append(tasks, TaskRunCase{
-		WantRunSuccess: tc.WantRunSuccess,
-		WantSetupFail:  tc.WantSetupFail,
-		PreRunFunc:     tc.PreRunFunc,
-		PostRunFunc:    tc.PostRunFunc,
-	})
-	for _, ttc := range tc.AdditionalRuns {
-		if ttc.PostRunFunc == nil {
-			ttc.PostRunFunc = tc.PostRunFunc
-		}
-		if ttc.PreRunFunc == nil {
-			ttc.PreRunFunc = tc.PreRunFunc
-		}
-		tasks = append(tasks, ttc)
-	}
-	for _, ttc := range tasks {
-		runTask(t, testOpts, taskWorkspaces, testCaseContext, ttc)
-	}
-
-	if !testOpts.AlwaysKeepTmpWorkspaces {
-		// Clean up only if test is successful
-		for _, wd := range taskWorkspaces {
-			err := os.RemoveAll(wd)
-			if err != nil {
-				t.Fatal(err)
-			}
-		}
-	}
-}
-
-func InitWorkspace(workspaceName, workspaceDir string) (string, error) {
-	workspaceSourceDirectory := filepath.Join(
-		projectpath.Root, "test", TestdataWorkspacesPath, workspaceDir,
-	)
-	workspaceParentDirectory := filepath.Dir(workspaceSourceDirectory)
-	return directory.CopyToTempDir(
-		workspaceSourceDirectory,
-		workspaceParentDirectory,
-		"workspace-",
-	)
-}
-
-func WatchTaskRunUntilDone(t *testing.T, testOpts TestOpts, tr *tekton.TaskRun) (*tekton.TaskRun, bytes.Buffer, error) {
-	taskRunDone := make(chan *tekton.TaskRun)
-	podAdded := make(chan *v1.Pod)
-	errs := make(chan error)
-	collectedLogsChan := make(chan []byte)
-	var collectedLogsBuffer bytes.Buffer
-
-	ctx, cancel := context.WithTimeout(context.TODO(), testOpts.Timeout)
-	defer cancel()
-	go waitForTaskRunDone(
-		ctx,
-		t,
-		testOpts.Clients.TektonClientSet,
-		tr.Name,
-		testOpts.Namespace,
-		errs,
-		taskRunDone,
-	)
-
-	go waitForTaskRunPod(
-		ctx,
-		testOpts.Clients.KubernetesClientSet,
-		tr.Name,
-		testOpts.Namespace,
-		podAdded,
-	)
-
-	for {
-		select {
-		case err := <-errs:
-			if err != nil {
-				return nil, collectedLogsBuffer, err
-			}
-
-		case pod := <-podAdded:
-			if pod != nil {
-				go getEventsAndLogsOfPod(
-					ctx,
-					testOpts.Clients.KubernetesClientSet,
-					pod,
-					collectedLogsChan,
-					errs,
-				)
-			}
-
-		case b := <-collectedLogsChan:
-			collectedLogsBuffer.Write(b)
-
-		case tr := <-taskRunDone:
-			return tr, collectedLogsBuffer, nil
-		case <-ctx.Done():
-			return nil, collectedLogsBuffer, fmt.Errorf("timeout waiting for task run to finish. Consider increasing the timeout for your testcase at hand")
-		}
-	}
-}
diff --git a/pkg/tasktesting/taskrun.go b/pkg/tasktesting/taskrun.go
deleted file mode 100644
index e3cff523..00000000
--- a/pkg/tasktesting/taskrun.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package tasktesting
-
-import (
-	"context"
-	"fmt"
-	"log"
-	"path/filepath"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/opendevstack/ods-pipeline/internal/random"
-	tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
-	pipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	kubeinformers "k8s.io/client-go/informers"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/tools/cache"
-)
-
-const (
-	TestdataWorkspacesPath = "testdata/workspaces"
-)
-
-func CreateTaskRunWithParams(tknClient *pipelineclientset.Clientset, taskRefKind string, taskName string, tektonParams []tekton.Param, workspaces map[string]string, namespace string) (*tekton.TaskRun, error) {
-
-	var tk tekton.TaskKind
-	switch taskRefKind {
-	case string(tekton.ClusterTaskKind):
-		tk = tekton.ClusterTaskKind
-	case string(tekton.NamespacedTaskKind):
-		tk = tekton.NamespacedTaskKind
-	default:
-		log.Fatalf("Don't know type %s\n", taskRefKind)
-	}
-
-	taskWorkspaces := []tekton.WorkspaceBinding{}
-	for wn, wd := range workspaces {
-		wsDirName := filepath.Base(wd)
-		taskWorkspaces = append(taskWorkspaces, tekton.WorkspaceBinding{
-			Name: wn,
-			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
-				ClaimName: "task-pv-claim",
-				ReadOnly:  false,
-			},
-			SubPath: filepath.Join(TestdataWorkspacesPath, wsDirName),
-		})
-	}
-
-	tr, err := tknClient.TektonV1beta1().TaskRuns(namespace).Create(context.TODO(),
-		&tekton.TaskRun{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: fmt.Sprintf("%s-taskrun-%s", taskName, random.PseudoString()),
-			},
-			Spec: tekton.TaskRunSpec{
-				TaskRef:            &tekton.TaskRef{Kind: tk, Name: taskName},
-				Params:             tektonParams,
-				Workspaces:         taskWorkspaces,
-				ServiceAccountName: "pipeline",
-			},
-		},
-		metav1.CreateOptions{})
-
-	return tr, err
-}
-
-func waitForTaskRunDone(
-	ctx context.Context,
-	t *testing.T,
-	c pipelineclientset.Interface,
-	name, ns string,
-	errs chan error,
-	done chan *tekton.TaskRun) {
-
-	deadline, _ := ctx.Deadline()
-	timeout := time.Until(deadline)
-	log.Printf("Waiting up to %v seconds for task %s in namespace %s to be done...\n", timeout.Round(time.Second).Seconds(), name, ns)
-
-	t.Helper()
-
-	w, err := c.TektonV1beta1().TaskRuns(ns).Watch(ctx, metav1.SingleObject(metav1.ObjectMeta{
-		Name:      name,
-		Namespace: ns,
-	}))
-	if err != nil {
-		errs <- fmt.Errorf("error watching taskrun: %s", err)
-		return
-	}
-
-	// Wait for the TaskRun to be done
-	for {
-		ev := <-w.ResultChan()
-		if ev.Object != nil {
-			tr, ok := ev.Object.(*tekton.TaskRun)
-			if ok {
-				if tr.IsDone() {
-					done <- tr
-					close(done)
-					return
-				}
-			}
-
-		}
-	}
-}
-
-func waitForTaskRunPod(
-	ctx context.Context,
-	c *kubernetes.Clientset,
-	taskRunName,
-	namespace string,
-	podAdded chan *corev1.Pod) {
-	log.Printf("Waiting for pod related to TaskRun %s to be added to the cluster\n", taskRunName)
-	stop := make(chan struct{})
-
-	kubeInformerFactory := kubeinformers.NewSharedInformerFactory(c, time.Second*30)
-	podsInformer := kubeInformerFactory.Core().V1().Pods().Informer()
-
-	var taskRunPod *corev1.Pod
-
-	podsInformer.AddEventHandler(
-		cache.ResourceEventHandlerFuncs{
-			AddFunc: func(obj interface{}) {
-				// when a new task is created, watch its events
-				pod := obj.(*corev1.Pod)
-				if strings.HasPrefix(pod.Name, taskRunName) {
-					taskRunPod = pod
-					log.Printf("TaskRun %s added pod %s to the cluster", taskRunName, pod.Name)
-					stop <- struct{}{}
-				}
-
-			},
-		})
-
-	defer close(stop)
-	kubeInformerFactory.Start(stop)
-
-	<-stop
-	podAdded <- taskRunPod
-}
diff --git a/pkg/tektontaskrun/cluster.go b/pkg/tektontaskrun/cluster.go
new file mode 100644
index 00000000..91066ab2
--- /dev/null
+++ b/pkg/tektontaskrun/cluster.go
@@ -0,0 +1,200 @@
+package tektontaskrun
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/opendevstack/ods-pipeline/internal/command"
+	"github.com/opendevstack/ods-pipeline/internal/projectpath"
+)
+
+const (
+	DefaultServiceAccountName = "pipeline"
+	KinDMountHostPath         = "/tmp/ods-pipeline/kind-mount"
+	KinDMountContainerPath    = "/files"
+	KinDRegistry              = "localhost:5000"
+	KinDName                  = "ods-pipeline"
+)
+
+var recreateClusterFlag = flag.Bool("ods-recreate-cluster", false, "Whether to remove and recreate the KinD cluster named 'ods-pipeline'")
+var registryPortFlag = flag.String("ods-cluster-registry-port", "5000", "Port of cluster registry")
+var outsideKindFlag = flag.Bool("ods-outside-kind", false, "Whether to continue if the Kube context is not set to the KinD cluster")
+var reuseImagesFlag = flag.Bool("ods-reuse-images", false, "Whether to reuse existing images instead of building again")
+var debugFlag = flag.Bool("ods-debug", false, "Turn on debug mode for scripts etc.")
+
+// ClusterOpt allows to further configure the KinD cluster after its creation.
+type ClusterOpt func(c *ClusterConfig) error
+
+// ClusterConfig represents key configuration of the KinD cluster.
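+//
+// NewClusterConfig returns an instance with the defaults shown here for
+// illustration:
+//
+//	&ClusterConfig{
+//		StorageClassName:  "standard",
+//		StorageCapacity:   "1Gi",
+//		StorageSourceDir:  KinDMountContainerPath,
+//		Registry:          KinDRegistry,
+//		DefaultRepository: "ods-pipeline",
+//	}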
+type ClusterConfig struct {
+	StorageSourceDir  string
+	StorageCapacity   string
+	StorageClassName  string
+	Registry          string
+	DefaultRepository string
+}
+
+// ImageBuildConfig represents the config used to build a container image.
+type ImageBuildConfig struct {
+	Dockerfile string
+	Tag        string
+	ContextDir string
+}
+
+// Process validates the configuration and defaults the image tag if unset
+// using the defaultImageRepository and Dockerfile values.
+func (ibc *ImageBuildConfig) Process(defaultImageRepository string) error {
+	if ibc.Dockerfile == "" || ibc.ContextDir == "" {
+		return errors.New("both Dockerfile and ContextDir must be set")
+	}
+	if ibc.Tag == "" {
+		imageName := strings.TrimPrefix(path.Base(ibc.Dockerfile), "Dockerfile.")
+		ibc.Tag = fmt.Sprintf("%s/%s:latest", defaultImageRepository, imageName)
+	}
+	return nil
+}
+
+// NewClusterConfig creates a new ClusterConfig instance.
+func NewClusterConfig() *ClusterConfig {
+	return &ClusterConfig{
+		StorageClassName:  "standard", // if using KinD, set it to "standard"
+		StorageCapacity:   "1Gi",
+		StorageSourceDir:  KinDMountContainerPath,
+		Registry:          KinDRegistry,
+		DefaultRepository: "ods-pipeline",
+	}
+}
+
+// DefaultImageRepository returns the registry + default repository
+// combination.
+func (c *ClusterConfig) DefaultImageRepository() string {
+	return c.Registry + "/" + c.DefaultRepository
+}
+
+// DefaultTaskTemplateData returns a map with default values which can be used
+// in task templates.
+func (c *ClusterConfig) DefaultTaskTemplateData() map[string]string {
+	return map[string]string{
+		"ImageRepository": c.DefaultImageRepository(),
+		"Version":         "latest",
+	}
+}
+
+// StartKinDCluster starts a KinD cluster with Tekton installed.
+// Afterwards, any given ClusterOpt is applied.
+func StartKinDCluster(opts ...ClusterOpt) (*ClusterConfig, error) {
+	flag.Parse()
+	if err := checkCluster(*outsideKindFlag); err != nil {
+		return nil, fmt.Errorf("check kubectl context: %s", err)
+	}
+	if err := createKinDCluster(*debugFlag); err != nil {
+		return nil, fmt.Errorf("create KinD cluster: %s", err)
+	}
+	if err := installTektonPipelines(*debugFlag); err != nil {
+		return nil, fmt.Errorf("install Tekton: %s", err)
+	}
+
+	c := NewClusterConfig()
+	for _, o := range opts {
+		err := o(c)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return c, nil
+}
+
+// LoadImage builds a container image using the docker CLI based on the given
+// ImageBuildConfig.
+//
+// The ImageBuildConfig must set at least the Dockerfile and ContextDir options.
+// If Tag is unset, it is inferred from the default registry and the Dockerfile
+// name. For example, given a Dockerfile of "Dockerfile.foobar", the tag is
+// defaulted to localhost:5000/ods-pipeline/foobar.
+// Passing the flag -ods-reuse-images to the tests will skip image rebuilding.
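+//
+// A usage sketch (Dockerfile name assumed):
+//
+//	cc, err := StartKinDCluster(
+//		LoadImage(ImageBuildConfig{
+//			Dockerfile: "build/images/Dockerfile.my-task",
+//			ContextDir: "../..",
+//		}),
+//	)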
+func LoadImage(ibc ImageBuildConfig) ClusterOpt {
+	flag.Parse()
+	return func(c *ClusterConfig) error {
+		buildImage := true
+		err := ibc.Process(c.DefaultImageRepository())
+		if err != nil {
+			return fmt.Errorf("processing image build config: %s", err)
+		}
+		if *reuseImagesFlag {
+			cmd := exec.Command("docker", "images", "-q", ibc.Tag)
+			b, err := cmd.Output()
+			if err != nil {
+				return err
+			}
+			imageID := strings.TrimSpace(string(b))
+			if imageID != "" {
+				log.Printf("Reusing image ID %s for tag %s ...\n", imageID, ibc.Tag)
+				buildImage = false
+			}
+		}
+		if buildImage {
+			log.Printf("Building image %s from %s ...\n", ibc.Tag, ibc.Dockerfile)
+			if !path.IsAbs(ibc.Dockerfile) {
+				ibc.Dockerfile = filepath.Join(ibc.ContextDir, ibc.Dockerfile)
+			}
+			args := []string{
+				"build",
+				"-f", ibc.Dockerfile,
+				"-t", ibc.Tag,
+				ibc.ContextDir,
+			}
+			if err := command.Run("docker", args, []string{}, os.Stdout, os.Stderr); err != nil {
+				return err
+			}
+		}
+		return command.Run("docker", []string{"push", ibc.Tag}, []string{}, os.Stdout, os.Stderr)
+	}
+}
+
+func checkCluster(outsideKindAllowed bool) error {
+	if !outsideKindAllowed {
+		cmd := exec.Command("kubectl", "config", "current-context")
+		b, err := cmd.Output()
+		if err != nil || len(b) == 0 {
+			log.Println("did not detect existing kubectl context")
+			return nil
+		}
+		gotContext := strings.TrimSpace(string(b))
+		wantCluster := "ods-pipeline"
+		if gotContext != "kind-"+wantCluster {
+			return fmt.Errorf("not running tests outside KinD cluster ('%s') without -ods-outside-kind! Current context: %s", wantCluster, gotContext)
+		}
+	}
+	return nil
+}
+
+func createKinDCluster(debug bool) error {
+	args := []string{
+		projectpath.RootedPath("scripts/kind-with-registry.sh"),
+		"--registry-port=" + *registryPortFlag,
+	}
+	if *recreateClusterFlag {
+		args = append(args, "--recreate")
+	}
+	if debug {
+		args = append(args, "--verbose")
+	}
+	return command.Run("bash", args, []string{}, os.Stdout, os.Stderr)
+}
+
+func installTektonPipelines(debug bool) error {
+	args := []string{
+		projectpath.RootedPath("scripts/install-tekton-pipelines.sh"),
+	}
+	if debug {
+		args = append(args, "--verbose")
+	}
+	return command.Run("sh", args, []string{}, os.Stdout, os.Stderr)
+}
diff --git a/pkg/tektontaskrun/doc.go b/pkg/tektontaskrun/doc.go
new file mode 100644
index 00000000..27540de0
--- /dev/null
+++ b/pkg/tektontaskrun/doc.go
@@ -0,0 +1,74 @@
+/*
+Package tektontaskrun implements ODS Pipeline independent functionality to run
+Tekton tasks in a KinD cluster.
+
+Using tektontaskrun it is possible to start a KinD cluster, configure it (e.g.
+by setting up a temporary namespace), and run a Tekton task.
+
+tektontaskrun is intended to be used by CLI programs and as a library for
+testing Tekton tasks using Go.
+
+Example usage:
+
+	package test
+
+	import (
+		"log"
+		"os"
+		"path/filepath"
+		"runtime"
+		"testing"
+
+		ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun"
+		tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+	)
+
+	var (
+		namespaceConfig *ttr.NamespaceConfig
+		rootPath        = "../.."
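+		// assumed layout: the test package sits two levels below the repository root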
+	)
+
+	func TestMain(m *testing.M) {
+		cc, err := ttr.StartKinDCluster(
+			ttr.LoadImage(ttr.ImageBuildConfig{
+				Dockerfile: "build/images/Dockerfile.my-task",
+				ContextDir: rootPath,
+			}),
+		)
+		if err != nil {
+			log.Fatal("Could not start KinD cluster: ", err)
+		}
+		nc, cleanup, err := ttr.SetupTempNamespace(
+			cc,
+			ttr.InstallTaskFromPath(
+				filepath.Join(rootPath, "build/tasks/my-task.yaml"),
+				nil,
+			),
+		)
+		if err != nil {
+			log.Fatal("Could not setup temporary namespace: ", err)
+		}
+		defer cleanup()
+		namespaceConfig = nc
+		os.Exit(m.Run())
+	}
+
+	func TestMyTask(t *testing.T) {
+		if err := ttr.RunTask(
+			ttr.InNamespace(namespaceConfig.Name),
+			ttr.UsingTask("my-task"),
+			ttr.WithStringParams(map[string]string{
+				"go-os":   runtime.GOOS,
+				"go-arch": runtime.GOARCH,
+			}),
+			ttr.WithWorkspace("source", "my-sample-app"),
+			ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) {
+				wd := config.WorkspaceConfigs["source"].Dir
+				_ = wd // e.g. check files in workspace ...
+			}),
+		); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// further tests here ...
+*/
+package tektontaskrun
diff --git a/pkg/tasktesting/events.go b/pkg/tektontaskrun/events.go
similarity index 98%
rename from pkg/tasktesting/events.go
rename to pkg/tektontaskrun/events.go
index 24c2304c..ab575657 100644
--- a/pkg/tasktesting/events.go
+++ b/pkg/tektontaskrun/events.go
@@ -1,4 +1,4 @@
-package tasktesting
+package tektontaskrun
 
 import (
 	"context"
diff --git a/pkg/tektontaskrun/interrupt.go b/pkg/tektontaskrun/interrupt.go
new file mode 100644
index 00000000..df8ca472
--- /dev/null
+++ b/pkg/tektontaskrun/interrupt.go
@@ -0,0 +1,18 @@
+package tektontaskrun
+
+import (
+	"os"
+	"os/signal"
+)
+
+// cleanupOnInterrupt will execute the function cleanup if an interrupt signal is caught.
+func cleanupOnInterrupt(cleanup func()) {
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, os.Interrupt)
+	go func() {
+		for range c {
+			cleanup()
+			os.Exit(1)
+		}
+	}()
+}
diff --git a/pkg/tasktesting/logs.go b/pkg/tektontaskrun/logs.go
similarity index 99%
rename from pkg/tasktesting/logs.go
rename to pkg/tektontaskrun/logs.go
index 6a6e4ece..4f32b110 100644
--- a/pkg/tasktesting/logs.go
+++ b/pkg/tektontaskrun/logs.go
@@ -1,4 +1,4 @@
-package tasktesting
+package tektontaskrun
 
 import (
 	"bufio"
diff --git a/pkg/tektontaskrun/namespace_opt.go b/pkg/tektontaskrun/namespace_opt.go
new file mode 100644
index 00000000..23e21e70
--- /dev/null
+++ b/pkg/tektontaskrun/namespace_opt.go
@@ -0,0 +1,130 @@
+package tektontaskrun
+
+import (
+	"context"
+	"log"
+
+	k "github.com/opendevstack/ods-pipeline/internal/kubernetes"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
+// NamespaceOpt allows further configuration of the K8s namespace after its creation.
+type NamespaceOpt func(cc *ClusterConfig, nc *NamespaceConfig) error
+
+// NamespaceConfig represents key configuration of the K8s namespace.
+type NamespaceConfig struct {
+	Name string
+}
+
+// SetupTempNamespace sets up a new namespace using a pseudo-random name,
+// applies any given NamespaceOpt and returns a function to clean up the
+// namespace at a later time.
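+//
+// A minimal usage sketch (the task manifest path is illustrative):
+//
+//	nc, cleanup, err := SetupTempNamespace(
+//		cc,
+//		InstallTaskFromPath("build/tasks/my-task.yaml", nil),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer cleanup()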
+func SetupTempNamespace(cc *ClusterConfig, opts ...NamespaceOpt) (nc *NamespaceConfig, cleanup func(), err error) {
+	nc = &NamespaceConfig{
+		Name: makeRandomString(8),
+	}
+	cleanup, err = initNamespaceAndPVC(cc, nc)
+	if err != nil {
+		return
+	}
+	cleanupOnInterrupt(cleanup)
+	for _, o := range opts {
+		err = o(cc, nc)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// InstallTaskFromPath renders the task template at path using the given data,
+// then installs the resulting task into the namespace identified by
+// NamespaceConfig.
+func InstallTaskFromPath(path string, data map[string]string) NamespaceOpt {
+	return func(cc *ClusterConfig, nc *NamespaceConfig) error {
+		d := cc.DefaultTaskTemplateData()
+		for k, v := range data {
+			d[k] = v
+		}
+		_, err := installTask(path, nc.Name, d)
+		return err
+	}
+}
+
+func initNamespaceAndPVC(cc *ClusterConfig, nc *NamespaceConfig) (cleanup func(), err error) {
+	clients := k.NewClients()
+
+	_, nsCleanup, err := createTempNamespace(clients.KubernetesClientSet, nc.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	// for simplicity and traceability, use namespace name for PVC as well
+	_, pvcCleanup, err := createTempPVC(clients.KubernetesClientSet, cc, nc.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	return func() {
+		nsCleanup()
+		pvcCleanup()
+	}, nil
+}
+
+func createTempNamespace(clientset kubernetes.Interface, name string) (namespace *corev1.Namespace, cleanup func(), err error) {
+	namespace, err = clientset.CoreV1().Namespaces().Create(
+		context.TODO(),
+		&corev1.Namespace{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: name,
+			},
+		},
+		metav1.CreateOptions{},
+	)
+	return namespace, func() {
+		log.Printf("Removing temporary namespace %q ...", name)
+		err := removeNamespace(clientset, name)
+		if err != nil {
+			log.Println(err)
+		}
+	}, err
+}
+
+func createTempPVC(clientset kubernetes.Interface, cc *ClusterConfig, name string) (pvc *corev1.PersistentVolumeClaim, cleanup func(), err error) {
+	_, err = k.CreatePersistentVolume(
+		clientset,
+		name,
+		cc.StorageCapacity,
+		cc.StorageSourceDir,
+		cc.StorageClassName,
+	)
+	if err != nil {
+		return
+	}
+
+	pvc, err = k.CreatePersistentVolumeClaim(
+		clientset,
+		cc.StorageCapacity,
+		cc.StorageClassName,
+		name,
+	)
+	if err != nil {
+		return
+	}
+	return pvc, func() {
+		// The namespaced PVC is removed together with its namespace;
+		// the cluster-scoped PV must be deleted explicitly.
+		err := removePV(clientset, name)
+		if err != nil {
+			log.Println(err)
+		}
+	}, err
+}
+
+func removeNamespace(clientset kubernetes.Interface, name string) error {
+	return clientset.CoreV1().Namespaces().Delete(context.Background(), name, metav1.DeleteOptions{})
+}
+
+func removePV(clientset kubernetes.Interface, name string) error {
+	return clientset.CoreV1().PersistentVolumes().Delete(context.Background(), name, metav1.DeleteOptions{})
+}
diff --git a/pkg/tektontaskrun/random.go b/pkg/tektontaskrun/random.go
new file mode 100644
index 00000000..36a432b2
--- /dev/null
+++ b/pkg/tektontaskrun/random.go
@@ -0,0 +1,17 @@
+package tektontaskrun
+
+import (
+	"math/rand"
+	"strings"
+	"time"
+)
+
+func makeRandomString(length int) string {
+	// Use a local source instead of the deprecated rand.Seed.
+	r := rand.New(rand.NewSource(time.Now().UnixNano()))
+	chars := []rune("abcdefghijklmnopqrstuvwxyz")
+	var b strings.Builder
+	for i := 0; i < length; i++ {
+		b.WriteRune(chars[r.Intn(len(chars))])
+	}
+	return b.String()
+}
diff --git a/pkg/tektontaskrun/task.go b/pkg/tektontaskrun/task.go
new file mode 100644
index 00000000..cc068ee8
--- /dev/null
+++ b/pkg/tektontaskrun/task.go
@@ -0,0 +1,38 @@
+package tektontaskrun
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"text/template"
+
+	k "github.com/opendevstack/ods-pipeline/internal/kubernetes"
+	"github.com/opendevstack/ods-pipeline/pkg/taskmanifest"
+	tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/yaml"
+)
+
+func installTask(path, namespace string, data map[string]string) (*tekton.Task, error) {
+	var t tekton.Task
+	tmpl, err := template.ParseFiles(path)
+	if err != nil {
+		return nil, fmt.Errorf("parse file: %w", err)
+	}
+	w := new(bytes.Buffer)
+	err = taskmanifest.RenderTask(w, tmpl, data)
+	if err != nil {
+		return nil, fmt.Errorf("render task: %w", err)
+	}
+	err = yaml.Unmarshal(w.Bytes(), &t)
+	if err != nil {
+		return nil, fmt.Errorf("unmarshal: %w", err)
+	}
+	clients := k.NewClients()
+	tc := clients.TektonClientSet
+	it, err := tc.TektonV1beta1().Tasks(namespace).Create(context.TODO(), &t, metav1.CreateOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("create task: %w", err)
+	}
+	return it, nil
+}
diff --git a/pkg/tektontaskrun/taskrun.go b/pkg/tektontaskrun/taskrun.go
new file mode 100644
index 00000000..a681ac8e
--- /dev/null
+++ b/pkg/tektontaskrun/taskrun.go
@@ -0,0 +1,220 @@
+package tektontaskrun
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"log"
+	"path"
+	"strings"
+	"time"
+
+	k "github.com/opendevstack/ods-pipeline/internal/kubernetes"
+	tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+	pipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kubeinformers "k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/cache"
+	"knative.dev/pkg/apis"
+)
+
+// TektonParamsFromStringParams converts a map of string parameters into a
+// slice of Tekton task parameters.
+func TektonParamsFromStringParams(stringParams map[string]string) []tekton.Param {
+	var params []tekton.Param
+	for k, v := range stringParams {
+		tp := tekton.Param{Name: k, Value: tekton.ParamValue{
+			Type:      tekton.ParamTypeString,
+			StringVal: v,
+		}}
+		params = append(params, tp)
+	}
+	return params
+}
+
+func runTask(tc *TaskRunConfig) (*tekton.TaskRun, bytes.Buffer, error) {
+	clients := k.NewClients()
+	tr, err := createTaskRunWithParams(clients.TektonClientSet, tc)
+	if err != nil {
+		return nil, bytes.Buffer{}, err
+	}
+
+	// TODO: if last output is short, it may be omitted from the logs.
+	// watchTaskRunUntilDone streams the logs of the task run pod into
+	// logsBuffer while waiting for the task run to complete or time out.
+	taskRun, logsBuffer, err := watchTaskRunUntilDone(clients, tc, tr)
+	if err != nil {
+		return nil, logsBuffer, err
+	}
+
+	log.Printf(
+		"Task status: %q - %q\n",
+		taskRun.Status.GetCondition(apis.ConditionSucceeded).GetReason(),
+		taskRun.Status.GetCondition(apis.ConditionSucceeded).GetMessage(),
+	)
+
+	return taskRun, logsBuffer, nil
+}
+
+func createTaskRunWithParams(tknClient *pipelineclientset.Clientset, tc *TaskRunConfig) (*tekton.TaskRun, error) {
+
+	taskWorkspaces := []tekton.WorkspaceBinding{}
+	for wn, wd := range tc.Workspaces {
+		if path.IsAbs(wd) && !strings.HasPrefix(wd, KinDMountHostPath) {
+			return nil, fmt.Errorf("workspace dir %q is not located within %q", wd, KinDMountHostPath)
+		}
+		taskWorkspaces = append(taskWorkspaces, tekton.WorkspaceBinding{
+			Name: wn,
+			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+				ClaimName: "task-pv-claim",
+				ReadOnly:  false,
+			},
+			SubPath: strings.TrimPrefix(wd, KinDMountHostPath+"/"),
+		})
+	}
+
+	tr, err := tknClient.TektonV1beta1().TaskRuns(tc.Namespace).Create(context.TODO(),
+		&tekton.TaskRun{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: makeRandomTaskrunName(tc.Name),
+			},
+			Spec: tekton.TaskRunSpec{
+				TaskRef:            &tekton.TaskRef{Kind: tekton.NamespacedTaskKind, Name: tc.Name},
+				Params:             tc.Params,
+				Workspaces:         taskWorkspaces,
+				ServiceAccountName: tc.ServiceAccountName,
+			},
+		},
+		metav1.CreateOptions{})
+
+	return tr, err
+}
+
+func makeRandomTaskrunName(taskName string) string {
+	return fmt.Sprintf("%s-taskrun-%s", taskName, makeRandomString(8))
+}
+
+func waitForTaskRunDone(
+	ctx context.Context,
+	c pipelineclientset.Interface,
+	name, ns string,
+	errs chan error,
+	done chan *tekton.TaskRun) {
+
+	deadline, _ := ctx.Deadline()
+	timeout := time.Until(deadline)
+	log.Printf("Waiting up to %v seconds for task %s in namespace %s to be done...\n", timeout.Round(time.Second).Seconds(), name, ns)
+
+	w, err := c.TektonV1beta1().TaskRuns(ns).Watch(ctx, metav1.SingleObject(metav1.ObjectMeta{
+		Name:      name,
+		Namespace: ns,
+	}))
+	if err != nil {
+		errs <- fmt.Errorf("error watching taskrun: %s", err)
+		return
+	}
+
+	// Wait for the TaskRun to be done
+	for {
+		ev := <-w.ResultChan()
+		if ev.Object != nil {
+			tr, ok := ev.Object.(*tekton.TaskRun)
+			if ok {
+				if tr.IsDone() {
+					done <- tr
+					close(done)
+					return
+				}
+			}
+		}
+	}
+}
+
+func waitForTaskRunPod(
+	ctx context.Context,
+	c *kubernetes.Clientset,
+	taskRunName,
+	namespace string,
+	podAdded chan *corev1.Pod) {
+	log.Printf("Waiting for pod related to TaskRun %s to be added to the cluster\n", taskRunName)
+	stop := make(chan struct{})
+
+	kubeInformerFactory := kubeinformers.NewSharedInformerFactory(c, time.Second*30)
+	podsInformer := kubeInformerFactory.Core().V1().Pods().Informer()
+
+	var taskRunPod *corev1.Pod
+
+	podsInformer.AddEventHandler(
+		cache.ResourceEventHandlerFuncs{
+			AddFunc: func(obj interface{}) {
+				// when a new pod is added, check whether it belongs to the task run
+				pod := obj.(*corev1.Pod)
+				if strings.HasPrefix(pod.Name, taskRunName) {
+					taskRunPod = pod
+					log.Printf("TaskRun %s added pod %s to the cluster", taskRunName, pod.Name)
+					stop <- struct{}{}
+				}
+			},
+		})
+
+	defer close(stop)
+	kubeInformerFactory.Start(stop)
+
+	<-stop
+	podAdded <- taskRunPod
+}
+
+func watchTaskRunUntilDone(c *k.Clients, tc *TaskRunConfig, tr *tekton.TaskRun) (*tekton.TaskRun, bytes.Buffer, error) {
+	taskRunDone := make(chan *tekton.TaskRun)
+	podAdded := make(chan *corev1.Pod)
+	errs := make(chan error)
+	collectedLogsChan := make(chan []byte)
+	var collectedLogsBuffer bytes.Buffer
+
+	ctx, cancel := context.WithTimeout(context.TODO(), tc.Timeout)
+	defer cancel()
+	go waitForTaskRunDone(
+		ctx,
+		c.TektonClientSet,
+		tr.Name,
+		tc.Namespace,
+		errs,
+		taskRunDone,
+	)
+
+	go waitForTaskRunPod(
+		ctx,
+		c.KubernetesClientSet,
+		tr.Name,
+		tc.Namespace,
+		podAdded,
+	)
+
+	for {
+		select {
+		case err := <-errs:
+			if err != nil {
+				return nil, collectedLogsBuffer, err
+			}
+
+		case pod := <-podAdded:
+			if pod != nil {
+				go getEventsAndLogsOfPod(
+					ctx,
+					c.KubernetesClientSet,
+					pod,
+					collectedLogsChan,
+					errs,
+				)
+			}
+
+		case b := <-collectedLogsChan:
+			collectedLogsBuffer.Write(b)
+
+		case tr := <-taskRunDone:
+			return tr, collectedLogsBuffer, nil
+		case <-ctx.Done():
+			return nil, collectedLogsBuffer, fmt.Errorf("timeout waiting for task run to finish; consider increasing the timeout of the test case")
+		}
+	}
+}
diff --git a/pkg/tektontaskrun/taskrun_opt.go b/pkg/tektontaskrun/taskrun_opt.go
new file mode 100644
index 00000000..c82ca1e9
--- /dev/null
+++ b/pkg/tektontaskrun/taskrun_opt.go
@@ -0,0 +1,204 @@
+package tektontaskrun
+
+import (
+	"bytes"
+	"errors"
+	"log"
+	"os"
+	"time"
+
+	"github.com/opendevstack/ods-pipeline/internal/directory"
+	tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+)
+
+const (
+	// DefaultTimeout is the duration after which a task run is aborted.
+	DefaultTimeout = 5 * time.Minute
+)
+
+// TaskRunOpt allows configuration of the Tekton task run before it is started.
+type TaskRunOpt func(c *TaskRunConfig) error
+
+// TaskRunConfig represents key configuration of the Tekton task run.
+type TaskRunConfig struct {
+	Name               string
+	Params             []tekton.Param
+	Workspaces         map[string]string
+	Namespace          string
+	ServiceAccountName string
+	Timeout            time.Duration
+	AfterRunFunc       func(config *TaskRunConfig, taskRun *tekton.TaskRun, logs bytes.Buffer)
+	CleanupFuncs       []func()
+	NamespaceConfig    *NamespaceConfig
+	WorkspaceConfigs   map[string]*WorkspaceConfig
+	ExpectFailure      bool
+}
+
+// Cleanup calls all registered CleanupFuncs.
+func (trc *TaskRunConfig) Cleanup() {
+	for _, f := range trc.CleanupFuncs {
+		f()
+	}
+}
+
+// RunTask executes a task run after applying all given TaskRunOpt.
+func RunTask(opts ...TaskRunOpt) error {
+	trc := &TaskRunConfig{
+		Workspaces:         map[string]string{},
+		WorkspaceConfigs:   map[string]*WorkspaceConfig{},
+		Timeout:            DefaultTimeout,
+		ServiceAccountName: DefaultServiceAccountName,
+	}
+	for _, o := range opts {
+		err := o(trc)
+		if err != nil {
+			return err
+		}
+	}
+
+	cleanupOnInterrupt(trc.Cleanup)
+	defer trc.Cleanup()
+
+	taskRun, logsBuffer, err := runTask(trc)
+	if err != nil {
+		return err
+	}
+
+	if trc.ExpectFailure {
+		if taskRun.IsSuccessful() {
+			return errors.New("task run was successful, but failure was expected")
+		}
+	} else if !taskRun.IsSuccessful() {
+		return errors.New("task run was not successful")
+	}
+
+	if trc.AfterRunFunc != nil {
+		trc.AfterRunFunc(trc, taskRun, logsBuffer)
+	}
+
+	return nil
+}
+
+// InNamespace configures the task run to execute in the given namespace.
+func InNamespace(namespace string) TaskRunOpt {
+	return func(c *TaskRunConfig) error {
+		c.Namespace = namespace
+		return nil
+	}
+}
+
+// InTempNamespace configures the task run to execute in a newly created,
+// temporary namespace.
+func InTempNamespace(cc *ClusterConfig, opts ...NamespaceOpt) TaskRunOpt {
+	return func(c *TaskRunConfig) error {
+		nc, cleanup, err := SetupTempNamespace(cc, opts...)
+		if err != nil {
+			return err
+		}
+		c.Namespace = nc.Name
+		c.NamespaceConfig = nc
+		c.CleanupFuncs = append(c.CleanupFuncs, cleanup)
+		return nil
+	}
+}
+
+// UsingTask configures the task run to execute the Task identified by name in
+// the configured namespace.
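+//
+// A minimal sketch (the task name is illustrative; see the package example
+// in doc.go for a complete setup):
+//
+//	err := RunTask(
+//		InNamespace(nc.Name),
+//		UsingTask("my-task"),
+//	)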
+func UsingTask(name string) TaskRunOpt {
+	return func(c *TaskRunConfig) error {
+		c.Name = name
+		return nil
+	}
+}
+
+// WithServiceAccountName configures the task run to execute under the
+// specified service account name.
+func WithServiceAccountName(name string) TaskRunOpt {
+	return func(c *TaskRunConfig) error {
+		c.ServiceAccountName = name
+		return nil
+	}
+}
+
+// WithTimeout configures the task run to execute within the given duration.
+func WithTimeout(timeout time.Duration) TaskRunOpt {
+	return func(c *TaskRunConfig) error {
+		c.Timeout = timeout
+		return nil
+	}
+}
+
+// WithWorkspace sets up a workspace with the given name and the contents of
+// sourceDir. sourceDir is copied to a temporary directory so that the
+// original contents remain unchanged.
+func WithWorkspace(name, sourceDir string, opts ...WorkspaceOpt) TaskRunOpt {
+	return func(c *TaskRunConfig) error {
+		workspaceDir, cleanup, err := SetupWorkspaceDir(sourceDir)
+		if err != nil {
+			return err
+		}
+		log.Printf("Workspace %q is in %s ...\n", name, workspaceDir)
+		wc := &WorkspaceConfig{
+			Name:    name,
+			Dir:     workspaceDir,
+			Cleanup: cleanup,
+		}
+		for _, o := range opts {
+			err := o(wc)
+			if err != nil {
+				return err
+			}
+		}
+		c.WorkspaceConfigs[wc.Name] = wc
+		c.CleanupFuncs = append(c.CleanupFuncs, wc.Cleanup)
+		c.Workspaces[wc.Name] = wc.Dir
+		return nil
+	}
+}
+
+// WithParams configures the task run to use the specified Tekton parameters.
+func WithParams(params ...tekton.Param) TaskRunOpt {
+	return func(c *TaskRunConfig) error {
+		c.Params = append(c.Params, params...)
+		return nil
+	}
+}
+
+// WithStringParams configures the task run to use the specified string
+// parameters. WithStringParams is a more convenient way to configure
+// simple parameters compared to WithParams.
+func WithStringParams(params map[string]string) TaskRunOpt {
+	return func(c *TaskRunConfig) error {
+		c.Params = append(c.Params, TektonParamsFromStringParams(params)...)
+		return nil
+	}
+}
+
+// ExpectFailure sets up an expectation that the task will fail. If the task
+// does not fail, RunTask will error. Conversely, if ExpectFailure is not set,
+// RunTask will error when the task run fails.
+func ExpectFailure() TaskRunOpt {
+	return func(c *TaskRunConfig) error {
+		c.ExpectFailure = true
+		return nil
+	}
+}
+
+// AfterRun registers a function which is run after the task run completes.
+// The function receives the task run configuration, the TaskRun instance,
+// and the collected logs.
+func AfterRun(f func(c *TaskRunConfig, r *tekton.TaskRun, l bytes.Buffer)) TaskRunOpt {
+	return func(c *TaskRunConfig) error {
+		c.AfterRunFunc = f
+		return nil
+	}
+}
+
+// SetupWorkspaceDir copies sourceDir to the KinD mount host path, which is
+// set to /tmp/ods-pipeline/kind-mount. The created folder can then be used
+// as a Tekton task run workspace. SetupWorkspaceDir returns the
+// created directory as well as a function to clean it up.
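+//
+// A minimal usage sketch (the source directory is illustrative):
+//
+//	dir, cleanup, err := SetupWorkspaceDir("my-sample-app")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer cleanup()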
+func SetupWorkspaceDir(sourceDir string) (dir string, cleanup func(), err error) {
+	dir, err = directory.CopyToTempDir(sourceDir, KinDMountHostPath, "workspace-")
+	cleanup = func() {
+		if err := os.RemoveAll(dir); err != nil {
+			log.Printf("failed to clean up temporary workspace dir %s: %s", dir, err)
+		}
+	}
+	return
+}
diff --git a/pkg/tektontaskrun/workspace_opt.go b/pkg/tektontaskrun/workspace_opt.go
new file mode 100644
index 00000000..c6c52fd3
--- /dev/null
+++ b/pkg/tektontaskrun/workspace_opt.go
@@ -0,0 +1,14 @@
+package tektontaskrun
+
+// WorkspaceOpt allows further configuration of a Tekton workspace after its creation.
+type WorkspaceOpt func(c *WorkspaceConfig) error
+
+// WorkspaceConfig describes a Tekton workspace.
+type WorkspaceConfig struct {
+	// Name of the Tekton workspace.
+	Name string
+	// Directory on the host of the workspace.
+	Dir string
+	// Cleanup function.
+	Cleanup func()
+}
diff --git a/scripts/build-and-push-images.sh b/scripts/build-and-push-images.sh
deleted file mode 100755
index 8e17438c..00000000
--- a/scripts/build-and-push-images.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/bin/bash
-set -eu
-
-# To carry out normal operations like running ODS Tekton Tasks,
-# we need the ODS tasks images available in the KinD cluster.
-REGISTRY="localhost:5000"
-NAMESPACE="ods"
-
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-ODS_PIPELINE_DIR=${SCRIPT_DIR%/*}
-
-SKIP_BUILD="false"
-IMAGES=""
-http_proxy="${http_proxy:-}"
-https_proxy="${https_proxy:-}"
-
-PLATFORM=""
-# eg. --platform linux/amd64
-
-while [ "$#" -gt 0 ]; do
-    case $1 in
-
-    -v|--verbose) set -x;;
-
-    --skip-build) SKIP_BUILD="true";;
-
-    -i|--image) IMAGES="$2"; shift;;
-    -i=*|--image=*) IMAGES="${1#*=}";;
-
-    -p|--platform) PLATFORM="$2"; shift;;
-    -p=*|--platform=*) PLATFORM="${1#*=}";;
-
-    *) echo "Unknown parameter passed: $1"; exit 1;;
-esac; shift; done
-
-cd "$ODS_PIPELINE_DIR"
-
-build_and_push_image() {
-    odsImage="ods-$image"
-    if [ "${SKIP_BUILD}" != "true" ]; then
-        echo "Building image $REGISTRY/$NAMESPACE/$odsImage..."
-        # shellcheck disable=SC2086
-        docker build $platform_arg \
-            --build-arg http_proxy="$http_proxy" \
-            --build-arg https_proxy="$https_proxy" \
-            --build-arg HTTP_PROXY="$http_proxy" \
-            --build-arg HTTPS_PROXY="$https_proxy" \
-            -f build/package/Dockerfile."$image" -t $REGISTRY/$NAMESPACE/"$odsImage" .
-    fi
-    echo "Pushing image to $REGISTRY/$NAMESPACE/$odsImage ..."
- docker push "$REGISTRY/$NAMESPACE/$odsImage" -} - -platform_arg= -if [ -n "$PLATFORM" ]; then - platform_arg="--platform=${PLATFORM}" -fi - -if [ -z "$IMAGES" ]; then - for file in build/package/Dockerfile.*; do - image=${file##*Dockerfile.} - build_and_push_image - done -else - for image in $IMAGES; do - build_and_push_image - done -fi diff --git a/scripts/install-inside-kind.sh b/scripts/install-inside-kind.sh index c16f9a9b..308b46ea 100755 --- a/scripts/install-inside-kind.sh +++ b/scripts/install-inside-kind.sh @@ -1,51 +1,51 @@ #!/usr/bin/env bash set -ue -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -ODS_PIPELINE_DIR=${SCRIPT_DIR%/*} -kind_values_dir="${ODS_PIPELINE_DIR}/deploy/.kind-values" -HELM_GENERATED_VALUES_FILE="${ODS_PIPELINE_DIR}/deploy/ods-pipeline/values.generated.yaml" +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ods_pipeline_dir=${script_dir%/*} +kind_deploy_path="/tmp/ods-pipeline/kind-deploy" +kind_values_dir="/tmp/ods-pipeline/kind-values" +helm_generated_values_file="${kind_deploy_path}/chart/values.generated.yaml" -URL_SUFFIX="http" -BITBUCKET_AUTH="unavailable" -NEXUS_AUTH="unavailable:unavailable" -SONAR_AUTH="unavailable" +url_suffix="http" +bitbucket_auth="unavailable" +nexus_auth="unavailable:unavailable" if [ "$#" -gt 0 ]; then case $1 in - --private-cert=*) URL_SUFFIX="https"; + --private-cert=*) url_suffix="https"; esac; fi +# Copy deploy path to tmp dir as the deploy path may be used through the Go package. +# The source directories of Go packages are placed into a non-writable location. +rm -rf "${kind_deploy_path}" +cp -r "${ods_pipeline_dir}/deploy" "${kind_deploy_path}" +chmod -R u+w "${kind_deploy_path}" + if [ -f "${kind_values_dir}/bitbucket-auth" ]; then - BITBUCKET_AUTH=$(cat "${kind_values_dir}/bitbucket-auth") + bitbucket_auth=$(cat "${kind_values_dir}/bitbucket-auth") fi if [ -f "${kind_values_dir}/nexus-auth" ]; then - NEXUS_AUTH=$(cat "${kind_values_dir}/nexus-auth") -fi -if [ -f "${kind_values_dir}/sonar-auth" ]; then - SONAR_AUTH=$(cat "${kind_values_dir}/sonar-auth") + nexus_auth=$(cat "${kind_values_dir}/nexus-auth") fi -if [ ! 
-e "${HELM_GENERATED_VALUES_FILE}" ]; then
-  echo "setup:" > "${HELM_GENERATED_VALUES_FILE}"
+touch "${helm_generated_values_file}"
+if [ -f "${kind_values_dir}/bitbucket-${url_suffix}" ]; then
+  bitbucket_url=$(cat "${kind_values_dir}/bitbucket-${url_suffix}")
+  echo "bitbucketUrl: '${bitbucket_url}'" >> "${helm_generated_values_file}"
 fi
-if [ -f "${kind_values_dir}/bitbucket-${URL_SUFFIX}" ]; then
-  BITBUCKET_URL=$(cat "${kind_values_dir}/bitbucket-${URL_SUFFIX}")
-  echo "  bitbucketUrl: '${BITBUCKET_URL}'" >> "${HELM_GENERATED_VALUES_FILE}"
+if [ -f "${kind_values_dir}/nexus-${url_suffix}" ]; then
+  nexus_url=$(cat "${kind_values_dir}/nexus-${url_suffix}")
+  echo "nexusUrl: '${nexus_url}'" >> "${helm_generated_values_file}"
 fi
-if [ -f "${kind_values_dir}/nexus-${URL_SUFFIX}" ]; then
-  NEXUS_URL=$(cat "${kind_values_dir}/nexus-${URL_SUFFIX}")
-  echo "  nexusUrl: '${NEXUS_URL}'" >> "${HELM_GENERATED_VALUES_FILE}"
-fi
-if [ -f "${kind_values_dir}/sonar-${URL_SUFFIX}" ]; then
-  SONAR_URL=$(cat "${kind_values_dir}/sonar-${URL_SUFFIX}")
-  echo "  sonarUrl: '${SONAR_URL}'" >> "${HELM_GENERATED_VALUES_FILE}"
+
+values_arg="${kind_deploy_path}/chart/values.kind.yaml"
+if [ -s "${helm_generated_values_file}" ]; then
+  values_arg="${values_arg},${helm_generated_values_file}"
 fi
-"${ODS_PIPELINE_DIR}"/deploy/install.sh \
-  --aqua-auth "unavailable:unavailable" \
-  --aqua-scanner-url "none" \
-  --bitbucket-auth "${BITBUCKET_AUTH}" \
-  --nexus-auth "${NEXUS_AUTH}" \
-  --sonar-auth "${SONAR_AUTH}" \
-  -f "./ods-pipeline/values.kind.yaml,${HELM_GENERATED_VALUES_FILE}" "$@"
+cd "${kind_deploy_path}"
+bash ./install.sh \
+  --bitbucket-auth "${bitbucket_auth}" \
+  --nexus-auth "${nexus_auth}" \
+  -f "${values_arg}" "$@"
diff --git a/scripts/install-tekton-pipelines.sh b/scripts/install-tekton-pipelines.sh
index 1aabc345..878655a0 100755
--- a/scripts/install-tekton-pipelines.sh
+++ b/scripts/install-tekton-pipelines.sh
@@ -1,17 +1,17 @@
 #!/bin/bash
 set -eu
 
-KUBE_CONTEXT="--context kind-kind"
-KUBECTL_BIN="kubectl $KUBE_CONTEXT"
+kube_context="--context kind-ods-pipeline"
+kubectl_bin="kubectl $kube_context"
 
-# Tekton version is aligned with Red Hat OpenShift Pipelines General Availability 1.6.
-# See https://docs.openshift.com/container-platform/4.9/cicd/pipelines/op-release-notes.html.
-TKN_VERSION="v0.41.1"
-TKN_DASHBOARD_VERSION="v0.17.0"
+# Tekton version is aligned with Red Hat OpenShift Pipelines General Availability 1.10.
+# See https://docs.openshift.com/container-platform/latest/cicd/pipelines/op-release-notes.html.
+tkn_version="v0.44.4"
+tkn_dashboard_version="v0.17.0"
 
-INSTALL_TKN_DASHBOARD="false"
+install_tkn_dashboard="false"
 
-if ! which kubectl &> /dev/null; then
+if ! command -v kubectl &> /dev/null; then
     echo "kubectl is required"
 fi
 
@@ -20,20 +20,20 @@ while [ "$#" -gt 0 ]; do
 
     -v|--verbose) set -x;;
 
-    --tekton-dashboard) INSTALL_TKN_DASHBOARD="true";;
+    --tekton-dashboard) install_tkn_dashboard="true";;
 
     *) echo "Unknown parameter passed: $1"; exit 1;;
 esac; shift; done
 
 # Install Tekton
 # https://tekton.dev/docs/getting-started/#installation
-if ! $KUBECTL_BIN get namespace tekton-pipelines &> /dev/null; then
+if ! $kubectl_bin get namespace tekton-pipelines &> /dev/null; then
     echo "Installing Tekton ..."
- $KUBECTL_BIN apply --filename https://storage.googleapis.com/tekton-releases/pipeline/previous/${TKN_VERSION}/release.notags.yaml + $kubectl_bin apply --filename https://storage.googleapis.com/tekton-releases/pipeline/previous/${tkn_version}/release.notags.yaml - if [ "${INSTALL_TKN_DASHBOARD}" != "false" ]; then + if [ "${install_tkn_dashboard}" != "false" ]; then echo "Installing Tekton Dashboard..." - $KUBECTL_BIN apply --filename https://storage.googleapis.com/tekton-releases/dashboard/previous/${TKN_DASHBOARD_VERSION}/tekton-dashboard-release.yaml + $kubectl_bin apply --filename https://storage.googleapis.com/tekton-releases/dashboard/previous/${tkn_dashboard_version}/tekton-dashboard-release.yaml fi else echo "Tekton already installed." diff --git a/scripts/kind-with-registry.sh b/scripts/kind-with-registry.sh index 41e0070e..d187a998 100755 --- a/scripts/kind-with-registry.sh +++ b/scripts/kind-with-registry.sh @@ -19,89 +19,95 @@ set -o errexit -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -ODS_PIPELINE_DIR=${SCRIPT_DIR%/*} +if ! command -v kind >/dev/null 2>&1; then + echo "kind is not installed. Please see https://kind.sigs.k8s.io/" + exit 1 +fi # desired cluster name; default is "kind" -KIND_CLUSTER_NAME="kind" -RECREATE_KIND_CLUSTER="false" -REGISTRY_PORT="5000" +kind_cluster_name="ods-pipeline" +recreate_kind_cluster="false" +registry_port="5000" +kind_mount_path="/tmp/ods-pipeline/kind-mount" # K8S version is aligned with OpenShift GA 4.11. # See https://docs.openshift.com/container-platform/4.11/release_notes/ocp-4-11-release-notes.html -K8S_VERSION="v1.24.7" +k8s_version="v1.24.7" while [ "$#" -gt 0 ]; do case $1 in -v|--verbose) set -x;; - --name) KIND_CLUSTER_NAME="$2"; shift;; - --name=*) KIND_CLUSTER_NAME="${1#*=}";; + --name) kind_cluster_name="$2"; shift;; + --name=*) kind_cluster_name="${1#*=}";; - --recreate) RECREATE_KIND_CLUSTER="true";; + --recreate) recreate_kind_cluster="true";; - --registry-port) REGISTRY_PORT="$2"; shift;; - --registry-port=*) REGISTRY_PORT="${1#*=}";; + --registry-port) registry_port="$2"; shift;; + --registry-port=*) registry_port="${1#*=}";; *) echo "Unknown parameter passed: $1"; exit 1;; esac; shift; done -kind_version=$(kind version) -REGISTRY_NAME="${KIND_CLUSTER_NAME}-registry" +registry_name="${kind_cluster_name}-registry" reg_ip_selector='{{.NetworkSettings.Networks.kind.IPAddress}}' reg_network='kind' -case "${kind_version}" in - "kind v0.7."* | "kind v0.6."* | "kind v0.5."*) - reg_ip_selector='{{.NetworkSettings.IPAddress}}' - reg_network='bridge' - ;; -esac # create registry container unless it already exists -running="$(docker inspect -f '{{.State.Running}}' "${REGISTRY_NAME}" 2>/dev/null || true)" +running="$(docker inspect -f '{{.State.Running}}' "${registry_name}" 2>/dev/null || true)" # If the registry already exists, but is in the wrong network, we have to # re-create it. 
if [ "${running}" = 'true' ]; then - reg_ip="$(docker inspect -f ${reg_ip_selector} "${REGISTRY_NAME}")" + reg_ip="$(docker inspect -f ${reg_ip_selector} "${registry_name}")" if [ "${reg_ip}" = '' ]; then - docker kill "${REGISTRY_NAME}" - docker rm "${REGISTRY_NAME}" + docker kill "${registry_name}" + docker rm "${registry_name}" running="false" fi fi if [ "${running}" != 'true' ]; then - if [ "${reg_network}" != "bridge" ]; then - docker network create "${reg_network}" || true + net_driver=$(docker network inspect "${reg_network}" -f '{{.Driver}}' || true) + if [ "${net_driver}" != "bridge" ]; then + docker network create "${reg_network}" + fi + if docker inspect "${registry_name}" >/dev/null 2>&1; then + docker rm "${registry_name}" fi - docker run \ - -d --restart=always -p "${REGISTRY_PORT}:5000" --name "${REGISTRY_NAME}" --net "${reg_network}" \ + -d --restart=always -p "${registry_port}:5000" --name "${registry_name}" --net "${reg_network}" \ registry:2 fi -reg_ip="$(docker inspect -f ${reg_ip_selector} "${REGISTRY_NAME}")" +reg_ip="$(docker inspect -f ${reg_ip_selector} "${registry_name}")" if [ "${reg_ip}" = "" ]; then echo "Error creating registry: no IPAddress found at: ${reg_ip_selector}" exit 1 fi -echo "Registry IP: ${reg_ip}" -if [ "${RECREATE_KIND_CLUSTER}" == "true" ]; then - kind delete cluster --name "${KIND_CLUSTER_NAME}" +if [ "${recreate_kind_cluster}" == "false" ]; then + if kind get clusters | grep "${kind_cluster_name}" >/dev/null 2>&1; then + echo "Reusing existing cluster ..." + exit 0 + fi +fi + +if [ "${recreate_kind_cluster}" == "true" ]; then + kind delete cluster --name "${kind_cluster_name}" fi # create a cluster with the local registry enabled in containerd -cat < /dev/null; then + echo "No existing Bitbucket container ${BITBUCKET_SERVER_CONTAINER_NAME} found ..." + else + echo "Reusing existing Bitbucket container ${BITBUCKET_SERVER_CONTAINER_NAME} ..." + exit 0 + fi +fi + echo "Run Postgres container" docker rm -f ${BITBUCKET_POSTGRES_CONTAINER_NAME} || true docker run --name ${BITBUCKET_POSTGRES_CONTAINER_NAME} \ @@ -61,14 +74,14 @@ docker run --name ${BITBUCKET_SERVER_CONTAINER_NAME} \ -d --net kind -p "${HOST_HTTP_PORT}:7990" -p 7999:7999 \ "${BITBUCKET_SERVER_IMAGE_NAME}:${BITBUCKET_SERVER_IMAGE_TAG}" -if ! "${SCRIPT_DIR}/waitfor-bitbucket.sh" ; then +if ! bash "${SCRIPT_DIR}/waitfor-bitbucket.sh" ; then docker logs ${BITBUCKET_SERVER_CONTAINER_NAME} exit 1 fi echo "Launch TLS proxy" TLS_CONTAINER_NAME="${BITBUCKET_SERVER_CONTAINER_NAME}-tls" -"${SCRIPT_DIR}/run-tls-proxy.sh" \ +bash "${SCRIPT_DIR}/run-tls-proxy.sh" \ --container-name "${TLS_CONTAINER_NAME}" \ --https-port "${HOST_HTTPS_PORT}" \ --nginx-conf "nginx-bitbucket.conf" diff --git a/scripts/run-nexus.sh b/scripts/run-nexus.sh index 16c04523..1232e00a 100755 --- a/scripts/run-nexus.sh +++ b/scripts/run-nexus.sh @@ -15,8 +15,10 @@ NEXUS_URL= IMAGE_NAME="ods-test-nexus" CONTAINER_NAME="ods-test-nexus" NEXUS_IMAGE_TAG="3.30.1" -kind_values_dir="${ODS_PIPELINE_DIR}/deploy/.kind-values" +kind_values_dir="/tmp/ods-pipeline/kind-values" +mkdir -p "${kind_values_dir}" DOCKER_CONTEXT_DIR="${ODS_PIPELINE_DIR}/test/testdata/private-cert" +reuse="false" while [ "$#" -gt 0 ]; do case $1 in @@ -25,9 +27,20 @@ while [ "$#" -gt 0 ]; do -i|--insecure) INSECURE="--insecure";; + -r|--reuse) reuse="true";; + *) echo "Unknown parameter passed: $1"; exit 1;; esac; shift; done +if [ "${reuse}" = "true" ]; then + if ! 
docker inspect ${CONTAINER_NAME} &> /dev/null; then + echo "No existing Nexus container ${CONTAINER_NAME} found ..." + else + echo "Reusing existing Nexus container ${CONTAINER_NAME} ..." + exit 0 + fi +fi + echo "Run container using image tag ${NEXUS_IMAGE_TAG}" docker rm -f ${CONTAINER_NAME} || true cd "${SCRIPT_DIR}"/nexus @@ -35,7 +48,7 @@ docker build -t ${IMAGE_NAME} -f "Dockerfile.$(uname -m)" "${DOCKER_CONTEXT_DIR} cd - &> /dev/null docker run -d -p "${HOST_HTTP_PORT}:8081" --net kind --name ${CONTAINER_NAME} ${IMAGE_NAME} -if ! "${SCRIPT_DIR}/waitfor-nexus.sh" ; then +if ! bash "${SCRIPT_DIR}"/waitfor-nexus.sh ; then docker logs ${CONTAINER_NAME} exit 1 fi @@ -81,13 +94,13 @@ echo "Setup developer role" runJsonScript "createRole" "-d @${SCRIPT_DIR}/nexus/developer-role.json" echo "Setup developer user" -sed "s|@developer_password@|${DEVELOPER_PASSWORD}|g" "${SCRIPT_DIR}"/nexus/developer-user.json > "${SCRIPT_DIR}"/nexus/developer-user-with-password.json -runJsonScript "createUser" "-d @${SCRIPT_DIR}/nexus/developer-user-with-password.json" -rm "${SCRIPT_DIR}"/nexus/developer-user-with-password.json +sed "s|@developer_password@|${DEVELOPER_PASSWORD}|g" "${SCRIPT_DIR}"/nexus/developer-user.json > /tmp/nexus-developer-user-with-password.json +runJsonScript "createUser" "-d @/tmp/nexus-developer-user-with-password.json" +rm /tmp/nexus-developer-user-with-password.json echo "Launch TLS proxy" TLS_CONTAINER_NAME="${CONTAINER_NAME}-tls" -"${SCRIPT_DIR}/run-tls-proxy.sh" \ +bash "${SCRIPT_DIR}/run-tls-proxy.sh" \ --container-name "${TLS_CONTAINER_NAME}" \ --https-port "${HOST_HTTPS_PORT}" \ --nginx-conf "nginx-nexus.conf" diff --git a/scripts/run-sonarqube.sh b/scripts/run-sonarqube.sh index b919d741..f833d7a5 100755 --- a/scripts/run-sonarqube.sh +++ b/scripts/run-sonarqube.sh @@ -2,7 +2,6 @@ set -ue SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -ODS_PIPELINE_DIR=${SCRIPT_DIR%/*} INSECURE="" HOST_HTTP_PORT="9000" @@ -14,7 +13,9 @@ SONAR_USERNAME="admin" SONAR_PASSWORD="admin" SONAR_EDITION="community" SONAR_IMAGE_TAG="${SONAR_VERSION}-${SONAR_EDITION}" -kind_values_dir="${ODS_PIPELINE_DIR}/deploy/.kind-values" +kind_values_dir="/tmp/ods-pipeline/kind-values" +mkdir -p "${kind_values_dir}" +reuse="false" while [ "$#" -gt 0 ]; do case $1 in @@ -23,9 +24,20 @@ while [ "$#" -gt 0 ]; do -i|--insecure) INSECURE="--insecure";; + -r|--reuse) reuse="true";; + *) echo "Unknown parameter passed: $1"; exit 1;; esac; shift; done +if [ "${reuse}" = "true" ]; then + if [ "$(docker inspect ${CONTAINER_NAME} -f '{{.State.Running}}')" = "true" ]; then + echo "Reusing running SonarQube container ${CONTAINER_NAME} ..." + exit 0 + else + echo "No running SonarQube container ${CONTAINER_NAME} found ..." + fi +fi + echo "Run container using image tag ${SONAR_IMAGE_TAG}" docker rm -f ${CONTAINER_NAME} || true @@ -54,7 +66,7 @@ cd - &> /dev/null docker run -d --net kind --name ${CONTAINER_NAME} -e SONAR_ES_BOOTSTRAP_CHECKS_DISABLE=true -p "${HOST_HTTP_PORT}:9000" ${IMAGE_NAME}:${SONAR_IMAGE_TAG} SONARQUBE_URL="http://localhost:${HOST_HTTP_PORT}" -if ! "${SCRIPT_DIR}/waitfor-sonarqube.sh" ; then +if ! 
bash "${SCRIPT_DIR}/waitfor-sonarqube.sh" ; then docker logs ${CONTAINER_NAME} exit 1 fi @@ -68,7 +80,7 @@ token=$(echo "${tokenResponse}" | jq -r .token) echo "Launch TLS proxy" TLS_CONTAINER_NAME="${CONTAINER_NAME}-tls" -"${SCRIPT_DIR}/run-tls-proxy.sh" \ +bash "${SCRIPT_DIR}/run-tls-proxy.sh" \ --container-name "${TLS_CONTAINER_NAME}" \ --https-port "${HOST_HTTPS_PORT}" \ --nginx-conf "nginx-sonarqube.conf" diff --git a/scripts/start-local-env.sh b/scripts/start-local-env.sh deleted file mode 100755 index 7e08a13e..00000000 --- a/scripts/start-local-env.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -set -ue - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - -kind_registry='kind-registry' -kind_control_plane='kind-control-plane' -BITBUCKET_POSTGRES_CONTAINER_NAME="ods-test-bitbucket-postgres" -BITBUCKET_SERVER_CONTAINER_NAME="ods-test-bitbucket-server" -NEXUS_CONTAINER_NAME="ods-test-nexus" -SQ_CONTAINER_NAME="ods-test-sonarqube" - -container_names_in_start_order=( "$kind_registry" "$kind_control_plane" "$BITBUCKET_POSTGRES_CONTAINER_NAME" "$BITBUCKET_SERVER_CONTAINER_NAME" "$NEXUS_CONTAINER_NAME" - "$SQ_CONTAINER_NAME" ) - -for cn in "${container_names_in_start_order[@]}"; do - echo docker start "$cn" - docker start "$cn" -done - -echo "Waiting for tools to start..." -echo "If this times out you can run this script again." - -"$SCRIPT_DIR/waitfor-bitbucket.sh" -"$SCRIPT_DIR/waitfor-nexus.sh" -"$SCRIPT_DIR/waitfor-sonarqube.sh" - -echo "Please start k9s and see pods are all ready before using cluster." diff --git a/scripts/stop-local-env.sh b/scripts/stop-local-env.sh deleted file mode 100755 index 12c5886d..00000000 --- a/scripts/stop-local-env.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash -set -ue - -kind_registry='kind-registry' -kind_control_plane='kind-control-plane' -BITBUCKET_POSTGRES_CONTAINER_NAME="ods-test-bitbucket-postgres" -BITBUCKET_SERVER_CONTAINER_NAME="ods-test-bitbucket-server" -NEXUS_CONTAINER_NAME="ods-test-nexus" -SQ_CONTAINER_NAME="ods-test-sonarqube" - -container_names_in_stop_order=( "$SQ_CONTAINER_NAME" "$NEXUS_CONTAINER_NAME" "$BITBUCKET_SERVER_CONTAINER_NAME" "$BITBUCKET_POSTGRES_CONTAINER_NAME" "$kind_control_plane" "$kind_registry" ) - -for cn in "${container_names_in_stop_order[@]}"; do - echo docker stop "$cn" - docker stop "$cn" || true -done diff --git a/scripts/waitfor-bitbucket.sh b/scripts/waitfor-bitbucket.sh index c193d2f6..0debb250 100755 --- a/scripts/waitfor-bitbucket.sh +++ b/scripts/waitfor-bitbucket.sh @@ -29,7 +29,7 @@ until [ $n -ge 30 ]; do break else echo -n "." - sleep 10 + sleep 5 n=$((n+1)) fi done diff --git a/scripts/waitfor-nexus.sh b/scripts/waitfor-nexus.sh index 462de568..cdb3cf80 100755 --- a/scripts/waitfor-nexus.sh +++ b/scripts/waitfor-nexus.sh @@ -28,7 +28,7 @@ function waitForReady { break else echo -n "." - sleep 10 + sleep 5 n=$((n+1)) fi done diff --git a/scripts/waitfor-sonarqube.sh b/scripts/waitfor-sonarqube.sh index 8b0c431a..ea95e293 100755 --- a/scripts/waitfor-sonarqube.sh +++ b/scripts/waitfor-sonarqube.sh @@ -30,7 +30,7 @@ until [ $n -ge 30 ]; do break else echo -n "." 
- sleep 10 + sleep 5 n=$((n+1)) fi done diff --git a/tasks/ods-build-go.yaml b/tasks/ods-build-go.yaml deleted file mode 100644 index 2d71beb1..00000000 --- a/tasks/ods-build-go.yaml +++ /dev/null @@ -1,193 +0,0 @@ - -# Source: tasks/templates/task-ods-build-go.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-build-go' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Go (module) applications. - - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-build-go.adoc - params: - - name: working-dir - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - type: string - default: "." - - name: enable-cgo - description: Whether to enable CGO. When not enabled the build will set `CGO_ENABLED=0`. - type: string - default: "false" - - name: go-os - description: "`GOOS` variable (the execution operating system such as `linux`, `windows`)." - type: string - default: "linux" - - name: go-arch - description: "`GOARCH` variable (the execution architecture such as `arm`, `amd64`)." - type: string - default: "amd64" - - name: output-dir - description: >- - Path to the directory into which the resulting Go binary should be copied, relative to `working-dir`. - This directory may then later be used as Docker context for example. - type: string - default: docker - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. - You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - type: string - default: "" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-go.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. See the task definition - for details how the build script is invoked. - type: string - default: "/usr/local/bin/build-go" - - name: pre-test-script - description: Script to execute before running tests, relative to the working directory. - type: string - default: "" - - name: sonar-quality-gate - description: Whether the SonarQube quality gate needs to pass for the task to succeed. - type: string - default: "false" - - name: sonar-skip - description: Whether to skip SonarQube analysis or not. - type: string - default: "false" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - steps: - - name: build-go-binary - # Image is built from build/package/Dockerfile.go-toolset. 
- image: 'ghcr.io/opendevstack/ods-pipeline/ods-go-toolset:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: - {} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=go-$(params.go-os)-$(params.go-arch) - if copy-build-if-cached \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.output-dir) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 - fi - # Default build script is build/package/scripts/build-go.sh. - set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --enable-cgo=$(params.enable-cgo) \ - --go-os=$(params.go-os) \ - --go-arch=$(params.go-arch) \ - --pre-test-script=$(params.pre-test-script) \ - --output-dir=$(params.output-dir) \ - --debug=${DEBUG} - build_exit=$? - set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.output-dir) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - - name: scan-with-sonar - # Image is built from build/package/Dockerfile.sonar. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-sonar:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - - truststore="${JAVA_HOME}/lib/security/cacerts" - if [ -f /etc/ssl/certs/private-cert.pem ]; then - truststore="$(pwd)/.ods-cache/truststore/cacerts" - configure-truststore --dest-store "${truststore}" - fi - # sonar is built from cmd/sonar/main.go. - sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) \ - -truststore "${truststore}" - fi - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source diff --git a/tasks/ods-build-gradle.yaml b/tasks/ods-build-gradle.yaml deleted file mode 100644 index bac13d9d..00000000 --- a/tasks/ods-build-gradle.yaml +++ /dev/null @@ -1,222 +0,0 @@ - -# Source: tasks/templates/task-ods-build-gradle.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-build-gradle' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Gradle applications. 
- - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-build-gradle.adoc - params: - - name: working-dir - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - type: string - default: "." - - name: gradle-additional-tasks - description: >- - Additional gradle tasks to be passed to the gradle build. (default tasks called are `clean` and `build`). - type: string - default: "" - - name: gradle-options - description: >- - Options to be passed to the gradle build. - (See ref: https://docs.gradle.org/7.4.2/userguide/command_line_interface.html#sec:command_line_debugging) - type: string - default: "--no-daemon --stacktrace" - - name: gradle-opts-env - description: >- - Will be exposed to the build via `GRADLE_OPTS` environment variable. - Specifies JVM arguments to use when starting the Gradle client VM. The client VM only handles command line input/output, so it is rare that one would need to change its VM options. - You can still use this to change the settings for the Gradle daemon which runs the actual build by setting the according Gradle properties by `-D`. - If you want to set the JVM arguments for the actual build you would do this via `-Dorg.gradle.jvmargs=-Xmx1024M` - (See ref: https://docs.gradle.org/7.4.2/userguide/build_environment.html#sec:gradle_configuration_properties). - type: string - default: "-Dorg.gradle.jvmargs=-Xmx512M" - - name: output-dir - description: >- - Path to the directory into which the resulting Java application jar should be copied, relative to `working-dir`. - This directory may then later be used as Docker context for example. - type: string - default: docker - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. - You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - type: string - default: "" - - name: cached-outputs - description: >- - List of build output directories (as colon separated string) to be cached. - These directories are relative to `working-dir`. - type: string - default: "docker" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-gradle.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. See the task definition - for details how the build script is invoked. - type: string - default: "/usr/local/bin/build-gradle" - - name: gradle-build-dir - description: >- - Path to the directory into which Gradle publishes its build. - type: string - default: build - - name: sonar-quality-gate - description: Whether the SonarQube quality gate needs to pass for the task to succeed. - type: string - default: "false" - - name: sonar-skip - description: Whether to skip SonarQube analysis or not. 
- type: string - default: "false" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - steps: - - name: build-gradle-binary - # Image is built from build/package/Dockerfile.gradle-toolset. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-gradle-toolset:0.13.2' - env: - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: GRADLE_OPTS - value: "$(params.gradle-opts-env)" - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - resources: - {} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=gradle - if copy-build-if-cached \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 - fi - # Default build script is build/package/scripts/build-gradle.sh. - set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --output-dir=$(params.output-dir) \ - --gradle-build-dir=$(params.gradle-build-dir) \ - --gradle-additional-tasks="$(params.gradle-additional-tasks)" \ - --gradle-options="$(params.gradle-options)" - build_exit=$? - set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - - name: scan-with-sonar - # Image is built from build/package/Dockerfile.sonar. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-sonar:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - - truststore="${JAVA_HOME}/lib/security/cacerts" - if [ -f /etc/ssl/certs/private-cert.pem ]; then - truststore="$(pwd)/.ods-cache/truststore/cacerts" - configure-truststore --dest-store "${truststore}" - fi - # sonar is built from cmd/sonar/main.go. 
- sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) \ - -truststore "${truststore}" - fi - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source diff --git a/tasks/ods-build-npm.yaml b/tasks/ods-build-npm.yaml deleted file mode 100644 index 23851a65..00000000 --- a/tasks/ods-build-npm.yaml +++ /dev/null @@ -1,193 +0,0 @@ - -# Source: tasks/templates/task-ods-build-npm.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-build-npm' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Node.js applications using npm. - - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-build-npm.adoc - params: - - name: working-dir - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - type: string - default: "." - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. - You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - type: string - default: "" - - name: cached-outputs - description: >- - List of build output directories (as colon separated string) to be cached. - These directories are relative to the `working-dir` parameter` - Common build directories are `dist` (default), `build` and `public`. - If empty this could mean that the original sources are being used as build output and no caching of built files are needed. Nonetheless build skipping can still be remain enabled. - type: string - default: "dist" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-npm.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. See the task definition - for details how the build script is invoked. - type: string - default: "/usr/local/bin/build-npm" - - name: sonar-quality-gate - description: Whether quality gate needs to pass. - type: string - default: "false" - - name: sonar-skip - description: Whether to skip the SonarQube analysis or not. - type: string - default: "false" - - name: node-version - description: "Node.js version to use - supported versions: 16, 18" - type: string - default: "18" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - steps: - - name: build-npm - # Image is built from build/package/Dockerfile.node-npm-toolset. 
- image: 'ghcr.io/opendevstack/ods-pipeline/ods-node$(params.node-version)-npm-toolset:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: - {} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=npm - if copy-build-if-cached \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 - fi - # Default build script is build/package/scripts/build-npm.sh. - set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - build_exit=$? - set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - - name: scan-with-sonar - # Image is built from build/package/Dockerfile.sonar. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-sonar:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - - truststore="${JAVA_HOME}/lib/security/cacerts" - if [ -f /etc/ssl/certs/private-cert.pem ]; then - truststore="$(pwd)/.ods-cache/truststore/cacerts" - configure-truststore --dest-store "${truststore}" - fi - # sonar is built from cmd/sonar/main.go. 
- sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) \ - -truststore "${truststore}" - fi - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source diff --git a/tasks/ods-build-python.yaml b/tasks/ods-build-python.yaml deleted file mode 100644 index fd0e69c3..00000000 --- a/tasks/ods-build-python.yaml +++ /dev/null @@ -1,189 +0,0 @@ - -# Source: tasks/templates/task-ods-build-python.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-build-python' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Python applications. - - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-build-python.adoc - params: - - name: working-dir - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - type: string - default: "." - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. - You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - type: string - default: "" - - name: max-line-length - description: Maximum line length. - type: string - default: "120" - - name: pre-test-script - description: Script to execute before running tests, relative to the working directory. - type: string - default: "" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-python.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. See the task definition - for details how the build script is invoked. - type: string - default: "/usr/local/bin/build-python" - - name: sonar-quality-gate - description: Whether quality gate needs to pass. - type: string - default: "false" - - name: sonar-skip - description: Whether to skip the SonarQube analysis or not. - type: string - default: "false" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - steps: - - name: build-python - # Image is built from build/package/Dockerfile.python-toolset. 
- image: 'ghcr.io/opendevstack/ods-pipeline/ods-python-toolset:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: - {} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=python - if copy-build-if-cached \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 - fi - # Default build script is build/package/scripts/build-python.sh. - set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --max-line-length=$(params.max-line-length) \ - --pre-test-script=$(params.pre-test-script) \ - --debug=${DEBUG} - build_exit=$? - set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - - name: scan-with-sonar - # Image is built from build/package/Dockerfile.sonar. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-sonar:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - - truststore="${JAVA_HOME}/lib/security/cacerts" - if [ -f /etc/ssl/certs/private-cert.pem ]; then - truststore="$(pwd)/.ods-cache/truststore/cacerts" - configure-truststore --dest-store "${truststore}" - fi - # sonar is built from cmd/sonar/main.go. 
- sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) \ - -truststore "${truststore}" - fi - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source diff --git a/tasks/ods-deploy-helm.yaml b/tasks/ods-deploy-helm.yaml deleted file mode 100644 index 8fe8c4ce..00000000 --- a/tasks/ods-deploy-helm.yaml +++ /dev/null @@ -1,115 +0,0 @@ - -# Source: tasks/templates/task-ods-deploy-helm.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-deploy-helm' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Deploy Helm charts. - - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-deploy-helm.adoc - params: - - name: chart-dir - description: Helm chart directory that will be deployed - type: string - default: ./chart - - name: release-name - description: | - The Helm release name. If empty, the release name is simply the name of the chart. - - When this task is used in a repository which defines subcharts, and the parameter is not set, - then the task sets `.fullnameOverride` equal to the respective - subcomponent to avoid resources being prefixed with the umbrella repository - component name (assuming your resources are named using the `chart.fullname` - helper). However, if the parameter is specified, `.fullnameOverride` is not set. - As a result the `chart.fullname` helper prefixes resources with the specfied - `release-name` unless the chart's name contains the `release-name`. - type: string - default: '' - - name: diff-flags - description: Flags to pass to `helm diff upgrade` in addition to the ones specified via the `upgrade-flags` parameter. Note that the flags `--detailed-exitcode` and `--no-color` are automatically set and cannot be removed. If flags unknown to `helm diff` are passed, they are ignored. - type: string - default: '--three-way-merge' - - name: upgrade-flags - description: Flags to pass to `helm upgrade`. - type: string - default: '--install --wait' - - name: age-key-secret - description: | - Name of the secret containing the age key to use for helm-secrets. - If the secret exists, it is expected to have a field named `key.txt` with the age secret key in its content. - type: string - default: 'helm-secrets-age-key' - - name: api-server - description: | - API server of the target cluster, including scheme. - Only required if the target namespace is outside the cluster in which - the pipeline runs. - type: string - default: '' - - name: api-credentials-secret - description: | - Name of the Secret resource holding the token of a serviceaccount (in field `token`). - Only required when `api-server` is set. - type: string - default: '' - - name: namespace - description: | - Target K8s namespace (or OpenShift project) to deploy into. - If empty, the task will be a no-op. - type: string - default: '' - - name: registry-host - description: | - Hostname of the target registry to push images to. - If not given, the registy host of the source image is used. - type: string - default: '' - - name: diff-only - description: | - If set to true, the task will only perform a diff, and then stop. - No images will be promoted or upgrades attempted. 
- type: string - default: 'false' - steps: - - name: helm-upgrade-from-repo - # Image is built from build/package/Dockerfile.helm. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-helm:0.13.2' - env: - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - - name: HOME - value: '/tekton/home' - resources: {} - script: | - # deploy-helm is built from /cmd/deploy-helm/main.go. - deploy-helm \ - -chart-dir=$(params.chart-dir) \ - -namespace=$(params.namespace) \ - -release-name=$(params.release-name) \ - -diff-flags="$(params.diff-flags)" \ - -upgrade-flags="$(params.upgrade-flags)" \ - -age-key-secret=$(params.age-key-secret) \ - -api-server=$(params.api-server) \ - -api-credentials-secret=$(params.api-credentials-secret) \ - -registry-host=$(params.registry-host) \ - -diff-only=$(params.diff-only) - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source diff --git a/tasks/ods-finish.yaml b/tasks/ods-finish.yaml deleted file mode 100644 index 2059e8e4..00000000 --- a/tasks/ods-finish.yaml +++ /dev/null @@ -1,87 +0,0 @@ - -# Source: tasks/templates/task-ods-finish.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-finish' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Finishes the pipeline run. - - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-finish.adoc - params: - - name: pipeline-run-name - description: Name of pipeline run. - type: string - - name: aggregate-tasks-status - description: Aggregate status of all tasks. - default: 'None' - - name: artifact-target - description: Artifact target respository - default: '' - steps: - - name: ods-finish - # Image is built from build/package/Dockerfile.finish. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-finish:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: BITBUCKET_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-bitbucket - - name: BITBUCKET_ACCESS_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-bitbucket-auth - - name: CONSOLE_URL - valueFrom: - configMapKeyRef: - key: consoleUrl - name: ods-cluster - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - - # ods-finish is built from cmd/finish/main.go. 
- ods-finish \ - -pipeline-run-name=$(params.pipeline-run-name) \ - -aggregate-tasks-status=$(params.aggregate-tasks-status) \ - -artifact-target=$(params.artifact-target) - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - description: The git repo will be present onto the volume backing this workspace - name: source diff --git a/tasks/ods-package-image.yaml b/tasks/ods-package-image.yaml deleted file mode 100644 index ec569f22..00000000 --- a/tasks/ods-package-image.yaml +++ /dev/null @@ -1,192 +0,0 @@ - -# Source: tasks/templates/task-ods-package-image.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-package-image' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Packages applications into container images using buildah. - - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-package-image.adoc - params: - - name: registry - description: Image registry to push image to. - type: string - default: 'image-registry.openshift-image-registry.svc:5000' - - name: image-stream - description: Reference of the image stream buildah will produce. If not set, the value of `.ods/component` is used. - type: string - default: '' - - name: extra-tags - description: Additional image tags (e.g. 'latest dev') for pushed images. The primary tag is based on the commit sha. Only tags currently missing from the image will be added. - type: string # Wanted to use and array but ran into [Cannot refer array params in script #4912](https://github.com/tektoncd/pipeline/issues/4912) - default: '' - - name: storage-driver - description: Set buildah storage driver. - type: string - default: vfs - - name: dockerfile - description: Path to the Dockerfile to build (relative to `docker-dir`). - type: string - default: ./Dockerfile - - name: docker-dir - description: Path to the directory to use as Docker context. - type: string - default: '.' - - name: format - description: 'The format of the built container, `oci` or `docker`.' - type: string - default: oci - - name: buildah-build-extra-args - description: Extra parameters passed for the build command when building images (e.g. '--build-arg=firstArg=one --build-arg=secondArg=two'). - type: string - default: '' - - name: buildah-push-extra-args - description: Extra parameters passed for the push command when pushing images. - type: string - default: '' - - name: trivy-sbom-extra-args - description: Extra parameters passed for the trivy command to generate an SBOM. - type: string - default: '' - - name: aqua-gate - description: Whether the Aqua security scan needs to pass for the task to succeed. - type: string - default: "false" - results: - - description: Digest of the image just built. - name: image-digest - steps: - - name: package-image - # Image is built from build/package/Dockerfile.package-image. 
- image: 'ghcr.io/opendevstack/ods-pipeline/ods-package-image:0.13.2' - env: - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - - # ods-package-image is built from cmd/package-image/main.go. - ods-package-image \ - -image-stream=$(params.image-stream) \ - -extra-tags=$(params.extra-tags) \ - -registry=$(params.registry) \ - -storage-driver=$(params.storage-driver) \ - -format=$(params.format) \ - -dockerfile=$(params.dockerfile) \ - -context-dir=$(params.docker-dir) \ - -buildah-build-extra-args=$(params.buildah-build-extra-args) \ - -buildah-push-extra-args=$(params.buildah-push-extra-args) \ - -trivy-sbom-extra-args=$(params.trivy-sbom-extra-args) - - # As this task does not run unter uid 1001, chown created artifacts - # to make them deletable by ods-start's cleanup procedure. - chown -R 1001:0 .ods/artifacts/image-digests .ods/artifacts/sboms - securityContext: - capabilities: - add: - - SETFCAP - volumeMounts: - - mountPath: /var/lib/containers - name: varlibcontainers - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - - name: aqua-scan - # Image is built from build/package/Dockerfile.aqua-scan. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-aqua-scan:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: BITBUCKET_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-bitbucket - - name: BITBUCKET_ACCESS_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-bitbucket-auth - - name: AQUA_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-aqua - - name: AQUA_REGISTRY - valueFrom: - configMapKeyRef: - key: registry - name: ods-aqua - - name: AQUA_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-aqua-auth - - name: AQUA_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-aqua-auth - - name: AQUA_SCANNER_URL - valueFrom: - secretKeyRef: - key: secret - name: ods-aqua-scanner-url - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "${AQUA_SCANNER_URL:0:4}" != "http" ]; then - echo "Skipping Aqua scan" - else - download-aqua-scanner \ - --aqua-scanner-url=${AQUA_SCANNER_URL} \ - $(case ${DEBUG} in (true) printf -- '--debug'; esac) - - # ods-aqua-scan is built from cmd/aqua-scan/main.go. - ods-aqua-scan \ - -image-stream=$(params.image-stream) \ - -aqua-gate=$(params.aqua-gate) - fi - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - emptyDir: {} - name: varlibcontainers - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source diff --git a/tasks/ods-start.yaml b/tasks/ods-start.yaml deleted file mode 100644 index 80b78ca7..00000000 --- a/tasks/ods-start.yaml +++ /dev/null @@ -1,160 +0,0 @@ - -# Source: tasks/templates/task-ods-start.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-start' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Starts the pipeline run. 
- - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-start.adoc - params: - - name: url - description: Git URL to clone - type: string - - name: git-full-ref - description: 'Git revision to checkout (branch, tag, sha, ref, ...)' - type: string - default: '' - - name: submodules - description: Defines if the resource should initialize and fetch the submodules. - type: string - default: 'true' - - name: clone-depth - description: >- - Perform a shallow clone where only the most recent commit(s) will be - fetched. By default, a full clone is performed. Note that the parameter is of string type, - therefore the depth value must be quoted, e.g. `value: '1'`. - type: string - default: '' - - name: http-proxy - description: Git HTTP proxy server for non-SSL requests. - type: string - default: '' - - name: https-proxy - description: Git HTTPS proxy server for SSL requests. - type: string - default: '' - - name: no-proxy - description: Git no proxy - opt out of proxying HTTP/HTTPS requests. - type: string - default: '' - - name: project - description: >- - Name of the project to build. - The project is equal to the Bitbucket project of the repository to clone. - type: string - - name: pr-key - description: >- - Bitbucket pull request key. - Empty if there is no open PR for the specified Git branch. - type: string - default: '' - - name: pr-base - description: >- - Bitbucket pull request base branch. - Empty if there is no open PR for the specified Git branch. - type: string - default: '' - - name: pipeline-run-name - description: Name of pipeline run. - type: string - - name: cache-build-tasks-for-days - description: >- - Number of days build tasks are cached to enable build skipping. - A subsequent build reusing the cache resets the time for that cache location. - type: string - default: '7' - - name: artifact-source - description: Artifact source respository - type: string - default: '' - results: - - description: The commit SHA that was fetched by this task. - name: commit - - description: The URL that was fetched by this task. - name: url - steps: - - name: ods-start - # Image is built from build/package/Dockerfile.start. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-start:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: BITBUCKET_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-bitbucket - - name: BITBUCKET_ACCESS_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-bitbucket-auth - - name: CONSOLE_URL - valueFrom: - configMapKeyRef: - key: consoleUrl - name: ods-cluster - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ -f /etc/ssl/certs/private-cert.pem ]; then - cat /etc/pki/tls/certs/ca-bundle.crt /etc/ssl/certs/private-cert.pem > /tekton/home/git-cert.pem - git config --global http.sslCAInfo /tekton/home/git-cert.pem - fi - - # ods-start is built from cmd/start/main.go. 
- ods-start \ - -project=$(params.project) \ - -git-full-ref=$(params.git-full-ref) \ - -url=$(params.url) \ - -pr-key=$(params.pr-key) \ - -pr-base=$(params.pr-base) \ - -http-proxy=$(params.http-proxy) \ - -https-proxy=$(params.https-proxy) \ - -no-proxy=$(params.no-proxy) \ - -submodules=$(params.submodules) \ - -clone-depth=$(params.clone-depth) \ - -pipeline-run-name=$(params.pipeline-run-name) \ - -artifact-source=$(params.artifact-source) - - cp .ods/git-commit-sha $(results.commit.path) - - echo -n "$(params.url)" > $(results.url.path) - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - description: The git repo will be cloned onto the volume backing this workspace - name: source diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go new file mode 100644 index 00000000..d4926dfa --- /dev/null +++ b/test/e2e/common_test.go @@ -0,0 +1,7 @@ +package e2e + +import ( + "flag" +) + +var privateCertFlag = flag.Bool("private-cert", false, "Whether to run tests using a private cert") diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go new file mode 100644 index 00000000..823775c9 --- /dev/null +++ b/test/e2e/main_test.go @@ -0,0 +1,121 @@ +package e2e + +import ( + "log" + "os" + "path/filepath" + "testing" + + "github.com/opendevstack/ods-pipeline/internal/tasktesting" + "github.com/opendevstack/ods-pipeline/pkg/bitbucket" + ott "github.com/opendevstack/ods-pipeline/pkg/odstasktest" + ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun" + tekton "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/homedir" +) + +var ( + clusterConfig *ttr.ClusterConfig + namespaceConfig *ttr.NamespaceConfig + rootPath = "../.." 
+ testdataWorkspacesPath = "testdata/workspaces"
+)
+
+func TestMain(m *testing.M) {
+ os.Exit(testMain(m))
+}
+
+func testMain(m *testing.M) int {
+ cc, err := ttr.StartKinDCluster(
+ ttr.LoadImage(ttr.ImageBuildConfig{
+ Dockerfile: "build/images/Dockerfile.start",
+ ContextDir: rootPath,
+ }),
+ ttr.LoadImage(ttr.ImageBuildConfig{
+ Dockerfile: "build/images/Dockerfile.finish",
+ ContextDir: rootPath,
+ }),
+ ttr.LoadImage(ttr.ImageBuildConfig{
+ Dockerfile: "build/images/Dockerfile.pipeline-manager",
+ ContextDir: rootPath,
+ }),
+ )
+ if err != nil {
+ log.Fatal("Could not start KinD cluster: ", err)
+ }
+ clusterConfig = cc
+ nc, cleanup, err := ttr.SetupTempNamespace(
+ clusterConfig,
+ ott.StartBitbucket(),
+ ott.StartNexus(),
+ ott.InstallODSPipeline(nil),
+ )
+ if err != nil {
+ log.Fatal("Could not set up temporary namespace: ", err)
+ }
+ defer cleanup()
+ namespaceConfig = nc
+ return m.Run()
+}
+
+func newK8sClient(t *testing.T) *kubernetes.Clientset {
+ home := homedir.HomeDir()
+ kubeconfig := filepath.Join(home, ".kube", "config")
+ config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
+ if err != nil {
+ t.Fatal(err)
+ }
+ kubernetesClientset, err := kubernetes.NewForConfig(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return kubernetesClientset
+}
+
+func newTektonClient(t *testing.T) *tekton.Clientset {
+ home := homedir.HomeDir()
+ kubeconfig := filepath.Join(home, ".kube", "config")
+ config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tektonClientSet, err := tekton.NewForConfig(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return tektonClientSet
+}
+
+// initBitbucketRepo initialises a Git repository inside the given workspace,
+// then commits and pushes to Bitbucket.
+// The workspace will also be set up with an ODS context directory in .ods
+// with the given namespace.
+func initBitbucketRepo(t *testing.T, k8sClient kubernetes.Interface, namespace string) ttr.WorkspaceOpt {
+ return func(c *ttr.WorkspaceConfig) error {
+ _ = tasktesting.SetupBitbucketRepo(t, k8sClient, namespace, c.Dir, tasktesting.BitbucketProjectKey, false)
+ return nil
+ }
+}
+
+// withBitbucketSourceWorkspace configures the task run with a workspace named
+// "source", mapped to the directory sourced from sourceDir. The directory is
+// initialised as a Git repository with an ODS context with the given namespace.
+func withBitbucketSourceWorkspace(t *testing.T, sourceDir string, k8sClient kubernetes.Interface, namespace string, opts ...ttr.WorkspaceOpt) ttr.TaskRunOpt {
+ return ott.WithSourceWorkspace(
+ t, sourceDir,
+ append([]ttr.WorkspaceOpt{initBitbucketRepo(t, k8sClient, namespace)}, opts...)...,
+ )
+}
+
+func checkBuildStatus(t *testing.T, c *bitbucket.Client, gitCommit, wantBuildStatus string) {
+ buildStatusPage, err := c.BuildStatusList(gitCommit)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Check the error before dereferencing the page, and guard against an
+ // empty result to avoid an index-out-of-range panic.
+ if len(buildStatusPage.Values) == 0 {
+ t.Fatalf("no build status found for commit %s", gitCommit)
+ }
+ buildStatus := buildStatusPage.Values[0]
+ if buildStatus.State != wantBuildStatus {
+ t.Fatalf("Got: %s, want: %s", buildStatus.State, wantBuildStatus)
+ }
+}
diff --git a/test/e2e/e2e_test.go b/test/e2e/pipeline_run_test.go
similarity index 59%
rename from test/e2e/e2e_test.go
rename to test/e2e/pipeline_run_test.go
index 46fe336b..5a811f73 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/pipeline_run_test.go
@@ -3,8 +3,8 @@ package e2e
 import (
 "context"
 "errors"
- "flag"
 "fmt"
+ "log"
 "os"
 "os/exec"
 "path/filepath"
@@ -14,42 +14,25 @@ import (
 "github.com/opendevstack/ods-pipeline/internal/command"
 "github.com/opendevstack/ods-pipeline/internal/kubernetes"
+ "github.com/opendevstack/ods-pipeline/internal/projectpath"
+ "github.com/opendevstack/ods-pipeline/internal/tasktesting"
 "github.com/opendevstack/ods-pipeline/pkg/bitbucket"
- "github.com/opendevstack/ods-pipeline/pkg/tasktesting"
+ "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun"
 tektonv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
 tekton "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
 corev1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
 k8s "k8s.io/client-go/kubernetes"
 "knative.dev/pkg/apis"
 )
 
-var outsideKindFlag = flag.Bool("outside-kind", false, "Whether to continue if not in KinD cluster")
-var privateCertFlag = flag.Bool("private-cert", false, "Whether to run tests using a private cert")
-
-func TestE2E(t *testing.T) {
- tasktesting.CheckCluster(t, *outsideKindFlag)
- tasktesting.CheckServices(t, []tasktesting.Service{
- tasktesting.Bitbucket, tasktesting.Nexus,
- })
-
- // Setup namespace to run tests in.
- c, ns := tasktesting.Setup(t,
- tasktesting.SetupOpts{
- SourceDir: tasktesting.StorageSourceDir,
- StorageCapacity: tasktesting.StorageCapacity,
- StorageClassName: tasktesting.StorageClassName,
- },
- )
-
- // Cleanup namespace at the end.
- tasktesting.CleanupOnInterrupt(func() { tasktesting.TearDown(t, c, ns) }, t.Logf)
- defer tasktesting.TearDown(t, c, ns)
-
+func TestPipelineRun(t *testing.T) {
+ k8sClient := newK8sClient(t)
 // Create NodePort service which Bitbucket can post its webhook to.
 var nodePort int32 = 30950
- _, err := kubernetes.CreateNodePortService(
- c.KubernetesClientSet,
+ _, err := createNodePortService(
+ k8sClient,
 "ods-pm-nodeport", // NodePort for ODS Pipeline Manager
 map[string]string{
 "app.kubernetes.io/name": "ods-pipeline",
@@ -57,20 +40,24 @@ func TestE2E(t *testing.T) {
 },
 nodePort,
 8080,
- ns,
+ namespaceConfig.Name,
 )
 if err != nil {
 t.Fatal(err)
 }
 
 // Initialize workspace with basic app.
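 // The flow below exercises the full trigger path: the sample app is copied
 // into a temporary workspace, pushed to Bitbucket (running inside the KinD
 // cluster), a webhook pointing at the NodePort service created above is
 // registered, and the test then waits for the pipeline manager to trigger
 // and complete a PipelineRun.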
- wsDir, err := tasktesting.InitWorkspace("source", "hello-world-app") + workspaceSourceDirectory := filepath.Join( + projectpath.Root, "test", testdataWorkspacesPath, "hello-world-app", + ) + wsDir, wsDirCleanupFunc, err := tektontaskrun.SetupWorkspaceDir(workspaceSourceDirectory) + defer wsDirCleanupFunc() if err != nil { t.Fatal(err) } t.Logf("Workspace is in %s", wsDir) odsContext := tasktesting.SetupBitbucketRepo( - t, c.KubernetesClientSet, ns, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag, + t, k8sClient, namespaceConfig.Name, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag, ) // The webhook URL needs to be the address of the KinD control plane on the node port. @@ -83,12 +70,12 @@ func TestE2E(t *testing.T) { // Create webhook in Bitbucket. webhookSecret, err := kubernetes.GetSecretKey( - c.KubernetesClientSet, ns, "ods-bitbucket-webhook", "secret", + k8sClient, namespaceConfig.Name, "ods-bitbucket-webhook", "secret", ) if err != nil { t.Fatalf("could not get Bitbucket webhook secret: %s", err) } - bitbucketClient := tasktesting.BitbucketClientOrFatal(t, c.KubernetesClientSet, ns, *privateCertFlag) + bitbucketClient := tasktesting.BitbucketClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag) _, err = bitbucketClient.WebhookCreate( odsContext.Project, odsContext.Repository, @@ -107,14 +94,20 @@ func TestE2E(t *testing.T) { filename := "ods.yaml" fileContent := ` pipelines: - - tasks: - - name: package-image - taskRef: - kind: Task - name: ods-package-image - workspaces: - - name: source - workspace: shared-workspace` +- tasks: + - name: hello-world + taskSpec: + steps: + - name: message + image: busybox + script: | + echo "hello world" + workingDir: $(workspaces.source.path) + workspaces: + - name: source + workspaces: + - name: source + workspace: shared-workspace` err = os.WriteFile(filepath.Join(wsDir, filename), []byte(fileContent), 0644) if err != nil { @@ -123,28 +116,29 @@ pipelines: requiredService := "ods-pipeline" serviceTimeout := time.Minute t.Logf("Waiting %s for service %s to have ready pods ...\n", serviceTimeout, requiredService) - err = waitForServiceToBeReady(t, c.KubernetesClientSet, ns, requiredService, serviceTimeout) + err = waitForServiceToBeReady(t, k8sClient, namespaceConfig.Name, requiredService, serviceTimeout) if err != nil { t.Fatal(err) } t.Log("Pushing file to Bitbucket ...") - tasktesting.PushFileToBitbucketOrFatal(t, c.KubernetesClientSet, ns, wsDir, "master:feature/test-branch", "ods.yaml") + tasktesting.PushFileToBitbucketOrFatal(t, k8sClient, namespaceConfig.Name, wsDir, "master:feature/test-branch", "ods.yaml") triggerTimeout := time.Minute + tektonClient := newTektonClient(t) t.Logf("Waiting %s for pipeline run to be triggered ...", triggerTimeout) - pr, err := waitForPipelineRunToBeTriggered(c.TektonClientSet, ns, triggerTimeout) + pr, err := waitForPipelineRunToBeTriggered(tektonClient, namespaceConfig.Name, triggerTimeout) if err != nil { t.Fatal(err) } t.Logf("Triggered pipeline run %s\n", pr.Name) runTimeout := 3 * time.Minute t.Logf("Waiting %s for pipeline run to succeed ...", runTimeout) - gotReason, err := waitForPipelineRunToBeDone(c.TektonClientSet, ns, pr.Name, runTimeout) + gotReason, err := waitForPipelineRunToBeDone(tektonClient, namespaceConfig.Name, pr.Name, runTimeout) if err != nil { t.Fatal(err) } if gotReason != "Succeeded" { t.Logf("Want pipeline run reason to be 'Succeeded' but got '%s'", gotReason) - logs, err := pipelineRunLogs(ns, pr.Name) + logs, err := 
pipelineRunLogs(namespaceConfig.Name, pr.Name) if err != nil { t.Fatal(err) } @@ -167,7 +161,7 @@ func waitForServiceToBeReady(t *testing.T, clientset *k8s.Clientset, ns, name st svc = s } time.Sleep(2 * time.Second) - ready, reason, err := kubernetes.ServiceHasReadyPods(clientset, svc) + ready, reason, err := serviceHasReadyPods(clientset, svc) if err != nil { return err } @@ -232,7 +226,7 @@ func waitForPipelineRunToBeDone(clientset *tekton.Clientset, ns, pr string, time func kindControlPlaneIP() (string, error) { stdout, stderr, err := command.RunBuffered( "docker", - []string{"inspect", "-f", "{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}", "kind-control-plane"}, + []string{"inspect", "-f", "{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}", tektontaskrun.KinDName + "-control-plane"}, ) if err != nil { return "", fmt.Errorf("could not get IP address of KinD control plane: %s, err: %s", string(stderr), err) @@ -258,3 +252,68 @@ func tknInstalled() bool { _, err := exec.LookPath("tkn") return err == nil } + +func createNodePortService(clientset k8s.Interface, name string, selectors map[string]string, port, targetPort int32, namespace string) (*corev1.Service, error) { + log.Printf("Create node port service %s", name) + svc, err := clientset.CoreV1().Services(namespace).Create(context.TODO(), + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: corev1.ServiceSpec{ + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + Ports: []corev1.ServicePort{ + { + Name: fmt.Sprintf("%d-%d", port, targetPort), + NodePort: port, + Port: port, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(int(targetPort)), + }, + }, + Selector: selectors, + SessionAffinity: corev1.ServiceAffinityNone, + Type: corev1.ServiceTypeNodePort, + }, + }, metav1.CreateOptions{}) + + return svc, err +} + +// serviceHasReadyPods returns false if no pod is assigned to given service +// or if one or more pods are not "Running" +// or one or more of any pods containers are not "ready". 
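+// A false result is not an error by itself: waitForServiceToBeReady polls
+// this function until the service reports ready, and the returned string
+// carries the reason.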
+func serviceHasReadyPods(clientset *k8s.Clientset, svc *corev1.Service) (bool, string, error) {
+ podList, err := servicePods(clientset, svc)
+ if err != nil {
+ return false, "error", err
+ }
+ for _, pod := range podList.Items {
+ phase := pod.Status.Phase
+ if phase != "Running" {
+ return false, fmt.Sprintf("pod %s is in phase %+v", pod.Name, phase), nil
+ }
+ for _, containerStatus := range pod.Status.ContainerStatuses {
+ if !containerStatus.Ready {
+ return false, fmt.Sprintf("container %s in pod %s is not ready", containerStatus.Name, pod.Name), nil
+ }
+ }
+ }
+ return true, "ok", nil
+}
+
+func servicePods(clientset *k8s.Clientset, svc *corev1.Service) (*corev1.PodList, error) {
+ podClient := clientset.CoreV1().Pods(svc.Namespace)
+ selector := []string{}
+ for key, value := range svc.Spec.Selector {
+ selector = append(selector, fmt.Sprintf("%s=%s", key, value))
+ }
+ pods, err := podClient.List(
+ context.TODO(),
+ metav1.ListOptions{
+ LabelSelector: strings.Join(selector, ","),
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+ return pods.DeepCopy(), nil
+}
diff --git a/test/e2e/task_finish_test.go b/test/e2e/task_finish_test.go
new file mode 100644
index 00000000..2769185e
--- /dev/null
+++ b/test/e2e/task_finish_test.go
@@ -0,0 +1,198 @@
+package e2e
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/opendevstack/ods-pipeline/internal/tasktesting"
+ "github.com/opendevstack/ods-pipeline/pkg/bitbucket"
+ "github.com/opendevstack/ods-pipeline/pkg/nexus"
+ "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt"
+ tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+ "k8s.io/client-go/kubernetes"
+
+ ott "github.com/opendevstack/ods-pipeline/pkg/odstasktest"
+ ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun"
+)
+
+func runFinishTask(opts ...ttr.TaskRunOpt) error {
+ return ttr.RunTask(append([]ttr.TaskRunOpt{
+ ttr.InNamespace(namespaceConfig.Name),
+ ttr.UsingTask("ods-pipeline-finish"),
+ }, opts...)...)
+}
+
+func TestFinishTaskSetsBitbucketStatusToFailed(t *testing.T) {
+ k8sClient := newK8sClient(t)
+ if err := runFinishTask(
+ withBitbucketSourceWorkspace(t, "../testdata/workspaces/hello-world-app-with-artifacts", k8sClient, namespaceConfig.Name),
+ ttr.WithStringParams(map[string]string{
+ "pipeline-run-name": "foo",
+ "aggregate-tasks-status": "None",
+ }),
+ ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) {
+ _, odsContext := ott.GetSourceWorkspaceContext(t, config)
+ bitbucketClient := tasktesting.BitbucketClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag)
+ checkBuildStatus(t, bitbucketClient, odsContext.GitCommitSHA, bitbucket.BuildStatusFailed)
+ }),
+ ); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFinishTaskSetsBitbucketStatusToSuccessfulAndUploadsArtifactsToNexus(t *testing.T) {
+ k8sClient := newK8sClient(t)
+ if err := runFinishTask(
+ ott.WithSourceWorkspace(
+ t,
+ "../testdata/workspaces/hello-world-app-with-artifacts",
+ func(c *ttr.WorkspaceConfig) error {
+ odsContext := tasktesting.SetupBitbucketRepo(
+ t, k8sClient, namespaceConfig.Name, c.Dir, tasktesting.BitbucketProjectKey, *privateCertFlag,
+ )
+ // Pretend there is already a coverage report in Nexus.
+ // This ensures the safeguard is working to avoid duplicate upload.
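+ // For illustration only: the manifest written below should look roughly
+ // like this (field names are an assumption based on the
+ // NewArtifactsManifest arguments, not verified against pkg/pipelinectxt):
+ //
+ // {
+ // "repository": "<value of nexus.TestTemporaryRepository>",
+ // "artifacts": [
+ // { "directory": "<value of pipelinectxt.CodeCoveragesDir>", "name": "coverage.out" }
+ // ]
+ // }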
+ t.Log("Uploading coverage artifact to Nexus and writing manifest") + nexusClient := tasktesting.NexusClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag) + if _, err := nexusClient.Upload( + nexus.TestTemporaryRepository, + pipelinectxt.ArtifactGroup(odsContext, pipelinectxt.CodeCoveragesDir), + filepath.Join(c.Dir, pipelinectxt.CodeCoveragesPath, "coverage.out"), + ); err != nil { + t.Fatal(err) + } + am := pipelinectxt.NewArtifactsManifest( + nexus.TestTemporaryRepository, + pipelinectxt.ArtifactInfo{ + Directory: pipelinectxt.CodeCoveragesDir, + Name: "coverage.out", + }, + ) + if err := pipelinectxt.WriteJsonArtifact( + am, + filepath.Join(c.Dir, pipelinectxt.ArtifactsPath), + pipelinectxt.ArtifactsManifestFilename, + ); err != nil { + t.Fatal(err) + } + return nil + }, + ), + ttr.WithStringParams(map[string]string{ + "pipeline-run-name": "foo", + "aggregate-tasks-status": "Succeeded", + "artifact-target": nexus.TestTemporaryRepository, + }), + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) { + _, odsContext := ott.GetSourceWorkspaceContext(t, config) + bitbucketClient := tasktesting.BitbucketClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag) + checkBuildStatus(t, bitbucketClient, odsContext.GitCommitSHA, bitbucket.BuildStatusSuccessful) + checkArtifactsAreInNexus(t, k8sClient, odsContext, nexus.TestTemporaryRepository) + + wantLogMsg := "Artifact \"coverage.out\" is already present in Nexus repository" + if !strings.Contains(logs.String(), wantLogMsg) { + t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, logs.String()) + } + }), + ); err != nil { + t.Fatal(err) + } +} + +func TestFinishTaskStopsGracefullyWhenContextCannotBeRead(t *testing.T) { + if err := runFinishTask( + ott.WithSourceWorkspace(t, "../testdata/workspaces/empty"), + ttr.WithStringParams(map[string]string{ + "pipeline-run-name": "foo", + "aggregate-tasks-status": "None", + }), + ttr.ExpectFailure(), + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) { + want := "Unable to continue as pipeline context cannot be read" + if !strings.Contains(logs.String(), want) { + t.Fatalf("Want:\n%s\n\nGot:\n%s", want, logs.String()) + } + }), + ); err != nil { + t.Fatal(err) + } +} + +func checkArtifactsAreInNexus(t *testing.T, k8sClient kubernetes.Interface, odsContext *pipelinectxt.ODSContext, targetRepository string) { + + nexusClient := tasktesting.NexusClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag) + + // List of expected artifacts to have been uploaded to Nexus + artifactsMap := map[string][]string{ + pipelinectxt.XUnitReportsDir: {"report.xml"}, + // exclude coverage as we pretend it has been uploaded earlier already + // pipelinectxt.CodeCoveragesDir: {"coverage.out"}, + pipelinectxt.SonarAnalysisDir: {"analysis-report.md", "issues-report.csv"}, + } + + for artifactsSubDir, files := range artifactsMap { + + filesCountInSubDir := len(artifactsMap[artifactsSubDir]) + + // e.g: "/ODSPIPELINETEST/workspace-190880007/935e5229b084dd60d44a5eddd2d023720ec153c1/xunit-reports" + group := pipelinectxt.ArtifactGroup(odsContext, artifactsSubDir) + + // The test is so fast that, when we reach this line, the artifacts could still being uploaded to Nexus + artifactURLs := waitForArtifacts(t, nexusClient, targetRepository, group, filesCountInSubDir, 5*time.Second) + if len(artifactURLs) != filesCountInSubDir { + t.Fatalf("Got: %d artifacts in subdir %s, want: %d.", len(artifactURLs), artifactsMap[artifactsSubDir], 
 filesCountInSubDir)
+ }
+
+ for _, file := range files {
+
+ // e.g. "http://localhost:8081/repository/ods-pipelines/ODSPIPELINETEST/workspace-866704509/b1415e831b4f5b24612abf24499663ddbff6babb/xunit-reports/report.xml"
+ // note that the "group" value already has a leading slash!
+ url := fmt.Sprintf("%s/repository/%s%s/%s", nexusClient.URL(), targetRepository, group, file)
+
+ if !contains(artifactURLs, url) {
+ t.Fatalf("Artifact %s with URL %+v not found in Nexus under any of the following URLs: %v", file, url, artifactURLs)
+ }
+ }
+
+ }
+}
+
+func waitForArtifacts(t *testing.T, nexusClient *nexus.Client, targetRepository, group string, expectedArtifactsCount int, timeout time.Duration) []string {
+ start := time.Now().UTC()
+ elapsed := time.Since(start)
+ artifactURLs := []string{}
+
+ for elapsed < timeout {
+ // Assign to the outer artifactURLs instead of shadowing it with :=
+ // so that the most recent search result is returned after a timeout.
+ var err error
+ artifactURLs, err = nexusClient.Search(targetRepository, group)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(artifactURLs) == expectedArtifactsCount {
+ return artifactURLs
+ }
+
+ log.Printf("Artifacts are not yet available in Nexus...\n")
+ time.Sleep(1 * time.Second)
+
+ elapsed = time.Since(start)
+ }
+
+ log.Printf("Time out reached.\n")
+ return artifactURLs
+}
+
+// contains checks if a string is present in a slice
+func contains(s []string, str string) bool {
+ for _, v := range s {
+ if v == str {
+ return true
+ }
+ }
+ return false
+}
diff --git a/test/e2e/task_start_test.go b/test/e2e/task_start_test.go
new file mode 100644
index 00000000..91b08f48
--- /dev/null
+++ b/test/e2e/task_start_test.go
@@ -0,0 +1,368 @@
+package e2e
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/opendevstack/ods-pipeline/internal/directory"
+ "github.com/opendevstack/ods-pipeline/internal/projectpath"
+ "github.com/opendevstack/ods-pipeline/internal/tasktesting"
+ "github.com/opendevstack/ods-pipeline/pkg/bitbucket"
+ "github.com/opendevstack/ods-pipeline/pkg/config"
+ "github.com/opendevstack/ods-pipeline/pkg/nexus"
+ "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt"
+ tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+ "k8s.io/client-go/kubernetes"
+ "sigs.k8s.io/yaml"
+
+ ott "github.com/opendevstack/ods-pipeline/pkg/odstasktest"
+ ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun"
+)
+
+func runStartTask(opts ...ttr.TaskRunOpt) error {
+ return ttr.RunTask(append([]ttr.TaskRunOpt{
+ ttr.InNamespace(namespaceConfig.Name),
+ ttr.UsingTask("ods-pipeline-start"),
+ }, opts...)...)
+}
+
+func TestStartTaskClonesRepoAtBranch(t *testing.T) {
+ k8sClient := newK8sClient(t)
+ if err := runStartTask(
+ withBitbucketSourceWorkspace(t, "../testdata/workspaces/hello-world-app", k8sClient, namespaceConfig.Name),
+ func(c *ttr.TaskRunConfig) error {
+ c.Params = append(c.Params, ttr.TektonParamsFromStringParams(map[string]string{
+ "url": bitbucketURLForWorkspace(c.WorkspaceConfigs["source"]),
+ "git-full-ref": "refs/heads/master",
+ "project": tasktesting.BitbucketProjectKey,
+ "pipeline-run-name": "foo",
+ })...)
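+ // TektonParamsFromStringParams converts the plain string map above into
+ // Tekton parameter values, which are then appended to the task run config.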
+ return nil + }, + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) { + wsDir, odsContext := ott.GetSourceWorkspaceContext(t, config) + checkODSContext(t, wsDir, odsContext) + checkFilesExist(t, wsDir, filepath.Join(pipelinectxt.ArtifactsPath, pipelinectxt.ArtifactsManifestFilename)) + bitbucketClient := tasktesting.BitbucketClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag) + checkBuildStatus(t, bitbucketClient, odsContext.GitCommitSHA, bitbucket.BuildStatusInProgress) + }), + ); err != nil { + t.Fatal(err) + } +} + +func TestStartTaskClonesRepoAtTag(t *testing.T) { + k8sClient := newK8sClient(t) + if err := runStartTask( + withBitbucketSourceWorkspace(t, "../testdata/workspaces/hello-world-app", k8sClient, namespaceConfig.Name), + func(c *ttr.TaskRunConfig) error { + wsDir, odsContext := ott.GetSourceWorkspaceContext(t, c) + tasktesting.UpdateBitbucketRepoWithTagOrFatal(t, odsContext, wsDir, "v1.0.0") + c.Params = append(c.Params, ttr.TektonParamsFromStringParams(map[string]string{ + "url": bitbucketURLForWorkspace(c.WorkspaceConfigs["source"]), + "git-full-ref": "refs/tags/v1.0.0", + "project": tasktesting.BitbucketProjectKey, + "pipeline-run-name": "foo", + })...) + return nil + }, + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) { + wsDir, odsContext := ott.GetSourceWorkspaceContext(t, config) + checkODSContext(t, wsDir, odsContext) + }), + ); err != nil { + t.Fatal(err) + } +} + +func TestStartTaskClonesRepoAndSubrepos(t *testing.T) { + var subrepoContext *pipelinectxt.ODSContext + k8sClient := newK8sClient(t) + if err := runStartTask( + ott.WithSourceWorkspace( + t, + "../testdata/workspaces/hello-world-app", + func(c *ttr.WorkspaceConfig) error { + // Setup sub-component + subrepoContext = setupBitbucketRepoWithSubdirOrFatal(t, c, k8sClient) + // Nexus artifacts + nexusClient := tasktesting.NexusClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag) + artifactsBaseDir := filepath.Join(projectpath.Root, "test", testdataWorkspacesPath, "hello-world-app-with-artifacts", pipelinectxt.ArtifactsPath) + _, err := nexusClient.Upload( + nexus.TestTemporaryRepository, + pipelinectxt.ArtifactGroup(subrepoContext, pipelinectxt.XUnitReportsDir), + filepath.Join(artifactsBaseDir, pipelinectxt.XUnitReportsDir, "report.xml"), + ) + if err != nil { + return err + } + _, err = nexusClient.Upload( + nexus.TestTemporaryRepository, + pipelinectxt.ArtifactGroup(subrepoContext, pipelinectxt.PipelineRunsDir), + filepath.Join(artifactsBaseDir, pipelinectxt.PipelineRunsDir, "foo-zh9gt0.json"), + ) + if err != nil { + return err + } + return nil + }, + ), + func(c *ttr.TaskRunConfig) error { + c.Params = append(c.Params, ttr.TektonParamsFromStringParams(map[string]string{ + "url": bitbucketURLForWorkspace(c.WorkspaceConfigs["source"]), + "git-full-ref": "refs/heads/master", + "project": tasktesting.BitbucketProjectKey, + "pipeline-run-name": "foo", + "artifact-source": nexus.TestTemporaryRepository, + })...) 
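+ // artifact-source points at the Nexus repository seeded during workspace
+ // setup above, so the start task can download the subrepo's xunit report
+ // and pipeline run artifact.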
+ return nil + }, + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) { + wsDir, odsContext := ott.GetSourceWorkspaceContext(t, config) + + // Check .ods directory contents of main repo + checkODSContext(t, wsDir, odsContext) + checkFilesExist(t, wsDir, filepath.Join(pipelinectxt.ArtifactsPath, pipelinectxt.ArtifactsManifestFilename)) + + // Check .ods directory contents of subrepo + subrepoDir := filepath.Join(wsDir, pipelinectxt.SubreposPath, subrepoContext.Repository) + checkODSContext(t, subrepoDir, subrepoContext) + + // Check artifacts are downloaded properly in subrepo + sourceArtifactsBaseDir := filepath.Join(projectpath.Root, "test", testdataWorkspacesPath, "hello-world-app-with-artifacts", pipelinectxt.ArtifactsPath) + xUnitFileSource := "xunit-reports/report.xml" + xUnitContent := trimmedFileContentOrFatal(t, filepath.Join(sourceArtifactsBaseDir, xUnitFileSource)) + destinationArtifactsBaseDir := filepath.Join(subrepoDir, pipelinectxt.ArtifactsPath) + checkFileContent(t, destinationArtifactsBaseDir, xUnitFileSource, xUnitContent) + checkFilesExist(t, destinationArtifactsBaseDir, pipelinectxt.ArtifactsManifestFilename) + + bitbucketClient := tasktesting.BitbucketClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag) + checkBuildStatus(t, bitbucketClient, odsContext.GitCommitSHA, bitbucket.BuildStatusInProgress) + }), + ); err != nil { + t.Fatal(err) + } +} + +func TestStartTaskFailsWithoutSuccessfulPipelineRunOfSubrepo(t *testing.T) { + k8sClient := newK8sClient(t) + if err := runStartTask( + ott.WithSourceWorkspace( + t, + "../testdata/workspaces/hello-world-app", + func(c *ttr.WorkspaceConfig) error { + _ = setupBitbucketRepoWithSubdirOrFatal(t, c, k8sClient) + return nil + }, + ), + func(c *ttr.TaskRunConfig) error { + c.Params = append(c.Params, ttr.TektonParamsFromStringParams(map[string]string{ + "url": bitbucketURLForWorkspace(c.WorkspaceConfigs["source"]), + "git-full-ref": "refs/heads/master", + "project": tasktesting.BitbucketProjectKey, + "pipeline-run-name": "foo", + "artifact-source": "empty-repo", + })...) 
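+ // "empty-repo" names an artifact source that contains no successful
+ // pipeline run artifact for the subrepo commit, so this task run is
+ // expected to fail (see ttr.ExpectFailure below).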
+ return nil + }, + ttr.ExpectFailure(), + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) { + want := "Pipeline runs with subrepos require a successful pipeline run artifact " + + "for all checked out subrepo commits, however no such artifact was found" + + if !strings.Contains(logs.String(), want) { + t.Fatalf("Want:\n%s\n\nGot:\n%s", want, logs.String()) + } + }), + ); err != nil { + t.Fatal(err) + } +} + +func TestStartTaskClonesUsingLFS(t *testing.T) { + var lfsFilename string + var lfsFileHash [32]byte + k8sClient := newK8sClient(t) + if err := runStartTask( + ott.WithSourceWorkspace( + t, + "../testdata/workspaces/hello-world-app", + func(c *ttr.WorkspaceConfig) error { + odsContext := tasktesting.SetupBitbucketRepo( + t, k8sClient, namespaceConfig.Name, c.Dir, tasktesting.BitbucketProjectKey, *privateCertFlag, + ) + tasktesting.EnableLfsOnBitbucketRepoOrFatal(t, filepath.Base(c.Dir), tasktesting.BitbucketProjectKey) + lfsFilename = "lfspicture.jpg" + lfsFileHash = tasktesting.UpdateBitbucketRepoWithLfsOrFatal(t, odsContext, c.Dir, tasktesting.BitbucketProjectKey, lfsFilename) + return nil + }, + ), + func(c *ttr.TaskRunConfig) error { + c.Params = append(c.Params, ttr.TektonParamsFromStringParams(map[string]string{ + "url": bitbucketURLForWorkspace(c.WorkspaceConfigs["source"]), + "git-full-ref": "refs/heads/master", + "project": tasktesting.BitbucketProjectKey, + "pipeline-run-name": "foo", + })...) + return nil + }, + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) { + wsDir, odsContext := ott.GetSourceWorkspaceContext(t, config) + checkODSContext(t, wsDir, odsContext) + checkFileHash(t, wsDir, lfsFilename, lfsFileHash) + }), + ); err != nil { + t.Fatal(err) + } +} + +// func TestStartTaskUsesPrivateCert(t *testing.T) { +// k8sClient := newK8sClient(t) +// nc, cleanup, err := ttr.SetupTempNamespace( +// clusterConfig, +// ott.StartBitbucket(), +// ott.StartNexus(), +// ott.InstallODSPipeline(&ott.InstallOptions{PrivateCert: true}), +// ) +// if err != nil { +// t.Fatal(err) +// } +// defer cleanup() +// if err := runStartTask( +// withBitbucketSourceWorkspace(t, "../testdata/workspaces/hello-world-app", k8sClient, nc.Name), +// func(c *ttr.TaskRunConfig) error { +// c.Params = append(c.Params, ttr.TektonParamsFromStringParams(map[string]string{ +// "url": bitbucketURLForWorkspace(c.WorkspaceConfigs["source"]), +// "git-full-ref": "refs/heads/master", +// "project": tasktesting.BitbucketProjectKey, +// "pipeline-run-name": "foo", +// })...) 
+// return nil +// }, +// ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) { +// wsDir, odsContext := ott.GetSourceWorkspaceContext(t, config) +// checkODSContext(t, wsDir, odsContext) +// checkFilesExist(t, wsDir, filepath.Join(pipelinectxt.ArtifactsPath, pipelinectxt.ArtifactsManifestFilename)) +// bitbucketClient := tasktesting.BitbucketClientOrFatal(t, k8sClient, nc.Name, *privateCertFlag) +// checkBuildStatus(t, bitbucketClient, odsContext.GitCommitSHA, bitbucket.BuildStatusInProgress) +// }), +// ); err != nil { +// t.Fatal(err) +// } +// } + +func setupBitbucketRepoWithSubdirOrFatal(t *testing.T, c *ttr.WorkspaceConfig, k8sClient kubernetes.Interface) *pipelinectxt.ODSContext { + // Setup sub-component + tempDir, err := directory.CopyToTempDir( + filepath.Join(projectpath.Root, "test", testdataWorkspacesPath, "hello-world-app"), + c.Dir, + "subcomponent-", + ) + if err != nil { + t.Fatal(err) + } + subCtxt := tasktesting.SetupBitbucketRepo( + t, k8sClient, namespaceConfig.Name, tempDir, tasktesting.BitbucketProjectKey, *privateCertFlag, + ) + err = os.RemoveAll(tempDir) + if err != nil { + t.Fatal(err) + } + err = createStartODSYMLWithSubrepo(c.Dir, filepath.Base(tempDir)) + if err != nil { + t.Fatal(err) + } + _ = tasktesting.SetupBitbucketRepo( + t, k8sClient, namespaceConfig.Name, c.Dir, tasktesting.BitbucketProjectKey, *privateCertFlag, + ) + return subCtxt +} + +func bitbucketURLForWorkspace(c *ttr.WorkspaceConfig) string { + bbURL := "http://ods-test-bitbucket-server.kind:7990" + repoName := filepath.Base(c.Dir) + return fmt.Sprintf("%s/scm/%s/%s.git", bbURL, tasktesting.BitbucketProjectKey, repoName) +} + +func createStartODSYMLWithSubrepo(wsDir, repo string) error { + o := &config.ODS{Repositories: []config.Repository{{Name: repo}}} + return createODSYML(wsDir, o) +} + +func createODSYML(wsDir string, o *config.ODS) error { + y, err := yaml.Marshal(o) + if err != nil { + return err + } + filename := filepath.Join(wsDir, "ods.yaml") + return os.WriteFile(filename, y, 0644) +} + +func checkFileHash(t *testing.T, wsDir string, filename string, hash [32]byte) { + filepath := filepath.Join(wsDir, filename) + filecontent, err := os.ReadFile(filepath) + if err != nil { + t.Fatalf("Want %s, but got nothing", filename) + } + filehash := sha256.Sum256(filecontent) + if filehash != hash { + t.Fatalf("Want %x, but got %x", hash, filehash) + } +} + +func checkODSContext(t *testing.T, repoDir string, want *pipelinectxt.ODSContext) { + checkODSFileContent(t, repoDir, "component", want.Component) + checkODSFileContent(t, repoDir, "git-commit-sha", want.GitCommitSHA) + checkODSFileContent(t, repoDir, "git-full-ref", want.GitFullRef) + checkODSFileContent(t, repoDir, "git-ref", want.GitRef) + checkODSFileContent(t, repoDir, "git-url", want.GitURL) + checkODSFileContent(t, repoDir, "namespace", want.Namespace) + checkODSFileContent(t, repoDir, "pr-base", want.PullRequestBase) + checkODSFileContent(t, repoDir, "pr-key", want.PullRequestKey) + checkODSFileContent(t, repoDir, "project", want.Project) + checkODSFileContent(t, repoDir, "repository", want.Repository) +} + +func checkODSFileContent(t *testing.T, wsDir, filename, want string) { + checkFileContent(t, filepath.Join(wsDir, pipelinectxt.BaseDir), filename, want) +} + +func checkFileContent(t *testing.T, wsDir, filename, want string) { + got, err := getTrimmedFileContent(filepath.Join(wsDir, filename)) + if err != nil { + t.Fatalf("could not read %s: %s", filename, err) + } + if got != want { + 
t.Fatalf("got '%s', want '%s' in file %s", got, want, filename) + } +} + +func checkFilesExist(t *testing.T, wsDir string, wantFiles ...string) { + for _, wf := range wantFiles { + filename := filepath.Join(wsDir, wf) + if _, err := os.Stat(filename); os.IsNotExist(err) { + t.Fatalf("Want %s, but got nothing", filename) + } + } +} + +func getTrimmedFileContent(filename string) (string, error) { + content, err := os.ReadFile(filename) + if err != nil { + return "", err + } + return strings.TrimSpace(string(content)), nil +} + +func trimmedFileContentOrFatal(t *testing.T, filename string) string { + c, err := getTrimmedFileContent(filename) + if err != nil { + t.Fatal(err) + } + return c +} diff --git a/test/tasks/common_test.go b/test/tasks/common_test.go deleted file mode 100644 index 2e9ebb27..00000000 --- a/test/tasks/common_test.go +++ /dev/null @@ -1,257 +0,0 @@ -package tasks - -import ( - "crypto/sha256" - "flag" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/opendevstack/ods-pipeline/internal/directory" - "github.com/opendevstack/ods-pipeline/internal/kubernetes" - "github.com/opendevstack/ods-pipeline/internal/projectpath" - "github.com/opendevstack/ods-pipeline/pkg/bitbucket" - "github.com/opendevstack/ods-pipeline/pkg/config" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/sonar" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" - "golang.org/x/exp/slices" - kclient "k8s.io/client-go/kubernetes" - "sigs.k8s.io/yaml" -) - -var alwaysKeepTmpWorkspacesFlag = flag.Bool("always-keep-tmp-workspaces", false, "Whether to keep temporary workspaces from taskruns even when test is successful") -var outsideKindFlag = flag.Bool("outside-kind", false, "Whether to continue if not in KinD cluster") -var skipSonarQubeFlag = flag.Bool("skip-sonar", false, "Whether to skip SonarQube steps") -var privateCertFlag = flag.Bool("private-cert", false, "Whether to run tests using a private cert") - -const ( - taskKindRef = "Task" - nexusPermanentRepository = "ods-permanent-artifacts" - nexusTemporaryRepository = "ods-temporary-artifacts" -) - -// buildTaskParams forces all SonarQube params to be "falsy" -// if the skipSonarQubeFlag is set. -func buildTaskParams(p map[string]string) map[string]string { - if *skipSonarQubeFlag { - p["sonar-skip"] = "true" - p["sonar-quality-gate"] = "false" - } - return p -} - -// requiredServices takes a variable amount of services and removes -// SonarQube from the resulting slice if the skipSonarQubeFlag is set. -func requiredServices(s ...tasktesting.Service) []tasktesting.Service { - requiredServices := append([]tasktesting.Service{}, s...) 
- sqIndex := slices.Index(requiredServices, tasktesting.SonarQube) - if sqIndex != -1 && *skipSonarQubeFlag { - requiredServices = slices.Delete(requiredServices, sqIndex, sqIndex+1) - } - return requiredServices -} - -func checkODSContext(t *testing.T, repoDir string, want *pipelinectxt.ODSContext) { - checkODSFileContent(t, repoDir, "component", want.Component) - checkODSFileContent(t, repoDir, "git-commit-sha", want.GitCommitSHA) - checkODSFileContent(t, repoDir, "git-full-ref", want.GitFullRef) - checkODSFileContent(t, repoDir, "git-ref", want.GitRef) - checkODSFileContent(t, repoDir, "git-url", want.GitURL) - checkODSFileContent(t, repoDir, "namespace", want.Namespace) - checkODSFileContent(t, repoDir, "pr-base", want.PullRequestBase) - checkODSFileContent(t, repoDir, "pr-key", want.PullRequestKey) - checkODSFileContent(t, repoDir, "project", want.Project) - checkODSFileContent(t, repoDir, "repository", want.Repository) -} - -func checkODSFileContent(t *testing.T, wsDir, filename, want string) { - checkFileContent(t, filepath.Join(wsDir, pipelinectxt.BaseDir), filename, want) -} - -func checkFileContent(t *testing.T, wsDir, filename, want string) { - got, err := getTrimmedFileContent(filepath.Join(wsDir, filename)) - if err != nil { - t.Fatalf("could not read %s: %s", filename, err) - } - if got != want { - t.Fatalf("got '%s', want '%s' in file %s", got, want, filename) - } -} - -func checkFilesExist(t *testing.T, wsDir string, wantFiles ...string) { - for _, wf := range wantFiles { - filename := filepath.Join(wsDir, wf) - if _, err := os.Stat(filename); os.IsNotExist(err) { - t.Fatalf("Want %s, but got nothing", filename) - } - } -} - -func checkFileHash(t *testing.T, wsDir string, filename string, hash [32]byte) { - filepath := filepath.Join(wsDir, filename) - filecontent, err := os.ReadFile(filepath) - if err != nil { - t.Fatalf("Want %s, but got nothing", filename) - } - filehash := sha256.Sum256(filecontent) - if filehash != hash { - t.Fatalf("Want %x, but got %x", hash, filehash) - } -} - -func getTrimmedFileContent(filename string) (string, error) { - content, err := os.ReadFile(filename) - if err != nil { - return "", err - } - return strings.TrimSpace(string(content)), nil -} - -func trimmedFileContentOrFatal(t *testing.T, filename string) string { - c, err := getTrimmedFileContent(filename) - if err != nil { - t.Fatal(err) - } - return c -} - -func checkFileContentContains(t *testing.T, wsDir, filename string, wantContains ...string) { - content, err := os.ReadFile(filepath.Join(wsDir, filename)) - got := string(content) - if err != nil { - t.Fatalf("could not read %s: %s", filename, err) - } - for _, w := range wantContains { - if !strings.Contains(got, w) { - t.Fatalf("got '%s', want '%s' contained in file %s", got, w, filename) - } - } -} - -func checkFileContentLeanContains(t *testing.T, wsDir, filename string, wantContains string) { - got, err := getFileContentLean(filepath.Join(wsDir, filename)) - if err != nil { - t.Fatalf("could not read %s: %s", filename, err) - } - if !strings.Contains(got, wantContains) { - t.Fatalf("got '%s', want '%s' contained in file %s", got, wantContains, filename) - } -} - -func getFileContentLean(filename string) (string, error) { - content, err := os.ReadFile(filename) - if err != nil { - return "", err - } - - contentStr := strings.ReplaceAll(string(content), "\t", "") - contentStr = strings.ReplaceAll(contentStr, "\n", "") - contentStr = strings.ReplaceAll(contentStr, " ", "") - - return contentStr, nil -} - -func runTaskTestCases(t 
*testing.T, taskName string, requiredServices []tasktesting.Service, testCases map[string]tasktesting.TestCase) { - tasktesting.CheckCluster(t, *outsideKindFlag) - if len(requiredServices) != 0 { - tasktesting.CheckServices(t, requiredServices) - } - - c, ns := tasktesting.Setup(t, - tasktesting.SetupOpts{ - SourceDir: tasktesting.StorageSourceDir, - StorageCapacity: tasktesting.StorageCapacity, - StorageClassName: tasktesting.StorageClassName, - PrivateCert: *privateCertFlag, - }, - ) - - tasktesting.CleanupOnInterrupt(func() { tasktesting.TearDown(t, c, ns) }, t.Logf) - defer tasktesting.TearDown(t, c, ns) - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - tn := taskName - if tc.Timeout == 0 { - tc.Timeout = 5 * time.Minute - } - tasktesting.Run(t, tc, tasktesting.TestOpts{ - TaskKindRef: taskKindRef, - TaskName: tn, - Clients: c, - Namespace: ns, - Timeout: tc.Timeout, - AlwaysKeepTmpWorkspaces: *alwaysKeepTmpWorkspacesFlag, - }) - }) - } -} - -func checkSonarQualityGate(t *testing.T, c *kclient.Clientset, namespace, sonarProject string, qualityGateFlag bool, wantQualityGateStatus string) { - - sonarToken, err := kubernetes.GetSecretKey(c, namespace, "ods-sonar-auth", "password") - if err != nil { - t.Fatalf("could not get SonarQube token: %s", err) - } - - sonarClient, err := sonar.NewClient(&sonar.ClientConfig{ - APIToken: sonarToken, - BaseURL: "http://localhost:9000", // use localhost instead of ods-test-sonarqube.kind! - ServerEdition: "community", - }) - if err != nil { - t.Fatalf("sonar client: %s", err) - } - - if qualityGateFlag { - qualityGateResult, err := sonarClient.QualityGateGet( - sonar.QualityGateGetParams{ProjectKey: sonarProject}, - ) - if err != nil { - t.Fatal(err) - } - actualStatus := qualityGateResult.ProjectStatus.Status - if actualStatus != wantQualityGateStatus { - t.Fatalf("Got: %s, want: %s", actualStatus, wantQualityGateStatus) - } - - } - -} - -func createODSYML(wsDir string, o *config.ODS) error { - y, err := yaml.Marshal(o) - if err != nil { - return err - } - filename := filepath.Join(wsDir, "ods.yaml") - return os.WriteFile(filename, y, 0644) -} - -func checkBuildStatus(t *testing.T, c *bitbucket.Client, gitCommit, wantBuildStatus string) { - buildStatusPage, err := c.BuildStatusList(gitCommit) - buildStatus := buildStatusPage.Values[0] - if err != nil { - t.Fatal(err) - } - if buildStatus.State != wantBuildStatus { - t.Fatalf("Got: %s, want: %s", buildStatus.State, wantBuildStatus) - } -} - -func createAppInSubDirectory(t *testing.T, wsDir string, subdir string, sampleApp string) { - err := os.MkdirAll(filepath.Join(wsDir, subdir), 0755) - if err != nil { - t.Fatal(err) - } - err = directory.Copy( - filepath.Join(projectpath.Root, "test", tasktesting.TestdataWorkspacesPath, sampleApp), - filepath.Join(wsDir, subdir), - ) - if err != nil { - t.Fatal(err) - } -} diff --git a/test/tasks/ods-aqua-scan_test.go b/test/tasks/ods-aqua-scan_test.go deleted file mode 100644 index efc74f34..00000000 --- a/test/tasks/ods-aqua-scan_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package tasks - -import ( - "testing" - - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" -) - -func TestTaskODSAquaScan(t *testing.T) { - runTaskTestCases(t, - "ods-aqua-scan", - []tasktesting.Service{}, - map[string]tasktesting.TestCase{ - "task fails without Aqua download URL": { - WorkspaceDirMapping: map[string]string{"source": "empty"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = 
tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - }, - WantRunSuccess: false, - }, - }, - ) -} diff --git a/test/tasks/ods-build-go_test.go b/test/tasks/ods-build-go_test.go deleted file mode 100644 index 5d55f7f8..00000000 --- a/test/tasks/ods-build-go_test.go +++ /dev/null @@ -1,285 +0,0 @@ -package tasks - -import ( - "bytes" - "fmt" - "io" - "path/filepath" - "runtime" - "strings" - "testing" - - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/sonar" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" -) - -func TestTaskODSBuildGo(t *testing.T) { - goProverb := "Don't communicate by sharing memory, share memory by communicating." - runTaskTestCases(t, - "ods-build-go", - requiredServices(tasktesting.SonarQube), - map[string]tasktesting.TestCase{ - "build go app": { - WorkspaceDirMapping: map[string]string{"source": "go-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "go-os": runtime.GOOS, - "go-arch": runtime.GOARCH, - "sonar-quality-gate": "true", - "cache-build": "false", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - "docker/Dockerfile", - "docker/app", - filepath.Join(pipelinectxt.LintReportsPath, "report.txt"), - filepath.Join(pipelinectxt.XUnitReportsPath, "report.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.out"), - ) - - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - sonarProject := sonar.ProjectKey(ctxt.ODS, "") - checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, "OK") - } - - wantLogMsg := "No sonar-project.properties present, using default:" - if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) - } - - b, _, err := command.RunBuffered(wsDir+"/docker/app", []string{}) - if err != nil { - t.Fatal(err) - } - if string(b) != goProverb { - t.Fatalf("Got: %+v, want: %+v.", string(b), goProverb) - } - }, - CleanupFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - cleanModcache(t, wsDir) - }, - }, - "build go app with build caching": { - WorkspaceDirMapping: map[string]string{"source": "go-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "go-os": runtime.GOOS, - "go-arch": runtime.GOARCH, - "sonar-quality-gate": "true", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - "docker/Dockerfile", - "docker/app", - filepath.Join(pipelinectxt.LintReportsPath, "report.txt"), - filepath.Join(pipelinectxt.XUnitReportsPath, "report.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.out"), - ) - - if !*skipSonarQubeFlag { - 
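The "build go app" case above ends by executing the binary the task produced and comparing its output to the Go proverb. A stdlib-only sketch of that final check, with os/exec standing in for the internal command.RunBuffered helper; the binary path matches the one used in the test, everything else is illustrative:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	want := "Don't communicate by sharing memory, share memory by communicating."
	// "./docker/app" is where the build task places the binary in the workspace.
	out, err := exec.Command("./docker/app").Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Compare verbatim, as the test does.
	if string(out) != want {
		fmt.Printf("got %q, want %q\n", string(out), want)
		os.Exit(1)
	}
	fmt.Println("binary prints the expected Go proverb")
}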
checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - - sonarProject := sonar.ProjectKey(ctxt.ODS, "") - checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, "OK") - } - - // This is not available when build skipping as the default is - // supplied on the second repeat. - // Not sure whether the check is significant in the first place. - // wantLogMsg := "No sonar-project.properties present, using default:" - // if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - // t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) - // } - - b, _, err := command.RunBuffered(wsDir+"/docker/app", []string{}) - if err != nil { - t.Fatal(err) - } - if string(b) != goProverb { - t.Fatalf("Got: %+v, want: %+v.", string(b), goProverb) - } - }, - AdditionalRuns: []tasktesting.TaskRunCase{{ - // inherits funcs from primary task only set explicitly - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - // ctxt still in place from prior run - }, - WantRunSuccess: true, - }}, - }, - "build go app in subdirectory": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - // Setup subdir in "monorepo" - subdir := "go-src" - createAppInSubDirectory(t, wsDir, subdir, "go-sample-app") - - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "go-os": runtime.GOOS, - "go-arch": runtime.GOARCH, - "sonar-quality-gate": "true", - "working-dir": subdir, - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - subdir := "go-src" - binary := fmt.Sprintf("%s/docker/app", subdir) - - checkFilesExist(t, wsDir, - fmt.Sprintf("%s/docker/Dockerfile", subdir), - binary, - filepath.Join(pipelinectxt.LintReportsPath, fmt.Sprintf("%s-report.txt", subdir)), - filepath.Join(pipelinectxt.XUnitReportsPath, fmt.Sprintf("%s-report.xml", subdir)), - filepath.Join(pipelinectxt.CodeCoveragesPath, fmt.Sprintf("%s-coverage.out", subdir)), - ) - - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, fmt.Sprintf("%s-analysis-report.md", subdir)), - filepath.Join(pipelinectxt.SonarAnalysisPath, fmt.Sprintf("%s-issues-report.csv", subdir)), - filepath.Join(pipelinectxt.SonarAnalysisPath, fmt.Sprintf("%s-quality-gate.json", subdir)), - ) - sonarProject := sonar.ProjectKey(ctxt.ODS, subdir+"-") - checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, "OK") - } - - b, _, err := command.RunBuffered(filepath.Join(wsDir, binary), []string{}) - if err != nil { - t.Fatal(err) - } - if string(b) != goProverb { - t.Fatalf("Got: %+v, want: %+v.", string(b), goProverb) - } - }, - CleanupFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - cleanModcache(t, wsDir) - }, - AdditionalRuns: []tasktesting.TaskRunCase{{ - // inherits funcs from primary task only set explicitly - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - // ctxt still in place from prior run - }, - WantRunSuccess: true, - }}, - }, - "fail linting go app and generate lint report": { - 
WorkspaceDirMapping: map[string]string{"source": "go-sample-app-lint-error"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "go-os": runtime.GOOS, - "go-arch": runtime.GOARCH, - }) - }, - WantRunSuccess: false, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - wantFile := filepath.Join(pipelinectxt.LintReportsPath, "report.txt") - checkFilesExist(t, wsDir, wantFile) - - wantLintReportContent := "main.go:6:2: printf: fmt.Printf format %s reads arg #1, but call has 0 args (govet)\n\tfmt.Printf(\"Hello World %s\") // lint error on purpose to generate lint report\n\t^" - - checkFileContent(t, wsDir, ".ods/artifacts/lint-reports/report.txt", wantLintReportContent) - }, - CleanupFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - cleanModcache(t, wsDir) - }, - }, - "build go app with pre-test script": { - WorkspaceDirMapping: map[string]string{"source": "go-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "sonar-skip": "true", - "pre-test-script": "pre-test-script.sh", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - wantFile := "docker/test.txt" - checkFilesExist(t, wsDir, wantFile) - }, - CleanupFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - cleanModcache(t, wsDir) - }, - }, - "build go app in PR": { - WorkspaceDirMapping: map[string]string{"source": "go-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - writeContextFile(t, wsDir, "pr-key", "3") - writeContextFile(t, wsDir, "pr-base", "master") - ctxt.Params = buildTaskParams(map[string]string{ - "go-os": runtime.GOOS, - "go-arch": runtime.GOARCH, - // "sonar-quality-gate": "true", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - // No idea yet how to fake PR scanning in SQ ... 
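For context on the "fail linting go app" case above: the recorded report line is a govet printf finding. A minimal program that would trigger the same finding; it mirrors the intent of the go-sample-app-lint-error fixture, whose exact contents are not reproduced here:

package main

import "fmt"

func main() {
	fmt.Printf("Hello World %s") // govet: format %s reads arg #1, but call has 0 args
}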
- // if !*skipSonarQubeFlag { - // sonarProject := sonar.ProjectKey(ctxt.ODS, "") - // checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, "OK") - // } - }, - CleanupFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - cleanModcache(t, wsDir) - }, - }, - }) -} - -func cleanModcache(t *testing.T, workspace string) { - var stderr bytes.Buffer - err := command.Run( - "go", []string{"clean", "-modcache"}, - []string{ - fmt.Sprintf("GOMODCACHE=%s/%s", workspace, ".ods-cache/deps/gomod"), - }, - io.Discard, - &stderr, - ) - if err != nil { - t.Fatalf("could not clean up modcache: %s, stderr: %s", err, stderr.String()) - } -} diff --git a/test/tasks/ods-build-gradle_test.go b/test/tasks/ods-build-gradle_test.go deleted file mode 100644 index 91cde428..00000000 --- a/test/tasks/ods-build-gradle_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package tasks - -import ( - "path/filepath" - "strings" - "testing" - - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" -) - -func TestTaskODSBuildGradle(t *testing.T) { - runTaskTestCases(t, - "ods-build-gradle", - requiredServices(tasktesting.Nexus, tasktesting.SonarQube), - map[string]tasktesting.TestCase{ - "task should build gradle app": { - WorkspaceDirMapping: map[string]string{"source": "gradle-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "sonar-quality-gate": "true", - "cache-build": "false", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - "docker/Dockerfile", - "docker/app.jar", - filepath.Join(pipelinectxt.XUnitReportsPath, "TEST-ods.java.gradle.sample.app.AppTest.xml"), - filepath.Join(pipelinectxt.XUnitReportsPath, "TEST-ods.java.gradle.sample.app.AppTest2.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.xml"), - ) - - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - } - - logContains(t, ctxt.CollectedLogs, - "No sonar-project.properties present, using default:", - "ods-test-nexus", - "Gradle 7.4.2", - "Using GRADLE_OPTS=-Dorg.gradle.jvmargs=-Xmx512M", - "Using GRADLE_USER_HOME=/workspace/source/.ods-cache/deps/gradle", - "To honour the JVM settings for this build a single-use Daemon process will be forked.", - ) - }, - }, - "build gradle app with build caching": { - WorkspaceDirMapping: map[string]string{"source": "gradle-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "sonar-quality-gate": "true", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - "docker/Dockerfile", - "docker/app.jar", - filepath.Join(pipelinectxt.XUnitReportsPath, "TEST-ods.java.gradle.sample.app.AppTest.xml"), - filepath.Join(pipelinectxt.XUnitReportsPath, 
"TEST-ods.java.gradle.sample.app.AppTest2.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.xml"), - ) - - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - } - - logContains(t, ctxt.CollectedLogs, - "No sonar-project.properties present, using default:", - "ods-test-nexus", - "Gradle 7.4.2", - "Using GRADLE_OPTS=-Dorg.gradle.jvmargs=-Xmx512M", - "Using GRADLE_USER_HOME=/workspace/source/.ods-cache/deps/gradle", - "To honour the JVM settings for this build a single-use Daemon process will be forked.", - ) - }, - AdditionalRuns: []tasktesting.TaskRunCase{{ - // inherits funcs from primary task only set explicitly - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - // ctxt still in place from prior run - wsDir := ctxt.Workspaces["source"] - tasktesting.RemoveAll(t, wsDir, "docker/app.jar") - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - "docker/Dockerfile", - "docker/app.jar", - filepath.Join(pipelinectxt.XUnitReportsPath, "TEST-ods.java.gradle.sample.app.AppTest.xml"), - filepath.Join(pipelinectxt.XUnitReportsPath, "TEST-ods.java.gradle.sample.app.AppTest2.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.xml"), - ) - - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - } - - logContains(t, ctxt.CollectedLogs, - "Copying prior ods build artifacts from cache: /workspace/source/.ods-cache/build-task/gradle", - "Copying prior build output from cache: /workspace/source/.ods-cache/build-task/gradle", - ) - }, - }}, - }, - }) -} - -func logContains(t *testing.T, collectedLogs []byte, wantLogMsgs ...string) { - t.Helper() - logString := string(collectedLogs) - - for _, msg := range wantLogMsgs { - if !strings.Contains(logString, msg) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", msg, logString) - } - } - -} diff --git a/test/tasks/ods-build-npm_test.go b/test/tasks/ods-build-npm_test.go deleted file mode 100644 index 71cd2335..00000000 --- a/test/tasks/ods-build-npm_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package tasks - -import ( - "fmt" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/sonar" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" -) - -func TestTaskODSBuildNPM(t *testing.T) { - runTaskTestCases(t, - "ods-build-npm", - requiredServices(tasktesting.Nexus, tasktesting.SonarQube), - map[string]tasktesting.TestCase{ - "build typescript app with SQ scan": { - WorkspaceDirMapping: map[string]string{"source": "typescript-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "sonar-quality-gate": "true", - "cache-build": "false", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - 
filepath.Join(pipelinectxt.XUnitReportsPath, "report.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "clover.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage-final.json"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "lcov.info"), - filepath.Join(pipelinectxt.LintReportsPath, "report.txt"), - "dist/src/index.js", - "node_modules", - "package.json", - "package-lock.json", - ) - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - } - - wantLogMsg := "No sonar-project.properties present, using default:" - if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) - } - - if !*skipSonarQubeFlag { - sonarProject := sonar.ProjectKey(ctxt.ODS, "") - checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, "OK") - } - }, - }, - "build javascript app in subdirectory with build caching": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - // Setup subdir in "monorepo" - subdir := "js-src" - createAppInSubDirectory(t, wsDir, subdir, "javascript-sample-app") - - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "working-dir": subdir, - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - subdir := "js-src" - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.XUnitReportsPath, fmt.Sprintf("%s-report.xml", subdir)), - filepath.Join(pipelinectxt.CodeCoveragesPath, fmt.Sprintf("%s-clover.xml", subdir)), - filepath.Join(pipelinectxt.CodeCoveragesPath, fmt.Sprintf("%s-coverage-final.json", subdir)), - filepath.Join(pipelinectxt.CodeCoveragesPath, fmt.Sprintf("%s-lcov.info", subdir)), - filepath.Join(pipelinectxt.LintReportsPath, fmt.Sprintf("%s-report.txt", subdir)), - fmt.Sprintf("%s/dist/src/index.js", subdir), - fmt.Sprintf("%s/package.json", subdir), - fmt.Sprintf("%s/package-lock.json", subdir), - ) - }, - AdditionalRuns: []tasktesting.TaskRunCase{{ - // inherits funcs from primary task only set explicitly - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - // ctxt still in place from prior run - wsDir := ctxt.Workspaces["source"] - tasktesting.RemoveAll(t, wsDir, "js-src/dist") - tasktesting.RemoveAll(t, wsDir, "js-src/node_modules") - }, - WantRunSuccess: true, - }}, - }, - "fail linting typescript app and generate lint report": { - WorkspaceDirMapping: map[string]string{"source": "typescript-sample-app-lint-error"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - }, - WantRunSuccess: false, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - wantFile := filepath.Join(pipelinectxt.LintReportsPath, "report.txt") - checkFilesExist(t, wsDir, wantFile) - - wantLintReportContent := "/workspace/source/src/index.ts: line 3, col 31, Warning - Unexpected any. Specify a different type. 
(@typescript-eslint/no-explicit-any)\n\n1 problem" - checkFileContentContains(t, wsDir, filepath.Join(pipelinectxt.LintReportsPath, "report.txt"), wantLintReportContent) - }, - }, - "fail pulling image if unsupported node version is specified": { - WorkspaceDirMapping: map[string]string{"source": "javascript-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = map[string]string{ - "node-version": "10", - } - }, - WantSetupFail: true, - }, - "build backend javascript app": { - Timeout: 10 * time.Minute, - WorkspaceDirMapping: map[string]string{"source": "javascript-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "cached-outputs": "node_modules/", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - checkFilesExist(t, wsDir, - "node_modules/", - "package.json", - "package-lock.json", - ) - }, - }, - "build javascript app with custom build directory": { - WorkspaceDirMapping: map[string]string{"source": "javascript-sample-app-build-dir"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "cached-outputs": "build", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - checkFilesExist(t, wsDir, - "build/src/index.js", - "package.json", - "package-lock.json", - ) - }, - }, - "build javascript app using node16": { - WorkspaceDirMapping: map[string]string{"source": "javascript-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = map[string]string{ - "sonar-skip": "true", - "node-version": "16", - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.XUnitReportsPath, "report.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "clover.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage-final.json"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "lcov.info"), - filepath.Join(pipelinectxt.LintReportsPath, "report.txt"), - "dist/src/index.js", - "package.json", - "package-lock.json", - ) - }, - }, - }) -} diff --git a/test/tasks/ods-build-python_test.go b/test/tasks/ods-build-python_test.go deleted file mode 100644 index 35918094..00000000 --- a/test/tasks/ods-build-python_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package tasks - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/sonar" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" -) - -func TestTaskODSBuildPython(t *testing.T) { - runTaskTestCases(t, - "ods-build-python", - requiredServices(tasktesting.Nexus, tasktesting.SonarQube), - map[string]tasktesting.TestCase{ - "build python fastapi app": { - WorkspaceDirMapping: 
map[string]string{"source": "python-fastapi-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "sonar-quality-gate": "true", - "cache-build": "false", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - "src/main.py", - "requirements.txt", - filepath.Join(pipelinectxt.XUnitReportsPath, "report.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.xml"), - ) - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - } - - wantContainsBytes, err := os.ReadFile("../../test/testdata/golden/ods-build-python/excerpt-from-coverage.xml") - if err != nil { - t.Fatal(err) - } - - wantContains := string(wantContainsBytes) - - wantContains = strings.ReplaceAll(wantContains, "\t", "") - wantContains = strings.ReplaceAll(wantContains, "\n", "") - wantContains = strings.ReplaceAll(wantContains, " ", "") - - checkFileContentLeanContains(t, wsDir, filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.xml"), wantContains) - - if !*skipSonarQubeFlag { - sonarProject := sonar.ProjectKey(ctxt.ODS, "") - checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, "OK") - } - - wantLogMsg := "No sonar-project.properties present, using default:" - if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) - } - }, - }, - "build python fastapi app with build caching": { - WorkspaceDirMapping: map[string]string{"source": "python-fastapi-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "sonar-quality-gate": "true", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - "src/main.py", - "requirements.txt", - filepath.Join(pipelinectxt.XUnitReportsPath, "report.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.xml"), - ) - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - } - - wantContainsBytes, err := os.ReadFile("../../test/testdata/golden/ods-build-python/excerpt-from-coverage.xml") - if err != nil { - t.Fatal(err) - } - - wantContains := string(wantContainsBytes) - - wantContains = strings.ReplaceAll(wantContains, "\t", "") - wantContains = strings.ReplaceAll(wantContains, "\n", "") - wantContains = strings.ReplaceAll(wantContains, " ", "") - - checkFileContentLeanContains(t, wsDir, filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.xml"), wantContains) - - if !*skipSonarQubeFlag { - sonarProject := sonar.ProjectKey(ctxt.ODS, "") - checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, "OK") - } - - // 
This is not available when build skipping as the default is - // supplied on the second repeat. - // Not sure whether the check is significant in the first place. - // wantLogMsg := "No sonar-project.properties present, using default:" - // if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - // t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) - // } - }, - AdditionalRuns: []tasktesting.TaskRunCase{{ - // inherits funcs from primary task only set explicitly - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - // ctxt still in place from prior run - }, - WantRunSuccess: true, - }}, - }, - "build python fastapi app in subdirectory": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - // Setup subdir in "monorepo" - subdir := "fastapi-src" - createAppInSubDirectory(t, wsDir, subdir, "python-fastapi-sample-app") - - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "sonar-quality-gate": "true", - "working-dir": subdir, - "cache-build": "true", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - subdir := "fastapi-src" - - checkFilesExist(t, wsDir, - fmt.Sprintf("%s/src/main.py", subdir), - fmt.Sprintf("%s/requirements.txt", subdir), - filepath.Join(pipelinectxt.XUnitReportsPath, fmt.Sprintf("%s-report.xml", subdir)), - filepath.Join(pipelinectxt.CodeCoveragesPath, fmt.Sprintf("%s-coverage.xml", subdir)), - ) - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, fmt.Sprintf("%s-analysis-report.md", subdir)), - filepath.Join(pipelinectxt.SonarAnalysisPath, fmt.Sprintf("%s-issues-report.csv", subdir)), - filepath.Join(pipelinectxt.SonarAnalysisPath, fmt.Sprintf("%s-quality-gate.json", subdir)), - ) - sonarProject := sonar.ProjectKey(ctxt.ODS, subdir+"-") - checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, "OK") - } - }, - }, - "build python fastapi app with pre-test script": { - WorkspaceDirMapping: map[string]string{"source": "python-fastapi-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "pre-test-script": "pre-test-script.sh", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - wantFile := "docker/test.txt" - checkFilesExist(t, wsDir, wantFile) - }, - }, - }) -} diff --git a/test/tasks/ods-deploy-helm_external_test.go b/test/tasks/ods-deploy-helm_external_test.go deleted file mode 100644 index 81830958..00000000 --- a/test/tasks/ods-deploy-helm_external_test.go +++ /dev/null @@ -1,170 +0,0 @@ -//go:build external -// +build external - -package tasks - -import ( - "flag" - "fmt" - "path/filepath" - "testing" - "time" - - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/internal/kubernetes" - "github.com/opendevstack/ods-pipeline/internal/projectpath" - "github.com/opendevstack/ods-pipeline/internal/random" - "github.com/opendevstack/ods-pipeline/pkg/artifact" - "github.com/opendevstack/ods-pipeline/pkg/config" - 
"github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// To test deployment to external cluster, you must provide the token for a -// serviceaccount in an externa cluster, and a matching configuration like this: -// -// TODO: make this part of triggers, and supply -// tasks: -// - name: deploy -// taskRef: -// kind: Task -// name: ods-deploy-helm -// params: -// - name: namespace -// value: foobar -// apiServer: https://api.example.openshift.com:443 -// registryHost: default-route-openshift-image-registry.apps.example.openshiftapps.com -// -// You do not need to specify "apiCredentialsSecret", it is set automatically to -// the secret created from the token given via -external-cluster-token. -// -// The test will not create or delete any namespaces. It will install a Helm -// release into the specified namespace, and delete the release again after the -// test. The Helm release and related resources are prefixed with the temporary -// workspace directory (e.g. "workspace-476709422") so any clashes even in none- -// empty namespace are very unlikely. Nonetheless, it is always recommended to -// use an empty namespace setup solely for the purpose of testing. -var ( - externalClusterTokenFlag = flag.String("external-cluster-token", "", "Token of serviceaccount in external cluster") - externalClusterConfigFlag = flag.String("external-cluster-config", "", "ods.yaml describing external cluster") -) - -func TestTaskODSDeployHelmExternal(t *testing.T) { - var externalEnv *config.Environment - var imageStream string - runTaskTestCases(t, - "ods-deploy-helm", - []tasktesting.Service{}, - map[string]tasktesting.TestCase{ - "external deployment": { - Timeout: 10 * time.Minute, - WorkspaceDirMapping: map[string]string{"source": "helm-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - if *externalClusterConfigFlag == "" || *externalClusterTokenFlag == "" { - t.Fatal( - "-external-cluster-token and -external-cluster-config are required to run this test. 
" + - "Use -short to skip this test.", - ) - } - - t.Log("Create token secret for external cluster") - secret, err := kubernetes.CreateSecret(ctxt.Clients.KubernetesClientSet, ctxt.Namespace, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{Name: "ext"}, - Data: map[string][]byte{ - "token": []byte(*externalClusterTokenFlag), - }, - }) - if err != nil { - t.Fatal(err) - } - - t.Log("Create private key secret for sample app") - createSampleAppPrivateKeySecret(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace) - - t.Log("Read ods.yaml from flag and write into working dir") - externalClusterConfig := *externalClusterConfigFlag - if !filepath.IsAbs(externalClusterConfig) { - externalClusterConfig = filepath.Join(projectpath.Root, externalClusterConfig) - } - o, err := config.ReadFromFile(externalClusterConfig) - if err != nil { - t.Fatal(err) - } - externalEnv := o.Environments[0] - externalEnv.APICredentialsSecret = secret.Name - externalEnv.APIToken = *externalClusterTokenFlag - o.Environments[0] = externalEnv - err = createODSYML(wsDir, o) - if err != nil { - t.Fatal(err) - } - - imageStream = random.PseudoString() - tag := "latest" - fullTag := fmt.Sprintf("localhost:5000/%s/%s:%s", ctxt.Namespace, imageStream, tag) - buildAndPushImageWithLabel(t, ctxt, fullTag, wsDir) - ia := artifact.Image{ - Ref: fmt.Sprintf("kind-registry.kind:5000/%s/%s:%s", ctxt.Namespace, imageStream, tag), - Registry: "kind-registry.kind:5000", - Repository: ctxt.Namespace, - Name: imageStream, - Tag: tag, - Digest: "abc", - } - imageArtifactFilename := fmt.Sprintf("%s.json", imageStream) - err = pipelinectxt.WriteJsonArtifact(ia, filepath.Join(wsDir, pipelinectxt.ImageDigestsPath), imageArtifactFilename) - if err != nil { - t.Fatal(err) - } - - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - t.Log("Check image") - _, _, err := command.Run("skopeo", []string{ - "inspect", - fmt.Sprintf("--registry-token=%s", externalEnv.APIToken), - fmt.Sprintf("docker://%s/%s/%s:%s", externalEnv.RegistryHost, ctxt.Namespace, imageStream, "latest"), - }) - if err != nil { - t.Fatal(err) - } - t.Log("Remove Helm release again") - command.Run("helm", []string{ - fmt.Sprintf("--kube-apiserver=%s", externalEnv.APIServer), - fmt.Sprintf("--kube-token=%s", externalEnv.APIToken), - fmt.Sprintf("--namespace=%s", externalEnv.Namespace), - "uninstall", - ctxt.ODS.Component, - }) - }, - }, - }, - ) -} - -// buildAndPushImageWithLabel builds an image and pushes it to the registry. -// The used image tag equals the Git SHA that is being built, so the task -// will pick up the existing image. -// The image is labelled with "tasktestrun=true" so that it is possible to -// verify that the image has not been rebuild in the task. 
-func buildAndPushImageWithLabel(t *testing.T, ctxt *tasktesting.TaskRunContext, tag string, wsDir string) { - t.Logf("Build image %s ahead of taskrun", tag) - _, stderr, err := command.RunBuffered("docker", []string{ - "build", "--label", "tasktestrun=true", "-t", tag, filepath.Join(wsDir, "docker"), - }) - if err != nil { - t.Fatalf("could not build image: %s, stderr: %s", err, string(stderr)) - } - _, stderr, err = command.RunBuffered("docker", []string{ - "push", tag, - }) - if err != nil { - t.Fatalf("could not push image: %s, stderr: %s", err, string(stderr)) - } -} diff --git a/test/tasks/ods-deploy-helm_test.go b/test/tasks/ods-deploy-helm_test.go deleted file mode 100644 index 70507cbd..00000000 --- a/test/tasks/ods-deploy-helm_test.go +++ /dev/null @@ -1,340 +0,0 @@ -package tasks - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/internal/kubernetes" - "github.com/opendevstack/ods-pipeline/internal/projectpath" - "github.com/opendevstack/ods-pipeline/internal/random" - "github.com/opendevstack/ods-pipeline/pkg/artifact" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - k8s "k8s.io/client-go/kubernetes" - "sigs.k8s.io/yaml" -) - -const ( - localRegistry = "localhost:5000" - kindRegistry = "kind-registry.kind:5000" -) - -type imageImportParams struct { - externalRef string - namespace string - workdir string -} - -func TestTaskODSDeployHelm(t *testing.T) { - var separateReleaseNamespace string - runTaskTestCases(t, - "ods-deploy-helm", - []tasktesting.Service{}, - map[string]tasktesting.TestCase{ - "skips when no namespace is given": { - WorkspaceDirMapping: map[string]string{"source": "helm-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - // no "namespace" param set - }, - WantRunSuccess: true, - }, - "upgrades Helm chart in separate namespace": { - WorkspaceDirMapping: map[string]string{"source": "helm-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - - externalNamespace, cleanupFunc := createReleaseNamespaceOrFatal( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, - ) - separateReleaseNamespace = externalNamespace - ctxt.Cleanup = cleanupFunc - ctxt.Params = map[string]string{ - "namespace": externalNamespace, - } - importImage(t, imageImportParams{ - externalRef: "index.docker.io/crccheck/hello-world", - namespace: ctxt.Namespace, - workdir: wsDir, - }) - createSampleAppPrivateKeySecret(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - checkFileContentContains( - t, wsDir, - filepath.Join(pipelinectxt.DeploymentsPath, fmt.Sprintf("diff-%s.txt", separateReleaseNamespace)), - "Release was not present in Helm. 
Diff will show entire contents as new.", - "Deployment (apps) has been added", - "Secret (v1) has been added", - "Service (v1) has been added", - ) - checkFileContentContains( - t, wsDir, - filepath.Join(pipelinectxt.DeploymentsPath, fmt.Sprintf("release-%s.txt", separateReleaseNamespace)), - "Installing it now.", - fmt.Sprintf("NAMESPACE: %s", separateReleaseNamespace), - "STATUS: deployed", - "REVISION: 1", - ) - resourceName := fmt.Sprintf("%s-%s", ctxt.ODS.Component, "helm-sample-app") - _, err := checkService(ctxt.Clients.KubernetesClientSet, separateReleaseNamespace, resourceName) - if err != nil { - t.Fatal(err) - } - _, err = checkDeployment(ctxt.Clients.KubernetesClientSet, separateReleaseNamespace, resourceName) - if err != nil { - t.Fatal(err) - } - - // Verify log output massaging - doNotWantLogMsg := "plugin \"diff\" exited with error" - if strings.Contains(string(ctxt.CollectedLogs), doNotWantLogMsg) { - t.Fatalf("Do not want:\n%s\n\nGot:\n%s", doNotWantLogMsg, string(ctxt.CollectedLogs)) - } - wantLogMsg := "identified at least one change" - if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) - } - }, - }, - "upgrades Helm chart with dependencies": { - WorkspaceDirMapping: map[string]string{"source": "helm-app-with-dependencies"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = map[string]string{ - "namespace": ctxt.Namespace, - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - parentChartResourceName := fmt.Sprintf("%s-%s", ctxt.ODS.Component, "helm-app-with-dependencies") - // Parent chart - _, err := checkService(ctxt.Clients.KubernetesClientSet, ctxt.Namespace, parentChartResourceName) - if err != nil { - t.Fatal(err) - } - _, err = checkDeployment(ctxt.Clients.KubernetesClientSet, ctxt.Namespace, parentChartResourceName) - if err != nil { - t.Fatal(err) - } - // Subchart - subChartResourceName := "helm-sample-database" // fixed name due to fullnameOverride - _, err = checkService(ctxt.Clients.KubernetesClientSet, ctxt.Namespace, subChartResourceName) - if err != nil { - t.Fatal(err) - } - d, err := checkDeployment(ctxt.Clients.KubernetesClientSet, ctxt.Namespace, subChartResourceName) - if err != nil { - t.Fatal(err) - } - // Check that Helm value overriding in subchart works - gotEnvValue := d.Spec.Template.Spec.Containers[0].Env[0].Value - wantEnvValue := "tom" // defined in parent (child has value "john") - if gotEnvValue != wantEnvValue { - t.Fatalf("Want ENV username = %s, got: %s", wantEnvValue, gotEnvValue) - } - }, - AdditionalRuns: []tasktesting.TaskRunCase{{ - // inherits funcs from primary task only set explicitly - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - // ctxt still in place from prior run - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wantLogMsg := "No diff detected, skipping helm upgrade" - if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) - } - }, - }}, - }, - "skips upgrade when diff-only is requested": { - WorkspaceDirMapping: map[string]string{"source": "helm-app-with-dependencies"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = 
tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - externalNamespace, cleanupFunc := createReleaseNamespaceOrFatal( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, - ) - separateReleaseNamespace = externalNamespace - ctxt.Cleanup = cleanupFunc - ctxt.Params = map[string]string{ - "namespace": externalNamespace, - "diff-only": "true", - } - importImage(t, imageImportParams{ - externalRef: "index.docker.io/crccheck/hello-world", - namespace: ctxt.Namespace, - workdir: wsDir, - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - t.Log("Verify image was not promoted ...") - img := fmt.Sprintf("%s/%s/hello-world", localRegistry, separateReleaseNamespace) - promoted := checkIfImageExists(t, img) - if promoted { - t.Fatalf("Image %s should not have been promoted to %s", img, separateReleaseNamespace) - } - t.Log("Verify service was not deployed ...") - resourceName := fmt.Sprintf("%s-%s", ctxt.ODS.Component, "helm-app-with-dependencies") - _, err := checkService(ctxt.Clients.KubernetesClientSet, separateReleaseNamespace, resourceName) - if err == nil { - t.Fatalf("Service %s should not have been deployed to %s", resourceName, separateReleaseNamespace) - } - t.Log("Verify task skipped upgrade ...") - wantLogMsg := "Only diff was requested, skipping helm upgrade" - if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) - } - }, - }, - }, - ) -} - -func createSampleAppPrivateKeySecret(t *testing.T, clientset *k8s.Clientset, ctxtNamespace string) { - secret, err := readPrivateKeySecret() - if err != nil { - t.Fatal(err) - } - _, err = kubernetes.CreateSecret(clientset, ctxtNamespace, secret) - if err != nil { - t.Fatal(err) - } -} - -func createReleaseNamespaceOrFatal(t *testing.T, clientset *k8s.Clientset, ctxtNamespace string) (string, func()) { - externalNamespace, err := createReleaseNamespace(clientset, ctxtNamespace) - if err != nil { - t.Fatal(err) - } - return externalNamespace, func() { - if err := clientset.CoreV1().Namespaces().Delete(context.TODO(), externalNamespace, metav1.DeleteOptions{}); err != nil { - t.Errorf("Failed to delete namespace %s: %s", externalNamespace, err) - } - } -} - -func createReleaseNamespace(clientset *k8s.Clientset, ctxtNamespace string) (string, error) { - releaseNamespace := random.PseudoString() - kubernetes.CreateNamespace(clientset, releaseNamespace) - _, err := clientset.RbacV1().RoleBindings(releaseNamespace).Create( - context.Background(), - &rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pipeline-deployer", - Namespace: releaseNamespace, - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Name: "pipeline", - Namespace: ctxtNamespace, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: "edit", - }, - }, - metav1.CreateOptions{}) - - return releaseNamespace, err -} - -func writeContextFile(t *testing.T, wsDir, file, content string) { - err := os.WriteFile( - filepath.Join(wsDir, pipelinectxt.BaseDir, file), []byte(content), 0644, - ) - if err != nil { - t.Fatal(err) - } -} - -func checkDeployment(clientset *k8s.Clientset, namespace, name string) (*appsv1.Deployment, error) { - return clientset.AppsV1(). - Deployments(namespace). - Get(context.TODO(), name, metav1.GetOptions{}) -} - -func checkService(clientset *k8s.Clientset, namespace, name string) (*corev1.Service, error) { - return clientset.CoreV1(). - Services(namespace). 
- Get(context.TODO(), name, metav1.GetOptions{}) -} - -func readPrivateKeySecret() (*corev1.Secret, error) { - bytes, err := os.ReadFile(filepath.Join(projectpath.Root, "test/testdata/fixtures/tasks/secret.yaml")) - if err != nil { - return nil, err - } - - var secretSpec corev1.Secret - err = yaml.Unmarshal(bytes, &secretSpec) - if err != nil { - return nil, err - } - return &secretSpec, nil -} - -func importImage(t *testing.T, iip imageImportParams) { - var err error - cmds := [][]string{ - {"pull", iip.externalRef}, - {"tag", iip.externalRef, iip.internalRef(localRegistry)}, - {"push", iip.internalRef(localRegistry)}, - } - for _, args := range cmds { - if err == nil { - _, _, err = command.RunBuffered("docker", args) - } - } - if err != nil { - t.Fatalf("docker cmd failed: %s", err) - } - - err = pipelinectxt.WriteJsonArtifact(artifact.Image{ - Ref: iip.internalRef(kindRegistry), - Registry: kindRegistry, - Repository: iip.namespace, - Name: iip.name(), - Tag: "latest", - Digest: "not needed", - }, filepath.Join(iip.workdir, pipelinectxt.ImageDigestsPath), fmt.Sprintf("%s.json", iip.name())) - if err != nil { - t.Fatalf("failed to write artifact: %s", err) - } - t.Log("Imported image", iip.internalRef(localRegistry)) -} - -func checkIfImageExists(t *testing.T, name string) bool { - t.Helper() - _, _, err := command.RunBuffered("docker", []string{"inspect", name}) - return err == nil -} - -func (iip imageImportParams) name() string { - parts := strings.Split(iip.externalRef, "/") - return parts[2] -} - -func (iip imageImportParams) internalRef(registry string) string { - parts := strings.Split(iip.externalRef, "/") - return fmt.Sprintf("%s/%s/%s", registry, iip.namespace, parts[2]) -} diff --git a/test/tasks/ods-finish_test.go b/test/tasks/ods-finish_test.go deleted file mode 100644 index e2058e71..00000000 --- a/test/tasks/ods-finish_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package tasks - -import ( - "fmt" - "log" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/opendevstack/ods-pipeline/pkg/bitbucket" - "github.com/opendevstack/ods-pipeline/pkg/nexus" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" -) - -func TestTaskODSFinish(t *testing.T) { - runTaskTestCases(t, - "ods-finish", - []tasktesting.Service{ - tasktesting.Bitbucket, - tasktesting.Nexus, - }, - map[string]tasktesting.TestCase{ - "set bitbucket build status to failed": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app-with-artifacts"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupBitbucketRepo( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag, - ) - ctxt.Params = map[string]string{ - "pipeline-run-name": "foo", - "aggregate-tasks-status": "None", - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - bitbucketClient := tasktesting.BitbucketClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag) - checkBuildStatus(t, bitbucketClient, ctxt.ODS.GitCommitSHA, bitbucket.BuildStatusFailed) - }, - }, - "set bitbucket build status to successful and upload artifacts to Nexus repository": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app-with-artifacts"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = 
tasktesting.SetupBitbucketRepo(
- t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag,
- )
- // Pretend there is already a coverage report in Nexus.
- // This ensures the safeguard to avoid duplicate uploads is working.
- // TODO: ensure the safeguard is actually invoked by checking the logs.
- t.Log("Uploading coverage artifact to Nexus and writing manifest")
- nexusClient := tasktesting.NexusClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag)
- if _, err := nexusClient.Upload(
- nexus.TestTemporaryRepository,
- pipelinectxt.ArtifactGroup(ctxt.ODS, pipelinectxt.CodeCoveragesDir),
- filepath.Join(wsDir, pipelinectxt.CodeCoveragesPath, "coverage.out"),
- ); err != nil {
- t.Fatal(err)
- }
- am := pipelinectxt.NewArtifactsManifest(
- nexus.TestTemporaryRepository,
- pipelinectxt.ArtifactInfo{
- Directory: pipelinectxt.CodeCoveragesDir,
- Name: "coverage.out",
- },
- )
- if err := pipelinectxt.WriteJsonArtifact(
- am,
- filepath.Join(wsDir, pipelinectxt.ArtifactsPath),
- pipelinectxt.ArtifactsManifestFilename,
- ); err != nil {
- t.Fatal(err)
- }
-
- ctxt.Params = map[string]string{
- "pipeline-run-name": "foo",
- "aggregate-tasks-status": "Succeeded",
- "artifact-target": nexus.TestTemporaryRepository,
- }
- },
- WantRunSuccess: true,
- PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
- bitbucketClient := tasktesting.BitbucketClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag)
- checkBuildStatus(t, bitbucketClient, ctxt.ODS.GitCommitSHA, bitbucket.BuildStatusSuccessful)
- checkArtifactsAreInNexus(t, ctxt, nexus.TestTemporaryRepository)
-
- wantLogMsg := "Artifact \"coverage.out\" is already present in Nexus repository"
- if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) {
- t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs))
- }
- },
- },
- "stops gracefully when context cannot be read": {
- WorkspaceDirMapping: map[string]string{"source": "empty"},
- PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
- ctxt.Params = map[string]string{
- "pipeline-run-name": "foo",
- "aggregate-tasks-status": "Failed",
- }
- },
- WantRunSuccess: false,
- PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
- want := "Unable to continue as pipeline context cannot be read"
-
- if !strings.Contains(string(ctxt.CollectedLogs), want) {
- t.Fatalf("Want:\n%s\n\nGot:\n%s", want, string(ctxt.CollectedLogs))
- }
- },
- },
- },
- )
-}
-
-func checkArtifactsAreInNexus(t *testing.T, ctxt *tasktesting.TaskRunContext, targetRepository string) {
-
- nexusClient := tasktesting.NexusClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag)
-
- // List of expected artifacts to have been uploaded to Nexus
- artifactsMap := map[string][]string{
- pipelinectxt.XUnitReportsDir: {"report.xml"},
- // exclude coverage as we pretend it has been uploaded earlier already
- // pipelinectxt.CodeCoveragesDir: {"coverage.out"},
- pipelinectxt.SonarAnalysisDir: {"analysis-report.md", "issues-report.csv"},
- }
-
- for artifactsSubDir, files := range artifactsMap {
-
- filesCountInSubDir := len(artifactsMap[artifactsSubDir])
-
- // e.g.: "/ODSPIPELINETEST/workspace-190880007/935e5229b084dd60d44a5eddd2d023720ec153c1/xunit-reports"
- group := pipelinectxt.ArtifactGroup(ctxt.ODS, artifactsSubDir)
-
- // The test is so fast that, when we reach this line, the artifacts could still be being uploaded to Nexus
- artifactURLs 
-        artifactURLs := waitForArtifacts(t, nexusClient, targetRepository, group, filesCountInSubDir, 5*time.Second)
-        if len(artifactURLs) != filesCountInSubDir {
-            t.Fatalf("Got: %d artifacts in subdir %s, want: %d.", len(artifactURLs), artifactsSubDir, filesCountInSubDir)
-        }
-
-        for _, file := range files {
-
-            // e.g. "http://localhost:8081/repository/ods-pipelines/ODSPIPELINETEST/workspace-866704509/b1415e831b4f5b24612abf24499663ddbff6babb/xunit-reports/report.xml"
-            // note that the "group" value already has a leading slash!
-            url := fmt.Sprintf("%s/repository/%s%s/%s", nexusClient.URL(), targetRepository, group, file)
-
-            if !contains(artifactURLs, url) {
-                t.Fatalf("Artifact %s with URL %+v not found in Nexus under any of the following URLs: %v", file, url, artifactURLs)
-            }
-        }
-
-    }
-}
-
-func waitForArtifacts(t *testing.T, nexusClient *nexus.Client, targetRepository, group string, expectedArtifactsCount int, timeout time.Duration) []string {
-
-    start := time.Now().UTC()
-    elapsed := time.Since(start)
-    artifactURLs := []string{}
-
-    for elapsed < timeout {
-        // Assign with "=" rather than ":=" so the outer artifactURLs is not
-        // shadowed and the last search result is returned after a timeout.
-        var err error
-        artifactURLs, err = nexusClient.Search(targetRepository, group)
-        if err != nil {
-            t.Fatal(err)
-        }
-
-        if len(artifactURLs) == expectedArtifactsCount {
-            return artifactURLs
-        }
-
-        log.Printf("Artifacts are not yet available in Nexus...\n")
-        time.Sleep(1 * time.Second)
-
-        elapsed = time.Since(start)
-    }
-
-    log.Printf("Timeout reached.\n")
-    return artifactURLs
-}
-
-// contains checks if a string is present in a slice.
-func contains(s []string, str string) bool {
-    for _, v := range s {
-        if v == str {
-            return true
-        }
-    }
-
-    return false
-}
diff --git a/test/tasks/ods-package-image_test.go b/test/tasks/ods-package-image_test.go
deleted file mode 100644
index 7a3f1707..00000000
--- a/test/tasks/ods-package-image_test.go
+++ /dev/null
@@ -1,307 +0,0 @@
-package tasks
-
-import (
-    "fmt"
-    "net/url"
-    "os"
-    "path/filepath"
-    "strings"
-    "testing"
-
-    "github.com/google/go-cmp/cmp"
-    "github.com/opendevstack/ods-pipeline/internal/command"
-    "github.com/opendevstack/ods-pipeline/internal/installation"
-    "github.com/opendevstack/ods-pipeline/pkg/artifact"
-    "github.com/opendevstack/ods-pipeline/pkg/logging"
-    "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt"
-    "github.com/opendevstack/ods-pipeline/pkg/tasktesting"
-    "golang.org/x/exp/slices"
-)
-
-func TestTaskODSPackageImage(t *testing.T) {
-    runTaskTestCases(t,
-        "ods-package-image",
-        []tasktesting.Service{
-            tasktesting.Nexus,
-        },
-        map[string]tasktesting.TestCase{
-            "task should build image and use nexus args": {
-                WorkspaceDirMapping: map[string]string{"source": "hello-nexus-app"},
-                PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
-                    wsDir := ctxt.Workspaces["source"]
-                    ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir)
-                },
-                WantRunSuccess: true,
-                PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
-                    wsDir := ctxt.Workspaces["source"]
-                    checkResultingFiles(t, ctxt, wsDir)
-                    checkResultingImageHelloNexus(t, ctxt, wsDir)
-                },
-            },
-            "task should build image": {
-                WorkspaceDirMapping: map[string]string{"source": "hello-world-app"},
-                PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
-                    wsDir := ctxt.Workspaces["source"]
-                    ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir)
-                },
-                WantRunSuccess: true,
-                PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
-                    wsDir := ctxt.Workspaces["source"]
-                    checkResultingFiles(t, ctxt, wsDir)
-                    checkResultingImageHelloWorld(t, ctxt, wsDir)
-                },
-            },
-            "task should build image with additional tags": {
-                WorkspaceDirMapping: map[string]string{"source": "hello-world-app"},
-                PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
-                    wsDir := ctxt.Workspaces["source"]
-                    ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir)
-                    ctxt.Params = map[string]string{
-                        "extra-tags": "'latest cool'",
-                    }
-                },
-                WantRunSuccess: true,
-                PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
-                    wsDir := ctxt.Workspaces["source"]
-                    checkResultingFiles(t, ctxt, wsDir)
-                    checkTagFiles(t, ctxt, wsDir, []string{"latest", "cool"})
-                    checkTags(t, ctxt, wsDir, []string{ctxt.ODS.GitCommitSHA, "latest", "cool"})
-                    checkResultingImageHelloWorld(t, ctxt, wsDir)
-                    checkTaggedImageHelloWorld(t, ctxt, wsDir, "latest")
-                    checkTaggedImageHelloWorld(t, ctxt, wsDir, "cool")
-                },
-            },
-            "task should reuse existing image": {
-                WorkspaceDirMapping: map[string]string{"source": "hello-world-app"},
-                PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
-                    wsDir := ctxt.Workspaces["source"]
-                    ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir)
-                    tag := getDockerImageTag(t, ctxt, wsDir)
-                    generateArtifacts(t, ctxt, tag, wsDir)
-                },
-                WantRunSuccess: true,
-                PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
-                    wsDir := ctxt.Workspaces["source"]
-                    checkResultingFiles(t, ctxt, wsDir)
-                },
-            },
-            "task should build image with build extra args param": {
-                WorkspaceDirMapping: map[string]string{"source": "hello-build-extra-args-app"},
-                TaskParamsMapping: map[string]string{
-                    "buildah-build-extra-args": "'--build-arg=firstArg=one --build-arg=secondArg=two'",
-                    "docker-dir":               "docker",
-                },
-                PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
-                    wsDir := ctxt.Workspaces["source"]
-                    ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir)
-                },
-                WantRunSuccess: true,
-                PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
-                    wsDir := ctxt.Workspaces["source"]
-                    checkResultingFiles(t, ctxt, wsDir)
-                    checkResultingImageHelloBuildExtraArgs(t, ctxt, wsDir)
-                },
-            },
-        },
-    )
-}
-
-func checkResultingFiles(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string) {
-    wantFiles := []string{
-        fmt.Sprintf(".ods/artifacts/image-digests/%s.json", ctxt.ODS.Component),
-        fmt.Sprintf(".ods/artifacts/sboms/%s.spdx", ctxt.ODS.Component),
-    }
-    for _, wf := range wantFiles {
-        if _, err := os.Stat(filepath.Join(wsDir, wf)); os.IsNotExist(err) {
-            t.Fatalf("Want %s, but got nothing", wf)
-        }
-    }
-}
-
-func checkTagFiles(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string, tags []string) {
-    wantFiles := []string{}
-    for _, tag := range tags {
-        wantFiles = append(wantFiles, fmt.Sprintf(".ods/artifacts/image-digests/%s-%s.json", ctxt.ODS.Component, tag))
-    }
-    for _, wf := range wantFiles {
-        if _, err := os.Stat(filepath.Join(wsDir, wf)); os.IsNotExist(err) {
-            t.Fatalf("Want %s, but got nothing", wf)
-        }
-    }
-}
-
-func checkTags(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string, expectedTags []string) {
-    // registry := "kind-registry.kind:5000"
-    registry := "localhost:5000"
-    tlsVerify := false
-    args := []string{
-        "inspect",
-        `--format={{.RepoTags}}`,
-        fmt.Sprintf("--tls-verify=%v", tlsVerify),
-    }
-    imageNsStreamSha := fmt.Sprintf("%s/%s:%s", ctxt.Namespace, ctxt.ODS.Component, ctxt.ODS.GitCommitSHA)
-    imageRef := fmt.Sprintf("docker://%s/%s", registry, imageNsStreamSha)
-    args = append(args, imageRef)
-
-    stdout, _, err := command.RunBuffered("skopeo", args)
-    if err != nil {
-        t.Fatalf("skopeo inspect %s: %s", fmt.Sprint(args), err)
-    }
-    tags, err := parseSkopeoInspectDigestTags(string(stdout))
-    if err != nil {
-        t.Fatalf("parse tags failed: %s", err)
-    }
-    for _, expectedTag := range expectedTags {
-        if !slices.Contains(tags, expectedTag) {
-            t.Fatalf("Expected tags=%s to be in actual tags=%s", fmt.Sprint(expectedTags), fmt.Sprint(tags))
-        }
-    }
-}
-
-func parseSkopeoInspectDigestTags(out string) ([]string, error) {
-    t := strings.TrimSpace(out)
-    if !(strings.HasPrefix(t, "[") && strings.HasSuffix(t, "]")) {
-        return nil, fmt.Errorf("skopeo inspect: unexpected response, expected tags to be enclosed in brackets: %s", t)
-    }
-    t = t[1 : len(t)-1]
-    // expecting t to hold space-separated tags.
-    tags := strings.Split(t, " ")
-    return tags, nil
-}
-
-func runSpecifiedImage(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string, image string) string {
-    stdout, stderr, err := command.RunBuffered("docker", []string{
-        "run", "--rm",
-        image,
-    })
-    if err != nil {
-        t.Fatalf("could not run built image: %s, stderr: %s", err, string(stderr))
-    }
-    got := strings.TrimSpace(string(stdout))
-    return got
-}
-
-func runResultingImage(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string) string {
-    got := runSpecifiedImage(t, ctxt, wsDir, getDockerImageTag(t, ctxt, wsDir))
-    return got
-}
-
-func checkResultingImageHelloWorld(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string) {
-    got := runResultingImage(t, ctxt, wsDir)
-    want := "Hello World"
-    if got != want {
-        t.Fatalf("Want %s, but got %s", want, got)
-    }
-}
-
-func checkTaggedImageHelloWorld(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string, tag string) {
-    image := fmt.Sprintf("localhost:5000/%s/%s:%s", ctxt.Namespace, ctxt.ODS.Component, tag)
-    got := runSpecifiedImage(t, ctxt, wsDir, image)
-    want := "Hello World"
-    if got != want {
-        t.Fatalf("Want %s, but got %s", want, got)
-    }
-}
-
-func checkResultingImageHelloNexus(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string) {
-    got := runResultingImage(t, ctxt, wsDir)
-    gotLines := strings.Split(got, "\n")
-
-    ncc, err := installation.NewNexusClientConfig(
-        ctxt.Clients.KubernetesClientSet, ctxt.Namespace, &logging.LeveledLogger{Level: logging.LevelDebug},
-    )
-    if err != nil {
-        t.Fatalf("could not create Nexus client config: %s", err)
-    }
-
-    // nexusClient := tasktesting.NexusClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace)
-    nexusUrlString := string(ncc.BaseURL)
-    nexusUrl, err := url.Parse(nexusUrlString)
-    if err != nil {
-        t.Fatalf("could not determine nexusUrl from nexusClient: %s", err)
-    }
-
-    wantUsername := "developer"
-    if ncc.Username != wantUsername {
-        t.Fatalf("Want %s, but got %s", wantUsername, ncc.Username)
-    }
-
-    wantSecret := "s3cr3t"
-    if ncc.Password != wantSecret {
-        t.Fatalf("Want %s, but got %s", wantSecret, ncc.Password)
-    }
-
-    want := []string{
-        fmt.Sprintf("nexusUrl=%s", nexusUrlString),
-        fmt.Sprintf("nexusUsername=%s", ncc.Username),
-        fmt.Sprintf("nexusPassword=%s", ncc.Password),
-        fmt.Sprintf("nexusAuth=%s:%s", ncc.Username, ncc.Password),
-        fmt.Sprintf("nexusUrlWithAuth=http://%s:%s@%s", ncc.Username, ncc.Password, nexusUrl.Host),
-        fmt.Sprintf("nexusHost=%s", nexusUrl.Host),
-    }
-    if diff := cmp.Diff(want, gotLines); diff != "" {
-        t.Fatalf("context mismatch (-want +got):\n%s", diff)
-    }
-}
-
-func checkResultingImageHelloBuildExtraArgs(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string) {
-    got := runResultingImage(t, ctxt, wsDir)
-    gotLines := strings.Split(got, "\n")
-
-    want := []string{
-        fmt.Sprintf("firstArg=%s", "one"),
-        fmt.Sprintf("secondArg=%s", "two"),
-    }
-    if diff := cmp.Diff(want, gotLines); diff != "" {
-        t.Fatalf("context mismatch (-want +got):\n%s", diff)
-    }
-}
-
-func getDockerImageTag(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string) string {
-    sha, err := getTrimmedFileContent(filepath.Join(wsDir, ".ods/git-commit-sha"))
-    if err != nil {
-        t.Fatalf("could not read git-commit-sha: %s", err)
-    }
-    return fmt.Sprintf("localhost:5000/%s/%s:%s", ctxt.Namespace, ctxt.ODS.Component, sha)
-}
-
-func generateArtifacts(t *testing.T, ctxt *tasktesting.TaskRunContext, tag string, wsDir string) {
-    t.Logf("Generating artifacts for image %s", tag)
-    generateImageArtifact(t, ctxt, tag, wsDir)
-    generateImageSBOMArtifact(t, ctxt, wsDir)
-}
-
-func generateImageArtifact(t *testing.T, ctxt *tasktesting.TaskRunContext, tag string, wsDir string) {
-    t.Logf("Generating image artifact")
-    sha, err := getTrimmedFileContent(filepath.Join(wsDir, ".ods/git-commit-sha"))
-    if err != nil {
-        t.Fatalf("could not read git-commit-sha: %s", err)
-    }
-    ia := artifact.Image{
-        Ref:        tag,
-        Registry:   "kind-registry.kind:5000",
-        Repository: ctxt.Namespace,
-        Name:       ctxt.ODS.Component,
-        Tag:        sha,
-        Digest:     "abc",
-    }
-    imageArtifactFilename := fmt.Sprintf("%s.json", ctxt.ODS.Component)
-    err = pipelinectxt.WriteJsonArtifact(ia, filepath.Join(wsDir, pipelinectxt.ImageDigestsPath), imageArtifactFilename)
-    if err != nil {
-        t.Fatalf("could not create image artifact: %s", err)
-    }
-}
-
-func generateImageSBOMArtifact(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string) {
-    t.Logf("Generating image SBOM artifact")
-    artifactsDir := filepath.Join(wsDir, pipelinectxt.SBOMsPath)
-    sbomArtifactFilename := fmt.Sprintf("%s.%s", ctxt.ODS.Component, pipelinectxt.SBOMsFormat)
-    err := os.MkdirAll(artifactsDir, 0755)
-    if err != nil {
-        t.Fatalf("could not create %s: %s", artifactsDir, err)
-    }
-    _, err = os.Create(filepath.Join(artifactsDir, sbomArtifactFilename))
-    if err != nil {
-        t.Fatalf("could not create image SBOM artifact: %s", err)
-    }
-}
diff --git a/test/tasks/ods-start_test.go b/test/tasks/ods-start_test.go
deleted file mode 100644
index c4d55fd7..00000000
--- a/test/tasks/ods-start_test.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package tasks
-
-import (
-    "os"
-    "path/filepath"
-    "strings"
-    "testing"
-
-    "github.com/opendevstack/ods-pipeline/internal/directory"
-    "github.com/opendevstack/ods-pipeline/internal/projectpath"
-    "github.com/opendevstack/ods-pipeline/pkg/bitbucket"
-    "github.com/opendevstack/ods-pipeline/pkg/config"
-    "github.com/opendevstack/ods-pipeline/pkg/nexus"
-    "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt"
-    "github.com/opendevstack/ods-pipeline/pkg/tasktesting"
-)
-
-func TestTaskODSStart(t *testing.T) {
-    var subrepoContext *pipelinectxt.ODSContext
-    var lfsFilename string
-    var lfsFileHash [32]byte
-    runTaskTestCases(t,
-        "ods-start",
-        []tasktesting.Service{
-            tasktesting.Bitbucket,
-            tasktesting.Nexus,
-        },
-        map[string]tasktesting.TestCase{
-            "clones repo @ branch": {
-                WorkspaceDirMapping: map[string]string{"source": "hello-world-app"},
-                PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
-                    wsDir := ctxt.Workspaces["source"]
-                    ctxt.ODS = tasktesting.SetupBitbucketRepo(
-                        t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag,
-                    )
-                    ctxt.Params = map[string]string{
-                        "url":               ctxt.ODS.GitURL,
-                        "git-full-ref":      "refs/heads/master",
"refs/heads/master", - "project": ctxt.ODS.Project, - "pipeline-run-name": "foo", - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkODSContext(t, wsDir, ctxt.ODS) - checkFilesExist(t, wsDir, filepath.Join(pipelinectxt.ArtifactsPath, pipelinectxt.ArtifactsManifestFilename)) - - bitbucketClient := tasktesting.BitbucketClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag) - checkBuildStatus(t, bitbucketClient, ctxt.ODS.GitCommitSHA, bitbucket.BuildStatusInProgress) - }, - }, - "clones repo @ tag": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupBitbucketRepo( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag, - ) - tasktesting.UpdateBitbucketRepoWithTagOrFatal(t, ctxt.ODS, wsDir, "v1.0.0") - ctxt.ODS.GitRef = "v1.0.0" - ctxt.ODS.GitFullRef = "refs/tags/v1.0.0" - ctxt.Params = map[string]string{ - "url": ctxt.ODS.GitURL, - "git-full-ref": ctxt.ODS.GitFullRef, - "project": ctxt.ODS.Project, - "pipeline-run-name": "foo", - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - checkODSContext(t, wsDir, ctxt.ODS) - }, - }, - "clones repo and configured subrepos": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - // Setup sub-component - tempDir, err := directory.CopyToTempDir( - filepath.Join(projectpath.Root, "test", tasktesting.TestdataWorkspacesPath, "hello-world-app"), - wsDir, - "subcomponent-", - ) - if err != nil { - t.Fatal(err) - } - subCtxt := tasktesting.SetupBitbucketRepo( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, tempDir, tasktesting.BitbucketProjectKey, *privateCertFlag, - ) - subrepoContext = subCtxt - err = os.RemoveAll(tempDir) - if err != nil { - t.Fatal(err) - } - err = createStartODSYMLWithSubrepo(wsDir, filepath.Base(tempDir)) - if err != nil { - t.Fatal(err) - } - ctxt.ODS = tasktesting.SetupBitbucketRepo( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag, - ) - - nexusClient := tasktesting.NexusClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag) - artifactsBaseDir := filepath.Join(projectpath.Root, "test", tasktesting.TestdataWorkspacesPath, "hello-world-app-with-artifacts", pipelinectxt.ArtifactsPath) - _, err = nexusClient.Upload( - nexus.TestTemporaryRepository, - pipelinectxt.ArtifactGroup(subCtxt, pipelinectxt.XUnitReportsDir), - filepath.Join(artifactsBaseDir, pipelinectxt.XUnitReportsDir, "report.xml"), - ) - if err != nil { - t.Fatal(err) - } - _, err = nexusClient.Upload( - nexus.TestTemporaryRepository, - pipelinectxt.ArtifactGroup(subCtxt, pipelinectxt.PipelineRunsDir), - filepath.Join(artifactsBaseDir, pipelinectxt.PipelineRunsDir, "foo-zh9gt0.json"), - ) - if err != nil { - t.Fatal(err) - } - - ctxt.Params = map[string]string{ - "url": ctxt.ODS.GitURL, - "git-full-ref": "refs/heads/master", - "project": ctxt.ODS.Project, - "pipeline-run-name": "foo", - "artifact-source": nexus.TestTemporaryRepository, - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt 
-                    wsDir := ctxt.Workspaces["source"]
-
-                    // Check .ods directory contents of main repo
-                    checkODSContext(t, wsDir, ctxt.ODS)
-                    checkFilesExist(t, wsDir, filepath.Join(pipelinectxt.ArtifactsPath, pipelinectxt.ArtifactsManifestFilename))
-
-                    // Check .ods directory contents of subrepo
-                    subrepoDir := filepath.Join(wsDir, pipelinectxt.SubreposPath, subrepoContext.Repository)
-                    checkODSContext(t, subrepoDir, subrepoContext)
-
-                    // Check artifacts are downloaded properly in subrepo
-                    sourceArtifactsBaseDir := filepath.Join(projectpath.Root, "test", tasktesting.TestdataWorkspacesPath, "hello-world-app-with-artifacts", pipelinectxt.ArtifactsPath)
-                    xUnitFileSource := "xunit-reports/report.xml"
-                    xUnitContent := trimmedFileContentOrFatal(t, filepath.Join(sourceArtifactsBaseDir, xUnitFileSource))
-                    destinationArtifactsBaseDir := filepath.Join(subrepoDir, pipelinectxt.ArtifactsPath)
-                    checkFileContent(t, destinationArtifactsBaseDir, xUnitFileSource, xUnitContent)
-                    checkFilesExist(t, destinationArtifactsBaseDir, pipelinectxt.ArtifactsManifestFilename)
-
-                    bitbucketClient := tasktesting.BitbucketClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag)
-                    checkBuildStatus(t, bitbucketClient, ctxt.ODS.GitCommitSHA, bitbucket.BuildStatusInProgress)
-
-                },
-            },
-            "fails when subrepo has no successful pipeline run": {
-                WorkspaceDirMapping: map[string]string{"source": "hello-world-app"},
-                PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
-                    wsDir := ctxt.Workspaces["source"]
-                    // Setup sub-component
-                    tempDir, err := directory.CopyToTempDir(
-                        filepath.Join(projectpath.Root, "test", tasktesting.TestdataWorkspacesPath, "hello-world-app"),
-                        wsDir,
-                        "subcomponent-",
-                    )
-                    if err != nil {
-                        t.Fatal(err)
-                    }
-                    tasktesting.SetupBitbucketRepo(
-                        t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, tempDir, tasktesting.BitbucketProjectKey, *privateCertFlag,
-                    )
-                    err = os.RemoveAll(tempDir)
-                    if err != nil {
-                        t.Fatal(err)
-                    }
-                    err = createStartODSYMLWithSubrepo(wsDir, filepath.Base(tempDir))
-                    if err != nil {
-                        t.Fatal(err)
-                    }
-                    ctxt.ODS = tasktesting.SetupBitbucketRepo(
-                        t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag,
-                    )
-                    ctxt.Params = map[string]string{
-                        "url":               ctxt.ODS.GitURL,
-                        "git-full-ref":      "refs/heads/master",
-                        "project":           ctxt.ODS.Project,
-                        "pipeline-run-name": "foo",
-                        "artifact-source":   "empty-repo",
-                    }
-                },
-                WantRunSuccess: false,
-                PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
-                    want := "Pipeline runs with subrepos require a successful pipeline run artifact " +
-                        "for all checked out subrepo commits, however no such artifact was found"
-
-                    if !strings.Contains(string(ctxt.CollectedLogs), want) {
-                        t.Fatalf("Want:\n%s\n\nGot:\n%s", want, string(ctxt.CollectedLogs))
-                    }
-                },
-            },
-            "handles git LFS extension": {
-                WorkspaceDirMapping: map[string]string{"source": "hello-world-app"},
-                PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
-                    wsDir := ctxt.Workspaces["source"]
-                    ctxt.ODS = tasktesting.SetupBitbucketRepo(
-                        t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag,
-                    )
-                    tasktesting.EnableLfsOnBitbucketRepoOrFatal(t, filepath.Base(wsDir), tasktesting.BitbucketProjectKey)
-                    lfsFilename = "lfspicture.jpg"
-                    lfsFileHash = tasktesting.UpdateBitbucketRepoWithLfsOrFatal(t, ctxt.ODS, wsDir, tasktesting.BitbucketProjectKey, lfsFilename)
-
-                    ctxt.Params = map[string]string{
-                        "url":               ctxt.ODS.GitURL,
-                        "git-full-ref":      "refs/heads/master",
-                        "project":           ctxt.ODS.Project,
-                        "pipeline-run-name": "foo",
-                    }
-                },
-                WantRunSuccess: true,
-                PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) {
-                    wsDir := ctxt.Workspaces["source"]
-
-                    checkODSContext(t, wsDir, ctxt.ODS)
-
-                    bitbucketClient := tasktesting.BitbucketClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag)
-                    checkBuildStatus(t, bitbucketClient, ctxt.ODS.GitCommitSHA, bitbucket.BuildStatusInProgress)
-
-                    checkFileHash(t, wsDir, lfsFilename, lfsFileHash)
-                },
-            },
-        },
-    )
-}
-
-func createStartODSYMLWithSubrepo(wsDir, repo string) error {
-    o := &config.ODS{
-        Repositories: []config.Repository{
-            {
-                Name: repo,
-            },
-        },
-    }
-    return createODSYML(wsDir, o)
-}
diff --git a/test/testdata/deploy/cd-kind/.gitignore b/test/testdata/deploy/cd-kind/.gitignore
deleted file mode 100644
index 94548af5..00000000
--- a/test/testdata/deploy/cd-kind/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-*
-*/
-!.gitignore