diff --git a/.gitlab/ci/review.gitlab-ci.yml b/.gitlab/ci/review.gitlab-ci.yml
index f1bd173ff6d15054ca630d87e2e23ee0482e39e8..d3069657e88db65793d792d073781839c367e461 100644
--- a/.gitlab/ci/review.gitlab-ci.yml
+++ b/.gitlab/ci/review.gitlab-ci.yml
@@ -38,7 +38,7 @@ review-build-cng:
     - BUILD_TRIGGER_TOKEN=$REVIEW_APPS_BUILD_TRIGGER_TOKEN ./scripts/trigger-build cng
     # When the job is manual, review-deploy is also manual and we don't want people
     # to have to manually start the jobs in sequence, so we do it for them.
-    - '[ -z $CI_JOB_MANUAL ] || scripts/api/play_job --job-name "review-deploy"'
+    - '[ -z $CI_JOB_MANUAL ] || play_job "review-deploy"'

 .review-workflow-base:
   extends:
@@ -78,8 +78,8 @@ review-deploy:
     - disable_sign_ups || (delete_release && exit 1)
     # When the job is manual, review-qa-smoke is also manual and we don't want people
     # to have to manually start the jobs in sequence, so we do it for them.
-    - '[ -z $CI_JOB_MANUAL ] || scripts/api/play_job --job-name "review-qa-smoke"'
-    - '[ -z $CI_JOB_MANUAL ] || scripts/api/play_job --job-name "review-performance"'
+    - '[ -z $CI_JOB_MANUAL ] || play_job "review-qa-smoke"'
+    - '[ -z $CI_JOB_MANUAL ] || play_job "review-performance"'
   after_script:
     # Run seed-dast-test-data.sh only when DAST_RUN is set to true. This is to populate the review app with data for the DAST scan.
     # Set DAST_RUN to true when jobs are manually scheduled.
diff --git a/.gitlab/ci/test-metadata.gitlab-ci.yml b/.gitlab/ci/test-metadata.gitlab-ci.yml
index aec0a1640f1f9067f4260051fa9f3da8ea2126ce..e4b7047ef71c34f19f6ea4ef8c8918288a225032 100644
--- a/.gitlab/ci/test-metadata.gitlab-ci.yml
+++ b/.gitlab/ci/test-metadata.gitlab-ci.yml
@@ -1,5 +1,6 @@
 .tests-metadata-state:
-  image: ruby:2.7
+  variables:
+    TESTS_METADATA_S3_BUCKET: "gitlab-ce-cache"
   before_script:
     - source scripts/utils.sh
   artifacts:
@@ -16,8 +17,7 @@ retrieve-tests-metadata:
     - .test-metadata:rules:retrieve-tests-metadata
   stage: prepare
   script:
-    - install_gitlab_gem
-    - source ./scripts/rspec_helpers.sh
+    - source scripts/rspec_helpers.sh
     - retrieve_tests_metadata

 update-tests-metadata:
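For reference, a minimal sketch (illustrative only, not part of the patch) of the URL that
`retrieve_tests_metadata` in `scripts/rspec_helpers.sh` below builds from the new
`TESTS_METADATA_S3_BUCKET` variable:

    # Illustrative values; the report path is set elsewhere in the CI configuration.
    TESTS_METADATA_S3_BUCKET="gitlab-ce-cache"
    KNAPSACK_RSPEC_SUITE_REPORT_PATH="knapsack/report-master.json"
    echo "http://${TESTS_METADATA_S3_BUCKET}.s3.amazonaws.com/${KNAPSACK_RSPEC_SUITE_REPORT_PATH}"
    # => http://gitlab-ce-cache.s3.amazonaws.com/knapsack/report-master.json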
diff --git a/doc/development/testing_guide/ci.md b/doc/development/testing_guide/ci.md
index e7d67593a091a357861a07a4385d546b495a44e5..618f9010b4d3f9ae2647f4726647b30501e1702c 100644
--- a/doc/development/testing_guide/ci.md
+++ b/doc/development/testing_guide/ci.md
@@ -12,8 +12,8 @@ Our current CI parallelization setup is as follows:

 1. The `retrieve-tests-metadata` job in the `prepare` stage ensures we have a
    `knapsack/report-master.json` file:
-   - The `knapsack/report-master.json` file is fetched from the latest `master` pipeline which runs `update-tests-metadata`
-     (for now it's the 2-hourly scheduled master pipeline), if it's not here we initialize the file with `{}`.
+   - The `knapsack/report-master.json` file is fetched from S3; if it's not there,
+     we initialize the file with `{}`.
 1. Each `[rspec|rspec-ee] [unit|integration|system|geo] n m` job is run with `knapsack rspec`
    and should have an evenly distributed share of tests:
    - It works because the jobs have access to the `knapsack/report-master.json`
@@ -25,7 +25,7 @@ Our current CI parallelization setup is as follows:
 1. The `update-tests-metadata` job (which only runs on scheduled pipelines for
    [the canonical project](https://gitlab.com/gitlab-org/gitlab)) takes all the
    `knapsack/rspec*_pg_*.json` files and merges them all together into a single
-   `knapsack/report-master.json` file that is saved as artifact.
+   `knapsack/report-master.json` file that is then uploaded to S3.

 After that, the next pipeline will use the up-to-date `knapsack/report-master.json` file.
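For reference, a rough sketch of how one of the parallel jobs consumes the report, using the
`knapsack` gem's standard environment variables (the exact CI invocation differs, and the node
numbers here are hypothetical):

    # Every parallel job reads the same knapsack/report-master.json and uses
    # CI_NODE_INDEX/CI_NODE_TOTAL to pick an evenly weighted slice of the spec files.
    export KNAPSACK_REPORT_PATH="knapsack/report-master.json"
    export CI_NODE_TOTAL=28
    export CI_NODE_INDEX=3
    bundle exec rake knapsack:rspec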
diff --git a/scripts/api/cancel_pipeline b/scripts/api/cancel_pipeline
deleted file mode 100755
index 0965877a69affbb4d771774d5e7677ca69202149..0000000000000000000000000000000000000000
--- a/scripts/api/cancel_pipeline
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env ruby
-# frozen_string_literal: true
-
-require 'rubygems'
-require 'gitlab'
-require 'optparse'
-require_relative 'get_job_id'
-
-class CancelPipeline
-  DEFAULT_OPTIONS = {
-    project: ENV['CI_PROJECT_ID'],
-    pipeline_id: ENV['CI_PIPELINE_ID'],
-    api_token: ENV['GITLAB_BOT_MULTI_PROJECT_PIPELINE_POLLING_TOKEN']
-  }.freeze
-
-  def initialize(options)
-    @project = options.delete(:project)
-    @pipeline_id = options.delete(:pipeline_id)
-
-    Gitlab.configure do |config|
-      config.endpoint = 'https://gitlab.com/api/v4'
-      config.private_token = options.delete(:api_token)
-    end
-  end
-
-  def execute
-    Gitlab.cancel_pipeline(project, pipeline_id)
-  end
-
-  private
-
-  attr_reader :project, :pipeline_id
-end
-
-if $0 == __FILE__
-  options = CancelPipeline::DEFAULT_OPTIONS.dup
-
-  OptionParser.new do |opts|
-    opts.on("-p", "--project PROJECT", String, "Project where to find the job (defaults to $CI_PROJECT_ID)") do |value|
-      options[:project] = value
-    end
-
-    opts.on("-i", "--pipeline-id PIPELINE_ID", String, "A pipeline ID (defaults to $CI_PIPELINE_ID)") do |value|
-      options[:pipeline_id] = value
-    end
-
-    opts.on("-t", "--api-token API_TOKEN", String, "A value API token with the `read_api` scope") do |value|
-      options[:api_token] = value
-    end
-
-    opts.on("-h", "--help", "Prints this help") do
-      puts opts
-      exit
-    end
-  end.parse!
-
-  CancelPipeline.new(options).execute
-end
diff --git a/scripts/api/download_job_artifact b/scripts/api/download_job_artifact
deleted file mode 100755
index 9ac24ff624de87077fbdf2206c908e9a5083e993..0000000000000000000000000000000000000000
--- a/scripts/api/download_job_artifact
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/env ruby
-# frozen_string_literal: true
-
-require 'rubygems'
-require 'optparse'
-require 'fileutils'
-require 'uri'
-require 'cgi'
-require 'net/http'
-
-class ArtifactFinder
-  DEFAULT_OPTIONS = {
-    project: ENV['CI_PROJECT_ID'],
-    api_token: ENV['GITLAB_BOT_MULTI_PROJECT_PIPELINE_POLLING_TOKEN']
-  }.freeze
-
-  def initialize(options)
-    @project = options.delete(:project)
-    @job_id = options.delete(:job_id)
-    @api_token = options.delete(:api_token)
-    @artifact_path = options.delete(:artifact_path)
-  end
-
-  def execute
-    url = "https://gitlab.com/api/v4/projects/#{CGI.escape(project)}/jobs/#{job_id}/artifacts"
-
-    if artifact_path
-      FileUtils.mkdir_p(File.dirname(artifact_path))
-      url += "/#{artifact_path}"
-    end
-
-    fetch(url)
-  end
-
-  private
-
-  attr_reader :project, :job_id, :api_token, :artifact_path
-
-  def fetch(uri_str, limit = 10)
-    raise 'Too many HTTP redirects' if limit == 0
-
-    uri = URI(uri_str)
-    request = Net::HTTP::Get.new(uri)
-    request['Private-Token'] = api_token
-
-    Net::HTTP.start(uri.host, uri.port, use_ssl: true) do |http|
-      http.request(request) do |response|
-        case response
-        when Net::HTTPSuccess then
-          File.open(artifact_path || 'artifacts.zip', 'w') do |file|
-            response.read_body(&file.method(:write))
-          end
-        when Net::HTTPRedirection then
-          location = response['location']
-          warn "Redirected (#{limit - 1} redirections remaining)."
-          fetch(location, limit - 1)
-        else
-          raise "Unexpected response: #{response.value}"
-        end
-      end
-    end
-  end
-end
-
-if $0 == __FILE__
-  options = ArtifactFinder::DEFAULT_OPTIONS.dup
-
-  OptionParser.new do |opts|
-    opts.on("-p", "--project PROJECT", String, "Project where to find the job (defaults to $CI_PROJECT_ID)") do |value|
-      options[:project] = value
-    end
-
-    opts.on("-j", "--job-id JOB_ID", String, "A job ID") do |value|
-      options[:job_id] = value
-    end
-
-    opts.on("-a", "--artifact-path ARTIFACT_PATH", String, "A valid artifact path") do |value|
-      options[:artifact_path] = value
-    end
-
-    opts.on("-t", "--api-token API_TOKEN", String, "A value API token with the `read_api` scope") do |value|
-      options[:api_token] = value
-    end
-
-    opts.on("-h", "--help", "Prints this help") do
-      puts opts
-      exit
-    end
-  end.parse!
-
-  ArtifactFinder.new(options).execute
-end
diff --git a/scripts/api/get_job_id b/scripts/api/get_job_id
deleted file mode 100755
index 2324f6ca9d3a4ee907702ac940d45ccaea533022..0000000000000000000000000000000000000000
--- a/scripts/api/get_job_id
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env ruby
-# frozen_string_literal: true
-
-require 'rubygems'
-require 'gitlab'
-require 'optparse'
-
-class JobFinder
-  DEFAULT_OPTIONS = {
-    project: ENV['CI_PROJECT_ID'],
-    pipeline_id: ENV['CI_PIPELINE_ID'],
-    pipeline_query: {},
-    job_query: {},
-    api_token: ENV['GITLAB_BOT_MULTI_PROJECT_PIPELINE_POLLING_TOKEN']
-  }.freeze
-
-  def initialize(options)
-    @project = options.delete(:project)
-    @pipeline_query = options.delete(:pipeline_query)
-    @job_query = options.delete(:job_query)
-    @pipeline_id = options.delete(:pipeline_id)
-    @job_name = options.delete(:job_name)
-
-    Gitlab.configure do |config|
-      config.endpoint = 'https://gitlab.com/api/v4'
-      config.private_token = options.delete(:api_token)
-    end
-  end
-
-  def execute
-    find_job_with_filtered_pipelines || find_job_in_pipeline
-  end
-
-  private
-
-  attr_reader :project, :pipeline_query, :job_query, :pipeline_id, :job_name
-
-  def find_job_with_filtered_pipelines
-    return if pipeline_query.empty?
-
-    Gitlab.pipelines(project, pipeline_query_params).auto_paginate do |pipeline|
-      Gitlab.pipeline_jobs(project, pipeline.id, job_query_params).auto_paginate do |job|
-        return job if job.name == job_name # rubocop:disable Cop/AvoidReturnFromBlocks
-      end
-    end
-
-    raise 'Job not found!'
-  end
-
-  def find_job_in_pipeline
-    return unless pipeline_id
-
-    Gitlab.pipeline_jobs(project, pipeline_id, job_query_params).auto_paginate do |job|
-      return job if job.name == job_name # rubocop:disable Cop/AvoidReturnFromBlocks
-    end
-
-    raise 'Job not found!'
-  end
-
-  def pipeline_query_params
-    @pipeline_query_params ||= { per_page: 100, **pipeline_query }
-  end
-
-  def job_query_params
-    @job_query_params ||= { per_page: 100, **job_query }
-  end
-end
-
-if $0 == __FILE__
-  options = JobFinder::DEFAULT_OPTIONS.dup
-
-  OptionParser.new do |opts|
-    opts.on("-p", "--project PROJECT", String, "Project where to find the job (defaults to $CI_PROJECT_ID)") do |value|
-      options[:project] = value
-    end
-
-    opts.on("-i", "--pipeline-id pipeline_id", String, "A pipeline ID (defaults to $CI_PIPELINE_ID)") do |value|
-      options[:pipeline_id] = value
-    end
-
-    opts.on("-q", "--pipeline-query pipeline_query", String, "Query to pass to the Pipeline API request") do |value|
-      options[:pipeline_query].merge!(Hash[*value.split('=')])
-    end
-
-    opts.on("-Q", "--job-query job_query", String, "Query to pass to the Job API request") do |value|
-      options[:job_query].merge!(Hash[*value.split('=')])
-    end
-
-    opts.on("-j", "--job-name job_name", String, "A job name that needs to exist in the found pipeline") do |value|
-      options[:job_name] = value
-    end
-
-    opts.on("-t", "--api-token API_TOKEN", String, "A value API token with the `read_api` scope") do |value|
-      options[:api_token] = value
-    end
-
-    opts.on("-h", "--help", "Prints this help") do
-      puts opts
-      exit
-    end
-  end.parse!
-
-  job = JobFinder.new(options).execute
-
-  return if job.nil?
-
-  puts job.id
-end
diff --git a/scripts/api/play_job b/scripts/api/play_job
deleted file mode 100755
index 199f7e656334ca5333a8119d910eca63c9634282..0000000000000000000000000000000000000000
--- a/scripts/api/play_job
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env ruby
-# frozen_string_literal: true
-
-require 'rubygems'
-require 'gitlab'
-require 'optparse'
-require_relative 'get_job_id'
-
-class PlayJob
-  DEFAULT_OPTIONS = {
-    project: ENV['CI_PROJECT_ID'],
-    pipeline_id: ENV['CI_PIPELINE_ID'],
-    api_token: ENV['GITLAB_BOT_MULTI_PROJECT_PIPELINE_POLLING_TOKEN']
-  }.freeze
-
-  def initialize(options)
-    @project = options.delete(:project)
-    @options = options
-
-    Gitlab.configure do |config|
-      config.endpoint = 'https://gitlab.com/api/v4'
-      config.private_token = options.fetch(:api_token)
-    end
-  end
-
-  def execute
-    job = JobFinder.new(project, options.slice(:api_token, :pipeline_id, :job_name).merge(scope: 'manual')).execute
-
-    Gitlab.job_play(project, job.id)
-  end
-
-  private
-
-  attr_reader :project, :options
-end
-
-if $0 == __FILE__
-  options = PlayJob::DEFAULT_OPTIONS.dup
-
-  OptionParser.new do |opts|
-    opts.on("-p", "--project PROJECT", String, "Project where to find the job (defaults to $CI_PROJECT_ID)") do |value|
-      options[:project] = value
-    end
-
-    opts.on("-j", "--job-name JOB_NAME", String, "A job name that needs to exist in the found pipeline") do |value|
-      options[:job_name] = value
-    end
-
-    opts.on("-t", "--api-token API_TOKEN", String, "A value API token with the `read_api` scope") do |value|
-      options[:api_token] = value
-    end
-
-    opts.on("-h", "--help", "Prints this help") do
-      puts opts
-      exit
-    end
-  end.parse!
-
-  PlayJob.new(options).execute
-end
diff --git a/scripts/get-job-id b/scripts/get-job-id
new file mode 100755
index 0000000000000000000000000000000000000000..a5d34dc545b6fd620eda4b168eda0fe63924a2ec
--- /dev/null
+++ b/scripts/get-job-id
@@ -0,0 +1,43 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+require 'gitlab'
+require 'optparse'
+
+#
+# Configure credentials to be used with gitlab gem
+#
+Gitlab.configure do |config|
+  config.endpoint = 'https://gitlab.com/api/v4'
+  config.private_token = ENV['GITLAB_BOT_MULTI_PROJECT_PIPELINE_POLLING_TOKEN']
+end
+
+options = {}
+OptionParser.new do |opts|
+  opts.on("-s", "--scope=SCOPE", "Find job with matching scope") do |scope|
+    options[:scope] = scope
+  end
+end.parse!
+
+class PipelineJobFinder
+  def initialize(project_id, pipeline_id, job_name, options)
+    @project_id = project_id
+    @pipeline_id = pipeline_id
+    @job_name = job_name
+    @options = options
+  end
+
+  def execute
+    Gitlab.pipeline_jobs(@project_id, @pipeline_id, @options).auto_paginate do |job|
+      break job if job.name == @job_name
+    end
+  end
+end
+
+project_id, pipeline_id, job_name = ARGV
+
+job = PipelineJobFinder.new(project_id, pipeline_id, job_name, options).execute
+
+return if job.nil?
+
+puts job.id
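Usage sketch for the new script (the job name is hypothetical; the project ID, pipeline ID,
and job name are positional, and `--scope` is the one flag parsed above):

    # Print the ID of a manual job named "review-deploy" in the current pipeline:
    scripts/get-job-id "$CI_PROJECT_ID" "$CI_PIPELINE_ID" "review-deploy" --scope=manual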
-f "${RSPEC_PACKED_TESTS_MAPPING_PATH}" ]]; then - # (scripts/api/download_job_artifact --project "${project_path}" --job-id "${test_metadata_job_id}" --artifact-path "${RSPEC_PACKED_TESTS_MAPPING_PATH}.gz" && gzip -d "${RSPEC_PACKED_TESTS_MAPPING_PATH}.gz") || echo "{}" > "${RSPEC_PACKED_TESTS_MAPPING_PATH}" - # fi - # - # scripts/unpack-test-mapping "${RSPEC_PACKED_TESTS_MAPPING_PATH}" "${RSPEC_TESTS_MAPPING_PATH}" } function update_tests_metadata() { echo "{}" > "${KNAPSACK_RSPEC_SUITE_REPORT_PATH}" scripts/merge-reports "${KNAPSACK_RSPEC_SUITE_REPORT_PATH}" knapsack/rspec*.json + if [[ -n "${TESTS_METADATA_S3_BUCKET}" ]]; then + if [[ "$CI_PIPELINE_SOURCE" == "schedule" ]]; then + scripts/sync-reports put "${TESTS_METADATA_S3_BUCKET}" "${KNAPSACK_RSPEC_SUITE_REPORT_PATH}" + else + echo "Not uplaoding report to S3 as the pipeline is not a scheduled one." + fi + fi + rm -f knapsack/rspec*.json - export FLAKY_RSPEC_GENERATE_REPORT="true" scripts/merge-reports "${FLAKY_RSPEC_SUITE_REPORT_PATH}" rspec_flaky/all_*.json + + export FLAKY_RSPEC_GENERATE_REPORT="true" scripts/flaky_examples/prune-old-flaky-examples "${FLAKY_RSPEC_SUITE_REPORT_PATH}" + + if [[ -n ${TESTS_METADATA_S3_BUCKET} ]]; then + if [[ "$CI_PIPELINE_SOURCE" == "schedule" ]]; then + scripts/sync-reports put "${TESTS_METADATA_S3_BUCKET}" "${FLAKY_RSPEC_SUITE_REPORT_PATH}" + else + echo "Not uploading report to S3 as the pipeline is not a scheduled one." + fi + fi + rm -f rspec_flaky/all_*.json rspec_flaky/new_*.json if [[ "$CI_PIPELINE_SOURCE" == "schedule" ]]; then @@ -43,6 +48,16 @@ function update_tests_metadata() { fi } +function retrieve_tests_mapping() { + mkdir -p crystalball/ + + if [[ ! -f "${RSPEC_PACKED_TESTS_MAPPING_PATH}" ]]; then + (wget -O "${RSPEC_PACKED_TESTS_MAPPING_PATH}.gz" "http://${TESTS_METADATA_S3_BUCKET}.s3.amazonaws.com/${RSPEC_PACKED_TESTS_MAPPING_PATH}.gz" && gzip -d "${RSPEC_PACKED_TESTS_MAPPING_PATH}.gz") || echo "{}" > "${RSPEC_PACKED_TESTS_MAPPING_PATH}" + fi + + scripts/unpack-test-mapping "${RSPEC_PACKED_TESTS_MAPPING_PATH}" "${RSPEC_TESTS_MAPPING_PATH}" +} + function update_tests_mapping() { if ! crystalball_rspec_data_exists; then echo "No crystalball rspec data found." @@ -50,9 +65,20 @@ function update_tests_mapping() { fi scripts/generate-test-mapping "${RSPEC_TESTS_MAPPING_PATH}" crystalball/rspec*.yml + scripts/pack-test-mapping "${RSPEC_TESTS_MAPPING_PATH}" "${RSPEC_PACKED_TESTS_MAPPING_PATH}" + gzip "${RSPEC_PACKED_TESTS_MAPPING_PATH}" - rm -f crystalball/rspec*.yml "${RSPEC_PACKED_TESTS_MAPPING_PATH}" + + if [[ -n "${TESTS_METADATA_S3_BUCKET}" ]]; then + if [[ "$CI_PIPELINE_SOURCE" == "schedule" ]]; then + scripts/sync-reports put "${TESTS_METADATA_S3_BUCKET}" "${RSPEC_PACKED_TESTS_MAPPING_PATH}.gz" + else + echo "Not uploading report to S3 as the pipeline is not a scheduled one." + fi + fi + + rm -f crystalball/rspec*.yml } function crystalball_rspec_data_exists() { diff --git a/scripts/utils.sh b/scripts/utils.sh index 4d6088e94a8d017a732282821641cd2a44ab5993..3829bcdf24e5339927240a607247c9efb7a8dccb 100644 --- a/scripts/utils.sh +++ b/scripts/utils.sh @@ -87,14 +87,65 @@ function echosuccess() { fi } +function get_job_id() { + local job_name="${1}" + local query_string="${2:+&${2}}" + local api_token="${API_TOKEN-${GITLAB_BOT_MULTI_PROJECT_PIPELINE_POLLING_TOKEN}}" + if [ -z "${api_token}" ]; then + echoerr "Please provide an API token with \$API_TOKEN or \$GITLAB_BOT_MULTI_PROJECT_PIPELINE_POLLING_TOKEN." 
diff --git a/scripts/utils.sh b/scripts/utils.sh
index 4d6088e94a8d017a732282821641cd2a44ab5993..3829bcdf24e5339927240a607247c9efb7a8dccb 100644
--- a/scripts/utils.sh
+++ b/scripts/utils.sh
@@ -87,14 +87,65 @@ function echosuccess() {
   fi
 }

+function get_job_id() {
+  local job_name="${1}"
+  local query_string="${2:+&${2}}"
+  local api_token="${API_TOKEN-${GITLAB_BOT_MULTI_PROJECT_PIPELINE_POLLING_TOKEN}}"
+
+  if [ -z "${api_token}" ]; then
+    echoerr "Please provide an API token with \$API_TOKEN or \$GITLAB_BOT_MULTI_PROJECT_PIPELINE_POLLING_TOKEN."
+    return
+  fi
+
+  local max_page=3
+  local page=1
+
+  while true; do
+    local url="https://gitlab.com/api/v4/projects/${CI_PROJECT_ID}/pipelines/${CI_PIPELINE_ID}/jobs?per_page=100&page=${page}${query_string}"
+    echoinfo "GET ${url}"
+
+    local job_id
+    job_id=$(curl --silent --show-error --header "PRIVATE-TOKEN: ${api_token}" "${url}" | jq "map(select(.name == \"${job_name}\")) | map(.id) | last")
+    [[ "${job_id}" == "null" && "${page}" -lt "$max_page" ]] || break
+
+    let "page++"
+  done
+
+  if [[ "${job_id}" == "null" ]]; then # jq prints "null" for a non-existent attribute
+    echoerr "The '${job_name}' job ID couldn't be retrieved!"
+  else
+    echoinfo "The '${job_name}' job ID is ${job_id}"
+    echo "${job_id}"
+  fi
+}
+
+function play_job() {
+  local job_name="${1}"
+  local job_id
+  job_id=$(get_job_id "${job_name}" "scope=manual")
+  if [ -z "${job_id}" ]; then return; fi
+
+  local api_token="${API_TOKEN-${GITLAB_BOT_MULTI_PROJECT_PIPELINE_POLLING_TOKEN}}"
+  if [ -z "${api_token}" ]; then
+    echoerr "Please provide an API token with \$API_TOKEN or \$GITLAB_BOT_MULTI_PROJECT_PIPELINE_POLLING_TOKEN."
+    return
+  fi
+
+  local url="https://gitlab.com/api/v4/projects/${CI_PROJECT_ID}/jobs/${job_id}/play"
+  echoinfo "POST ${url}"
+
+  local job_url
+  job_url=$(curl --silent --show-error --request POST --header "PRIVATE-TOKEN: ${api_token}" "${url}" | jq ".web_url")
+  echoinfo "Manual job '${job_name}' started at: ${job_url}"
+}
+
 function fail_pipeline_early() {
   local dont_interrupt_me_job_id
-  dont_interrupt_me_job_id=$(scripts/api/get_job_id --job-query "scope=success" --job-name "dont-interrupt-me")
+  dont_interrupt_me_job_id=$(get_job_id 'dont-interrupt-me' 'scope=success')

   if [[ -n "${dont_interrupt_me_job_id}" ]]; then
     echoinfo "This pipeline cannot be interrupted due to \`dont-interrupt-me\` job ${dont_interrupt_me_job_id}"
   else
     echoinfo "Failing pipeline early for fast feedback due to test failures in rspec fail-fast."
-    scripts/api/cancel_pipeline
+    curl --request POST --header "PRIVATE-TOKEN: ${GITLAB_BOT_MULTI_PROJECT_PIPELINE_POLLING_TOKEN}" "https://${CI_SERVER_HOST}/api/v4/projects/${CI_PROJECT_ID}/pipelines/${CI_PIPELINE_ID}/cancel"
   fi
 }
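With these helpers in place, the `review.gitlab-ci.yml` one-liners at the top of this patch
reduce to sourcing `scripts/utils.sh` and calling `play_job`. For example (assuming
`CI_PROJECT_ID`, `CI_PIPELINE_ID`, and a suitable API token are set, as they are in CI):

    source scripts/utils.sh
    # Only auto-start the follow-up job when the current job was triggered manually:
    [ -z "$CI_JOB_MANUAL" ] || play_job "review-deploy"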