-
Notifications
You must be signed in to change notification settings - Fork 226
Expand file tree
/
Copy pathci_phase_iso_build.sh
More file actions
executable file
·235 lines (195 loc) · 9.33 KB
/
ci_phase_iso_build.sh
File metadata and controls
executable file
·235 lines (195 loc) · 9.33 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
#!/bin/bash
#
# This script runs on the build host to create all test artifacts.
#
# Optional argument: -update_cache (selects the "update cache" mode in the
# main body; the default mode tries to download and reuse the cache).
# Environment:
#   PULL_SECRET   - path to the registry pull secret (default: ~/.pull-secret.json)
#   BUILD_DRY_RUN - "true" to echo the image build commands instead of running them
#   CI_JOB_NAME   - set by CI; a "bootc" token selects bootc builds over composer-cli
# Trace commands (-x), exit on error (-e), error on unset variables (-u),
# and propagate failures through pipelines (pipefail).
set -xeuo pipefail
# Timestamped xtrace prefix: wall-clock time, source path relative to $HOME,
# line number, then a tab (\011) separator.
export PS4='+ $(date "+%T.%N") ${BASH_SOURCE#$HOME/}:$LINENO \011'
# Cannot use common.sh yet because some dependencies may be missing,
# but we only need ROOTDIR at this time.
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ROOTDIR="$(cd "${SCRIPTDIR}/../.." && pwd)"
# Log output automatically
LOGDIR="${ROOTDIR}/_output/ci-logs"
LOGFILE="${LOGDIR}/$(basename "$0" .sh).log"
if [ ! -d "${LOGDIR}" ]; then
mkdir -p "${LOGDIR}"
fi
echo "Logging to ${LOGFILE}"
# Set fd 1 and 2 to write to the log file
# (tee keeps the live console copy; the awk stage prepends a timestamp to
# every line written to the file and flushes so the log is current on crash).
exec &> >(tee >(awk '{ print strftime("%Y-%m-%d %H:%M:%S"), $0; fflush() }' >"${LOGFILE}"))
PULL_SECRET=${PULL_SECRET:-${HOME}/.pull-secret.json}
# Detect a bootc build mode based on the job name
COMPOSER_CLI_BUILDS=true
if [ -v CI_JOB_NAME ] && [[ "${CI_JOB_NAME}" =~ .*bootc.* ]]; then
COMPOSER_CLI_BUILDS=false
fi
# Allow for a dry-run option to save on testing time
BUILD_DRY_RUN=${BUILD_DRY_RUN:-false}
#######################################
# Emit the word "echo" when dry-run mode is on, so callers can prefix
# commands with "$(dry_run)" to print them instead of executing them.
# Globals:   BUILD_DRY_RUN (read) - "true" or "false"
# Outputs:   "echo" to stdout when BUILD_DRY_RUN is true; nothing otherwise
# Returns:   always 0. The previous `${BUILD_DRY_RUN} && echo "echo"` form
#            returned 1 when dry-run was off, which would abort the script
#            under `set -e` if the function were ever called as a plain
#            command rather than inside a command substitution.
#######################################
dry_run() {
    if "${BUILD_DRY_RUN}" ; then
        echo "echo"
    fi
}
#######################################
# Try downloading the 'last' build cache.
# Globals:   SCENARIO_BUILD_BRANCH (read), SCENARIO_BUILD_TAG (read)
# Returns:   0 on success or 1 otherwise.
#######################################
download_build_cache() {
    # Declare and assign separately so a failure of the getlast pipeline is
    # not masked by the exit status of the 'local' builtin (ShellCheck
    # SC2155). If the lookup fails there is nothing to download.
    local cache_last
    cache_last="$(\
        ./bin/manage_build_cache.sh getlast \
            -b "${SCENARIO_BUILD_BRANCH}" -t "${SCENARIO_BUILD_TAG}" | \
        awk '/LAST:/ {print $NF}' \
    )" || return 1
    if ./bin/manage_build_cache.sh verify -b "${SCENARIO_BUILD_BRANCH}" -t "${cache_last}" ; then
        # Download the cached images
        ./bin/manage_build_cache.sh download -b "${SCENARIO_BUILD_BRANCH}" -t "${cache_last}"
        return 0
    fi
    return 1
}
# Run image build for the 'base' layers and update the cache:
# - Upload build artifacts
# - Update 'last' to point to the current build tag
# - Clean up older images, preserving the 'last' and the previous build tag
# Note that the build and upload are skipped if valid cached data already exists.
#
# Globals: SCENARIO_BUILD_BRANCH, SCENARIO_BUILD_TAG, SCENARIO_BUILD_TAG_PREV (read)
update_build_cache() {
if ./bin/manage_build_cache.sh verify -b "${SCENARIO_BUILD_BRANCH}" -t "${SCENARIO_BUILD_TAG}" ; then
echo "Valid build cache already exists for the '${SCENARIO_BUILD_BRANCH}' branch and '${SCENARIO_BUILD_TAG}' tag"
echo "WARNING: Skipping cache build, update and cleanup procedures"
return
fi
# Build the composer-cli base layer and brew RPMs to be cached
$(dry_run) bash -x ./bin/build_images.sh -l ./image-blueprints/layer1-base
$(dry_run) bash -x ./bin/build_images.sh -l ./image-blueprints/layer4-release
# Build templates
$(dry_run) bash -x ./bin/build_bootc_images.sh -g ./image-blueprints-bootc/templates
# Build the bootc base layer and brew RPMs to be cached
$(dry_run) bash -x ./bin/build_bootc_images.sh -l ./image-blueprints-bootc/el9/layer1-base -l ./image-blueprints-bootc/el10/layer1-base
$(dry_run) bash -x ./bin/build_bootc_images.sh -l ./image-blueprints-bootc/el9/layer4-release -l ./image-blueprints-bootc/el10/layer4-release
# Prepare for the cache upload by stopping composer services and cleaning
# temporary artifacts
./bin/manage_composer_config.sh cleanup
# Upload the images and update the 'last' setting
# NOTE(review): the cleanup/upload/setlast/keep steps below are NOT guarded
# by $(dry_run) while the build steps above are, so they run for real even
# in dry-run mode — confirm this asymmetry is intentional.
./bin/manage_build_cache.sh upload -b "${SCENARIO_BUILD_BRANCH}" -t "${SCENARIO_BUILD_TAG}"
./bin/manage_build_cache.sh setlast -b "${SCENARIO_BUILD_BRANCH}" -t "${SCENARIO_BUILD_TAG}"
# Cleanup older images in the cache, preserving the previous cache if any
# The 'last' cache is preserved by default
./bin/manage_build_cache.sh keep -b "${SCENARIO_BUILD_BRANCH}" -t "${SCENARIO_BUILD_TAG_PREV}"
}
# Run composer-cli image builds, potentially skipping the 'periodic' layer.
#
# Outside CI (no 'CI_JOB_NAME' in the environment) a full build is run.
# In CI:
# - Jobs ending in 'release' or 'release-arm' skip all builds (-X) because
#   every image is fetched from the cache.
# - All other jobs build the 'base' and 'presubmit' layers, plus the
#   'periodic' layer when the job name contains a 'periodic' token.
run_image_build() {
    # Not in CI: fall back to an unrestricted full build.
    if ! [ -v CI_JOB_NAME ] ; then
        $(dry_run) bash -x ./bin/build_images.sh
        return
    fi
    # Release testing jobs fetch all images from the cache: skip the builds.
    if [[ "${CI_JOB_NAME}" =~ .*release(-arm)?$ ]]; then
        $(dry_run) bash -x ./bin/build_images.sh -X
        return
    fi
    # Conditional per-layer builds when running in CI.
    # The build_images.sh script skips any images that have been downloaded
    # from the cache.
    local layer
    for layer in layer1-base layer2-presubmit ; do
        $(dry_run) bash -x ./bin/build_images.sh -l "./image-blueprints/${layer}"
    done
    # The 'periodic' layer is only needed by periodic jobs.
    if [[ "${CI_JOB_NAME}" =~ .*periodic.* ]]; then
        $(dry_run) bash -x ./bin/build_images.sh -l ./image-blueprints/layer3-periodic
    fi
}
# Run container file verification and bootc image build.
#
# Outside CI, every layer for every OS version is built. In CI, the job name
# selects the work: release jobs skip builds (-X, images come from the
# cache), and an 'el9'/'el10' suffix limits building to that OS version.
#
# Globals: ROOTDIR (read), CI_JOB_NAME (read, optional)
run_bootc_image_build() {
make -C "${ROOTDIR}" verify-containers
# Build templates first
$(dry_run) bash -x ./bin/build_bootc_images.sh -g ./image-blueprints-bootc/templates
if [ -v CI_JOB_NAME ] ; then
# Skip all image builds for release testing CI jobs because all the images are fetched from the cache.
if [[ "${CI_JOB_NAME}" =~ .*release(-arm)?(-el(9|10))?$ ]]; then
$(dry_run) bash -x ./bin/build_bootc_images.sh -X
return
fi
# The OS token is the last dash-separated component of the job name,
# e.g. "...-el9" or "...-el10".
local -r os="${CI_JOB_NAME##*-}"
if [[ "${os}" == "el9" || "${os}" == "el10" ]]; then
$(dry_run) bash -x ./bin/build_bootc_images.sh -l "./image-blueprints-bootc/${os}/layer1-base"
$(dry_run) bash -x ./bin/build_bootc_images.sh -l "./image-blueprints-bootc/${os}/layer2-presubmit"
if [[ "${os}" == "el10" ]]; then
# Build el9 images for upgrade tests
# NOTE(review): the comment above says el9 but the path below is under
# el10 — presumably layer5-upgrade holds the el9-based source images
# used by upgrade-to-el10 tests; confirm the comment/path pairing.
$(dry_run) bash -x ./bin/build_bootc_images.sh -l ./image-blueprints-bootc/el10/layer5-upgrade
fi
# The 'periodic' layer is only needed by periodic jobs.
if [[ "${CI_JOB_NAME}" =~ .*periodic.* ]]; then
$(dry_run) bash -x ./bin/build_bootc_images.sh -l "./image-blueprints-bootc/${os}/layer3-periodic"
fi
# Jobs with a 'release' token (not matched by the -X guard above)
# still need the release layer built.
if [[ "${CI_JOB_NAME}" =~ .*release.* ]]; then
$(dry_run) bash -x ./bin/build_bootc_images.sh -l "./image-blueprints-bootc/${os}/layer4-release"
fi
fi
# Build upstream images
if [[ "${CI_JOB_NAME}" =~ .*upstream.* ]]; then
$(dry_run) bash -x ./bin/build_bootc_images.sh -l ./image-blueprints-bootc/upstream
fi
else
# Full build for all OS versions
$(dry_run) bash -x ./bin/build_bootc_images.sh -l ./image-blueprints-bootc/el9/layer1-base -l ./image-blueprints-bootc/el10/layer1-base
$(dry_run) bash -x ./bin/build_bootc_images.sh -l ./image-blueprints-bootc/el9/layer2-presubmit -l ./image-blueprints-bootc/el10/layer2-presubmit
$(dry_run) bash -x ./bin/build_bootc_images.sh -l ./image-blueprints-bootc/el9/layer3-periodic -l ./image-blueprints-bootc/el10/layer3-periodic
$(dry_run) bash -x ./bin/build_bootc_images.sh -l ./image-blueprints-bootc/el9/layer4-release -l ./image-blueprints-bootc/el10/layer4-release
$(dry_run) bash -x ./bin/build_bootc_images.sh -l ./image-blueprints-bootc/upstream
fi
}
# ----- Main script body -----
# Record the host OS for the CI log.
cat /etc/os-release
# Show what other dnf commands have been run to try to debug why we
# sometimes see cache collisions.
$(dry_run) sudo dnf history --reverse
cd "${ROOTDIR}"
# Get firewalld and repos in place. Use scripts to get the right repos
# for each branch.
$(dry_run) bash -x ./scripts/devenv-builder/configure-vm.sh --skip-dnf-update --no-build --force-firewall "${PULL_SECRET}"
$(dry_run) bash -x ./test/bin/manage_composer_config.sh create
cd "${ROOTDIR}/test/"
# Source common.sh only after all dependencies are installed.
# NOTE(review): SCENARIO_BUILD_BRANCH/SCENARIO_BUILD_TAG used below are
# presumably defined by these sourced files — verify.
# shellcheck source=test/bin/common.sh
source "${SCRIPTDIR}/common.sh"
# shellcheck source=test/bin/common_versions_verify.sh
source "${SCRIPTDIR}/common_versions_verify.sh"
if ${COMPOSER_CLI_BUILDS} ; then
# Determine and create the ideal number of workers
$(dry_run) bash -x ./bin/manage_composer_config.sh create-workers
fi
# Check if cache can be used for builds
# This may fail when AWS S3 connection is not configured, or there is no cache bucket
HAS_CACHE_ACCESS=false
if ./bin/manage_build_cache.sh getlast -b "${SCENARIO_BUILD_BRANCH}" -t "${SCENARIO_BUILD_TAG}" ; then
HAS_CACHE_ACCESS=true
fi
# Check the build mode: "try using cache" (default) or "update cache"
if [ $# -gt 0 ] && [ "$1" = "-update_cache" ] ; then
# Cache update mode requires working cache access: abort otherwise.
if ${HAS_CACHE_ACCESS} ; then
# Re-build from source before updating the cache because some
# build artifacts may be cached
$(dry_run) bash -x ./bin/build_rpms.sh
update_build_cache
else
echo "ERROR: Access to the build cache is not available"
exit 1
fi
else
# Default mode: best-effort cache download, then build whatever is missing.
GOT_CACHED_DATA=false
if ${HAS_CACHE_ACCESS} ; then
if download_build_cache ; then
GOT_CACHED_DATA=true
fi
fi
if ! ${GOT_CACHED_DATA} ; then
echo "WARNING: Build cache is not available, rebuilding all the artifacts"
fi
# Re-build from source after downloading the cache because
# the build may depend on some cached artifacts
$(dry_run) bash -x ./bin/build_rpms.sh
# Optionally run bootc image builds
# (mode chosen from CI_JOB_NAME during startup: composer-cli vs bootc).
if ${COMPOSER_CLI_BUILDS} ; then
run_image_build
else
run_bootc_image_build
fi
fi
echo "Build phase complete"
exit 0