-
Notifications
You must be signed in to change notification settings - Fork 226
Expand file tree
/
Copy pathbuild_bootc_images.py
More file actions
703 lines (625 loc) · 31.4 KB
/
build_bootc_images.py
File metadata and controls
703 lines (625 loc) · 31.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
#!/usr/bin/env python3
import argparse
import concurrent.futures
import getpass
import glob
import multiprocessing
import os
import platform
import re
import sys
import time
import traceback
import common
# Global environment variables
#
# Note: Global variables for RPM versions and repos are
# initialized in set_rpm_version_info_vars function

# Directories used throughout the build: script location, container file /
# ISO output locations, the top-level image work area and VM disk storage.
SCRIPTDIR = common.get_env_var('SCRIPTDIR')
BOOTC_IMAGE_DIR = common.get_env_var('BOOTC_IMAGE_DIR')
BOOTC_ISO_DIR = common.get_env_var('BOOTC_ISO_DIR')
IMAGEDIR = common.get_env_var('IMAGEDIR')
VM_DISK_BASEDIR = common.get_env_var('VM_DISK_BASEDIR')
# Host architecture string (equivalent of `uname -m`), used to select
# per-arch release JSON files inside release-info RPMs
UNAME_M = common.get_env_var('UNAME_M')
# Output file accumulating the container image references to mirror
CONTAINER_LIST = common.get_env_var('CONTAINER_LIST')
# RPM repositories: locally built, base build, fake-next-minor and brew
LOCAL_REPO = common.get_env_var('LOCAL_REPO')
BASE_REPO = common.get_env_var('BASE_REPO')
NEXT_REPO = common.get_env_var('NEXT_REPO')
BREW_REPO = common.get_env_var('BREW_REPO')
HOME_DIR = common.get_env_var("HOME")
# Registry pull secret; defaults to the user's local pull secret file.
# Note: rebound in main() to a copy augmented with mirror credentials.
PULL_SECRET = common.get_env_var('PULL_SECRET', f"{HOME_DIR}/.pull-secret.json")
# Switch to quay.io/centos-bootc/bootc-image-builder:latest if any new upstream
# features are required
BIB_IMAGE = "registry.redhat.io/rhel9/bootc-image-builder:latest"
# Path to the gomplate templating binary (fetched on demand in main)
GOMPLATE = common.get_env_var('GOMPLATE')
MIRROR_REGISTRY = common.get_env_var('MIRROR_REGISTRY_URL')
# Set from the -f/--force-rebuild option in main(); read by worker functions
FORCE_REBUILD = False
def cleanup_atexit(dry_run):
    """Best-effort cleanup run at script exit.

    Terminates any subprocesses spawned during the run and stops any
    containers created from the bootc image builder image.
    """
    common.print_msg("Running atexit cleanup")
    # Kill leftover child processes first
    for child_pid in common.find_subprocesses():
        common.print_msg(f"Terminating {child_pid} PID")
        common.terminate_process(child_pid)
    # Collect the IDs of running bootc image builder containers
    list_cmd = [
        "sudo", "podman", "ps",
        "--filter", f"ancestor={BIB_IMAGE}",
        "--format", "{{.ID}}"
    ]
    container_ids = common.run_command_in_shell(list_cmd, dry_run)
    if not container_ids:
        return
    # Collapse the IDs into a single whitespace-normalized line
    container_ids = re.sub(r'\s+', ' ', container_ids)
    common.print_msg(f"Terminating '{container_ids}' container(s)")
    common.run_command_in_shell(["sudo", "podman", "stop", container_ids], dry_run)
def find_latest_rpm(repo_path, version=""):
    """Return the lexicographically last microshift-release-info RPM found
    recursively under repo_path, optionally narrowed by a version prefix.

    Raises an Exception when no matching RPM exists.
    """
    pattern = f"{repo_path}/**/microshift-release-info-{version}*.rpm"
    matches = sorted(glob.glob(pattern, recursive=True))
    if not matches:
        raise Exception(f"Failed to find 'microshift-release-info-{version}*' RPM in {repo_path}")
    return matches[-1]
def is_rhocp_available(ver):
    """Return True when the rhocp-4.<ver> repository serves the cri-o package.

    Any failure of the dnf query is interpreted as the repo being unavailable.
    """
    # Equivalent to `uname -m`
    arch = platform.machine()
    repo = f"rhocp-4.{ver}-for-rhel-9-{arch}-rpms"
    try:
        # Probe the repository for the cri-o package
        output = common.run_command_in_shell(f"dnf repository-packages --showduplicates {repo} info cri-o")
        common.print_msg(output)
        return True
    except Exception:
        return False
def get_rhocp_beta_url_if_available(ver):
    """Return the arch-specific RHOCP beta mirror URL for 4.<ver>, or an
    empty string when the mirror is not usable.

    The mirror is only used when the cri-o package is present for BOTH
    x86_64 and aarch64, so both repos are probed before returning a URL.
    """
    base = "https://mirror.openshift.com/pub/openshift-v4"
    probe_urls = [
        f"{base}/x86_64/dependencies/rpms/4.{ver}-el9-beta/",
        f"{base}/aarch64/dependencies/rpms/4.{ver}-el9-beta/",
    ]
    try:
        # Check for cri-o in each arch-specific repository
        for url in probe_urls:
            info = common.run_command_in_shell(
                f"dnf repository-packages --showduplicates --disablerepo '*' --repofrompath 'this,{url}' this info cri-o")
            common.print_msg(info)
        # Use specific minor version RHOCP mirror only if both arches are available.
        return f"{base}/{platform.machine()}/dependencies/rpms/4.{ver}-el9-beta/"
    except Exception:
        return ""
def set_rpm_version_info_vars():
    """Initialize the global RPM version/repo variables and export a subset
    of them as environment variables for templating.

    See the test/bin/common_versions.sh script for a full list of the
    variables used for templating.

    Raises:
        Exception: when the locally built release-info RPMs or the
        entitlement key/certificate files cannot be found.
    """
    global FAKE_NEXT_MINOR_VERSION
    global PREVIOUS_RELEASE_REPO
    global PREVIOUS_RELEASE_VERSION
    global YMINUS2_RELEASE_REPO
    global YMINUS2_RELEASE_VERSION
    global BREW_Y0_RELEASE_VERSION
    global BREW_Y1_RELEASE_VERSION
    global BREW_Y2_RELEASE_VERSION
    global BREW_RC_RELEASE_VERSION
    global BREW_EC_RELEASE_VERSION
    global BREW_NIGHTLY_RELEASE_VERSION
    FAKE_NEXT_MINOR_VERSION = common.get_env_var('FAKE_NEXT_MINOR_VERSION')
    PREVIOUS_RELEASE_REPO = common.get_env_var('PREVIOUS_RELEASE_REPO')
    PREVIOUS_RELEASE_VERSION = common.get_env_var('PREVIOUS_RELEASE_VERSION')
    YMINUS2_RELEASE_REPO = common.get_env_var('YMINUS2_RELEASE_REPO')
    YMINUS2_RELEASE_VERSION = common.get_env_var('YMINUS2_RELEASE_VERSION')
    BREW_Y0_RELEASE_VERSION = common.get_env_var('BREW_Y0_RELEASE_VERSION')
    BREW_Y1_RELEASE_VERSION = common.get_env_var('BREW_Y1_RELEASE_VERSION')
    BREW_Y2_RELEASE_VERSION = common.get_env_var('BREW_Y2_RELEASE_VERSION')
    BREW_RC_RELEASE_VERSION = common.get_env_var('BREW_RC_RELEASE_VERSION')
    BREW_EC_RELEASE_VERSION = common.get_env_var('BREW_EC_RELEASE_VERSION')
    BREW_NIGHTLY_RELEASE_VERSION = common.get_env_var('BREW_NIGHTLY_RELEASE_VERSION')

    # The source versions are deduced from the locally built RPMs
    global SOURCE_VERSION
    global SOURCE_VERSION_BASE
    release_info_rpm = find_latest_rpm(LOCAL_REPO)
    release_info_rpm_base = find_latest_rpm(BASE_REPO)
    SOURCE_VERSION = common.run_command_in_shell(f"rpm -q --queryformat '%{{version}}-%{{release}}' {release_info_rpm}")
    SOURCE_VERSION_BASE = common.run_command_in_shell(f"rpm -q --queryformat '%{{version}}-%{{release}}' {release_info_rpm_base}")

    global SSL_CLIENT_KEY_FILE
    global SSL_CLIENT_CERT_FILE
    # Find the first file matching "*-key.pem" in the entitlements directory
    keyfile = next(glob.iglob("/etc/pki/entitlement/*-key.pem"), None)
    # Find the first file matching "*.pem" but not "*-key.pem" in the entitlements directory
    certfile = next((file for file in glob.iglob("/etc/pki/entitlement/*.pem") if not file.endswith("-key.pem")), None)
    # Fail early with a clear message instead of an AttributeError on
    # None.replace when the entitlement files are missing
    if keyfile is None or certfile is None:
        raise Exception("Failed to find entitlement key/certificate files in /etc/pki/entitlement")
    # Replace the entitlement path with the one usable inside a container
    SSL_CLIENT_KEY_FILE = keyfile.replace("/entitlement/", "/entitlement-host/")
    SSL_CLIENT_CERT_FILE = certfile.replace("/entitlement/", "/entitlement-host/")

    # Update selected environment variables based on the global variables.
    # These are used for templating container files and images.
    rpmver_globals_vars = [
        'SOURCE_VERSION', 'SOURCE_VERSION_BASE', 'SSL_CLIENT_KEY_FILE', 'SSL_CLIENT_CERT_FILE'
    ]
    for var in rpmver_globals_vars:
        value = globals().get(var)
        if value is None:
            raise Exception(f"The '{var}' global variable does not exist")
        os.environ[var] = str(value)
def get_container_images(path, version):
    """Return a comma-separated list of container image URIs embedded in the
    newest microshift-release-info RPM matching the given version under path.
    """
    rpm_file = find_latest_rpm(path, version)
    # Extract the per-arch release JSON from the RPM payload, then pull out
    # the image URIs with jq and join them with commas
    extract_cmd = f"rpm2cpio '{rpm_file}' | cpio -i --to-stdout '*release-{UNAME_M}.json' 2> /dev/null"
    query_cmd = "jq -r '[.images[]] | join(\",\")'"
    return common.run_command_in_shell(f"{extract_cmd} | {query_cmd}")
def extract_container_images(version, repo_spec, outfile, dry_run=False):
    """Download the microshift-release-info RPM for the given version from
    repo_spec and append its embedded container image references to outfile.

    repo_spec may be an https:// URL, an absolute filesystem path, or the
    name of a repository already configured on the system.
    """
    common.print_msg(f"Extracting images from {version}")
    # Create and change the directory for extracting RPMs
    image_path = common.create_dir(f"{IMAGEDIR}/release-info-rpms")
    common.pushd(str(image_path))
    # Use try/finally so the working directory is restored even when the
    # download or extraction fails part-way through
    try:
        repo_name = common.basename(repo_spec)
        dnf_options = []
        if re.match(r'^https://.*', repo_spec):
            # If the spec is a URL, set up the arguments to point to that location.
            dnf_options.extend(["--repofrompath", f"{repo_name},{repo_spec}", "--repo", repo_name])
        elif re.match(r'^/.*', repo_spec):
            # If the spec is a path, set up the arguments to point to that path.
            # Disabling dnf strict option and refreshing cache are required because the
            # download command does not run elevated.
            dnf_options.extend(["--repofrompath", f"{repo_name},{repo_spec}", "--repo", repo_name,
                                "--setopt=strict=False", "--refresh"])
        elif repo_spec:
            # If the spec is a name, assume it is already known to the
            # system through normal configuration. The repo does not need
            # to be enabled in order for dnf to download a package from it.
            dnf_options.extend(["--repo", repo_spec])
        # Construct and execute the dnf download command
        dnf_command = ["dnf", "download"] + dnf_options + [f"microshift-release-info-{version}"]
        if common.run_command(dnf_command, dry_run) is not None:
            images_output = get_container_images(str(image_path), version)
            # Append one image reference per line
            with open(outfile, "a") as f:
                f.write(images_output.replace(',', '\n'))
                f.write('\n')
            # Cleanup RPM files
            rpm_list = list(map(str, image_path.glob("microshift-release-info-*.rpm")))
            common.run_command(["rm", "-f"] + rpm_list, dry_run)
    finally:
        # Restore the current directory
        common.popd()
def run_template_cmd(ifile, ofile, dry_run):
    """Render the template file ifile into ofile using gomplate."""
    common.run_command_in_shell(
        [GOMPLATE, "--file", ifile, "--out", ofile],
        dry_run)
def get_process_file_names(idir, ifile, obasedir):
    """Derive the paths used when processing one input file.

    Returns a tuple (input_path, base_name, output_dir, log_file) where
    base_name is ifile stripped of its extension.
    """
    stem = os.path.splitext(ifile)[0]
    return (
        os.path.join(idir, ifile),
        stem,
        os.path.join(obasedir, stem),
        os.path.join(obasedir, f"{ifile}.log"),
    )
def process_containerfile(groupdir, containerfile, dry_run):
    """Template, build and push one container image described by a
    .containerfile template in groupdir.

    The build is skipped when the image is already cached in the mirror
    registry (unless FORCE_REBUILD is set). Results are recorded in the
    junit report and the build log is echoed with a per-image prefix.

    Raises:
        Exception: when the build or push fails after retries.
    """
    cf_path, cf_outname, _, cf_logfile = get_process_file_names(
        groupdir, containerfile, BOOTC_IMAGE_DIR)
    # Run template command on the input file
    cf_outfile = os.path.join(BOOTC_IMAGE_DIR, containerfile)
    run_template_cmd(cf_path, cf_outfile, dry_run)
    # Templating may generate an empty file
    if not dry_run:
        if not common.file_has_valid_lines(cf_outfile):
            common.print_msg(f"Skipping an empty {containerfile} file")
            return
    common.print_msg(f"Processing {containerfile} with logs in {cf_logfile}")
    start_process_container = time.time()

    def should_skip(image, cached):
        # Forcing the rebuild if needed
        if FORCE_REBUILD:
            common.print_msg(f"Forcing rebuild of '{image}'")
            return False
        if cached:
            common.print_msg(f"The '{image}' already exists, skipping")
            return True
        return False

    try:
        # Redirect the output to the log file
        with open(cf_logfile, 'w') as logfile:
            is_cached = False
            # Check if the container image exists in the mirror registry and skip
            # the subsequent build command if the image has already been cached.
            #
            # Note: No retries. Failure to inspect the image is ignored.
            try:
                inspect_args = [
                    "sudo", "skopeo", "inspect",
                    "--authfile", PULL_SECRET,
                    f"docker://{MIRROR_REGISTRY}/{cf_outname}",
                    "&>/dev/null"
                ]
                start = time.time()
                common.run_command_in_shell(inspect_args, dry_run, logfile, logfile)
                common.record_junit(cf_path, "inspect-image", "OK", start)
                is_cached = True
            except Exception:
                # An inspect failure just means the image is not cached
                pass
            # Check if the target artifact exists
            if should_skip(cf_outname, is_cached):
                common.record_junit(cf_path, "process-containerfile", "SKIPPED")
                return
            # Run the container build command
            # Note:
            # - The pull secret is necessary in some builds for pulling embedded
            #   container images referenced in release-info RPMs
            # - The host network usage is required to access the RPM repository
            #   proxy server using the localhost URL to make generated builds
            #   reusable from cache on other hosts.
            # - The explicit push-to-mirror sets the 'latest' tag as all the build
            #   layers are in the mirror due to 'cache-to' option
            build_args = [
                "sudo", "podman", "build",
                "--authfile", PULL_SECRET,
                "--network", "host",
                "--secret", f"id=pullsecret,src={PULL_SECRET}",
                "--cache-to", f"{MIRROR_REGISTRY}/{cf_outname}",
                "--cache-from", f"{MIRROR_REGISTRY}/{cf_outname}",
                "-t", cf_outname, "-f", cf_outfile,
                IMAGEDIR
            ]
            start = time.time()
            common.retry_on_exception(3, common.run_command_in_shell, build_args, dry_run, logfile, logfile)
            common.record_junit(cf_path, "build-container", "OK", start)
            push_args = [
                "sudo", "podman", "push",
                "--authfile", PULL_SECRET,
                cf_outname,
                f"{MIRROR_REGISTRY}/{cf_outname}"
            ]
            start = time.time()
            common.retry_on_exception(3, common.run_command_in_shell, push_args, dry_run, logfile, logfile)
            common.record_junit(cf_path, "push-container", "OK", start)
    except Exception:
        common.record_junit(cf_path, "process-container", "FAILED", start_process_container, log_filepath=cf_logfile)
        # Propagate the exception to the caller
        raise
    finally:
        # Always display the command logs with the prefix on each line
        common.run_command(["sed", f"s/^/{cf_outname}: /", cf_logfile], dry_run)
def process_image_bootc(groupdir, bootcfile, dry_run):
    """Build an anaconda-iso installer image from a .image-bootc template.

    The template renders to a single container image reference; the bootc
    image builder container then produces an ISO that is moved to
    VM_DISK_BASEDIR as <name>.iso. Skipped when the target ISO already
    exists, unless FORCE_REBUILD is set.
    """
    bf_path, bf_outname, bf_outdir, bf_logfile = get_process_file_names(
        groupdir, bootcfile, BOOTC_ISO_DIR)
    # Final artifact location, e.g. VM_DISK_BASEDIR/<name>.iso
    bf_targetiso = os.path.join(VM_DISK_BASEDIR, f"{bf_outname}.iso")

    def should_skip(file):
        # Forcing the rebuild if needed
        if FORCE_REBUILD:
            common.print_msg(f"Forcing rebuild of '{file}'")
            return False
        if not os.path.exists(file):
            return False
        common.print_msg(f"The '{file}' already exists, skipping")
        return True

    # Check if the target artifact exists
    if should_skip(bf_targetiso):
        common.record_junit(bf_path, "process-bootc-image", "SKIPPED")
        return
    # Create the output directories
    os.makedirs(bf_outdir, exist_ok=True)
    os.makedirs(VM_DISK_BASEDIR, exist_ok=True)
    # Run template command on the input file
    bf_outfile = os.path.join(BOOTC_IMAGE_DIR, bootcfile)
    run_template_cmd(bf_path, bf_outfile, dry_run)
    # Templating may generate an empty file
    if not dry_run:
        if not common.file_has_valid_lines(bf_outfile):
            common.print_msg(f"Skipping an empty {bootcfile} file")
            return
    common.print_msg(f"Processing {bootcfile} with logs in {bf_logfile}")
    start_process_bootc_image = time.time()
    try:
        # Redirect the output to the log file
        with open(bf_logfile, 'w') as logfile:
            # Download the bootc image builder itself in case
            # it requires authorization for accessing the image
            pull_args = [
                "sudo", "podman", "pull",
                "--authfile", PULL_SECRET, BIB_IMAGE
            ]
            start = time.time()
            common.retry_on_exception(3, common.run_command_in_shell, pull_args, dry_run, logfile, logfile)
            common.record_junit(bf_path, "pull-bootc-bib", "OK", start)
            # Read the image reference (the rendered file's only payload)
            bf_imgref = common.read_file_valid_lines(bf_outfile).strip()
            # Download the image to be used by bootc image builder.
            # Locally built images should also be downloaded in case they were
            # cached but not fetched from the mirror registry.
            pull_args = [
                "sudo", "podman", "pull",
                "--authfile", PULL_SECRET,
                bf_imgref
            ]
            start = time.time()
            common.retry_on_exception(3, common.run_command_in_shell, pull_args, dry_run, logfile, logfile)
            common.record_junit(bf_path, "pull-bootc-image", "OK", start)
            # The podman command with security elevation and
            # mount of output / container storage
            build_args = [
                "sudo", "podman", "run",
                "--rm", "-i", "--privileged",
                "--network", "host",
                "--pull=newer",
                "--security-opt", "label=type:unconfined_t",
                "-v", f"{bf_outdir}:/output",
                "-v", "/var/lib/containers/storage:/var/lib/containers/storage"
            ]
            # Add the bootc image builder command line using local images
            build_args += [
                BIB_IMAGE,
                "--type", "anaconda-iso",
                bf_imgref
            ]
            start = time.time()
            common.retry_on_exception(3, common.run_command_in_shell, build_args, dry_run, logfile, logfile)
            common.record_junit(bf_path, "build-bootc-image", "OK", start)
    except Exception:
        common.record_junit(bf_path, "process-bootc-image", "FAILED", start_process_bootc_image, log_filepath=bf_logfile)
        # Propagate the exception to the caller
        raise
    finally:
        # Always display the command logs with the prefix on each line
        common.run_command(["sed", f"s/^/{bf_outname}: /", bf_logfile], dry_run)
    # Fix the directory ownership and move the artifact
    # (only reached on success; BIB writes the ISO as root under bootiso/)
    if not dry_run:
        common.run_command(
            ["sudo", "chown", "-R", f"{getpass.getuser()}.", bf_outdir],
            dry_run)
        os.rename(f"{bf_outdir}/bootiso/install.iso", bf_targetiso)
def process_container_encapsulate(groupdir, containerfile, dry_run):
    """Wrap an ostree commit into a container image and push it to the
    mirror registry (rpm-ostree compose container-encapsulate).

    The rendered template file contains the ostree ref to encapsulate.
    The build is skipped when the registry already holds an image whose
    'ostree.commit' label matches the local commit (unless FORCE_REBUILD).
    """
    ce_path, ce_outname, _, ce_logfile = get_process_file_names(
        groupdir, containerfile, BOOTC_IMAGE_DIR)
    # Target image in the mirror registry and its local-storage alias
    ce_targetimg = f"{MIRROR_REGISTRY}/{ce_outname}:latest"
    ce_localimg = f"localhost/{ce_outname}:latest"

    def ostree_rev_in_registry(ce_imgref):
        # Forcing the rebuild if needed
        if FORCE_REBUILD:
            common.print_msg(f"Forcing rebuild of '{ce_imgref}'")
            return False
        # Read the commit revision from the ostree repository (must succeed)
        src_ref_cmd = [
            "ostree", "rev-parse",
            "--repo", os.path.join(IMAGEDIR, "repo"),
            ce_imgref
        ]
        src_ref = common.run_command_in_shell(src_ref_cmd, dry_run)
        if not src_ref:
            raise Exception(f"Failed to find ostree revision with '{ce_imgref}' reference")
        # Read the commit revision from the registry (may fail, no error output)
        try:
            dst_ref_cmd = [
                "skopeo", "inspect",
                "--authfile", PULL_SECRET,
                f"docker://{ce_targetimg}",
                "2>/dev/null", "|",
                "jq", "-r", "'.Labels[\"ostree.commit\"]'"
            ]
            dst_ref = common.run_command_in_shell(dst_ref_cmd, dry_run)
            if src_ref == dst_ref:
                common.print_msg(f"The '{ce_targetimg}' already exists, skipping")
                return True
        except Exception:
            # Inspect failure means the image is absent from the registry;
            # fall through and rebuild (bare None is a deliberate no-op)
            None
        return False

    # Run template command on the input file
    ce_outfile = os.path.join(BOOTC_IMAGE_DIR, containerfile)
    run_template_cmd(ce_path, ce_outfile, dry_run)
    common.print_msg(f"Processing {containerfile} with logs in {ce_logfile}")
    start_process_container_encapsulate = time.time()
    try:
        # Redirect the output to the log file
        with open(ce_logfile, 'w') as logfile:
            # Read the image reference (the ostree ref to encapsulate)
            ce_imgref = common.read_file(ce_outfile).strip()
            # Check if the target artifact already exists in registry with
            # the same ostree commit
            if ostree_rev_in_registry(ce_imgref):
                common.record_junit(ce_path, "process-container-encapsulate", "SKIPPED")
                return
            # Run the container image build command.
            # The REGISTRY_AUTH_FILE setting is required for skopeo to succeed
            # in accessing container registries that might require authentication.
            build_args = [
                "sudo", f"REGISTRY_AUTH_FILE={PULL_SECRET}",
                "rpm-ostree", "compose",
                "container-encapsulate",
                "--repo", os.path.join(IMAGEDIR, "repo"),
                ce_imgref,
                f"registry:{ce_targetimg}"
            ]
            start = time.time()
            common.retry_on_exception(3, common.run_command_in_shell, build_args, dry_run, logfile, logfile)
            common.record_junit(ce_path, "build-container", "OK", start)
            # Copy the image into the local containers storage as it might be
            # necessary for subsequent builds that depend on this container image
            copy_args = [
                "sudo", "skopeo", "copy",
                "--authfile", PULL_SECRET,
                f"docker://{ce_targetimg}",
                f"containers-storage:{ce_localimg}"
            ]
            start = time.time()
            common.retry_on_exception(3, common.run_command_in_shell, copy_args, dry_run, logfile, logfile)
            common.record_junit(ce_path, "copy-image", "OK", start)
    except Exception:
        common.record_junit(ce_path, "process-container-encapsulate", "FAILED", start_process_container_encapsulate, log_filepath=ce_logfile)
        # Propagate the exception to the caller
        raise
    finally:
        # Always display the command logs with the prefix on each line
        common.run_command(["sed", f"s/^/{ce_outname}: /", ce_logfile], dry_run)
def process_group(groupdir, build_type, pattern="*", dry_run=False):
    """Process one group directory: render its .template files, then build
    all matching .containerfile / .image-bootc / .container-encapsulate
    inputs in parallel worker processes.

    build_type, when set, restricts builds to that single kind; pattern
    restricts which files in groupdir are considered. Results are recorded
    in a junit file; the first task failure cancels pending tasks and is
    re-raised to the caller.
    """
    futures = []
    try:
        # Open the junit file
        common.start_junit(groupdir)
        # Process all the template files in the current group directory
        # before starting the parallel processing
        for ifile in os.listdir(groupdir):
            if not ifile.endswith(".template"):
                continue
            # Create full path for output and input file names
            ofile = os.path.join(BOOTC_IMAGE_DIR, ifile)
            ifile = os.path.join(groupdir, ifile)
            # Strip the .template suffix from the output file name
            ofile = ofile.removesuffix(".template")
            run_template_cmd(ifile, ofile, dry_run)
        # Parallel processing loop
        with concurrent.futures.ProcessPoolExecutor() as executor:
            # Scan group directory contents sorted by length and then alphabetically
            paths = glob.glob(os.path.join(groupdir, pattern))
            files = [os.path.basename(file) for file in paths]
            for file in sorted(files, key=lambda i: (len(i), i)):
                if file.endswith(".containerfile"):
                    if build_type and build_type != "containerfile":
                        common.print_msg(f"Skipping '{file}' due to '{build_type}' filter")
                        continue
                    futures.append(executor.submit(process_containerfile, groupdir, file, dry_run))
                elif file.endswith(".image-bootc"):
                    if build_type and build_type != "image-bootc":
                        common.print_msg(f"Skipping '{file}' due to '{build_type}' filter")
                        continue
                    futures.append(executor.submit(process_image_bootc, groupdir, file, dry_run))
                elif file.endswith(".container-encapsulate"):
                    if build_type and build_type != "container-encapsulate":
                        common.print_msg(f"Skipping '{file}' due to '{build_type}' filter")
                        continue
                    futures.append(executor.submit(process_container_encapsulate, groupdir, file, dry_run))
                elif not file.endswith(".template"):
                    common.print_msg(f"Skipping unknown file {file}")
            # Wait for the parallel tasks to complete
            for f in concurrent.futures.as_completed(futures):
                common.print_msg(f"Task {f} completed")
                # Result function generates an exception depending on the task state
                f.result()
    except Exception:
        # Cancel all pending tasks
        for f in futures:
            if not f.done():
                f.cancel()
                common.print_msg(f"Task {f} cancelled")
        # Propagate the exception to the caller
        raise
    finally:
        # Close junit file
        common.close_junit()
def main():
    """Entry point: parse options, prepare repos/templates/mirror registry,
    then build the requested image layers.

    Exits with status 1 on any failure; always runs cleanup_atexit and
    prints a final Build OK/FAILED status line.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser(description="Build image layers using Bootc Image Builder and Podman.")
    parser.add_argument("-d", "--dry-run", action="store_true", help="Dry run: skip executing build commands.")
    parser.add_argument("-f", "--force-rebuild", action="store_true", help="Force rebuilding images that already exist.")
    parser.add_argument("-E", "--no-extract-images", action="store_true", help="Skip container image extraction.")
    parser.add_argument("-X", "--skip-all-builds", action="store_true", help="Skip all image builds.")
    parser.add_argument("-b", "--build-type",
                        choices=["image-bootc", "containerfile", "container-encapsulate"],
                        help="Only build images of the specified type.")
    dirgroup = parser.add_mutually_exclusive_group(required=False)
    dirgroup.add_argument("-l", "--layer-dir", action="append", default=[], help="Path to the layer directory to process. Can be specified multiple times.")
    dirgroup.add_argument("-g", "--group-dir", type=str, help="Path to the group directory to process.")
    dirgroup.add_argument("-t", "--template", type=str, help="Path to a template to build. Allows glob patterns (requires double quotes).")
    args = parser.parse_args()

    # Validate: directory is required unless skip-all-builds mode.
    # Note: the message must reference the actual -X/--skip-all-builds flag.
    if not args.skip_all_builds and not (args.layer_dir or args.group_dir or args.template):
        parser.error("one of the arguments -l/--layer-dir -g/--group-dir -t/--template is required (unless using -X/--skip-all-builds)")

    success_message = False
    try:
        pattern = "*"
        dir2process = None
        # Convert input directories to absolute paths
        if args.group_dir:
            args.group_dir = os.path.abspath(args.group_dir)
            dir2process = args.group_dir
        if args.layer_dir:
            # Convert input layer directories to absolute paths
            args.layer_dir = [os.path.abspath(d) for d in args.layer_dir]
            # Validate each layer directory exists
            for layer_dir in args.layer_dir:
                if not os.path.isdir(layer_dir):
                    raise Exception(f"The layer directory '{layer_dir}' does not exist")
        if args.template:
            args.template = os.path.abspath(args.template)
            dir2process = os.path.dirname(args.template)
            pattern = os.path.basename(args.template)
        # Make sure the input directory exists (only if specified)
        if dir2process and not os.path.isdir(dir2process):
            raise Exception(f"The input directory '{dir2process}' does not exist")
        # Make sure the local RPM repository exists
        if not os.path.isdir(LOCAL_REPO):
            common.run_command([f"{SCRIPTDIR}/build_rpms.sh"], args.dry_run)
        # Initialize force rebuild option
        global FORCE_REBUILD
        if args.force_rebuild:
            FORCE_REBUILD = True
        # Fetch gomplate if necessary
        if not os.path.exists(GOMPLATE):
            gomplate_args = [
                f"{SCRIPTDIR}/../../scripts/fetch_tools.sh",
                "gomplate"
            ]
            common.run_command(gomplate_args, args.dry_run)
        # Determine versions of RPM packages
        set_rpm_version_info_vars()
        # Prepare container images list for mirroring registries
        common.delete_file(CONTAINER_LIST)
        if args.no_extract_images:
            common.print_msg("Skipping container image extraction")
        else:
            extract_container_images(SOURCE_VERSION, LOCAL_REPO, CONTAINER_LIST, args.dry_run)
            # The following images are specific to layers that use fake rpms built from source
            extract_container_images(f"4.{FAKE_NEXT_MINOR_VERSION}.*", NEXT_REPO, CONTAINER_LIST, args.dry_run)
            extract_container_images(PREVIOUS_RELEASE_VERSION, PREVIOUS_RELEASE_REPO, CONTAINER_LIST, args.dry_run)
            extract_container_images(YMINUS2_RELEASE_VERSION, YMINUS2_RELEASE_REPO, CONTAINER_LIST, args.dry_run)
            # The following images are specific to the brew release versions
            if BREW_Y0_RELEASE_VERSION:
                extract_container_images(BREW_Y0_RELEASE_VERSION, BREW_REPO, CONTAINER_LIST, args.dry_run)
            if BREW_Y1_RELEASE_VERSION:
                extract_container_images(BREW_Y1_RELEASE_VERSION, BREW_REPO, CONTAINER_LIST, args.dry_run)
            if BREW_Y2_RELEASE_VERSION:
                extract_container_images(BREW_Y2_RELEASE_VERSION, BREW_REPO, CONTAINER_LIST, args.dry_run)
            if BREW_RC_RELEASE_VERSION:
                extract_container_images(BREW_RC_RELEASE_VERSION, BREW_REPO, CONTAINER_LIST, args.dry_run)
            if BREW_EC_RELEASE_VERSION:
                extract_container_images(BREW_EC_RELEASE_VERSION, BREW_REPO, CONTAINER_LIST, args.dry_run)
            if BREW_NIGHTLY_RELEASE_VERSION:
                extract_container_images(BREW_NIGHTLY_RELEASE_VERSION, BREW_REPO, CONTAINER_LIST, args.dry_run)
            # Sort the images list, only leaving unique entries
            common.sort_uniq_file(CONTAINER_LIST)
        # Process package source templates
        ipkgdir = f"{SCRIPTDIR}/../package-sources-bootc"
        for ifile in os.listdir(ipkgdir):
            # Create full path for output and input file names
            ofile = os.path.join(BOOTC_IMAGE_DIR, ifile)
            ifile = os.path.join(ipkgdir, ifile)
            run_template_cmd(ifile, ofile, args.dry_run)
        # Run the mirror registry
        common.run_command([f"{SCRIPTDIR}/mirror_registry.sh"], args.dry_run)
        # Skip all image builds
        if args.skip_all_builds:
            common.print_msg("Skipping all image builds")
            success_message = True
            return
        # Add local registry credentials to the input pull secret file
        global PULL_SECRET
        opull_secret = os.path.join(BOOTC_IMAGE_DIR, "pull_secret.json")
        common.update_pull_secret(PULL_SECRET, opull_secret, MIRROR_REGISTRY)
        PULL_SECRET = opull_secret
        # Process layer directory contents sorted by length and then alphabetically
        if args.layer_dir:
            for layer_dir in args.layer_dir:
                for item in sorted(os.listdir(layer_dir), key=lambda i: (len(i), i)):
                    item_path = os.path.join(layer_dir, item)
                    # Check if this item is a directory
                    if os.path.isdir(item_path):
                        process_group(item_path, args.build_type, dry_run=args.dry_run)
        else:
            # Process individual group directory or template
            process_group(dir2process, args.build_type, pattern, args.dry_run)
        # Toggle the success flag
        success_message = True
    except Exception as e:
        common.print_msg(f"An error occurred: {e}")
        traceback.print_exc()
        sys.exit(1)
    finally:
        cleanup_atexit(args.dry_run)
        # Exit status message
        common.print_msg("Build " + ("OK" if success_message else "FAILED"))
if __name__ == "__main__":
    # Use the fork start method so ProcessPoolExecutor workers inherit the
    # module globals (e.g. FORCE_REBUILD, PULL_SECRET) set in the parent
    multiprocessing.set_start_method("fork")
    # Keep a reference so the scoped run-time measurement object lives for
    # the whole run (presumably reports on destruction — confirm in common)
    _ = common.MeasureRunTimeInScope("[MAIN] Building Images")
    main()