-rw-r--r--  .gitattributes | 1
-rw-r--r--  .pylintrc | 4
-rw-r--r--  AUTHORS | 2
-rw-r--r--  Makefile | 4
-rw-r--r--  README | 12
-rw-r--r--  TODO | 79
-rw-r--r--  arch/alpha.toml | 32
-rw-r--r--  arch/amd64.toml | 29
-rw-r--r--  arch/arm.toml | 64
-rw-r--r--  arch/arm64.toml | 11
-rw-r--r--  arch/hppa.toml | 8
-rw-r--r--  arch/ia64.toml | 4
-rw-r--r--  arch/loong.toml | 4
-rw-r--r--  arch/m68k.toml | 6
-rw-r--r--  arch/mips.toml | 296
-rw-r--r--  arch/ppc.toml | 91
-rw-r--r--  arch/riscv.toml | 39
-rw-r--r--  arch/s390.toml | 12
-rw-r--r--  arch/sh.toml | 48
-rw-r--r--  arch/sparc.toml | 12
-rw-r--r--  arch/x86.toml | 55
-rwxr-xr-x  bin/catalyst | 40
-rwxr-xr-x  bin/catalyst.git | 4
-rwxr-xr-x  bin/pylint | 51
-rw-r--r--  catalyst/__init__.py | 8
-rw-r--r--  catalyst/arch/__init__.py | 0
-rw-r--r--  catalyst/arch/alpha.py | 75
-rw-r--r--  catalyst/arch/amd64.py | 76
-rw-r--r--  catalyst/arch/arm.py | 131
-rw-r--r--  catalyst/arch/arm64.py | 13
-rw-r--r--  catalyst/arch/hppa.py | 37
-rw-r--r--  catalyst/arch/ia64.py | 13
-rw-r--r--  catalyst/arch/m68k.py | 20
-rw-r--r--  catalyst/arch/mips.py | 501
-rw-r--r--  catalyst/arch/powerpc.py | 158
-rw-r--r--  catalyst/arch/riscv.py | 13
-rw-r--r--  catalyst/arch/s390.py | 30
-rw-r--r--  catalyst/arch/sh.py | 113
-rw-r--r--  catalyst/arch/sparc.py | 36
-rw-r--r--  catalyst/arch/x86.py | 143
-rw-r--r--  catalyst/base/clearbase.py | 117
-rw-r--r--  catalyst/base/genbase.py | 91
-rw-r--r--  catalyst/base/resume.py | 237
-rw-r--r--  catalyst/base/stagebase.py | 3344
-rw-r--r--  catalyst/base/targetbase.py | 53
-rw-r--r--  catalyst/builder.py | 30
-rw-r--r--  catalyst/config.py | 233
-rw-r--r--  catalyst/context.py | 54
-rw-r--r--  catalyst/defaults.py | 232
-rw-r--r--  catalyst/fileops.py | 216
-rw-r--r--  catalyst/hash_utils.py | 128
-rw-r--r--  catalyst/lock.py | 31
-rw-r--r--  catalyst/log.py | 153
-rw-r--r--  catalyst/main.py | 734
-rw-r--r--  catalyst/support.py | 441
-rw-r--r--  catalyst/targets/embedded.py | 66
-rw-r--r--  catalyst/targets/grp.py | 97
-rw-r--r--  catalyst/targets/livecd_stage1.py | 88
-rw-r--r--  catalyst/targets/livecd_stage2.py | 172
-rw-r--r--  catalyst/targets/netboot.py | 271
-rw-r--r--  catalyst/targets/netboot2.py | 161
-rw-r--r--  catalyst/targets/snapshot.py | 204
-rw-r--r--  catalyst/targets/stage1.py | 216
-rw-r--r--  catalyst/targets/stage2.py | 71
-rw-r--r--  catalyst/targets/stage3.py | 30
-rw-r--r--  catalyst/targets/stage4.py | 74
-rw-r--r--  catalyst/targets/tinderbox.py | 47
-rw-r--r--  catalyst/version.py | 96
-rw-r--r--  doc/HOWTO.txt | 17
-rw-r--r--  doc/catalyst-config.5.txt | 167
-rw-r--r--  doc/catalyst-spec.5.txt | 135
-rw-r--r--  doc/catalyst.1.txt | 19
-rwxr-xr-x  doc/make_subarch_table_guidexml.py | 145
-rwxr-xr-x  doc/make_target_table.py | 40
-rw-r--r--  etc/catalyst.conf | 191
-rwxr-xr-x  etc/catalystrc | 5
-rw-r--r--  examples/gamecd.conf.example | 6
-rw-r--r--  examples/generic_stage_template.spec | 18
-rw-r--r--  examples/grp_template.spec | 117
-rw-r--r--  examples/livecd-stage1_template.spec | 18
-rw-r--r--  examples/livecd-stage2_template.spec | 94
-rw-r--r--  examples/netboot2_template.spec | 301
-rw-r--r--  examples/netboot_template.spec | 402
-rw-r--r--  examples/stage4_template.spec | 76
-rw-r--r--  examples/tinderbox_template.spec | 93
-rw-r--r--  livecd/cdtar/elilo-3.6-cdtar.tar.bz2 | bin 129254 -> 0 bytes
-rw-r--r--  livecd/cdtar/grub-memtest86+-cdtar.tar.bz2 | bin 109665 -> 0 bytes
-rw-r--r--  livecd/cdtar/isolinux-3.72-cdtar.tar.bz2 | bin 11110 -> 0 bytes
-rw-r--r--  livecd/cdtar/isolinux-3.72-memtest86+-cdtar.tar.bz2 | bin 65919 -> 0 bytes
-rw-r--r--  livecd/cdtar/silo-1.4.13-sparc-cdtar.tar.bz2 | bin 115857 -> 0 bytes
-rw-r--r--  livecd/cdtar/yaboot-1.3.13-cdtar.tar.bz2 | bin 51088 -> 0 bytes
-rw-r--r--  livecd/files/README.txt | 4
-rw-r--r--  livecd/files/gamecd.motd.txt | 8
-rw-r--r--  livecd/files/livecd-bashrc | 13
-rw-r--r--  livecd/files/livecd-local.start | 9
-rw-r--r--  livecd/files/livecd.motd.txt | 9
-rw-r--r--  livecd/files/minimal.motd.txt | 2
-rw-r--r--  livecd/files/universal.motd.txt | 5
-rw-r--r--  livecd/files/x86-F2.msg | 22
-rw-r--r--  livecd/files/x86-F3.msg | 22
-rw-r--r--  livecd/files/x86-F4.msg | 20
-rw-r--r--  livecd/files/x86-F5.msg | 22
-rw-r--r--  livecd/files/x86-F6.msg | 14
-rw-r--r--  livecd/files/x86-F7.msg | 22
-rwxr-xr-x  setup.py | 171
-rwxr-xr-x  targets/embedded/chroot.sh | 12
-rwxr-xr-x  targets/embedded/controller.sh (renamed from targets/embedded/embedded-controller.sh) | 25
-rwxr-xr-x  targets/embedded/embedded-chroot.sh | 17
-rwxr-xr-x  targets/embedded/embedded-fs-runscript.sh | 44
-rwxr-xr-x  targets/embedded/fs-runscript.sh | 29
-rwxr-xr-x  targets/embedded/preclean-chroot.sh (renamed from targets/embedded/embedded-preclean-chroot.sh) | 0
-rwxr-xr-x  targets/grp/grp-chroot.sh | 21
-rwxr-xr-x  targets/grp/grp-controller.sh | 34
-rwxr-xr-x  targets/grp/grp-preclean-chroot.sh | 7
-rwxr-xr-x  targets/livecd-stage1/chroot.sh (renamed from targets/livecd-stage1/livecd-stage1-chroot.sh) | 3
-rwxr-xr-x  targets/livecd-stage1/controller.sh (renamed from targets/livecd-stage1/livecd-stage1-controller.sh) | 7
-rwxr-xr-x  targets/livecd-stage1/preclean-chroot.sh (renamed from targets/livecd-stage1/livecd-stage1-preclean-chroot.sh) | 0
-rwxr-xr-x  targets/livecd-stage2/controller.sh (renamed from targets/livecd-stage2/livecd-stage2-controller.sh) | 72
-rwxr-xr-x  targets/netboot/controller.sh | 61
-rwxr-xr-x  targets/netboot/copyfile.sh (renamed from targets/netboot2/netboot2-copyfile.sh) | 0
-rw-r--r--  targets/netboot/nb-busybox.cf (renamed from targets/netboot2/nb-busybox.cf) | 0
-rwxr-xr-x  targets/netboot/netboot-chroot.sh | 6
-rwxr-xr-x  targets/netboot/netboot-combine.sh | 112
-rwxr-xr-x  targets/netboot/netboot-controller.sh | 82
-rwxr-xr-x  targets/netboot/netboot-image.sh | 13
-rwxr-xr-x  targets/netboot/pkg.sh (renamed from targets/netboot2/netboot2-pkg.sh) | 4
-rwxr-xr-x  targets/netboot2/netboot2-controller.sh | 75
-rwxr-xr-x  targets/stage1/build.py | 34
-rwxr-xr-x  targets/stage1/chroot.sh | 106
-rwxr-xr-x  targets/stage1/controller.sh (renamed from targets/stage1/stage1-controller.sh) | 11
-rwxr-xr-x  targets/stage1/preclean-chroot.sh (renamed from targets/stage1/stage1-preclean-chroot.sh) | 4
-rwxr-xr-x  targets/stage1/stage1-chroot.sh | 83
-rwxr-xr-x  targets/stage2/chroot.sh | 15
-rwxr-xr-x  targets/stage2/controller.sh | 25
-rwxr-xr-x  targets/stage2/preclean-chroot.sh (renamed from targets/stage2/stage2-preclean-chroot.sh) | 6
-rwxr-xr-x  targets/stage2/stage2-chroot.sh | 9
-rwxr-xr-x  targets/stage2/stage2-controller.sh | 31
-rwxr-xr-x  targets/stage3/chroot.sh | 12
-rwxr-xr-x  targets/stage3/controller.sh | 24
-rwxr-xr-x  targets/stage3/preclean-chroot.sh (renamed from targets/stage3/stage3-preclean-chroot.sh) | 4
-rwxr-xr-x  targets/stage3/stage3-chroot.sh | 8
-rwxr-xr-x  targets/stage3/stage3-controller.sh | 30
-rwxr-xr-x  targets/stage4/chroot.sh (renamed from targets/stage4/stage4-chroot.sh) | 3
-rwxr-xr-x  targets/stage4/controller.sh | 75
-rwxr-xr-x  targets/stage4/preclean-chroot.sh (renamed from targets/stage4/stage4-preclean-chroot.sh) | 0
-rwxr-xr-x  targets/stage4/stage4-controller.sh | 96
-rwxr-xr-x  targets/support/bootloader-setup.sh | 279
-rwxr-xr-x  targets/support/chroot-functions.sh | 291
-rwxr-xr-x  targets/support/create-iso.sh | 286
-rwxr-xr-x  targets/support/depclean.sh | 2
-rwxr-xr-x  targets/support/filesystem-functions.sh | 84
-rwxr-xr-x  targets/support/functions.sh | 192
-rwxr-xr-x  targets/support/kill-chroot-pids.sh | 62
-rwxr-xr-x  targets/support/kmerge.sh | 376
-rwxr-xr-x  targets/support/livecdfs-update.sh | 320
-rwxr-xr-x  targets/support/netboot-final.sh (renamed from targets/support/netboot2-final.sh) | 22
-rwxr-xr-x  targets/support/post-kmerge.sh | 11
-rw-r--r--  targets/support/pre-distkmerge.sh | 7
-rwxr-xr-x  targets/support/pre-kmerge.sh | 47
-rwxr-xr-x  targets/support/rc-update.sh | 9
-rwxr-xr-x  targets/support/target_image_setup.sh | 40
-rwxr-xr-x  targets/support/unmerge.sh | 2
-rwxr-xr-x  targets/tinderbox/tinderbox-chroot.sh | 33
-rwxr-xr-x  targets/tinderbox/tinderbox-controller.sh | 21
-rwxr-xr-x  targets/tinderbox/tinderbox-preclean-chroot.sh | 5
165 files changed, 5930 insertions, 9508 deletions
diff --git a/.gitattributes b/.gitattributes
index 19ad929b..1a679c17 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,7 +1,6 @@
AUTHORS ident
ChangeLog ident
README ident
-TODO ident
catalyst ident
*.py ident
*.sh ident
diff --git a/.pylintrc b/.pylintrc
index 40851664..5428e349 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -26,7 +26,6 @@ load-plugins=
# We should clean up things so we can enable:
# missing-docstring -- add lots of docstrings everywhere!
# bad-whitespace -- fix spacing everywhere
-# bad-continuation -- might be hard with tab indentation policy
# invalid-name -- need to manage constants better
# line-too-long -- figure out a length and stick to it
# super-init-not-called -- fix the classes __init__ structure
@@ -46,7 +45,6 @@ disable=
fixme,
broad-except,
bad-whitespace,
- bad-continuation,
invalid-name,
line-too-long,
super-init-not-called,
@@ -85,7 +83,7 @@ max-module-lines=1000
# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
# tab).
-indent-string='\t'
+indent-string='    '
[MISCELLANEOUS]
diff --git a/AUTHORS b/AUTHORS
index 3bf969bd..41366b8c 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,5 +1,3 @@
-# $Id$
-
The copyright for catalyst is held by the Gentoo Foundation and by each
of the individual contributors.
diff --git a/Makefile b/Makefile
index 6f7eb102..b359e8b7 100644
--- a/Makefile
+++ b/Makefile
@@ -7,7 +7,7 @@ MAN_PAGES = $(patsubst doc/%.txt,files/%,$(MAN_PAGE_SOURCES))
MAN_PAGE_INCLUDES = doc/subarches.generated.txt doc/targets.generated.txt
DOC_SOURCES = $(filter-out $(MAN_PAGE_SOURCES) $(MAN_PAGE_INCLUDES),$(wildcard doc/*.txt))
DOCS = $(patsubst doc/%.txt,files/%.html,$(DOC_SOURCES))
-DOC_SIDE_EFFECTS = files/docbook-xsl.css
+DOC_SIDE_EFFECTS = files/docbook-xsl.css doc/subarches.generated.xml
EXTRA_DIST = $(MAN_PAGES) $(DOCS) $(DOC_SIDE_EFFECTS)
GENERATED_FILES = $(MAN_PAGES) $(MAN_PAGE_INCLUDES) $(DOCS) $(DOC_SIDE_EFFECTS)
@@ -27,7 +27,7 @@ $(MAN_PAGES): files/%: doc/%.txt doc/asciidoc.conf Makefile catalyst | files
files/catalyst.1: doc/subarches.generated.txt | files
files/catalyst-spec.5: doc/subarches.generated.txt doc/targets.generated.txt | files
-doc/subarches.generated.txt: $(wildcard catalyst/arch/*.py) doc/make_subarch_table_guidexml.py
+doc/subarches.generated.txt doc/subarches.generated.xml: $(wildcard arch/*.toml) doc/make_subarch_table_guidexml.py
./doc/make_subarch_table_guidexml.py
doc/targets.generated.txt: doc/make_target_table.py $(wildcard catalyst/targets/*.py)
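The Makefile hunk above switches the generated subarch tables from the old catalyst/arch/*.py plugins to the new arch/*.toml data files. For context, a minimal sketch of loading those files with tomllib (Python 3.11+); this is illustrative only and is not the actual doc/make_subarch_table_guidexml.py:

    import glob
    import tomllib  # Python 3.11+; older interpreters would need the third-party 'tomli' package

    def load_subarches(pattern="arch/*.toml"):
        """Merge every arch/*.toml file into one {arch: {subarch: settings}} mapping."""
        data = {}
        for path in sorted(glob.glob(pattern)):
            with open(path, "rb") as stream:  # tomllib only accepts binary file objects
                for arch, subarches in tomllib.load(stream).items():
                    data.setdefault(arch, {}).update(subarches)
        return data
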
diff --git a/README b/README
index 7be176c3..594de9e1 100644
--- a/README
+++ b/README
@@ -1,5 +1,3 @@
-# $Id$
-
Licensing
========================
@@ -19,10 +17,10 @@ simple and reproducable manner. Use at your own risk.
Requirements
=======================
-- Python 2.7 or greater (may still work with 2.6)
-- An ebuild repository snapshot (or an ebuild tree to create one)
+- Python 3.8 or greater
- A generic stage3 tarball for your architecture
-- shash for digest support
+- A squashfs ebuild repository snapshot
+ - Or an ebuild git repo with sys-fs/squashfs-tools-ng and dev-vcs/git
What is catalyst?
========================
@@ -36,8 +34,6 @@ Catalyst is capable of:
- Building installation stages
- Building bootable LiveCDs
-- Building GRP (Gentoo Reference Platform) sets
-- Setting up a Tinderbox target for test building
- Building netboot images
Configuring catalyst
@@ -52,7 +48,7 @@ distribution tarball's files directory.
Example catalyst.conf:
-distdir="/usr/portage/distfiles"
+distdir="/var/cache/distfiles"
options="pkgcache kerncache"
sharedir="/usr/share/catalyst"
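The catalyst.conf example above consists of plain key="value" lines. Purely as an illustration of that format -- catalyst's real parsing lives in catalyst/config.py and may differ -- such a file could be read like this:

    def parse_conf(path):
        """Read simple key="value" lines, skipping blanks and comments."""
        settings = {}
        with open(path, encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith("#") or "=" not in line:
                    continue
                key, _, value = line.partition("=")
                settings[key.strip()] = value.strip().strip('"')
        return settings
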
diff --git a/TODO b/TODO
deleted file mode 100644
index a2ee05dd..00000000
--- a/TODO
+++ /dev/null
@@ -1,79 +0,0 @@
-# $Id$
-
-This file is a rough list of changes that need to be made to catalyst.
-
-Global:
-- Remove spec_prefix from all exported variables
- - variables without spec_prefix are global and used in all targets
- - variables with spec_prefix only apply to that target
-- add multiple target support for spec files
- - target: stage1
- - targets: stage1 stage2 stage3 stage4 livecd netboot
-- add more validation and checking for code which affects both host and target
- - setup defaults if host isn't configured
- - allow configuration globally (catalyst.conf), per-spec, or per-target
- - ccache
- - check host settings (size, location, etc) and restore them when done
- - distcc
- - check host settings (distcc_hosts, etc) and restore them when done
- - icecream
- - check host settings (icecream_hosts, etc) and restore them when done
-- add support for new portage features
- - Cache backends
- - metadata_overlay (portage default in 2.1.6)
- - sqlite
- - Jobs
- - Load Average
- - Dependency checking
- - Deep checking
- - Complete Graph
- - Build-time dependencies
- - USE changes
- - newuse versus changed-use
- - Failure detection
- - Add --keep-going support
-- detect GCC version in seed stages for supported settings (CFLAGS)
-
-Config:
-- security
- - sign materials automatically
-- logging
- - add logging support
- - file-based
- - syslog
- - add notification capabilities
- - email
- - snmp trap
-- separate out options that control catalyst versus the package manager
- - options key should specify catalyst options
- - new keys for package managers
- - portage_features
- - pkgcore_features
-
-Targets:
-- add support for livedvd
- - do we use livecd/type or something new
- - make livedvd type perform several actions automatically, if configured
- - auto-fetch distfiles
- - automatically pull stages
- - local disk via absolute or relative path
- - URI
-- change user creation to be more flexible
- - stage4 and netboot support
-- build boot software for targets on-demand
- - removes cdtar requirement on remaining arches
- - allows one to specify multiple bootloaders
- - boot/loader: grub elilo
- - netboot/boot/loader: pxelinux elilo
- - livecd/boot/loader: isolinux elilo
- - add ability to specify other "bootables" to build/install, like memtest86+
- - livecd/boot/image: memtest|/path/to/image
- - do we also need a way to specify an initrd with this?
-- add variable support for spec files
- - automatic substitution
- - %DATESTAMP%
- - %BUILDDIR%
- - user-defined
- - %DISTRIBUTION%
- - %AUTHOR%
-
diff --git a/arch/alpha.toml b/arch/alpha.toml
new file mode 100644
index 00000000..803bed4a
--- /dev/null
+++ b/arch/alpha.toml
@@ -0,0 +1,32 @@
+[alpha.alpha]
+COMMON_FLAGS = "-mieee -pipe -O2 -mcpu=ev4"
+CHOST = "alpha-unknown-linux-gnu"
+
+[alpha.ev4]
+COMMON_FLAGS = "-mieee -pipe -O2 -mcpu=ev4"
+CHOST = "alpha-unknown-linux-gnu"
+
+[alpha.ev45]
+COMMON_FLAGS = "-mieee -pipe -O2 -mcpu=ev45"
+CHOST = "alpha-unknown-linux-gnu"
+
+[alpha.ev5]
+COMMON_FLAGS = "-mieee -pipe -O2 -mcpu=ev5"
+CHOST = "alpha-unknown-linux-gnu"
+
+[alpha.ev56]
+COMMON_FLAGS = "-mieee -pipe -O2 -mcpu=ev56"
+CHOST = "alpha-unknown-linux-gnu"
+
+[alpha.pca56]
+COMMON_FLAGS = "-mieee -pipe -O2 -mcpu=pca56"
+CHOST = "alpha-unknown-linux-gnu"
+
+[alpha.ev6]
+COMMON_FLAGS = "-mieee -pipe -O2 -mcpu=ev6"
+CHOST = "alpha-unknown-linux-gnu"
+
+[alpha.ev67]
+COMMON_FLAGS = "-mieee -pipe -O2 -mcpu=ev67"
+CHOST = "alpha-unknown-linux-gnu"
+
diff --git a/arch/amd64.toml b/arch/amd64.toml
new file mode 100644
index 00000000..7cf0bce2
--- /dev/null
+++ b/arch/amd64.toml
@@ -0,0 +1,29 @@
+[amd64.amd64]
+COMMON_FLAGS = "-O2 -pipe"
+
+[amd64.x86_64]
+COMMON_FLAGS = "-O2 -pipe"
+
+[amd64.k8]
+COMMON_FLAGS = "-O2 -march=k8 -pipe"
+CPU_FLAGS_X86 = [ "mmx", "mmxext", "3dnow", "3dnowext", "sse", "sse2",]
+
+[amd64.nocona]
+COMMON_FLAGS = "-O2 -march=nocona -pipe"
+CPU_FLAGS_X86 = [ "mmx", "mmxext", "sse", "sse2", "sse3",]
+
+[amd64.core2]
+COMMON_FLAGS = "-O2 -march=core2 -pipe"
+CPU_FLAGS_X86 = [ "mmx", "mmxext", "sse", "sse2", "sse3", "ssse3",]
+
+[amd64.k8-sse3]
+COMMON_FLAGS = "-O2 -march=k8-sse3 -pipe"
+CPU_FLAGS_X86 = [ "mmx", "mmxext", "3dnow", "3dnowext", "sse", "sse2", "sse3",]
+
+[amd64.amdfam10]
+COMMON_FLAGS = "-O2 -march=amdfam10 -pipe"
+CPU_FLAGS_X86 = [ "mmx", "mmxext", "3dnow", "3dnowext", "sse", "sse2", "sse3", "sse4a",]
+
+[amd64.x32]
+COMMON_FLAGS = "-O2 -pipe"
+
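Each subarch table above carries at most COMMON_FLAGS, CHOST and a CPU_FLAGS_X86 list. A hedged sketch of rendering one entry (for example amd64.core2) as make.conf assignments; the helper name and output format are assumptions, not catalyst's actual stage code:

    def make_conf_lines(subarch):
        """Render one parsed subarch table as make.conf-style assignments."""
        lines = []
        for key in ("COMMON_FLAGS", "CHOST"):
            if key in subarch:
                lines.append(f'{key}="{subarch[key]}"')
        if "CPU_FLAGS_X86" in subarch:
            # TOML lists become a single space-separated value.
            lines.append('CPU_FLAGS_X86="%s"' % " ".join(subarch["CPU_FLAGS_X86"]))
        return lines
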
diff --git a/arch/arm.toml b/arch/arm.toml
new file mode 100644
index 00000000..6e9cc635
--- /dev/null
+++ b/arch/arm.toml
@@ -0,0 +1,64 @@
+[arm.arm]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "arm-unknown-linux-gnu"
+
+[arm.armv4l]
+COMMON_FLAGS = "-O2 -pipe -march=armv4"
+CHOST = "armv4l-unknown-linux-gnu"
+
+[arm.armv4tl]
+COMMON_FLAGS = "-O2 -pipe -march=armv4t"
+CHOST = "armv4tl-softfloat-linux-gnueabi"
+
+[arm.armv5tl]
+COMMON_FLAGS = "-O2 -pipe -march=armv5t"
+CHOST = "armv5tl-softfloat-linux-gnueabi"
+
+[arm.armv5tel]
+COMMON_FLAGS = "-O2 -pipe -march=armv5te"
+CHOST = "armv5tel-softfloat-linux-gnueabi"
+
+[arm.armv5tejl]
+COMMON_FLAGS = "-O2 -pipe -march=armv5te"
+CHOST = "armv5tejl-softfloat-linux-gnueabi"
+
+[arm.armv6j]
+COMMON_FLAGS = "-O2 -pipe -march=armv6j -mfpu=vfp -mfloat-abi=softfp"
+CHOST = "armv6j-softfp-linux-gnueabi"
+
+[arm.armv6z]
+COMMON_FLAGS = "-O2 -pipe -march=armv6z -mfpu=vfp -mfloat-abi=softfp"
+CHOST = "armv6z-softfp-linux-gnueabi"
+
+[arm.armv6zk]
+COMMON_FLAGS = "-O2 -pipe -march=armv6zk -mfpu=vfp -mfloat-abi=softfp"
+CHOST = "armv6zk-softfp-linux-gnueabi"
+
+[arm.armv7a]
+COMMON_FLAGS = "-O2 -pipe -march=armv7-a -mfpu=vfpv3-d16 -mfloat-abi=softfp"
+CHOST = "armv7a-softfp-linux-gnueabi"
+
+[arm.armv6j_hardfp]
+COMMON_FLAGS = "-O2 -pipe -march=armv6j -mfpu=vfp -mfloat-abi=hard"
+CHOST = "armv6j-unknown-linux-gnueabihf"
+
+[arm.armv6j_hardfp_musl]
+COMMON_FLAGS = "-O2 -pipe -march=armv6j -mfpu=vfp -mfloat-abi=hard"
+CHOST = "armv6j-unknown-linux-musleabihf"
+
+[arm.armv7a_hardfp]
+COMMON_FLAGS = "-O2 -pipe -march=armv7-a -mfpu=vfpv3-d16 -mfloat-abi=hard"
+CHOST = "armv7a-unknown-linux-gnueabihf"
+
+[arm.armv7a_hardfp_musl]
+COMMON_FLAGS = "-O2 -pipe -march=armv7-a -mfpu=vfpv3-d16 -mfloat-abi=hard"
+CHOST = "armv7a-unknown-linux-musleabihf"
+
+[arm.armeb]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "armeb-unknown-linux-gnu"
+
+[arm.armv5teb]
+COMMON_FLAGS = "-O2 -pipe -mcpu=xscale"
+CHOST = "armv5teb-softfloat-linux-gnueabi"
+
diff --git a/arch/arm64.toml b/arch/arm64.toml
new file mode 100644
index 00000000..5f522d6c
--- /dev/null
+++ b/arch/arm64.toml
@@ -0,0 +1,11 @@
+[arm64.arm64]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "aarch64-unknown-linux-gnu"
+
+[arm64.aarch64]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "aarch64-unknown-linux-gnu"
+
+[arm64.aarch64_be]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "aarch64_be-unknown-linux-gnu"
diff --git a/arch/hppa.toml b/arch/hppa.toml
new file mode 100644
index 00000000..85a5b010
--- /dev/null
+++ b/arch/hppa.toml
@@ -0,0 +1,8 @@
+[hppa."hppa1.1"]
+COMMON_FLAGS = "-O2 -pipe -march=1.1"
+CHOST = "hppa1.1-unknown-linux-gnu"
+
+[hppa."hppa2.0"]
+COMMON_FLAGS = "-O2 -pipe -march=2.0"
+CHOST = "hppa2.0-unknown-linux-gnu"
+
diff --git a/arch/ia64.toml b/arch/ia64.toml
new file mode 100644
index 00000000..014ff58c
--- /dev/null
+++ b/arch/ia64.toml
@@ -0,0 +1,4 @@
+[ia64.ia64]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "ia64-unknown-linux-gnu"
+
diff --git a/arch/loong.toml b/arch/loong.toml
new file mode 100644
index 00000000..7ff92c03
--- /dev/null
+++ b/arch/loong.toml
@@ -0,0 +1,4 @@
+[loong.loong]
+COMMON_FLAGS = " -pipe -O2"
+CHOST = "loongarch64-unknown-linux-gnu"
+
diff --git a/arch/m68k.toml b/arch/m68k.toml
new file mode 100644
index 00000000..695f0f83
--- /dev/null
+++ b/arch/m68k.toml
@@ -0,0 +1,6 @@
+[m68k.m68k]
+COMMON_FLAGS = " -pipe -O2"
+CHOST = "m68k-unknown-linux-gnu"
+
+[m68k.m68k_musl]
+COMMON_FLAGS = " -pipe -O2"
diff --git a/arch/mips.toml b/arch/mips.toml
new file mode 100644
index 00000000..b22d4567
--- /dev/null
+++ b/arch/mips.toml
@@ -0,0 +1,296 @@
+[mips.mips1]
+CHOST = "mips-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips1 -mabi=32 -mplt -pipe"
+
+[mips.mips2]
+CHOST = "mips-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips2 -mabi=32 -mplt -pipe"
+
+[mips.mips2_softfloat]
+CHOST = "mips-softfloat-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips2 -mabi=32 -mplt -msoft-float -pipe"
+
+[mips.mips2_musl]
+CHOST = "mips-gentoo-linux-musl"
+COMMON_FLAGS = "-O2 -march=mips2 -mabi=32 -mplt -pipe"
+
+[mips.mips32]
+CHOST = "mips-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips32 -mabi=32 -mplt -pipe"
+
+[mips.mips32_softfloat]
+CHOST = "mips-softfloat-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips32 -mabi=32 -mplt -msoft-float -pipe"
+
+[mips.mips32r2]
+CHOST = "mips-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips32r2 -mabi=32 -mplt -pipe"
+
+[mips.mips32r2_softfloat]
+CHOST = "mips-softfloat-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips32r2 -mabi=32 -mplt -msoft-float -pipe"
+
+[mips.mips3]
+CHOST = "mips-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips3 -mabi=32 -mplt -mfix-r4000 -mfix-r4400 -pipe"
+
+[mips.mips3_musl]
+CHOST = "mips-gentoo-linux-musl"
+COMMON_FLAGS = "-O2 -march=mips3 -mabi=32 -mplt -mfix-r4000 -mfix-r4400 -pipe"
+
+[mips.mips4_r5k]
+CHOST = "mips-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r5k -mabi=32 -mplt -pipe"
+
+[mips.mips4_r10k]
+CHOST = "mips-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r10k -mabi=32 -mplt -pipe"
+
+[mips.mips4_r12k]
+CHOST = "mips-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r12k -mno-fix-r10000 -mabi=32 -mplt -pipe"
+
+[mips.mips64]
+CHOST = "mips-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64 -mabi=32 -mplt -pipe"
+
+[mips.mips64r2]
+CHOST = "mips-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64r2 -mabi=32 -mplt -pipe"
+
+[mips.mipsel1]
+CHOST = "mipsel-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips1 -mabi=32 -mplt -pipe"
+
+[mips.mipsel2]
+CHOST = "mipsel-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips2 -mabi=32 -mplt -pipe"
+
+[mips.mipsel2_softfloat]
+CHOST = "mipsel-softfloat-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips2 -mabi=32 -mplt -msoft-float -pipe"
+
+[mips.mipsel2_musl]
+CHOST = "mipsel-gentoo-linux-musl"
+COMMON_FLAGS = "-O2 -march=mips2 -mabi=32 -mplt -pipe"
+
+[mips.mips32el]
+CHOST = "mipsel-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips32 -mabi=32 -mplt -pipe"
+
+[mips.mips32el_softfloat]
+CHOST = "mipsel-softfloat-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips32 -mabi=32 -mplt -msoft-float -pipe"
+
+[mips.mips32r2el]
+CHOST = "mipsel-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips32r2 -mabi=32 -mplt -pipe"
+
+[mips.mips32r2el_softfloat]
+CHOST = "mipsel-softfloat-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips32r2 -mabi=32 -mplt -msoft-float -pipe"
+
+[mips.mipsel3]
+CHOST = "mipsel-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips3 -mabi=32 -mplt -Wa,-mfix-loongson2f-nop -mfix-r5900 -pipe"
+
+[mips.mipsel3_musl]
+CHOST = "mipsel-gentoo-linux-musl"
+COMMON_FLAGS = "-O2 -march=mips3 -mabi=32 -mplt -Wa,-mfix-loongson2f-nop -mfix-r5900 -pipe"
+
+[mips.mipsel4_r5k]
+CHOST = "mipsel-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r5k -mabi=32 -mplt -pipe"
+
+[mips.mips64el]
+CHOST = "mipsel-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64 -mabi=32 -mplt -pipe"
+
+[mips.mips64r2el]
+CHOST = "mipsel-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64r2 -mabi=32 -mplt -pipe"
+
+[mips.loongson2e]
+CHOST = "mipsel-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=loongson2e -mabi=32 -mplt -pipe"
+
+[mips.loongson2f]
+CHOST = "mipsel-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=loongson2f -mabi=32 -mplt -Wa,-mfix-loongson2f-nop -pipe"
+
+[mips.loongson3a]
+CHOST = "mipsel-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=loongson3a -mabi=32 -mplt -pipe"
+
+[mips64.mips3_n32]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips3 -mabi=n32 -mplt -mfix-r4000 -mfix-r4400 -pipe"
+
+[mips64.mips3_n32_softfloat]
+CHOST = "mips64-softfloat-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips3 -mabi=n32 -mplt -mfix-r4000 -mfix-r4400 -msoft-float -pipe"
+
+[mips64.mips3_n64]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips3 -mabi=64 -mfix-r4000 -mfix-r4400 -pipe"
+
+[mips64.mips3_n64_musl]
+CHOST = "mips64-gentoo-linux-musl"
+COMMON_FLAGS = "-O2 -march=mips3 -mabi=64 -mfix-r4000 -mfix-r4400 -pipe"
+
+[mips64.mips3_multilib]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips3 -mplt -mfix-r4000 -mfix-r4400 -pipe"
+
+[mips64.mips4_r5k_n32]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r5k -mabi=n32 -mplt -pipe"
+
+[mips64.mips4_r5k_n64]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r5k -mabi=64 -pipe"
+
+[mips64.mips4_r5k_multilib]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r5k -mplt -pipe"
+
+[mips64.mips4_r10k_n32]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r10k -mabi=n32 -mplt -pipe"
+
+[mips64.mips4_r10k_n64]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r10k -mabi=64 -pipe"
+
+[mips64.mips4_r10k_multilib]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r10k -mplt -pipe"
+
+[mips64.mips4_r12k_n32]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r12k -mno-fix-r10000 -mabi=n32 -mplt -pipe"
+
+[mips64.mips4_r12k_n64]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r12k -mno-fix-r10000 -mabi=64 -pipe"
+
+[mips64.mips4_r12k_multilib]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r12k -mno-fix-r10000 -mplt -pipe"
+
+[mips64.mips64_n32]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64 -mabi=n32 -mplt -pipe"
+
+[mips64.mips64_n64]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64 -mabi=64 -pipe"
+
+[mips64.mips64_multilib]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64 -mplt -pipe"
+
+[mips64.mips64r2_n32]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64r2 -mabi=n32 -mplt -pipe"
+
+[mips64.mips64r2_n64]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64r2 -mabi=64 -pipe"
+
+[mips64.mips64r2_multilib]
+CHOST = "mips64-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64r2 -mplt -pipe"
+
+[mips64.mipsel3_n32]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips3 -mabi=n32 -mplt -Wa,-mfix-loongson2f-nop -mfix-r5900 -pipe"
+
+[mips64.mipsel3_n32_softfloat]
+CHOST = "mips64el-softfloat-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips3 -mabi=n32 -mplt -Wa,-mfix-loongson2f-nop -mfix-r5900 -msoft-float -pipe"
+
+[mips64.mipsel3_n64]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips3 -mabi=64 -Wa,-mfix-loongson2f-nop -pipe"
+
+[mips64.mipsel3_multilib]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips3 -mplt -Wa,-mfix-loongson2f-nop -mfix-r5900 -pipe"
+
+[mips64.mipsel4_r5k_n32]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r5k -mabi=n32 -mplt -pipe"
+
+[mips64.mipsel4_r5k_n64]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r5k -mabi=64 -pipe"
+
+[mips64.mipsel4_r5k_multilib]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=r5k -mplt -pipe"
+
+[mips64.mips64el_n32]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64 -mabi=n32 -mplt -pipe"
+
+[mips64.mips64el_n64]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64 -mabi=64 -pipe"
+
+[mips64.mips64el_n64_musl]
+CHOST = "mips64el-gentoo-linux-musl"
+COMMON_FLAGS = "-O2 -march=mips64 -mabi=64 -pipe"
+
+[mips64.mips64el_multilib]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64 -mplt -pipe"
+
+[mips64.mips64r2el_n32]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64r2 -mabi=n32 -mplt -pipe"
+
+[mips64.mips64r2el_n64]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64r2 -mabi=64 -pipe"
+
+[mips64.mips64r2el_multilib]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=mips64r2 -mplt -pipe"
+
+[mips64.loongson2e_n32]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=loongson2e -mabi=n32 -mplt -pipe"
+
+[mips64.loongson2e_n64]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=loongson2e -mabi=64 -pipe"
+
+[mips64.loongson2e_multilib]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=loongson2e -mplt -pipe"
+
+[mips64.loongson2f_n32]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=loongson2f -mabi=n32 -mplt -Wa,-mfix-loongson2f-nop -pipe"
+
+[mips64.loongson2f_n64]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=loongson2f -mabi=64 -Wa,-mfix-loongson2f-nop -pipe"
+
+[mips64.loongson2f_multilib]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=loongson2f -mplt -Wa,-mfix-loongson2f-nop -pipe"
+
+[mips64.loongson3a_n32]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=loongson3a -mabi=n32 -mplt -pipe"
+
+[mips64.loongson3a_n64]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=loongson3a -mabi=64 -pipe"
+
+[mips64.loongson3a_multilib]
+CHOST = "mips64el-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -march=loongson3a -mplt -pipe"
+
diff --git a/arch/ppc.toml b/arch/ppc.toml
new file mode 100644
index 00000000..c8623cca
--- /dev/null
+++ b/arch/ppc.toml
@@ -0,0 +1,91 @@
+[setarch.ppc]
+arch = "linux32"
+if_build = "ppc64"
+
+[ppc64.970]
+COMMON_FLAGS = "-O2 -pipe -mcpu=970 -mtune=970"
+CHOST = "powerpc64-unknown-linux-gnu"
+USE = [ "altivec",]
+
+[ppc64.cell]
+COMMON_FLAGS = "-O2 -pipe -mcpu=cell -mtune=cell"
+CHOST = "powerpc64-unknown-linux-gnu"
+USE = [ "altivec", "ibm",]
+
+[ppc64.power5]
+COMMON_FLAGS = "-O2 -pipe -mcpu=power5 -mtune=power5"
+CHOST = "powerpc64-unknown-linux-gnu"
+USE = [ "ibm",]
+
+[ppc64.power6]
+COMMON_FLAGS = "-O2 -pipe -mcpu=power6 -mtune=power6"
+CHOST = "powerpc64-unknown-linux-gnu"
+USE = [ "altivec", "ibm",]
+
+[ppc64.power7]
+COMMON_FLAGS = "-O2 -pipe -mcpu=power7 -mtune=power7"
+CHOST = "powerpc64-unknown-linux-gnu"
+USE = [ "altivec", "ibm",]
+
+[ppc64.power7le]
+COMMON_FLAGS = "-O2 -pipe -mcpu=power7 -mtune=power7"
+CHOST = "powerpc64le-unknown-linux-gnu"
+USE = [ "altivec", "ibm",]
+
+[ppc64.power8]
+COMMON_FLAGS = "-O2 -pipe -mcpu=power8 -mtune=power8"
+CHOST = "powerpc64-unknown-linux-gnu"
+USE = [ "altivec", "ibm",]
+
+[ppc64.power8le]
+COMMON_FLAGS = "-O2 -pipe -mcpu=power8 -mtune=power8"
+CHOST = "powerpc64le-unknown-linux-gnu"
+USE = [ "altivec", "ibm",]
+
+[ppc64.power9]
+COMMON_FLAGS = "-O2 -pipe -mcpu=power9 -mtune=power9"
+CHOST = "powerpc64-unknown-linux-gnu"
+USE = [ "altivec", "ibm",]
+
+[ppc64.power9le]
+COMMON_FLAGS = "-O2 -pipe -mcpu=power9 -mtune=power9"
+CHOST = "powerpc64le-unknown-linux-gnu"
+USE = [ "altivec", "ibm",]
+
+[ppc64.ppc64]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "powerpc64-unknown-linux-gnu"
+
+[ppc64.powerpc64]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "powerpc64-unknown-linux-gnu"
+
+[ppc64.ppc64le]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "powerpc64le-unknown-linux-gnu"
+
+[ppc64.powerpc64le]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "powerpc64le-unknown-linux-gnu"
+
+[ppc.g3]
+CHOST = "powerpc-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -mcpu=G3 -mtune=G3 -pipe"
+
+[ppc.g4]
+CHOST = "powerpc-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -mcpu=G4 -mtune=G4 -maltivec -mabi=altivec -pipe"
+USE = [ "altivec",]
+
+[ppc.g5]
+CHOST = "powerpc-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -mcpu=G5 -mtune=G5 -maltivec -mabi=altivec -pipe"
+USE = [ "altivec",]
+
+[ppc.ppc]
+CHOST = "powerpc-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -mcpu=powerpc -mtune=powerpc -pipe"
+
+[ppc.powerpc]
+CHOST = "powerpc-unknown-linux-gnu"
+COMMON_FLAGS = "-O2 -mcpu=powerpc -mtune=powerpc -pipe"
diff --git a/arch/riscv.toml b/arch/riscv.toml
new file mode 100644
index 00000000..7f2d859f
--- /dev/null
+++ b/arch/riscv.toml
@@ -0,0 +1,39 @@
+[riscv.riscv]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "riscv64-unknown-linux-gnu"
+
+[riscv.rv64_multilib]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "riscv64-unknown-linux-gnu"
+
+[riscv.rv64_lp64d]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "riscv64-unknown-linux-gnu"
+
+[riscv.rv64_lp64d_musl]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "riscv64-gentoo-linux-musl"
+
+[riscv.rv64_lp64]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "riscv64-unknown-linux-gnu"
+
+[riscv.rv64_lp64_musl]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "riscv64-gentoo-linux-musl"
+
+[riscv.rv32_ilp32d]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "riscv32-unknown-linux-gnu"
+
+[riscv.rv32_ilp32d_musl]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "riscv32-unknown-linux-musl"
+
+[riscv.rv32_ilp32]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "riscv32-unknown-linux-gnu"
+
+[riscv.rv32_ilp32_musl]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "riscv32-unknown-linux-musl"
diff --git a/arch/s390.toml b/arch/s390.toml
new file mode 100644
index 00000000..875fe8e1
--- /dev/null
+++ b/arch/s390.toml
@@ -0,0 +1,12 @@
+[setarch.s390]
+arch = "s390"
+if_build = "s390x"
+
+[s390.s390]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "s390-ibm-linux-gnu"
+
+[s390x.s390x]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "s390x-ibm-linux-gnu"
+
diff --git a/arch/sh.toml b/arch/sh.toml
new file mode 100644
index 00000000..829b6d35
--- /dev/null
+++ b/arch/sh.toml
@@ -0,0 +1,48 @@
+[sh.sh]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "sh-unknown-linux-gnu"
+
+[sh.sh2]
+COMMON_FLAGS = "-O2 -m2 -pipe"
+CHOST = "sh2-unknown-linux-gnu"
+
+[sh.sh2a]
+COMMON_FLAGS = "-O2 -m2a -pipe"
+CHOST = "sh2a-unknown-linux-gnu"
+
+[sh.sh3]
+COMMON_FLAGS = "-O2 -m3 -pipe"
+CHOST = "sh3-unknown-linux-gnu"
+
+[sh.sh4]
+COMMON_FLAGS = "-O2 -m4 -pipe"
+CHOST = "sh4-unknown-linux-gnu"
+
+[sh.sh4a]
+COMMON_FLAGS = "-O2 -m4a -pipe"
+CHOST = "sh4a-unknown-linux-gnu"
+
+[sh.sheb]
+COMMON_FLAGS = "-O2 -pipe"
+CHOST = "sheb-unknown-linux-gnu"
+
+[sh.sh2eb]
+COMMON_FLAGS = "-O2 -m2 -pipe"
+CHOST = "sh2eb-unknown-linux-gnu"
+
+[sh.sh2aeb]
+COMMON_FLAGS = "-O2 -m2a -pipe"
+CHOST = "sh2aeb-unknown-linux-gnu"
+
+[sh.sh3eb]
+COMMON_FLAGS = "-O2 -m3 -pipe"
+CHOST = "sh3eb-unknown-linux-gnu"
+
+[sh.sh4eb]
+COMMON_FLAGS = "-O2 -m4 -pipe"
+CHOST = "sh4eb-unknown-linux-gnu"
+
+[sh.sh4aeb]
+COMMON_FLAGS = "-O2 -m4a -pipe"
+CHOST = "sh4aeb-unknown-linux-gnu"
+
diff --git a/arch/sparc.toml b/arch/sparc.toml
new file mode 100644
index 00000000..a19064b5
--- /dev/null
+++ b/arch/sparc.toml
@@ -0,0 +1,12 @@
+[setarch.sparc]
+arch = "linux32"
+if_build = "sparc64"
+
+[sparc.sparc]
+COMMON_FLAGS = "-O2 -mcpu=ultrasparc -pipe"
+CHOST = "sparc-unknown-linux-gnu"
+
+[sparc64.sparc64]
+COMMON_FLAGS = "-O2 -mcpu=ultrasparc -pipe"
+CHOST = "sparc64-unknown-linux-gnu"
+
diff --git a/arch/x86.toml b/arch/x86.toml
new file mode 100644
index 00000000..24d4c547
--- /dev/null
+++ b/arch/x86.toml
@@ -0,0 +1,55 @@
+[setarch.x86]
+arch = "linux32"
+if_build = "x86_64"
+
+[x86.x86]
+
+[x86.i486]
+COMMON_FLAGS = "-O2 -march=i486 -pipe"
+CHOST = "i486-pc-linux-gnu"
+
+[x86.i586]
+COMMON_FLAGS = "-O2 -march=i586 -pipe"
+CHOST = "i586-pc-linux-gnu"
+
+[x86.i686]
+COMMON_FLAGS = "-O2 -march=i686 -pipe"
+
+[x86.pentium]
+COMMON_FLAGS = "-O2 -march=i586 -pipe"
+CHOST = "i586-pc-linux-gnu"
+
+[x86.pentium2]
+COMMON_FLAGS = "-O2 -march=pentium2 -pipe"
+CPU_FLAGS_X86 = [ "mmx",]
+
+[x86.pentium3]
+COMMON_FLAGS = "-O2 -march=pentium3 -pipe"
+CPU_FLAGS_X86 = [ "mmx", "mmxext", "sse",]
+
+[x86.pentiumpro]
+COMMON_FLAGS = "-O2 -march=i686 -pipe"
+
+[x86.pentium-mmx]
+COMMON_FLAGS = "-O2 -march=pentium-mmx -pipe"
+CPU_FLAGS_X86 = [ "mmx",]
+
+[x86.k6]
+COMMON_FLAGS = "-O2 -march=k6 -pipe"
+CPU_FLAGS_X86 = [ "mmx",]
+
+[x86.k6-2]
+COMMON_FLAGS = "-O2 -march=k6-2 -pipe"
+CPU_FLAGS_X86 = [ "mmx", "3dnow",]
+
+[x86.athlon]
+COMMON_FLAGS = "-O2 -march=athlon -pipe"
+CPU_FLAGS_X86 = [ "mmx", "mmxext", "3dnow", "3dnowext",]
+
+[x86.athlon-xp]
+COMMON_FLAGS = "-O2 -march=athlon-xp -pipe"
+CPU_FLAGS_X86 = [ "mmx", "mmxext", "3dnow", "3dnowext", "sse",]
+
+[x86.i686-ssemath]
+COMMON_FLAGS = "-O2 -march=i686 -msse2 -mfpmath=sse -pipe"
+CPU_FLAGS_X86 = [ "mmx", "mmxext", "sse", "sse2",]
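Several of the arch files above (ppc, s390, sparc, x86) also define a [setarch.*] table pairing a setarch personality with the 64-bit build host it applies to. A minimal sketch of one plausible interpretation -- the helper below is illustrative, not catalyst's actual logic:

    import platform

    def setarch_prefix(setarch):
        """Return a command prefix such as ['setarch', 'linux32'] when building a
        32-bit flavour on the matching 64-bit host, otherwise no prefix."""
        if setarch and platform.machine() == setarch.get("if_build"):
            return ["setarch", setarch["arch"]]
        return []
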
diff --git a/bin/catalyst b/bin/catalyst
index 72a4cb4d..e464d369 100755
--- a/bin/catalyst
+++ b/bin/catalyst
@@ -1,41 +1,31 @@
-#!/usr/bin/python -OO
-
-# Maintained in full by:
-# Catalyst Team <catalyst@gentoo.org>
-# Release Engineering Team <releng@gentoo.org>
-# Andrew Gaffney <agaffney@gentoo.org>
-# Chris Gianelloni <wolf31o2@wolf31o2.org>
-# $Id$
-
-
-from __future__ import print_function
+#!/usr/bin/python3 -OO
import sys
# This block ensures that ^C interrupts are handled quietly.
try:
- import signal
+ import signal
- def exithandler(_signum, _frame):
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- print()
- sys.exit(1)
+ def exithandler(_signum, _frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ print()
+ sys.exit(1)
- signal.signal(signal.SIGINT, exithandler)
- signal.signal(signal.SIGTERM, exithandler)
- signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+ signal.signal(signal.SIGINT, exithandler)
+ signal.signal(signal.SIGTERM, exithandler)
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except KeyboardInterrupt:
- print()
- sys.exit(1)
+ print()
+ sys.exit(1)
from catalyst.main import main
try:
- main(sys.argv[1:])
+ main(sys.argv[1:])
except KeyboardInterrupt:
- print("Aborted.")
- sys.exit(130)
+ print("Aborted.")
+ sys.exit(130)
sys.exit(0)
diff --git a/bin/catalyst.git b/bin/catalyst.git
index eb6234b9..9b3deaa1 100755
--- a/bin/catalyst.git
+++ b/bin/catalyst.git
@@ -1,12 +1,10 @@
-#!/usr/bin/python
+#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright 1999-2015 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""Run catalyst from git using local modules/scripts."""
-from __future__ import print_function
-
import os
import sys
import tempfile
diff --git a/bin/pylint b/bin/pylint
index b0018278..183e24be 100755
--- a/bin/pylint
+++ b/bin/pylint
@@ -1,48 +1,47 @@
-#!/usr/bin/python
+#!/usr/bin/python3
# Copyright 1999-2015 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""Run pylint with the right settings."""
-from __future__ import print_function
-
import os
import sys
def find_all_modules(source_root):
- """Locate all python modules in the tree for scanning"""
- ret = []
+ """Locate all python modules in the tree for scanning"""
+ ret = []
- for root, _dirs, files in os.walk(source_root, topdown=False):
- # Add all of the .py modules in the tree.
- ret += [os.path.join(root, x) for x in files if x.endswith('.py')]
+ for root, _dirs, files in os.walk(source_root, topdown=False):
+ # Add all of the .py modules in the tree.
+ ret += [os.path.join(root, x) for x in files if x.endswith('.py')]
- # Add the main scripts that don't end in .py.
- ret += [os.path.join(source_root, 'bin', x) for x in ('catalyst', 'pylint')]
+ # Add the main scripts that don't end in .py.
+ ret += [os.path.join(source_root, 'bin', x)
+ for x in ('catalyst', 'pylint')]
- return ret
+ return ret
def main(argv):
- """The main entry point"""
- source_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+ """The main entry point"""
+ source_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
- if not argv:
- argv = find_all_modules(source_root)
+ if not argv:
+ argv = find_all_modules(source_root)
- pympath = source_root
- pythonpath = os.environ.get('PYTHONPATH')
- if pythonpath is None:
- pythonpath = pympath
- else:
- pythonpath = pympath + ':' + pythonpath
- os.environ['PYTHONPATH'] = pythonpath
+ pympath = source_root
+ pythonpath = os.environ.get('PYTHONPATH')
+ if pythonpath is None:
+ pythonpath = pympath
+ else:
+ pythonpath = pympath + ':' + pythonpath
+ os.environ['PYTHONPATH'] = pythonpath
- pylintrc = os.path.join(source_root, '.pylintrc')
- cmd = ['pylint', '--rcfile', pylintrc]
- os.execvp(cmd[0], cmd + argv)
+ pylintrc = os.path.join(source_root, '.pylintrc')
+ cmd = ['pylint', '--rcfile', pylintrc]
+ os.execvp(cmd[0], cmd + argv)
if __name__ == '__main__':
- main(sys.argv[1:])
+ main(sys.argv[1:])
diff --git a/catalyst/__init__.py b/catalyst/__init__.py
index 7bc28970..143bdf81 100644
--- a/catalyst/__init__.py
+++ b/catalyst/__init__.py
@@ -3,8 +3,8 @@
__maintainer__ = 'Catalyst <catalyst@gentoo.org>'
try:
- from .verinfo import version as fullversion
- __version__ = fullversion.split('\n')[0].split()[1]
+ from .verinfo import version as fullversion
+ __version__ = fullversion.split('\n')[0].split()[1]
except ImportError:
- from .version import get_version, __version__
- fullversion = get_version(reset=True)
+ from .version import get_version, __version__
+ fullversion = get_version(reset=True)
diff --git a/catalyst/arch/__init__.py b/catalyst/arch/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/catalyst/arch/__init__.py
+++ /dev/null
diff --git a/catalyst/arch/alpha.py b/catalyst/arch/alpha.py
deleted file mode 100644
index 813b0020..00000000
--- a/catalyst/arch/alpha.py
+++ /dev/null
@@ -1,75 +0,0 @@
-
-
-from catalyst import builder
-
-
-class generic_alpha(builder.generic):
- "abstract base class for all alpha builders"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-mieee -pipe"
-
-class arch_alpha(generic_alpha):
- "builder class for generic alpha (ev4+)"
- def __init__(self,myspec):
- generic_alpha.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]+=" -O2 -mcpu=ev4"
- self.settings["CHOST"]="alpha-unknown-linux-gnu"
-
-class arch_ev4(generic_alpha):
- "builder class for alpha ev4"
- def __init__(self,myspec):
- generic_alpha.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]+=" -O2 -mcpu=ev4"
- self.settings["CHOST"]="alphaev4-unknown-linux-gnu"
-
-class arch_ev45(generic_alpha):
- "builder class for alpha ev45"
- def __init__(self,myspec):
- generic_alpha.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]+=" -O2 -mcpu=ev45"
- self.settings["CHOST"]="alphaev45-unknown-linux-gnu"
-
-class arch_ev5(generic_alpha):
- "builder class for alpha ev5"
- def __init__(self,myspec):
- generic_alpha.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]+=" -O2 -mcpu=ev5"
- self.settings["CHOST"]="alphaev5-unknown-linux-gnu"
-
-class arch_ev56(generic_alpha):
- "builder class for alpha ev56 (ev5 plus BWX)"
- def __init__(self,myspec):
- generic_alpha.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]+=" -O2 -mcpu=ev56"
- self.settings["CHOST"]="alphaev56-unknown-linux-gnu"
-
-class arch_pca56(generic_alpha):
- "builder class for alpha pca56 (ev5 plus BWX & MAX)"
- def __init__(self,myspec):
- generic_alpha.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]+=" -O2 -mcpu=pca56"
- self.settings["CHOST"]="alphaev56-unknown-linux-gnu"
-
-class arch_ev6(generic_alpha):
- "builder class for alpha ev6"
- def __init__(self,myspec):
- generic_alpha.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]+=" -O2 -mcpu=ev6"
- self.settings["CHOST"]="alphaev6-unknown-linux-gnu"
- self.settings["HOSTUSE"]=["ev6"]
-
-class arch_ev67(generic_alpha):
- "builder class for alpha ev67 (ev6 plus CIX)"
- def __init__(self,myspec):
- generic_alpha.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]+=" -O2 -mcpu=ev67"
- self.settings["CHOST"]="alphaev67-unknown-linux-gnu"
- self.settings["HOSTUSE"]=["ev6"]
-
-def register():
- "Inform main catalyst program of the contents of this plugin."
- return ({ "alpha":arch_alpha, "ev4":arch_ev4, "ev45":arch_ev45,
- "ev5":arch_ev5, "ev56":arch_ev56, "pca56":arch_pca56,
- "ev6":arch_ev6, "ev67":arch_ev67 },
- ("alpha", ))
diff --git a/catalyst/arch/amd64.py b/catalyst/arch/amd64.py
deleted file mode 100644
index 73bf8b18..00000000
--- a/catalyst/arch/amd64.py
+++ /dev/null
@@ -1,76 +0,0 @@
-
-from catalyst import builder
-
-class generic_amd64(builder.generic):
- "abstract base class for all amd64 builders"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
-
-class arch_amd64(generic_amd64):
- "builder class for generic amd64 (Intel and AMD)"
- def __init__(self,myspec):
- generic_amd64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe"
- #lines like this appears to be trying to set the profile default, better to use the profile
- #self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx","sse","sse2"]}
-
-class arch_nocona(generic_amd64):
- "improved version of Intel Pentium 4 CPU with 64-bit extensions, MMX, SSE, SSE2 and SSE3 support"
- def __init__(self,myspec):
- generic_amd64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=nocona -pipe"
- #self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx","sse","sse2"]}
-
-# Requires gcc 4.3 to use this class
-class arch_core2(generic_amd64):
- "Intel Core 2 CPU with 64-bit extensions, MMX, SSE, SSE2, SSE3 and SSSE3 support"
- def __init__(self,myspec):
- generic_amd64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=core2 -pipe"
- #self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx","sse","sse2","ssse3"]}
-
-class arch_k8(generic_amd64):
- "generic k8, opteron and athlon64 support"
- def __init__(self,myspec):
- generic_amd64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=k8 -pipe"
- #self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx","sse","sse2","3dnow"]}
-
-class arch_k8_sse3(generic_amd64):
- "improved versions of k8, opteron and athlon64 with SSE3 support"
- def __init__(self,myspec):
- generic_amd64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=k8-sse3 -pipe"
- #self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx","sse","sse2","3dnow"]}
-
-class arch_amdfam10(generic_amd64):
- "AMD Family 10h core based CPUs with x86-64 instruction set support"
- def __init__(self,myspec):
- generic_amd64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=amdfam10 -pipe"
- #self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx","sse","sse2","3dnow"]}
-
-class arch_x32(generic_amd64):
- "builder class for generic x32 (Intel and AMD)"
- def __init__(self,myspec):
- generic_amd64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe"
- #self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx","sse","sse2"]}
-
-def register():
- "inform main catalyst program of the contents of this plugin"
- return ({
- "amd64" : arch_amd64,
- "k8" : arch_k8,
- "opteron" : arch_k8,
- "athlon64" : arch_k8,
- "athlonfx" : arch_k8,
- "nocona" : arch_nocona,
- "core2" : arch_core2,
- "k8-sse3" : arch_k8_sse3,
- "opteron-sse3" : arch_k8_sse3,
- "athlon64-sse3" : arch_k8_sse3,
- "amdfam10" : arch_amdfam10,
- "barcelona" : arch_amdfam10,
- "x32" : arch_x32,
- }, ("x86_64","amd64","nocona"))
diff --git a/catalyst/arch/arm.py b/catalyst/arch/arm.py
deleted file mode 100644
index 4b0efd8f..00000000
--- a/catalyst/arch/arm.py
+++ /dev/null
@@ -1,131 +0,0 @@
-
-
-from catalyst import builder
-
-class generic_arm(builder.generic):
- "Abstract base class for all arm (little endian) builders"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe"
-
-class generic_armeb(builder.generic):
- "Abstract base class for all arm (big endian) builders"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe"
-
-class arch_arm(generic_arm):
- "Builder class for arm (little endian) target"
- def __init__(self,myspec):
- generic_arm.__init__(self,myspec)
- self.settings["CHOST"]="arm-unknown-linux-gnu"
-
-class arch_armeb(generic_armeb):
- "Builder class for arm (big endian) target"
- def __init__(self,myspec):
- generic_armeb.__init__(self,myspec)
- self.settings["CHOST"]="armeb-unknown-linux-gnu"
-
-class arch_armv4l(generic_arm):
- "Builder class for armv4l target"
- def __init__(self,myspec):
- generic_arm.__init__(self,myspec)
- self.settings["CHOST"]="armv4l-unknown-linux-gnu"
- self.settings["COMMON_FLAGS"]+=" -march=armv4"
-
-class arch_armv4tl(generic_arm):
- "Builder class for armv4tl target"
- def __init__(self,myspec):
- generic_arm.__init__(self,myspec)
- self.settings["CHOST"]="armv4tl-softfloat-linux-gnueabi"
- self.settings["COMMON_FLAGS"]+=" -march=armv4t"
-
-class arch_armv5tl(generic_arm):
- "Builder class for armv5tl target"
- def __init__(self,myspec):
- generic_arm.__init__(self,myspec)
- self.settings["CHOST"]="armv5tl-softfloat-linux-gnueabi"
- self.settings["COMMON_FLAGS"]+=" -march=armv5t"
-
-class arch_armv5tel(generic_arm):
- "Builder class for armv5tel target"
- def __init__(self,myspec):
- generic_arm.__init__(self,myspec)
- self.settings["CHOST"]="armv5tel-softfloat-linux-gnueabi"
- self.settings["COMMON_FLAGS"]+=" -march=armv5te"
-
-class arch_armv5tejl(generic_arm):
- "Builder class for armv5tejl target"
- def __init__(self,myspec):
- generic_arm.__init__(self,myspec)
- self.settings["CHOST"]="armv5tejl-softfloat-linux-gnueabi"
- self.settings["COMMON_FLAGS"]+=" -march=armv5te"
-
-class arch_armv6j(generic_arm):
- "Builder class for armv6j target"
- def __init__(self,myspec):
- generic_arm.__init__(self,myspec)
- self.settings["CHOST"]="armv6j-softfp-linux-gnueabi"
- self.settings["COMMON_FLAGS"]+=" -march=armv6j -mfpu=vfp -mfloat-abi=softfp"
-
-class arch_armv6z(generic_arm):
- "Builder class for armv6z target"
- def __init__(self,myspec):
- generic_arm.__init__(self,myspec)
- self.settings["CHOST"]="armv6z-softfp-linux-gnueabi"
- self.settings["COMMON_FLAGS"]+=" -march=armv6z -mfpu=vfp -mfloat-abi=softfp"
-
-class arch_armv6zk(generic_arm):
- "Builder class for armv6zk target"
- def __init__(self,myspec):
- generic_arm.__init__(self,myspec)
- self.settings["CHOST"]="armv6zk-softfp-linux-gnueabi"
- self.settings["COMMON_FLAGS"]+=" -march=armv6zk -mfpu=vfp -mfloat-abi=softfp"
-
-class arch_armv7a(generic_arm):
- "Builder class for armv7a target"
- def __init__(self,myspec):
- generic_arm.__init__(self,myspec)
- self.settings["CHOST"]="armv7a-softfp-linux-gnueabi"
- self.settings["COMMON_FLAGS"]+=" -march=armv7-a -mfpu=vfpv3-d16 -mfloat-abi=softfp"
-
-class arch_armv6j_hardfp(generic_arm):
- "Builder class for armv6j hardfloat target, needs >=gcc-4.5"
- def __init__(self,myspec):
- generic_arm.__init__(self,myspec)
- self.settings["CHOST"]="armv6j-hardfloat-linux-gnueabi"
- self.settings["COMMON_FLAGS"]+=" -march=armv6j -mfpu=vfp -mfloat-abi=hard"
-
-class arch_armv7a_hardfp(generic_arm):
- "Builder class for armv7a hardfloat target, needs >=gcc-4.5"
- def __init__(self,myspec):
- generic_arm.__init__(self,myspec)
- self.settings["CHOST"]="armv7a-hardfloat-linux-gnueabi"
- self.settings["COMMON_FLAGS"]+=" -march=armv7-a -mfpu=vfpv3-d16 -mfloat-abi=hard"
-
-class arch_armv5teb(generic_armeb):
- "Builder class for armv5teb (XScale) target"
- def __init__(self,myspec):
- generic_armeb.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]+=" -mcpu=xscale"
- self.settings["CHOST"]="armv5teb-softfloat-linux-gnueabi"
-
-def register():
- "Inform main catalyst program of the contents of this plugin."
- return ({
- "arm" : arch_arm,
- "armv4l" : arch_armv4l,
- "armv4tl": arch_armv4tl,
- "armv5tl": arch_armv5tl,
- "armv5tel": arch_armv5tel,
- "armv5tejl": arch_armv5tejl,
- "armv6j" : arch_armv6j,
- "armv6z" : arch_armv6z,
- "armv6zk" : arch_armv6zk,
- "armv7a" : arch_armv7a,
- "armv6j_hardfp" : arch_armv6j_hardfp,
- "armv7a_hardfp" : arch_armv7a_hardfp,
- "armeb" : arch_armeb,
- "armv5teb" : arch_armv5teb
- }, ("arm", "armv4l", "armv4tl", "armv5tl", "armv5tel", "armv5tejl", "armv6l",
-"armv7l", "armeb", "armv5teb") )
diff --git a/catalyst/arch/arm64.py b/catalyst/arch/arm64.py
deleted file mode 100644
index 9a223834..00000000
--- a/catalyst/arch/arm64.py
+++ /dev/null
@@ -1,13 +0,0 @@
-
-from catalyst import builder
-
-class arch_arm64(builder.generic):
- "builder class for arm64"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe"
- self.settings["CHOST"]="aarch64-unknown-linux-gnu"
-
-def register():
- "Inform main catalyst program of the contents of this plugin."
- return ({ "arm64":arch_arm64 }, ("aarch64","arm64", ))
diff --git a/catalyst/arch/hppa.py b/catalyst/arch/hppa.py
deleted file mode 100644
index b3c5c931..00000000
--- a/catalyst/arch/hppa.py
+++ /dev/null
@@ -1,37 +0,0 @@
-
-from catalyst import builder
-
-class generic_hppa(builder.generic):
- "Abstract base class for all hppa builders"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe"
-
-class arch_hppa(generic_hppa):
- "Builder class for hppa systems"
- def __init__(self,myspec):
- generic_hppa.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]+=" -march=1.0"
- self.settings["CHOST"]="hppa-unknown-linux-gnu"
-
-class arch_hppa1_1(generic_hppa):
- "Builder class for hppa 1.1 systems"
- def __init__(self,myspec):
- generic_hppa.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]+=" -march=1.1"
- self.settings["CHOST"]="hppa1.1-unknown-linux-gnu"
-
-class arch_hppa2_0(generic_hppa):
- "Builder class for hppa 2.0 systems"
- def __init__(self,myspec):
- generic_hppa.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]+=" -march=2.0"
- self.settings["CHOST"]="hppa2.0-unknown-linux-gnu"
-
-def register():
- "Inform main catalyst program of the contents of this plugin."
- return ({
- "hppa": arch_hppa,
- "hppa1.1": arch_hppa1_1,
- "hppa2.0": arch_hppa2_0
- }, ("parisc","parisc64","hppa","hppa64") )
diff --git a/catalyst/arch/ia64.py b/catalyst/arch/ia64.py
deleted file mode 100644
index 49b17c46..00000000
--- a/catalyst/arch/ia64.py
+++ /dev/null
@@ -1,13 +0,0 @@
-
-from catalyst import builder
-
-class arch_ia64(builder.generic):
- "builder class for ia64"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe"
- self.settings["CHOST"]="ia64-unknown-linux-gnu"
-
-def register():
- "Inform main catalyst program of the contents of this plugin."
- return ({ "ia64":arch_ia64 }, ("ia64", ))
diff --git a/catalyst/arch/m68k.py b/catalyst/arch/m68k.py
deleted file mode 100644
index de127fb4..00000000
--- a/catalyst/arch/m68k.py
+++ /dev/null
@@ -1,20 +0,0 @@
-
-from catalyst import builder
-
-class generic_m68k(builder.generic):
- "abstract base class for all m68k builders"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]=" -pipe"
-
-class arch_m68k(generic_m68k):
- "builder class for generic m68k"
- def __init__(self,myspec):
- generic_m68k.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]+=" -O2"
- self.settings["CHOST"]="m68k-unknown-linux-gnu"
-
-def register():
- "Inform main catalyst program of the contents of this plugin."
- return ({ "m68k":arch_m68k },
- ("m68k", ))
diff --git a/catalyst/arch/mips.py b/catalyst/arch/mips.py
deleted file mode 100644
index 1ac6f85d..00000000
--- a/catalyst/arch/mips.py
+++ /dev/null
@@ -1,501 +0,0 @@
-
-from catalyst import builder
-
-class generic_mips(builder.generic):
- "Abstract base class for all mips builders [Big-endian]"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
- self.settings["CHOST"]="mips-unknown-linux-gnu"
-
-class generic_mipsel(builder.generic):
- "Abstract base class for all mipsel builders [Little-endian]"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
- self.settings["CHOST"]="mipsel-unknown-linux-gnu"
-
-class generic_mips64(builder.generic):
- "Abstract base class for all mips64 builders [Big-endian]"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
- self.settings["CHOST"]="mips64-unknown-linux-gnu"
-
-class generic_mips64el(builder.generic):
- "Abstract base class for all mips64el builders [Little-endian]"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
- self.settings["CHOST"]="mips64el-unknown-linux-gnu"
-
-class arch_mips1(generic_mips):
- "Builder class for MIPS I [Big-endian]"
- def __init__(self,myspec):
- generic_mips.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips1 -mabi=32 -mplt -pipe"
-
-class arch_mips2(generic_mips):
- "Builder class for MIPS II [Big-endian]"
- def __init__(self,myspec):
- generic_mips.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips2 -mabi=32 -mplt -pipe"
-
-class arch_mips32(generic_mips):
- "Builder class for MIPS 32 [Big-endian]"
- def __init__(self,myspec):
- generic_mips.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips32 -mabi=32 -mplt -pipe"
-
-class arch_mips32_softfloat(generic_mips):
- "Builder class for MIPS 32 [Big-endian softfloat]"
- def __init__(self,myspec):
- generic_mips.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips32 -mabi=32 -mplt -pipe"
- self.settings["CHOST"]="mips-softfloat-linux-gnu"
-
-class arch_mips32r2(generic_mips):
- "Builder class for MIPS 32r2 [Big-endian]"
- def __init__(self,myspec):
- generic_mips.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips32r2 -mabi=32 -mplt -pipe"
-
-class arch_mips32r2_softfloat(generic_mips):
- "Builder class for MIPS 32r2 [Big-endian softfloat]"
- def __init__(self,myspec):
- generic_mips.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips32r2 -mabi=32 -mplt -pipe"
- self.settings["CHOST"]="mips-softfloat-linux-gnu"
-
-class arch_mips3(generic_mips):
- "Builder class for MIPS III [Big-endian]"
- def __init__(self,myspec):
- generic_mips.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips3 -mabi=32 -mplt -mfix-r4000 -mfix-r4400 -pipe"
-
-class arch_mips3_n32(generic_mips64):
- "Builder class for MIPS III [Big-endian N32]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips3 -mabi=n32 -mplt -mfix-r4000 -mfix-r4400 -pipe"
-
-class arch_mips3_n64(generic_mips64):
- "Builder class for MIPS III [Big-endian N64]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips3 -mabi=64 -mfix-r4000 -mfix-r4400 -pipe"
-
-class arch_mips3_multilib(generic_mips64):
- "Builder class for MIPS III [Big-endian multilib]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips3 -mplt -mfix-r4000 -mfix-r4400 -pipe"
-
-class arch_mips4(generic_mips):
- "Builder class for MIPS IV [Big-endian]"
- def __init__(self,myspec):
- generic_mips.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips4 -mabi=32 -mplt -pipe"
-
-class arch_mips4_n32(generic_mips64):
- "Builder class for MIPS IV [Big-endian N32]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips4 -mabi=n32 -mplt -pipe"
-
-class arch_mips4_n64(generic_mips64):
- "Builder class for MIPS IV [Big-endian N64]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips4 -mabi=64 -pipe"
-
-class arch_mips4_multilib(generic_mips64):
- "Builder class for MIPS IV [Big-endian multilib]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips4 -mplt -pipe"
-
-class arch_mips4_r10k(generic_mips):
- "Builder class for MIPS IV R10k [Big-endian]"
- def __init__(self,myspec):
- generic_mips.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=r10k -mabi=32 -mplt -pipe"
-
-class arch_mips4_r10k_n32(generic_mips64):
- "Builder class for MIPS IV R10k [Big-endian N32]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=r10k -mabi=n32 -mplt -pipe"
-
-class arch_mips4_r10k_n64(generic_mips64):
- "Builder class for MIPS IV R10k [Big-endian N64]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=r10k -mabi=64 -pipe"
-
-class arch_mips4_r10k_multilib(generic_mips64):
- "Builder class for MIPS IV R10k [Big-endian multilib]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=r10k -mplt -pipe"
-
-class arch_mips4_r12k(generic_mips):
- "Builder class for MIPS IV R12k/R14k/R16k [Big-endian]"
- def __init__(self,myspec):
- generic_mips.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=r12k -mno-fix-r10000 -mabi=32 -mplt -pipe"
-
-class arch_mips4_r12k_n32(generic_mips64):
- "Builder class for MIPS IV R12k/R14k/R16k [Big-endian N32]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=r12k -mno-fix-r10000 -mabi=n32 -mplt -pipe"
-
-class arch_mips4_r12k_n64(generic_mips64):
- "Builder class for MIPS IV R12k/R14k/R16k [Big-endian N64]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=r12k -mno-fix-r10000 -mabi=64 -pipe"
-
-class arch_mips4_r12k_multilib(generic_mips64):
- "Builder class for MIPS IV R12k/R14k/R16k [Big-endian multilib]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=r12k -mno-fix-r10000 -mplt -pipe"
-
-class arch_mips64(generic_mips):
- "Builder class for MIPS 64 [Big-endian]"
- def __init__(self,myspec):
- generic_mips.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64 -mabi=32 -mplt -pipe"
-
-class arch_mips64_n32(generic_mips64):
- "Builder class for MIPS 64 [Big-endian N32]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64 -mabi=n32 -mplt -pipe"
-
-class arch_mips64_n64(generic_mips64):
- "Builder class for MIPS 64 [Big-endian N64]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64 -mabi=64 -pipe"
-
-class arch_mips64_multilib(generic_mips64):
- "Builder class for MIPS 64 [Big-endian multilib]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64 -mplt -pipe"
-
-class arch_mips64r2(generic_mips):
- "Builder class for MIPS 64r2 [Big-endian]"
- def __init__(self,myspec):
- generic_mips.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64r2 -mabi=32 -mplt -pipe"
-
-class arch_mips64r2_n32(generic_mips64):
- "Builder class for MIPS 64r2 [Big-endian N32]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64r2 -mabi=n32 -mplt -pipe"
-
-class arch_mips64r2_n64(generic_mips64):
- "Builder class for MIPS 64r2 [Big-endian N64]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64r2 -mabi=64 -pipe"
-
-class arch_mips64r2_multilib(generic_mips64):
- "Builder class for MIPS 64r2 [Big-endian multilib]"
- def __init__(self,myspec):
- generic_mips64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64r2 -mplt -pipe"
-
-class arch_mipsel1(generic_mipsel):
- "Builder class for MIPS I [Little-endian]"
- def __init__(self,myspec):
- generic_mipsel.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips1 -mabi=32 -mplt -pipe"
-
-class arch_mipsel2(generic_mipsel):
- "Builder class for MIPS II [Little-endian]"
- def __init__(self,myspec):
- generic_mipsel.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips2 -mabi=32 -mplt -pipe"
-
-class arch_mips32el(generic_mipsel):
- "Builder class for MIPS 32 [Little-endian]"
- def __init__(self,myspec):
- generic_mipsel.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips32 -mabi=32 -mplt -pipe"
-
-class arch_mips32el_softfloat(generic_mipsel):
- "Builder class for MIPS 32 [Little-endian softfloat]"
- def __init__(self,myspec):
- generic_mipsel.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips32 -mabi=32 -mplt -pipe"
- self.settings["CHOST"]="mipsel-softfloat-linux-gnu"
-
-class arch_mips32r2el(generic_mipsel):
- "Builder class for MIPS 32r2 [Little-endian]"
- def __init__(self,myspec):
- generic_mipsel.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips32r2 -mabi=32 -mplt -pipe"
-
-class arch_mips32r2el_softfloat(generic_mipsel):
- "Builder class for MIPS 32r2 [Little-endian softfloat]"
- def __init__(self,myspec):
- generic_mipsel.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips32r2 -mabi=32 -mplt -pipe"
- self.settings["CHOST"]="mipsel-softfloat-linux-gnu"
-
-class arch_mipsel3(generic_mipsel):
- "Builder class for MIPS III [Little-endian]"
- def __init__(self,myspec):
- generic_mipsel.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips3 -mabi=32 -mplt -Wa,-mfix-loongson2f-nop -pipe"
-
-class arch_mipsel3_n32(generic_mips64el):
- "Builder class for MIPS III [Little-endian N32]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips3 -mabi=n32 -mplt -Wa,-mfix-loongson2f-nop -pipe"
-
-class arch_mipsel3_n64(generic_mips64el):
- "Builder class for MIPS III [Little-endian N64]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips3 -mabi=64 -Wa,-mfix-loongson2f-nop -pipe"
-
-class arch_mipsel3_multilib(generic_mips64el):
- "Builder class for MIPS III [Little-endian multilib]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips3 -mplt -Wa,-mfix-loongson2f-nop -pipe"
-
-class arch_loongson2e(generic_mipsel):
- "Builder class for Loongson 2E [Little-endian]"
- def __init__(self,myspec):
- generic_mipsel.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=loongson2e -mabi=32 -mplt -pipe"
-
-class arch_loongson2e_n32(generic_mips64el):
- "Builder class for Loongson 2E [Little-endian N32]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=loongson2e -mabi=n32 -mplt -pipe"
-
-class arch_loongson2e_n64(generic_mips64el):
- "Builder class for Loongson 2E [Little-endian N64]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=loongson2e -mabi=64 -pipe"
-
-class arch_loongson2e_multilib(generic_mips64el):
- "Builder class for Loongson 2E [Little-endian multilib]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=loongson2e -mplt -pipe"
-
-class arch_loongson2f(generic_mipsel):
- "Builder class for Loongson 2F [Little-endian]"
- def __init__(self,myspec):
- generic_mipsel.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=loongson2f -mabi=32 -mplt -Wa,-mfix-loongson2f-nop -pipe"
-
-class arch_loongson2f_n32(generic_mips64el):
- "Builder class for Loongson 2F [Little-endian N32]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=loongson2f -mabi=n32 -mplt -Wa,-mfix-loongson2f-nop -pipe"
-
-class arch_loongson2f_n64(generic_mips64el):
- "Builder class for Loongson 2F [Little-endian N64]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=loongson2f -mabi=64 -Wa,-mfix-loongson2f-nop -pipe"
-
-class arch_loongson2f_multilib(generic_mips64el):
- "Builder class for Loongson 2F [Little-endian multilib]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=loongson2f -mplt -Wa,-mfix-loongson2f-nop -pipe"
-
-class arch_mipsel4(generic_mipsel):
- "Builder class for MIPS IV [Little-endian]"
- def __init__(self,myspec):
- generic_mipsel.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips4 -mabi=32 -mplt -pipe"
-
-class arch_mipsel4_n32(generic_mips64el):
- "Builder class for MIPS IV [Little-endian N32]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips4 -mabi=n32 -mplt -pipe"
-
-class arch_mipsel4_n64(generic_mips64el):
- "Builder class for MIPS IV [Little-endian N64]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips4 -mabi=64 -pipe"
-
-class arch_mipsel4_multilib(generic_mips64el):
- "Builder class for MIPS IV [Little-endian multilib]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips4 -mplt -pipe"
-
-class arch_mips64el(generic_mipsel):
- "Builder class for MIPS 64 [Little-endian]"
- def __init__(self,myspec):
- generic_mipsel.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64 -mabi=32 -mplt -pipe"
-
-class arch_mips64el_n32(generic_mips64el):
- "Builder class for MIPS 64 [Little-endian N32]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64 -mabi=n32 -mplt -pipe"
-
-class arch_mips64el_n64(generic_mips64el):
- "Builder class for MIPS 64 [Little-endian N64]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64 -mabi=64 -pipe"
-
-class arch_mips64el_multilib(generic_mips64el):
- "Builder class for MIPS 64 [Little-endian multilib]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64 -mplt -pipe"
-
-class arch_mips64r2el(generic_mipsel):
- "Builder class for MIPS 64r2 [Little-endian]"
- def __init__(self,myspec):
- generic_mipsel.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64r2 -mabi=32 -mplt -pipe"
-
-class arch_mips64r2el_n32(generic_mips64el):
- "Builder class for MIPS 64r2 [Little-endian N32]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64r2 -mabi=n32 -mplt -pipe"
-
-class arch_mips64r2el_n64(generic_mips64el):
- "Builder class for MIPS 64r2 [Little-endian N64]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64r2 -mabi=64 -pipe"
-
-class arch_mips64r2el_multilib(generic_mips64el):
- "Builder class for MIPS 64r2 [Little-endian multilib]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=mips64r2 -mplt -pipe"
-
-class arch_loongson3a(generic_mipsel):
- "Builder class for Loongson 3A [Little-endian]"
- def __init__(self,myspec):
- generic_mipsel.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=loongson3a -mabi=32 -mplt -pipe"
-
-class arch_loongson3a_n32(generic_mips64el):
- "Builder class for Loongson 3A [Little-endian N32]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=loongson3a -mabi=n32 -mplt -pipe"
-
-class arch_loongson3a_n64(generic_mips64el):
- "Builder class for Loongson 3A [Little-endian N64]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=loongson3a -mabi=64 -pipe"
-
-class arch_loongson3a_multilib(generic_mips64el):
- "Builder class for Loongson 3A [Little-endian multilib]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=loongson3a -mplt -pipe"
-
-class arch_cobalt(generic_mipsel):
- "Builder class for cobalt [Little-endian]"
- def __init__(self,myspec):
- generic_mipsel.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=r5000 -mabi=32 -mplt -pipe"
- self.settings["HOSTUSE"]=["cobalt"]
-
-class arch_cobalt_n32(generic_mips64el):
- "Builder class for cobalt [Little-endian N32]"
- def __init__(self,myspec):
- generic_mips64el.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=r5000 -mabi=n32 -mplt -pipe"
- self.settings["HOSTUSE"]=["cobalt"]
-
-def register():
- "Inform main catalyst program of the contents of this plugin."
- return ({
- "cobalt" : arch_cobalt,
- "cobalt_n32" : arch_cobalt_n32,
- "mips" : arch_mips1,
- "mips1" : arch_mips1,
- "mips2" : arch_mips2,
- "mips32" : arch_mips32,
- "mips32_softfloat" : arch_mips32_softfloat,
- "mips32r2" : arch_mips32r2,
- "mips32r2_softfloat" : arch_mips32r2_softfloat,
- "mips3" : arch_mips3,
- "mips3_n32" : arch_mips3_n32,
- "mips3_n64" : arch_mips3_n64,
- "mips3_multilib" : arch_mips3_multilib,
- "mips4" : arch_mips4,
- "mips4_n32" : arch_mips4_n32,
- "mips4_n64" : arch_mips4_n64,
- "mips4_multilib" : arch_mips4_multilib,
- "mips4_r10k" : arch_mips4_r10k,
- "mips4_r10k_n32" : arch_mips4_r10k_n32,
- "mips4_r10k_n64" : arch_mips4_r10k_n64,
- "mips4_r10k_multilib" : arch_mips4_r10k_multilib,
- "mips4_r12k" : arch_mips4_r12k,
- "mips4_r12k_n32" : arch_mips4_r12k_n32,
- "mips4_r12k_n64" : arch_mips4_r12k_n64,
- "mips4_r12k_multilib" : arch_mips4_r12k_multilib,
- "mips64" : arch_mips64,
- "mips64_n32" : arch_mips64_n32,
- "mips64_n64" : arch_mips64_n64,
- "mips64_multilib" : arch_mips64_multilib,
- "mips64r2" : arch_mips64r2,
- "mips64r2_n32" : arch_mips64r2_n32,
- "mips64r2_n64" : arch_mips64r2_n64,
- "mips64r2_multilib" : arch_mips64r2_multilib,
- "mipsel" : arch_mipsel1,
- "mipsel1" : arch_mipsel1,
- "mipsel2" : arch_mipsel2,
- "mips32el" : arch_mips32el,
- "mips32el_softfloat" : arch_mips32el_softfloat,
- "mips32r2el" : arch_mips32r2el,
- "mips32r2el_softfloat" : arch_mips32r2el_softfloat,
- "mipsel3" : arch_mipsel3,
- "mipsel3_n32" : arch_mipsel3_n32,
- "mipsel3_n64" : arch_mipsel3_n64,
- "mipsel3_multilib" : arch_mipsel3_multilib,
- "mipsel4" : arch_mipsel4,
- "mipsel4_n32" : arch_mipsel4_n32,
- "mipsel4_n64" : arch_mipsel4_n64,
- "mipsel4_multilib" : arch_mipsel4_multilib,
- "mips64el" : arch_mips64el,
- "mips64el_n32" : arch_mips64el_n32,
- "mips64el_n64" : arch_mips64el_n64,
- "mips64el_multilib" : arch_mips64el_multilib,
- "mips64r2el" : arch_mips64r2el,
- "mips64r2el_n32" : arch_mips64r2el_n32,
- "mips64r2el_n64" : arch_mips64r2el_n64,
- "mips64r2el_multilib" : arch_mips64r2el_multilib,
- "loongson2e" : arch_loongson2e,
- "loongson2e_n32" : arch_loongson2e_n32,
- "loongson2e_n64" : arch_loongson2e_n64,
- "loongson2e_multilib" : arch_loongson2e_multilib,
- "loongson2f" : arch_loongson2f,
- "loongson2f_n32" : arch_loongson2f_n32,
- "loongson2f_n64" : arch_loongson2f_n64,
- "loongson2f_multilib" : arch_loongson2f_multilib,
- "loongson3a" : arch_loongson3a,
- "loongson3a_n32" : arch_loongson3a_n32,
- "loongson3a_n64" : arch_loongson3a_n64,
- "loongson3a_multilib" : arch_loongson3a_multilib,
- }, ("mips","mips64"))
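
Each arch module removed in this commit follows the same plugin contract: builder subclasses set CHOST/COMMON_FLAGS/HOSTUSE in self.settings, and a module-level register() returns a subarch-to-class mapping plus the machine names the module covers, which the loader folds into its subarchmap and machinemap. The following self-contained sketch is not part of the patch; minimal_builder, arch_example and the "example" names are made up for illustration, with a stand-in base class so it runs without the catalyst package installed.

class minimal_builder:
    """Stand-in for catalyst.builder.generic: it just keeps the spec dict."""
    def __init__(self, myspec):
        self.settings = myspec

class arch_example(minimal_builder):
    "Hypothetical subarch builder, shaped like the deleted plugins above."
    def __init__(self, myspec):
        minimal_builder.__init__(self, myspec)
        self.settings["COMMON_FLAGS"] = "-O2 -pipe"
        self.settings["CHOST"] = "example-unknown-linux-gnu"

def register():
    "Return ({subarch name: builder class}, (machine/CHOST prefixes, ))."
    return ({"example": arch_example}, ("example", ))

# Loader side: both the subarch keys and the machine names map back to the
# module, so buildarch/hostarch can be looked up from a CHOST prefix or a
# subarch alike.
subarchmap, machinemap = {}, {}
tmpsubarchmap, tmpmachinemap = register()
subarchmap.update(tmpsubarchmap)
for name in tuple(tmpmachinemap) + tuple(tmpsubarchmap):
    machinemap[name] = "example_module"

settings = {"subarch": "example"}
arch = subarchmap[settings["subarch"]](settings)
print(arch.settings["CHOST"])   # example-unknown-linux-gnu
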
diff --git a/catalyst/arch/powerpc.py b/catalyst/arch/powerpc.py
deleted file mode 100644
index 59a6d625..00000000
--- a/catalyst/arch/powerpc.py
+++ /dev/null
@@ -1,158 +0,0 @@
-
-from catalyst import builder
-
-class generic_ppc(builder.generic):
- "abstract base class for all 32-bit powerpc builders"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
- self.settings["CHOST"]="powerpc-unknown-linux-gnu"
- if self.settings["buildarch"]=="ppc64":
- self.setarch('linux32')
- self.settings["crosscompile"] = False
-
-class generic_ppc64(builder.generic):
- "abstract base class for all 64-bit powerpc builders"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
-
-class arch_ppc(generic_ppc):
- "builder class for generic powerpc"
- def __init__(self,myspec):
- generic_ppc.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -mcpu=powerpc -mtune=powerpc -pipe"
-
-class arch_ppc64(generic_ppc64):
- "builder class for generic ppc64"
- def __init__(self,myspec):
- generic_ppc64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe"
- self.settings["CHOST"]="powerpc64-unknown-linux-gnu"
-
-class arch_ppc64le(generic_ppc64):
- "builder class for generic ppc64le"
- def __init__(self,myspec):
- generic_ppc64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe"
- self.settings["CHOST"]="powerpc64le-unknown-linux-gnu"
-
-class arch_970(arch_ppc64):
- "builder class for 970 aka G5 under ppc64"
- def __init__(self,myspec):
- arch_ppc64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe -mcpu=970 -mtune=970"
- self.settings["HOSTUSE"]=["altivec"]
-
-class arch_cell(arch_ppc64):
- "builder class for cell under ppc64"
- def __init__(self,myspec):
- arch_ppc64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe -mcpu=cell -mtune=cell"
- self.settings["HOSTUSE"]=["altivec","ibm"]
-
-class arch_g3(generic_ppc):
- def __init__(self,myspec):
- generic_ppc.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -mcpu=G3 -mtune=G3 -pipe"
-
-class arch_g4(generic_ppc):
- def __init__(self,myspec):
- generic_ppc.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -mcpu=G4 -mtune=G4 -maltivec -mabi=altivec -pipe"
- self.settings["HOSTUSE"]=["altivec"]
-
-class arch_g5(generic_ppc):
- def __init__(self,myspec):
- generic_ppc.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -mcpu=G5 -mtune=G5 -maltivec -mabi=altivec -pipe"
- self.settings["HOSTUSE"]=["altivec"]
-
-class arch_power(generic_ppc):
- "builder class for generic power"
- def __init__(self,myspec):
- generic_ppc.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -mcpu=power -mtune=power -pipe"
-
-class arch_power_ppc(generic_ppc):
- "builder class for generic powerpc/power"
- def __init__(self,myspec):
- generic_ppc.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -mcpu=common -mtune=common -pipe"
-
-class arch_power3(arch_ppc64):
- "builder class for power3 under ppc64"
- def __init__(self,myspec):
- arch_ppc64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe -mcpu=power3 -mtune=power3"
- self.settings["HOSTUSE"]=["ibm"]
-
-class arch_power4(arch_ppc64):
- "builder class for power4 under ppc64"
- def __init__(self,myspec):
- arch_ppc64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe -mcpu=power4 -mtune=power4"
- self.settings["HOSTUSE"]=["ibm"]
-
-class arch_power5(arch_ppc64):
- "builder class for power5 under ppc64"
- def __init__(self,myspec):
- arch_ppc64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe -mcpu=power5 -mtune=power5"
- self.settings["HOSTUSE"]=["ibm"]
-
-class arch_power6(arch_ppc64):
- "builder class for power6 under ppc64"
- def __init__(self,myspec):
- arch_ppc64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe -mcpu=power6 -mtune=power6"
- self.settings["HOSTUSE"]=["altivec","ibm"]
-
-class arch_power7(arch_ppc64):
- "builder class for power7 under ppc64"
- def __init__(self,myspec):
- arch_ppc64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe -mcpu=power7 -mtune=power7 -mabi=elfv2"
- self.settings["HOSTUSE"]=["altivec","ibm"]
-
-class arch_power7le(arch_ppc64le):
- "builder class for power7 under ppc64le"
- def __init__(self,myspec):
- arch_ppc64le.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe -mcpu=power7 -mtune=power7 -mabi=elfv2"
- self.settings["HOSTUSE"]=["altivec","ibm"]
-
-class arch_power8(arch_ppc64):
- "builder class for power8 under ppc64"
- def __init__(self,myspec):
- arch_ppc64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe -mcpu=power8 -mtune=power8 -mabi=elfv2"
- self.settings["HOSTUSE"]=["altivec","ibm"]
-
-class arch_power8le(arch_ppc64le):
- "builder class for power8 under ppc64le"
- def __init__(self,myspec):
- arch_ppc64le.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe -mcpu=power8 -mtune=power8 -mabi=elfv2"
- self.settings["HOSTUSE"]=["altivec","ibm"]
-
-def register():
- "Inform main catalyst program of the contents of this plugin."
- return ({
- "970" : arch_970,
- "cell" : arch_cell,
- "g3" : arch_g3,
- "g4" : arch_g4,
- "g5" : arch_g5,
- "power" : arch_power,
- "power-ppc" : arch_power_ppc,
- "power3" : arch_power3,
- "power4" : arch_power4,
- "power5" : arch_power5,
- "power6" : arch_power6,
- "power7" : arch_power7,
- "power7le" : arch_power7le,
- "power8" : arch_power8,
- "power8le" : arch_power8le,
- "ppc" : arch_ppc,
- "ppc64" : arch_ppc64,
- "ppc64le" : arch_ppc64le,
- }, ("ppc","ppc64","ppc64le","powerpc","powerpc64","powerpc64le"))
diff --git a/catalyst/arch/riscv.py b/catalyst/arch/riscv.py
deleted file mode 100644
index 6f5695f9..00000000
--- a/catalyst/arch/riscv.py
+++ /dev/null
@@ -1,13 +0,0 @@
-
-from catalyst import builder
-
-class arch_riscv(builder.generic):
- "builder class for riscv"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe"
- self.settings["CHOST"]="riscv64-unknown-linux-gnu"
-
-def register():
- "Inform main catalyst program of the contents of this plugin."
- return ({ "riscv":arch_riscv }, ("rv64","riscv64","riscv"))
diff --git a/catalyst/arch/s390.py b/catalyst/arch/s390.py
deleted file mode 100644
index f4dfebd9..00000000
--- a/catalyst/arch/s390.py
+++ /dev/null
@@ -1,30 +0,0 @@
-
-from catalyst import builder
-
-class generic_s390(builder.generic):
- "abstract base class for all s390 builders"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
-
-class generic_s390x(builder.generic):
- "abstract base class for all s390x builders"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
-
-class arch_s390(generic_s390):
- "builder class for generic s390"
- def __init__(self,myspec):
- generic_s390.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe"
- self.settings["CHOST"]="s390-ibm-linux-gnu"
-
-class arch_s390x(generic_s390x):
- "builder class for generic s390x"
- def __init__(self,myspec):
- generic_s390x.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe"
- self.settings["CHOST"]="s390x-ibm-linux-gnu"
-
-def register():
- "Inform main catalyst program of the contents of this plugin."
- return ({"s390":arch_s390,"s390x":arch_s390x}, ("s390", "s390x"))
diff --git a/catalyst/arch/sh.py b/catalyst/arch/sh.py
deleted file mode 100644
index 36ce2103..00000000
--- a/catalyst/arch/sh.py
+++ /dev/null
@@ -1,113 +0,0 @@
-
-from catalyst import builder
-
-class generic_sh(builder.generic):
- "Abstract base class for all sh builders [Little-endian]"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
-
-class generic_sheb(builder.generic):
- "Abstract base class for all sheb builders [Big-endian]"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
-
-class arch_sh(generic_sh):
- "Builder class for SH [Little-endian]"
- def __init__(self,myspec):
- generic_sh.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe"
- self.settings["CHOST"]="sh-unknown-linux-gnu"
-
-class arch_sh2(generic_sh):
- "Builder class for SH-2 [Little-endian]"
- def __init__(self,myspec):
- generic_sh.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -m2 -pipe"
- self.settings["CHOST"]="sh2-unknown-linux-gnu"
-
-class arch_sh2a(generic_sh):
- "Builder class for SH-2A [Little-endian]"
- def __init__(self,myspec):
- generic_sh.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -m2a -pipe"
- self.settings["CHOST"]="sh2a-unknown-linux-gnu"
-
-class arch_sh3(generic_sh):
- "Builder class for SH-3 [Little-endian]"
- def __init__(self,myspec):
- generic_sh.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -m3 -pipe"
- self.settings["CHOST"]="sh3-unknown-linux-gnu"
-
-class arch_sh4(generic_sh):
- "Builder class for SH-4 [Little-endian]"
- def __init__(self,myspec):
- generic_sh.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -m4 -pipe"
- self.settings["CHOST"]="sh4-unknown-linux-gnu"
-
-class arch_sh4a(generic_sh):
- "Builder class for SH-4A [Little-endian]"
- def __init__(self,myspec):
- generic_sh.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -m4a -pipe"
- self.settings["CHOST"]="sh4a-unknown-linux-gnu"
-
-class arch_sheb(generic_sheb):
- "Builder class for SH [Big-endian]"
- def __init__(self,myspec):
- generic_sheb.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe"
- self.settings["CHOST"]="sheb-unknown-linux-gnu"
-
-class arch_sh2eb(generic_sheb):
- "Builder class for SH-2 [Big-endian]"
- def __init__(self,myspec):
- generic_sheb.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -m2 -pipe"
- self.settings["CHOST"]="sh2eb-unknown-linux-gnu"
-
-class arch_sh2aeb(generic_sheb):
- "Builder class for SH-2A [Big-endian]"
- def __init__(self,myspec):
- generic_sheb.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -m2a -pipe"
- self.settings["CHOST"]="sh2aeb-unknown-linux-gnu"
-
-class arch_sh3eb(generic_sheb):
- "Builder class for SH-3 [Big-endian]"
- def __init__(self,myspec):
- generic_sheb.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -m3 -pipe"
- self.settings["CHOST"]="sh3eb-unknown-linux-gnu"
-
-class arch_sh4eb(generic_sheb):
- "Builder class for SH-4 [Big-endian]"
- def __init__(self,myspec):
- generic_sheb.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -m4 -pipe"
- self.settings["CHOST"]="sh4eb-unknown-linux-gnu"
-
-class arch_sh4aeb(generic_sheb):
- "Builder class for SH-4A [Big-endian]"
- def __init__(self,myspec):
- generic_sheb.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -m4a -pipe"
- self.settings["CHOST"]="sh4aeb-unknown-linux-gnu"
-
-def register():
- "Inform main catalyst program of the contents of this plugin."
- return ({
- "sh" :arch_sh,
- "sh2" :arch_sh2,
- "sh2a" :arch_sh2a,
- "sh3" :arch_sh3,
- "sh4" :arch_sh4,
- "sh4a" :arch_sh4a,
- "sheb" :arch_sheb,
- "sh2eb" :arch_sh2eb,
- "sh2aeb" :arch_sh2aeb,
- "sh3eb" :arch_sh3eb,
- "sh4eb" :arch_sh4eb,
- "sh4aeb" :arch_sh4aeb
- }, ("sh2","sh2a","sh3","sh4","sh4a","sh2eb","sh2aeb","sh3eb","sh4eb","sh4aeb"))
diff --git a/catalyst/arch/sparc.py b/catalyst/arch/sparc.py
deleted file mode 100644
index d9abd4b5..00000000
--- a/catalyst/arch/sparc.py
+++ /dev/null
@@ -1,36 +0,0 @@
-
-from catalyst import builder
-
-class generic_sparc(builder.generic):
- "abstract base class for all sparc builders"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
- if self.settings["buildarch"]=="sparc64":
- self.setarch('linux32')
- self.settings["crosscompile"] = False
-
-class generic_sparc64(builder.generic):
- "abstract base class for all sparc64 builders"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
-
-class arch_sparc(generic_sparc):
- "builder class for generic sparc (sun4cdm)"
- def __init__(self,myspec):
- generic_sparc.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -pipe"
- self.settings["CHOST"]="sparc-unknown-linux-gnu"
-
-class arch_sparc64(generic_sparc64):
- "builder class for generic sparc64 (sun4u)"
- def __init__(self,myspec):
- generic_sparc64.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -mcpu=ultrasparc -pipe"
- self.settings["CHOST"]="sparc-unknown-linux-gnu"
-
-def register():
- "Inform main catalyst program of the contents of this plugin."
- return ({
- "sparc" : arch_sparc,
- "sparc64" : arch_sparc64
- }, ("sparc","sparc64", ))
diff --git a/catalyst/arch/x86.py b/catalyst/arch/x86.py
deleted file mode 100644
index 3e369370..00000000
--- a/catalyst/arch/x86.py
+++ /dev/null
@@ -1,143 +0,0 @@
-
-from catalyst import builder
-
-class generic_x86(builder.generic):
- "abstract base class for all x86 builders"
- def __init__(self,myspec):
- builder.generic.__init__(self,myspec)
- if self.settings["buildarch"]=="amd64":
- self.setarch('linux32')
- self.settings["crosscompile"] = False
-
-class arch_x86(generic_x86):
- "builder class for generic x86 (386+)"
- def __init__(self,myspec):
- generic_x86.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -mtune=i686 -pipe"
- self.settings["CHOST"]="i386-pc-linux-gnu"
-
-class arch_i386(generic_x86):
- "Intel i386 CPU"
- def __init__(self,myspec):
- generic_x86.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=i386 -pipe"
- self.settings["CHOST"]="i386-pc-linux-gnu"
-
-class arch_i486(generic_x86):
- "Intel i486 CPU"
- def __init__(self,myspec):
- generic_x86.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=i486 -pipe"
- self.settings["CHOST"]="i486-pc-linux-gnu"
-
-class arch_i586(generic_x86):
- "Intel Pentium CPU"
- def __init__(self,myspec):
- generic_x86.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=i586 -pipe"
- self.settings["CHOST"]="i586-pc-linux-gnu"
-
-class arch_i686(generic_x86):
- "Intel Pentium Pro CPU"
- def __init__(self,myspec):
- generic_x86.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=i686 -pipe"
-
-class arch_pentium_mmx(generic_x86):
- "Intel Pentium MMX CPU with MMX support"
- def __init__(self,myspec):
- generic_x86.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=pentium-mmx -pipe"
- self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx"]}
-
-class arch_pentium2(generic_x86):
- "Intel Pentium 2 CPU with MMX support"
- def __init__(self,myspec):
- generic_x86.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=pentium2 -pipe"
- self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx"]}
-
-class arch_pentium3(generic_x86):
- "Intel Pentium 3 CPU with MMX and SSE support"
- def __init__(self,myspec):
- generic_x86.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=pentium3 -pipe"
- self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx","sse"]}
-
-class arch_pentium4(generic_x86):
- "Intel Pentium 4 CPU with MMX, SSE and SSE2 support"
- def __init__(self,myspec):
- generic_x86.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=pentium4 -pipe"
- #lines like this appears to be trying to set the profile default, better to use the profile
- #self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx","sse","sse2"]}
-
-class arch_pentium_m(generic_x86):
- "Intel Pentium M CPU with MMX, SSE and SSE2 support"
- def __init__(self,myspec):
- generic_x86.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=pentium-m -pipe"
- #self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx","sse","sse2"]}
-
-class arch_prescott(generic_x86):
- "improved version of Intel Pentium 4 CPU with MMX, SSE, SSE2 and SSE3 support"
- def __init__(self,myspec):
- generic_x86.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=prescott -pipe"
- #self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx","sse","sse2"]}
-
-class arch_k6(generic_x86):
- "AMD K6 CPU with MMX support"
- def __init__(self,myspec):
- generic_x86.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=k6 -pipe"
- self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx"]}
-
-class arch_k6_2(generic_x86):
- "AMD K6-2 CPU with MMX and 3dNOW! support"
- def __init__(self,myspec):
- generic_x86.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=k6-2 -pipe"
- self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx","3dnow"]}
-
-class arch_athlon(generic_x86):
- "AMD Athlon CPU with MMX, 3dNOW!, enhanced 3dNOW! and SSE prefetch support"
- def __init__(self,myspec):
- generic_x86.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=athlon -pipe"
- self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx","3dnow"]}
-
-class arch_athlon_xp(generic_x86):
- "improved AMD Athlon CPU with MMX, 3dNOW!, enhanced 3dNOW! and full SSE support"
- def __init__(self,myspec):
- generic_x86.__init__(self,myspec)
- self.settings["COMMON_FLAGS"]="-O2 -march=athlon-xp -pipe"
- self.settings["HOSTUSEEXPAND"]={"CPU_FLAGS_X86":["mmx","3dnow","sse"]}
-
-def register():
- "Inform main catalyst program of the contents of this plugin."
- return ({
- "x86" : arch_x86,
- "i386" : arch_i386,
- "i486" : arch_i486,
- "i586" : arch_i586,
- "i686" : arch_i686,
- "pentium" : arch_i586,
- "pentium2" : arch_pentium2,
- "pentium3" : arch_pentium3,
- "pentium3m" : arch_pentium3,
- "pentium-m" : arch_pentium_m,
- "pentium4" : arch_pentium4,
- "pentium4m" : arch_pentium4,
- "pentiumpro" : arch_i686,
- "pentium-mmx" : arch_pentium_mmx,
- "prescott" : arch_prescott,
- "k6" : arch_k6,
- "k6-2" : arch_k6_2,
- "k6-3" : arch_k6_2,
- "athlon" : arch_athlon,
- "athlon-tbird" : arch_athlon,
- "athlon-4" : arch_athlon_xp,
- "athlon-xp" : arch_athlon_xp,
- "athlon-mp" : arch_athlon_xp
- }, ('i386', 'i486', 'i586', 'i686'))
diff --git a/catalyst/base/clearbase.py b/catalyst/base/clearbase.py
index 644a385f..6218330e 100644
--- a/catalyst/base/clearbase.py
+++ b/catalyst/base/clearbase.py
@@ -4,66 +4,59 @@ from catalyst import log
from catalyst.support import countdown
from catalyst.fileops import clear_dir
-class ClearBase(object):
- """
- This class does all of clearing after task completion
- """
- def __init__(self, myspec):
- self.settings = myspec
- self.resume = None
-
- def clear_autoresume(self):
- """ Clean resume points since they are no longer needed """
- if "autoresume" in self.settings["options"]:
- log.notice('Removing AutoResume Points ...')
- self.resume.clear_all()
-
-
- def remove_autoresume(self):
- """ Rmove all resume points since they are no longer needed """
- if "autoresume" in self.settings["options"]:
- log.notice('Removing AutoResume ...')
- self.resume.clear_all(remove=True)
-
-
- def clear_chroot(self):
- self.chroot_lock.unlock()
- log.notice('Clearing the chroot path ...')
- clear_dir(self.settings["chroot_path"], mode=0o755, chg_flags=True)
-
-
- def remove_chroot(self):
- self.chroot_lock.unlock()
- log.notice('Removing the chroot path ...')
- clear_dir(self.settings["chroot_path"], mode=0o755, chg_flags=True, remove=True)
-
-
- def clear_packages(self, remove=False):
- if "pkgcache" in self.settings["options"]:
- log.notice('purging the pkgcache ...')
- clear_dir(self.settings["pkgcache_path"], remove=remove)
-
-
- def clear_kerncache(self, remove=False):
- if "kerncache" in self.settings["options"]:
- log.notice('purging the kerncache ...')
- clear_dir(self.settings["kerncache_path"], remove=remove)
-
-
- def purge(self, remove=False):
- countdown(10,"Purging Caches ...")
- if any(k in self.settings["options"] for k in ("purge",
- "purgeonly", "purgetmponly")):
- log.notice('purge(); clearing autoresume ...')
- self.clear_autoresume()
-
- log.notice('purge(); clearing chroot ...')
- self.clear_chroot()
-
- if "purgetmponly" not in self.settings["options"]:
- log.notice('purge(); clearing package cache ...')
- self.clear_packages(remove)
-
- log.notice('purge(); clearing kerncache ...')
- self.clear_kerncache(remove)
+class ClearBase():
+ """
+ This class does all of the clearing after task completion
+ """
+
+ def __init__(self, myspec):
+ self.settings = myspec
+ self.resume = None
+
+ def clear_autoresume(self):
+ """ Clean resume points since they are no longer needed """
+ if "autoresume" in self.settings["options"]:
+ log.notice('Removing AutoResume Points ...')
+ self.resume.clear_all()
+
+ def remove_autoresume(self):
+ """ Remove all resume points since they are no longer needed """
+ if "autoresume" in self.settings["options"]:
+ log.notice('Removing AutoResume ...')
+ self.resume.clear_all(remove=True)
+
+ def clear_chroot(self):
+ log.notice('Clearing the chroot path ...')
+ clear_dir(self.settings["chroot_path"], mode=0o755)
+
+ def remove_chroot(self):
+ log.notice('Removing the chroot path ...')
+ clear_dir(self.settings["chroot_path"], mode=0o755, remove=True)
+
+ def clear_packages(self, remove=False):
+ if "pkgcache" in self.settings["options"]:
+ log.notice('purging the pkgcache ...')
+ clear_dir(self.settings["pkgcache_path"], remove=remove)
+
+ def clear_kerncache(self, remove=False):
+ if "kerncache" in self.settings["options"]:
+ log.notice('purging the kerncache ...')
+ clear_dir(self.settings["kerncache_path"], remove=remove)
+
+ def purge(self, remove=False):
+ countdown(10, "Purging Caches ...")
+ if any(k in self.settings["options"] for k in ("purge",
+ "purgeonly", "purgetmponly")):
+ log.notice('purge(); clearing autoresume ...')
+ self.clear_autoresume()
+
+ log.notice('purge(); clearing chroot ...')
+ self.clear_chroot()
+
+ if "purgetmponly" not in self.settings["options"]:
+ log.notice('purge(); clearing package cache ...')
+ self.clear_packages(remove)
+
+ log.notice('purge(); clearing kerncache ...')
+ self.clear_kerncache(remove)
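
The rewritten purge() above keys off three option flags. As a quick illustration (not from the patch; the option sets are hypothetical, and the real clear_packages()/clear_kerncache() additionally require the pkgcache/kerncache options before touching anything), the decision logic reduces to:

def purge_plan(options):
    # Mirrors the branch structure of ClearBase.purge(), minus the countdown
    # and the actual filesystem work.
    plan = []
    if any(k in options for k in ("purge", "purgeonly", "purgetmponly")):
        plan += ["clear autoresume", "clear chroot"]
        if "purgetmponly" not in options:
            plan += ["clear package cache", "clear kerncache"]
    return plan

print(purge_plan({"purgetmponly"}))
# ['clear autoresume', 'clear chroot']
print(purge_plan({"purge", "pkgcache", "kerncache"}))
# ['clear autoresume', 'clear chroot', 'clear package cache', 'clear kerncache']
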
diff --git a/catalyst/base/genbase.py b/catalyst/base/genbase.py
index 8af3b97a..1643b1dc 100644
--- a/catalyst/base/genbase.py
+++ b/catalyst/base/genbase.py
@@ -1,53 +1,48 @@
+import hashlib
import io
import os
+import gzip
+class GenBase():
+ """
+ Generates CONTENTS and DIGESTS files.
+ """
-class GenBase(object):
- """
- This class does generation of the contents and digests files.
- """
- def __init__(self,myspec):
- self.settings = myspec
-
-
- def gen_contents_file(self, path):
- contents = path + ".CONTENTS"
- if os.path.exists(contents):
- os.remove(contents)
- if "contents" in self.settings:
- contents_map = self.settings["contents_map"]
- if os.path.exists(path):
- with io.open(contents, "w", encoding='utf-8') as myf:
- keys={}
- for i in self.settings["contents"].split():
- keys[i]=1
- array = sorted(keys.keys())
- for j in array:
- contents = contents_map.contents(path, j,
- verbose=self.settings["VERBOSE"])
- if contents:
- myf.write(contents)
-
- def gen_digest_file(self, path):
- digests = path + ".DIGESTS"
- if os.path.exists(digests):
- os.remove(digests)
- if "digests" in self.settings:
- hash_map = self.settings["hash_map"]
- if os.path.exists(path):
- with io.open(digests, "w", encoding='utf-8') as myf:
- keys={}
- for i in self.settings["digests"].split():
- keys[i]=1
- array = sorted(keys.keys())
- for f in [path, path + '.CONTENTS']:
- if os.path.exists(f):
- if "all" in array:
- for k in list(hash_map.hash_map):
- digest = hash_map.generate_hash(f, hash_=k)
- myf.write(digest)
- else:
- for j in array:
- digest = hash_map.generate_hash(f, hash_=j)
- myf.write(digest)
+ def __init__(self, myspec):
+ self.settings = myspec
+
+ def generate_hash(self, filepath, name):
+ h = hashlib.new(name)
+
+ with open(filepath, 'rb') as f:
+ while True:
+ data = f.read(8192)
+ if not data:
+ break
+ h.update(data)
+
+ filename = os.path.split(filepath)[1]
+
+ if self.settings['digest_format'] == 'bsd':
+ return f'# {name.upper()} HASH\n{name.upper()} ({filename}) = {h.hexdigest()}\n'
+ else:
+ return f'# {name.upper()} HASH\n{h.hexdigest()} {filename}\n'
+
+ def gen_contents_file(self, path):
+ c = self.settings['contents_map']
+
+ with gzip.open(path + '.CONTENTS.gz', 'wt', encoding='utf-8') as file:
+ file.write(c.contents(path, '', verbose=self.settings['VERBOSE']))
+
+ def gen_digest_file(self, path):
+ if 'digests' not in self.settings:
+ return
+
+ with io.open(path + '.DIGESTS', 'w', encoding='utf-8') as file:
+ for f in [path, path + '.CONTENTS.gz']:
+ for i in self.settings['digests']:
+ file.write(self.generate_hash(f, name=i))
+
+ with io.open(path + '.sha256', 'w', encoding='utf-8') as sha256file:
+ sha256file.write(self.generate_hash(path, name='sha256'))
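
The new GenBase streams each artifact through hashlib in 8 KiB chunks and writes either BSD-style or plain hexdigest-plus-filename lines depending on digest_format. A standalone sketch along the same lines (not from the patch; the command-line wrapper and the default 'sha512' choice are only for illustration):

import hashlib
import os
import sys

def generate_hash(filepath, name, digest_format='bsd'):
    # Stream the file through the named hash in 8 KiB chunks.
    h = hashlib.new(name)
    with open(filepath, 'rb') as f:
        while True:
            data = f.read(8192)
            if not data:
                break
            h.update(data)
    filename = os.path.split(filepath)[1]
    if digest_format == 'bsd':
        return f'# {name.upper()} HASH\n{name.upper()} ({filename}) = {h.hexdigest()}\n'
    return f'# {name.upper()} HASH\n{h.hexdigest()} {filename}\n'

if __name__ == '__main__':
    for path in sys.argv[1:]:
        sys.stdout.write(generate_hash(path, 'sha512'))
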
diff --git a/catalyst/base/resume.py b/catalyst/base/resume.py
index b2100735..977545e5 100644
--- a/catalyst/base/resume.py
+++ b/catalyst/base/resume.py
@@ -1,9 +1,4 @@
-#!/usr/bin/python
-
-# Maintained in full by:
-# Catalyst Team <catalyst@gentoo.org>
-# Release Engineering Team <releng@gentoo.org>
-# Copyright 2013 Brian Dolbec <dolsen@gentoo.org>
+#!/usr/bin/python3
'''resume.py
@@ -14,124 +9,116 @@ set, unset, is_set, is_unset, enabled, clear_all
import os
from snakeoil import fileutils
+from snakeoil.osutils import pjoin, listdir_files
from catalyst import log
-from catalyst.fileops import ensure_dirs, pjoin, listdir_files, clear_dir
-
-
-class AutoResume(object):
- '''Class for tracking and handling all aspects of
- the autoresume option and related files.
- '''
-
-
- def __init__(self, basedir, mode=0o755):
- self.basedir = basedir
- ensure_dirs(basedir, mode=mode, fatal=True)
- self._points = {}
- self._init_points_()
-
-
- def _init_points_(self):
- '''Internal function which reads the autoresume directory and
- for existing autoresume points and adds them to our _points variable
- '''
- existing = listdir_files(self.basedir, False)
- for point in existing:
- self._points[point] = pjoin(self.basedir, point)
-
-
- def enable(self, point, data=None):
- '''Sets the resume point 'ON'
-
- @param point: string. name of the resume point to enable
- @param data: string of information to store, or None
- @return boolean
- '''
- if point in self._points and not data:
- return True
- fname = pjoin(self.basedir, point)
- if data:
- with open(fname,"w") as myf:
- myf.write(data)
- else:
- try:
- fileutils.touch(fname)
- self._points[point] = fname
- except Exception as e:
- log.error('AutoResumeError: %s', e)
- return False
- return True
-
-
- def get(self, point, no_lf=True):
- '''Gets any data stored inside a resume point
-
- @param point: string. name of the resume point to enable
- @return data: string of information stored, or None
- '''
- if point in self._points:
- try:
- with open(self._points[point], 'r') as myf:
- data = myf.read()
- if data and no_lf:
- data = data.replace('\n', ' ')
- except OSError as e:
- log.error('AutoResumeError: %s', e)
- return None
- return data
- return None
-
-
- def disable(self, point):
- '''Sets the resume point 'OFF'
-
- @param point: string. name of the resume point to disable
- @return boolean
- '''
- if point not in self._points:
- return True
- try:
- os.unlink(self._points[point])
- self._points.pop(point)
- except Exception as e:
- log.error('AutoResumeError: %s', e)
- return False
- return True
-
-
- def is_enabled(self, point):
- '''Returns True if the resume point 'ON'
-
- @param point: string. name of the resume point enabled
- @return boolean
- '''
- return point in self._points
-
-
- def is_disabled(self, point):
- '''Returns True if the resume point 'OFF'
-
- @param point: string. name of the resume point not enabled
- @return boolean
- '''
- return point not in self._points
-
-
- @property
- def enabled(self):
- '''Returns a list of enabled points
- '''
- return list(self._points)
-
-
- def clear_all(self, remove=False):
- '''Clear all active resume points
-
- @remove: boolean, passed through to clear_dir()
- @return boolean
- '''
- if clear_dir(self.basedir, mode=0o755, chg_flags=True, remove=remove):
- self._points = {}
- return True
- return False
+from catalyst.fileops import ensure_dirs, clear_dir
+
+
+class AutoResume():
+ '''Class for tracking and handling all aspects of
+ the autoresume option and related files.
+ '''
+
+ def __init__(self, basedir, mode=0o755):
+ self.basedir = basedir
+ ensure_dirs(basedir, mode=mode, fatal=True)
+ self._points = {}
+ self._init_points_()
+
+ def _init_points_(self):
+ '''Internal function which scans the autoresume directory
+ for existing autoresume points and adds them to our _points variable
+ '''
+ existing = listdir_files(self.basedir, False)
+ for point in existing:
+ self._points[point] = pjoin(self.basedir, point)
+
+ def enable(self, point, data=None):
+ '''Sets the resume point 'ON'
+
+ @param point: string. name of the resume point to enable
+ @param data: string of information to store, or None
+ @return boolean
+ '''
+ if point in self._points and not data:
+ return True
+ fname = pjoin(self.basedir, point)
+ if data:
+ with open(fname, "w") as myf:
+ myf.write(data)
+ else:
+ try:
+ fileutils.touch(fname)
+ self._points[point] = fname
+ except Exception as e:
+ log.error('AutoResumeError: %s', e)
+ return False
+ return True
+
+ def get(self, point, no_lf=True):
+ '''Gets any data stored inside a resume point
+
+ @param point: string. name of the resume point to read
+ @return data: string of information stored, or None
+ '''
+ if point in self._points:
+ try:
+ with open(self._points[point], 'r') as myf:
+ data = myf.read()
+ if data and no_lf:
+ data = data.replace('\n', ' ')
+ except OSError as e:
+ log.error('AutoResumeError: %s', e)
+ return None
+ return data
+ return None
+
+ def disable(self, point):
+ '''Sets the resume point 'OFF'
+
+ @param point: string. name of the resume point to disable
+ @return boolean
+ '''
+ if point not in self._points:
+ return True
+ try:
+ os.unlink(self._points[point])
+ self._points.pop(point)
+ except Exception as e:
+ log.error('AutoResumeError: %s', e)
+ return False
+ return True
+
+ def is_enabled(self, point):
+ '''Returns True if the resume point is 'ON'
+
+ @param point: string. name of the resume point to check
+ @return boolean
+ '''
+ return point in self._points
+
+ def is_disabled(self, point):
+ '''Returns True if the resume point is 'OFF'
+
+ @param point: string. name of the resume point to check
+ @return boolean
+ '''
+ return point not in self._points
+
+ @property
+ def enabled(self):
+ '''Returns a list of enabled points
+ '''
+ return list(self._points)
+
+ def clear_all(self, remove=False):
+ '''Clear all active resume points
+
+ @param remove: boolean, passed through to clear_dir()
+ @return boolean
+ '''
+ if clear_dir(self.basedir, mode=0o755, remove=remove):
+ self._points = {}
+ return True
+ return False
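
The AutoResume behaviour is essentially unchanged by this hunk (pjoin/listdir_files now come from snakeoil.osutils and the clear_dir() call drops chg_flags), so a typical call pattern still looks like the sketch below. Illustrative only: it assumes this catalyst tree and snakeoil are importable, and the point names and seed filename are made up.

import tempfile

from catalyst.base.resume import AutoResume

basedir = tempfile.mkdtemp(prefix='autoresume-')
resume = AutoResume(basedir)              # ensure_dirs() creates basedir

resume.enable('chroot_setup')             # touch an empty flag file
print(resume.is_enabled('chroot_setup'))  # True
resume.disable('chroot_setup')            # unlink the flag file again

resume.enable('unpack', data='stage3-seed.tar.xz')
# Points written with data are picked up when the directory is rescanned:
print(AutoResume(basedir).get('unpack'))  # stage3-seed.tar.xz

resume.clear_all(remove=True)             # wipe the resume directory itself
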
diff --git a/catalyst/base/stagebase.py b/catalyst/base/stagebase.py
index 54a1f389..4bcab30c 100644
--- a/catalyst/base/stagebase.py
+++ b/catalyst/base/stagebase.py
@@ -1,1695 +1,1687 @@
+import configparser
+import copy
import os
-import imp
+import platform
import shutil
import sys
+from pathlib import Path
+
+import fasteners
+import libmount
+import tomli
+
from snakeoil import fileutils
+from snakeoil.osutils import pjoin
from DeComp.compress import CompressMap
from catalyst import log
-from catalyst.defaults import (SOURCE_MOUNT_DEFAULTS, TARGET_MOUNT_DEFAULTS,
- PORT_LOGDIR_CLEAN)
+from catalyst.context import namespace
+from catalyst.defaults import (confdefaults, MOUNT_DEFAULTS, PORT_LOGDIR_CLEAN)
from catalyst.support import (CatalystError, file_locate, normpath,
- cmd, read_makeconf, ismount, file_check)
+ cmd, command, read_makeconf, get_repo_name,
+ file_check, sanitize_name)
from catalyst.base.targetbase import TargetBase
from catalyst.base.clearbase import ClearBase
from catalyst.base.genbase import GenBase
-from catalyst.lock import LockDir, LockInUse
-from catalyst.fileops import ensure_dirs, pjoin, clear_dir, clear_path
+from catalyst.fileops import ensure_dirs, clear_dir, clear_path
from catalyst.base.resume import AutoResume
-if sys.version_info[0] >= 3:
- py_input = input
-else:
- py_input = raw_input # pylint: disable=undefined-variable
+
+def run_sequence(sequence):
+ for func in sequence:
+ log.notice('--- Running action sequence: %s', func.__name__)
+ sys.stdout.flush()
+ try:
+ func()
+ except Exception:
+ log.error('Exception running action sequence %s',
+ func.__name__, exc_info=True)
+ return False
+
+ return True
class StageBase(TargetBase, ClearBase, GenBase):
- """
- This class does all of the chroot setup, copying of files, etc. It is
- the driver class for pretty much everything that Catalyst does.
- """
- def __init__(self,myspec,addlargs):
- self.required_values.extend(["version_stamp", "target", "subarch",
- "rel_type", "profile", "snapshot", "source_subpath"])
-
- self.valid_values.extend(["version_stamp", "target", "subarch",
- "rel_type", "profile", "snapshot", "source_subpath",
- "portage_confdir", "portage_prefix", "portage_overlay",
- "cflags", "cxxflags", "fcflags", "fflags", "ldflags", "asflags",
- "common_flags", "cbuild", "hostuse", "catalyst_use",
- "distcc_hosts", "makeopts", "pkgcache_path", "kerncache_path",
- "compression_mode", "decompression_mode"])
-
- self.set_valid_build_kernel_vars(addlargs)
- TargetBase.__init__(self, myspec, addlargs)
- GenBase.__init__(self, myspec)
- ClearBase.__init__(self, myspec)
-
- # The semantics of subarchmap and machinemap changed a bit in 2.0.3 to
- # work better with vapier's CBUILD stuff. I've removed the "monolithic"
- # machinemap from this file and split up its contents amongst the
- # various arch/foo.py files.
- #
- # When register() is called on each module in the arch/ dir, it now
- # returns a tuple instead of acting on the subarchmap dict that is
- # passed to it. The tuple contains the values that were previously
- # added to subarchmap as well as a new list of CHOSTs that go along
- # with that arch. This allows us to build machinemap on the fly based
- # on the keys in subarchmap and the values of the 2nd list returned
- # (tmpmachinemap).
- #
- # Also, after talking with vapier. I have a slightly better idea of what
- # certain variables are used for and what they should be set to. Neither
- # 'buildarch' or 'hostarch' are used directly, so their value doesn't
- # really matter. They are just compared to determine if we are
- # cross-compiling. Because of this, they are just set to the name of the
- # module in arch/ that the subarch is part of to make things simpler.
- # The entire build process is still based off of 'subarch' like it was
- # previously. -agaffney
-
- self.makeconf = {}
- self.archmap = {}
- self.subarchmap = {}
- machinemap = {}
- arch_dir = self.settings["archdir"] + "/"
- for x in [
- x[:-3] for x in os.listdir(arch_dir) if x.endswith(".py")
- and x != "__init__.py"]:
- log.debug("Begin loading arch modules...")
- try:
- fh = open(arch_dir + x + ".py")
- # This next line loads the plugin as a module and
- # assigns it to archmap[x]
- self.archmap[x] = imp.load_module(x, fh, arch_dir + x + ".py",
- (".py", "r", imp.PY_SOURCE))
- # This next line registers all the subarches
- # supported in the plugin
- tmpsubarchmap, tmpmachinemap = self.archmap[x].register()
- self.subarchmap.update(tmpsubarchmap)
- for machine in tmpmachinemap:
- machinemap[machine] = x
- for subarch in tmpsubarchmap:
- machinemap[subarch] = x
- fh.close()
- except IOError:
- # This message should probably change a bit, since everything in
- # the dir should load just fine. If it doesn't, it's probably a
- # syntax error in the module
- log.warning("Can't find/load %s.py plugin in %s", x, arch_dir)
- log.debug("Loaded arch module: %s", self.archmap[x])
-
- if "chost" in self.settings:
- hostmachine = self.settings["chost"].split("-")[0]
- if hostmachine not in machinemap:
- raise CatalystError("Unknown host machine type " + hostmachine)
- self.settings["hostarch"] = machinemap[hostmachine]
- else:
- hostmachine = self.settings["subarch"]
- if hostmachine in machinemap:
- hostmachine = machinemap[hostmachine]
- self.settings["hostarch"] = hostmachine
- if "cbuild" in self.settings:
- buildmachine = self.settings["cbuild"].split("-")[0]
- else:
- buildmachine = os.uname()[4]
- if buildmachine not in machinemap:
- raise CatalystError("Unknown build machine type " + buildmachine)
- self.settings["buildarch"] = machinemap[buildmachine]
- self.settings["crosscompile"] = (self.settings["hostarch"] != \
- self.settings["buildarch"])
-
- # Call arch constructor, pass our settings
- try:
- self.arch = self.subarchmap[self.settings["subarch"]](self.settings)
- except KeyError:
- log.critical(
- 'Invalid subarch: %s\n'
- 'Choose one of the following:\n'
- ' %s',
- self.settings['subarch'], ' '.join(self.subarchmap))
-
- log.notice('Using target: %s', self.settings['target'])
- # Print a nice informational message
- if self.settings["buildarch"] == self.settings["hostarch"]:
- log.info('Building natively for %s', self.settings['hostarch'])
- elif self.settings["crosscompile"]:
- log.info('Cross-compiling on %s for different machine type %s',
- self.settings['buildarch'], self.settings['hostarch'])
- else:
- log.info('Building on %s for alternate personality type %s',
- self.settings['buildarch'], self.settings['hostarch'])
-
- # This must be set first as other set_ options depend on this
- self.set_spec_prefix()
-
- # Initialize our (de)compressor's)
- self.decompressor = CompressMap(self.settings["decompress_definitions"],
- env = self.env,
- search_order = self.settings["decompressor_search_order"],
- comp_prog = self.settings["comp_prog"],
- decomp_opt = self.settings["decomp_opt"])
- self.accepted_extensions = self.decompressor.search_order_extensions(
- self.settings["decompressor_search_order"])
- log.notice("Source file specification matching setting is: %s",
- self.settings["source_matching"])
- log.notice("Accepted source file extensions search order: %s",
- self.accepted_extensions)
- # save resources, it is not always needed
- self.compressor = None
-
- # Define all of our core variables
- self.set_target_profile()
- self.set_target_subpath()
- self.set_source_subpath()
-
- # Set paths
- self.set_snapshot_path()
- self.set_root_path()
- self.set_source_path()
- self.set_snapcache_path()
- self.set_chroot_path()
- self.set_autoresume_path()
- self.set_dest_path()
- self.set_stage_path()
- self.set_target_path()
-
- self.set_controller_file()
- self.set_default_action_sequence()
- self.set_use()
- self.set_catalyst_use()
- self.set_cleanables()
- self.set_iso_volume_id()
- self.set_build_kernel_vars()
- self.set_fsscript()
- self.set_install_mask()
- self.set_rcadd()
- self.set_rcdel()
- self.set_cdtar()
- self.set_fstype()
- self.set_fsops()
- self.set_iso()
- self.set_packages()
- self.set_rm()
- self.set_linuxrc()
- self.set_busybox_config()
- self.set_overlay()
- self.set_portage_overlay()
- self.set_root_overlay()
-
- # This next line checks to make sure that the specified variables exist on disk.
- #pdb.set_trace()
- file_locate(self.settings, ["distdir"], expand = 0)
- # If we are using portage_confdir, check that as well.
- if "portage_confdir" in self.settings:
- file_locate(self.settings, ["portage_confdir"], expand = 0)
-
- # Setup our mount points.
- # initialize our target mounts.
- self.target_mounts = TARGET_MOUNT_DEFAULTS.copy()
-
- self.mounts = ["proc", "dev", "portdir", "distdir", "port_tmpdir"]
- # initialize our source mounts
- self.mountmap = SOURCE_MOUNT_DEFAULTS.copy()
- # update these from settings
- self.mountmap["portdir"] = self.settings["portdir"]
- self.mountmap["distdir"] = self.settings["distdir"]
- self.target_mounts["portdir"] = normpath(self.settings["repo_basedir"] +
- "/" + self.settings["repo_name"])
- self.target_mounts["distdir"] = self.settings["target_distdir"]
- self.target_mounts["packagedir"] = self.settings["target_pkgdir"]
- if "snapcache" not in self.settings["options"]:
- self.mounts.remove("portdir")
- self.mountmap["portdir"] = None
- else:
- self.mountmap["portdir"] = normpath("/".join([
- self.settings["snapshot_cache_path"],
- self.settings["repo_name"],
- ]))
- if os.uname()[0] == "Linux":
- self.mounts.append("devpts")
- self.mounts.append("shm")
-
- self.set_mounts()
-
- # Configure any user specified options (either in catalyst.conf or on
- # the command line).
- if "pkgcache" in self.settings["options"]:
- self.set_pkgcache_path()
- log.info('Location of the package cache is %s', self.settings['pkgcache_path'])
- self.mounts.append("packagedir")
- self.mountmap["packagedir"] = self.settings["pkgcache_path"]
-
- if "kerncache" in self.settings["options"]:
- self.set_kerncache_path()
- log.info('Location of the kerncache is %s', self.settings['kerncache_path'])
- self.mounts.append("kerncache")
- self.mountmap["kerncache"] = self.settings["kerncache_path"]
-
- if "ccache" in self.settings["options"]:
- if "CCACHE_DIR" in os.environ:
- ccdir = os.environ["CCACHE_DIR"]
- del os.environ["CCACHE_DIR"]
- else:
- ccdir = "/var/tmp/ccache"
- if not os.path.isdir(ccdir):
- raise CatalystError(
- "Compiler cache support can't be enabled (can't find " + ccdir+")")
- self.mounts.append("ccache")
- self.mountmap["ccache"] = ccdir
- # for the chroot:
- self.env["CCACHE_DIR"] = self.target_mounts["ccache"]
-
- if "icecream" in self.settings["options"]:
- self.mounts.append("icecream")
- self.mountmap["icecream"] = self.settings["icecream"]
- self.env["PATH"] = self.target_mounts["icecream"] + ":" + self.env["PATH"]
-
- if "port_logdir" in self.settings:
- self.mounts.append("port_logdir")
- self.mountmap["port_logdir"] = self.settings["port_logdir"]
- self.env["PORT_LOGDIR"] = self.settings["port_logdir"]
- self.env["PORT_LOGDIR_CLEAN"] = PORT_LOGDIR_CLEAN
-
- def override_cbuild(self):
- if "CBUILD" in self.makeconf:
- self.settings["CBUILD"] = self.makeconf["CBUILD"]
-
- def override_chost(self):
- if "CHOST" in self.makeconf:
- self.settings["CHOST"] = self.makeconf["CHOST"]
-
- def override_cflags(self):
- if "CFLAGS" in self.makeconf:
- self.settings["CFLAGS"] = self.makeconf["CFLAGS"]
-
- def override_cxxflags(self):
- if "CXXFLAGS" in self.makeconf:
- self.settings["CXXFLAGS"] = self.makeconf["CXXFLAGS"]
-
- def override_fcflags(self):
- if "FCFLAGS" in self.makeconf:
- self.settings["FCFLAGS"] = self.makeconf["FCFLAGS"]
-
- def override_fflags(self):
- if "FFLAGS" in self.makeconf:
- self.settings["FFLAGS"] = self.makeconf["FFLAGS"]
-
- def override_ldflags(self):
- if "LDFLAGS" in self.makeconf:
- self.settings["LDFLAGS"] = self.makeconf["LDFLAGS"]
-
- def override_asflags(self):
- if "ASFLAGS" in self.makeconf:
- self.settings["ASFLAGS"] = self.makeconf["ASFLAGS"]
-
- def override_common_flags(self):
- if "COMMON_FLAGS" in self.makeconf:
- self.settings["COMMON_FLAGS"] = self.makeconf["COMMON_FLAGS"]
-
- def set_install_mask(self):
- if "install_mask" in self.settings:
- if not isinstance(self.settings['install_mask'], str):
- self.settings["install_mask"] = \
- ' '.join(self.settings["install_mask"])
-
- def set_spec_prefix(self):
- self.settings["spec_prefix"] = self.settings["target"]
-
- def set_target_profile(self):
- self.settings["target_profile"] = self.settings["profile"]
-
- def set_target_subpath(self):
- common = self.settings["rel_type"] + "/" + \
- self.settings["target"] + "-" + self.settings["subarch"]
- self.settings["target_subpath"] = \
- common + \
- "-" + self.settings["version_stamp"] + \
- "/"
- self.settings["target_subpath_unversioned"] = \
- common + \
- "/"
-
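For illustration, with hypothetical spec values, the two subpaths built above come out as shown in this standalone sketch (not catalyst code; all values are made up):

# Standalone sketch of the subpath construction above; the spec values are
# illustrative only.
settings = {
    "rel_type": "default",
    "target": "stage3",
    "subarch": "amd64",
    "version_stamp": "20230101T170504Z",
}

common = settings["rel_type"] + "/" + settings["target"] + "-" + settings["subarch"]
target_subpath = common + "-" + settings["version_stamp"] + "/"
target_subpath_unversioned = common + "/"

print(target_subpath)              # default/stage3-amd64-20230101T170504Z/
print(target_subpath_unversioned)  # default/stage3-amd64/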
- def set_source_subpath(self):
- if not isinstance(self.settings['source_subpath'], str):
- raise CatalystError(
- "source_subpath should have been a string. Perhaps you have " + \
- "something wrong in your spec file?")
-
- def set_pkgcache_path(self):
- if "pkgcache_path" in self.settings:
- if not isinstance(self.settings['pkgcache_path'], str):
- self.settings["pkgcache_path"] = \
- normpath(self.settings["pkgcache_path"])
- elif "versioned_cache" in self.settings["options"]:
- self.settings["pkgcache_path"] = \
- normpath(self.settings["storedir"] + "/packages/" + \
- self.settings["target_subpath"] + "/")
- else:
- self.settings["pkgcache_path"] = \
- normpath(self.settings["storedir"] + "/packages/" + \
- self.settings["target_subpath_unversioned"] + "/")
-
- def set_kerncache_path(self):
- if "kerncache_path" in self.settings:
- if not isinstance(self.settings['kerncache_path'], str):
- self.settings["kerncache_path"] = \
- normpath(self.settings["kerncache_path"])
- elif "versioned_cache" in self.settings["options"]:
- self.settings["kerncache_path"] = normpath(self.settings["storedir"] + \
- "/kerncache/" + self.settings["target_subpath"])
- else:
- self.settings["kerncache_path"] = normpath(self.settings["storedir"] + \
- "/kerncache/" + self.settings["target_subpath_unversioned"])
-
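As a rough illustration of how the two cache-path setters above resolve, here is a minimal sketch of the versioned_cache branch; the paths and subpaths are hypothetical, and the real methods also honour an explicit pkgcache_path or kerncache_path setting:

# Minimal sketch of set_pkgcache_path() / set_kerncache_path(); values are hypothetical.
import os

def cache_path(storedir, kind, subpath, subpath_unversioned, versioned_cache):
    sub = subpath if versioned_cache else subpath_unversioned
    return os.path.normpath(os.path.join(storedir, kind, sub))

print(cache_path("/var/tmp/catalyst", "packages",
                 "default/stage3-amd64-20230101/", "default/stage3-amd64/",
                 versioned_cache=True))
# -> /var/tmp/catalyst/packages/default/stage3-amd64-20230101
print(cache_path("/var/tmp/catalyst", "kerncache",
                 "default/stage3-amd64-20230101/", "default/stage3-amd64/",
                 versioned_cache=False))
# -> /var/tmp/catalyst/kerncache/default/stage3-amd64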
- def set_target_path(self):
- self.settings["target_path"] = normpath(self.settings["storedir"] + \
- "/builds/" + self.settings["target_subpath"])
- if "autoresume" in self.settings["options"]\
- and self.resume.is_enabled("setup_target_path"):
- log.notice('Resume point detected, skipping target path setup operation...')
- else:
- self.resume.enable("setup_target_path")
- ensure_dirs(self.settings["storedir"] + "/builds")
-
- def set_fsscript(self):
- if self.settings["spec_prefix"] + "/fsscript" in self.settings:
- self.settings["fsscript"] = \
- self.settings[self.settings["spec_prefix"] + "/fsscript"]
- del self.settings[self.settings["spec_prefix"] + "/fsscript"]
-
- def set_rcadd(self):
- if self.settings["spec_prefix"] + "/rcadd" in self.settings:
- self.settings["rcadd"] = \
- self.settings[self.settings["spec_prefix"] + "/rcadd"]
- del self.settings[self.settings["spec_prefix"] + "/rcadd"]
-
- def set_rcdel(self):
- if self.settings["spec_prefix"] + "/rcdel" in self.settings:
- self.settings["rcdel"] = \
- self.settings[self.settings["spec_prefix"] + "/rcdel"]
- del self.settings[self.settings["spec_prefix"] + "/rcdel"]
-
- def set_cdtar(self):
- if self.settings["spec_prefix"] + "/cdtar" in self.settings:
- self.settings["cdtar"] = \
- normpath(self.settings[self.settings["spec_prefix"] + "/cdtar"])
- del self.settings[self.settings["spec_prefix"] + "/cdtar"]
-
- def set_iso(self):
- if self.settings["spec_prefix"] + "/iso" in self.settings:
- if self.settings[self.settings["spec_prefix"] + "/iso"].startswith('/'):
- self.settings["iso"] = \
- normpath(self.settings[self.settings["spec_prefix"] + "/iso"])
- else:
- # This automatically prepends the build dir to the ISO output path
- # if it doesn't start with a /
- self.settings["iso"] = normpath(self.settings["storedir"] + \
- "/builds/" + self.settings["rel_type"] + "/" + \
- self.settings[self.settings["spec_prefix"] + "/iso"])
- del self.settings[self.settings["spec_prefix"] + "/iso"]
-
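A small sketch of the ISO path resolution performed by set_iso() above, with hypothetical values:

# Relative ISO names are placed under $storedir/builds/$rel_type/, absolute
# paths are taken as-is (all values here are hypothetical).
import os

def resolve_iso(iso_setting, storedir, rel_type):
    if iso_setting.startswith('/'):
        return os.path.normpath(iso_setting)
    return os.path.normpath(os.path.join(storedir, "builds", rel_type, iso_setting))

print(resolve_iso("install-amd64-minimal.iso", "/var/tmp/catalyst", "default"))
# -> /var/tmp/catalyst/builds/default/install-amd64-minimal.iso
print(resolve_iso("/srv/iso/custom.iso", "/var/tmp/catalyst", "default"))
# -> /srv/iso/custom.iso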
- def set_fstype(self):
- if self.settings["spec_prefix"] + "/fstype" in self.settings:
- self.settings["fstype"] = \
- self.settings[self.settings["spec_prefix"] + "/fstype"]
- del self.settings[self.settings["spec_prefix"] + "/fstype"]
-
- if "fstype" not in self.settings:
- self.settings["fstype"] = "normal"
- for x in self.valid_values:
- if x == self.settings["spec_prefix"] + "/fstype":
- log.info('%s/fstype is being set to the default of "normal"',
- self.settings['spec_prefix'])
-
- def set_fsops(self):
- if "fstype" in self.settings:
- self.valid_values.append("fsops")
- if self.settings["spec_prefix"] + "/fsops" in self.settings:
- self.settings["fsops"] = \
- self.settings[self.settings["spec_prefix"] + "/fsops"]
- del self.settings[self.settings["spec_prefix"] + "/fsops"]
-
- def set_source_path(self):
- if "seedcache" in self.settings["options"]\
- and os.path.isdir(normpath(self.settings["storedir"] + "/tmp/" +
- self.settings["source_subpath"] + "/")):
- self.settings["source_path"] = normpath(self.settings["storedir"] +
- "/tmp/" + self.settings["source_subpath"] + "/")
- log.debug("source_subpath is: %s", self.settings["source_path"])
- else:
- log.debug('Checking source path existence and '
- 'get the final filepath. subpath: %s',
- self.settings["source_subpath"])
- self.settings["source_path"] = file_check(
- normpath(self.settings["storedir"] + "/builds/" +
- self.settings["source_subpath"]),
- self.accepted_extensions,
- self.settings["source_matching"] in ["strict"]
- )
- log.debug('Source path returned from file_check is: %s',
- self.settings["source_path"])
- if os.path.isfile(self.settings["source_path"]):
- # XXX: Is this even necessary if the previous check passes?
- if os.path.exists(self.settings["source_path"]):
- self.settings["source_path_hash"] = \
- self.settings["hash_map"].generate_hash(
- self.settings["source_path"],
- hash_ = self.settings["hash_function"])
- log.notice('Source path set to %s', self.settings['source_path'])
-
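Roughly, the decision in set_source_path() boils down to the sketch below; file_check() is catalyst-internal, so it is modelled here by a simple extension probe, and the paths and extensions are hypothetical:

# Seed-cache directory wins; otherwise look for a tarball under builds/ with
# one of the accepted extensions (a stand-in for catalyst's file_check()).
import os

def pick_source(storedir, source_subpath, options, accepted_extensions):
    cache_dir = os.path.normpath(f"{storedir}/tmp/{source_subpath}")
    if "seedcache" in options and os.path.isdir(cache_dir):
        return cache_dir
    base = os.path.normpath(f"{storedir}/builds/{source_subpath}")
    for ext in accepted_extensions:
        candidate = f"{base}.{ext}"
        if os.path.isfile(candidate):
            return candidate
    return base

print(pick_source("/var/tmp/catalyst", "default/stage3-amd64-20230101",
                  options=set(), accepted_extensions=["tar.xz", "tar.bz2"]))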
- def set_dest_path(self):
- if "root_path" in self.settings:
- self.settings["destpath"] = normpath(self.settings["chroot_path"] +
- self.settings["root_path"])
- else:
- self.settings["destpath"] = normpath(self.settings["chroot_path"])
-
- def set_cleanables(self):
- self.settings["cleanables"] = ["/etc/resolv.conf", "/var/tmp/*", "/tmp/*",
- self.settings["repo_basedir"] + "/" +
- self.settings["repo_name"]]
-
- def set_snapshot_path(self):
- self.settings["snapshot_path"] = file_check(
- normpath(self.settings["storedir"] +
- "/snapshots/" + self.settings["snapshot_name"] +
- self.settings["snapshot"]),
- self.accepted_extensions,
-            self.settings["source_matching"] == "strict"
- )
- log.info('SNAPSHOT_PATH set to: %s', self.settings['snapshot_path'])
- self.settings["snapshot_path_hash"] = \
- self.settings["hash_map"].generate_hash(
- self.settings["snapshot_path"],
- hash_ = self.settings["hash_function"])
-
- def set_snapcache_path(self):
- self.settings["snapshot_cache_path"] = \
- normpath(pjoin(self.settings["snapshot_cache"],
- self.settings["snapshot"]))
- if "snapcache" in self.settings["options"]:
- self.snapcache_lock = \
- LockDir(self.settings["snapshot_cache_path"])
- log.info('Setting snapshot cache to %s', self.settings['snapshot_cache_path'])
-
- def set_chroot_path(self):
- """
- NOTE: the trailing slash has been removed
- Things *could* break if you don't use a proper join()
- """
- self.settings["chroot_path"] = normpath(self.settings["storedir"] +
- "/tmp/" + self.settings["target_subpath"].rstrip('/'))
- self.chroot_lock = LockDir(self.settings["chroot_path"])
-
- def set_autoresume_path(self):
- self.settings["autoresume_path"] = normpath(pjoin(
- self.settings["storedir"], "tmp", self.settings["rel_type"],
- ".autoresume-%s-%s-%s"
- %(self.settings["target"], self.settings["subarch"],
- self.settings["version_stamp"])
- ))
- if "autoresume" in self.settings["options"]:
- log.info('The autoresume path is %s', self.settings['autoresume_path'])
- self.resume = AutoResume(self.settings["autoresume_path"], mode=0o755)
-
- def set_controller_file(self):
- self.settings["controller_file"] = normpath(self.settings["sharedir"] +
- "/targets/" + self.settings["target"] + "/" + self.settings["target"]
- + "-controller.sh")
-
- def set_iso_volume_id(self):
- if self.settings["spec_prefix"] + "/volid" in self.settings:
- self.settings["iso_volume_id"] = \
- self.settings[self.settings["spec_prefix"] + "/volid"]
- if len(self.settings["iso_volume_id"]) > 32:
- raise CatalystError(
- "ISO volume ID must not exceed 32 characters.")
- else:
- self.settings["iso_volume_id"] = "catalyst "+self.settings["snapshot"]
-
- def set_default_action_sequence(self):
- """ Default action sequence for run method.
-
- This method sets the optional purgeonly action sequence and returns.
- Or it calls the normal set_action_sequence() for the target stage.
- """
- if "purgeonly" in self.settings["options"]:
- self.settings["action_sequence"] = ["remove_chroot"]
- return
- self.set_action_sequence()
-
- def set_action_sequence(self):
- """Set basic stage1, 2, 3 action sequences"""
- self.settings["action_sequence"] = ["unpack", "unpack_snapshot",
- "setup_confdir", "portage_overlay",
- "base_dirs", "bind", "chroot_setup", "setup_environment",
- "run_local", "preclean", "unbind", "clean"]
- self.set_completion_action_sequences()
-
- def set_completion_action_sequences(self):
- if "fetch" not in self.settings["options"]:
- self.settings["action_sequence"].append("capture")
- if "keepwork" in self.settings["options"]:
- self.settings["action_sequence"].append("clear_autoresume")
- elif "seedcache" in self.settings["options"]:
- self.settings["action_sequence"].append("remove_autoresume")
- else:
- self.settings["action_sequence"].append("remove_autoresume")
- self.settings["action_sequence"].append("remove_chroot")
- return
-
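Putting the three methods above together, a plain stage build ends up with the base sequence plus a completion tail; the sketch below mirrors that branching with hypothetical option sets:

# How the action sequence is assembled for a stage target (illustrative only).
base = ["unpack", "unpack_snapshot", "setup_confdir", "portage_overlay",
        "base_dirs", "bind", "chroot_setup", "setup_environment",
        "run_local", "preclean", "unbind", "clean"]

def completion(options):
    tail = []
    if "fetch" not in options:
        tail.append("capture")
    if "keepwork" in options:
        tail.append("clear_autoresume")
    elif "seedcache" in options:
        tail.append("remove_autoresume")
    else:
        tail += ["remove_autoresume", "remove_chroot"]
    return tail

print(base + completion(set()))
# [..., 'clean', 'capture', 'remove_autoresume', 'remove_chroot']
print(completion({"keepwork"}))
# ['capture', 'clear_autoresume']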
- def set_use(self):
- use = self.settings["spec_prefix"] + "/use"
- if use in self.settings:
-            if isinstance(self.settings[use], str):
-                self.settings["use"] = self.settings[use].split()
-            else:
-                self.settings["use"] = self.settings[use]
-            del self.settings[use]
- else:
- self.settings["use"] = []
-
- def set_catalyst_use(self):
- catalyst_use = self.settings["spec_prefix"] + "/catalyst_use"
- if catalyst_use in self.settings:
- if isinstance(self.settings[catalyst_use], str):
- self.settings["catalyst_use"] = self.settings[catalyst_use].split()
- else:
- self.settings["catalyst_use"] = self.settings[catalyst_use]
- del self.settings[catalyst_use]
- else:
- self.settings["catalyst_use"] = []
-
- # Force bindist when options ask for it
- if "bindist" in self.settings["options"]:
- log.debug("Enabling bindist USE flag")
- self.settings["catalyst_use"].append("bindist")
-
- def set_stage_path(self):
- self.settings["stage_path"] = normpath(self.settings["chroot_path"])
-
- def set_mounts(self):
- pass
-
- def set_packages(self):
- pass
-
- def set_rm(self):
- if self.settings["spec_prefix"] + "/rm" in self.settings:
- if isinstance(self.settings[self.settings['spec_prefix'] + '/rm'], str):
- self.settings[self.settings["spec_prefix"] + "/rm"] = \
- self.settings[self.settings["spec_prefix"] + "/rm"].split()
-
- def set_linuxrc(self):
- if self.settings["spec_prefix"] + "/linuxrc" in self.settings:
- if isinstance(self.settings[self.settings['spec_prefix'] + '/linuxrc'], str):
- self.settings["linuxrc"] = \
- self.settings[self.settings["spec_prefix"] + "/linuxrc"]
- del self.settings[self.settings["spec_prefix"] + "/linuxrc"]
-
- def set_busybox_config(self):
- if self.settings["spec_prefix"] + "/busybox_config" in self.settings:
- if isinstance(self.settings[self.settings['spec_prefix'] + '/busybox_config'], str):
- self.settings["busybox_config"] = \
- self.settings[self.settings["spec_prefix"] + "/busybox_config"]
- del self.settings[self.settings["spec_prefix"] + "/busybox_config"]
-
- def set_portage_overlay(self):
- if "portage_overlay" in self.settings:
- if isinstance(self.settings['portage_overlay'], str):
- self.settings["portage_overlay"] = \
- self.settings["portage_overlay"].split()
- log.info('portage_overlay directories are set to: %s',
- ' '.join(self.settings['portage_overlay']))
-
- def set_overlay(self):
- if self.settings["spec_prefix"] + "/overlay" in self.settings:
- if isinstance(self.settings[self.settings['spec_prefix'] + '/overlay'], str):
- self.settings[self.settings["spec_prefix"] + "/overlay"] = \
- self.settings[self.settings["spec_prefix"] + \
- "/overlay"].split()
-
- def set_root_overlay(self):
- if self.settings["spec_prefix"] + "/root_overlay" in self.settings:
- if isinstance(self.settings[self.settings['spec_prefix'] + '/root_overlay'], str):
- self.settings[self.settings["spec_prefix"] + "/root_overlay"] = \
- self.settings[self.settings["spec_prefix"] + \
- "/root_overlay"].split()
-
- def set_root_path(self):
- """ ROOT= variable for emerges """
- self.settings["root_path"] = "/"
-
- def set_valid_build_kernel_vars(self,addlargs):
- if "boot/kernel" in addlargs:
- if isinstance(addlargs['boot/kernel'], str):
- loopy = [addlargs["boot/kernel"]]
- else:
- loopy = addlargs["boot/kernel"]
-
- for x in loopy:
- self.valid_values.append("boot/kernel/" + x + "/aliases")
- self.valid_values.append("boot/kernel/" + x + "/config")
- self.valid_values.append("boot/kernel/" + x + "/console")
- self.valid_values.append("boot/kernel/" + x + "/extraversion")
- self.valid_values.append("boot/kernel/" + x + "/gk_action")
- self.valid_values.append("boot/kernel/" + x + "/gk_kernargs")
- self.valid_values.append("boot/kernel/" + x + "/initramfs_overlay")
- self.valid_values.append("boot/kernel/" + x + "/machine_type")
- self.valid_values.append("boot/kernel/" + x + "/sources")
- self.valid_values.append("boot/kernel/" + x + "/softlevel")
- self.valid_values.append("boot/kernel/" + x + "/use")
- self.valid_values.append("boot/kernel/" + x + "/packages")
- self.valid_values.append("boot/kernel/" + x + "/kernelopts")
- if "boot/kernel/" + x + "/packages" in addlargs:
- if isinstance(addlargs['boot/kernel/' + x + '/packages'], str):
- addlargs["boot/kernel/" + x + "/packages"] = \
- [addlargs["boot/kernel/" + x + "/packages"]]
-
- def set_build_kernel_vars(self):
- if self.settings["spec_prefix"] + "/gk_mainargs" in self.settings:
- self.settings["gk_mainargs"] = \
- self.settings[self.settings["spec_prefix"] + "/gk_mainargs"]
- del self.settings[self.settings["spec_prefix"] + "/gk_mainargs"]
-
- def kill_chroot_pids(self):
- log.info('Checking for processes running in chroot and killing them.')
-
- # Force environment variables to be exported so script can see them
- self.setup_environment()
-
- killcmd = normpath(self.settings["sharedir"] +
- self.settings["shdir"] + "/support/kill-chroot-pids.sh")
- if os.path.exists(killcmd):
- cmd([killcmd], env = self.env)
-
- def mount_safety_check(self):
- """
-        Check that none of the chroot's bind-mount targets are still mounted.
-        We don't want to clean up with things still mounted; if something is,
-        try to auto-unbind it and raise CatalystError when that fails.
- """
-
- if not os.path.exists(self.settings["chroot_path"]):
- return
-
- log.debug('self.mounts = %s', self.mounts)
- for x in self.mounts:
- target = normpath(self.settings["chroot_path"] + self.target_mounts[x])
- log.debug('mount_safety_check() x = %s %s', x, target)
- if not os.path.exists(target):
- continue
-
- if ismount(target):
- # Something is still mounted
- try:
- log.warning('%s is still mounted; performing auto-bind-umount...', target)
- # Try to umount stuff ourselves
- self.unbind()
- if ismount(target):
- raise CatalystError("Auto-unbind failed for " + target)
- else:
- log.notice('Auto-unbind successful...')
- except CatalystError:
- raise CatalystError("Unable to auto-unbind " + target)
-
- def unpack(self):
-
- clst_unpack_hash = self.resume.get("unpack")
-
- # Set up all unpack info settings
- unpack_info = self.decompressor.create_infodict(
- source = self.settings["source_path"],
- destination = self.settings["chroot_path"],
- arch = self.settings["compressor_arch"],
- other_options = self.settings["compressor_options"],
- )
-
- display_msg = (
- 'Starting %(mode)s from %(source)s\nto '
- '%(destination)s (this may take some time) ..')
-
- error_msg = "'%(mode)s' extraction of %(source)s to %(destination)s failed."
-
- if "seedcache" in self.settings["options"]:
- if os.path.isdir(unpack_info["source"]):
-                # SEEDCACHE is a directory, use rsync
- unpack_info['mode'] = "rsync"
- else:
-                # SEEDCACHE is not a directory, try untarring it
- log.notice('Referenced SEEDCACHE does not appear to be a directory, trying to untar...')
- unpack_info['source'] = file_check(unpack_info['source'])
- else:
- # No SEEDCACHE, use tar
- unpack_info['source'] = file_check(unpack_info['source'])
- # end of unpack_info settings
-
- # set defaults,
- # only change them if the resume point is proven to be good
- _unpack = True
- invalid_chroot = True
- # Begin autoresume validation
- if "autoresume" in self.settings["options"]:
- # check chroot
- if os.path.isdir(self.settings["chroot_path"]):
- if self.resume.is_enabled("unpack"):
- # Autoresume is valid in the chroot
- _unpack = False
- invalid_chroot = False
- log.notice('Resume: "chroot" is valid...')
- else:
- # self.resume.is_disabled("unpack")
- # Autoresume is invalid in the chroot
- log.notice('Resume: "seed source" unpack resume point is disabled')
-
- # check seed source
- if os.path.isfile(self.settings["source_path"]) and not invalid_chroot:
- if self.settings["source_path_hash"].replace("\n", " ") == clst_unpack_hash:
- # Seed tarball has not changed, chroot is valid
- _unpack = False
- invalid_chroot = False
- log.notice('Resume: "seed source" hash matches chroot...')
- else:
- # self.settings["source_path_hash"] != clst_unpack_hash
- # Seed tarball has changed, so invalidate the chroot
- _unpack = True
- invalid_chroot = True
- log.notice('Resume: "seed source" has changed, hashes do not match, invalidating resume...')
- log.notice(' source_path......: %s', self.settings["source_path"])
- log.notice(' new source hash..: %s', self.settings["source_path_hash"].replace("\n", " "))
- log.notice(' recorded hash....: %s', clst_unpack_hash)
- unpack_info['source'] = file_check(unpack_info['source'])
-
- else:
- # No autoresume, check SEEDCACHE
- if "seedcache" in self.settings["options"]:
- # if the seedcache is a dir, rsync will clean up the chroot
- if os.path.isdir(self.settings["source_path"]):
- pass
- elif os.path.isdir(self.settings["source_path"]):
- # We should never reach this, so something is very wrong
- raise CatalystError(
- "source path is a dir but seedcache is not enabled: %s"
- % self.settings["source_path"])
-
- if _unpack:
- self.mount_safety_check()
-
- if invalid_chroot:
- if "autoresume" in self.settings["options"]:
- log.notice('Resume: Target chroot is invalid, cleaning up...')
-
- self.clear_autoresume()
- self.clear_chroot()
-
- ensure_dirs(self.settings["chroot_path"])
-
- ensure_dirs(self.settings["chroot_path"] + "/tmp", mode=1777)
-
- if "pkgcache" in self.settings["options"]:
- ensure_dirs(self.settings["pkgcache_path"], mode=0o755)
-
- if "kerncache" in self.settings["options"]:
- ensure_dirs(self.settings["kerncache_path"], mode=0o755)
-
- log.notice('%s', display_msg % unpack_info)
-
- # now run the decompressor
- if not self.decompressor.extract(unpack_info):
- log.error('%s', error_msg % unpack_info)
-
- if "source_path_hash" in self.settings:
- self.resume.enable("unpack",
- data = self.settings["source_path_hash"])
- else:
- self.resume.enable("unpack")
- else:
- log.notice('Resume: Valid resume point detected, skipping seed unpack operation...')
-
- def unpack_snapshot(self):
- unpack = True
- snapshot_hash = self.resume.get("unpack_repo")
-
- unpack_errmsg = "Error unpacking snapshot using mode %(mode)s"
-
- unpack_info = self.decompressor.create_infodict(
- source = self.settings["snapshot_path"],
- destination = self.settings["snapshot_cache_path"],
- arch = self.settings["compressor_arch"],
- other_options = self.settings["compressor_options"],
- )
-
- target_portdir = normpath(self.settings["chroot_path"] +
- self.settings["repo_basedir"] + "/" + self.settings["repo_name"])
- log.info('%s', self.settings['chroot_path'])
- log.info('unpack_snapshot(), target_portdir = %s', target_portdir)
- if "snapcache" in self.settings["options"]:
- snapshot_cache_hash_path = pjoin(
- self.settings['snapshot_cache_path'], 'catalyst-hash')
- snapshot_cache_hash = fileutils.readfile(snapshot_cache_hash_path, True)
- unpack_info['mode'] = self.decompressor.determine_mode(
- unpack_info['source'])
-
- cleanup_msg = "Cleaning up invalid snapshot cache at \n\t" + \
- self.settings["snapshot_cache_path"] + \
- " (this can take a long time)..."
-
- if self.settings["snapshot_path_hash"] == snapshot_cache_hash:
- log.info('Valid snapshot cache, skipping unpack of portage tree...')
- unpack = False
- else:
- cleanup_msg = \
- 'Cleaning up existing portage tree (this can take a long time)...'
- unpack_info['destination'] = normpath(
- self.settings["chroot_path"] + self.settings["repo_basedir"])
- unpack_info['mode'] = self.decompressor.determine_mode(
- unpack_info['source'])
-
- if "autoresume" in self.settings["options"] \
- and os.path.exists(target_portdir) \
- and self.resume.is_enabled("unpack_repo") \
- and self.settings["snapshot_path_hash"] == snapshot_hash:
- log.notice('Valid Resume point detected, skipping unpack of portage tree...')
- unpack = False
-
- if unpack:
- if "snapcache" in self.settings["options"]:
- self.snapcache_lock.write_lock()
- if os.path.exists(target_portdir):
- log.info('%s', cleanup_msg)
- clear_dir(target_portdir)
-
- log.notice('Unpacking portage tree (this can take a long time) ...')
- if not self.decompressor.extract(unpack_info):
- log.error('%s', unpack_errmsg % unpack_info)
-
- if "snapcache" in self.settings["options"]:
- with open(snapshot_cache_hash_path, 'w') as myf:
- myf.write(self.settings["snapshot_path_hash"])
- else:
- log.info('Setting snapshot autoresume point')
- self.resume.enable("unpack_repo",
- data = self.settings["snapshot_path_hash"])
-
- if "snapcache" in self.settings["options"]:
- self.snapcache_lock.unlock()
-
- def config_profile_link(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("config_profile_link"):
- log.notice('Resume point detected, skipping config_profile_link operation...')
- else:
- # TODO: zmedico and I discussed making this a directory and pushing
- # in a parent file, as well as other user-specified configuration.
- log.info('Configuring profile link...')
- clear_path(self.settings['chroot_path'] + \
- self.settings['port_conf'] + '/make.profile')
- ensure_dirs(self.settings['chroot_path'] + self.settings['port_conf'])
- cmd(['ln', '-sf',
- '../..' + self.settings['portdir'] + '/profiles/' + self.settings['target_profile'],
- self.settings['chroot_path'] + self.settings['port_conf'] + '/make.profile'],
- env=self.env)
- self.resume.enable("config_profile_link")
-
- def setup_confdir(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("setup_confdir"):
- log.notice('Resume point detected, skipping setup_confdir operation...')
- else:
- if "portage_confdir" in self.settings:
- log.info('Configuring %s...', self.settings['port_conf'])
- dest = normpath(self.settings['chroot_path'] + '/' + self.settings['port_conf'])
- ensure_dirs(dest)
- # The trailing slashes on both paths are important:
- # We want to make sure rsync copies the dirs into each
- # other and not as subdirs.
- cmd(['rsync', '-a', self.settings['portage_confdir'] + '/', dest + '/'],
- env=self.env)
- self.resume.enable("setup_confdir")
-
- def portage_overlay(self):
- """ We copy the contents of our overlays to /usr/local/portage """
- if "portage_overlay" in self.settings:
- for x in self.settings["portage_overlay"]:
- if os.path.exists(x):
- log.info('Copying overlay dir %s', x)
- ensure_dirs(self.settings['chroot_path'] + self.settings['local_overlay'])
- cmd("cp -a " + x + "/* " + self.settings["chroot_path"] +
- self.settings["local_overlay"],
- env=self.env)
-
- def root_overlay(self):
- """ Copy over the root_overlay """
- if self.settings["spec_prefix"] + "/root_overlay" in self.settings:
- for x in self.settings[self.settings["spec_prefix"] +
- "/root_overlay"]:
- if os.path.exists(x):
- log.info('Copying root_overlay: %s', x)
- cmd(['rsync', '-a', x + '/', self.settings['chroot_path']],
- env=self.env)
-
- def base_dirs(self):
- pass
-
- def bind(self):
- for x in self.mounts:
- log.debug('bind(); x = %s', x)
- target = normpath(self.settings["chroot_path"] + self.target_mounts[x])
- ensure_dirs(target, mode=0o755)
-
- if not os.path.exists(self.mountmap[x]):
- if self.mountmap[x] not in ["tmpfs", "shmfs"]:
- ensure_dirs(self.mountmap[x], mode=0o755)
-
- src = self.mountmap[x]
- log.debug('bind(); src = %s', src)
- if "snapcache" in self.settings["options"] and x == "portdir":
- self.snapcache_lock.read_lock()
- _cmd = None
- if src == "tmpfs":
- if "var_tmpfs_portage" in self.settings:
- _cmd = ['mount', '-t', 'tmpfs',
- '-o', 'size=' + self.settings['var_tmpfs_portage'] + 'G',
- src, target]
- else:
- if os.uname()[0] == "FreeBSD":
- if src == "/dev":
- _cmd = ['mount', '-t', 'devfs', 'none', target]
- else:
- _cmd = ['mount_nullfs', src, target]
- else:
- if src == "shmfs":
- _cmd = ['mount', '-t', 'tmpfs', '-o', 'noexec,nosuid,nodev', 'shm', target]
- else:
- _cmd = ['mount', '--bind', src, target]
- if _cmd:
- log.debug('bind(); _cmd = %s', _cmd)
- cmd(_cmd, env=self.env, fail_func=self.unbind)
- log.debug('bind(); finished :D')
-
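The mount command chosen in bind() depends on the source; here is a condensed, Linux-only sketch of that selection (the real method also handles FreeBSD and snapcache read-locking), with hypothetical paths:

# Which mount invocation bind() would issue for a given source (illustrative).
def mount_command(src, target, var_tmpfs_portage_gb=None):
    if src == "tmpfs":
        if var_tmpfs_portage_gb is None:
            return None                     # no tmpfs size configured, nothing mounted
        return ['mount', '-t', 'tmpfs',
                '-o', 'size=%sG' % var_tmpfs_portage_gb, src, target]
    if src == "shmfs":
        return ['mount', '-t', 'tmpfs', '-o', 'noexec,nosuid,nodev', 'shm', target]
    return ['mount', '--bind', src, target]

print(mount_command("/var/cache/distfiles", "/var/tmp/catalyst/tmp/chroot/var/cache/distfiles"))
# ['mount', '--bind', '/var/cache/distfiles', '/var/tmp/catalyst/tmp/chroot/var/cache/distfiles']
print(mount_command("tmpfs", "/var/tmp/catalyst/tmp/chroot/var/tmp/portage", 8))
# ['mount', '-t', 'tmpfs', '-o', 'size=8G', 'tmpfs', '/var/tmp/catalyst/tmp/chroot/var/tmp/portage']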
- def unbind(self):
- ouch = 0
- mypath = self.settings["chroot_path"]
- myrevmounts = self.mounts[:]
- myrevmounts.reverse()
- # Unmount in reverse order for nested bind-mounts
- for x in myrevmounts:
- target = normpath(mypath + self.target_mounts[x])
- if not os.path.exists(target):
- continue
-
- if not ismount(target):
- continue
-
- try:
- cmd(['umount', target], env=self.env)
- except CatalystError:
- log.warning('First attempt to unmount failed: %s', target)
- log.warning('Killing any pids still running in the chroot')
-
- self.kill_chroot_pids()
-
- try:
- cmd(['umount', target], env=self.env)
- except CatalystError:
- ouch = 1
- log.warning("Couldn't umount bind mount: %s", target)
-
-            if "snapcache" in self.settings["options"] and x == "portdir":
- try:
- # It's possible the snapshot lock object isn't created yet.
- # This is because mount safety check calls unbind before the
- # target is fully initialized
- self.snapcache_lock.unlock()
- except Exception:
- pass
- if ouch:
- # if any bind mounts really failed, then we need to raise
- # this to potentially prevent an upcoming bash stage cleanup script
- # from wiping our bind mounts.
- raise CatalystError(
- "Couldn't umount one or more bind-mounts; aborting for safety.")
-
- def chroot_setup(self):
- self.makeconf = read_makeconf(normpath(self.settings["chroot_path"] +
- self.settings["make_conf"]))
- self.override_cbuild()
- self.override_chost()
- self.override_cflags()
- self.override_cxxflags()
- self.override_fcflags()
- self.override_fflags()
- self.override_ldflags()
- self.override_asflags()
- self.override_common_flags()
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("chroot_setup"):
- log.notice('Resume point detected, skipping chroot_setup operation...')
- else:
- log.notice('Setting up chroot...')
-
- shutil.copy('/etc/resolv.conf', self.settings['chroot_path'] + '/etc/')
-
- # Copy over the envscript, if applicable
- if "envscript" in self.settings:
- if not os.path.exists(self.settings["envscript"]):
- raise CatalystError(
- "Can't find envscript " + self.settings["envscript"],
- print_traceback=True)
-
- log.warning(
- 'Overriding certain env variables may cause catastrophic failure.\n'
- 'If your build fails look here first as the possible problem.\n'
- 'Catalyst assumes you know what you are doing when setting these variables.\n'
- 'Catalyst Maintainers use VERY minimal envscripts, if used at all.\n'
- 'You have been warned.')
-
- shutil.copy(self.settings['envscript'],
- self.settings['chroot_path'] + '/tmp/envscript')
-
- # Copy over /etc/hosts from the host in case there are any
- # specialties in there
- hosts_file = self.settings['chroot_path'] + '/etc/hosts'
- if os.path.exists(hosts_file):
- os.rename(hosts_file, hosts_file + '.catalyst')
- shutil.copy('/etc/hosts', hosts_file)
- # write out the make.conf
- try:
- self.write_make_conf(setup=True)
- except OSError as e:
- raise CatalystError('Could not write %s: %s' % (
- normpath(self.settings["chroot_path"] +
- self.settings["make_conf"]), e))
- self.resume.enable("chroot_setup")
-
- def write_make_conf(self, setup=True):
- # Modify and write out make.conf (for the chroot)
- makepath = normpath(self.settings["chroot_path"] +
- self.settings["make_conf"])
- clear_path(makepath)
- with open(makepath, "w") as myf:
- log.notice("Writing the stage make.conf to: %s" % makepath)
- myf.write("# These settings were set by the catalyst build script "
- "that automatically\n# built this stage.\n")
- myf.write("# Please consult "
- "/usr/share/portage/config/make.conf.example "
- "for a more\n# detailed example.\n")
-
- for flags in ["COMMON_FLAGS", "CFLAGS", "CXXFLAGS", "FCFLAGS", "FFLAGS",
- "LDFLAGS", "ASFLAGS"]:
- if flags in ["LDFLAGS", "ASFLAGS"]:
-                    if flags not in self.settings:
- continue
- myf.write("# %s is unsupported. USE AT YOUR OWN RISK!\n"
- % flags)
-                if flags not in self.settings or (flags != "COMMON_FLAGS" and
-                        self.settings[flags] == self.settings["COMMON_FLAGS"]):
- myf.write('%s="${COMMON_FLAGS}"\n' % flags)
- elif isinstance(self.settings[flags], list):
- myf.write('%s="%s"\n'
- % (flags, ' '.join(self.settings[flags])))
- else:
- myf.write('%s="%s"\n'
- % (flags, self.settings[flags]))
-
- if "CBUILD" in self.settings:
- myf.write("# This should not be changed unless you know exactly"
- " what you are doing. You\n# should probably be "
- "using a different stage, instead.\n")
- myf.write('CBUILD="' + self.settings["CBUILD"] + '"\n')
-
- if "CHOST" in self.settings:
- myf.write("# WARNING: Changing your CHOST is not something "
- "that should be done lightly.\n# Please consult "
- "https://wiki.gentoo.org/wiki/Changing_the_CHOST_variable "
- "before changing.\n")
- myf.write('CHOST="' + self.settings["CHOST"] + '"\n')
-
- # Figure out what our USE vars are for building
- myusevars = []
- if "bindist" in self.settings["options"]:
-                myf.write("\n# NOTE: This stage was built with the bindist USE flag enabled\n")
-            if setup or "sticky-config" in self.settings["options"]:
-                myusevars.extend(self.settings["catalyst_use"])
-                if "sticky-config" in self.settings["options"]:
-                    log.notice("STICKY-CONFIG is enabled")
- if "HOSTUSE" in self.settings:
- myusevars.extend(self.settings["HOSTUSE"])
-
- if "use" in self.settings:
- myusevars.extend(self.settings["use"])
-
- if myusevars:
- myf.write("# These are the USE and USE_EXPAND flags that were "
- "used for\n# building in addition to what is provided "
- "by the profile.\n")
- myusevars = sorted(set(myusevars))
- myf.write('USE="' + ' '.join(myusevars) + '"\n')
- if '-*' in myusevars:
- log.warning(
- 'The use of -* in %s/use will cause portage to ignore\n'
- 'package.use in the profile and portage_confdir.\n'
- "You've been warned!", self.settings['spec_prefix'])
-
- myuseexpandvars = {}
- if "HOSTUSEEXPAND" in self.settings:
- for hostuseexpand in self.settings["HOSTUSEEXPAND"]:
- myuseexpandvars.update(
- {hostuseexpand:self.settings["HOSTUSEEXPAND"][hostuseexpand]})
-
- if myuseexpandvars:
- for hostuseexpand in myuseexpandvars:
- myf.write(hostuseexpand + '="' +
- ' '.join(myuseexpandvars[hostuseexpand]) + '"\n')
-            # write out a shippable version
- target_portdir = normpath(self.settings["repo_basedir"] + "/" +
- self.settings["repo_name"])
-
- myf.write('PORTDIR="%s"\n' % target_portdir)
- myf.write('DISTDIR="%s"\n' % self.settings['target_distdir'])
- myf.write('PKGDIR="%s"\n' % self.settings['target_pkgdir'])
- if setup:
- # Setup the portage overlay
- if "portage_overlay" in self.settings:
- myf.write('PORTDIR_OVERLAY="%s"\n' % self.settings["local_overlay"])
-
- # Set default locale for system responses. #478382
- myf.write(
- '\n'
- '# This sets the language of build output to English.\n'
- '# Please keep this setting intact when reporting bugs.\n'
- 'LC_MESSAGES=C\n')
-
-
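For the flags loop above, the resulting make.conf section looks roughly like the output of this condensed sketch; LDFLAGS/ASFLAGS handling is omitted and the settings are hypothetical:

# Condensed version of the flags section written by write_make_conf().
settings = {
    "COMMON_FLAGS": "-O2 -pipe",
    "CFLAGS": "-O2 -pipe",     # same as COMMON_FLAGS, so written as ${COMMON_FLAGS}
    "CXXFLAGS": "-O2 -pipe",
}
for flags in ["COMMON_FLAGS", "CFLAGS", "CXXFLAGS", "FCFLAGS", "FFLAGS"]:
    if flags not in settings or (flags != "COMMON_FLAGS" and
                                 settings[flags] == settings["COMMON_FLAGS"]):
        print('%s="${COMMON_FLAGS}"' % flags)
    else:
        print('%s="%s"' % (flags, settings[flags]))
# COMMON_FLAGS="-O2 -pipe"
# CFLAGS="${COMMON_FLAGS}"
# CXXFLAGS="${COMMON_FLAGS}"
# FCFLAGS="${COMMON_FLAGS}"
# FFLAGS="${COMMON_FLAGS}"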
- def fsscript(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("fsscript"):
- log.notice('Resume point detected, skipping fsscript operation...')
- else:
- if "fsscript" in self.settings:
- if os.path.exists(self.settings["controller_file"]):
- cmd([self.settings['controller_file'], 'fsscript'],
- env=self.env)
- self.resume.enable("fsscript")
-
- def rcupdate(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("rcupdate"):
- log.notice('Resume point detected, skipping rcupdate operation...')
- else:
- if os.path.exists(self.settings["controller_file"]):
- cmd([self.settings['controller_file'], 'rc-update'],
- env=self.env)
- self.resume.enable("rcupdate")
-
- def clean(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("clean"):
- log.notice('Resume point detected, skipping clean operation...')
- else:
- for x in self.settings["cleanables"]:
- log.notice('Cleaning chroot: %s', x)
- clear_path(normpath(self.settings["destpath"] + x))
-
- # Put /etc/hosts back into place
- hosts_file = self.settings['chroot_path'] + '/etc/hosts'
- if os.path.exists(hosts_file + '.catalyst'):
- os.rename(hosts_file + '.catalyst', hosts_file)
-
- # optionally clean up portage configs
- if ("portage_prefix" in self.settings and
- "sticky-config" not in self.settings["options"]):
-                log.debug("clean(), portage_prefix = %s, no sticky-config", self.settings["portage_prefix"])
- for _dir in "accept_keywords", "keywords", "mask", "unmask", "use":
- target = pjoin(self.settings["destpath"],
- "etc/portage/package.%s" % _dir,
- self.settings["portage_prefix"])
- log.notice("Clearing portage_prefix target: %s", target)
- clear_path(target)
-
- # Remove our overlay
- overlay = normpath(self.settings["chroot_path"] + self.settings["local_overlay"])
- if os.path.exists(overlay):
- clear_path(overlay)
-
- if "sticky-config" not in self.settings["options"]:
- # re-write the make.conf to be sure it is clean
- self.write_make_conf(setup=False)
-
- # Clean up old and obsoleted files in /etc
- if os.path.exists(self.settings["stage_path"]+"/etc"):
- cmd(['find', self.settings['stage_path'] + '/etc',
- '-maxdepth', '1', '-name', '*-', '-delete'],
- env=self.env)
-
- if os.path.exists(self.settings["controller_file"]):
- cmd([self.settings['controller_file'], 'clean'], env=self.env)
- self.resume.enable("clean")
-
- def empty(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("empty"):
- log.notice('Resume point detected, skipping empty operation...')
- else:
- if self.settings["spec_prefix"] + "/empty" in self.settings:
- if isinstance(
- self.settings[self.settings['spec_prefix'] + '/empty'],
- str):
- self.settings[self.settings["spec_prefix"] + "/empty"] = \
- self.settings[self.settings["spec_prefix"] + \
- "/empty"].split()
- for x in self.settings[self.settings["spec_prefix"] + "/empty"]:
- myemp = self.settings["destpath"] + x
- if not os.path.isdir(myemp) or os.path.islink(myemp):
- log.warning('not a directory or does not exist, '
- 'skipping "empty" operation: %s', x)
- continue
- log.info('Emptying directory %s', x)
- clear_dir(myemp)
- self.resume.enable("empty")
-
- def remove(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("remove"):
- log.notice('Resume point detected, skipping remove operation...')
- else:
- if self.settings["spec_prefix"] + "/rm" in self.settings:
- for x in self.settings[self.settings["spec_prefix"] + "/rm"]:
- # We're going to shell out for all these cleaning
- # operations, so we get easy glob handling.
- log.notice('livecd: removing %s', x)
- clear_path(self.settings["chroot_path"] + x)
- try:
- if os.path.exists(self.settings["controller_file"]):
- cmd([self.settings['controller_file'], 'clean'],
- env=self.env)
- self.resume.enable("remove")
- except:
- self.unbind()
- raise
-
- def preclean(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("preclean"):
- log.notice('Resume point detected, skipping preclean operation...')
- else:
- try:
- if os.path.exists(self.settings["controller_file"]):
- cmd([self.settings['controller_file'], 'preclean'],
- env=self.env)
- self.resume.enable("preclean")
-
- except:
- self.unbind()
- raise CatalystError("Build failed, could not execute preclean")
-
- def capture(self):
- # initialize it here so it doesn't use
- # resources if it is not needed
- if not self.compressor:
- self.compressor = CompressMap(self.settings["compress_definitions"],
- env=self.env, default_mode=self.settings['compression_mode'],
- comp_prog=self.settings['comp_prog'])
-
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("capture"):
- log.notice('Resume point detected, skipping capture operation...')
- else:
- log.notice('Capture target in a tarball')
- # Remove filename from path
- mypath = os.path.dirname(self.settings["target_path"].rstrip('/'))
-
- # Now make sure path exists
- ensure_dirs(mypath)
-
- pack_info = self.compressor.create_infodict(
- source=".",
- basedir=self.settings["stage_path"],
- filename=self.settings["target_path"].rstrip('/'),
- mode=self.settings["compression_mode"],
- auto_extension=True,
- arch=self.settings["compressor_arch"],
- other_options=self.settings["compressor_options"],
- )
- target_filename = ".".join([self.settings["target_path"].rstrip('/'),
- self.compressor.extension(pack_info['mode'])])
-
- log.notice('Creating stage tarball... mode: %s',
- self.settings['compression_mode'])
-
- if self.compressor.compress(pack_info):
- self.gen_contents_file(target_filename)
- self.gen_digest_file(target_filename)
- self.resume.enable("capture")
- else:
- log.warning("Couldn't create stage tarball: %s", target_filename)
-
- def run_local(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("run_local"):
- log.notice('Resume point detected, skipping run_local operation...')
- else:
- try:
- if os.path.exists(self.settings["controller_file"]):
- log.info('run_local() starting controller script...')
- cmd([self.settings['controller_file'], 'run'],
- env=self.env)
- self.resume.enable("run_local")
- else:
- log.info('run_local() no controller_file found... %s',
- self.settings['controller_file'])
-
- except CatalystError:
- self.unbind()
- raise CatalystError("Stage build aborting due to error.",
- print_traceback=False)
-
- def setup_environment(self):
- """
- Modify the current environment. This is an ugly hack that should be
- fixed. We need this to use the os.system() call since we can't
- specify our own environ
- """
- log.debug('setup_environment(); settings = %r', self.settings)
- for x in list(self.settings):
- log.debug('setup_environment(); processing: %s', x)
- if x == "options":
- #self.env['clst_' + x] = ' '.join(self.settings[x])
- for opt in self.settings[x]:
- self.env['clst_' + opt.upper()] = "true"
- continue
- # Sanitize var names by doing "s|/-.|_|g"
- varname = "clst_" + x.replace("/", "_")
- varname = varname.replace("-", "_")
- varname = varname.replace(".", "_")
- if isinstance(self.settings[x], str):
- # Prefix to prevent namespace clashes
- #os.environ[varname] = self.settings[x]
- if "path" in x:
- self.env[varname] = self.settings[x].rstrip("/")
- else:
- self.env[varname] = self.settings[x]
- elif isinstance(self.settings[x], list):
- #os.environ[varname] = ' '.join(self.settings[x])
- self.env[varname] = ' '.join(self.settings[x])
- elif isinstance(self.settings[x], bool):
- if self.settings[x]:
- self.env[varname] = "true"
- else:
- self.env[varname] = "false"
-            # This handles a dictionary of objects just one level deep and no deeper!
-            # It is currently used only for USE_EXPAND flags, which are dictionaries
-            # of lists in arch/amd64.py and friends. If we wanted self.settings[x]
-            # of arbitrary depth, we would need to make this function recursive.
- elif isinstance(self.settings[x], dict):
- if x in ["compress_definitions",
- "decompress_definitions"]:
- continue
- self.env[varname] = ' '.join(self.settings[x].keys())
- for y in self.settings[x].keys():
- varname2 = "clst_" + y.replace("/", "_")
- varname2 = varname2.replace("-", "_")
- varname2 = varname2.replace(".", "_")
- if isinstance(self.settings[x][y], str):
- self.env[varname2] = self.settings[x][y]
- elif isinstance(self.settings[x][y], list):
- self.env[varname2] = ' '.join(self.settings[x][y])
- elif isinstance(self.settings[x][y], bool):
- if self.settings[x][y]:
-                            self.env[varname2] = "true"
-                        else:
-                            self.env[varname2] = "false"
-
- if "makeopts" in self.settings:
- if isinstance(self.settings["makeopts"], str):
- self.env["MAKEOPTS"] = self.settings["makeopts"]
- else:
- # ensure makeopts is a string
- self.env["MAKEOPTS"] = ' '.join(self.settings["makeopts"])
-
- log.debug('setup_environment(); env = %r', self.env)
-
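The variable-name mangling that setup_environment() performs is easy to get wrong when writing target shell scripts; here is a minimal sketch with hypothetical spec keys:

# "clst_" prefix plus s|/-.|_|g, as done above for string settings.
def clst_varname(key):
    return "clst_" + key.replace("/", "_").replace("-", "_").replace(".", "_")

print(clst_varname("livecd/fstype"))            # clst_livecd_fstype
print(clst_varname("boot/kernel/gentoo/use"))   # clst_boot_kernel_gentoo_use
print(clst_varname("version_stamp"))            # clst_version_stamp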
- def run(self):
- self.chroot_lock.write_lock()
-
- # Kill any pids in the chroot
- self.kill_chroot_pids()
-
- # Check for mounts right away and abort if we cannot unmount them
- self.mount_safety_check()
-
- if "clear-autoresume" in self.settings["options"]:
- self.clear_autoresume()
-
- if "purgetmponly" in self.settings["options"]:
- self.purge()
- return
-
- if "purgeonly" in self.settings["options"]:
- log.info('StageBase: run() purgeonly')
- self.purge()
-
- if "purge" in self.settings["options"]:
- log.info('StageBase: run() purge')
- self.purge()
-
- failure = False
- for x in self.settings["action_sequence"]:
- log.notice('--- Running action sequence: %s', x)
- sys.stdout.flush()
- try:
- getattr(self, x)()
- except LockInUse:
-                log.error('Unable to acquire the lock...')
- failure = True
- break
- except Exception:
- log.error('Exception running action sequence %s', x, exc_info=True)
- failure = True
- break
-
- if failure:
- log.notice('Cleaning up... Running unbind()')
- self.unbind()
- return False
- return True
-
-
- def unmerge(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("unmerge"):
- log.notice('Resume point detected, skipping unmerge operation...')
- else:
- if self.settings["spec_prefix"] + "/unmerge" in self.settings:
- if isinstance(self.settings[self.settings['spec_prefix'] + '/unmerge'], str):
- self.settings[self.settings["spec_prefix"] + "/unmerge"] = \
- [self.settings[self.settings["spec_prefix"] + "/unmerge"]]
-
- # Before cleaning, unmerge stuff
- try:
- cmd([self.settings['controller_file'], 'unmerge'] +
- self.settings[self.settings['spec_prefix'] + '/unmerge'],
- env=self.env)
- log.info('unmerge shell script')
- except CatalystError:
- self.unbind()
- raise
- self.resume.enable("unmerge")
-
- def target_setup(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("target_setup"):
- log.notice('Resume point detected, skipping target_setup operation...')
- else:
- log.notice('Setting up filesystems per filesystem type')
- cmd([self.settings['controller_file'], 'target_image_setup',
- self.settings['target_path']], env=self.env)
- self.resume.enable("target_setup")
-
- def setup_overlay(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("setup_overlay"):
- log.notice('Resume point detected, skipping setup_overlay operation...')
- else:
- if self.settings["spec_prefix"] + "/overlay" in self.settings:
- for x in self.settings[self.settings["spec_prefix"] + "/overlay"]:
- if os.path.exists(x):
- cmd(['rsync', '-a', x + '/', self.settings['target_path']],
- env=self.env)
- self.resume.enable("setup_overlay")
-
- def create_iso(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("create_iso"):
- log.notice('Resume point detected, skipping create_iso operation...')
- else:
- # Create the ISO
- if "iso" in self.settings:
- cmd([self.settings['controller_file'], 'iso', self.settings['iso']],
- env=self.env)
- self.gen_contents_file(self.settings["iso"])
- self.gen_digest_file(self.settings["iso"])
- self.resume.enable("create_iso")
- else:
- log.warning('livecd/iso was not defined. '
- 'An ISO Image will not be created.')
-
- def build_packages(self):
- build_packages_resume = pjoin(self.settings["autoresume_path"],
- "build_packages")
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("build_packages"):
- log.notice('Resume point detected, skipping build_packages operation...')
- else:
- if self.settings["spec_prefix"] + "/packages" in self.settings:
-                target_pkgs = self.settings["spec_prefix"] + '/packages'
-                command = [self.settings['controller_file'], 'build_packages']
-                if isinstance(self.settings[target_pkgs], str):
-                    command.append(self.settings[target_pkgs])
-                else:
-                    command.extend(self.settings[target_pkgs])
-                try:
-                    cmd(command, env=self.env)
-                    fileutils.touch(build_packages_resume)
-                    self.resume.enable("build_packages")
-                except CatalystError:
-                    self.unbind()
-                    raise CatalystError(
-                        self.settings["spec_prefix"] +
-                        " build aborting due to error.")
-
- def build_kernel(self):
- '''Build all configured kernels'''
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("build_kernel"):
- log.notice('Resume point detected, skipping build_kernel operation...')
- else:
- if "boot/kernel" in self.settings:
- try:
- mynames = self.settings["boot/kernel"]
- if isinstance(mynames, str):
- mynames = [mynames]
- # Execute the script that sets up the kernel build environment
- cmd([self.settings['controller_file'], 'pre-kmerge'],
- env=self.env)
- for kname in mynames:
- self._build_kernel(kname = kname)
- self.resume.enable("build_kernel")
- except CatalystError:
- self.unbind()
- raise CatalystError(
- "build aborting due to kernel build error.",
- print_traceback=True)
-
- def _build_kernel(self, kname):
- "Build a single configured kernel by name"
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("build_kernel_" + kname):
- log.notice('Resume point detected, skipping build_kernel '
- 'for %s operation...', kname)
- return
- self._copy_kernel_config(kname=kname)
-
- # If we need to pass special options to the bootloader
- # for this kernel put them into the environment
- key = 'boot/kernel/' + kname + '/kernelopts'
- if key in self.settings:
- myopts = self.settings[key]
-
- if not isinstance(myopts, str):
- myopts = ' '.join(myopts)
- self.env[kname + "_kernelopts"] = myopts
- else:
- self.env[kname + "_kernelopts"] = ""
-
- key = 'boot/kernel/' + kname + '/extraversion'
- self.settings.setdefault(key, '')
- self.env["clst_kextraversion"] = self.settings[key]
-
- self._copy_initramfs_overlay(kname=kname)
-
- # Execute the script that builds the kernel
- cmd([self.settings['controller_file'], 'kernel', kname],
- env=self.env)
-
- if "boot/kernel/" + kname + "/initramfs_overlay" in self.settings:
- log.notice('Cleaning up temporary overlay dir')
- clear_dir(self.settings['chroot_path'] + '/tmp/initramfs_overlay/')
-
-        self.resume.enable("build_kernel_" + kname)
-
- # Execute the script that cleans up the kernel build environment
- cmd([self.settings['controller_file'], 'post-kmerge'],
- env=self.env)
-
- def _copy_kernel_config(self, kname):
- key = 'boot/kernel/' + kname + '/config'
- if key in self.settings:
- if not os.path.exists(self.settings[key]):
- self.unbind()
- raise CatalystError("Can't find kernel config: %s" %
- self.settings[key])
-
- try:
- shutil.copy(self.settings[key],
- self.settings['chroot_path'] + '/var/tmp/' + kname + '.config')
-
-            except IOError as e:
-                self.unbind()
-                raise CatalystError("Couldn't copy kernel config %s: %s" %
-                    (self.settings[key], e))
-
- def _copy_initramfs_overlay(self, kname):
- key = 'boot/kernel/' + kname + '/initramfs_overlay'
- if key in self.settings:
- if os.path.exists(self.settings[key]):
- log.notice('Copying initramfs_overlay dir %s', self.settings[key])
-
- ensure_dirs(
- self.settings['chroot_path'] +
- '/tmp/initramfs_overlay/' + self.settings[key])
-
- cmd('cp -R ' + self.settings[key] + '/* ' +
- self.settings['chroot_path'] +
- '/tmp/initramfs_overlay/' + self.settings[key], env=self.env)
-
- def bootloader(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("bootloader"):
- log.notice('Resume point detected, skipping bootloader operation...')
- else:
- try:
- cmd([self.settings['controller_file'], 'bootloader',
- self.settings['target_path'].rstrip('/')],
- env=self.env)
- self.resume.enable("bootloader")
- except CatalystError:
- self.unbind()
- raise CatalystError("Script aborting due to error.")
-
- def livecd_update(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("livecd_update"):
-            log.notice('Resume point detected, skipping livecd_update operation...')
- else:
- try:
- cmd([self.settings['controller_file'], 'livecd-update'],
- env=self.env)
- self.resume.enable("livecd_update")
-
- except CatalystError:
- self.unbind()
- raise CatalystError("build aborting due to livecd_update error.")
-
- @staticmethod
- def _debug_pause_():
- py_input("press any key to continue: ")
-
-# vim: ts=4 sw=4 sta et sts=4 ai
+ """
+ This class does all of the chroot setup, copying of files, etc. It is
+ the driver class for pretty much everything that Catalyst does.
+ """
+
+ def __init__(self, myspec, addlargs):
+ self.required_values |= frozenset([
+ "profile",
+ "rel_type",
+ "snapshot_treeish",
+ "source_subpath",
+ "subarch",
+ "target",
+ "version_stamp",
+ ])
+ self.valid_values |= self.required_values | frozenset([
+ "asflags",
+ "binrepo_path",
+ "catalyst_use",
+ "cbuild",
+ "cflags",
+ "common_flags",
+ "compression_mode",
+ "cxxflags",
+ "decompressor_search_order",
+ "fcflags",
+ "fflags",
+ "hostuse",
+ "install_mask",
+ "interpreter",
+ "kerncache_path",
+ "ldflags",
+ "pkgcache_path",
+ "portage_confdir",
+ "repos",
+ "portage_prefix",
+ ])
+ self.prepare_sequence = [
+ self.unpack,
+ self.config_profile_link,
+ self.setup_confdir,
+ self.process_repos,
+ ]
+ self.build_sequence = [
+ self.bind,
+ self.chroot_setup,
+ self.setup_environment,
+ ]
+ if 'enter-chroot' in myspec['options']:
+ self.build_sequence.append(self.enter_chroot)
+
+ self.finish_sequence = []
+
+ self.set_valid_build_kernel_vars(addlargs)
+ TargetBase.__init__(self, myspec, addlargs)
+ GenBase.__init__(self, myspec)
+ ClearBase.__init__(self, myspec)
+
+ self.makeconf = {}
+
+ host = self.settings["subarch"]
+ self.settings["hostarch"] = host
+
+ if "cbuild" in self.settings:
+ build = self.settings["cbuild"].split("-")[0]
+ else:
+ build = platform.machine()
+ self.settings["buildarch"] = build
+
+ arch_dir = normpath(self.settings['sharedir'] + '/arch/')
+
+ log.debug("Searching arch definitions...")
+ for x in [x for x in os.listdir(arch_dir) if x.endswith('.toml')]:
+ log.debug("\tTrying %s", x)
+ name = x[:-len('.toml')]
+
+ with open(arch_dir + x, 'rb') as file:
+ arch_config = tomli.load(file)
+
+ # Search for a subarchitecture in each arch in the arch_config
+ for arch in [x for x in arch_config if x.startswith(name) and host in arch_config[x]]:
+ self.settings.update(arch_config[arch][host])
+ setarch = arch_config.get('setarch', {}).get(arch, {})
+ break
+ else:
+ # Didn't find a matching subarchitecture, keep searching
+ continue
+
+ break
+ else:
+ raise CatalystError("Unknown host machine type " + host)
+
+ if setarch.get('if_build', '') == platform.machine():
+ chroot = f'setarch {setarch["arch"]} chroot'
+ else:
+ chroot = 'chroot'
+ self.settings["CHROOT"] = chroot
+
+ log.notice('Using target: %s', self.settings['target'])
+ # Print a nice informational message
+ if chroot.startswith('setarch'):
+ log.info('Building on %s for alternate personality type %s',
+ build, host)
+ else:
+ log.info('Building natively for %s', host)
+
+ # This must be set first as other set_ options depend on this
+ self.set_spec_prefix()
+
+        # Initialize our (de)compressors
+ self.decompressor = CompressMap(self.settings["decompress_definitions"],
+ env=self.env,
+ search_order=self.settings["decompressor_search_order"],
+ comp_prog=self.settings["comp_prog"],
+ decomp_opt=self.settings["decomp_opt"])
+ self.accepted_extensions = self.decompressor.search_order_extensions(
+ self.settings["decompressor_search_order"])
+ log.notice("Accepted source file extensions search order: %s",
+ self.accepted_extensions)
+ # save resources, it is not always needed
+ self.compressor = None
+
+ # Define all of our core variables
+ self.set_target_profile()
+ self.set_target_subpath()
+ self.set_source_subpath()
+
+ # Set paths
+ self.set_snapshot()
+ self.set_root_path()
+ self.set_source_path()
+ self.set_chroot_path()
+ self.set_autoresume_path()
+ self.set_stage_path()
+ self.set_target_path()
+
+ self.set_controller_file()
+ self.set_default_action_sequence()
+ self.set_use()
+ self.set_catalyst_use()
+ self.set_cleanables()
+ self.set_iso_volume_id()
+ self.set_build_kernel_vars()
+ self.set_fsscript()
+ self.set_install_mask()
+ self.set_rcadd()
+ self.set_rcdel()
+ self.set_cdtar()
+ self.set_fstype()
+ self.set_fsops()
+ self.set_iso()
+ self.set_packages()
+ self.set_rm()
+ self.set_linuxrc()
+ self.set_groups()
+ self.set_users()
+ self.set_ssh_public_keys()
+ self.set_busybox_config()
+ self.set_overlay()
+ self.set_repos()
+ self.set_root_overlay()
+
+ # This next line checks to make sure that the specified variables exist on disk.
+ # pdb.set_trace()
+ file_locate(self.settings, ["distdir"], expand=0)
+ # If we are using portage_confdir, check that as well.
+ if "portage_confdir" in self.settings:
+ file_locate(self.settings, ["portage_confdir"], expand=0)
+
+ # Setup our mount points.
+ self.mount = copy.deepcopy(MOUNT_DEFAULTS)
+
+ # Create mount entry for each repository
+ for path, name, _ in self.repos:
+ name = get_repo_name(path)
+ mount_id = f'repo_{name}'
+
+ self.mount[mount_id] = {
+ 'enable': True,
+ 'source': path,
+ 'target': self.get_repo_location(name)
+ }
+
+ self.mount['distdir']['source'] = self.settings['distdir']
+ self.mount["distdir"]['target'] = self.settings['target_distdir']
+
+ # Configure any user specified options (either in catalyst.conf or on
+ # the command line).
+ if "pkgcache" in self.settings["options"]:
+ self.set_pkgcache_path()
+ self.mount['pkgdir']['enable'] = True
+ self.mount['pkgdir']['source'] = self.settings['pkgcache_path']
+ self.mount['pkgdir']['target'] = self.settings["target_pkgdir"]
+ log.info('Location of the package cache is %s',
+ self.settings['pkgcache_path'])
+
+ if "kerncache" in self.settings["options"]:
+ self.set_kerncache_path()
+ self.mount['kerncache']['enable'] = True
+ self.mount['kerncache']['source'] = self.settings["kerncache_path"]
+ log.info('Location of the kerncache is %s',
+ self.settings['kerncache_path'])
+
+ if "ccache" in self.settings["options"]:
+ if "CCACHE_DIR" in os.environ:
+ ccdir = os.environ["CCACHE_DIR"]
+ del os.environ["CCACHE_DIR"]
+ else:
+ ccdir = "/var/tmp/ccache"
+ if not os.path.isdir(ccdir):
+ raise CatalystError(
+ "Compiler cache support can't be enabled (can't find " + ccdir+")")
+ self.mount['ccache']['enable'] = True
+ self.mount['ccache']['source'] = ccdir
+ self.env["CCACHE_DIR"] = self.mount['ccache']['target']
+
+ if "icecream" in self.settings["options"]:
+ self.mount['icecream']['enable'] = True
+ self.mount['icecream']['source'] = self.settings['icecream']
+ self.env["PATH"] = self.mount['icecream']['target'] + \
+ ":" + self.env["PATH"]
+
+ if "port_logdir" in self.settings:
+ self.mount['port_logdir']['enable'] = True
+ self.mount['port_logdir']['source'] = normpath(self.settings['port_logdir'] + "/" + self.settings["target_subpath"] + "/")
+ self.env["PORTAGE_LOGDIR"] = self.settings["target_logdir"]
+
+ def override_cbuild(self):
+ if "CBUILD" in self.makeconf:
+ self.settings["CBUILD"] = self.makeconf["CBUILD"]
+
+ def override_chost(self):
+ if "CHOST" in self.makeconf:
+ self.settings["CHOST"] = self.makeconf["CHOST"]
+
+ def override_cflags(self):
+ if "CFLAGS" in self.makeconf:
+ self.settings["CFLAGS"] = self.makeconf["CFLAGS"]
+
+ def override_cxxflags(self):
+ if "CXXFLAGS" in self.makeconf:
+ self.settings["CXXFLAGS"] = self.makeconf["CXXFLAGS"]
+
+ def override_fcflags(self):
+ if "FCFLAGS" in self.makeconf:
+ self.settings["FCFLAGS"] = self.makeconf["FCFLAGS"]
+
+ def override_fflags(self):
+ if "FFLAGS" in self.makeconf:
+ self.settings["FFLAGS"] = self.makeconf["FFLAGS"]
+
+ def override_ldflags(self):
+ if "LDFLAGS" in self.makeconf:
+ self.settings["LDFLAGS"] = self.makeconf["LDFLAGS"]
+
+ def override_asflags(self):
+ if "ASFLAGS" in self.makeconf:
+ self.settings["ASFLAGS"] = self.makeconf["ASFLAGS"]
+
+ def override_common_flags(self):
+ if "COMMON_FLAGS" in self.makeconf:
+ self.settings["COMMON_FLAGS"] = self.makeconf["COMMON_FLAGS"]
+
+ def set_install_mask(self):
+ if "install_mask" in self.settings:
+ if not isinstance(self.settings['install_mask'], str):
+ self.settings["install_mask"] = \
+ ' '.join(self.settings["install_mask"])
+
+ def set_spec_prefix(self):
+ self.settings["spec_prefix"] = self.settings["target"]
+
+ def set_target_profile(self):
+ self.settings["target_profile"] = self.settings["profile"]
+
+ def set_target_subpath(self):
+ common = self.settings["rel_type"] + "/" + \
+ self.settings["target"] + "-" + self.settings["subarch"]
+ self.settings["target_subpath"] = \
+ common + \
+ "-" + self.settings["version_stamp"] + \
+ "/"
+ self.settings["target_subpath_unversioned"] = \
+ common + \
+ "/"
+
+ def set_source_subpath(self):
+ if not isinstance(self.settings['source_subpath'], str):
+ raise CatalystError(
+ "source_subpath should have been a string. Perhaps you have " +
+ "something wrong in your spec file?")
+
+ def set_pkgcache_path(self):
+ if "pkgcache_path" in self.settings:
+ if not isinstance(self.settings['pkgcache_path'], str):
+ self.settings["pkgcache_path"] = \
+ normpath(self.settings["pkgcache_path"])
+ elif "versioned_cache" in self.settings["options"]:
+ self.settings["pkgcache_path"] = \
+ normpath(self.settings["storedir"] + "/packages/" +
+ self.settings["target_subpath"] + "/")
+ else:
+ self.settings["pkgcache_path"] = \
+ normpath(self.settings["storedir"] + "/packages/" +
+ self.settings["target_subpath_unversioned"] + "/")
+
+ def set_kerncache_path(self):
+ if "kerncache_path" in self.settings:
+ if not isinstance(self.settings['kerncache_path'], str):
+ self.settings["kerncache_path"] = \
+ normpath(self.settings["kerncache_path"])
+ elif "versioned_cache" in self.settings["options"]:
+ self.settings["kerncache_path"] = normpath(self.settings["storedir"] +
+ "/kerncache/" + self.settings["target_subpath"])
+ else:
+ self.settings["kerncache_path"] = normpath(self.settings["storedir"] +
+ "/kerncache/" + self.settings["target_subpath_unversioned"])
+
+ def set_target_path(self):
+ self.settings["target_path"] = normpath(self.settings["storedir"] +
+ "/builds/" + self.settings["target_subpath"])
+ if "autoresume" in self.settings["options"]\
+ and self.resume.is_enabled("setup_target_path"):
+ log.notice(
+ 'Resume point detected, skipping target path setup operation...')
+ return
+
+ self.resume.enable("setup_target_path")
+ ensure_dirs(self.settings["storedir"] + "/builds")
+
+ def set_fsscript(self):
+ if self.settings["spec_prefix"] + "/fsscript" in self.settings:
+ self.settings["fsscript"] = \
+ self.settings[self.settings["spec_prefix"] + "/fsscript"]
+ del self.settings[self.settings["spec_prefix"] + "/fsscript"]
+
+ def set_rcadd(self):
+ if self.settings["spec_prefix"] + "/rcadd" in self.settings:
+ self.settings["rcadd"] = \
+ self.settings[self.settings["spec_prefix"] + "/rcadd"]
+ del self.settings[self.settings["spec_prefix"] + "/rcadd"]
+
+ def set_rcdel(self):
+ if self.settings["spec_prefix"] + "/rcdel" in self.settings:
+ self.settings["rcdel"] = \
+ self.settings[self.settings["spec_prefix"] + "/rcdel"]
+ del self.settings[self.settings["spec_prefix"] + "/rcdel"]
+
+ def set_cdtar(self):
+ if self.settings["spec_prefix"] + "/cdtar" in self.settings:
+ self.settings["cdtar"] = \
+ normpath(
+ self.settings[self.settings["spec_prefix"] + "/cdtar"])
+ del self.settings[self.settings["spec_prefix"] + "/cdtar"]
+
+ def set_iso(self):
+ if self.settings["spec_prefix"] + "/iso" in self.settings:
+ if self.settings[self.settings["spec_prefix"] + "/iso"].startswith('/'):
+ self.settings["iso"] = \
+ normpath(
+ self.settings[self.settings["spec_prefix"] + "/iso"])
+ else:
+ # This automatically prepends the build dir to the ISO output path
+ # if it doesn't start with a /
+ self.settings["iso"] = normpath(self.settings["storedir"] +
+ "/builds/" + self.settings["rel_type"] + "/" +
+ self.settings[self.settings["spec_prefix"] + "/iso"])
+ del self.settings[self.settings["spec_prefix"] + "/iso"]
+
+ def set_fstype(self):
+ if self.settings["spec_prefix"] + "/fstype" in self.settings:
+ self.settings["fstype"] = \
+ self.settings[self.settings["spec_prefix"] + "/fstype"]
+ del self.settings[self.settings["spec_prefix"] + "/fstype"]
+
+ if "fstype" not in self.settings:
+ self.settings["fstype"] = "squashfs"
+ for x in self.valid_values:
+ if x == self.settings["spec_prefix"] + "/fstype":
+ log.info('%s/fstype is being set to the default of "squashfs"',
+ self.settings['spec_prefix'])
+
+ def set_fsops(self):
+ if "fstype" in self.settings:
+ self.valid_values |= {"fsops"}
+ if self.settings["spec_prefix"] + "/fsops" in self.settings:
+ self.settings["fsops"] = \
+ self.settings[self.settings["spec_prefix"] + "/fsops"]
+ del self.settings[self.settings["spec_prefix"] + "/fsops"]
+
+ def set_source_path(self):
+ if "seedcache" in self.settings["options"]\
+ and os.path.isdir(normpath(self.settings["storedir"] + "/tmp/" +
+ self.settings["source_subpath"] + "/")):
+ self.settings["source_path"] = normpath(self.settings["storedir"] +
+ "/tmp/" + self.settings["source_subpath"] + "/")
+ log.debug("source_subpath is: %s", self.settings["source_path"])
+ else:
+ log.debug('Checking source path existence and '
+ 'get the final filepath. subpath: %s',
+ self.settings["source_subpath"])
+ self.settings["source_path"] = file_check(
+ normpath(self.settings["storedir"] + "/builds/" +
+ self.settings["source_subpath"]),
+ self.accepted_extensions,
+ )
+ log.debug('Source path returned from file_check is: %s',
+ self.settings["source_path"])
+ if os.path.isfile(self.settings["source_path"]):
+ # XXX: Is this even necessary if the previous check passes?
+ if os.path.exists(self.settings["source_path"]):
+ self.settings["source_path_hash"] = \
+ self.generate_hash(self.settings["source_path"], "sha1")
+ log.notice('Source path set to %s', self.settings['source_path'])
+
+ def set_cleanables(self):
+ self.settings['cleanables'] = [
+ "/etc/machine-id",
+ "/etc/resolv.conf",
+ "/var/tmp/*",
+ "/tmp/*",
+ ]
+
+ def set_chroot_path(self):
+ """
+ NOTE: the trailing slash has been removed
+ Things *could* break if you don't use a proper join()
+ """
+ self.settings["chroot_path"] = normpath(self.settings["storedir"] +
+ "/tmp/" + self.settings["target_subpath"].rstrip('/'))
+
+ def set_autoresume_path(self):
+ self.settings["autoresume_path"] = normpath(pjoin(
+ self.settings["storedir"], "tmp", self.settings["rel_type"],
+ ".autoresume-%s-%s-%s"
+ % (self.settings["target"], self.settings["subarch"],
+ self.settings["version_stamp"])
+ ))
+ if "autoresume" in self.settings["options"]:
+ log.info('The autoresume path is %s',
+ self.settings['autoresume_path'])
+ self.resume = AutoResume(self.settings["autoresume_path"], mode=0o755)
+
+ def set_controller_file(self):
+ self.settings["controller_file"] = normpath(self.settings["sharedir"] +
+ "/targets/" + self.settings["target"] + "/" + "controller.sh")
+
+ def set_iso_volume_id(self):
+ if self.settings["spec_prefix"] + "/volid" in self.settings:
+ self.settings["iso_volume_id"] = \
+ self.settings[self.settings["spec_prefix"] + "/volid"]
+ if len(self.settings["iso_volume_id"]) > 32:
+ raise CatalystError(
+ "ISO volume ID must not exceed 32 characters.")
+ else:
+ self.settings["iso_volume_id"] = "catalyst " + \
+ self.settings['snapshot_treeish']
+
+ def set_default_action_sequence(self):
+ """ Default action sequence for run method.
+
+ This method sets the optional purgeonly action sequence and returns.
+ Or it calls the normal set_action_sequence() for the target stage.
+ """
+ if "purgeonly" in self.settings["options"]:
+ self.build_sequence = [self.remove_chroot]
+ return
+ self.set_action_sequence()
+
+ def set_action_sequence(self):
+ """Set basic stage1, 2, 3 action sequences"""
+ self.build_sequence.extend([
+ self.run_local,
+ self.preclean,
+ ])
+ self.finish_sequence.extend([
+ self.clean,
+ ])
+ self.set_completion_action_sequences()
+
+ def set_completion_action_sequences(self):
+ if "fetch" not in self.settings["options"]:
+ self.finish_sequence.append(self.capture)
+ if "keepwork" in self.settings["options"]:
+ self.finish_sequence.append(self.clear_autoresume)
+ elif "seedcache" in self.settings["options"]:
+ self.finish_sequence.append(self.remove_autoresume)
+ else:
+ self.finish_sequence.append(self.remove_autoresume)
+ self.finish_sequence.append(self.remove_chroot)
+
+ def set_use(self):
+ use = self.settings["spec_prefix"] + "/use"
+ if use in self.settings:
+ if isinstance(self.settings[use], str):
+ self.settings["use"] = self.settings[use].split()
+ else:
+ self.settings["use"] = self.settings[use]
+ del self.settings[use]
+ else:
+ self.settings["use"] = []
+
+ def set_catalyst_use(self):
+ catalyst_use = self.settings["spec_prefix"] + "/catalyst_use"
+ if catalyst_use in self.settings:
+ if isinstance(self.settings[catalyst_use], str):
+ self.settings["catalyst_use"] = self.settings[catalyst_use].split()
+ else:
+ self.settings["catalyst_use"] = self.settings[catalyst_use]
+ del self.settings[catalyst_use]
+ else:
+ self.settings["catalyst_use"] = []
+
+ # Force bindist when options ask for it
+ if "bindist" in self.settings["options"]:
+ log.debug("Enabling bindist USE flag")
+ self.settings["catalyst_use"].append("bindist")
+
+ def set_stage_path(self):
+ self.settings["stage_path"] = normpath(self.settings["chroot_path"] +
+ self.settings["root_path"])
+
+ def set_packages(self):
+ pass
+
+ def set_rm(self):
+ if self.settings["spec_prefix"] + "/rm" in self.settings:
+ if isinstance(self.settings[self.settings['spec_prefix'] + '/rm'], str):
+ self.settings[self.settings["spec_prefix"] + "/rm"] = \
+ self.settings[self.settings["spec_prefix"] + "/rm"].split()
+
+ def set_linuxrc(self):
+ if self.settings["spec_prefix"] + "/linuxrc" in self.settings:
+ if isinstance(self.settings[self.settings['spec_prefix'] + '/linuxrc'], str):
+ self.settings["linuxrc"] = \
+ self.settings[self.settings["spec_prefix"] + "/linuxrc"]
+ del self.settings[self.settings["spec_prefix"] + "/linuxrc"]
+
+ def set_groups(self):
+ groups = self.settings["spec_prefix"] + "/groups"
+ if groups in self.settings:
+ self.settings["groups"] = self.settings[groups]
+ if isinstance(self.settings[groups], str):
+ self.settings["groups"] = self.settings[groups].split(",")
+ del self.settings[groups]
+ else:
+ self.settings["groups"] = []
+ log.info('groups to create: %s', self.settings["groups"])
+
+ def set_users(self):
+ users = self.settings["spec_prefix"] + "/users"
+ if users in self.settings:
+ self.settings["users"] = self.settings[users]
+ if isinstance(self.settings[users], str):
+ self.settings["users"] = [self.settings[users]]
+ del self.settings[users]
+ else:
+ self.settings["users"] = []
+ log.info('users to create: %s', self.settings["users"])
+
+ def set_ssh_public_keys(self):
+ ssh_public_keys = self.settings["spec_prefix"] + "/ssh_public_keys"
+ if ssh_public_keys in self.settings:
+ self.settings["ssh_public_keys"] = self.settings[ssh_public_keys]
+ if isinstance(self.settings[ssh_public_keys], str):
+ self.settings["ssh_public_keys"] = self.settings[ssh_public_keys].split(",")
+ del self.settings[ssh_public_keys]
+ else:
+ self.settings["ssh_public_keys"] = []
+ log.info('ssh public keys to copy: %s', self.settings["ssh_public_keys"])
+
+ def set_busybox_config(self):
+ if self.settings["spec_prefix"] + "/busybox_config" in self.settings:
+ if isinstance(self.settings[self.settings['spec_prefix'] + '/busybox_config'], str):
+ self.settings["busybox_config"] = \
+ self.settings[self.settings["spec_prefix"] +
+ "/busybox_config"]
+ del self.settings[self.settings["spec_prefix"] +
+ "/busybox_config"]
+
+ def set_repos(self):
+
+ # Each entry in this list will be a tuple of the form
+ # (source, name, default)
+ #
+ # source: the location of the repo on the host system,
+ # either a directory or a squashfs file.
+ #
+ # name: the repository name parsed from the repo.
+ # This is just a caching mechanism to avoid parsing the name
+ # every time the source is processed.
+ #
+ # default: Default location where the repo is expected in the
+ # target system. If this matches the path where we mount the repo to
+ # (as per get_repo_location), then we can skip generating a repos.conf
+ # entry for that repo. Currently this mechanism is only used for
+ # the main repo, which has a default location hard-coded in
+ # /usr/share/portage/config/repos.conf. For the other repos,
+ # the default is set to None.
+ self.repos = []
+
+ # Create entry for snapshot
+ default_location = Path(confdefaults['repo_basedir'], confdefaults['repo_name'])
+ self.repos.append((self.snapshot, get_repo_name(self.snapshot), default_location))
+
+ # Create entry for every other repo
+ if 'repos' in self.settings:
+ if isinstance(self.settings['repos'], str):
+ self.settings['repos'] = \
+ self.settings['repos'].split()
+ log.info('repos are set to: %s',
+ ' '.join(self.settings['repos']))
+
+ get_info = lambda repo: (repo, get_repo_name(repo), None)
+ self.repos.extend(map(get_info, self.settings['repos']))
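Putting the pieces together, the snapshot entry plus one hypothetical extra repo from the spec would leave self.repos holding tuples like these (paths illustrative only):

    from pathlib import Path

    # (source, name, default) as documented above.
    repos_example = [
        (Path('/var/tmp/catalyst/snapshots/gentoo-20240101T163200Z.sqfs'), 'gentoo',
         Path('/var/db/repos/gentoo')),       # snapshot, default location known
        ('/var/db/repos/guru', 'guru', None), # extra repo from settings['repos']
    ]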
+
+ def set_overlay(self):
+ if self.settings["spec_prefix"] + "/overlay" in self.settings:
+ if isinstance(self.settings[self.settings['spec_prefix'] + '/overlay'], str):
+ self.settings[self.settings["spec_prefix"] + "/overlay"] = \
+ self.settings[self.settings["spec_prefix"] +
+ "/overlay"].split()
+
+ def set_root_overlay(self):
+ if self.settings["spec_prefix"] + "/root_overlay" in self.settings:
+ if isinstance(self.settings[self.settings['spec_prefix'] + '/root_overlay'], str):
+ self.settings[self.settings["spec_prefix"] + "/root_overlay"] = \
+ self.settings[self.settings["spec_prefix"] +
+ "/root_overlay"].split()
+
+ def set_root_path(self):
+ """ ROOT= variable for emerges """
+ self.settings["root_path"] = "/"
+
+ def set_valid_build_kernel_vars(self, addlargs):
+ if "boot/kernel" in addlargs:
+ if isinstance(addlargs['boot/kernel'], str):
+ loopy = [addlargs["boot/kernel"]]
+ else:
+ loopy = addlargs["boot/kernel"]
+
+ for x in loopy:
+ self.valid_values |= frozenset([
+ "boot/kernel/" + x + "/aliases",
+ "boot/kernel/" + x + "/config",
+ "boot/kernel/" + x + "/console",
+ "boot/kernel/" + x + "/distkernel",
+ "boot/kernel/" + x + "/dracut_args",
+ "boot/kernel/" + x + "/extraversion",
+ "boot/kernel/" + x + "/gk_action",
+ "boot/kernel/" + x + "/gk_kernargs",
+ "boot/kernel/" + x + "/initramfs_overlay",
+ "boot/kernel/" + x + "/packages",
+ "boot/kernel/" + x + "/softlevel",
+ "boot/kernel/" + x + "/sources",
+ "boot/kernel/" + x + "/use",
+ ])
+ if "boot/kernel/" + x + "/packages" in addlargs:
+ if isinstance(addlargs['boot/kernel/' + x + '/packages'], str):
+ addlargs["boot/kernel/" + x + "/packages"] = \
+ [addlargs["boot/kernel/" + x + "/packages"]]
+
+ def set_build_kernel_vars(self):
+ prefix = self.settings["spec_prefix"]
+
+ gk_mainargs = prefix + "/gk_mainargs"
+ if gk_mainargs in self.settings:
+ self.settings["gk_mainargs"] = self.settings[gk_mainargs]
+ del self.settings[gk_mainargs]
+
+ dracut_mainargs = prefix + "/dracut_args"
+ if dracut_mainargs in self.settings:
+ self.settings["dracut_args"] = self.settings[dracut_mainargs]
+ del self.settings[dracut_mainargs]
+
+ # Ask genkernel to include b2sum if <target>/verify is set
+ verify = prefix + "/verify"
+ if verify in self.settings:
+ assert self.settings[verify] == "blake2"
+ self.settings.setdefault("gk_mainargs", []).append("--b2sum")
+
+ def unpack(self):
+ clst_unpack_hash = self.resume.get("unpack")
+
+ # Set up all unpack info settings
+ unpack_info = self.decompressor.create_infodict(
+ source=self.settings["source_path"],
+ destination=self.settings["chroot_path"],
+ arch=self.settings["compressor_arch"],
+ other_options=self.settings["compressor_options"],
+ )
+
+ display_msg = (
+ 'Starting %(mode)s from %(source)s\nto '
+ '%(destination)s (this may take some time) ..')
+
+ error_msg = "'%(mode)s' extraction of %(source)s to %(destination)s failed."
+
+ if "seedcache" in self.settings["options"]:
+ if os.path.isdir(unpack_info["source"]):
+ # SEEDCACHE is a directory, use rsync
+ unpack_info['mode'] = "rsync"
+ else:
+ # SEEDCACHE is not a directory, try untarring it
+ log.notice(
+ 'Referenced SEEDCACHE does not appear to be a directory, trying to untar...')
+ unpack_info['source'] = file_check(unpack_info['source'])
+ else:
+ # No SEEDCACHE, use tar
+ unpack_info['source'] = file_check(unpack_info['source'])
+ # end of unpack_info settings
+
+ # set defaults,
+ # only change them if the resume point is proven to be good
+ _unpack = True
+ invalid_chroot = True
+ # Begin autoresume validation
+ if "autoresume" in self.settings["options"]:
+ # check chroot
+ if os.path.isdir(self.settings["chroot_path"]):
+ if self.resume.is_enabled("unpack"):
+ # Autoresume is valid in the chroot
+ _unpack = False
+ invalid_chroot = False
+ log.notice('Resume: "chroot" is valid...')
+ else:
+ # self.resume.is_disabled("unpack")
+ # Autoresume is invalid in the chroot
+ log.notice(
+ 'Resume: "seed source" unpack resume point is disabled')
+
+ # check seed source
+ if os.path.isfile(self.settings["source_path"]) and not invalid_chroot:
+ if self.settings["source_path_hash"].replace("\n", " ") == clst_unpack_hash:
+ # Seed tarball has not changed, chroot is valid
+ _unpack = False
+ invalid_chroot = False
+ log.notice('Resume: "seed source" hash matches chroot...')
+ else:
+ # self.settings["source_path_hash"] != clst_unpack_hash
+ # Seed tarball has changed, so invalidate the chroot
+ _unpack = True
+ invalid_chroot = True
+ log.notice(
+ 'Resume: "seed source" has changed, hashes do not match, invalidating resume...')
+ log.notice(' source_path......: %s',
+ self.settings["source_path"])
+ log.notice(' new source hash..: %s',
+ self.settings["source_path_hash"].replace("\n", " "))
+ log.notice(' recorded hash....: %s',
+ clst_unpack_hash)
+ unpack_info['source'] = file_check(unpack_info['source'])
+
+ else:
+ # No autoresume, check SEEDCACHE
+ if "seedcache" in self.settings["options"]:
+ # if the seedcache is a dir, rsync will clean up the chroot
+ if os.path.isdir(self.settings["source_path"]):
+ pass
+ elif os.path.isdir(self.settings["source_path"]):
+ # We should never reach this, so something is very wrong
+ raise CatalystError(
+ "source path is a dir but seedcache is not enabled: %s"
+ % self.settings["source_path"])
+
+ if _unpack:
+ if invalid_chroot:
+ if "autoresume" in self.settings["options"]:
+ log.notice(
+ 'Resume: Target chroot is invalid, cleaning up...')
+
+ self.clear_autoresume()
+ self.clear_chroot()
+
+ ensure_dirs(self.settings["chroot_path"])
+
+ ensure_dirs(self.settings["chroot_path"] + "/tmp", mode=1777)
+
+ if "pkgcache" in self.settings["options"]:
+ ensure_dirs(self.settings["pkgcache_path"], mode=0o755)
+
+ if "kerncache" in self.settings["options"]:
+ ensure_dirs(self.settings["kerncache_path"], mode=0o755)
+
+ log.notice('%s', display_msg % unpack_info)
+
+ # now run the decompressor
+ if not self.decompressor.extract(unpack_info):
+ log.error('%s', error_msg % unpack_info)
+
+ if "source_path_hash" in self.settings:
+ self.resume.enable("unpack",
+ data=self.settings["source_path_hash"])
+ else:
+ self.resume.enable("unpack")
+ else:
+ log.notice(
+ 'Resume: Valid resume point detected, skipping seed unpack operation...')
+
+ def config_profile_link(self):
+ log.info('Configuring profile link...')
+ make_profile = Path(self.settings['chroot_path'] + self.settings['port_conf'],
+ 'make.profile')
+ make_profile.unlink(missing_ok=True)
+ try:
+ repo_name, target_profile = self.settings['target_profile'].split(":", 1)
+ except ValueError:
+ repo_name = self.settings['repo_name']
+ target_profile = self.settings['target_profile']
+ make_profile.symlink_to(Path('../..' + self.settings['repo_basedir'],
+ repo_name,
+ 'profiles',
+ target_profile),
+ target_is_directory=True)
+
+ def setup_confdir(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("setup_confdir"):
+ log.notice(
+ 'Resume point detected, skipping setup_confdir operation...')
+ return
+
+ if "portage_confdir" in self.settings:
+ log.info('Configuring %s...', self.settings['port_conf'])
+ dest = normpath(
+ self.settings['chroot_path'] + '/' + self.settings['port_conf'])
+ ensure_dirs(dest)
+ # The trailing slashes on both paths are important:
+ # We want to make sure rsync copies the dirs into each
+ # other and not as subdirs.
+ cmd(['rsync', '-a', self.settings['portage_confdir'] + '/', dest + '/'],
+ env=self.env)
+ self.resume.enable("setup_confdir")
+
+ def to_chroot(self, path):
+ """ Prepend chroot path to the given path. """
+
+ chroot = Path(self.settings['chroot_path'])
+ return chroot / path.relative_to(path.anchor)
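A quick sketch of what to_chroot() does with an absolute path: stripping the anchor makes the path relative, so the join nests it under the chroot instead of replacing it (chroot path hypothetical):

    from pathlib import Path

    chroot = Path('/var/tmp/catalyst/tmp/default/stage3-amd64-2024.01.01')
    path = Path('/etc/portage/repos.conf/gentoo.conf')

    inside = chroot / path.relative_to(path.anchor)
    assert str(inside).endswith('/etc/portage/repos.conf/gentoo.conf')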
+
+ def get_repo_conf_path(self, repo_name):
+ """ Construct repo conf path: {repos_conf}/{name}.conf """
+ return Path(self.settings['repos_conf'], repo_name + ".conf")
+
+ def get_repo_location(self, repo_name):
+ """ Construct overlay repo path: {repo_basedir}/{name} """
+ return Path(self.settings['repo_basedir'], repo_name)
+
+ def write_repo_conf(self, repo_name, config):
+ """ Write ConfigParser to {chroot}/{repos_conf}/{name}.conf """
+
+ repo_conf = self.get_repo_conf_path(repo_name)
+
+ repo_conf_chroot = self.to_chroot(repo_conf)
+ repo_conf_chroot.parent.mkdir(mode=0o755, parents=True, exist_ok=True)
+
+ log.info('Creating repo config %s.', repo_conf_chroot)
+
+ try:
+ with open(repo_conf_chroot, 'w') as f:
+ config.write(f)
+ except OSError as e:
+ raise CatalystError(f'Could not write {repo_conf_chroot}: {e}') from e
+
+ def process_repos(self):
+ """ Create repos.conf entry for every repo """
+
+ for _, name, default in self.repos:
+ location = self.get_repo_location(name)
+
+ if default == location:
+ log.debug('Skipping repos.conf entry for repo %s '
+ 'with default location %s.', name, location)
+ continue
+
+ config = configparser.ConfigParser()
+ config[name] = {'location': location}
+ self.write_repo_conf(name, config)
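The ConfigParser handed to write_repo_conf() serializes to a plain repos.conf stanza; for a hypothetical 'guru' repo mounted at /var/db/repos/guru the written file would look roughly like the printed output below:

    import configparser
    import sys

    config = configparser.ConfigParser()
    config['guru'] = {'location': '/var/db/repos/guru'}
    config.write(sys.stdout)
    # Prints:
    # [guru]
    # location = /var/db/repos/guru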
+
+ def root_overlay(self):
+ """ Copy over the root_overlay """
+ if self.settings["spec_prefix"] + "/root_overlay" in self.settings:
+ for x in self.settings[self.settings["spec_prefix"] +
+ "/root_overlay"]:
+ if os.path.exists(x):
+ log.info('Copying root_overlay: %s', x)
+ cmd(['rsync', '-a', x + '/', self.settings['stage_path']],
+ env=self.env)
+
+ def groups(self):
+ for x in self.settings["groups"]:
+ log.notice("Creating group: '%s'", x)
+ cmd(["groupadd", "-R", self.settings['chroot_path'], x], env=self.env)
+
+ def users(self):
+ for x in self.settings["users"]:
+ usr, grp = '', ''
+ try:
+ usr, grp = x.split("=")
+ except ValueError:
+ usr = x
+ log.debug("users: '=' separator not found on line " + x)
+ log.debug("users: missing separator means no groups found")
+ uacmd = ["useradd", "-R", self.settings['chroot_path'], "-m", x]
+ msg_create_user = "Creating user: '%s'" % usr
+ if grp != '':
+ uacmd = ["useradd", "-R", self.settings['chroot_path'], "-m", "-G", grp, usr]
+ msg_create_user = "Creating user: '%s' in group(s): %s" % usr, grp
+ log.notice(msg_create_user)
+ cmd(uacmd, env=self.env)
+
+ def ssh_public_keys(self):
+ for x in self.settings["ssh_public_keys"]:
+ usr, pub_key_src = '', ''
+ try:
+ usr, pub_key_src = x.split("=")
+ except ValueError:
+ raise CatalystError(f"ssh_public_keys: '=' separator not found on line {x}")
+ log.notice("Copying SSH public key for user: '%s'", usr)
+ pub_key_dest = self.settings['chroot_path'] + f"/home/{usr}/.ssh/authorized_keys"
+ cpcmd = ["cp", "-av", pub_key_src, pub_key_dest]
+ cmd(cpcmd, env=self.env)
+ chcmd = ["chmod", "0644", pub_key_dest]
+ cmd(chcmd, env=self.env)
+
+ def bind(self):
+ for x in [x for x in self.mount if self.mount[x]['enable']]:
+ if str(self.mount[x]['source']) == 'config':
+ raise CatalystError(f'"{x}" bind mount source is not configured')
+ if str(self.mount[x]['target']) == 'config':
+ raise CatalystError(f'"{x}" bind mount target is not configured')
+
+ source = str(self.mount[x]['source'])
+ target = self.settings['chroot_path'] + str(self.mount[x]['target'])
+ fstype = ''
+ options = ''
+
+ log.debug('bind %s: "%s" -> "%s"', x, source, target)
+
+ if source == 'maybe_tmpfs':
+ if 'var_tmpfs_portage' not in self.settings:
+ continue
+
+ fstype = 'tmpfs'
+ options = f"size={self.settings['var_tmpfs_portage']}G"
+ elif source == 'tmpfs':
+ fstype = 'tmpfs'
+ elif source == 'shm':
+ fstype = 'tmpfs'
+ options = 'noexec,nosuid,nodev'
+ else:
+ source_path = Path(self.mount[x]['source'])
+ if source_path.suffix == '.sqfs':
+ fstype = 'squashfs'
+ options = 'ro,loop'
+ else:
+ options = 'bind'
+
+ # We may need to create the source of the bind mount. E.g., in the
+ # case of an empty package cache we must create the directory that
+ # the binary packages will be stored into.
+ source_path.mkdir(mode=0o755, parents=True, exist_ok=True)
+
+ Path(target).mkdir(mode=0o755, parents=True, exist_ok=True)
+
+ try:
+ cxt = libmount.Context(source=source, target=target,
+ fstype=fstype, options=options)
+ cxt.mount()
+ except Exception as e:
+ raise CatalystError(f"Couldn't mount: {source}, {e}") from e
+
+ def chroot_setup(self):
+ self.makeconf = read_makeconf(normpath(self.settings["chroot_path"] +
+ self.settings["make_conf"]))
+ self.override_cbuild()
+ self.override_chost()
+ self.override_cflags()
+ self.override_cxxflags()
+ self.override_fcflags()
+ self.override_fflags()
+ self.override_ldflags()
+ self.override_asflags()
+ self.override_common_flags()
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("chroot_setup"):
+ log.notice(
+ 'Resume point detected, skipping chroot_setup operation...')
+ return
+
+ log.notice('Setting up chroot...')
+
+ shutil.copy('/etc/resolv.conf',
+ self.settings['chroot_path'] + '/etc/')
+
+ # Copy over the binary interpreter(s) (qemu), if applicable; note that they are given
+ # as a space-separated list of full paths and go to the same place in the chroot
+ if "interpreter" in self.settings:
+ if isinstance(self.settings["interpreter"], str):
+ myints = [self.settings["interpreter"]]
+ else:
+ myints = self.settings["interpreter"]
+
+ for myi in myints:
+ if not os.path.exists(myi):
+ raise CatalystError("Can't find interpreter " + myi, print_traceback=True)
+
+ log.notice('Copying binary interpreter %s into chroot', myi)
+
+ if os.path.exists(self.settings['chroot_path'] + '/' + myi):
+ os.rename(self.settings['chroot_path'] + '/' + myi, self.settings['chroot_path'] + '/' + myi + '.catalyst')
+
+ shutil.copy(myi, self.settings['chroot_path'] + '/' + myi)
+
+ # Copy over the envscript, if applicable
+ if "envscript" in self.settings:
+ if not os.path.exists(self.settings["envscript"]):
+ raise CatalystError(
+ "Can't find envscript " + self.settings["envscript"],
+ print_traceback=True)
+
+ log.warning(
+ 'env variables in catalystrc may cause catastrophic failure.\n'
+ 'If your build fails, look here first for the possible problem.')
+
+ shutil.copy(self.settings['envscript'],
+ self.settings['chroot_path'] + '/tmp/envscript')
+
+ # Copy over /etc/hosts from the host in case there are any
+ # specialties in there
+ hosts_file = self.settings['chroot_path'] + '/etc/hosts'
+ if os.path.exists(hosts_file):
+ os.rename(hosts_file, hosts_file + '.catalyst')
+ shutil.copy('/etc/hosts', hosts_file)
+
+ # write out the make.conf
+ try:
+ self.write_make_conf(setup=True)
+ except OSError as e:
+ raise CatalystError('Could not write %s: %s' % (
+ normpath(self.settings["chroot_path"] +
+ self.settings["make_conf"]), e)) from e
+
+ # write out the binrepos.conf
+ # we do this here for later user convenience, but normally
+ # it should not affect stage builds (which only get --usepkg,
+ # but never --getbinpkg as emerge parameters).
+ try:
+ self.write_binrepos_conf()
+ except OSError as e:
+ raise CatalystError('Could not write binrepos.conf: %s' % e) from e
+
+ self.resume.enable("chroot_setup")
+
+ def write_make_conf(self, setup=True):
+ # Modify and write out make.conf (for the chroot)
+ makepath = normpath(self.settings["chroot_path"] +
+ self.settings["make_conf"])
+ with open(makepath, "w") as myf:
+ log.notice("Writing the stage make.conf to: %s" % makepath)
+ myf.write("# These settings were set by the catalyst build script "
+ "that automatically\n# built this stage.\n")
+ myf.write("# Please consult "
+ "/usr/share/portage/config/make.conf.example "
+ "for a more\n# detailed example.\n")
+
+ for flags in ["COMMON_FLAGS", "CFLAGS", "CXXFLAGS", "FCFLAGS", "FFLAGS",
+ "LDFLAGS", "ASFLAGS"]:
+ if flags in ["LDFLAGS", "ASFLAGS"]:
+ if not flags in self.settings:
+ continue
+ myf.write("# %s is unsupported. USE AT YOUR OWN RISK!\n"
+ % flags)
+ if flags not in self.settings or (flags != "COMMON_FLAGS" and
+ self.settings[flags] == self.settings["COMMON_FLAGS"]):
+ myf.write('%s="${COMMON_FLAGS}"\n' % flags)
+ elif isinstance(self.settings[flags], list):
+ myf.write('%s="%s"\n'
+ % (flags, ' '.join(self.settings[flags])))
+ else:
+ myf.write('%s="%s"\n'
+ % (flags, self.settings[flags]))
+
+ if "CBUILD" in self.settings:
+ myf.write("\n# This should not be changed unless you know exactly"
+ " what you are doing. You\n# should probably be "
+ "using a different stage, instead.\n")
+ myf.write('CBUILD="' + self.settings["CBUILD"] + '"\n')
+
+ if "CHOST" in self.settings:
+ myf.write("\n# WARNING: Changing your CHOST is not something "
+ "that should be done lightly.\n# Please consult "
+ "https://wiki.gentoo.org/wiki/Changing_the_CHOST_variable "
+ "before changing.\n")
+ myf.write('CHOST="' + self.settings["CHOST"] + '"\n')
+
+ # Figure out what our USE vars are for building
+ myusevars = []
+ if "bindist" in self.settings["options"]:
+ myf.write(
+ "\n# NOTE: This stage was built with the bindist Use flag enabled\n")
+ if setup or "sticky-config" in self.settings["options"]:
+ myusevars.extend(self.settings["catalyst_use"])
+ log.notice("STICKY-CONFIG is enabled")
+ if "HOSTUSE" in self.settings:
+ myusevars.extend(self.settings["HOSTUSE"])
+
+ if "use" in self.settings:
+ myusevars.extend(self.settings["use"])
+
+ if myusevars:
+ myf.write("# These are the USE and USE_EXPAND flags that were "
+ "used for\n# building in addition to what is provided "
+ "by the profile.\n")
+ myusevars = sorted(set(myusevars))
+ myf.write('USE="' + ' '.join(myusevars) + '"\n')
+ if '-*' in myusevars:
+ log.warning(
+ 'The use of -* in %s/use will cause portage to ignore\n'
+ 'package.use in the profile and portage_confdir.\n'
+ "You've been warned!", self.settings['spec_prefix'])
+
+ myuseexpandvars = {}
+ if "HOSTUSEEXPAND" in self.settings:
+ for hostuseexpand in self.settings["HOSTUSEEXPAND"]:
+ myuseexpandvars.update(
+ {hostuseexpand: self.settings["HOSTUSEEXPAND"][hostuseexpand]})
+
+ if myuseexpandvars:
+ for hostuseexpand in myuseexpandvars:
+ myf.write(hostuseexpand + '="' +
+ ' '.join(myuseexpandvars[hostuseexpand]) + '"\n')
+
+ for x in ['target_distdir', 'target_pkgdir']:
+ if self.settings[x] != confdefaults[x]:
+ varname = x.split('_')[1].upper()
+ myf.write(f'{varname}="{self.settings[x]}"\n')
+
+ # Set default locale for system responses. #478382
+ myf.write(
+ '\n'
+ '# This sets the language of build output to English.\n'
+ '# Please keep this setting intact when reporting bugs.\n'
+ 'LC_MESSAGES=C.utf8\n')
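For illustration, with hypothetical spec values of COMMON_FLAGS="-O2 -pipe" and a CHOST, and no LDFLAGS or ASFLAGS set, the flags and locale portion written above reduces to roughly the following lines (header comments omitted):

    COMMON_FLAGS="-O2 -pipe"
    CFLAGS="${COMMON_FLAGS}"
    CXXFLAGS="${COMMON_FLAGS}"
    FCFLAGS="${COMMON_FLAGS}"
    FFLAGS="${COMMON_FLAGS}"
    CHOST="x86_64-pc-linux-gnu"
    LC_MESSAGES=C.utf8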
+
+ def write_binrepos_conf(self):
+ # only if catalyst.conf defines the host and the spec defines the path...
+ if self.settings["binhost"] != '' and "binrepo_path" in self.settings:
+
+ # Write out binrepos.conf (for the chroot)
+ binrpath = normpath(self.settings["chroot_path"] +
+ self.settings["binrepos_conf"])
+ Path(binrpath).mkdir(mode=0o755, parents=True, exist_ok=True)
+
+ binrfile = binrpath + "/gentoobinhost.conf"
+ with open(binrfile, "w") as myb:
+ log.notice("Writing the stage binrepo config to: %s" % binrfile)
+ myb.write("# These settings were set by the catalyst build script "
+ "that automatically\n# built this stage.\n")
+ myb.write("# Please consider using a local mirror.\n\n")
+ myb.write("[gentoobinhost]\n")
+ myb.write("priority = 1\n")
+ myb.write("sync-uri = " + self.settings["binhost"] + \
+ self.settings["binrepo_path"] + "\n")
+
+ def fsscript(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("fsscript"):
+ log.notice('Resume point detected, skipping fsscript operation...')
+ return
+
+ if "fsscript" in self.settings:
+ if os.path.exists(self.settings["controller_file"]):
+ cmd([self.settings['controller_file'], 'fsscript'],
+ env=self.env)
+ self.resume.enable("fsscript")
+
+ def rcupdate(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("rcupdate"):
+ log.notice('Resume point detected, skipping rcupdate operation...')
+ return
+
+ if os.path.exists(self.settings["controller_file"]):
+ cmd([self.settings['controller_file'], 'rc-update'],
+ env=self.env)
+ self.resume.enable("rcupdate")
+
+ def clean(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("clean"):
+ log.notice('Resume point detected, skipping clean operation...')
+ else:
+ for x in self.settings["cleanables"]:
+ log.notice('Cleaning chroot: %s', x)
+ clear_path(normpath(self.settings["stage_path"] + x))
+
+ # Put /etc/hosts back into place
+ hosts_file = self.settings['chroot_path'] + '/etc/hosts'
+ if os.path.exists(hosts_file + '.catalyst'):
+ os.rename(hosts_file + '.catalyst', hosts_file)
+
+ # optionally clean up binary interpreter(s)
+ if "interpreter" in self.settings:
+ if isinstance(self.settings["interpreter"], str):
+ myints = [self.settings["interpreter"]]
+ else:
+ myints = self.settings["interpreter"]
+
+ for myi in myints:
+ if os.path.exists(self.settings['chroot_path'] + '/' + myi + '.catalyst'):
+ os.rename(self.settings['chroot_path'] + '/' + myi + '.catalyst', self.settings['chroot_path'] + '/' + myi)
+ else:
+ os.remove(self.settings['chroot_path'] + '/' + myi)
+
+ # optionally clean up portage configs
+ if ("portage_prefix" in self.settings and
+ "sticky-config" not in self.settings["options"]):
+ log.debug("clean(), portage_preix = %s, no sticky-config",
+ self.settings["portage_prefix"])
+ for _dir in "package.accept_keywords", "package.keywords", "package.mask", "package.unmask", "package.use", "package.env", "env", "profile/package.use.force", "profile/package.use.mask":
+ target = pjoin(self.settings["stage_path"],
+ "etc/portage/%s" % _dir,
+ self.settings["portage_prefix"])
+ log.notice("Clearing portage_prefix target: %s", target)
+ clear_path(target)
+
+ # Remove hacks that should *never* go into stages
+ target = pjoin(self.settings["stage_path"], "etc/portage/patches")
+ if os.path.exists(target):
+ log.warning("You've been hacking. Clearing target patches: %s", target)
+ clear_path(target)
+
+ # Remove repo data
+ for _, name, _ in self.repos:
+
+ # Remove repos.conf entry
+ repo_conf = self.get_repo_conf_path(name)
+ chroot_repo_conf = self.to_chroot(repo_conf)
+ chroot_repo_conf.unlink(missing_ok=True)
+
+ # The repo has already been unmounted, remove the mount point
+ location = self.get_repo_location(name)
+ chroot_location = self.to_chroot(location)
+ clear_path(str(chroot_location))
+
+ if "sticky-config" not in self.settings["options"]:
+ # re-write the make.conf to be sure it is clean
+ self.write_make_conf(setup=False)
+
+ # Clean up old and obsoleted files in /etc
+ if os.path.exists(self.settings["stage_path"]+"/etc"):
+ cmd(['find', self.settings['stage_path'] + '/etc',
+ '-maxdepth', '1', '-name', '*-', '-delete'],
+ env=self.env)
+
+ if os.path.exists(self.settings["controller_file"]):
+ cmd([self.settings['controller_file'], 'clean'], env=self.env)
+ self.resume.enable("clean")
+
+ def empty(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("empty"):
+ log.notice('Resume point detected, skipping empty operation...')
+ return
+
+ if self.settings["spec_prefix"] + "/empty" in self.settings:
+ if isinstance(
+ self.settings[self.settings['spec_prefix'] + '/empty'],
+ str):
+ self.settings[self.settings["spec_prefix"] + "/empty"] = \
+ self.settings[self.settings["spec_prefix"] +
+ "/empty"].split()
+ for x in self.settings[self.settings["spec_prefix"] + "/empty"]:
+ myemp = self.settings["stage_path"] + x
+ if not os.path.isdir(myemp) or os.path.islink(myemp):
+ log.warning('not a directory or does not exist, '
+ 'skipping "empty" operation: %s', x)
+ continue
+ log.info('Emptying directory %s', x)
+ clear_dir(myemp)
+ self.resume.enable("empty")
+
+ def remove(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("remove"):
+ log.notice('Resume point detected, skipping remove operation...')
+ return
+
+ if self.settings["spec_prefix"] + "/rm" in self.settings:
+ for x in self.settings[self.settings["spec_prefix"] + "/rm"]:
+ # We're going to shell out for all these cleaning
+ # operations, so we get easy glob handling.
+ log.notice('%s: removing %s', self.settings["spec_prefix"], x)
+ clear_path(self.settings["stage_path"] + x)
+
+ if os.path.exists(self.settings["controller_file"]):
+ cmd([self.settings['controller_file'], 'clean'],
+ env=self.env)
+ self.resume.enable("remove")
+
+ def preclean(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("preclean"):
+ log.notice('Resume point detected, skipping preclean operation...')
+ return
+
+ try:
+ if os.path.exists(self.settings["controller_file"]):
+ cmd([self.settings['controller_file'], 'preclean'],
+ env=self.env)
+ self.resume.enable("preclean")
+
+ except Exception as e:
+ raise CatalystError("Build failed, could not execute preclean") from e
+
+ def capture(self):
+ # initialize it here so it doesn't use
+ # resources if it is not needed
+ if not self.compressor:
+ self.compressor = CompressMap(self.settings["compress_definitions"],
+ env=self.env, default_mode=self.settings['compression_mode'],
+ comp_prog=self.settings['comp_prog'])
+
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("capture"):
+ log.notice('Resume point detected, skipping capture operation...')
+ return
+
+ log.notice('Capture target in a tarball')
+ # Remove filename from path
+ mypath = os.path.dirname(self.settings["target_path"].rstrip('/'))
+
+ # Now make sure path exists
+ ensure_dirs(mypath)
+
+ pack_info = self.compressor.create_infodict(
+ source=".",
+ basedir=self.settings["stage_path"],
+ filename=self.settings["target_path"].rstrip('/'),
+ mode=self.settings["compression_mode"],
+ auto_extension=True,
+ arch=self.settings["compressor_arch"],
+ other_options=self.settings["compressor_options"],
+ )
+ target_filename = ".".join([self.settings["target_path"].rstrip('/'),
+ self.compressor.extension(pack_info['mode'])])
+
+ log.notice('Creating stage tarball... mode: %s',
+ self.settings['compression_mode'])
+
+ if self.compressor.compress(pack_info):
+ self.gen_contents_file(target_filename)
+ self.gen_digest_file(target_filename)
+ self.resume.enable("capture")
+ else:
+ log.warning("Couldn't create stage tarball: %s",
+ target_filename)
+
+ def run_local(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("run_local"):
+ log.notice('Resume point detected, skipping run_local operation...')
+ return
+
+ if os.path.exists(self.settings["controller_file"]):
+ log.info('run_local() starting controller script...')
+ cmd([self.settings['controller_file'], 'run'],
+ env=self.env)
+ self.resume.enable("run_local")
+ else:
+ log.info('run_local() no controller_file found... %s',
+ self.settings['controller_file'])
+
+ def setup_environment(self):
+ log.debug('setup_environment(); settings = %r', self.settings)
+ for x in list(self.settings):
+ log.debug('setup_environment(); processing: %s', x)
+ if x == "options":
+ for opt in self.settings[x]:
+ self.env['clst_' + opt.upper()] = "true"
+ continue
+
+ varname = 'clst_' + sanitize_name(x)
+
+ if isinstance(self.settings[x], str):
+ # Prefix to prevent namespace clashes
+ if "path" in x:
+ self.env[varname] = self.settings[x].rstrip("/")
+ else:
+ self.env[varname] = self.settings[x]
+ elif isinstance(self.settings[x], list):
+ self.env[varname] = ' '.join(self.settings[x])
+ elif isinstance(self.settings[x], bool):
+ if self.settings[x]:
+ self.env[varname] = "true"
+ elif isinstance(self.settings[x], (int, float)):
+ self.env[varname] = str(self.settings[x])
+ elif isinstance(self.settings[x], dict):
+ if x in ['compress_definitions', 'decompress_definitions']:
+ continue
+ log.warning("Not making envar for '%s', is a dict", x)
+
+ makeopts = []
+ for flag, setting in {'j': 'jobs', 'l': 'load-average'}.items():
+ if setting in self.settings:
+ makeopts.append(f'-{flag}{self.settings[setting]}')
+ self.env['MAKEOPTS'] = ' '.join(makeopts)
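For example, hypothetical jobs and load-average settings yield a conventional MAKEOPTS string:

    settings = {'jobs': 4, 'load-average': 5.0}   # hypothetical values

    makeopts = []
    for flag, setting in {'j': 'jobs', 'l': 'load-average'}.items():
        if setting in settings:
            makeopts.append(f'-{flag}{settings[setting]}')

    assert ' '.join(makeopts) == '-j4 -l5.0'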
+
+ log.debug('setup_environment(); env = %r', self.env)
+
+ def enter_chroot(self):
+ chroot = command('chroot')
+ # verify existence only
+ command(os.path.join(self.settings['chroot_path'], '/bin/bash'))
+
+ log.notice("Entering chroot")
+ try:
+ cmd([chroot, self.settings['chroot_path'], '/bin/bash', '-l'],
+ env=self.env)
+ except CatalystError:
+ pass
+
+ def run(self):
+ with fasteners.InterProcessLock(self.settings["chroot_path"] + '.lock'):
+ return self._run()
+
+ def _run(self):
+ if "clear-autoresume" in self.settings["options"]:
+ self.clear_autoresume()
+
+ if "purgetmponly" in self.settings["options"]:
+ self.purge()
+ return True
+
+ if "purgeonly" in self.settings["options"]:
+ log.info('StageBase: run() purgeonly')
+ self.purge()
+
+ if "purge" in self.settings["options"]:
+ log.info('StageBase: run() purge')
+ self.purge()
+
+ if not run_sequence(self.prepare_sequence):
+ return False
+
+ with namespace(mount=True):
+ if not run_sequence(self.build_sequence):
+ return False
+
+ if not run_sequence(self.finish_sequence):
+ return False
+
+ return True
+
+ def unmerge(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("unmerge"):
+ log.notice('Resume point detected, skipping unmerge operation...')
+ return
+
+ if self.settings["spec_prefix"] + "/unmerge" in self.settings:
+ if isinstance(self.settings[self.settings['spec_prefix'] + '/unmerge'], str):
+ self.settings[self.settings["spec_prefix"] + "/unmerge"] = \
+ [self.settings[self.settings["spec_prefix"] + "/unmerge"]]
+
+ # Before cleaning, unmerge stuff
+ cmd([self.settings['controller_file'], 'unmerge'] +
+ self.settings[self.settings['spec_prefix'] + '/unmerge'],
+ env=self.env)
+ log.info('unmerge shell script')
+ self.resume.enable("unmerge")
+
+ def target_setup(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("target_setup"):
+ log.notice(
+ 'Resume point detected, skipping target_setup operation...')
+ return
+
+ log.notice('Setting up filesystems per filesystem type')
+ cmd([self.settings['controller_file'], 'target_image_setup',
+ self.settings['target_path']], env=self.env)
+ self.resume.enable("target_setup")
+
+ def setup_overlay(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("setup_overlay"):
+ log.notice(
+ 'Resume point detected, skipping setup_overlay operation...')
+ return
+
+ if self.settings["spec_prefix"] + "/overlay" in self.settings:
+ for x in self.settings[self.settings["spec_prefix"] + "/overlay"]:
+ if os.path.exists(x):
+ cmd(['rsync', '-a', x + '/', self.settings['target_path']],
+ env=self.env)
+ self.resume.enable("setup_overlay")
+
+ def create_iso(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("create_iso"):
+ log.notice(
+ 'Resume point detected, skipping create_iso operation...')
+ return
+
+ # Create the ISO
+ if "iso" in self.settings:
+ cmd([self.settings['controller_file'], 'iso', self.settings['iso']],
+ env=self.env)
+ self.gen_contents_file(self.settings["iso"])
+ self.gen_digest_file(self.settings["iso"])
+ self.resume.enable("create_iso")
+ else:
+ log.warning('livecd/iso was not defined. '
+ 'An ISO image will not be created.')
+
+ def build_packages(self):
+ build_packages_resume = pjoin(self.settings["autoresume_path"],
+ "build_packages")
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("build_packages"):
+ log.notice(
+ 'Resume point detected, skipping build_packages operation...')
+ return
+
+ if self.settings["spec_prefix"] + "/packages" in self.settings:
+ target_pkgs = self.settings["spec_prefix"] + '/packages'
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("build_packages"):
+ log.notice('Resume point detected, skipping build_packages '
+ 'operation...')
+ else:
+ command = [self.settings['controller_file'],
+ 'build_packages']
+ if isinstance(self.settings[target_pkgs], str):
+ command.append(self.settings[target_pkgs])
+ else:
+ command.extend(self.settings[target_pkgs])
+ cmd(command, env=self.env)
+ fileutils.touch(build_packages_resume)
+ self.resume.enable("build_packages")
+
+ def build_kernel(self):
+ """Build all configured kernels"""
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("build_kernel"):
+ log.notice(
+ 'Resume point detected, skipping build_kernel operation...')
+ return
+
+ if "boot/kernel" in self.settings:
+ mynames = self.settings["boot/kernel"]
+ if isinstance(mynames, str):
+ mynames = [mynames]
+ for kname in [sanitize_name(name) for name in mynames]:
+ if "boot/kernel/" + kname + "/distkernel" in self.settings:
+ cmd([self.settings['controller_file'], 'pre-distkmerge'], env=self.env)
+ else:
+ # Execute the script that sets up the kernel build environment
+ cmd([self.settings['controller_file'], 'pre-kmerge'], env=self.env)
+ self._build_kernel(kname=kname)
+ self.resume.enable("build_kernel")
+
+ def _build_kernel(self, kname):
+ """Build a single configured kernel by name"""
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("build_kernel_" + kname):
+ log.notice('Resume point detected, skipping build_kernel '
+ 'for %s operation...', kname)
+ return
+
+ self._copy_kernel_config(kname=kname)
+
+ key = 'boot/kernel/' + kname + '/extraversion'
+ self.settings.setdefault(key, '')
+ self.env["clst_kextraversion"] = self.settings[key]
+
+ self._copy_initramfs_overlay(kname=kname)
+
+ # Execute the script that builds the kernel
+ cmd([self.settings['controller_file'], 'kernel', kname], env=self.env)
+
+ if "boot/kernel/" + kname + "/initramfs_overlay" in self.settings:
+ log.notice('Cleaning up temporary overlay dir')
+ clear_dir(self.settings['chroot_path'] + '/tmp/initramfs_overlay/')
+
+ self.resume.is_enabled("build_kernel_" + kname)
+
+ def _copy_kernel_config(self, kname):
+ key = 'boot/kernel/' + kname + '/config'
+ if key in self.settings:
+ if not os.path.exists(self.settings[key]):
+ raise CatalystError("Can't find kernel config: %s" %
+ self.settings[key])
+
+ shutil.copy(self.settings[key],
+ self.settings['chroot_path'] + '/var/tmp/' + kname + '.config')
+
+ def _copy_initramfs_overlay(self, kname):
+ key = 'boot/kernel/' + kname + '/initramfs_overlay'
+ if key in self.settings:
+ if os.path.exists(self.settings[key]):
+ log.notice('Copying initramfs_overlay dir %s',
+ self.settings[key])
+
+ ensure_dirs(
+ self.settings['chroot_path'] +
+ '/tmp/initramfs_overlay/' + self.settings[key])
+
+ cmd('cp -R ' + self.settings[key] + '/* ' +
+ self.settings['chroot_path'] +
+ '/tmp/initramfs_overlay/' + self.settings[key], env=self.env)
+
+ def bootloader(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("bootloader"):
+ log.notice(
+ 'Resume point detected, skipping bootloader operation...')
+ return
+
+ cmd([self.settings['controller_file'], 'bootloader',
+ self.settings['target_path'].rstrip('/')],
+ env=self.env)
+ self.resume.enable("bootloader")
+
+ def livecd_update(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("livecd_update"):
+ log.notice(
+ 'Resume point detected, skipping livecd_update operation...')
+ return
+
+ cmd([self.settings['controller_file'], 'livecd-update'],
+ env=self.env)
+ self.resume.enable("livecd_update")
+
+ @staticmethod
+ def _debug_pause_():
+ input("press any key to continue: ")
diff --git a/catalyst/base/targetbase.py b/catalyst/base/targetbase.py
index 4dcd88b7..ce16566b 100644
--- a/catalyst/base/targetbase.py
+++ b/catalyst/base/targetbase.py
@@ -1,15 +1,46 @@
import os
+from abc import ABC, abstractmethod
+from pathlib import Path
+
from catalyst.support import addl_arg_parse
-class TargetBase(object):
- """
- The toplevel class for all targets. This is about as generic as we get.
- """
- def __init__(self, myspec, addlargs):
- addl_arg_parse(myspec,addlargs,self.required_values,self.valid_values)
- self.settings=myspec
- self.env = {
- 'PATH': '/bin:/sbin:/usr/bin:/usr/sbin',
- 'TERM': os.getenv('TERM', 'dumb'),
- }
+
+class TargetBase(ABC):
+ """
+ The toplevel class for all targets. This is about as generic as we get.
+ """
+
+ def __init__(self, myspec, addlargs):
+ addl_arg_parse(myspec, addlargs, self.required_values,
+ self.valid_values)
+ self.settings = myspec
+ self.env = {
+ 'PATH': '/bin:/sbin:/usr/bin:/usr/sbin',
+ 'TERM': os.getenv('TERM', 'dumb'),
+ }
+ self.snapshot = None
+
+ def set_snapshot(self, treeish=None):
+ # Make snapshots directory
+ snapshot_dir = Path(self.settings['storedir'], 'snapshots')
+ snapshot_dir.mkdir(mode=0o755, parents=True, exist_ok=True)
+
+ repo_name = self.settings['repo_name']
+ if treeish is None:
+ treeish = self.settings['snapshot_treeish']
+
+ self.snapshot = Path(snapshot_dir,
+ f'{repo_name}-{treeish}.sqfs')
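With the usual repo_name of 'gentoo' and a hypothetical treeish and storedir, the snapshot path resolves to something like:

    from pathlib import Path

    storedir, repo_name, treeish = '/var/tmp/catalyst', 'gentoo', '20240101T163200Z'  # hypothetical
    snapshot = Path(storedir, 'snapshots', f'{repo_name}-{treeish}.sqfs')
    # -> /var/tmp/catalyst/snapshots/gentoo-20240101T163200Z.sqfs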
+
+ @property
+ @classmethod
+ @abstractmethod
+ def required_values(cls):
+ raise NotImplementedError
+
+ @property
+ @classmethod
+ @abstractmethod
+ def valid_values(cls):
+ raise NotImplementedError
diff --git a/catalyst/builder.py b/catalyst/builder.py
deleted file mode 100644
index 4d58de65..00000000
--- a/catalyst/builder.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import os
-
-class generic(object):
- def __init__(self,myspec):
- self.settings=myspec
- self.settings.setdefault('CHROOT', 'chroot')
-
- def setarch(self, arch):
- """Set the chroot wrapper to run through `setarch |arch|`
-
- Useful for building x86-on-amd64 and such.
- """
- if os.uname()[0] == 'Linux':
- self.settings['CHROOT'] = 'setarch %s %s' % (arch, self.settings['CHROOT'])
-
- def mount_safety_check(self):
- """
- Make sure that no bind mounts exist in chrootdir (to use before
- cleaning the directory, to make sure we don't wipe the contents of
- a bind mount
- """
- pass
-
- def mount_all(self):
- """do all bind mounts"""
- pass
-
- def umount_all(self):
- """unmount all bind mounts"""
- pass
diff --git a/catalyst/config.py b/catalyst/config.py
index a3a7200a..e1963f71 100644
--- a/catalyst/config.py
+++ b/catalyst/config.py
@@ -4,132 +4,117 @@ import re
from catalyst import log
from catalyst.support import CatalystError
-class ParserBase(object):
-
- filename = ""
- lines = None
- values = None
- key_value_separator = "="
- multiple_values = False
- empty_values = True
- eval_none = False
-
- def __getitem__(self, key):
- return self.values[key]
-
- def get_values(self):
- return self.values
-
- def dump(self):
- dump = ""
- for x in self.values.keys():
- dump += x + " = " + repr(self.values[x]) + "\n"
- return dump
-
- def parse_file(self, filename):
- try:
- with open(filename, "r") as myf:
- self.lines = myf.readlines()
- except:
- raise CatalystError("Could not open file " + filename,
- print_traceback=True)
- self.filename = filename
- self.parse()
-
- def parse_lines(self, lines):
- self.lines = lines
- self.parse()
-
- def parse(self):
- values = {}
- cur_array = []
-
- trailing_comment=re.compile(r'\s*#.*$')
- #white_space=re.compile('\s+')
-
- for x, myline in enumerate(self.lines):
- myline = myline.strip()
-
- # Force the line to be clean
- # Remove Comments ( anything following # )
- myline = trailing_comment.sub("", myline)
-
- # Skip any blank lines
- if not myline:
- continue
-
- if self.key_value_separator in myline:
- # Split on the first occurence of the separator creating two strings in the array mobjs
- mobjs = myline.split(self.key_value_separator, 1)
- mobjs[1] = mobjs[1].strip().strip('"')
-
-# # Check that this key doesn't exist already in the spec
-# if mobjs[0] in values:
-# raise Exception("You have a duplicate key (" + mobjs[0] + ") in your spec. Please fix it")
-
- # Start a new array using the first element of mobjs
- cur_array = [mobjs[0]]
- if mobjs[1]:
- # do any variable substitiution embeded in it with
- # the values already obtained
- mobjs[1] = mobjs[1] % values
- if self.multiple_values:
- # split on white space creating additional array elements
-# subarray = white_space.split(mobjs[1])
- subarray = mobjs[1].split()
- cur_array += subarray
- else:
- cur_array += [mobjs[1]]
-
- # Else add on to the last key we were working on
- else:
- if self.multiple_values:
-# mobjs = white_space.split(myline)
-# cur_array += mobjs
- cur_array += myline.split()
- else:
- raise CatalystError("Syntax error: %s" % x, print_traceback=True)
-
- # XXX: Do we really still need this "single value is a string" behavior?
- if len(cur_array) == 2:
- values[cur_array[0]] = cur_array[1]
- else:
- values[cur_array[0]] = cur_array[1:]
-
- if not self.empty_values:
- # Make sure the list of keys is static since we modify inside the loop.
- for x in list(values.keys()):
- # Delete empty key pairs
- if not values[x]:
- log.warning('No value set for key "%s"; deleting', x)
- del values[x]
-
- if self.eval_none:
- # Make sure the list of keys is static since we modify inside the loop.
- for x in list(values.keys()):
- # reset None values
- if isinstance(values[x], str) and values[x].lower() in ['none']:
- log.info('None value found for key "%s"; reseting', x)
- values[x] = None
- self.values = values
-class SpecParser(ParserBase):
-
- key_value_separator = ':'
- multiple_values = True
- empty_values = False
- eval_none = True
+class ParserBase():
+
+ filename = ""
+ lines = None
+ values = None
+ key_value_separator = "="
+ multiple_values = False
+ empty_values = True
+ eval_none = False
+
+ def __getitem__(self, key):
+ return self.values[key]
+
+ def get_values(self):
+ return self.values
+
+ def dump(self):
+ dump = ""
+ for x in self.values:
+ dump += x + " = " + repr(self.values[x]) + "\n"
+ return dump
+
+ def parse_file(self, filename):
+ try:
+ with open(filename, "r") as myf:
+ self.lines = myf.readlines()
+ except:
+ raise CatalystError("Could not open file " + filename,
+ print_traceback=True)
+ self.filename = filename
+ self.parse()
+
+ def parse_lines(self, lines):
+ self.lines = lines
+ self.parse()
+
+ def parse(self):
+ values = {}
+ cur_array = []
+
+ trailing_comment = re.compile(r'\s*#.*$')
+
+ for x, myline in enumerate(self.lines):
+ myline = myline.strip()
+
+ # Force the line to be clean
+ # Remove Comments ( anything following # )
+ myline = trailing_comment.sub("", myline)
+
+ # Skip any blank lines
+ if not myline:
+ continue
+
+ if self.key_value_separator in myline:
+ # Split on the first occurrence of the separator creating two strings in the array mobjs
+ mobjs = myline.split(self.key_value_separator, 1)
+ mobjs[1] = mobjs[1].strip().strip('"')
+
+ # Start a new array using the first element of mobjs
+ cur_array = [mobjs[0]]
+ if mobjs[1]:
+ # do any variable substitution embedded in it with
+ # the values already obtained
+ mobjs[1] = mobjs[1] % values
+ if self.multiple_values:
+ # split on white space creating additional array elements
+ subarray = mobjs[1].split()
+ cur_array += subarray
+ else:
+ cur_array += [mobjs[1]]
+
+ # Else add on to the last key we were working on
+ else:
+ if self.multiple_values:
+ cur_array += myline.split()
+ else:
+ raise CatalystError("Syntax error: %s" %
+ x, print_traceback=True)
+
+ # XXX: Do we really still need this "single value is a string" behavior?
+ if len(cur_array) == 2:
+ values[cur_array[0]] = cur_array[1]
+ else:
+ values[cur_array[0]] = cur_array[1:]
+
+ if not self.empty_values:
+ # Make sure the list of keys is static since we modify inside the loop.
+ for x in list(values.keys()):
+ # Delete empty key pairs
+ if not values[x]:
+ log.warning('No value set for key "%s"; deleting', x)
+ del values[x]
+
+ if self.eval_none:
+ # Make sure the list of keys is static since we modify inside the loop.
+ for x in list(values.keys()):
+ # reset None values
+ if isinstance(values[x], str) and values[x].lower() in ['none']:
+ log.info('None value found for key "%s"; resetting', x)
+ values[x] = None
+ self.values = values
- def __init__(self, filename=""):
- if filename:
- self.parse_file(filename)
-class ConfigParser(ParserBase):
+class SpecParser(ParserBase):
- key_value_separator = '='
- multiple_values = False
- empty_values = True
+ key_value_separator = ':'
+ multiple_values = True
+ empty_values = False
+ eval_none = True
- def __init__(self, filename=""):
- if filename:
- self.parse_file(filename)
+ def __init__(self, filename=""):
+ if filename:
+ self.parse_file(filename)
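
The reindented parser keeps its behaviour: ':'-separated keys, whitespace-split multi-values, empty keys dropped, and the literal string "none" mapped to None. A minimal usage sketch, assuming the catalyst package from this tree and its dependencies are importable (the spec lines are illustrative only):

    from catalyst.config import SpecParser

    spec = SpecParser()
    spec.parse_lines([
        'target: stage1            # trailing comments are stripped',
        'version_stamp: 2021.1',
        'update_seed: none',       # eval_none maps the literal "none" to None
        'cflags: -O2 -pipe',       # multiple_values splits this on whitespace
    ])
    values = spec.get_values()
    # values == {'target': 'stage1', 'version_stamp': '2021.1',
    #            'update_seed': None, 'cflags': ['-O2', '-pipe']}
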
diff --git a/catalyst/context.py b/catalyst/context.py
new file mode 100644
index 00000000..01a6d930
--- /dev/null
+++ b/catalyst/context.py
@@ -0,0 +1,54 @@
+
+import contextlib
+import os
+
+from snakeoil.process.namespaces import setns, simple_unshare
+
+@contextlib.contextmanager
+def namespace(mount=False, uts=False, ipc=False, net=False, pid=False,
+ user=False, hostname=None):
+ namespaces = {
+ (mount, "mnt"): None,
+ (uts, "uts"): None,
+ (ipc, "ipc"): None,
+ (net, "net"): None,
+ (pid, "pid"): None,
+ (user, "user"): None,
+ }
+
+ dirs = {
+ "root": None,
+ "cwd": None,
+ }
+
+ # Save fds of current namespaces
+ for ns in [ns for ns in namespaces if ns[0]]:
+ fp = open(f"/proc/self/ns/{ns[1]}")
+ namespaces[ns] = fp
+
+ # Save fds of current directories
+ if mount:
+ for d in dirs:
+ dirs[d] = os.open(f"/proc/self/{d}", os.O_RDONLY)
+
+ simple_unshare(mount=mount, uts=uts, ipc=ipc, net=net, pid=pid, user=user,
+ hostname=hostname)
+ try:
+ yield
+ finally:
+ for ns in [ns for ns in namespaces if ns[0]]:
+ fp = namespaces[ns]
+ setns(fp.fileno(), 0)
+ fp.close()
+
+ if mount:
+ # Restore original root and cwd. Since we cannot directly chroot to
+ # a fd, first change the current directory to the fd of the
+ # original root, then chroot to "."
+
+ os.fchdir(dirs["root"])
+ os.chroot(".")
+ os.fchdir(dirs["cwd"])
+
+ for fd in dirs.values():
+ os.close(fd)
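
The new context manager wraps snakeoil's simple_unshare()/setns() so callers can temporarily enter fresh namespaces and have the originals restored on exit. A usage sketch (assumes snakeoil is installed and the process has the privileges needed to unshare, typically root):

    import os
    from catalyst.context import namespace

    with namespace(uts=True, hostname='catalyst'):
        # The UTS namespace is unshared here, so the hostname change is
        # invisible to the rest of the system.
        print(os.uname().nodename)        # -> catalyst
    # On exit, setns() re-enters the saved namespaces; for mount namespaces
    # the original root and cwd are restored as well.
    print(os.uname().nodename)            # original hostname again
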
diff --git a/catalyst/defaults.py b/catalyst/defaults.py
index f2fe29df..f4d48fef 100644
--- a/catalyst/defaults.py
+++ b/catalyst/defaults.py
@@ -1,120 +1,142 @@
-import os
+import portage
+
+from collections import OrderedDict
from DeComp.definitions import DECOMPRESSOR_SEARCH_ORDER
from DeComp.definitions import COMPRESSOR_PROGRAM_OPTIONS, XATTRS_OPTIONS
from DeComp.definitions import DECOMPRESSOR_PROGRAM_OPTIONS, LIST_XATTRS_OPTIONS
-# Used for the (de)compressor definitions
-if os.uname()[0] in ["Linux", "linux"]:
- TAR = 'linux'
-else:
- TAR = 'bsd'
-
-
-# these should never be touched
-required_build_targets = ["targetbase", "generic_stage_target"]
-
-# new build types should be added here
-valid_build_targets = ["stage1_target", "stage2_target", "stage3_target",
- "stage4_target", "grp_target", "livecd_stage1_target", "livecd_stage2_target",
- "embedded_target", "tinderbox_target", "snapshot_target", "netboot_target",
- "netboot2_target"
- ]
-required_config_file_values = ["storedir", "sharedir", "distdir", "portdir"]
+valid_config_file_values = frozenset([
+ "binhost",
+ "compression_mode",
+ "digests",
+ "digest_format",
+ "distcc_hosts",
+ "distdir",
+ "envscript",
+ "jobs",
+ "load-average",
+ "options",
+ "port_logdir",
+ "repo_basedir",
+ "repo_name",
+ "repos_storedir",
+ "sharedir",
+ "storedir",
+ "target_distdir",
+ "target_logdir",
+ "target_pkgdir",
+ "var_tmpfs_portage",
+])
-valid_config_file_values = required_config_file_values[:]
-valid_config_file_values.extend([ "distcc", "envscript",
- "options", "DEBUG", "VERBOSE",
- "snapshot_cache", "hash_function", "digests", "contents", "compressor_arch",
- "compression_mode", "compressor_options", "decompressor_search_order",
- ])
-
-# set our base defaults here to keep
-# them in one location.
-BASE_GENTOO_DIR = "/var/gentoo"
-REPODIR = BASE_GENTOO_DIR + "/repos"
-DISTDIR = BASE_GENTOO_DIR + "/distfiles"
-PKGDIR = BASE_GENTOO_DIR + "/packages"
-MAINREPO = "gentoo"
-PORTDIR = REPODIR + "/" + MAINREPO
-
-confdefaults={
- "archdir": "%(PythonDir)s/arch",
- "comp_prog": COMPRESSOR_PROGRAM_OPTIONS[TAR],
- "compression_mode": 'lbzip2',
- "compressor_arch": None,
- "compressor_options": XATTRS_OPTIONS[TAR],
- "decomp_opt": DECOMPRESSOR_PROGRAM_OPTIONS[TAR],
- "decompressor_search_order": DECOMPRESSOR_SEARCH_ORDER,
- "distdir": DISTDIR[:],
- "hash_function": "crc32",
- "icecream": "/var/cache/icecream",
- 'list_xattrs_opt': LIST_XATTRS_OPTIONS[TAR],
- "local_overlay": REPODIR[:] + "/local",
- "port_conf": "/etc/portage",
- "make_conf": "%(port_conf)s/make.conf",
- "options": set(),
- "packagedir": PKGDIR[:],
- "portdir": PORTDIR[:],
- "port_tmpdir": "/var/tmp/portage",
- "PythonDir": "./catalyst",
- "repo_basedir": REPODIR[:],
- "repo_name": MAINREPO[:],
- "sed": "sed",
- "sharedir": "/usr/share/catalyst",
- "shdir": "/usr/share/catalyst/targets/",
- "snapshot_cache": "/var/tmp/catalyst/snapshot_cache",
- "snapshot_name": "%(repo_name)s-",
- "source_matching": "strict",
- "storedir": "/var/tmp/catalyst",
- "target_distdir": DISTDIR[:],
- "target_pkgdir": PKGDIR[:],
- }
+confdefaults = {
+ "binhost": '',
+ "comp_prog": COMPRESSOR_PROGRAM_OPTIONS['linux'],
+ "compression_mode": 'lbzip2',
+ "compressor_arch": None,
+ "compressor_options": XATTRS_OPTIONS['linux'],
+ "decomp_opt": DECOMPRESSOR_PROGRAM_OPTIONS['linux'],
+ "decompressor_search_order": DECOMPRESSOR_SEARCH_ORDER,
+ "digest_format": 'linux',
+ "distdir": portage.settings['DISTDIR'],
+ "icecream": "/var/cache/icecream",
+ 'list_xattrs_opt': LIST_XATTRS_OPTIONS['linux'],
+ "port_conf": "/etc/portage",
+ "binrepos_conf": "%(port_conf)s/binrepos.conf",
+ "make_conf": "%(port_conf)s/make.conf",
+ "repos_conf": "%(port_conf)s/repos.conf",
+ "options": set(),
+ "pkgdir": "/var/cache/binpkgs",
+ "port_tmpdir": "/var/tmp/portage",
+ "repo_basedir": "/var/db/repos",
+ "repo_name": "gentoo",
+ "repos_storedir": "%(storedir)s/repos",
+ "sharedir": "/usr/share/catalyst",
+ "shdir": "%(sharedir)s/targets",
+ "storedir": "/var/tmp/catalyst",
+ "target_distdir": "/var/cache/distfiles",
+ "target_logdir": "/var/log/portage",
+ "target_pkgdir": "/var/cache/binpkgs",
+}
DEFAULT_CONFIG_FILE = '/etc/catalyst/catalyst.conf'
PORT_LOGDIR_CLEAN = \
- 'find "${PORT_LOGDIR}" -type f ! -name "summary.log*" -mtime +30 -delete'
-
-TARGET_MOUNT_DEFAULTS = {
- "ccache": "/var/tmp/ccache",
- "dev": "/dev",
- "devpts": "/dev/pts",
- "distdir": DISTDIR[:],
- "icecream": "/usr/lib/icecc/bin",
- "kerncache": "/tmp/kerncache",
- "packagedir": PKGDIR[:],
- "portdir": PORTDIR[:],
- "port_tmpdir": "/var/tmp/portage",
- "port_logdir": "/var/log/portage",
- "proc": "/proc",
- "shm": "/dev/shm",
- }
+ 'find "${PORT_LOGDIR}" -type f ! -name "summary.log*" -mtime +30 -delete'
-SOURCE_MOUNT_DEFAULTS = {
- "dev": "/dev",
- "devpts": "/dev/pts",
- "distdir": DISTDIR[:],
- "portdir": PORTDIR[:],
- "port_tmpdir": "tmpfs",
- "proc": "/proc",
- "shm": "shmfs",
- }
+MOUNT_DEFAULTS = OrderedDict([
+ ('proc', {
+ 'enable': True,
+ 'source': '/proc',
+ 'target': '/proc',
+ }),
+ ('dev', {
+ 'enable': True,
+ 'source': '/dev',
+ 'target': '/dev',
+ }),
+ ('devpts', {
+ 'enable': True,
+ 'source': '/dev/pts',
+ 'target': '/dev/pts',
+ }),
+ ('shm', {
+ 'enable': True,
+ 'source': 'shm',
+ 'target': '/dev/shm',
+ }),
+ ('run', {
+ 'enable': True,
+ 'source': 'tmpfs',
+ 'target': '/run',
+ }),
+ ('distdir', {
+ 'enable': True,
+ 'source': 'config',
+ 'target': 'config',
+ }),
+ ('pkgdir', {
+ 'enable': False,
+ 'source': 'config',
+ 'target': 'config',
+ }),
+ ('port_tmpdir', {
+ 'enable': True,
+ 'source': 'maybe_tmpfs',
+ 'target': '/var/tmp/portage',
+ }),
+ ('kerncache', {
+ 'enable': False,
+ 'source': 'config',
+ 'target': '/tmp/kerncache',
+ }),
+ ('port_logdir', {
+ 'enable': False,
+ 'source': 'config',
+ 'target': '/var/log/portage',
+ }),
+ ('ccache', {
+ 'enable': False,
+ 'source': 'config',
+ 'target': '/var/tmp/ccache',
+ }),
+ ('icecream', {
+ 'enable': False,
+ 'source': ...,
+ 'target': '/usr/lib/icecc/bin',
+ }),
+])
-# legend: key: message
option_messages = {
- "autoresume": "Autoresuming support enabled.",
- "ccache": "Compiler cache support enabled.",
- "clear-autoresume": "Cleaning autoresume flags support enabled.",
- #"compress": "Compression enabled.",
- "distcc": "Distcc support enabled.",
- "icecream": "Icecream compiler cluster support enabled.",
- "kerncache": "Kernel cache support enabled.",
- "pkgcache": "Package cache support enabled.",
- "purge": "Purge support enabled.",
- "seedcache": "Seed cache support enabled.",
- "snapcache": "Snapshot cache support enabled.",
- #"tarball": "Tarball creation enabled.",
- }
+ "autoresume": "Autoresuming support enabled.",
+ "ccache": "Compiler cache support enabled.",
+ "clear-autoresume": "Cleaning autoresume flags support enabled.",
+ "distcc": "Distcc support enabled.",
+ "icecream": "Icecream compiler cluster support enabled.",
+ "kerncache": "Kernel cache support enabled.",
+ "pkgcache": "Package cache support enabled.",
+ "purge": "Purge support enabled.",
+ "seedcache": "Seed cache support enabled.",
+}
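
TARGET_MOUNT_DEFAULTS and SOURCE_MOUNT_DEFAULTS collapse into one ordered table whose 'source'/'target' entries are either literal paths or placeholders ('config', 'maybe_tmpfs') resolved later from the settings. A small inspection sketch (importing catalyst.defaults now pulls in portage, so this assumes a host with portage available):

    from catalyst.defaults import MOUNT_DEFAULTS

    # Mounts active out of the box; the rest (pkgdir, kerncache, port_logdir,
    # ccache, icecream) are switched on by options or spec values.
    enabled = [name for name, opts in MOUNT_DEFAULTS.items() if opts['enable']]
    print(enabled)   # ['proc', 'dev', 'devpts', 'shm', 'run', 'distdir', 'port_tmpdir']
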
diff --git a/catalyst/fileops.py b/catalyst/fileops.py
index 878e6303..4252285e 100644
--- a/catalyst/fileops.py
+++ b/catalyst/fileops.py
@@ -1,8 +1,3 @@
-
-# Maintained in full by:
-# Catalyst Team <catalyst@gentoo.org>
-# Release Engineering Team <releng@gentoo.org>
-
'''fileops.py
Performs file operations such as pack/unpack,
@@ -15,126 +10,113 @@ import os
import shutil
from stat import ST_UID, ST_GID, ST_MODE
-# NOTE: pjoin and listdir_files are imported here for export
-# to other catalyst modules
-# pylint: disable=unused-import
-from snakeoil.osutils import (ensure_dirs as snakeoil_ensure_dirs,
- pjoin, listdir_files)
-# pylint: enable=unused-import
+from snakeoil.osutils import ensure_dirs as snakeoil_ensure_dirs
from catalyst import log
-from catalyst.support import (cmd, CatalystError)
+from catalyst.support import CatalystError
def ensure_dirs(path, gid=-1, uid=-1, mode=0o755, minimal=True,
- failback=None, fatal=False):
- '''Wrapper to snakeoil.osutil's ensure_dirs()
- This additionally allows for failures to run
- cleanup or other code and/or raise fatal errors.
-
- :param path: directory to ensure exists on disk
- :param gid: a valid GID to set any created directories to
- :param uid: a valid UID to set any created directories to
- :param mode: permissions to set any created directories to
- :param minimal: boolean controlling whether or not the specified mode
- must be enforced, or is the minimal permissions necessary. For example,
- if mode=0o755, minimal=True, and a directory exists with mode 0707,
- this will restore the missing group perms resulting in 757.
- :param failback: function to run in the event of a failed attemp
- to create the directory.
- :return: True if the directory could be created/ensured to have those
- permissions, False if not.
- '''
- succeeded = snakeoil_ensure_dirs(path, gid=gid, uid=uid, mode=mode, minimal=minimal)
- if not succeeded:
- if failback:
- failback()
- if fatal:
- raise CatalystError(
- "Failed to create directory: %s" % path, print_traceback=True)
- return succeeded
-
-
-def clear_dir(target, mode=0o755, chg_flags=False, remove=False,
- clear_nondir=True):
- '''Universal directory clearing function
-
- @target: string, path to be cleared or removed
- @mode: integer, desired mode to set the directory to
- @chg_flags: boolean used for FreeBSD hosts
- @remove: boolean, passed through to clear_dir()
- @return boolean
- '''
- log.debug('start: %s', target)
- if not target:
- log.debug('no target... returning')
- return False
-
- mystat = None
- if os.path.isdir(target) and not os.path.islink(target):
- log.notice('Emptying directory: %s', target)
- # stat the dir, delete the dir, recreate the dir and set
- # the proper perms and ownership
- try:
- log.debug('os.stat()')
- mystat = os.stat(target)
- # There's no easy way to change flags recursively in python
- if chg_flags and os.uname()[0] == "FreeBSD":
- cmd(['chflags', '-R', 'noschg', target])
- log.debug('shutil.rmtree()')
- shutil.rmtree(target)
- except Exception:
- log.error('clear_dir failed', exc_info=True)
- return False
- elif os.path.exists(target):
- if clear_nondir:
- log.debug("Clearing (unlinking) non-directory: %s", target)
- os.unlink(target)
- else:
- log.info('clear_dir failed: %s: is not a directory', target)
- return False
- else:
- log.debug("Conditions not met to clear: %s", target)
- log.debug(" isdir: %s", os.path.isdir(target))
- log.debug(" islink: %s", os.path.islink(target))
- log.debug(" exists: %s", os.path.exists(target))
-
- if not remove:
- log.debug('ensure_dirs()')
- ensure_dirs(target, mode=mode)
- if mystat:
- os.chown(target, mystat[ST_UID], mystat[ST_GID])
- os.chmod(target, mystat[ST_MODE])
-
- log.debug('DONE, returning True')
- return True
+ failback=None, fatal=False):
+ '''Wrapper to snakeoil.osutil's ensure_dirs()
+ This additionally allows for failures to run
+ cleanup or other code and/or raise fatal errors.
+
+ :param path: directory to ensure exists on disk
+ :param gid: a valid GID to set any created directories to
+ :param uid: a valid UID to set any created directories to
+ :param mode: permissions to set any created directories to
+ :param minimal: boolean controlling whether or not the specified mode
+ must be enforced, or is the minimal permissions necessary. For example,
+ if mode=0o755, minimal=True, and a directory exists with mode 0707,
+ this will restore the missing group perms resulting in 757.
+ :param failback: function to run in the event of a failed attempt
+ to create the directory.
+ :return: True if the directory could be created/ensured to have those
+ permissions, False if not.
+ '''
+ succeeded = snakeoil_ensure_dirs(
+ path, gid=gid, uid=uid, mode=mode, minimal=minimal)
+ if not succeeded:
+ if failback:
+ failback()
+ if fatal:
+ raise CatalystError(
+ "Failed to create directory: %s" % path, print_traceback=True)
+ return succeeded
+
+
+def clear_dir(target, mode=0o755, remove=False):
+ '''Universal directory clearing function
+
+ @target: string, path to be cleared or removed
+ @mode: integer, desired mode to set the directory to
+ @remove: boolean, passed through to clear_dir()
+ @return boolean
+ '''
+ log.debug('start: %s', target)
+ if not target:
+ log.debug('no target... returning')
+ return False
+
+ mystat = None
+ if os.path.isdir(target) and not os.path.islink(target):
+ log.notice('Emptying directory: %s', target)
+ # stat the dir, delete the dir, recreate the dir and set
+ # the proper perms and ownership
+ try:
+ log.debug('os.stat()')
+ mystat = os.stat(target)
+ log.debug('shutil.rmtree()')
+ shutil.rmtree(target)
+ except Exception:
+ log.error('clear_dir failed', exc_info=True)
+ return False
+ elif os.path.exists(target):
+ log.debug("Clearing (unlinking) non-directory: %s", target)
+ os.unlink(target)
+ else:
+ log.debug("Conditions not met to clear: %s", target)
+ log.debug(" isdir: %s", os.path.isdir(target))
+ log.debug(" islink: %s", os.path.islink(target))
+ log.debug(" exists: %s", os.path.exists(target))
+
+ if not remove:
+ log.debug('ensure_dirs()')
+ ensure_dirs(target, mode=mode)
+ if mystat:
+ os.chown(target, mystat[ST_UID], mystat[ST_GID])
+ os.chmod(target, mystat[ST_MODE])
+
+ log.debug('DONE, returning True')
+ return True
def clear_path(target_path):
- """Nuke |target_path| regardless of it being a dir, file or glob."""
- targets = glob.glob(target_path)
- for path in targets:
- clear_dir(path, remove=True)
+ """Nuke |target_path| regardless of it being a dir, file or glob."""
+ targets = glob.iglob(target_path, recursive=True)
+ for path in targets:
+ clear_dir(path, remove=True)
def move_path(src, dest):
- '''Move a source target to a new destination
-
- :param src: source path to move
- :param dest: destination path to move it to
- :returns: boolean
- '''
- log.debug('Start move_path(%s, %s)', src, dest)
- if os.path.isdir(src) and not os.path.islink(src):
- if os.path.exists(dest):
- log.warning('Removing existing target destination: %s', dest)
- if not clear_dir(dest, remove=True):
- return False
- log.debug('Moving source...')
- try:
- shutil.move(src, dest)
- except Exception:
- log.error('move_path failed', exc_info=True)
- return False
- return True
- return False
+ '''Move a source target to a new destination
+
+ :param src: source path to move
+ :param dest: destination path to move it to
+ :returns: boolean
+ '''
+ log.debug('Start move_path(%s, %s)', src, dest)
+ if os.path.isdir(src) and not os.path.islink(src):
+ if os.path.exists(dest):
+ log.warning('Removing existing target destination: %s', dest)
+ if not clear_dir(dest, remove=True):
+ return False
+ log.debug('Moving source...')
+ try:
+ shutil.move(src, dest)
+ except Exception:
+ log.error('move_path failed', exc_info=True)
+ return False
+ return True
+ return False
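
The reindented helpers behave as before, minus the FreeBSD chflags and clear_nondir special cases, and clear_path() now accepts recursive globs via glob.iglob(..., recursive=True). A throwaway usage sketch (paths are temporary and made up; snakeoil is required by the module):

    import os
    import tempfile

    from catalyst.fileops import ensure_dirs, clear_dir, clear_path

    work = tempfile.mkdtemp()
    stage = os.path.join(work, 'tmp', 'stage1')
    ensure_dirs(stage, mode=0o755, fatal=True)       # raises CatalystError on failure
    open(os.path.join(stage, 'leftover'), 'w').close()

    clear_dir(stage)                       # empties the dir, then recreates it with
                                           # the old ownership and mode
    clear_path(os.path.join(work, 'tmp'))  # removes it entirely (dir, file or glob)
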
diff --git a/catalyst/hash_utils.py b/catalyst/hash_utils.py
deleted file mode 100644
index 1134f502..00000000
--- a/catalyst/hash_utils.py
+++ /dev/null
@@ -1,128 +0,0 @@
-
-import os
-from collections import namedtuple
-from subprocess import Popen, PIPE
-
-from catalyst import log
-from catalyst.support import CatalystError
-
-
-# Use HashMap.fields for the value legend
-# fields = ["func", "cmd", "args", "id"]
-HASH_DEFINITIONS = {
- "adler32" :["calc_hash2", "shash", ["-a", "ADLER32"], "ADLER32"],
- "crc32" :["calc_hash2", "shash", ["-a", "CRC32"], "CRC32"],
- "crc32b" :["calc_hash2", "shash", ["-a", "CRC32B"], "CRC32B"],
- "gost" :["calc_hash2", "shash", ["-a", "GOST"], "GOST"],
- "haval128" :["calc_hash2", "shash", ["-a", "HAVAL128"], "HAVAL128"],
- "haval160" :["calc_hash2", "shash", ["-a", "HAVAL160"], "HAVAL160"],
- "haval192" :["calc_hash2", "shash", ["-a", "HAVAL192"], "HAVAL192"],
- "haval224" :["calc_hash2", "shash", ["-a", "HAVAL224"], "HAVAL224"],
- "haval256" :["calc_hash2", "shash", ["-a", "HAVAL256"], "HAVAL256"],
- "md2" :["calc_hash2", "shash", ["-a", "MD2"], "MD2"],
- "md4" :["calc_hash2", "shash", ["-a", "MD4"], "MD4"],
- "md5" :["calc_hash2", "shash", ["-a", "MD5"], "MD5"],
- "ripemd128":["calc_hash2", "shash", ["-a", "RIPEMD128"], "RIPEMD128"],
- "ripemd160":["calc_hash2", "shash", ["-a", "RIPEMD160"], "RIPEMD160"],
- "ripemd256":["calc_hash2", "shash", ["-a", "RIPEMD256"], "RIPEMD256"],
- "ripemd320":["calc_hash2", "shash", ["-a", "RIPEMD320"], "RIPEMD320"],
- "sha1" :["calc_hash2", "shash", ["-a", "SHA1"], "SHA1"],
- "sha224" :["calc_hash2", "shash", ["-a", "SHA224"], "SHA224"],
- "sha256" :["calc_hash2", "shash", ["-a", "SHA256"], "SHA256"],
- "sha384" :["calc_hash2", "shash", ["-a", "SHA384"], "SHA384"],
- "sha512" :["calc_hash2", "shash", ["-a", "SHA512"], "SHA512"],
- "snefru128":["calc_hash2", "shash", ["-a", "SNEFRU128"], "SNEFRU128"],
- "snefru256":["calc_hash2", "shash", ["-a", "SNEFRU256"], "SNEFRU256"],
- "tiger" :["calc_hash2", "shash", ["-a", "TIGER"], "TIGER"],
- "tiger128" :["calc_hash2", "shash", ["-a", "TIGER128"], "TIGER128"],
- "tiger160" :["calc_hash2", "shash", ["-a", "TIGER160"], "TIGER160"],
- "whirlpool":["calc_hash2", "shash", ["-a", "WHIRLPOOL"], "WHIRLPOOL"],
- }
-
-
-class HashMap(object):
- '''Class for handling
- Catalyst's hash generation'''
-
- fields = ["func", "cmd", "args", "id"]
-
-
- def __init__(self, hashes=None):
- '''Class init
-
- @param hashes: dictionary of Key:[function, cmd, cmd_args, Print string]
- @param fields: list of ordered field names for the hashes
- eg: ["func", "cmd", "args", "id"]
- '''
- if hashes is None:
- hashes = {}
- self.hash_map = {}
-
- # create the hash definition namedtuple classes
- for name in list(hashes):
- obj = namedtuple(name, self.fields)
- obj.__slots__ = ()
- self.hash_map[name] = obj._make(hashes[name])
- del obj
-
-
- def generate_hash(self, file_, hash_="crc32"):
- '''Prefered method of generating a hash for the passed in file_
-
- @param file_: the file to generate the hash for
- @param hash_: the hash algorythm to use
- @returns the hash result
- '''
- try:
- return getattr(self, self.hash_map[hash_].func)(
- file_,
- hash_)
- except:
- raise CatalystError("Error generating hash, is appropriate " + \
- "utility installed on your system?", print_traceback=True)
-
-
- def calc_hash(self, file_, hash_):
- '''
- Calculate the hash for "file_"
-
- @param file_: the file to generate the hash for
- @param hash_: the hash algorythm to use
- @returns the hash result
- '''
- _hash = self.hash_map[hash_]
- args = [_hash.cmd]
- args.extend(_hash.args)
- args.append(file_)
- source = Popen(args, stdout=PIPE)
- mylines = source.communicate()[0]
- mylines=mylines[0].split()
- result=mylines[0]
- log.info('%s (%s) = %s', _hash.id, file_, result)
- return result
-
-
- def calc_hash2(self, file_, hash_type):
- '''
- Calculate the hash for "file_"
-
- @param file_: the file to generate the hash for
- @param hash_: the hash algorythm to use
- @returns the hash result
- '''
- _hash = self.hash_map[hash_type]
- args = [_hash.cmd]
- args.extend(_hash.args)
- args.append(file_)
- log.debug('args = %r', args)
- source = Popen(args, stdout=PIPE)
- output = source.communicate()
- lines = output[0].decode('ascii').split('\n')
- log.debug('output = %s', output)
- header = lines[0]
- h_f = lines[1].split()
- hash_result = h_f[0]
- short_file = os.path.split(h_f[1])[1]
- result = header + "\n" + hash_result + " " + short_file + "\n"
- log.info('%s (%s) = %s', header, short_file, result)
- return result
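
The shash-based HashMap goes away; main.py below validates digests against hashlib.algorithms_available instead, so digest generation can come straight from the standard library. This is only a sketch of the idea, not the code that replaces it:

    import hashlib

    def file_digest(path, algo='sha512', blocksize=1 << 20):
        """Stream a file through hashlib instead of shelling out to shash."""
        h = hashlib.new(algo)
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(blocksize), b''):
                h.update(chunk)
        return h.hexdigest()

    # e.g. file_digest('stage3.tar.xz', 'blake2b')
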
diff --git a/catalyst/lock.py b/catalyst/lock.py
deleted file mode 100644
index 808df4ec..00000000
--- a/catalyst/lock.py
+++ /dev/null
@@ -1,31 +0,0 @@
-
-import os
-
-from snakeoil import fileutils
-from snakeoil import osutils
-from catalyst.fileops import ensure_dirs
-
-
-LockInUse = osutils.LockException
-
-
-class LockDir(object):
- """An object that creates locks inside dirs"""
-
- def __init__(self, lockdir):
- self.gid = 250
- self.lockfile = os.path.join(lockdir, '.catalyst_lock')
- ensure_dirs(lockdir)
- fileutils.touch(self.lockfile, mode=0o664)
- os.chown(self.lockfile, -1, self.gid)
- self.lock = osutils.FsLock(self.lockfile)
-
- def read_lock(self):
- self.lock.acquire_read_lock()
-
- def write_lock(self):
- self.lock.acquire_write_lock()
-
- def unlock(self):
- # Releasing a write lock is the same as a read lock.
- self.lock.release_write_lock()
diff --git a/catalyst/log.py b/catalyst/log.py
index d640dece..ee124392 100644
--- a/catalyst/log.py
+++ b/catalyst/log.py
@@ -8,8 +8,6 @@ another level "notice" between warning & info, and all output goes through
the "catalyst" logger.
"""
-from __future__ import print_function
-
import logging
import logging.handlers
import os
@@ -18,15 +16,15 @@ import time
class CatalystLogger(logging.Logger):
- """Override the _log member to autosplit on new lines"""
+ """Override the _log member to autosplit on new lines"""
- def _log(self, level, msg, args, **kwargs):
- """If given a multiline message, split it"""
- # We have to interpolate it first in case they spread things out
- # over multiple lines like: Bad Thing:\n%s\nGoodbye!
- msg %= args
- for line in msg.splitlines():
- super(CatalystLogger, self)._log(level, line, (), **kwargs)
+ def _log(self, level, msg, args, **kwargs):
+ """If given a multiline message, split it"""
+ # We have to interpolate it first in case they spread things out
+ # over multiple lines like: Bad Thing:\n%s\nGoodbye!
+ msg %= args
+ for line in msg.splitlines():
+ super(CatalystLogger, self)._log(level, line, (), **kwargs)
# The logger that all output should go through.
@@ -45,14 +43,16 @@ logging.addLevelName(NOTICE, 'NOTICE')
# The API we expose to consumers.
def notice(msg, *args, **kwargs):
- """Log a notice message"""
- logger.log(NOTICE, msg, *args, **kwargs)
+ """Log a notice message"""
+ logger.log(NOTICE, msg, *args, **kwargs)
+
def critical(msg, *args, **kwargs):
- """Log a critical message and then exit"""
- status = kwargs.pop('status', 1)
- logger.critical(msg, *args, **kwargs)
- sys.exit(status)
+ """Log a critical message and then exit"""
+ status = kwargs.pop('status', 1)
+ logger.critical(msg, *args, **kwargs)
+ sys.exit(status)
+
error = logger.error
warning = logger.warning
@@ -61,41 +61,40 @@ debug = logger.debug
class CatalystFormatter(logging.Formatter):
- """Mark bad messages with colors automatically"""
-
- _COLORS = {
- 'CRITICAL': '\033[1;35m',
- 'ERROR': '\033[1;31m',
- 'WARNING': '\033[1;33m',
- 'DEBUG': '\033[1;34m',
- }
- _NORMAL = '\033[0m'
-
- @staticmethod
- def detect_color():
- """Figure out whether the runtime env wants color"""
- if 'NOCOLOR' is os.environ:
- return False
- return os.isatty(sys.stdout.fileno())
-
- def __init__(self, *args, **kwargs):
- """Initialize"""
- color = kwargs.pop('color', None)
- if color is None:
- color = self.detect_color()
- if not color:
- self._COLORS = {}
-
- super(CatalystFormatter, self).__init__(*args, **kwargs)
-
- def format(self, record, **kwargs):
- """Format the |record| with our color settings"""
- msg = super(CatalystFormatter, self).format(record, **kwargs)
- color = self._COLORS.get(record.levelname)
- if color:
- return color + msg + self._NORMAL
- else:
- return msg
+ """Mark bad messages with colors automatically"""
+
+ _COLORS = {
+ 'CRITICAL': '\033[1;35m',
+ 'ERROR': '\033[1;31m',
+ 'WARNING': '\033[1;33m',
+ 'DEBUG': '\033[1;34m',
+ }
+ _NORMAL = '\033[0m'
+
+ @staticmethod
+ def detect_color():
+ """Figure out whether the runtime env wants color"""
+ if 'NOCOLOR' in os.environ:
+ return False
+ return os.isatty(sys.stdout.fileno())
+
+ def __init__(self, *args, **kwargs):
+ """Initialize"""
+ color = kwargs.pop('color', None)
+ if color is None:
+ color = self.detect_color()
+ if not color:
+ self._COLORS = {}
+
+ super(CatalystFormatter, self).__init__(*args, **kwargs)
+
+ def format(self, record, **kwargs):
+ """Format the |record| with our color settings"""
+ msg = super(CatalystFormatter, self).format(record, **kwargs)
+ color = self._COLORS.get(record.levelname)
+ if color:
+ return color + msg + self._NORMAL
+ return msg
# We define |debug| in global scope so people can call log.debug(), but it
@@ -103,29 +102,29 @@ class CatalystFormatter(logging.Formatter):
# use that func in here, it's not a problem, so silence the warning.
# pylint: disable=redefined-outer-name
def setup_logging(level, output=None, debug=False, color=None):
- """Initialize the logging module using the |level| level"""
- # The incoming level will be things like "info", but setLevel wants
- # the numeric constant. Convert it here.
- level = logging.getLevelName(level.upper())
-
- # The good stuff.
- fmt = '%(asctime)s: %(levelname)-8s: '
- if debug:
- fmt += '%(filename)s:%(funcName)s: '
- fmt += '%(message)s'
-
- # Figure out where to send the log output.
- if output is None:
- handler = logging.StreamHandler(stream=sys.stdout)
- else:
- handler = logging.FileHandler(output)
-
- # Use a date format that is readable by humans & machines.
- # Think e-mail/RFC 2822: 05 Oct 2013 18:58:50 EST
- tzname = time.strftime('%Z', time.localtime())
- datefmt = '%d %b %Y %H:%M:%S ' + tzname
- formatter = CatalystFormatter(fmt, datefmt, color=color)
- handler.setFormatter(formatter)
-
- logger.addHandler(handler)
- logger.setLevel(level)
+ """Initialize the logging module using the |level| level"""
+ # The incoming level will be things like "info", but setLevel wants
+ # the numeric constant. Convert it here.
+ level = logging.getLevelName(level.upper())
+
+ # The good stuff.
+ fmt = '%(asctime)s: %(levelname)-8s: '
+ if debug:
+ fmt += '%(filename)s:%(funcName)s: '
+ fmt += '%(message)s'
+
+ # Figure out where to send the log output.
+ if output is None:
+ handler = logging.StreamHandler(stream=sys.stdout)
+ else:
+ handler = logging.FileHandler(output)
+
+ # Use a date format that is readable by humans & machines.
+ # Think e-mail/RFC 2822: 05 Oct 2013 18:58:50 EST
+ tzname = time.strftime('%Z', time.localtime())
+ datefmt = '%d %b %Y %H:%M:%S ' + tzname
+ formatter = CatalystFormatter(fmt, datefmt, color=color)
+ handler.setFormatter(formatter)
+
+ logger.addHandler(handler)
+ logger.setLevel(level)
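
Behaviour is unchanged by the reindent: setup_logging() wires a stream or file handler with the colourizing formatter, NOTICE sits between WARNING and INFO, and multi-line messages are split into one record per line. A quick usage sketch:

    from catalyst import log

    log.setup_logging('info', color=False)
    log.notice('custom NOTICE level between WARNING and INFO')
    log.info('multi-line messages\nbecome one record per line')
    # log.critical('logs the message, then sys.exit(1)s')  # exits, so left commented
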
diff --git a/catalyst/main.py b/catalyst/main.py
index 01c456ea..6e9a2d3e 100644
--- a/catalyst/main.py
+++ b/catalyst/main.py
@@ -1,457 +1,373 @@
-
-# Maintained in full by:
-# Catalyst Team <catalyst@gentoo.org>
-# Release Engineering Team <releng@gentoo.org>
-# Andrew Gaffney <agaffney@gentoo.org>
-# Chris Gianelloni <wolf31o2@wolf31o2.org>
-# $Id$
-
import argparse
+import copy
import datetime
+import hashlib
import os
import sys
+import textwrap
-from snakeoil import process
-from snakeoil.process import namespaces
+import tomli
from DeComp.definitions import (COMPRESS_DEFINITIONS, DECOMPRESS_DEFINITIONS,
- CONTENTS_DEFINITIONS)
+ CONTENTS_DEFINITIONS)
from DeComp.contents import ContentsMap
from catalyst import log
import catalyst.config
-from catalyst.defaults import confdefaults, option_messages, DEFAULT_CONFIG_FILE
-from catalyst.hash_utils import HashMap, HASH_DEFINITIONS
+from catalyst.context import namespace
+from catalyst.defaults import (confdefaults, option_messages,
+ DEFAULT_CONFIG_FILE, valid_config_file_values)
from catalyst.support import CatalystError
from catalyst.version import get_version
-
-conf_values={}
+conf_values = copy.deepcopy(confdefaults)
def version():
- log.info(get_version())
- log.info('Copyright 2003-%s Gentoo Foundation', datetime.datetime.now().year)
- log.info('Copyright 2008-2012 various authors')
- log.info('Distributed under the GNU General Public License version 2.1')
+ log.info(get_version())
+ log.info('Copyright 2003-%s Gentoo Foundation',
+ datetime.datetime.now().year)
+ log.info('Copyright 2008-2012 various authors')
+ log.info('Distributed under the GNU General Public License version 2.1')
+
def parse_config(config_files):
- # search a couple of different areas for the main config file
- myconf={}
-
- # try and parse the config file "config_file"
- for config_file in config_files:
- log.notice('Loading configuration file: %s', config_file)
- try:
- config = catalyst.config.ConfigParser(config_file)
- myconf.update(config.get_values())
- except Exception as e:
- log.critical('Could not find parse configuration file: %s: %s',
- config_file, e)
-
- # now, load up the values into conf_values so that we can use them
- for x in list(confdefaults):
- if x in myconf:
- if x == 'options':
- conf_values[x] = set(myconf[x].split())
- elif x in ["decompressor_search_order"]:
- conf_values[x] = myconf[x].split()
- else:
- conf_values[x]=myconf[x]
- else:
- conf_values[x]=confdefaults[x]
-
- # add our python base directory to use for loading target arch's
- conf_values["PythonDir"] = os.path.dirname(os.path.realpath(__file__))
-
- # print out any options messages
- for opt in conf_values['options']:
- if opt in option_messages:
- log.info(option_messages[opt])
-
- for key in ["digests", "envscript", "var_tmpfs_portage", "port_logdir",
- "local_overlay"]:
- if key in myconf:
- conf_values[key] = myconf[key]
-
- if "contents" in myconf:
- # replace '-' with '_' (for compatibility with existing configs)
- conf_values["contents"] = myconf["contents"].replace("-", '_')
-
- if "envscript" in myconf:
- log.info('Envscript support enabled.')
-
- # take care of any variable substitutions that may be left
- for x in list(conf_values):
- if isinstance(conf_values[x], str):
- conf_values[x] = conf_values[x] % conf_values
+ for config_file in config_files:
+ log.notice('Loading configuration file: %s', config_file)
+ try:
+ with open(config_file, 'rb') as f:
+ config = tomli.load(f)
+ for key in config:
+ if key not in valid_config_file_values:
+ log.critical("Unknown option '%s' in config file %s",
+ key, config_file)
+ conf_values.update(config)
+ except Exception as e:
+ log.critical('Could not find or parse configuration file: %s: %s',
+ config_file, e)
+
+ # print out any options messages
+ for opt in conf_values['options']:
+ if opt in option_messages:
+ log.info(option_messages[opt])
+
+ if "envscript" in conf_values:
+ log.info('Envscript support enabled.')
+
+ # take care of any variable substitutions that may be left
+ for x in list(conf_values):
+ if isinstance(conf_values[x], str):
+ conf_values[x] = conf_values[x] % conf_values
def import_module(target):
- """
- import catalyst's own modules
- (i.e. targets and the arch modules)
- """
- try:
- mod_name = "catalyst.targets." + target
- module = __import__(mod_name, [],[], ["not empty"])
- except ImportError:
- log.critical('Python module import error: %s', target, exc_info=True)
- return module
+ """
+ import catalyst's own modules
+ (i.e. targets and the arch modules)
+ """
+ try:
+ mod_name = "catalyst.targets." + target
+ module = __import__(mod_name, [], [], ["not empty"])
+ except ImportError:
+ log.critical('Python module import error: %s', target, exc_info=True)
+ return module
def build_target(addlargs):
- try:
- target = addlargs["target"].replace('-', '_')
- module = import_module(target)
- target = getattr(module, target)(conf_values, addlargs)
- except AttributeError:
- raise CatalystError(
- "Target \"%s\" not available." % target,
- print_traceback=True)
- except CatalystError:
- return False
- return target.run()
+ try:
+ target = addlargs["target"].replace('-', '_')
+ module = import_module(target)
+ target = getattr(module, target)(conf_values, addlargs)
+ except AttributeError as e:
+ raise CatalystError(
+ "Target \"%s\" not available." % target,
+ print_traceback=True) from e
+ except CatalystError:
+ return False
+ return target.run()
-class FilePath(object):
- """Argparse type for getting a path to a file."""
+class FilePath():
+ """Argparse type for getting a path to a file."""
- def __init__(self, exists=True):
- self.exists = exists
+ def __init__(self, exists=True):
+ self.exists = exists
- def __call__(self, string):
- if not os.path.exists(string):
- raise argparse.ArgumentTypeError('file does not exist: %s' % string)
- return string
+ def __call__(self, string):
+ if not os.path.exists(string):
+ raise argparse.ArgumentTypeError(
+ 'file does not exist: %s' % string)
+ return string
- def __repr__(self):
- return '%s(exists=%s)' % (type(self).__name__, self.exists)
+ def __repr__(self):
+ return '%s(exists=%s)' % (type(self).__name__, self.exists)
def get_parser():
- """Return an argument parser"""
- epilog = """Usage examples:
-
-Using the commandline option (-C, --cli) to build a Portage snapshot:
-$ catalyst -C target=snapshot version_stamp=my_date
-
-Using the snapshot option (-s, --snapshot) to build a release snapshot:
-$ catalyst -s 20071121
-
-Using the specfile option (-f, --file) to build a stage target:
-$ catalyst -f stage1-specfile.spec"""
-
- parser = argparse.ArgumentParser(epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter)
-
- parser.add_argument('-V', '--version',
- action='version', version=get_version(),
- help='display version information')
-
- group = parser.add_argument_group('Program output options')
- group.add_argument('-d', '--debug',
- default=False, action='store_true',
- help='enable debugging (and default --log-level debug)')
- group.add_argument('-v', '--verbose',
- default=False, action='store_true',
- help='verbose output (and default --log-level info)')
- group.add_argument('--log-level',
- default=None,
- choices=('critical', 'error', 'warning', 'notice', 'info', 'debug'),
- help='set verbosity of output (default: notice)')
- group.add_argument('--log-file',
- type=FilePath(exists=False),
- help='write all output to this file (instead of stdout)')
- group.add_argument('--color',
- default=None, action='store_true',
- help='colorize output all the time (default: detect)')
- group.add_argument('--nocolor',
- dest='color', action='store_false',
- help='never colorize output all the time (default: detect)')
-
- group = parser.add_argument_group('Developer options')
- group.add_argument('--trace',
- default=False, action='store_true',
- help='trace program output (akin to `sh -x`)')
- group.add_argument('--profile',
- default=False, action='store_true',
- help='profile program execution')
-
- group = parser.add_argument_group('Temporary file management')
- group.add_argument('-a', '--clear-autoresume',
- default=False, action='store_true',
- help='clear autoresume flags')
- group.add_argument('-p', '--purge',
- default=False, action='store_true',
- help='clear tmp dirs, package cache, autoresume flags')
- group.add_argument('-P', '--purgeonly',
- default=False, action='store_true',
- help='clear tmp dirs, package cache, autoresume flags and exit')
- group.add_argument('-T', '--purgetmponly',
- default=False, action='store_true',
- help='clear tmp dirs and autoresume flags and exit')
- group.add_argument('--versioned-cachedir',
- dest='versioned_cachedir', action='store_true',
- help='use stage version on cache directory name')
- group.add_argument('--unversioned-cachedir',
- dest='versioned_cachedir', action='store_false',
- help='do not use stage version on cache directory name')
- group.set_defaults(versioned_cachedir=False)
-
-
- group = parser.add_argument_group('Target/config file management')
- group.add_argument('-F', '--fetchonly',
- default=False, action='store_true',
- help='fetch files only')
- group.add_argument('-c', '--configs',
- type=FilePath(), action='append',
- help='use specified configuration files')
- group.add_argument('-f', '--file',
- type=FilePath(),
- help='read specfile')
- group.add_argument('-s', '--snapshot',
- help='generate a release snapshot')
- group.add_argument('-C', '--cli',
- default=[], nargs=argparse.REMAINDER,
- help='catalyst commandline (MUST BE LAST OPTION)')
-
- return parser
+ """Return an argument parser"""
+ epilog = textwrap.dedent("""\
+ Usage examples:
+
+ Using the snapshot option to make a snapshot of the ebuild repo:
+ $ catalyst --snapshot <git-treeish>
+
+ Using the specfile option (-f, --file) to build a stage target:
+ $ catalyst -f stage1-specfile.spec
+ """)
+
+ parser = argparse.ArgumentParser(
+ epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter)
+
+ parser.add_argument('-V', '--version',
+ action='version', version=get_version(),
+ help='display version information')
+ parser.add_argument('--enter-chroot', default=False, action='store_true',
+ help='Enter chroot before starting the build')
+
+ group = parser.add_argument_group('Program output options')
+ group.add_argument('-d', '--debug',
+ default=False, action='store_true',
+ help='enable debugging (and default --log-level debug)')
+ group.add_argument('-v', '--verbose',
+ default=False, action='store_true',
+ help='verbose output (and default --log-level info)')
+ group.add_argument('--log-level',
+ default=None,
+ choices=('critical', 'error', 'warning',
+ 'notice', 'info', 'debug'),
+ help='set verbosity of output (default: notice)')
+ group.add_argument('--log-file',
+ type=FilePath(exists=False),
+ help='write all output to this file (instead of stdout)')
+ group.add_argument('--color',
+ default=None, action='store_true',
+ help='colorize output all the time (default: detect)')
+ group.add_argument('--nocolor',
+ dest='color', action='store_false',
+ help='never colorize output all the time (default: detect)')
+
+ group = parser.add_argument_group('Developer options')
+ group.add_argument('--trace',
+ default=False, action='store_true',
+ help='trace program output (akin to `sh -x`)')
+ group.add_argument('--profile',
+ default=False, action='store_true',
+ help='profile program execution')
+
+ group = parser.add_argument_group('Temporary file management')
+ group.add_argument('-a', '--clear-autoresume',
+ default=False, action='store_true',
+ help='clear autoresume flags')
+ group.add_argument('-p', '--purge',
+ default=False, action='store_true',
+ help='clear tmp dirs, package cache, autoresume flags')
+ group.add_argument('-P', '--purgeonly',
+ default=False, action='store_true',
+ help='clear tmp dirs, package cache, autoresume flags and exit')
+ group.add_argument('-T', '--purgetmponly',
+ default=False, action='store_true',
+ help='clear tmp dirs and autoresume flags and exit')
+ group.add_argument('--versioned-cachedir',
+ dest='versioned_cachedir', action='store_true',
+ help='use stage version on cache directory name')
+ group.add_argument('--unversioned-cachedir',
+ dest='versioned_cachedir', action='store_false',
+ help='do not use stage version on cache directory name')
+ group.set_defaults(versioned_cachedir=False)
+
+ group = parser.add_argument_group('Target/config file management')
+ group.add_argument('-F', '--fetchonly',
+ default=False, action='store_true',
+ help='fetch files only')
+ group.add_argument('-c', '--configs',
+ type=FilePath(), action='append',
+ help='use specified configuration files')
+ group.add_argument('-f', '--file',
+ type=FilePath(),
+ help='read specfile')
+ group.add_argument('-s', '--snapshot', type=str,
+ help='Make an ebuild repo snapshot')
+
+ return parser
def trace(func, *args, **kwargs):
- """Run |func| through the trace module (akin to `sh -x`)"""
- import trace
-
- # Ignore common system modules we use.
- ignoremods = set((
- 'argparse',
- 'genericpath', 'gettext',
- 'locale',
- 'os',
- 'posixpath',
- 're',
- 'sre_compile', 'sre_parse', 'sys',
- 'tempfile', 'threading',
- 'UserDict',
- ))
-
- # Ignore all the system modules.
- ignoredirs = set(sys.path)
- # But try to strip out the catalyst paths.
- try:
- ignoredirs.remove(os.path.dirname(os.path.dirname(
- os.path.realpath(__file__))))
- except KeyError:
- pass
-
- tracer = trace.Trace(
- count=False,
- trace=True,
- timing=True,
- ignoremods=ignoremods,
- ignoredirs=ignoredirs)
- return tracer.runfunc(func, *args, **kwargs)
+ """Run |func| through the trace module (akin to `sh -x`)"""
+ import trace
+
+ # Ignore common system modules we use.
+ ignoremods = set((
+ 'argparse',
+ 'genericpath', 'gettext',
+ 'locale',
+ 'os',
+ 'posixpath',
+ 're',
+ 'sre_compile', 'sre_parse', 'sys',
+ 'tempfile', 'threading',
+ 'UserDict',
+ ))
+
+ # Ignore all the system modules.
+ ignoredirs = set(sys.path)
+ # But try to strip out the catalyst paths.
+ try:
+ ignoredirs.remove(os.path.dirname(os.path.dirname(
+ os.path.realpath(__file__))))
+ except KeyError:
+ pass
+
+ tracer = trace.Trace(
+ count=False,
+ trace=True,
+ timing=True,
+ ignoremods=ignoremods,
+ ignoredirs=ignoredirs)
+ return tracer.runfunc(func, *args, **kwargs)
def profile(func, *args, **kwargs):
- """Run |func| through the profile module"""
- # Should make this an option.
- sort_keys = ('time',)
+ """Run |func| through the profile module"""
+ # Should make this an option.
+ sort_keys = ('time',)
- # Collect the profile.
- import cProfile
- profiler = cProfile.Profile(subcalls=True, builtins=True)
- try:
- ret = profiler.runcall(func, *args, **kwargs)
- finally:
- # Then process the results.
- import pstats
- stats = pstats.Stats(profiler, stream=sys.stderr)
- stats.strip_dirs().sort_stats(*sort_keys).print_stats()
+ # Collect the profile.
+ import cProfile
+ profiler = cProfile.Profile(subcalls=True, builtins=True)
+ try:
+ ret = profiler.runcall(func, *args, **kwargs)
+ finally:
+ # Then process the results.
+ import pstats
+ stats = pstats.Stats(profiler, stream=sys.stderr)
+ stats.strip_dirs().sort_stats(*sort_keys).print_stats()
- return ret
+ return ret
def main(argv):
- """The main entry point for frontends to use"""
- parser = get_parser()
- opts = parser.parse_args(argv)
+ """The main entry point for frontends to use"""
+ parser = get_parser()
+ opts = parser.parse_args(argv)
- if opts.trace:
- return trace(_main, parser, opts)
- elif opts.profile:
- return profile(_main, parser, opts)
- else:
- return _main(parser, opts)
+ if opts.trace:
+ return trace(_main, parser, opts)
+ if opts.profile:
+ return profile(_main, parser, opts)
+ return _main(parser, opts)
def _main(parser, opts):
- """The "main" main function so we can trace/profile."""
- # Initialize the logger before anything else.
- log_level = opts.log_level
- if log_level is None:
- if opts.debug:
- log_level = 'debug'
- elif opts.verbose:
- log_level = 'info'
- else:
- log_level = 'notice'
- log.setup_logging(log_level, output=opts.log_file, debug=opts.debug,
- color=opts.color)
-
- # Parse the command line options.
- myconfigs = opts.configs
- if not myconfigs:
- myconfigs = [DEFAULT_CONFIG_FILE]
- myspecfile = opts.file
- mycmdline = opts.cli[:]
-
- if opts.snapshot:
- mycmdline.append('target=snapshot')
- mycmdline.append('version_stamp=' + opts.snapshot)
-
- conf_values['DEBUG'] = opts.debug
- conf_values['VERBOSE'] = opts.debug or opts.verbose
-
- options = set()
- if opts.fetchonly:
- options.add('fetch')
- if opts.purge:
- options.add('purge')
- if opts.purgeonly:
- options.add('purgeonly')
- if opts.purgetmponly:
- options.add('purgetmponly')
- if opts.clear_autoresume:
- options.add('clear-autoresume')
-
- # Make sure we have some work before moving further.
- if not myspecfile and not mycmdline:
- parser.error('please specify one of either -f or -C or -s')
-
- # made it this far so start by outputting our version info
- version()
- # import configuration file and import our main module using those settings
- parse_config(myconfigs)
-
- conf_values["options"].update(options)
- log.notice('conf_values[options] = %s', conf_values['options'])
-
- # initialize our contents generator
- contents_map = ContentsMap(CONTENTS_DEFINITIONS,
- comp_prog=conf_values['comp_prog'],
- decomp_opt=conf_values['decomp_opt'],
- list_xattrs_opt=conf_values['list_xattrs_opt'])
- conf_values["contents_map"] = contents_map
-
- # initialze our hash and contents generators
- hash_map = HashMap(HASH_DEFINITIONS)
- conf_values["hash_map"] = hash_map
-
- # initialize our (de)compression definitions
- conf_values['decompress_definitions'] = DECOMPRESS_DEFINITIONS
- conf_values['compress_definitions'] = COMPRESS_DEFINITIONS
- # TODO add capability to config/spec new definitions
-
- # Start checking that digests are valid now that hash_map is initialized
- if "digests" in conf_values:
- digests = set(conf_values['digests'].split())
- valid_digests = set(HASH_DEFINITIONS.keys())
-
- # Use the magic keyword "auto" to use all algos that are available.
- skip_missing = False
- if 'auto' in digests:
- skip_missing = True
- digests.remove('auto')
- if not digests:
- digests = set(valid_digests)
-
- # First validate all the requested digests are valid keys.
- if digests - valid_digests:
- log.critical(
- 'These are not valid digest entries:\n'
- '%s\n'
- 'Valid digest entries:\n'
- '%s',
- ', '.join(digests - valid_digests),
- ', '.join(sorted(valid_digests)))
-
- # Then check for any programs that the hash func requires.
- for digest in digests:
- try:
- process.find_binary(hash_map.hash_map[digest].cmd)
- except process.CommandNotFound:
- # In auto mode, just ignore missing support.
- if skip_missing:
- digests.remove(digest)
- continue
- log.critical(
- 'The "%s" binary needed by digest "%s" was not found. '
- 'It needs to be in your system path.',
- hash_map.hash_map[digest].cmd, digest)
-
- # Now reload the config with our updated value.
- conf_values['digests'] = ' '.join(digests)
-
- if "hash_function" in conf_values:
- if conf_values["hash_function"] not in HASH_DEFINITIONS:
- log.critical(
- '%s is not a valid hash_function entry\n'
- 'Valid hash_function entries:\n'
- '%s', conf_values["hash_function"], HASH_DEFINITIONS.keys())
- try:
- process.find_binary(hash_map.hash_map[conf_values["hash_function"]].cmd)
- except process.CommandNotFound:
- log.critical(
- 'The "%s" binary needed by hash_function "%s" was not found. '
- 'It needs to be in your system path.',
- hash_map.hash_map[conf_values['hash_function']].cmd,
- conf_values['hash_function'])
-
- # detect GNU sed
- for sed in ('/usr/bin/gsed', '/bin/sed', '/usr/bin/sed'):
- if os.path.exists(sed):
- conf_values["sed"] = sed
- break
-
- addlargs={}
-
- if myspecfile:
- log.notice("Processing spec file: %s", myspecfile)
- spec = catalyst.config.SpecParser(myspecfile)
- addlargs.update(spec.get_values())
-
- if mycmdline:
- try:
- cmdline = catalyst.config.ConfigParser()
- cmdline.parse_lines(mycmdline)
- addlargs.update(cmdline.get_values())
- except CatalystError:
- log.critical('Could not parse commandline')
-
- if "target" not in addlargs:
- raise CatalystError("Required value \"target\" not specified.")
-
- if os.getuid() != 0:
- # catalyst cannot be run as a normal user due to chroots, mounts, etc
- log.critical('This script requires root privileges to operate')
-
- # Namespaces aren't supported on *BSDs at the moment. So let's check
- # whether we're on Linux.
- if os.uname().sysname in ["Linux", "linux"]:
- # Start off by creating unique namespaces to run in. Would be nice to
- # use pid & user namespaces, but snakeoil's namespace module has signal
- # transfer issues (CTRL+C doesn't propagate), and user namespaces need
- # more work due to Gentoo build process (uses sudo/root/portage).
- namespaces.simple_unshare(
- mount=True, uts=True, ipc=True, pid=False, net=False, user=False,
- hostname='catalyst')
-
- # everything is setup, so the build is a go
- try:
- success = build_target(addlargs)
- except KeyboardInterrupt:
- log.critical('Catalyst build aborted due to user interrupt (Ctrl-C)')
- if not success:
- sys.exit(2)
- sys.exit(0)
+ """The "main" main function so we can trace/profile."""
+ # Initialize the logger before anything else.
+ log_level = opts.log_level
+ if log_level is None:
+ if opts.debug:
+ log_level = 'debug'
+ elif opts.verbose:
+ log_level = 'info'
+ else:
+ log_level = 'notice'
+ log.setup_logging(log_level, output=opts.log_file, debug=opts.debug,
+ color=opts.color)
+
+ # Parse the command line options.
+ myconfigs = opts.configs
+ if not myconfigs:
+ myconfigs = [DEFAULT_CONFIG_FILE]
+ myspecfile = opts.file
+
+ mycmdline = list()
+ if opts.snapshot:
+ mycmdline.append('target: snapshot')
+ mycmdline.append('snapshot_treeish: ' + opts.snapshot)
+
+ conf_values['DEBUG'] = opts.debug
+ conf_values['VERBOSE'] = opts.debug or opts.verbose
+
+ options = []
+ if opts.fetchonly:
+ options.append('fetch')
+ if opts.purge:
+ options.append('purge')
+ if opts.purgeonly:
+ options.append('purgeonly')
+ if opts.purgetmponly:
+ options.append('purgetmponly')
+ if opts.clear_autoresume:
+ options.append('clear-autoresume')
+ if opts.enter_chroot:
+ options.append('enter-chroot')
+
+ # Make sure we have some work before moving further.
+ if not myspecfile and not mycmdline:
+ parser.error('please specify one of either -f or -s')
+
+ # made it this far so start by outputting our version info
+ version()
+ # import configuration file and import our main module using those settings
+ parse_config(myconfigs)
+
+ conf_values["options"].extend(options)
+ log.notice('conf_values[options] = %s', conf_values['options'])
+
+ # initialize our contents generator
+ contents_map = ContentsMap(CONTENTS_DEFINITIONS,
+ comp_prog=conf_values['comp_prog'],
+ decomp_opt=conf_values['decomp_opt'],
+ list_xattrs_opt=conf_values['list_xattrs_opt'])
+ conf_values["contents_map"] = contents_map
+
+ # initialize our (de)compression definitions
+ conf_values['decompress_definitions'] = DECOMPRESS_DEFINITIONS
+ conf_values['compress_definitions'] = COMPRESS_DEFINITIONS
+ # TODO add capability to config/spec new definitions
+
+ if "digests" in conf_values:
+ valid_digests = hashlib.algorithms_available
+ digests = set(conf_values['digests'])
+ conf_values['digests'] = digests
+
+ # First validate all the requested digests are valid keys.
+ if digests - valid_digests:
+ raise CatalystError('These are not valid digest entries:\n%s\n'
+ 'Valid digest entries:\n%s' %
+ (', '.join(sorted(digests - valid_digests)),
+ ', '.join(sorted(valid_digests))))
+
+ addlargs = {}
+
+ if myspecfile:
+ log.notice("Processing spec file: %s", myspecfile)
+ spec = catalyst.config.SpecParser(myspecfile)
+ addlargs.update(spec.get_values())
+
+ if mycmdline:
+ try:
+ cmdline = catalyst.config.SpecParser()
+ cmdline.parse_lines(mycmdline)
+ addlargs.update(cmdline.get_values())
+ except CatalystError:
+ log.critical('Could not parse commandline')
+
+ if "target" not in addlargs:
+ raise CatalystError("Required value \"target\" not specified.")
+
+ if os.getuid() != 0:
+ # catalyst cannot be run as a normal user due to chroots, mounts, etc
+ log.critical('This script requires root privileges to operate')
+
+ # Start off by creating unique namespaces to run in. Would be nice to
+ # use pid & user namespaces, but snakeoil's namespace module has signal
+ # transfer issues (CTRL+C doesn't propagate), and user namespaces need
+ # more work due to Gentoo build process (uses sudo/root/portage).
+ with namespace(uts=True, ipc=True, hostname='catalyst'):
+ # everything is setup, so the build is a go
+ try:
+ success = build_target(addlargs)
+ except KeyboardInterrupt:
+ log.critical('Catalyst build aborted due to user interrupt (Ctrl-C)')
+
+ if not success:
+ sys.exit(2)
+ sys.exit(0)
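
parse_config() now reads TOML via tomli and rejects keys outside valid_config_file_values, replacing the old key=value ConfigParser format. A sketch of the new config shape and how it is loaded (values are illustrative, not recommendations):

    import tomli   # stdlib tomllib on Python 3.11+ offers the same loads()

    text = '''
    storedir = "/var/tmp/catalyst"
    jobs = 4
    options = ["kerncache", "pkgcache", "seedcache"]
    '''
    config = tomli.loads(text)
    # Keys are then checked against defaults.valid_config_file_values and
    # merged into conf_values, with %()s substitution applied afterwards.
    print(config['options'])   # ['kerncache', 'pkgcache', 'seedcache']
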
diff --git a/catalyst/support.py b/catalyst/support.py
index 9cc5d751..6945d053 100644
--- a/catalyst/support.py
+++ b/catalyst/support.py
@@ -2,107 +2,123 @@
import glob
import sys
import os
-import re
+import shutil
import time
+from pathlib import Path
from subprocess import Popen
+import libmount
+
+from portage.repository.config import RepoConfig
+from tempfile import TemporaryDirectory
+
+from snakeoil.bash import read_bash_dict
+
from catalyst import log
-from catalyst.defaults import valid_config_file_values
+from catalyst.context import namespace
-BASH_BINARY = "/bin/bash"
+BASH_BINARY = "/bin/bash"
class CatalystError(Exception):
- def __init__(self, message, print_traceback=False):
- if message:
- log.error('CatalystError: %s', message, exc_info=print_traceback)
+ def __init__(self, message, print_traceback=False):
+ if message:
+ log.error('CatalystError: %s', message, exc_info=print_traceback)
+def command(name):
+ c = shutil.which(name)
+ if not c:
+ raise CatalystError(f'"{name}" not found or is not executable')
+ return c
def cmd(mycmd, env=None, debug=False, fail_func=None):
- """Run the external |mycmd|.
-
- If |mycmd| is a string, then it's assumed to be a bash snippet and will
- be run through bash. Otherwise, it's a standalone command line and will
- be run directly.
- """
- log.debug('cmd: %r', mycmd)
- sys.stdout.flush()
-
- if env is None:
- env = {}
- if 'BASH_ENV' not in env:
- env = env.copy()
- env['BASH_ENV'] = '/etc/spork/is/not/valid/profile.env'
-
- args = []
- if isinstance(mycmd, str):
- args.append(BASH_BINARY)
- if debug:
- args.append('-x')
- args.extend(['-c', mycmd])
- else:
- args.extend(mycmd)
-
- log.debug('args: %r', args)
- proc = Popen(args, env=env)
- ret = proc.wait()
- if ret:
- if fail_func:
- log.error('cmd(%r) exited %s; running fail_func().', args, ret)
- fail_func()
- raise CatalystError('cmd(%r) exited %s' % (args, ret),
- print_traceback=False)
-
-
-def file_check(filepath, extensions=None, strict=True):
- '''Check for the files existence and that only one exists
- if others are found with various extensions
- '''
- if os.path.isfile(filepath):
- return filepath
- # it didn't exist
- # so check if there are files of that name with an extension
- files = glob.glob("%s.*" % filepath)
- # remove any false positive files
- files = [x for x in files if not x.endswith(".CONTENTS") and not x.endswith(".DIGESTS")]
- if len(files) is 1:
- return files[0]
- elif len(files) > 1 and strict:
- msg = "Ambiguos Filename: %s\nPlease specify the correct extension as well" % filepath
- raise CatalystError(msg, print_traceback=False)
- else:
- target_file = None
- for ext in extensions:
- target = filepath + "." + ext
- if target in files:
- target_file = target
- break
- if target_file:
- return target_file
- raise CatalystError("File Not Found: %s" % filepath)
-
-
-def file_locate(settings,filelist,expand=1):
- #if expand=1, non-absolute paths will be accepted and
- # expanded to os.getcwd()+"/"+localpath if file exists
- for myfile in filelist:
- if myfile not in settings:
- #filenames such as cdtar are optional, so we don't assume the variable is defined.
- pass
- else:
- if len(settings[myfile])==0:
- raise CatalystError("File variable \"" + myfile +
- "\" has a length of zero (not specified.)", print_traceback=True)
- if settings[myfile][0]=="/":
- if not os.path.exists(settings[myfile]):
- raise CatalystError("Cannot locate specified " + myfile +
- ": " + settings[myfile], print_traceback=False)
- elif expand and os.path.exists(os.getcwd()+"/"+settings[myfile]):
- settings[myfile]=os.getcwd()+"/"+settings[myfile]
- else:
- raise CatalystError("Cannot locate specified " + myfile +
- ": "+settings[myfile]+" (2nd try)" +
-"""
+ """Run the external |mycmd|.
+
+ If |mycmd| is a string, then it's assumed to be a bash snippet and will
+ be run through bash. Otherwise, it's a standalone command line and will
+ be run directly.
+ """
+ log.debug('cmd: %r', mycmd)
+ sys.stdout.flush()
+
+ if env is None:
+ env = {}
+ if 'BASH_ENV' not in env:
+ env = env.copy()
+ env['BASH_ENV'] = '/etc/spork/is/not/valid/profile.env'
+
+ args = []
+ if isinstance(mycmd, str):
+ args.append(BASH_BINARY)
+ if debug:
+ args.append('-x')
+ args.extend(['-c', mycmd])
+ else:
+ args.extend(mycmd)
+
+ log.debug('args: %r', args)
+ proc = Popen(args, env=env)
+ ret = proc.wait()
+ if ret:
+ if fail_func:
+ log.error('cmd(%r) exited %s; running fail_func().', args, ret)
+ fail_func()
+ raise CatalystError('cmd(%r) exited %s' % (args, ret),
+ print_traceback=False)
+
+
+def file_check(filepath, extensions=None):
+ '''Check for the file's existence and that only one exists
+ if others are found with various extensions
+ '''
+ if os.path.isfile(filepath):
+ return filepath
+ # it didn't exist
+ # so check if there are files of that name with an extension
+ files = glob.glob("%s.*" % filepath)
+ # remove any false positive files
+ files = [x for x in files if
+ not x.endswith(".CONTENTS") and
+ not x.endswith(".CONTENTS.gz") and
+ not x.endswith(".DIGESTS") and
+ not x.endswith(".sha256")]
+ if len(files) == 1:
+ return files[0]
+ if len(files) > 1:
+ msg = "Ambiguous Filename: %s\nPlease specify the correct extension as well" % filepath
+ raise CatalystError(msg, print_traceback=False)
+ target_file = None
+ for ext in extensions:
+ target = filepath + "." + ext
+ if target in files:
+ target_file = target
+ break
+ if target_file:
+ return target_file
+ raise CatalystError("File Not Found: %s" % filepath)
+
+
+def file_locate(settings, filelist, expand=1):
+ # if expand=1, non-absolute paths will be accepted and
+ # expanded to os.getcwd()+"/"+localpath if file exists
+ for myfile in filelist:
+ if myfile not in settings:
+ # filenames such as cdtar are optional, so we don't assume the variable is defined.
+ pass
+ else:
+ if not settings[myfile]:
+ raise CatalystError("File variable \"" + myfile +
+ "\" has a length of zero (not specified.)", print_traceback=True)
+ if settings[myfile][0] == "/":
+ if not os.path.exists(settings[myfile]):
+ raise CatalystError("Cannot locate specified " + myfile +
+ ": " + settings[myfile], print_traceback=False)
+ elif expand and os.path.exists(os.getcwd()+"/"+settings[myfile]):
+ settings[myfile] = os.getcwd()+"/"+settings[myfile]
+ else:
+ raise CatalystError("Cannot locate specified " + myfile +
+ ": "+settings[myfile]+" (2nd try)" +
+ """
Spec file format:
The spec file format is a very simple and easy-to-use format for storing data. Here's an example
@@ -122,136 +138,139 @@ that the order of multiple-value items is preserved, but the order that the item
defined are not preserved. In other words, "foo", "bar", "oni" ordering is preserved but "item1"
"item2" "item3" ordering is not, as the item strings are stored in a dictionary (hash).
""",
- print_traceback=True)
-
-
-def parse_makeconf(mylines):
- mymakeconf={}
- pos=0
- pat=re.compile("([0-9a-zA-Z_]*)=(.*)")
- while pos<len(mylines):
- if len(mylines[pos])<=1:
- #skip blanks
- pos += 1
- continue
- if mylines[pos][0] in ["#"," ","\t"]:
- #skip indented lines, comments
- pos += 1
- continue
- else:
- myline=mylines[pos]
- mobj=pat.match(myline)
- pos += 1
- if mobj.group(2):
- clean_string = re.sub(r"\"",r"",mobj.group(2))
- mymakeconf[mobj.group(1)]=clean_string
- return mymakeconf
+ print_traceback=True)
def read_makeconf(mymakeconffile):
- if os.path.exists(mymakeconffile):
- try:
- try:
- import snakeoil.bash #import snakeoil.fileutils
- return snakeoil.bash.read_bash_dict(mymakeconffile, sourcing_command="source")
- except ImportError:
- try:
- import portage.util
- return portage.util.getconfig(mymakeconffile, tolerant=1, allow_sourcing=True)
- except Exception:
- try:
- import portage_util
- return portage_util.getconfig(mymakeconffile, tolerant=1, allow_sourcing=True)
- except ImportError:
- with open(mymakeconffile, "r") as myf:
- mylines=myf.readlines()
- return parse_makeconf(mylines)
- except Exception:
- raise CatalystError("Could not parse make.conf file " +
- mymakeconffile, print_traceback=True)
- else:
- makeconf={}
- return makeconf
-
-
-def pathcompare(path1,path2):
- # Change double slashes to slash
- path1 = re.sub(r"//",r"/",path1)
- path2 = re.sub(r"//",r"/",path2)
- # Removing ending slash
- path1 = re.sub("/$","",path1)
- path2 = re.sub("/$","",path2)
-
- if path1 == path2:
- return 1
- return 0
+ if os.path.exists(mymakeconffile):
+ try:
+ return read_bash_dict(mymakeconffile, sourcing_command="source")
+ except Exception as e:
+ raise CatalystError("Could not parse make.conf file " +
+ mymakeconffile, print_traceback=True) from e
+ else:
+ makeconf = {}
+ return makeconf
+
+
+def get_repo_name_from_dir(repo_path):
+ """ Get the name of the repo at the given repo_path.
+
+ References:
+ https://wiki.gentoo.org/wiki/Repository_format/profiles/repo_name
+ https://wiki.gentoo.org/wiki/Repository_format/metadata/layout.conf#repo-name
+ """
+
+ repo_config = RepoConfig(None, {"location": repo_path})
+
+ if repo_config.missing_repo_name:
+ raise CatalystError(f"Missing name in repository {repo_path}")
+
+ return repo_config.name
+
+
+def get_repo_name_from_squash(repo_squash_path):
+ """ Get the name of the repo at the given repo_squash_path.
+ To obtain the name, the squash file is mounted to a temporary directory.
+ """
+
+ repo_name = None
+
+ # Mount squash file to temp directory in separate mount namespace
+ with TemporaryDirectory() as temp, namespace(mount=True):
+ try:
+ source = str(repo_squash_path)
+ target = str(temp)
+ fstype = 'squashfs'
+ options = 'ro,loop'
+ cxt = libmount.Context(source=source, target=target,
+ fstype=fstype, options=options)
+ cxt.mount()
+ repo_name = get_repo_name_from_dir(target)
+
+ except Exception as e:
+ raise CatalystError(f"Couldn't mount: {source}, {e}") from e
+
+ return repo_name
+
+
+def get_repo_name(repo_path):
+ if not Path(repo_path).is_dir():
+ return get_repo_name_from_squash(repo_path)
+
+ return get_repo_name_from_dir(repo_path)
def ismount(path):
- """Like os.path.ismount, but also support bind mounts"""
- if os.path.ismount(path):
- return 1
- a=os.popen("mount")
- mylines=a.readlines()
- a.close()
- for line in mylines:
- mysplit=line.split()
- if pathcompare(path,mysplit[2]):
- return 1
- return 0
-
-
-def addl_arg_parse(myspec,addlargs,requiredspec,validspec):
- "helper function to help targets parse additional arguments"
- messages = []
- for x in addlargs.keys():
- if x not in validspec and x not in valid_config_file_values and x not in requiredspec:
- messages.append("Argument \""+x+"\" not recognized.")
- else:
- myspec[x]=addlargs[x]
-
- for x in requiredspec:
- if x not in myspec:
- messages.append("Required argument \""+x+"\" not specified.")
-
- if messages:
- raise CatalystError('\n\tAlso: '.join(messages))
+ """Like os.path.ismount, but also support bind mounts"""
+ path = Path(path)
+ if path.is_mount():
+ return True
+
+ cxt = libmount.Context()
+ while (fs := cxt.mtab.next_fs()) is not None:
+ if path == Path(fs.target):
+ return True
+
+ return False
+
+
+def addl_arg_parse(myspec, addlargs, requiredspec, validspec):
+ "helper function to help targets parse additional arguments"
+ messages = []
+ for x in addlargs.keys():
+ if x not in validspec and x not in requiredspec:
+ messages.append("Argument \""+x+"\" not recognized.")
+ else:
+ myspec[x] = addlargs[x]
+
+ for x in requiredspec:
+ if x not in myspec:
+ messages.append("Required argument \""+x+"\" not specified.")
+
+ if messages:
+ raise CatalystError('\n\tAlso: '.join(messages))
def countdown(secs=5, doing="Starting"):
- # If this is non-interactive (e.g. a cronjob), then sleeping is pointless.
- if not os.isatty(sys.stdin.fileno()):
- return
-
- if secs:
- sys.stdout.write(
- ('>>> Waiting %s seconds before starting...\n'
- '>>> (Control-C to abort)...\n'
- '%s in: ') % (secs, doing))
- # py3 now creates a range object, so wrap it with list()
- ticks=list(range(secs))
- ticks.reverse()
- for sec in ticks:
- sys.stdout.write(str(sec+1)+" ")
- sys.stdout.flush()
- time.sleep(1)
- sys.stdout.write('\n')
+ # Don't sleep if this is non-interactive or no delay was requested
+ if not os.isatty(sys.stdin.fileno()) or secs == 0:
+ return
+
+ sys.stdout.write(
+ ('>>> Waiting %s seconds before starting...\n'
+ '>>> (Control-C to abort)...\n'
+ '%s in: ') % (secs, doing))
+ for sec in reversed(range(1, secs + 1)):
+ sys.stdout.write(str(sec) + " ")
+ sys.stdout.flush()
+ time.sleep(1)
+ sys.stdout.write('\n')
def normpath(mypath):
- """Clean up a little more than os.path.normpath
-
- Namely:
- - Make sure leading // is turned into /.
- - Leave trailing slash intact.
- """
- TrailingSlash=False
- if mypath[-1] == "/":
- TrailingSlash=True
- newpath = os.path.normpath(mypath)
- if len(newpath) > 1:
- if newpath[:2] == "//":
- newpath = newpath[1:]
- if TrailingSlash:
- newpath=newpath+'/'
- return newpath
+ """Clean up a little more than os.path.normpath
+
+ Namely:
+ - Make sure leading // is turned into /.
+ - Leave trailing slash intact.
+ """
+ TrailingSlash = False
+ if mypath[-1] == "/":
+ TrailingSlash = True
+ newpath = os.path.normpath(mypath)
+ if len(newpath) > 1:
+ if newpath[:2] == "//":
+ newpath = newpath[1:]
+ if TrailingSlash:
+ newpath = newpath+'/'
+ return newpath
+
+
+def sanitize_name(name: str) -> str:
+ """
+ Normalize name by replacing [.-/] with _, so it may be used as a
+ variable name in bash
+ """
+ table = str.maketrans(".-/", "___")
+ return name.translate(table)
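
The support module now carries a few small, self-contained helpers. A rough usage sketch, assuming the patched catalyst.support is importable (the argument values are made-up examples):

    from catalyst.support import command, normpath, sanitize_name

    rsync = command('rsync')                 # absolute path, or CatalystError if not on PATH
    path = normpath('//var/tmp/catalyst/')   # -> '/var/tmp/catalyst/' (trailing slash kept)
    var = sanitize_name('app-misc/foo-1.0')  # -> 'app_misc_foo_1_0', usable as a bash variable
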
diff --git a/catalyst/targets/embedded.py b/catalyst/targets/embedded.py
index e441757b..01ad035d 100644
--- a/catalyst/targets/embedded.py
+++ b/catalyst/targets/embedded.py
@@ -14,32 +14,48 @@ from catalyst import log
from catalyst.support import normpath
from catalyst.base.stagebase import StageBase
-class embedded(StageBase):
- """
- Builder class for embedded target
- """
- def __init__(self,spec,addlargs):
- self.required_values=[]
- self.valid_values=[]
- self.valid_values.extend(["embedded/empty","embedded/rm","embedded/unmerge","embedded/fs-prepare","embedded/fs-finish","embedded/mergeroot","embedded/packages","embedded/fs-type","embedded/runscript","boot/kernel","embedded/linuxrc"])
- self.valid_values.extend(["embedded/use"])
- if "embedded/fs-type" in addlargs:
- self.valid_values.append("embedded/fs-ops")
- StageBase.__init__(self,spec,addlargs)
+class embedded(StageBase):
+ """
+ Builder class for embedded target
+ """
+ required_values = frozenset()
+ valid_values = required_values | frozenset([
+ "boot/kernel",
+ "embedded/empty",
+ "embedded/fs-finish",
+ "embedded/fs-ops",
+ "embedded/fs-prepare",
+ "embedded/fs-type",
+ "embedded/linuxrc",
+ "embedded/mergeroot",
+ "embedded/packages",
+ "embedded/rm",
+ "embedded/root_overlay",
+ "embedded/runscript",
+ "embedded/unmerge",
+ "embedded/use",
+ ])
- def set_action_sequence(self):
- self.settings["action_sequence"]=["dir_setup","unpack","unpack_snapshot",\
- "config_profile_link","setup_confdir",\
- "portage_overlay","bind","chroot_setup",\
- "setup_environment","build_kernel","build_packages",\
- "bootloader","root_overlay","fsscript","unmerge",\
- "unbind","remove","empty","clean","capture","clear_autoresume"]
+ def __init__(self, spec, addlargs):
+ StageBase.__init__(self, spec, addlargs)
- def set_stage_path(self):
- self.settings["stage_path"]=normpath(self.settings["chroot_path"]+"/tmp/mergeroot")
- log.info('embedded stage path is %s', self.settings['stage_path'])
+ def set_action_sequence(self):
+ self.build_sequence.extend([
+ self.build_kernel,
+ self.build_packages,
+ self.root_overlay,
+ self.fsscript,
+ self.unmerge,
+ ])
+ self.finish_sequence.extend([
+ self.remove,
+ self.empty,
+ self.clean,
+ self.capture,
+ ])
+ self.set_completion_action_sequences()
- def set_root_path(self):
- self.settings["root_path"]=normpath("/tmp/mergeroot")
- log.info('embedded root path is %s', self.settings['root_path'])
+ def set_root_path(self):
+ self.settings["root_path"] = normpath("/tmp/mergeroot")
+ log.info('embedded root path is %s', self.settings['root_path'])
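
The string-based action_sequence list is gone; set_action_sequence() now appends bound methods to build_sequence and finish_sequence. The runner that walks those lists lives in StageBase, which is not part of this hunk, so the sketch below only illustrates the dispatch idea with a stand-in class (names are illustrative, not the real API):

    class MiniStage:
        def __init__(self):
            self.build_sequence = []
            self.finish_sequence = []
            self.set_action_sequence()

        def set_action_sequence(self):
            self.build_sequence.extend([self.unpack, self.build_packages])
            self.finish_sequence.extend([self.clean])

        def run(self):
            # a real stage would add resume points and error handling here
            for step in self.build_sequence + self.finish_sequence:
                step()

        def unpack(self): print('unpack')
        def build_packages(self): print('build_packages')
        def clean(self): print('clean')

    MiniStage().run()
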
diff --git a/catalyst/targets/grp.py b/catalyst/targets/grp.py
deleted file mode 100644
index d47654d0..00000000
--- a/catalyst/targets/grp.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""
-Gentoo Reference Platform (GRP) target
-"""
-# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation.
-
-import os
-import glob
-
-from catalyst import log
-from catalyst.support import (CatalystError, normpath, cmd)
-from catalyst.base.stagebase import StageBase
-
-
-class grp(StageBase):
- """
- The builder class for GRP (Gentoo Reference Platform) builds.
- """
- def __init__(self,spec,addlargs):
- self.required_values=["version_stamp","target","subarch",\
- "rel_type","profile","snapshot","source_subpath"]
-
- self.valid_values=self.required_values[:]
- self.valid_values.extend(["grp/use"])
- if "grp" not in addlargs:
- raise CatalystError("Required value \"grp\" not specified in spec.")
-
- self.required_values.extend(["grp"])
- if isinstance(addlargs['grp'], str):
- addlargs["grp"]=[addlargs["grp"]]
-
- if "grp/use" in addlargs:
- if isinstance(addlargs['grp/use'], str):
- addlargs["grp/use"]=[addlargs["grp/use"]]
-
- for x in addlargs["grp"]:
- self.required_values.append("grp/"+x+"/packages")
- self.required_values.append("grp/"+x+"/type")
-
- StageBase.__init__(self,spec,addlargs)
-
- def run_local(self):
- for pkgset in self.settings["grp"]:
- # example call: "grp.sh run pkgset cd1 xmms vim sys-apps/gleep"
- try:
- cmd([self.settings['controller_file'], 'run',
- self.settings['grp/' + pkgset + '/type'],
- pkgset] + self.settings['grp/' + pkgset + '/packages'],
- env=self.env)
-
- except CatalystError:
- self.unbind()
- raise CatalystError("GRP build aborting due to error.",
- print_traceback=True)
-
- def set_mounts(self):
- self.mounts.append("/tmp/grp")
- self.mountmap["/tmp/grp"]=self.settings["target_path"]
-
- def generate_digests(self):
- for pkgset in self.settings["grp"]:
- if self.settings["grp/"+pkgset+"/type"] == "pkgset":
- destdir=normpath(self.settings["target_path"]+"/"+pkgset+"/All")
- log.notice('Digesting files in the pkgset...')
- digests=glob.glob(destdir+'/*.DIGESTS')
- for i in digests:
- if os.path.exists(i):
- os.remove(i)
-
- files=os.listdir(destdir)
- #ignore files starting with '.' using list comprehension
- files=[filename for filename in files if filename[0] != '.']
- for i in files:
- if os.path.isfile(normpath(destdir+"/"+i)):
- self.gen_contents_file(normpath(destdir+"/"+i))
- self.gen_digest_file(normpath(destdir+"/"+i))
- else:
- destdir=normpath(self.settings["target_path"]+"/"+pkgset)
- log.notice('Digesting files in the srcset...')
-
- digests=glob.glob(destdir+'/*.DIGESTS')
- for i in digests:
- if os.path.exists(i):
- os.remove(i)
-
- files=os.listdir(destdir)
- #ignore files starting with '.' using list comprehension
- files=[filename for filename in files if filename[0] != '.']
- for i in files:
- if os.path.isfile(normpath(destdir+"/"+i)):
- #self.gen_contents_file(normpath(destdir+"/"+i))
- self.gen_digest_file(normpath(destdir+"/"+i))
-
- def set_action_sequence(self):
- self.settings["action_sequence"]=["unpack","unpack_snapshot",\
- "config_profile_link","setup_confdir","portage_overlay","bind","chroot_setup",\
- "setup_environment","run_local","unbind",\
- "generate_digests","clear_autoresume"]
diff --git a/catalyst/targets/livecd_stage1.py b/catalyst/targets/livecd_stage1.py
index c0a664fa..dbfa54ed 100644
--- a/catalyst/targets/livecd_stage1.py
+++ b/catalyst/targets/livecd_stage1.py
@@ -9,44 +9,50 @@ from catalyst.base.stagebase import StageBase
class livecd_stage1(StageBase):
- """
- Builder class for LiveCD stage1.
- """
- def __init__(self,spec,addlargs):
- self.required_values=["livecd/packages"]
- self.valid_values=self.required_values[:]
-
- self.valid_values.extend(["livecd/use"])
- StageBase.__init__(self,spec,addlargs)
-
- def set_action_sequence(self):
- self.settings["action_sequence"]=["unpack","unpack_snapshot",\
- "config_profile_link","setup_confdir","portage_overlay",\
- "bind","chroot_setup","setup_environment","build_packages",\
- "unbind", "clean"]
- self.set_completion_action_sequences()
-
- def set_spec_prefix(self):
- self.settings["spec_prefix"]="livecd"
-
- def set_catalyst_use(self):
- StageBase.set_catalyst_use(self)
- if "catalyst_use" in self.settings:
- self.settings["catalyst_use"].append("livecd")
- else:
- self.settings["catalyst_use"] = ["livecd"]
-
- def set_packages(self):
- StageBase.set_packages(self)
- if self.settings["spec_prefix"]+"/packages" in self.settings:
- if isinstance(self.settings[self.settings['spec_prefix']+'/packages'], str):
- self.settings[self.settings["spec_prefix"]+"/packages"] = \
- self.settings[self.settings["spec_prefix"]+"/packages"].split()
- self.settings[self.settings["spec_prefix"]+"/packages"].append("app-misc/livecd-tools")
-
- def set_pkgcache_path(self):
- if "pkgcache_path" in self.settings:
- if not isinstance(self.settings['pkgcache_path'], str):
- self.settings["pkgcache_path"] = normpath(' '.join(self.settings["pkgcache_path"]))
- else:
- StageBase.set_pkgcache_path(self)
+ """
+ Builder class for LiveCD stage1.
+ """
+ required_values = frozenset([
+ "livecd/packages",
+ ])
+ valid_values = required_values | frozenset([
+ "livecd/use",
+ ])
+
+ def __init__(self, spec, addlargs):
+ StageBase.__init__(self, spec, addlargs)
+
+ def set_action_sequence(self):
+ self.build_sequence.extend([
+ self.build_packages,
+ ])
+ self.finish_sequence.extend([
+ self.clean,
+ ])
+ self.set_completion_action_sequences()
+
+ def set_spec_prefix(self):
+ self.settings["spec_prefix"] = "livecd"
+
+ def set_catalyst_use(self):
+ StageBase.set_catalyst_use(self)
+ if "catalyst_use" in self.settings:
+ self.settings["catalyst_use"].append("livecd")
+ else:
+ self.settings["catalyst_use"] = ["livecd"]
+
+ def set_packages(self):
+ StageBase.set_packages(self)
+ if self.settings["spec_prefix"]+"/packages" in self.settings:
+ if isinstance(self.settings[self.settings['spec_prefix']+'/packages'], str):
+ self.settings[self.settings["spec_prefix"]+"/packages"] = \
+ self.settings[self.settings["spec_prefix"] +
+ "/packages"].split()
+
+ def set_pkgcache_path(self):
+ if "pkgcache_path" in self.settings:
+ if not isinstance(self.settings['pkgcache_path'], str):
+ self.settings["pkgcache_path"] = normpath(
+ ' '.join(self.settings["pkgcache_path"]))
+ else:
+ StageBase.set_pkgcache_path(self)
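
required_values and valid_values are now class-level frozensets, with valid_values built as a superset of required_values. These sets appear to be consumed by addl_arg_parse() from catalyst/support.py (shown earlier in this diff); calling that helper directly illustrates the check, with an example spec:

    from catalyst.support import addl_arg_parse

    required = frozenset(['livecd/packages'])
    valid = required | frozenset(['livecd/use'])

    settings = {}
    spec = {'livecd/packages': ['app-misc/livecd-tools']}
    # copies recognized keys into settings; raises CatalystError on
    # unknown keys or missing required keys
    addl_arg_parse(settings, spec, required, valid)
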
diff --git a/catalyst/targets/livecd_stage2.py b/catalyst/targets/livecd_stage2.py
index 18810667..1a798a1e 100644
--- a/catalyst/targets/livecd_stage2.py
+++ b/catalyst/targets/livecd_stage2.py
@@ -9,92 +9,100 @@ from catalyst.base.stagebase import StageBase
class livecd_stage2(StageBase):
- """
- Builder class for a LiveCD stage2 build.
- """
- def __init__(self,spec,addlargs):
- self.required_values=["boot/kernel"]
+ """
+ Builder class for a LiveCD stage2 build.
+ """
+ required_values = frozenset([
+ "boot/kernel",
+ ])
+ valid_values = required_values | frozenset([
+ "livecd/bootargs",
+ "livecd/cdtar",
+ "livecd/depclean",
+ "livecd/empty",
+ "livecd/fsops",
+ "livecd/fsscript",
+ "livecd/fstype",
+ "livecd/gk_mainargs",
+ "livecd/iso",
+ "livecd/linuxrc",
+ "livecd/modblacklist",
+ "livecd/motd",
+ "livecd/overlay",
+ "livecd/rcadd",
+ "livecd/rcdel",
+ "livecd/readme",
+ "livecd/rm",
+ "livecd/root_overlay",
+ "livecd/type",
+ "livecd/unmerge",
+ "livecd/users",
+ "livecd/verify",
+ "livecd/volid",
+ "repos",
+ ])
- self.valid_values=[]
+ def __init__(self, spec, addlargs):
+ StageBase.__init__(self, spec, addlargs)
+ if "livecd/type" not in self.settings:
+ self.settings["livecd/type"] = "generic-livecd"
- self.valid_values.extend(self.required_values)
- self.valid_values.extend(["livecd/cdtar","livecd/empty","livecd/rm","livecd/depclean",\
- "livecd/unmerge","livecd/iso","livecd/gk_mainargs","livecd/type",\
- "livecd/readme","livecd/motd","livecd/overlay",\
- "livecd/modblacklist","livecd/splash_theme","livecd/rcadd",\
- "livecd/rcdel","livecd/fsscript","livecd/xinitrc",\
- "livecd/root_overlay","livecd/users","portage_overlay",\
- "livecd/fstype","livecd/fsops","livecd/linuxrc","livecd/bootargs",\
- "gamecd/conf","livecd/xdm","livecd/xsession","livecd/volid","livecd/verify"])
+ file_locate(self.settings, ["cdtar", "controller_file"])
- StageBase.__init__(self,spec,addlargs)
- if "livecd/type" not in self.settings:
- self.settings["livecd/type"] = "generic-livecd"
+ def set_spec_prefix(self):
+ self.settings["spec_prefix"] = "livecd"
- file_locate(self.settings, ["cdtar","controller_file"])
+ def set_target_path(self):
+ '''Set the target path for the finished stage.
- def set_spec_prefix(self):
- self.settings["spec_prefix"]="livecd"
+ This method runs the StageBase.set_target_path method,
+ and additionally creates a staging directory for assembling
+ the final components needed to produce the iso image.
+ '''
+ super(livecd_stage2, self).set_target_path()
+ clear_dir(self.settings['target_path'])
- def set_target_path(self):
- '''Set the target path for the finished stage.
+ def run_local(self):
+ # what modules do we want to blacklist?
+ if "livecd/modblacklist" in self.settings:
+ path = normpath(self.settings["chroot_path"] +
+ "/etc/modprobe.d/blacklist.conf")
+ try:
+ with open(path, "a") as myf:
+ myf.write("\n#Added by Catalyst:")
+ # workaround until config.py is using configparser
+ if isinstance(self.settings["livecd/modblacklist"], str):
+ self.settings["livecd/modblacklist"] = self.settings[
+ "livecd/modblacklist"].split()
+ for x in self.settings["livecd/modblacklist"]:
+ myf.write("\nblacklist "+x)
+ except Exception as e:
+ raise CatalystError("Couldn't open " +
+ self.settings["chroot_path"] +
+ "/etc/modprobe.d/blacklist.conf.",
+ print_traceback=True) from e
- This method runs the StageBase.set_target_path mehtod,
- and additionally creates a staging directory for assembling
- the final components needed to produce the iso image.
- '''
- super(livecd_stage2, self).set_target_path()
- clear_dir(self.settings['target_path'])
-
- def run_local(self):
- # what modules do we want to blacklist?
- if "livecd/modblacklist" in self.settings:
- path = normpath(self.settings["chroot_path"] +
- "/etc/modprobe.d/blacklist.conf")
- try:
- with open(path, "a") as myf:
- myf.write("\n#Added by Catalyst:")
- # workaround until config.py is using configparser
- if isinstance(self.settings["livecd/modblacklist"], str):
- self.settings["livecd/modblacklist"] = self.settings[
- "livecd/modblacklist"].split()
- for x in self.settings["livecd/modblacklist"]:
- myf.write("\nblacklist "+x)
- except:
- self.unbind()
- raise CatalystError("Couldn't open " +
- self.settings["chroot_path"] +
- "/etc/modprobe.d/blacklist.conf.",
- print_traceback=True)
-
- def set_action_sequence(self):
- self.settings["action_sequence"]=[
- "unpack",
- "unpack_snapshot",
- "config_profile_link",
- "setup_confdir",
- "portage_overlay",
- "bind",
- "chroot_setup",
- "setup_environment",
- "run_local",
- "build_kernel"
- ]
- if "fetch" not in self.settings["options"]:
- self.settings["action_sequence"] += [
- "bootloader",
- "preclean",
- "livecd_update",
- "root_overlay",
- "fsscript",
- "rcupdate",
- "unmerge",
- "unbind",
- "remove",
- "empty",
- "clean",
- "target_setup",
- "setup_overlay",
- "create_iso"
- ]
- self.settings["action_sequence"].append("clear_autoresume")
+ def set_action_sequence(self):
+ self.build_sequence.extend([
+ self.run_local,
+ self.build_kernel
+ ])
+ if "fetch" not in self.settings["options"]:
+ self.build_sequence.extend([
+ self.bootloader,
+ self.preclean,
+ self.livecd_update,
+ self.root_overlay,
+ self.fsscript,
+ self.rcupdate,
+ self.unmerge,
+ ])
+ self.finish_sequence.extend([
+ self.remove,
+ self.empty,
+ self.clean,
+ self.target_setup,
+ self.setup_overlay,
+ self.create_iso,
+ ])
+ self.set_completion_action_sequences()
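
run_local() now only prepares the module blacklist; everything else moved into the build and finish sequences. A minimal sketch of the blacklist write, with example paths and module names (the real code derives the path from chroot_path):

    blacklist = ['pcspkr', 'floppy']                      # example module names
    conf = '/tmp/chroot/etc/modprobe.d/blacklist.conf'    # example chroot path

    with open(conf, 'a') as f:
        f.write('\n#Added by Catalyst:')
        for mod in blacklist:
            f.write('\nblacklist ' + mod)
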
diff --git a/catalyst/targets/netboot.py b/catalyst/targets/netboot.py
index 161300db..38d0cb45 100644
--- a/catalyst/targets/netboot.py
+++ b/catalyst/targets/netboot.py
@@ -1,132 +1,163 @@
"""
-netboot target, version 1
+netboot target, version 2
"""
# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation.
import os
from catalyst import log
-from catalyst.support import (CatalystError, normpath,
- cmd, file_locate)
+from catalyst.support import (CatalystError, normpath, cmd)
+from catalyst.fileops import (ensure_dirs, clear_dir, clear_path)
from catalyst.base.stagebase import StageBase
class netboot(StageBase):
- """
- Builder class for a netboot build.
- """
- def __init__(self,spec,addlargs):
- self.valid_values = [
- "netboot/kernel/sources",
- "netboot/kernel/config",
- "netboot/kernel/prebuilt",
-
- "netboot/busybox_config",
-
- "netboot/extra_files",
- "netboot/packages"
- ]
- self.required_values=[]
-
- try:
- # XXX: This code does nothing because the for loop below is disabled.
- if "netboot/packages" in addlargs:
- if isinstance(addlargs['netboot/packages'], str):
- _loopy = [addlargs["netboot/packages"]]
- else:
- _loopy = addlargs["netboot/packages"]
-
- # for x in loopy:
- # self.required_values.append("netboot/packages/"+x+"/files")
- except:
- raise CatalystError("configuration error in netboot/packages.")
-
- StageBase.__init__(self,spec,addlargs)
- if "netboot/busybox_config" in addlargs:
- file_locate(self.settings, ["netboot/busybox_config"])
-
- # Custom Kernel Tarball --- use that instead ...
-
- # unless the user wants specific CFLAGS/CXXFLAGS, let's use -Os
-
- for envvar in "CFLAGS", "CXXFLAGS":
- if envvar not in os.environ and envvar not in addlargs:
- self.settings[envvar] = "-Os -pipe"
-
- def set_root_path(self):
- # ROOT= variable for emerges
- self.settings["root_path"]=normpath("/tmp/image")
- log.info('netboot root path is %s', self.settings['root_path'])
-
-# def build_packages(self):
-# # build packages
-# if "netboot/packages" in self.settings:
-# try:
-# cmd([self.settings['controller_file'], 'packages'] +
-# self.settings['netboot/packages'], env=self.env)
-# except CatalystError:
-# self.unbind()
-# raise CatalystError('netboot build aborting due to error.',
-# print_traceback=True)
-
- def build_busybox(self):
- # build busybox
- if "netboot/busybox_config" in self.settings:
- mycmd = [self.settings['netboot/busybox_config']]
- else:
- mycmd = []
- try:
- cmd([self.settings['controller_file'], 'busybox'] + mycmd, env=self.env)
- except CatalystError:
- self.unbind()
- raise CatalystError("netboot build aborting due to error.",
- print_traceback=True)
-
- def copy_files_to_image(self):
- # create image
- myfiles=[]
- if "netboot/packages" in self.settings:
- if isinstance(self.settings['netboot/packages'], str):
- loopy=[self.settings["netboot/packages"]]
- else:
- loopy=self.settings["netboot/packages"]
-
- for x in loopy:
- if "netboot/packages/"+x+"/files" in self.settings:
- if isinstance(type(self.settings['netboot/packages/'+x+'/files']), str):
- myfiles.extend(self.settings["netboot/packages/"+x+"/files"])
- else:
- myfiles.append(self.settings["netboot/packages/"+x+"/files"])
-
- if "netboot/extra_files" in self.settings:
- if isinstance(self.settings['netboot/extra_files'], list):
- myfiles.extend(self.settings["netboot/extra_files"])
- else:
- myfiles.append(self.settings["netboot/extra_files"])
-
- try:
- cmd([self.settings['controller_file'], 'image'] + myfiles,
- env=self.env)
- except CatalystError:
- self.unbind()
- raise CatalystError("netboot build aborting due to error.",
- print_traceback=True)
-
- def create_netboot_files(self):
- # finish it all up
- try:
- cmd([self.settings['controller_file'], 'finish'], env=self.env)
- except CatalystError:
- self.unbind()
- raise CatalystError("netboot build aborting due to error.",
- print_traceback=True)
- # end
- log.notice('netboot: build finished !')
-
- def set_action_sequence(self):
- self.settings["action_sequence"]=["unpack","unpack_snapshot",
- "config_profile_link","setup_confdir","bind","chroot_setup",\
- "setup_environment","build_packages","build_busybox",\
- "build_kernel","copy_files_to_image",\
- "clean","create_netboot_files","unbind","clear_autoresume"]
+ """
+ Builder class for a netboot build, version 2
+ """
+ required_values = frozenset([
+ "boot/kernel",
+ ])
+ valid_values = required_values | frozenset([
+ "netboot/busybox_config",
+ "netboot/extra_files",
+ "netboot/linuxrc",
+ "netboot/overlay",
+ "netboot/packages",
+ "netboot/root_overlay",
+ "netboot/use",
+ ])
+
+ def __init__(self, spec, addlargs):
+ if "netboot/packages" in addlargs:
+ if isinstance(addlargs['netboot/packages'], str):
+ loopy = [addlargs["netboot/packages"]]
+ else:
+ loopy = addlargs["netboot/packages"]
+
+ for x in loopy:
+ self.valid_values |= {"netboot/packages/"+x+"/files"}
+
+ StageBase.__init__(self, spec, addlargs)
+ self.settings["merge_path"] = normpath("/tmp/image/")
+
+ def set_target_path(self):
+ self.settings["target_path"] = normpath(self.settings["storedir"]+"/builds/" +
+ self.settings["target_subpath"])
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("setup_target_path"):
+ log.notice(
+ 'Resume point detected, skipping target path setup operation...')
+ else:
+ # first clean up any existing target stuff
+ clear_path(self.settings['target_path'])
+ self.resume.enable("setup_target_path")
+ ensure_dirs(self.settings["storedir"]+"/builds/")
+
+ def copy_files_to_image(self):
+ # copies specific files from the buildroot to merge_path
+ myfiles = []
+
+ # check for autoresume point
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("copy_files_to_image"):
+ log.notice(
+ 'Resume point detected, skipping copy_files_to_image operation...')
+ else:
+ if "netboot/packages" in self.settings:
+ if isinstance(self.settings['netboot/packages'], str):
+ loopy = [self.settings["netboot/packages"]]
+ else:
+ loopy = self.settings["netboot/packages"]
+
+ for x in loopy:
+ if "netboot/packages/"+x+"/files" in self.settings:
+ if isinstance(self.settings['netboot/packages/'+x+'/files'], list):
+ myfiles.extend(
+ self.settings["netboot/packages/"+x+"/files"])
+ else:
+ myfiles.append(
+ self.settings["netboot/packages/"+x+"/files"])
+
+ if "netboot/extra_files" in self.settings:
+ if isinstance(self.settings['netboot/extra_files'], list):
+ myfiles.extend(self.settings["netboot/extra_files"])
+ else:
+ myfiles.append(self.settings["netboot/extra_files"])
+
+ cmd([self.settings['controller_file'], 'image'] +
+ myfiles, env=self.env)
+
+ self.resume.enable("copy_files_to_image")
+
+ def setup_overlay(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("setup_overlay"):
+ log.notice(
+ 'Resume point detected, skipping setup_overlay operation...')
+ else:
+ if "netboot/overlay" in self.settings:
+ for x in self.settings["netboot/overlay"]:
+ if os.path.exists(x):
+ cmd(['rsync', '-a', x + '/',
+ self.settings['chroot_path'] + self.settings['merge_path']],
+ env=self.env)
+ self.resume.enable("setup_overlay")
+
+ def move_kernels(self):
+ # we're done, move the kernels to builds/*
+ # no auto resume here as we always want the
+ # freshest images moved
+ cmd([self.settings['controller_file'], 'final'], env=self.env)
+ log.notice('Netboot Build Finished!')
+
+ def remove(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("remove"):
+ log.notice('Resume point detected, skipping remove operation...')
+ else:
+ if self.settings["spec_prefix"]+"/rm" in self.settings:
+ for x in self.settings[self.settings["spec_prefix"]+"/rm"]:
+ # we're going to shell out for all these cleaning operations,
+ # so we get easy glob handling
+ log.notice('netboot: removing %s', x)
+ clear_path(self.settings['chroot_path'] +
+ self.settings['merge_path'] + x)
+
+ def empty(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("empty"):
+ log.notice('Resume point detected, skipping empty operation...')
+ else:
+ if "netboot/empty" in self.settings:
+ if isinstance(self.settings['netboot/empty'], str):
+ self.settings["netboot/empty"] = self.settings["netboot/empty"].split()
+ for x in self.settings["netboot/empty"]:
+ myemp = self.settings["chroot_path"] + \
+ self.settings["merge_path"] + x
+ if not os.path.isdir(myemp):
+ log.warning(
+ 'not a directory or does not exist, skipping "empty" operation: %s', x)
+ continue
+ log.info('Emptying directory %s', x)
+ # stat the dir, delete the dir, recreate the dir and set
+ # the proper perms and ownership
+ clear_dir(myemp)
+ self.resume.enable("empty")
+
+ def set_action_sequence(self):
+ self.build_sequence.extend([
+ self.build_packages,
+ self.root_overlay,
+ self.copy_files_to_image,
+ self.setup_overlay,
+ self.build_kernel,
+ self.move_kernels,
+ self.remove,
+ self.empty,
+ ])
+ self.finish_sequence.extend([
+ self.clean,
+ self.clear_autoresume,
+ ])
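
Several steps above share the same autoresume guard: skip the step if a resume point exists, otherwise do the work and record the point. A condensed sketch of that pattern, keeping the is_enabled()/enable() calls seen in this diff and simplifying the rest:

    def guarded_step(settings, resume, name, work):
        if 'autoresume' in settings['options'] and resume.is_enabled(name):
            print('Resume point detected, skipping %s operation...' % name)
            return
        work()                 # the actual step body
        resume.enable(name)    # record the resume point for the next run
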
diff --git a/catalyst/targets/netboot2.py b/catalyst/targets/netboot2.py
deleted file mode 100644
index 87dada3b..00000000
--- a/catalyst/targets/netboot2.py
+++ /dev/null
@@ -1,161 +0,0 @@
-"""
-netboot target, version 2
-"""
-# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation.
-
-import os
-
-from catalyst import log
-from catalyst.support import (CatalystError, normpath, cmd)
-from catalyst.fileops import (ensure_dirs, clear_dir, clear_path)
-
-from catalyst.base.stagebase import StageBase
-
-
-class netboot2(StageBase):
- """
- Builder class for a netboot build, version 2
- """
- def __init__(self,spec,addlargs):
- self.required_values=[
- "boot/kernel"
- ]
- self.valid_values=self.required_values[:]
- self.valid_values.extend([
- "netboot2/packages",
- "netboot2/use",
- "netboot2/extra_files",
- "netboot2/overlay",
- "netboot2/busybox_config",
- "netboot2/root_overlay",
- "netboot2/linuxrc"
- ])
-
- try:
- if "netboot2/packages" in addlargs:
- if isinstance(addlargs['netboot2/packages'], str):
- loopy=[addlargs["netboot2/packages"]]
- else:
- loopy=addlargs["netboot2/packages"]
-
- for x in loopy:
- self.valid_values.append("netboot2/packages/"+x+"/files")
- except:
- raise CatalystError("configuration error in netboot2/packages.")
-
- StageBase.__init__(self,spec,addlargs)
- self.settings["merge_path"]=normpath("/tmp/image/")
-
- def set_target_path(self):
- self.settings["target_path"]=normpath(self.settings["storedir"]+"/builds/"+\
- self.settings["target_subpath"])
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("setup_target_path"):
- log.notice('Resume point detected, skipping target path setup operation...')
- else:
- # first clean up any existing target stuff
- clear_path(self.settings['target_path'])
- self.resume.enable("setup_target_path")
- ensure_dirs(self.settings["storedir"]+"/builds/")
-
- def copy_files_to_image(self):
- # copies specific files from the buildroot to merge_path
- myfiles=[]
-
- # check for autoresume point
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("copy_files_to_image"):
- log.notice('Resume point detected, skipping target path setup operation...')
- else:
- if "netboot2/packages" in self.settings:
- if isinstance(self.settings['netboot2/packages'], str):
- loopy=[self.settings["netboot2/packages"]]
- else:
- loopy=self.settings["netboot2/packages"]
-
- for x in loopy:
- if "netboot2/packages/"+x+"/files" in self.settings:
- if isinstance(self.settings['netboot2/packages/'+x+'/files'], list):
- myfiles.extend(self.settings["netboot2/packages/"+x+"/files"])
- else:
- myfiles.append(self.settings["netboot2/packages/"+x+"/files"])
-
- if "netboot2/extra_files" in self.settings:
- if isinstance(self.settings['netboot2/extra_files'], list):
- myfiles.extend(self.settings["netboot2/extra_files"])
- else:
- myfiles.append(self.settings["netboot2/extra_files"])
-
- try:
- cmd([self.settings['controller_file'], 'image'] +
- myfiles, env=self.env)
- except CatalystError:
- self.unbind()
- raise CatalystError("Failed to copy files to image!",
- print_traceback=True)
-
- self.resume.enable("copy_files_to_image")
-
- def setup_overlay(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("setup_overlay"):
- log.notice('Resume point detected, skipping setup_overlay operation...')
- else:
- if "netboot2/overlay" in self.settings:
- for x in self.settings["netboot2/overlay"]:
- if os.path.exists(x):
- cmd(['rsync', '-a', x + '/',
- self.settings['chroot_path'] + self.settings['merge_path']],
- env=self.env)
- self.resume.enable("setup_overlay")
-
- def move_kernels(self):
- # we're done, move the kernels to builds/*
- # no auto resume here as we always want the
- # freshest images moved
- try:
- cmd([self.settings['controller_file'], 'final'], env=self.env)
- log.notice('Netboot Build Finished!')
- except CatalystError:
- self.unbind()
- raise CatalystError("Failed to move kernel images!",
- print_traceback=True)
-
- def remove(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("remove"):
- log.notice('Resume point detected, skipping remove operation...')
- else:
- if self.settings["spec_prefix"]+"/rm" in self.settings:
- for x in self.settings[self.settings["spec_prefix"]+"/rm"]:
- # we're going to shell out for all these cleaning operations,
- # so we get easy glob handling
- log.notice('netboot2: removing %s', x)
- clear_path(self.settings['chroot_path'] +
- self.settings['merge_path'] + x)
-
- def empty(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("empty"):
- log.notice('Resume point detected, skipping empty operation...')
- else:
- if "netboot2/empty" in self.settings:
- if isinstance(self.settings['netboot2/empty'], str):
- self.settings["netboot2/empty"]=self.settings["netboot2/empty"].split()
- for x in self.settings["netboot2/empty"]:
- myemp=self.settings["chroot_path"] + self.settings["merge_path"] + x
- if not os.path.isdir(myemp):
- log.warning('not a directory or does not exist, skipping "empty" operation: %s', x)
- continue
- log.info('Emptying directory %s', x)
- # stat the dir, delete the dir, recreate the dir and set
- # the proper perms and ownership
- clear_dir(myemp)
- self.resume.enable("empty")
-
- def set_action_sequence(self):
- self.settings["action_sequence"]=["unpack","unpack_snapshot","config_profile_link",
- "setup_confdir","portage_overlay","bind","chroot_setup",\
- "setup_environment","build_packages","root_overlay",\
- "copy_files_to_image","setup_overlay","build_kernel","move_kernels",\
- "remove","empty","unbind","clean","clear_autoresume"]
diff --git a/catalyst/targets/snapshot.py b/catalyst/targets/snapshot.py
index 607e718e..ef68765d 100644
--- a/catalyst/targets/snapshot.py
+++ b/catalyst/targets/snapshot.py
@@ -2,102 +2,114 @@
Snapshot target
"""
-from DeComp.compress import CompressMap
+import subprocess
+import sys
+
+import fasteners
+
+from pathlib import Path
from catalyst import log
-from catalyst.support import normpath, cmd
from catalyst.base.targetbase import TargetBase
-from catalyst.base.genbase import GenBase
-from catalyst.fileops import (clear_dir, ensure_dirs)
-
-
-class snapshot(TargetBase, GenBase):
- """
- Builder class for snapshots.
- """
- def __init__(self,myspec,addlargs):
- self.required_values=["version_stamp","target"]
- self.valid_values=["version_stamp","target", "compression_mode"]
-
- TargetBase.__init__(self, myspec, addlargs)
- GenBase.__init__(self,myspec)
- #self.settings=myspec
- self.settings["target_subpath"]="repos"
- st=self.settings["storedir"]
- self.settings["snapshot_path"] = normpath(st + "/snapshots/"
- + self.settings["snapshot_name"]
- + self.settings["version_stamp"])
- self.settings["tmp_path"]=normpath(st+"/tmp/"+self.settings["target_subpath"])
-
- def setup(self):
- x=normpath(self.settings["storedir"]+"/snapshots")
- ensure_dirs(x)
-
- def mount_safety_check(self):
- pass
-
- def run(self):
- if "purgeonly" in self.settings["options"]:
- self.purge()
- return True
-
- if "purge" in self.settings["options"]:
- self.purge()
-
- success = True
- self.setup()
- log.notice('Creating %s tree snapshot %s from %s ...',
- self.settings["repo_name"], self.settings['version_stamp'],
- self.settings['portdir'])
-
- mytmp=self.settings["tmp_path"]
- ensure_dirs(mytmp)
-
- cmd(['rsync', '-a', '--no-o', '--no-g', '--delete',
- '--exclude=/packages/',
- '--exclude=/distfiles/',
- '--exclude=/local/',
- '--exclude=CVS/',
- '--exclude=.svn',
- '--exclude=.git/',
- '--filter=H_**/files/digest-*',
- self.settings['portdir'] + '/',
- mytmp + '/' + self.settings['repo_name'] + '/'],
- env=self.env)
-
- log.notice('Compressing %s snapshot tarball ...', self.settings["repo_name"])
- compressor = CompressMap(self.settings["compress_definitions"],
- env=self.env, default_mode=self.settings['compression_mode'],
- comp_prog=self.settings["comp_prog"])
- infodict = compressor.create_infodict(
- source=self.settings["repo_name"],
- destination=self.settings["snapshot_path"],
- basedir=mytmp,
- filename=self.settings["snapshot_path"],
- mode=self.settings["compression_mode"],
- auto_extension=True
- )
- if not compressor.compress(infodict):
- success = False
- log.error('Snapshot compression failure')
- else:
- filename = '.'.join([self.settings["snapshot_path"],
- compressor.extension(self.settings["compression_mode"])])
- log.notice('Snapshot successfully written to %s', filename)
- self.gen_contents_file(filename)
- self.gen_digest_file(filename)
- if "keepwork" not in self.settings["options"]:
- self.cleanup()
- if success:
- log.info('snapshot: complete!')
- return success
-
- def kill_chroot_pids(self):
- pass
-
- def cleanup(self):
- log.info('Cleaning up ...')
- self.purge()
-
- def purge(self):
- clear_dir(self.settings['tmp_path'])
+from catalyst.support import CatalystError, command
+
+class snapshot(TargetBase):
+ """
+ Builder class for snapshots.
+ """
+ required_values = frozenset([
+ 'target',
+ ])
+ valid_values = required_values | frozenset([
+ 'snapshot_treeish',
+ ])
+
+ def __init__(self, myspec, addlargs):
+ TargetBase.__init__(self, myspec, addlargs)
+
+ self.git = command('git')
+ self.ebuild_repo = Path(self.settings['repos_storedir'],
+ self.settings['repo_name']).with_suffix('.git')
+ self.gitdir = str(self.ebuild_repo)
+
+ def update_ebuild_repo(self) -> str:
+ repouri = 'https://anongit.gentoo.org/git/repo/sync/gentoo.git'
+
+ if self.ebuild_repo.is_dir():
+ git_cmds = [
+ [self.git, '-C', self.gitdir, 'fetch', '--quiet', '--depth=1'],
+ [self.git, '-C', self.gitdir, 'update-ref', 'HEAD', 'FETCH_HEAD'],
+ [self.git, '-C', self.gitdir, 'gc', '--quiet'],
+ ]
+ else:
+ git_cmds = [
+ [self.git, 'clone', '--quiet', '--depth=1', '--bare',
+ # Set some config options to enable git gc to clean everything
+ # except what we just fetched. See git-gc(1).
+ '-c', 'gc.reflogExpire=0',
+ '-c', 'gc.reflogExpireUnreachable=0',
+ '-c', 'gc.rerereresolved=0',
+ '-c', 'gc.rerereunresolved=0',
+ '-c', 'gc.pruneExpire=now',
+ '--branch=stable',
+ repouri, self.gitdir],
+ ]
+
+ try:
+ for cmd in git_cmds:
+ log.notice('>>> ' + ' '.join(cmd))
+ subprocess.run(cmd,
+ capture_output=True,
+ check=True,
+ encoding='utf-8',
+ close_fds=False)
+
+ sp = subprocess.run([self.git, '-C', self.gitdir, 'rev-parse', 'stable'],
+ capture_output=True,
+ check=True,
+ encoding='utf-8',
+ close_fds=False)
+ return sp.stdout.rstrip()
+
+ except subprocess.CalledProcessError as e:
+ raise CatalystError(f'{e.cmd} failed with return code '
+ f'{e.returncode}\n'
+ f'{e.output}\n') from e
+
+ def run(self):
+ if self.settings['snapshot_treeish'] == 'stable':
+ treeish = self.update_ebuild_repo()
+ else:
+ treeish = self.settings['snapshot_treeish']
+
+ self.set_snapshot(treeish)
+
+ git_cmd = [self.git, '-C', self.gitdir, 'archive', '--format=tar',
+ treeish]
+ tar2sqfs_cmd = [command('tar2sqfs'), str(self.snapshot), '-q', '-f',
+ '-j1', '-c', 'gzip']
+
+ log.notice('Creating %s tree snapshot %s from %s',
+ self.settings['repo_name'], treeish, self.gitdir)
+ log.notice('>>> ' + ' '.join([*git_cmd, '|']))
+ log.notice(' ' + ' '.join(tar2sqfs_cmd))
+
+ with fasteners.InterProcessLock(self.snapshot.with_suffix('.lock')):
+ git = subprocess.Popen(git_cmd,
+ stdout=subprocess.PIPE,
+ stderr=sys.stderr,
+ close_fds=False)
+ tar2sqfs = subprocess.Popen(tar2sqfs_cmd,
+ stdin=git.stdout,
+ stdout=sys.stdout,
+ stderr=sys.stderr,
+ close_fds=False)
+ git.stdout.close()
+ git.wait()
+ tar2sqfs.wait()
+
+ if tar2sqfs.returncode == 0:
+ log.notice('Wrote snapshot to %s', self.snapshot)
+ else:
+ log.error('Failed to create snapshot')
+ return tar2sqfs.returncode == 0
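
The snapshot target now streams git archive straight into tar2sqfs instead of rsync-ing a checkout and compressing it. A condensed sketch of the pipeline above, assuming git and tar2sqfs are on PATH:

    import subprocess, sys

    def archive_to_sqfs(gitdir, treeish, out_sqfs):
        git = subprocess.Popen(
            ['git', '-C', gitdir, 'archive', '--format=tar', treeish],
            stdout=subprocess.PIPE, stderr=sys.stderr)
        sqfs = subprocess.Popen(
            ['tar2sqfs', out_sqfs, '-q', '-f', '-j1', '-c', 'gzip'],
            stdin=git.stdout, stdout=sys.stdout, stderr=sys.stderr)
        git.stdout.close()   # so tar2sqfs sees EOF once git exits
        git.wait()
        return sqfs.wait() == 0
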
diff --git a/catalyst/targets/stage1.py b/catalyst/targets/stage1.py
index cc4366b6..0ea5f92b 100644
--- a/catalyst/targets/stage1.py
+++ b/catalyst/targets/stage1.py
@@ -3,123 +3,111 @@ stage1 target
"""
# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation.
-import os
-
-from snakeoil import fileutils
-
from catalyst import log
from catalyst.support import normpath
-from catalyst.fileops import ensure_dirs, move_path
+from catalyst.fileops import move_path
from catalyst.base.stagebase import StageBase
class stage1(StageBase):
- """
- Builder class for a stage1 installation tarball build.
- """
- def __init__(self,spec,addlargs):
- self.required_values=[]
- self.valid_values=["chost"]
- self.valid_values.extend(["update_seed","update_seed_command"])
- StageBase.__init__(self,spec,addlargs)
-
- def set_stage_path(self):
- self.settings["stage_path"]=normpath(self.settings["chroot_path"]+self.settings["root_path"])
- log.notice('stage1 stage path is %s', self.settings['stage_path'])
-
- def set_root_path(self):
- # sets the root path, relative to 'chroot_path', of the stage1 root
- self.settings["root_path"]=normpath("/tmp/stage1root")
- log.info('stage1 root path is %s', self.settings['root_path'])
-
- def set_cleanables(self):
- StageBase.set_cleanables(self)
- self.settings["cleanables"].extend([\
- "/usr/share/zoneinfo", self.settings["port_conf"] + "/package*"])
-
- # XXX: How do these override_foo() functions differ from the ones in StageBase and why aren't they in stage3_target?
-
- def override_chost(self):
- if "chost" in self.settings:
- self.settings["CHOST"] = self.settings["chost"]
-
- def override_cflags(self):
- if "cflags" in self.settings:
- self.settings["CFLAGS"] = self.settings["cflags"]
-
- def override_cxxflags(self):
- if "cxxflags" in self.settings:
- self.settings["CXXFLAGS"] = self.settings["cxxflags"]
-
- def override_ldflags(self):
- if "ldflags" in self.settings:
- self.settings["LDFLAGS"] = self.settings["ldflags"]
-
- def set_portage_overlay(self):
- StageBase.set_portage_overlay(self)
- if "portage_overlay" in self.settings:
- log.warning(
- 'Using an overlay for earlier stages could cause build issues.\n'
- "If you break it, you buy it. Don't complain to us about it.\n"
- "Don't say we did not warn you.")
-
- def base_dirs(self):
- if os.uname()[0] == "FreeBSD":
- # baselayout no longer creates the .keep files in proc and dev for FreeBSD as it
- # would create them too late...we need them earlier before bind mounting filesystems
- # since proc and dev are not writeable, so...create them here
- ensure_dirs(self.settings["stage_path"]+"/proc")
- ensure_dirs(self.settings["stage_path"]+"/dev")
- for f in ('/proc', '/dev'):
- f = self.settings['stage_path'] + f + '/.keep'
- if not os.path.isfile(f):
- try:
- fileutils.touch(f)
- except IOError:
- log.error('Failed to create %s', f)
-
- def set_mounts(self):
- # stage_path/proc probably doesn't exist yet, so create it
- ensure_dirs(self.settings["stage_path"]+"/proc")
-
- # alter the mount mappings to bind mount proc onto it
- self.mounts.append("stage1root/proc")
- self.target_mounts["stage1root/proc"] = "/tmp/stage1root/proc"
- self.mountmap["stage1root/proc"] = "/proc"
-
- def set_completion_action_sequences(self):
- '''Override function for stage1
-
- Its purpose is to move the new stage1root out of the seed stage
- and rename it to the stage1 chroot_path after cleaning the seed stage
- chroot for re-use in stage2 without the need to unpack it.
- '''
- if "fetch" not in self.settings["options"]:
- self.settings["action_sequence"].append("capture")
- if "keepwork" in self.settings["options"]:
- self.settings["action_sequence"].append("clear_autoresume")
- elif "seedcache" in self.settings["options"]:
- self.settings["action_sequence"].append("remove_autoresume")
- self.settings["action_sequence"].append("clean_stage1")
- else:
- self.settings["action_sequence"].append("remove_autoresume")
- self.settings["action_sequence"].append("remove_chroot")
- return
-
-
- def clean_stage1(self):
- '''seedcache is enabled, so salvage the /tmp/stage1root,
- remove the seed chroot'''
- log.notice('Salvaging the stage1root from the chroot path ...')
- # move the self.settings["stage_path"] outside of the self.settings["chroot_path"]
- tmp_path = normpath(self.settings["storedir"] + "/tmp/" + "stage1root")
- if move_path(self.settings["stage_path"], tmp_path):
- self.remove_chroot()
- # move it to self.settings["chroot_path"]
- if not move_path(tmp_path, self.settings["chroot_path"]):
- log.error('clean_stage1 failed, see previous log messages for details')
- return False
- log.notice('Successfully moved and cleaned the stage1root for the seedcache')
- return True
- log.error('clean_stage1 failed to move the stage1root to a temporary loation')
- return False
+ """
+ Builder class for a stage1 installation tarball build.
+ """
+ required_values = frozenset()
+ valid_values = required_values | frozenset([
+ "chost",
+ "update_seed",
+ "update_seed_command",
+ ])
+
+ def __init__(self, spec, addlargs):
+ StageBase.__init__(self, spec, addlargs)
+
+ def set_root_path(self):
+ # sets the root path, relative to 'chroot_path', of the stage1 root
+ self.settings["root_path"] = normpath("/tmp/stage1root")
+ log.info('stage1 root path is %s', self.settings['root_path'])
+
+ def set_cleanables(self):
+ StageBase.set_cleanables(self)
+ self.settings["cleanables"].extend([
+ self.settings["port_conf"] + "/package*",
+ ])
+
+ # XXX: How do these override_foo() functions differ from the ones in StageBase and why aren't they in stage3_target?
+ # XXY: It appears the difference is that these functions are actually doing something and the ones in stagebase don't :-(
+ # XXZ: I have a weird suspicion that it's the difference in capitalization
+
+ def override_chost(self):
+ if "chost" in self.settings:
+ self.settings["CHOST"] = self.settings["chost"]
+
+ def override_common_flags(self):
+ if "common_flags" in self.settings:
+ self.settings["COMMON_FLAGS"] = self.settings["common_flags"]
+
+ def override_cflags(self):
+ if "cflags" in self.settings:
+ self.settings["CFLAGS"] = self.settings["cflags"]
+
+ def override_cxxflags(self):
+ if "cxxflags" in self.settings:
+ self.settings["CXXFLAGS"] = self.settings["cxxflags"]
+
+ def override_fcflags(self):
+ if "fcflags" in self.settings:
+ self.settings["FCFLAGS"] = self.settings["fcflags"]
+
+ def override_fflags(self):
+ if "fflags" in self.settings:
+ self.settings["FFLAGS"] = self.settings["fflags"]
+
+ def override_ldflags(self):
+ if "ldflags" in self.settings:
+ self.settings["LDFLAGS"] = self.settings["ldflags"]
+
+ def set_repos(self):
+ StageBase.set_repos(self)
+ if "repos" in self.settings:
+ log.warning(
+ 'Using an overlay for earlier stages could cause build issues.\n'
+ "If you break it, you buy it. Don't complain to us about it.\n"
+ "Don't say we did not warn you.")
+
+ def set_completion_action_sequences(self):
+ '''Override function for stage1
+
+ Its purpose is to move the new stage1root out of the seed stage
+ and rename it to the stage1 chroot_path after cleaning the seed stage
+ chroot for re-use in stage2 without the need to unpack it.
+ '''
+ if "fetch" not in self.settings["options"]:
+ self.finish_sequence.append(self.capture)
+ if "keepwork" in self.settings["options"]:
+ self.finish_sequence.append(self.clear_autoresume)
+ elif "seedcache" in self.settings["options"]:
+ self.finish_sequence.append(self.remove_autoresume)
+ self.finish_sequence.append(self.clean_stage1)
+ else:
+ self.finish_sequence.append(self.remove_autoresume)
+ self.finish_sequence.append(self.remove_chroot)
+
+ def clean_stage1(self):
+ '''seedcache is enabled, so salvage the /tmp/stage1root,
+ remove the seed chroot'''
+ log.notice('Salvaging the stage1root from the chroot path ...')
+ # move the self.settings["stage_path"] outside of the self.settings["chroot_path"]
+ tmp_path = normpath(self.settings["storedir"] + "/tmp/" + "stage1root")
+ if move_path(self.settings["stage_path"], tmp_path):
+ self.remove_chroot()
+ # move it to self.settings["chroot_path"]
+ if not move_path(tmp_path, self.settings["chroot_path"]):
+ log.error(
+ 'clean_stage1 failed, see previous log messages for details')
+ return False
+ log.notice(
+ 'Successfully moved and cleaned the stage1root for the seedcache')
+ return True
+ log.error(
+ 'clean_stage1 failed to move the stage1root to a temporary location')
+ return False
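
The override_*() methods all follow the same shape: if a lowercase spec key is present, copy it into the corresponding uppercase variable (which, presumably, StageBase later exports into the chroot environment; that code is outside this hunk). The pattern collapses to a loop:

    # Equivalent sketch of the override_* pattern used in stage1/stage2
    def apply_overrides(settings, keys=('chost', 'common_flags', 'cflags',
                                        'cxxflags', 'fcflags', 'fflags',
                                        'ldflags')):
        for key in keys:
            if key in settings:
                settings[key.upper()] = settings[key]
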
diff --git a/catalyst/targets/stage2.py b/catalyst/targets/stage2.py
index 9658a493..786f1020 100644
--- a/catalyst/targets/stage2.py
+++ b/catalyst/targets/stage2.py
@@ -9,37 +9,40 @@ from catalyst.base.stagebase import StageBase
class stage2(StageBase):
- """
- Builder class for a stage2 installation tarball build.
- """
- def __init__(self,spec,addlargs):
- self.required_values=[]
- self.valid_values=["chost"]
- StageBase.__init__(self,spec,addlargs)
-
- # XXX: How do these override_foo() functions differ from the ones in
- # StageBase and why aren't they in stage3_target?
-
- def override_chost(self):
- if "chost" in self.settings:
- self.settings["CHOST"] = self.settings["chost"]
-
- def override_cflags(self):
- if "cflags" in self.settings:
- self.settings["CFLAGS"] = self.settings["cflags"]
-
- def override_cxxflags(self):
- if "cxxflags" in self.settings:
- self.settings["CXXFLAGS"] = self.settings["cxxflags"]
-
- def override_ldflags(self):
- if "ldflags" in self.settings:
- self.settings["LDFLAGS"] = self.settings["ldflags"]
-
- def set_portage_overlay(self):
- StageBase.set_portage_overlay(self)
- if "portage_overlay" in self.settings:
- log.warning(
- 'Using an overlay for earlier stages could cause build issues.\n'
- "If you break it, you buy it. Don't complain to us about it.\n"
- "Don't say we did not warn you.")
+ """
+ Builder class for a stage2 installation tarball build.
+ """
+ required_values = frozenset()
+ valid_values = required_values | frozenset([
+ "chost",
+ ])
+
+ def __init__(self, spec, addlargs):
+ StageBase.__init__(self, spec, addlargs)
+
+ # XXX: How do these override_foo() functions differ from the ones in
+ # StageBase and why aren't they in stage3_target?
+
+ def override_chost(self):
+ if "chost" in self.settings:
+ self.settings["CHOST"] = self.settings["chost"]
+
+ def override_cflags(self):
+ if "cflags" in self.settings:
+ self.settings["CFLAGS"] = self.settings["cflags"]
+
+ def override_cxxflags(self):
+ if "cxxflags" in self.settings:
+ self.settings["CXXFLAGS"] = self.settings["cxxflags"]
+
+ def override_ldflags(self):
+ if "ldflags" in self.settings:
+ self.settings["LDFLAGS"] = self.settings["ldflags"]
+
+ def set_repos(self):
+ StageBase.set_repos(self)
+ if "repos" in self.settings:
+ log.warning(
+ 'Using an overlay for earlier stages could cause build issues.\n'
+ "If you break it, you buy it. Don't complain to us about it.\n"
+ "Don't say we did not warn you.")
diff --git a/catalyst/targets/stage3.py b/catalyst/targets/stage3.py
index f0831932..d20ed679 100644
--- a/catalyst/targets/stage3.py
+++ b/catalyst/targets/stage3.py
@@ -8,21 +8,19 @@ from catalyst.base.stagebase import StageBase
class stage3(StageBase):
- """
- Builder class for a stage3 installation tarball build.
- """
- def __init__(self,spec,addlargs):
- self.required_values=[]
- self.valid_values=[]
- StageBase.__init__(self,spec,addlargs)
+ """
+ Builder class for a stage3 installation tarball build.
+ """
+ required_values = frozenset()
+ valid_values = frozenset()
- def set_portage_overlay(self):
- StageBase.set_portage_overlay(self)
- if "portage_overlay" in self.settings:
- log.warning(
- 'Using an overlay for earlier stages could cause build issues.\n'
- "If you break it, you buy it. Don't complain to us about it.\n"
- "Don't say we did not warn you.")
+ def __init__(self, spec, addlargs):
+ StageBase.__init__(self, spec, addlargs)
- def set_cleanables(self):
- StageBase.set_cleanables(self)
+ def set_repos(self):
+ StageBase.set_repos(self)
+ if "repos" in self.settings:
+ log.warning(
+ 'Using an overlay for earlier stages could cause build issues.\n'
+ "If you break it, you buy it. Don't complain to us about it.\n"
+ "Don't say we did not warn you.")
diff --git a/catalyst/targets/stage4.py b/catalyst/targets/stage4.py
index 71c1f302..35309b45 100644
--- a/catalyst/targets/stage4.py
+++ b/catalyst/targets/stage4.py
@@ -7,27 +7,57 @@ from catalyst.base.stagebase import StageBase
class stage4(StageBase):
- """
- Builder class for stage4.
- """
- def __init__(self,spec,addlargs):
- self.required_values=["stage4/packages"]
- self.valid_values=self.required_values[:]
- self.valid_values.extend(["stage4/use", "boot/kernel",
- "stage4/root_overlay", "stage4/fsscript",
- "stage4/gk_mainargs", "splash_theme",
- "portage_overlay", "stage4/rcadd", "stage4/rcdel",
- "stage4/linuxrc", "stage4/unmerge", "stage4/rm", "stage4/empty"])
- StageBase.__init__(self,spec,addlargs)
+ """
+ Builder class for stage4.
+ """
+ required_values = frozenset([
+ "stage4/packages",
+ ])
+ valid_values = required_values | frozenset([
+ "boot/kernel",
+ "repos",
+ "stage4/empty",
+ "stage4/fsscript",
+ "stage4/gk_mainargs",
+ "stage4/groups",
+ "stage4/linuxrc",
+ "stage4/rcadd",
+ "stage4/rcdel",
+ "stage4/rm",
+ "stage4/root_overlay",
+ "stage4/ssh_public_keys",
+ "stage4/unmerge",
+ "stage4/use",
+ "stage4/users",
+ ])
- def set_cleanables(self):
- self.settings["cleanables"]=["/var/tmp/*","/tmp/*"]
+ def __init__(self, spec, addlargs):
+ StageBase.__init__(self, spec, addlargs)
- def set_action_sequence(self):
- self.settings["action_sequence"] = ["unpack", "unpack_snapshot",
- "config_profile_link", "setup_confdir", "portage_overlay",
- "bind", "chroot_setup", "setup_environment", "build_packages",
- "build_kernel", "bootloader", "root_overlay", "fsscript",
- "preclean", "rcupdate", "unmerge", "unbind", "remove", "empty",
- "clean"]
- self.set_completion_action_sequences()
+ def set_cleanables(self):
+ StageBase.set_cleanables(self)
+
+ # We want to allow stage4's fsscript to generate a default
+ # /etc/resolv.conf
+ self.settings["cleanables"].remove('/etc/resolv.conf')
+
+ def set_action_sequence(self):
+ self.build_sequence.extend([
+ self.build_packages,
+ self.build_kernel,
+ self.bootloader,
+ self.root_overlay,
+ self.fsscript,
+ self.preclean,
+ self.rcupdate,
+ self.unmerge,
+ ])
+ self.finish_sequence.extend([
+ self.remove,
+ self.groups,
+ self.users,
+ self.ssh_public_keys,
+ self.empty,
+ self.clean,
+ ])
+ self.set_completion_action_sequences()
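
The new stage4 spec keys added above (stage4/users, stage4/groups, stage4/ssh_public_keys) are driven from the spec file like the existing stage4/* settings. A hypothetical fragment is shown below; the user, group, and key-file values are invented, and the exact value syntax for stage4/groups and stage4/ssh_public_keys is an assumption, while stage4/users follows the space-separated list documented in catalyst-spec(5):

    # Hypothetical values -- adjust to your own users, groups, and key files.
    stage4/users: builder guest
    stage4/groups: docker
    stage4/ssh_public_keys: /etc/catalyst/keys/builder.pub
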
diff --git a/catalyst/targets/tinderbox.py b/catalyst/targets/tinderbox.py
deleted file mode 100644
index 6908793a..00000000
--- a/catalyst/targets/tinderbox.py
+++ /dev/null
@@ -1,47 +0,0 @@
-"""
-Tinderbox target
-"""
-# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation.
-
-import os
-
-from catalyst.support import cmd, CatalystError
-from catalyst.base.stagebase import StageBase
-
-
-class tinderbox(StageBase):
- """
- Builder class for the tinderbox target
- """
- def __init__(self,spec,addlargs):
- self.required_values=["tinderbox/packages"]
- self.valid_values=self.required_values[:]
- self.valid_values.extend(["tinderbox/use"])
- StageBase.__init__(self,spec,addlargs)
-
- def run_local(self):
- # tinderbox
- # example call: "grp.sh run xmms vim sys-apps/gleep"
- try:
- if os.path.exists(self.settings["controller_file"]):
- cmd([self.settings['controller_file'], 'run'] +
- self.settings['tinderbox/packages'], env=self.env)
-
- except CatalystError:
- self.unbind()
- raise CatalystError("Tinderbox aborting due to error.",
- print_traceback=True)
-
- def set_cleanables(self):
- self.settings['cleanables'] = [
- '/etc/resolv.conf',
- '/var/tmp/*',
- self.settings['portdir'],
- ]
-
- def set_action_sequence(self):
- #Default action sequence for run method
- self.settings["action_sequence"]=["unpack","unpack_snapshot",\
- "config_profile_link","setup_confdir","bind","chroot_setup",\
- "setup_environment","run_local","preclean","unbind","clean",\
- "clear_autoresume"]
diff --git a/catalyst/version.py b/catalyst/version.py
index fbbef174..dbada51a 100644
--- a/catalyst/version.py
+++ b/catalyst/version.py
@@ -1,10 +1,3 @@
-# Maintained in full by:
-# Catalyst Team <catalyst@gentoo.org>
-# Release Engineering Team <releng@gentoo.org>
-# Copyright: 2011 Brian Harring <ferringb@gmail.com>
-# License: BSD/GPL2
-# Copied & edited by: Brian Dolbec <dolsen@gentoo.org>
-
'''Version information and/or git version information
'''
@@ -12,62 +5,61 @@ import os
from snakeoil.version import get_git_version as get_ver
-__version__= "3.0.4"
+__version__ = "3.0.7"
_ver = None
def get_git_version(version=__version__):
- """Return: a string describing our version."""
- # pylint: disable=global-statement
- global _ver
- cwd = os.path.dirname(os.path.abspath(__file__))
- version_info = get_ver(cwd)
+ """Return: a string describing our version."""
+ # pylint: disable=global-statement
+ global _ver
+ cwd = os.path.dirname(os.path.abspath(__file__))
+ version_info = get_ver(cwd)
- if not version_info:
- s = "extended version info unavailable"
- elif version_info['tag'] == __version__:
- s = 'released %s' % (version_info['date'],)
- else:
- s = ('vcs version %s, date %s' %
- (version_info['rev'], version_info['date']))
+ if not version_info:
+ s = "extended version info unavailable"
+ elif version_info['tag'] == __version__:
+ s = 'released %s' % (version_info['date'],)
+ else:
+ s = ('vcs version %s, date %s' %
+ (version_info['rev'], version_info['date']))
- _ver = 'Catalyst %s\n%s' % (version, s)
+ _ver = 'Catalyst %s\n%s' % (version, s)
- return _ver
+ return _ver
def get_version(reset=False):
- '''Returns a saved release version string or the
- generated git release version.
- '''
- # pylint: disable=global-statement
- global __version__, _ver
- if _ver and not reset:
- return _ver
- try: # getting the fixed version
- from .verinfo import version
- _ver = version
- __version__ = version.split('\n')[0].split()[1]
- except ImportError: # get the live version
- version = get_git_version()
- return version
-
+ '''Returns a saved release version string or the
+ generated git release version.
+ '''
+ # pylint: disable=global-statement
+ global __version__, _ver
+ if _ver and not reset:
+ return _ver
+ try: # getting the fixed version
+ from .verinfo import version
+ _ver = version
+ __version__ = version.split('\n')[0].split()[1]
+ except ImportError: # get the live version
+ version = get_git_version()
+ return version
def set_release_version(version, root=None):
- '''Saves the release version along with the
- git log release information
+ '''Saves the release version along with the
+ git log release information
- @param version: string
- @param root: string, optional alternate root path to save to
- '''
- #global __version__
- filename = "verinfo.py"
- if not root:
- path = os.path.join(os.path.dirname(__file__), filename)
- else:
- path = os.path.join(root, filename)
- #__version__ = version
- ver = get_git_version(version)
- with open(path, 'w') as f:
- f.write("version = {0!r}".format(ver))
+ @param version: string
+ @param root: string, optional alternate root path to save to
+ '''
+ #global __version__
+ filename = "verinfo.py"
+ if not root:
+ path = os.path.join(os.path.dirname(__file__), filename)
+ else:
+ path = os.path.join(root, filename)
+ #__version__ = version
+ ver = get_git_version(version)
+ with open(path, 'w') as f:
+ f.write("version = {0!r}".format(ver))
diff --git a/doc/HOWTO.txt b/doc/HOWTO.txt
index b1d315e6..865e348d 100644
--- a/doc/HOWTO.txt
+++ b/doc/HOWTO.txt
@@ -1,4 +1,4 @@
-Catalyst is a release-buildcing tool for Gentoo. If you use Gentoo
+Catalyst is a release-building tool for Gentoo. If you use Gentoo
and want to roll your own live CD or bootable USB stick, this is the
way to go. First, get a Gentoo development box and install the
necessary tools:
@@ -22,7 +22,7 @@ Create a snapshot of your current Portage tree (you may want to
# catalyst --snapshot 20130131
# ls /var/tmp/catalyst/snapshots/
portage-20130131.tar.bz2
- portage-20130131.tar.bz2.CONTENTS
+ portage-20130131.tar.bz2.CONTENTS.gz
portage-20130131.tar.bz2.DIGESTS
where the storage location is relative to the default
@@ -44,7 +44,7 @@ For example,
Grab the tarball and put it where catalyst will find it:
# wget http://…/stage3-amd64-20121213.tar.bz2
- # wget http://…/stage3-amd64-20121213.tar.bz2.CONTENTS
+ # wget http://…/stage3-amd64-20121213.tar.bz2.CONTENTS.gz
# wget http://…/stage3-amd64-20121213.tar.bz2.DIGESTS.asc
# sha512sum -c stage3-amd64-20121213.tar.bz2.DIGESTS.asc
# gpg --verify stage3-amd64-20121213.tar.bz2.DIGESTS.asc
@@ -54,7 +54,7 @@ where the storage dir is `$storedir/builds/$source_subpath`
(`$storedir` from `catalyst.conf`, `$source_subpath` from your
`*.spec` file).
-`.*spec` files
+`*.spec` files
~~~~~~~~~~~~~~
`.*spec` files tell catalyst about the system you're trying to build.
@@ -89,7 +89,7 @@ which will build the target and install something like:
# ls /var/tmp/catalyst/builds/default/stage1-amd64-2013.1.*
/var/tmp/catalyst/builds/default/stage1-amd64-2013.1.tar.bz2
- /var/tmp/catalyst/builds/default/stage1-amd64-2013.1.tar.bz2.CONTENTS
+ /var/tmp/catalyst/builds/default/stage1-amd64-2013.1.tar.bz2.CONTENTS.gz
/var/tmp/catalyst/builds/default/stage1-amd64-2013.1.tar.bz2.DIGESTS
The name is an expansion of
@@ -216,16 +216,11 @@ the kernel, bootloader, filesystem, and other details. See
Live USBs
---------
-The easiest way to create a live USB is currently to install a live CD
-ISO using
-http://www.syslinux.org/wiki/index.php/Doc/isolinux#HYBRID_CD-ROM.2FHARD_DISK_MODE[isohybrid]
-and `dd`:
+The easiest way to create a live USB is to install a live CD ISO using
- # isohybrid filename.iso
# dd if=filename.iso of=/dev/sdX
replacing `X` with the appropriate drive letter for your USB disk.
-See https://bugs.gentoo.org/251719[bug 251719] for details.
Running catalyst from a Git checkout
------------------------------------
diff --git a/doc/catalyst-config.5.txt b/doc/catalyst-config.5.txt
index 957ab0e6..ca9335d6 100644
--- a/doc/catalyst-config.5.txt
+++ b/doc/catalyst-config.5.txt
@@ -26,24 +26,8 @@ and empty lines are interpreted as comments. For example:
# /etc/catalyst/catalyst.conf
digests="auto"
-contents="auto"
envscript="/etc/catalyst/catalystrc"
-options="autoresume bindist kerncache pkgcache seedcache snapcache"
-
-# source repo settings
-distdir="/usr/portage/distfiles"
-portdir="/usr/portage"
-
-# target repo info
-repo_basedir="/usr"
-repo_name="portage"
-target_distdir="/usr/portage/distfiles"
-target_pkgdir="/usr/portage/packages"
-
-# other catalyst settings
-sharedir="/usr/share/catalyst"
-snapshot_cache="/var/tmp/catalyst/snapshot_cache"
-storedir="/var/tmp/catalyst"
+options="autoresume bindist kerncache pkgcache seedcache"
---------------------------------
The possible keywords and their meanings are as follows:
@@ -54,54 +38,28 @@ Basic configuration
*digests*::
Create a `.DIGESTS` file containing the hash output from any of the
supported options below. Adding them all may take a long time.
-(example: `md5 sha1 sha512 whirlpool`). See the *SUPPORTED HASHES*
-section for a list of supported hashes.
-
-*contents*::
-Create a `.CONTENTS` file listing the contents of the file. If this
-variable is empty, no `.CONTENTS` will be generated at all. Supported
-values:
+(example: `md5 sha1 sha512 whirlpool`). Enabling ANY digest will
+generate a `.sha256` file in addition to the `.DIGESTS` file. The set of
+supported hashes is dependent on the version of Python. To see the
+set of hashes supported by the version of Python in use, run
+
---
-auto::
-Strongly recommended
-
-tar-tv::
-Do `tar tvf FILE`
-
-tar-tvz::
-Do `tar tvzf FILE`
-
-tar-tvy::
-Do `tar tvyf FILE`
-
-isoinfo-l::
-Do `isoinfo -l -i FILE`
-
-isoinfo-f::
-Do `isoinfo -f -i FILE`. 'isoinfo-f' is the only option not chosen
-by the automatic algorithm.
---
+---------------------------------
+$ python3 -c 'import hashlib; print(hashlib.algorithms_available)'
+---------------------------------
*envscript*::
Environment script location, which allows users to set options such as
-HTTP proxies, `MAKEOPTS`, `GENTOO_MIRRORS`, or any other environment
-variables needed for building. The envscript file sets environment
-variables using POSIX shell notation:
+HTTP proxies, `GENTOO_MIRRORS`, or any other environment variables
+needed for building. The envscript file sets environment variables
+using POSIX shell notation:
+
---------------------------------
export FOO="bar"
---------------------------------
-*hash_function*::
-Internal hash function catalyst should use for things like autoresume,
-seedcache, etc. The default and fastest is `crc32`. You should not
-ever need to change this unless your OS does not support it. See the
-*SUPPORTED HASHES* section for a list of supported hashes.
-
-**options*::
+*options*::
Set different build-time options (example: `autoresume bindist
-kerncache pkgcache seedcache snapcache`). Supported values:
+kerncache pkgcache seedcache`). Supported values:
+
--
autoresume::
@@ -121,7 +79,7 @@ be closed invalid.
distcc::
Enable distcc support for building. You have to set distcc_hosts in
-your spec file.
+your config file.
icecream::
Enable icecream compiler cluster support for building.
@@ -140,12 +98,6 @@ seedcache::
Use the build output of a previous target if it exists to speed up the
creation of a new stage. This avoids unpacking the seed tarball.
-snapcache::
-Cache the snapshot so that it can be bind-mounted into the chroot.
-WARNING: moving parts of the portage tree from within fsscript *will*
-break your cache. The cache is unlinked before any empty or rm
-processing.
-
versioned_cache::
Name the cache directories (packagecache, kerncache) based on the version of a
spec file.
@@ -155,41 +107,50 @@ Repository settings
~~~~~~~~~~~~~~~~~~~
*distdir*::
-Source distfiles location used in generation of the stages. This is usually the
-hosts distfiles location. `/usr/portage/distfiles` should work for most
-default installations, but it should be set to match your hosts configuration.
+Defines the location of your local source file repository.
+Defaults to the host's DISTDIR.
-*portdir*::
-Source Gentoo tree location (primary repo). `/usr/portage/` should work for most
-default installations.
+*repos_storedir*::
+The directory in which git repositories exist for use by the snapshot target.
+Defaults to `${storedir}/repos`.
*repo_basedir*::
-The target repository directory to contain the rimary repo (gentoo repo) and
-any overlays. `/usr` is the historical location. But that is in the process of
-changing. `/var/gentoo/repos` is an option.
+The target repository directory to contain the primary repo (e.g.,
+gentoo repo) and any other repos. The default location is
+`/var/db/repos`.
*repo_name*::
-The name of the main repository (ie: gentoo). This has had a directory name
-of `portage` in the past. But it has an internal name of `gentoo`, which is
-what its directory name should be. This name is used in the snapshot name
-generated and also the directory name of the repository created with the
-snapshot target. The new general rule is that the directory name and its
-internal repo_name value should be the same.
+The name of the main repository (e.g. gentoo). The git repository at
+`${repos_storedir}/${repo_name}.git` will be used to produce the portdir sqfs
+snapshot.
*target_distdir*::
-This is the target distfiles directory location for the stage being created.
-This is important because this value will be stored in the stage's make.conf
-and will become the default location used if it is not edited by users.
-The default location is `/usr/portage/distfiles`.
+Defines the location of the local source file repository in the
+target. This will be written to the target's make.conf if it is not
+the default value of `/var/cache/distfiles`.
*target_pkgdir*::
-This is the target packages directory for storing binpkgs in the stage being
-built. This location is stored in the make.conf of the stage being built.
-The default location for this has typically been `/usr/portage/packages`
+Defines the location of binary packages in the target. This will be
+written to the target's make.conf if it is not the default value of
+`/var/cache/binpkgs`.
Other settings
~~~~~~~~~~~~~~
+*distcc_hosts*::
+These are the hosts used as distcc slaves when distcc is enabled in
+your `catalyst.conf` (example: `127.0.0.1 192.168.0.1`). It follows
+the same syntax as `distcc-config --set-hosts`.
+
+*jobs*::
+Integral value passed to *emerge(1)* as the parameter to --jobs and is
+used to define *MAKEOPTS* during the target build.
+
+*load-average*::
+Floating-point value passed to *emerge(1)* as the parameter to
+--load-average and is used to define *MAKEOPTS* during the target
+build.
+
*sharedir*::
Catalyst runtime script location. `/usr/share/catalyst` should work for
most default installations. If you are running catalyst from a Git
@@ -213,36 +174,13 @@ takes place in RAM. This feature requires a pretty large tmpfs
much RAM everything will fail horribly and it is not our fault.
-SUPPORTED HASHES
-----------------
-Supported hashes: adler32, crc32, crc32b, gost, haval128, haval160,
-haval192, haval224, haval256, md2, md4, md5, ripemd128, ripemd160,
-ripemd256, ripemd320, sha1, sha224, sha256, sha384, sha512, snefru128,
-snefru256, tiger, tiger128, tiger160, whirlpool.
-
-
BINARY PACKAGE DEPENDENCIES
---------------------------
This section is only important if you are using binary packages to
build your stages (by enabling the `pkgcache` option and restarting
incomplete builds).
-Before EAPI-5 introduced ABI sub-slots, the build-time compatibility
-of packages was not recorded. This leads to problems such as binary
-GCC packages built against mpc-0.8.2 (which installs libmpc.so.2)
-being installed on systems that only have mpc-1.0.1 (which installs
-libmpc.so.3), resulting in:
-
----------------------------------
-/usr/libexec/gcc/i686-pc-linux-gnu/4.6.3/cc1:
- error while loading shared libraries: libmpc.so.2:
- cannot open shared object file: No such file or directory
----------------------------------
-
-As long as there are packages in your stage that don't use ABI
-sub-slots, you may experience errors like this due to untracked ABI
-missmatches in binary packages. Packages generated by catalyst builds
-are currently namespaced:
+Packages generated by catalyst builds are namespaced:
If versioned_cache is set:
---------------------------------
@@ -253,19 +191,6 @@ Otherwise:
.../packages/<rel_type>/<target>-<subarch>/Packages
---------------------------------
-so running into these out-of-date packages is unlikely. You may run
-into problems if:
-
-* you enable `update_seed` in your stage1 spec after a previous run
- which generated packages linking against out-of-date seed libraries
- or
-* you update your snapshot and an untracked ABI dependency is bumped
- without a similar bump in the dependent package.
-
-without also bumping any of the package namespace variables in your
-spec. If you do make such a change, it's a good idea to clear the
-package cache in question and rebuild the packages from scratch.
-
FILES
-----
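
The manpage now defers to Python's hashlib for the set of available digests. As a rough illustration only (not catalyst's actual implementation, and the exact `.DIGESTS` line layout is an assumption), a digest entry for a build artifact can be produced like so:

    import hashlib

    def digest_line(path, algo="sha512", chunk=1 << 20):
        # Stream the file through hashlib and format one .DIGESTS-style entry.
        h = hashlib.new(algo)
        with open(path, "rb") as f:
            while True:
                block = f.read(chunk)
                if not block:
                    break
                h.update(block)
        return "# %s HASH\n%s  %s\n" % (algo.upper(), h.hexdigest(), path)

    # print(digest_line("stage3-amd64.tar.xz"))
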
diff --git a/doc/catalyst-spec.5.txt b/doc/catalyst-spec.5.txt
index 250acded..96f9f3bd 100644
--- a/doc/catalyst-spec.5.txt
+++ b/doc/catalyst-spec.5.txt
@@ -62,9 +62,9 @@ allowing multiple concurrent builds. Usually, `default` will suffice.
*profile*::
This is the system profile to be used by catalyst to build this target
(example: `default/linux/x86/10.0/`). It is specified as a relative
-path from `profiles` in your portage snapshot
+path from `profiles` in your portdir snapshot
-*snapshot*::
+*snapshot_treeish*::
This specifies which snapshot to use for building this target
(example: `2006.1`).
@@ -74,21 +74,16 @@ This specifies where the seed stage for this target comes from
`$storedir/builds`. The `rel_type` is also used as a path prefix for
the seed.
-*distcc_hosts*::
-These are the hosts used as distcc slaves when distcc is enabled in
-your `catalyst.conf` (example: `127.0.0.1 192.168.0.1`). It follows
-the same syntax as `distcc-config --set-hosts` and is entirely
-optional.
-
*portage_confdir*::
This is an optional directory containing portage configuration files
(example: `/etc/portage`). It follows the same syntax as
`/etc/portage` and should be consistent across all targets to minimize
problems.
-*portage_overlay*::
-This option specifies the location to a portage overlay that you would
-like to use when building this target (example: `/usr/local/portage`).
+*repos*::
+This option specifies the location of the ebuild repositories that you would
+like to have used when building this target. It takes a space-separated list
+of directory names (example: `/usr/local/portage`).
*pkgcache_path*::
This allows the optional directory containing the output packages for
@@ -109,28 +104,17 @@ This option controls quite a bit of catalyst internals and sets up
several defaults. Each type behaves slightly differently and is
explained below.
`gentoo-release-minimal`;; This creates an official minimal InstallCD.
- `gentoo-release-universal`;; This creates an official universal InstallCD.
`gentoo-release-livecd`;; This creates an official LiveCD environment.
- `gentoo-gamecd`;; This creates an official Gentoo GameCD.
`generic-livecd`;; This should be used for all non-official media.
This setting is supported by the livecd targets.
-*<target>/builddate*::
-Set the build date of the `<target>` (example: `20060107`). This
-setting is supported by the `netboot2` target.
-
*<target>/readme*::
This is for the README.txt on the root of the CD. For Gentoo
releases, we use a default README.txt, and this will be used on your
CD if you do not provide one yourself. We do not use this for the
official releases. This setting is supported by the livecd targets.
-*grp*::
-Since GRP is capable of building packages/source sets for more than
-one CD, this defines the layout for the directories under
-`$storedir/builds` (example: `src cd2`).
-
*update_seed*::
This is an optional setting supported by stage1 to tell catalyst if
it should update the seed stage or not (valid values: `yes no`).
@@ -180,18 +164,11 @@ Filesystem
*livecd/fstype*::
The fstype is used to determine what sort of CD we should build. This
is used to set the type of loopback filesystem that we will use on our
-CD. Possible values are as follows:
- `squashfs`;; This gives the best compression, but requires a kernel patch.
- `zisofs`;; This uses in-kernel compression and is supported on all platforms.
- `normal`;; This creates a loop without compression.
- `noloop`;; This copies the files to the CD directly, without using a
- loopback.
+CD. The only possible value is `squashfs`.
*livecd/fsops*::
The fsops are a list of optional parameters that can be passed to the
tool which will create the filesystem specified in *livecd/fstype*
-(example: `-root-owned`). It is valid for the following fstypes:
-`squashfs`, `jffs`, `jffs2`, and `cramfs`.
*livecd/iso*::
This is the full path and filename to the ISO image that the
@@ -209,34 +186,23 @@ Bootloader
This is required for livecd-stage2 on all arches except amd64 and x86 which can autogenerate one
if USE=system-bootloader is set.
The cdtar is essentially the bootloader for the CD. It also holds the
-main configuration for the bootloader. On x86/amd64, it also can
-include a small memory testing application, called memtest86+
-(example:
-`/usr/share/catalyst/livecd/cdtar/isolinux-2.13-memtest86+-cdtar.tar.bz2`).
+main configuration for the bootloader.
Kernel and boot issues
~~~~~~~~~~~~~~~~~~~~~~
-*<target>/splash_theme*::
-This is where you set the splash theme (example: `livecd-2006.1`).
-This theme must be present in `/etc/splash`, before the kernel has
-completed building. This setting is supported by the `stage4` and
-`livecd` targets.
-
*boot/kernel*::
This option is used to specify the number of kernels to build and also
the labels that will be used by the CD bootloader to refer to each
kernel image (example: `gentoo`).
*boot/kernel/<label>/sources*::
-*netboot/kernel/sources*::
This option tells catalyst which kernel sources to merge for this
kernel label (example: `gentoo-sources`). This can use normal portage
atoms to specify a specific version. `<label>` should match one of
the labels given to *boot/kernel*.
*boot/kernel/<label>/config*::
-*netboot/kernel/config*::
This option is the full path and filename to a kernel `.config` file
that is used by genkernel to compile the kernel this label applies to.
`<label>` should match one of the labels given to *boot/kernel*.
@@ -264,17 +230,11 @@ sources to keep the modules from overwriting each other. We do not
use this on the official media. `<label>` should match one of the
labels given to *boot/kernel*.
-*boot/kernel/<label>/machine_type*::
-This option is only for ppc64 machines (example: `ibm`). If used it
-will create the `/etc/yaboot.conf` entry used for booting an ibm
-powerpc machine. `<label>` should match one of the labels given to
-*boot/kernel*.
-
*boot/kernel/<label>/console*::
-This is only supported on ppc64 currently. This entry sets up the
-console boot parameters required for sending the output to the
-appropriate console (example: `tty0 ttyS0`). `<label>` should match
-one of the labels given to *boot/kernel*.
+This is only supported with grub currently (x86, amd64, ia64, ppc,
+sparc). This entry sets up the console boot parameters required for
+sending the output to the appropriate console (example: `tty0 ttyS0`).
+`<label>` should match one of the labels given to *boot/kernel*.
*<target>/modblacklist*::
This is for blacklisting modules from being hotplugged that are known
@@ -283,7 +243,6 @@ will keep it from being auto-loaded, even if it is detected by
hotplug. This setting is supported by the `stage4` and `livecd`
targets.
-*netboot/kernel/use*::
*boot/kernel/<label>/use*::
This option sets the `USE` flags used to build the kernel and also any
packages which are defined under this kernel label (example: `pcmcia
@@ -305,16 +264,13 @@ to enable keymap selection.
Netboot
~~~~~~~
-*<target>/busybox_config*::
+*netboot/busybox_config*::
The netboot target builds busybox for its root filesystem. This
option is where you specify the full path and filename to your busybox
-configuration (example: `/tmp/busybox.config`). This setting is
-supported by the `netboot` and `netboot2` targets.
+configuration (example: `/tmp/busybox.config`).
-*netboot/base_tarball*::
-This is the full path and filename to the tarball to use as the base
-for the netboot image (example:
-`/usr/share/catalyst/netboot/netboot-base.tar.bz2`).
+*netboot/builddate*::
+Set the build date of the `<target>` (example: `20060107`).
Runlevels
~~~~~~~~~
@@ -330,7 +286,7 @@ the `stage4` and `livecd` targets.
This is for adding init scripts to runlevels. The syntax for the init
script is the script name, followed by a pipe, followed by the
runlevel in which you want the script to run. It looks like
-`spind|default` and is space delimited. We do not use this on the
+`acpid|default` and is space delimited. We do not use this on the
official media, as catalyst sets up the runlevels correctly for us.
This setting is supported by the `stage4` and `livecd` targets.
@@ -355,7 +311,7 @@ quite a few problems with these, so be careful with whatever `USE`
flags you add here. This is generally used for adding some
functionality that we do not want on by default for all Gentoo users,
but that we want on by default in our binaries. This setting is
-supported by the `stage4`, `netboot2`, `tinderbox`, and `grp` targets.
+supported by the `stage4` and `netboot` targets.
*<target>/packages*::
This is the set of packages that we will merge into the stage4 tarball
@@ -364,22 +320,15 @@ fxload irssi wpa_supplicant`). They will be built with the `USE`
flags configured above. These packages must not depend on a
configured kernel. If the package requires a configured kernel, then
it will be defined elsewhere. This setting is supported by the
-`stage4`, `netboot2`, and `tinderbox` targets.
-
-*netboot/packages*::
-These package names are also labels used later when determining what
-files to copy into your netboot image (example: `raidtools
-e2fsprogs`).
+`stage4` and `netboot` targets.
-*<target>/packages/<label>/files*::
+*netboot/packages/<label>/files*::
This is where you tell catalyst which files from each package to copy
into the netboot image. `<label>` should match one of the labels
given to *netboot/packages*. For example:
netboot/packages/raidtools/files: /sbin/raidstart /sbin/mkraid
-This option is supported by the `netboot` and `netboot2` targets.
-
*netboot/extra_files*::
This is a list of any other files, not belonging to the above
packages, that you would wish to have copied into your netboot image
@@ -401,23 +350,6 @@ There are no checks on these packages, so be careful what you add
here. They can potentially break your target. This setting is
supported by the `stage4` and `livecd` targets.
-*grp/<label>/type*::
-This tells catalyst what type of GRP set this list of packages will
-create (example: `srcset`). Valid options here are `srcset` or
-`pkgset` to either download the source, or to build packages,
-respectively. `<label>` should match one of the labels given to
-*grp*.
-
-*grp/<label>/packages*::
-This is our list of packages that will comprise our package set
-(example: `dante tsocks sys-apps/eject minicom`). Packages listed for
-a `srcset` label should be used for grabbing things that need a
-compiled kernel to build, or things listed in the Handbook that should
-be available before the first reboot during an install. Pagekages
-listed for a `pkgset` label will be fetched, compiled, and installed
-in the target. `<label>` should match one of the labels given to
-*grp*.
-
Miscellaneous
~~~~~~~~~~~~~
@@ -456,27 +388,6 @@ This is typically used for adding the documentation, distfiles,
snapshots, and stages to the official media. These files will not be
available if `docache` is enabled, as they are outside the loop.
-*<target>/xinitrc*::
-This is used by catalyst to copy the specified file to
-`/etc/X11/xinit/xinitrc` and is used by the *<target>/type*
-`gentoo-gamecd` and `generic-livecd`. While the file will still be
-copied for any *<target>/type*, catalyst will only create the
-necessary `/etc/startx` for those types, so X will not be
-automatically started. This is useful also for setting up X on a CD
-where you do not wish X to start automatically. We do not use this on
-the release media. This setting is supported by the `stage4` and
-`livecd` targets.
-
-*livecd/xdm*::
-This is used by catalyst to determine which display manager you wish
-to become the default (example: `gdm`). This is used on the official
-Gentoo LiveCD and is valid for any `livecd/type`.
-
-*livecd/xsession*::
-This is used by catalyst to determine which X session should be
-started by default by the display manager (example: `gnome`). This is
-used on the official Gentoo LiveCD and is valid for any livecd/type.
-
*<target>/users*::
This option is used to create non-root users on your target. It takes
a space separated list of user names. These users will be added to
@@ -498,12 +409,6 @@ and is very useful in cleaning up stray files in `/etc` left over
after *stage4/unmerge* (example: `/lib/*.a /usr/lib/*.a`). This
setting is supported by the `stage4` and `livecd` targets.
-*gamecd/conf*::
-This option is only used when creating a GameCD. This specifies the
-file that contains the definitions for `GAME_NAME` and
-`GAME_EXECUTABLE`, which are used by the GameCD scripts to set some
-specific options for the game. This is not used on the release media.
-
FILES
-----
Example specfiles can be found in '/usr/share/doc/catalyst-{catalystversion}/examples'.
diff --git a/doc/catalyst.1.txt b/doc/catalyst.1.txt
index 5a7a4d64..217fc86a 100644
--- a/doc/catalyst.1.txt
+++ b/doc/catalyst.1.txt
@@ -18,9 +18,7 @@ DESCRIPTION
-----------
*catalyst* is the tool that the Gentoo Release Engineering team
utilizes to build all Gentoo Linux releases. It is capable of building
-installation stages, bootable LiveCDs, netboot images, and Gentoo Reference
-Platform (GRP) sets. *catalyst* is also capable of providing a simple
-tinderbox environment for ebuild/package testing.
+installation stages, bootable LiveCDs, and netboot images.
For more information, please visit the *catalyst* project page
on the web at 'https://wiki.gentoo.org/wiki/Catalyst'.
@@ -33,11 +31,6 @@ OPTIONS
This option is to be used to clear any autoresume points that have been saved
for this target. It is used in conjunction with *-f*, *-C*, or both.
-*--cli*|*-C* 'KEY'='VALUE' ...::
-This option is to be used in place of a specfile. All options are passed
-to *catalyst* on the commandline. Please note that this option must
-be the last option passed to *catalyst* for everything to work correctly.
-
*--config*|*-c* 'FILE'::
Tell *catalyst* to use a user-defined configuration file. A sample
configuration file is installed at '/etc/catalyst/catalyst.conf'.
@@ -46,6 +39,9 @@ configuration file is installed at '/etc/catalyst/catalyst.conf'.
*-d*::
Enable debugging mode
+*--enter-chroot*::
+Enter the chroot before starting the build.
+
*--fetchonly*::
*-F*::
This tells *catalyst* to only fetch distfiles for the given packages without
@@ -77,11 +73,6 @@ Print the version information and exit
EXAMPLES
--------
-Using the commandline option (*-C*, *--cli*) to build a Portage snapshot:
----------------------------------------------------
-# catalyst -C target=snapshot version_stamp=my_date
----------------------------------------------------
-
Using the specfile option (*-f*, *--file*) to build a stage target:
---------------------------------------------------
# catalyst -f stage1-specfile.spec
@@ -120,7 +111,7 @@ NOTES
stager projects, both of which were used to create pre-1.4 Gentoo releases.
*catalyst* was originally conceived and coded by both Daniel Robbins and
-John Davis. It is currently maintained by Chris Gianelloni and Eric Edgar and
+John Davis. It is currently maintained by the Catalyst Project Team and
has been mostly re-written.
diff --git a/doc/make_subarch_table_guidexml.py b/doc/make_subarch_table_guidexml.py
index 84624dc1..3c03f90c 100755
--- a/doc/make_subarch_table_guidexml.py
+++ b/doc/make_subarch_table_guidexml.py
@@ -1,121 +1,54 @@
#!/usr/bin/env python
-# Copyright (C) 2011 Sebastian Pipping <sebastian@pipping.org>
-# Copyright (C) 2013 Brian dolbec <dolsen@gentoo.org>
# Licensed under GPL v2 or later
-import os
-import re
+import pathlib
import sys
import textwrap
-
-
-_pattern_arch_generic = re.compile('^class arch_([a-z0-9_.-]+)\\(generic_([a-z0-9_.-]+)\\):')
-_pattern_arch_arch = re.compile('^class arch_([a-z0-9_.-]+)\\(arch_([a-z0-9_.-]+)\\):')
-_pattern_title = re.compile('"([a-z0-9_.-]+)"[ \\t]*:[ \\t]*arch_([a-z0-9_.-]+),?')
-
-_pattern_arch_genericliases = {
- 'armeb':'arm',
- 'sheb':'sh',
- 'mipsel':'mips',
- 'mips64el':'mips64',
-}
-
-
-def handle_line(line, subarch_title_to_subarch_id, subarch_id_to_pattern_arch_genericrch_id):
- x = _pattern_arch_generic.search(line)
- if x is not None:
- subarch = x.group(1)
- arch = x.group(2)
-
- # Apply alias grouping
- arch = _pattern_arch_genericliases.get(arch, arch)
-
- assert subarch not in subarch_id_to_pattern_arch_genericrch_id
- subarch_id_to_pattern_arch_genericrch_id[subarch] = arch
-
- return
-
- x = _pattern_arch_arch.search(line)
- if x is not None:
- child_subarch = x.group(1)
- parent_subarch = x.group(2)
-
- assert child_subarch not in subarch_id_to_pattern_arch_genericrch_id
- subarch_id_to_pattern_arch_genericrch_id[child_subarch] = subarch_id_to_pattern_arch_genericrch_id[parent_subarch]
-
- return
-
- for x in re.finditer(_pattern_title, line):
- subarch_title = x.group(1)
- subarch_id = x.group(2)
-
- assert subarch_title not in subarch_title_to_subarch_id
- subarch_title_to_subarch_id[subarch_title] = subarch_id
-
-
-def handle_file(fn, subarch_title_to_subarch_id, subarch_id_to_pattern_arch_genericrch_id):
- f = open(fn, 'r')
- for l in f:
- line = l.rstrip()
- handle_line(line, subarch_title_to_subarch_id, subarch_id_to_pattern_arch_genericrch_id)
- f.close()
-
-
-def dump(subarch_title_to_subarch_id, subarch_id_to_pattern_arch_genericrch_id):
- arch_id_to_subarch_titles = dict()
- for subarch_title, subarch_id in subarch_title_to_subarch_id.items():
- arch_id = subarch_id_to_pattern_arch_genericrch_id.get(subarch_id, subarch_id)
-
- if arch_id not in arch_id_to_subarch_titles:
- arch_id_to_subarch_titles[arch_id] = set()
- arch_id_to_subarch_titles[arch_id].add(subarch_title)
-
- # GuideXML version
- f = open('doc/subarches.generated.xml', 'w')
- f.write("""
-<table>
-<tr>
-<th>Architecture</th>
-<th>Sub-architectures</th>
-</tr>
-""")
- for arch_id, subarch_titles in sorted(arch_id_to_subarch_titles.items()):
- f.write("""<tr>
-<ti><c>%s</c></ti>
-<ti><c>%s</c></ti>
-</tr>
-""" % (arch_id, '\n'.join(textwrap.wrap(' '.join(sorted(subarch_titles)), 60))))
-
- f.write("""</table>
-""")
- f.close()
-
- # Asciidoc
- f = open('doc/subarches.generated.txt', 'w')
- for arch_id, subarch_titles in sorted(arch_id_to_subarch_titles.items()):
- f.write('*%s*::\n' % arch_id)
- f.write(' %s\n' % ', '.join(sorted(subarch_titles)))
- f.write('\n')
- f.close()
+import tomli
+
+
+def write_guidexml(arch_to_subarch):
+ with open('doc/subarches.generated.xml', 'w') as f:
+ f.write(textwrap.dedent("""\
+ <table>
+ <tr>
+ <th>Architecture</th>
+ <th>Sub-architectures</th>
+ </tr>
+ """))
+ for arch, subarches in sorted(arch_to_subarch.items()):
+ f.write(textwrap.dedent("""\
+ <tr>
+ <ti><c>%s</c></ti>
+ <ti><c>%s</c></ti>
+ </tr>
+ """) % (arch, '\n'.join(textwrap.wrap(' '.join(sorted(subarches)), 60))))
+ f.write("</table>\n")
+
+
+def write_asciidoc(arch_to_subarch):
+ with open('doc/subarches.generated.txt', 'w') as f:
+ for arch, subarches in sorted(arch_to_subarch.items()):
+ f.write('*%s*::\n' % arch)
+ f.write(' %s\n' % ', '.join(sorted(subarches)))
+ f.write('\n')
def main(_argv):
- subarch_title_to_subarch_id = dict()
- subarch_id_to_pattern_arch_genericrch_id = dict()
+ arch_to_subarch = {}
+ p = pathlib.Path('arch')
- for dirpath, _dirnames, filenames in os.walk('catalyst/arch'):
- for _fn in filenames:
- if not _fn.endswith('.py'):
- continue
- if _fn == '__init__.py':
- continue
+ for file in p.glob('*.toml'):
+ with file.open('rb') as f:
+ data = tomli.load(f)
- fn = os.path.join(dirpath, _fn)
- handle_file(fn, subarch_title_to_subarch_id, subarch_id_to_pattern_arch_genericrch_id)
+ for arch in [x for x in data if x != 'setarch']:
+ arch_to_subarch.update({arch: list(data[arch].keys())})
- dump(subarch_title_to_subarch_id, subarch_id_to_pattern_arch_genericrch_id)
+ write_guidexml(arch_to_subarch)
+ write_asciidoc(arch_to_subarch)
if __name__ == '__main__':
- main(sys.argv[1:])
+ main(sys.argv[1:])
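
For context, the rewritten script expects each arch/*.toml file to group subarch tables under a top-level arch table, plus a special `setarch` table that it skips. A toy example of that shape, parsed the same way; the tables are left empty here for brevity, whereas the real files carry per-subarch settings:

    import tomli

    sample = """\
    [setarch]

    [amd64.amd64]

    [amd64.znver3]
    """

    data = tomli.loads(sample)
    arch_to_subarch = {arch: list(data[arch])
                       for arch in data if arch != "setarch"}
    print(arch_to_subarch)   # {'amd64': ['amd64', 'znver3']}
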
diff --git a/doc/make_target_table.py b/doc/make_target_table.py
index c4b49ad9..5d291e33 100755
--- a/doc/make_target_table.py
+++ b/doc/make_target_table.py
@@ -8,8 +8,6 @@
# source the testpath file then run "doc/make_target_table.py"
-from __future__ import print_function
-
import glob
import locale
import os
@@ -17,31 +15,31 @@ import sys
def main(_argv):
- source_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+ source_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
- # Force consistent sorting order.
- locale.setlocale(locale.LC_COLLATE, 'C')
+ # Force consistent sorting order.
+ locale.setlocale(locale.LC_COLLATE, 'C')
- targets = list()
- for filename in glob.glob(os.path.join(source_root, 'catalyst/targets/*.py')):
- if '__init__' in filename:
- continue
+ targets = list()
+ for filename in glob.glob(os.path.join(source_root, 'catalyst/targets/*.py')):
+ if '__init__' in filename:
+ continue
- name = os.path.basename(filename)[0:-3]
- target_name = name.replace('_', '-')
- module_name = 'catalyst.targets.' + name
+ name = os.path.basename(filename)[0:-3]
+ target_name = name.replace('_', '-')
+ module_name = 'catalyst.targets.' + name
- __import__(module_name)
- module = sys.modules[module_name]
+ __import__(module_name)
+ module = sys.modules[module_name]
- targets.append((target_name, module))
+ targets.append((target_name, module))
- for target_name, module in sorted(targets, key=lambda x: x[0]):
- print('`%s`;;' % target_name)
- # Replace blank lines with `+` (asciidoc list item continuation)
- print(module.__doc__.strip().replace('\n\n', '\n+\n'))
- print('')
+ for target_name, module in sorted(targets, key=lambda x: x[0]):
+ print('`%s`;;' % target_name)
+ # Replace blank lines with `+` (asciidoc list item continuation)
+ print(module.__doc__.strip().replace('\n\n', '\n+\n'))
+ print('')
if __name__ == '__main__':
- main(sys.argv[1:])
+ main(sys.argv[1:])
diff --git a/etc/catalyst.conf b/etc/catalyst.conf
index 7d7f7aba..867c7153 100644
--- a/etc/catalyst.conf
+++ b/etc/catalyst.conf
@@ -3,135 +3,88 @@
# Simple descriptions of catalyst settings. Please refer to the online
# documentation for more information.
-# Creates a .DIGESTS file containing the hash output from any of the supported
-# options below. Adding them all may take a long time on slower systems. The
-# special "auto" keyword will skip digests that the system does not support,
-# and if it's the only keyword given, will default to enabling all digests.
-# Supported hashes:
-# adler32, crc32, crc32b, gost, haval128, haval160, haval192, haval224,
-# haval256, md2, md4, md5, ripemd128, ripemd160, ripemd256, ripemd320, sha1,
-# sha224, sha256, sha384, sha512, snefru128, snefru256, tiger, tiger128,
-# tiger160, whirlpool
-digests="sha512 whirlpool"
-
-# Creates a .CONTENTS file listing the contents of the file. Pick from any of
-# the supported options below:
-# auto - strongly recommended
-# tar_tv - does 'tar tvf FILE'
-# tar_tvz - does 'tar tvzf FILE'
-# tar_tvy - does 'tar tvyf FILE'
-# isoinfo_l - does 'isoinfo -l -i FILE'
-# isoinfo_f - does 'isoinfo -f -i FILE'
-# 'isoinfo_f' is the only option not chosen by the automatic algorithm.
-# If this variable is empty, no .CONTENTS will be generated at all.
-contents="auto"
-
-# distdir specifies where your distfiles are located. This setting should
-# work fine for most default installations.
-distdir="/usr/portage/distfiles"
+# Creates a .DIGESTS file containing the hash output from each of the selected
+# hashes.
+#
+# To see a list of supported hashes, run
+#
+# $ python3 -c 'import hashlib; print(hashlib.algorithms_available)'
+#
+digests = ["blake2b", "sha512"]
# envscript allows users to set options such as http proxies, MAKEOPTS,
# GENTOO_MIRRORS, or any other environment variables needed for building.
# The envscript file sets environment variables like so:
# export FOO="bar"
-envscript="/etc/catalyst/catalystrc"
-
-# Internal hash function catalyst should use for things like autoresume,
-# seedcache, etc. The default and fastest is crc32. You should not ever need
-# to change this unless your OS does not support it.
-# Supported hashes:
-# adler32, crc32, crc32b, gost, haval128, haval160, haval192, haval224,
-# haval256, md2, md4, md5, ripemd128, ripemd160, ripemd256, ripemd320, sha1,
-# sha224, sha256, sha384, sha512, snefru128, snefru256, tiger, tiger128,
-# tiger160, whirlpool
-hash_function="crc32"
-
-# options set different build-time options for catalyst. Some examples are:
-# autoresume = Attempt to resume a failed build, clear the autoresume flags with
-# the -a option to the catalyst cmdline. -p will clear the autoresume flags
-# as well as your pkgcache and kerncache
-# ( This option is not fully tested, bug reports welcome )
-# bindist = enables the bindist USE flag, please see package specific definition,
-# however, it is suggested to enable this if redistributing builds.
-# This optional USE flag is normally cleaned from the make.conf file on
-# completion of the stage. For a non-cleaned version,
-# use sticky-config also (see below)
-# ccache = enables build time ccache support
-# distcc = enable distcc support for building. You have to set distcc_hosts in
-# your spec file.
-# icecream = enables icecream compiler cluster support for building
-# keepwork = Prevents the removal of the working chroot path and any autoresume
-# files or points.
-# kerncache = keeps a tbz2 of your built kernel and modules (useful if your
-# build stops in livecd-stage2)
-# pkgcache = keeps a tbz2 of every built package (useful if your build stops
-# prematurely)
-# preserve_libs = enables portage to preserve used libs when unmerging packages
-# (used on installcd-stage2 and stage4 targets)
-# seedcache = use the build output of a previous target if it exists to speed up
-# the copy
-# snapcache = cache the snapshot so that it can be bind-mounted into the chroot.
-# WARNING: moving parts of the portage tree from within fsscript *will* break
-# your cache. The cache is unlinked before any empty or rm processing, though.
-# sticky-config = enables the code that will keep any internal 'catalyst_use' flags
-# added to the USE= for building the stage. These are usually added for legal
-# or specific needs in building the the early stage. Mostly it is the
-# 'bindist' USE flag option that is used for legal reasons, please see its
-# specific definition. It will also keep any /etc/portage/package.*
-# files or directories.
-#
-# (These options can be used together)
-options="autoresume bindist kerncache pkgcache seedcache snapcache"
-
-# Source portdir specifies the source portage tree used by the snapshot target.
-portdir="/usr/portage"
-
-# Target portdir setting. It needs to be in 2 parts.
-# They will be used separately, then added together where needed.
-# eg:
-# repo_basedir="/var/lib/repos"
-# repo_name="gentoo"
-#
-repo_basedir="/usr"
-repo_name="portage"
-target_distdir="/usr/portage/distfiles"
-target_pkgdir="/usr/portage/packages"
-
-# sharedir specifies where all of the catalyst runtime executables
-# and other shared lib objects are.
-# Most users do not need to change this.
-sharedir="/usr/share/catalyst"
-
-# shdir specifies where all of the catalyst runtime executables are.
-shdir="%(sharedir)s/targets"
-
-# snapshot_cache specifies where the snapshots will be cached to if snapcache is
-# enabled in the options.
-snapshot_cache="/var/tmp/catalyst/snapshot_cache"
-
-# storedir specifies where catalyst will store everything that it builds, and
-# also where it will put its temporary files and caches.
-storedir="/var/tmp/catalyst"
-
-# source_matching specifies how catalyst will match non-specific file names
-# if the filename is not found as an exact match.
-# ie: a filename without the extension specified. "/path/to/foo"
-#
-# possible values are:
-# "strict" meaning if more than one file of that name is present with any
-# file extension, then it will raise an exception.
-# "loose" meaning it will search for an existing filename with an added
-# extension from an ordered list of extensions determined from the
-# decompressor_search_order specification in the spec file or (default)
-source_matching="strict"
+envscript = "/etc/catalyst/catalystrc"
+
+# options set different build-time options for catalyst.
+options = [
+ # Attempt to resume a failed build, clear the autoresume flags with the
+ # -a option to the catalyst cmdline. -p will clear the autoresume
+ # flags as well as your pkgcache and kerncache
+ "autoresume",
+
+ # Enables the bindist USE flag, please see package specific definition,
+ # however, it is suggested to enable this if redistributing builds.
+ # This optional USE flag is normally cleaned from the make.conf file on
+ # completion of the stage. For a non-cleaned version, use
+ # sticky-config also (see below)
+ "bindist",
+
+ # Enable FEATURES=ccache
+ # "ccache",
+
+ # Enable FEATURES=distcc. Make sure to set distcc_hosts too.
+ # "distcc",
+
+ # Enable FEATURES=icecream
+ # "icecream",
+
+ # Prevents the removal of the working chroot path and any autoresume
+ # files or points.
+ # "keepwork",
+
+ # keeps a tbz2 of your built kernel and modules (useful if your
+ # build stops in livecd-stage2)
+ "kerncache",
+
+ # Build and use binary packages
+ "pkgcache",
+
+ # Use the build output of a previous target if it exists rather than
+ # the tarball
+ "seedcache",
+
+ # enables the code that will keep any internal 'catalyst_use' flags
+ # added to the USE= for building the stage. These are usually added
+ # for legal or specific needs in building the early stage. Mostly
+ # it is the 'bindist' USE flag option that is used for legal reasons,
+ # please see its specific definition. It will also keep any
+ # /etc/portage/package.* files or directories.
+ # "sticky-config",
+]
# port_logdir is where all build logs will be kept. This dir will be automatically cleaned
-# of all logs over 30 days old. If left undefined the logs will remain in the build directory
+# of ALL files over 7 days old. If left undefined the logs will remain in the build directory
# as usual and get cleaned every time a stage build is restarted.
-# port_logdir="/var/tmp/catalyst/tmp"
+# port_logdir = "/var/tmp/catalyst/logs"
# var_tmpfs_portage will mount a tmpfs for /var/tmp/portage so building takes place in RAM
# this feature requires a pretty large tmpfs ({open,libre}office needs ~8GB to build)
# WARNING: If you use too much RAM everything will fail horribly and it is not our fault.
# set size of /var/tmp/portage tmpfs in gigabytes
-# var_tmpfs_portage=16
+# var_tmpfs_portage = 16
+
+# Integral value passed to emerge as the parameter to --jobs and is used to
+# define MAKEOPTS during the target build.
+# jobs = 4
+
+# Floating-point value passed to emerge as the parameter to --load-average and
+# is used to define MAKEOPTS during the target build.
+# load-average = 4.0
+
+# If you want catalyst to drop a binrepos.conf into /etc/portage, then
+# define the binhost here. This value is concatenated with the configuration
+# option binrepo_path in the spec file to obtain the src-uri.
+# binhost = "https://gentoo.osuosl.org/releases/"
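
The comments above say jobs and load-average feed both emerge and MAKEOPTS. The real wiring lives inside catalyst itself; the following is only a sketch of the assumed mapping, not the actual implementation:

    # Assumed mapping only -- see catalyst's stage build code for the real logic.
    def makeopts(jobs=None, load_average=None):
        parts = []
        if jobs is not None:
            parts.append("-j%d" % jobs)
        if load_average is not None:
            parts.append("-l%s" % load_average)
        return " ".join(parts)

    print(makeopts(4, 4.0))   # -j4 -l4.0
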
diff --git a/etc/catalystrc b/etc/catalystrc
index bcd729af..176f106a 100755
--- a/etc/catalystrc
+++ b/etc/catalystrc
@@ -1,5 +1,4 @@
#!/bin/bash
-# This is an example catalystrc. As such, it doesn't actually *do* anything.
-# Uncomment the following to increase the number of threads used to compile.
-# export MAKEOPTS="-j16"
+# export BINPKG_COMPRESS="gzip"
+# export BINPKG_COMPRESS_FLAGS="-7"
diff --git a/examples/gamecd.conf.example b/examples/gamecd.conf.example
deleted file mode 100644
index 955eac4e..00000000
--- a/examples/gamecd.conf.example
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-# these variables are to be used for creating the menu entry and also to tell
-# the CD what to execute once X starts
-GAME_NAME="Unreal Tournament 2004 Demo"
-GAME_EXECUTABLE="/usr/games/bin/ut2004-demo"
diff --git a/examples/generic_stage_template.spec b/examples/generic_stage_template.spec
index 40a3d5c0..9d91d07d 100644
--- a/examples/generic_stage_template.spec
+++ b/examples/generic_stage_template.spec
@@ -28,7 +28,7 @@ target:
rel_type:
# This is the system profile to be used by catalyst to build this target. It is
-# specified as a relative path from /usr/portage/profiles.
+# specified as a relative path from /var/db/repos/gentoo/profiles.
# example:
# profile: default-linux/x86/2006.1
profile:
@@ -82,13 +82,6 @@ compressor_arch":
#
decompressor_search_order: lbzip2 bzip2 tar pixz xz gzip squashfs
-# These are the hosts used as distcc slaves when distcc is enabled in your
-# catalyst.conf. It follows the same syntax as distcc-config --set-hosts and
-# is entirely optional.
-# example:
-# distcc_hosts: 127.0.0.1 192.168.0.1
-distcc_hosts:
-
# This is an optional directory containing portage configuration files. It
# follows the same syntax as /etc/portage and should be consistent across all
# targets to minimize problems.
@@ -96,11 +89,12 @@ distcc_hosts:
# portage_confdir: /etc/portage
portage_confdir:
-# This option specifies the location to a portage overlay that you would like to
-# have used when building this target.
+# This option specifies the location of the ebuild repositories that you would
+# like to have used when building this target. It takes a space-separated list
+# of directory names.
# example:
-# portage_overlay: /usr/local/portage
-portage_overlay:
+# repos: /usr/local/portage
+repos:
# This allows the optional directory containing the output packages for
# catalyst. Mainly used as a way for different spec files to access the same
diff --git a/examples/grp_template.spec b/examples/grp_template.spec
deleted file mode 100644
index 71160266..00000000
--- a/examples/grp_template.spec
+++ /dev/null
@@ -1,117 +0,0 @@
-# generic GRP (Gentoo Reference Platform) specfile
-# used to build a GRP set
-
-# The subarch can be any of the supported catalyst subarches (like athlon-xp).
-# Refer to "man catalyst" or <https://wiki.gentoo.org/wiki/Catalyst>
-# for supported subarches
-# example:
-# subarch: athlon-xp
-subarch:
-
-# The version stamp is an identifier for the build. It can be anything you wish
-# it to be, but it is usually a date.
-# example:
-# version_stamp: 2006.1
-version_stamp:
-
-# The target specifies what target we want catalyst to do. For GRP, the
-# supported targets are: grp
-# example:
-# target: grp
-target: grp
-
-# The rel_type defines what kind of build we are doing. This is merely another
-# identifier, but it useful for allowing multiple concurrent builds. Usually,
-# default will suffice.
-# example:
-# rel_type: default
-rel_type:
-
-# This is the system profile to be used by catalyst to build this target. It is
-# specified as a relative path from /usr/portage/profiles.
-# example:
-# profile: default-linux/x86/2006.1
-profile:
-
-# This specifies which snapshot to use for building this target.
-# example:
-# snapshot: 2006.1
-snapshot:
-
-# This specifies where the seed stage comes from for this target, The path is
-# relative to $clst_sharedir/builds. The rel_type is also used as a path prefix
-# for the seed.
-# example:
-# default/stage3-x86-2006.1
-source_subpath:
-
-# These are the hosts used as distcc slaves when distcc is enabled in your
-# catalyst.conf. It follows the same syntax as distcc-config --set-hosts and
-# is entirely optional.
-# example:
-# distcc_hosts: 127.0.0.1 192.168.0.1
-distcc_hosts:
-
-# This is an optional directory containing portage configuration files. It
-# follows the same syntax as /etc/portage and should be consistent across all
-# targets to minimize problems.
-# example:
-# portage_confdir: /etc/portage
-portage_confdir:
-
-# This option specifies the location to a portage overlay that you would like to
-# have used when building this target.
-# example:
-# portage_overlay: /usr/local/portage
-portage_overlay:
-
-# This allows the optional directory containing the output packages for
-# catalyst. Mainly used as a way for different spec files to access the same
-# cache directory. Default behavior is for this location to be autogenerated
-# by catalyst based on the spec file.
-# example:
-# pkgcache_path: /tmp/packages
-pkgcache_path:
-
-# Since GRP is capable of building packages/source sets for more than one CD,
-# this defines the layout for the directories under $clst_sharedir/builds.
-# example:
-# grp: src cd2
-grp: src cd2
-
-# GRP is also able to build packages with customized USE settings. However, it
-# is very possible to cause quite a few problems with these, so be careful with
-# whatever USE flags you add here. This is generally used for adding some
-# functionality that we do not want on by default for all Gentoo users, but that
-# we want on by default in our binaries. Some examples would be things like the
-# socks5 USE flag.
-# example:
-# grp/use: gtk2 gnome kde qt bonobo cdr esd gtkhtml mozilla mysql perl ruby tcltk cups ldap ssl tcpd -svga
-grp/use:
-
-# This tells catalyst what type of GRP set this list of packages will create.
-# Valid options here are srcset or pkgset to either download the source, or to
-# build packages, respectively.
-# example:
-# grp/src/type: srcset
-grp/src/type:
-
-# Since this is a srcset, these ebuilds will have their distfiles fetched and
-# the distfiles will be stored in the src directory under $clst_sharedir/builds.
-# Packages will not be made out of this list. We use this for grabbing things
-# that need a compiled kernel to build, or things listed in the Handbook that
-# should be available before the first reboot during an install.
-# example:
-# grp/src/packages: gentoo-sources udev vanilla-sources rp-pppoe speedtouch fcdsl fritzcapi globespan-adsl pptpclient slmodem lvm2 evms iputils vixie-cron fcron dcron sysklogd metalog syslog-ng raidtools jfsutils xfsprogs reiserfsprogs dosfstools ntfsprogs lilo grub isdn4k-utils iproute2 wireless-tools wpa_supplicant pcmcia-cs hotplug coldplug dhcpcd slocate genkernel ipw2100 ipw2200 fxload logrotate
-grp/src/packages:
-
-# This is mostly here for completeness. This is the pkgset definition.
-# example:
-# grp/cd2/type: pkgset
-grp/cd2/type:
-
-# This is our list of packages that will comprise our package set. These are
-# fetched, compiled, and the packages are stored under $clst_sharedir/builds.
-# example:
-# grp/cd2/packages: dante tsocks sys-apps/eject minicom links acpid apmd parted whois tcpdump cvs zip unzip netcat partimage app-admin/sudo app-cdr/cdrtools gnome emacs dev-lang/ruby enlightenment kde mozilla-firefox mozilla-thunderbird xfce4 openbox fluxbox sylpheed openoffice-bin gimp xemacs xmms abiword gaim xchat pan tetex xcdroast k3b samba nmap gradm ettercap ethereal mplayer
-grp/cd2/packages:
diff --git a/examples/livecd-stage1_template.spec b/examples/livecd-stage1_template.spec
index 90ae1990..b9edb87d 100644
--- a/examples/livecd-stage1_template.spec
+++ b/examples/livecd-stage1_template.spec
@@ -28,7 +28,7 @@ target:
rel_type:
# This is the system profile to be used by catalyst to build this target. It is
-# specified as a relative path from /usr/portage/profiles.
+# specified as a relative path from /var/db/repos/gentoo/profiles.
# example:
# profile: default-linux/x86/2006.1
profile:
@@ -45,13 +45,6 @@ snapshot:
# default/stage3-x86-2006.1
source_subpath:
-# These are the hosts used as distcc slaves when distcc is enabled in your
-# catalyst.conf. It follows the same syntax as distcc-config --set-hosts and
-# is entirely optional.
-# example:
-# distcc_hosts: 127.0.0.1 192.168.0.1
-distcc_hosts:
-
# This is an optional directory containing portage configuration files. It
# follows the same syntax as /etc/portage and should be consistent across all
# targets to minimize problems.
@@ -59,11 +52,12 @@ distcc_hosts:
# portage_confdir: /etc/portage
portage_confdir:
-# This option specifies the location to a portage overlay that you would like to
-# have used when building this target.
+# This option specifies the location of the ebuild repositories that you would
+# like to have used when building this target. It takes a space-separated list
+# of directory names.
# example:
-# portage_overlay: /usr/local/portage
-portage_overlay:
+# repos: /usr/local/portage
+repos:
# This allows the optional directory containing the output packages for
# catalyst. Mainly used as a way for different spec files to access the same
diff --git a/examples/livecd-stage2_template.spec b/examples/livecd-stage2_template.spec
index 8aa454c9..b0d2ecd6 100644
--- a/examples/livecd-stage2_template.spec
+++ b/examples/livecd-stage2_template.spec
@@ -28,7 +28,7 @@ target:
rel_type:
# This is the system profile to be used by catalyst to build this target. It is
-# specified as a relative path from /usr/portage/profiles.
+# specified as a relative path from /var/db/repos/gentoo/profiles.
# example:
# profile: default-linux/x86/2006.1
profile:
@@ -45,13 +45,6 @@ snapshot:
# default/livecd-stage1-x86-2006.1
source_subpath:
-# These are the hosts used as distcc slaves when distcc is enabled in your
-# catalyst.conf. It follows the same syntax as distcc-config --set-hosts and
-# is entirely optional.
-# example:
-# distcc_hosts: 127.0.0.1 192.168.0.1
-distcc_hosts:
-
# This is an optional directory containing portage configuration files. It
# follows the same syntax as /etc/portage and should be consistent across all
# targets to minimize problems.
@@ -59,11 +52,12 @@ distcc_hosts:
# portage_confdir: /etc/portage
portage_confdir:
-# This option specifies the location to a portage overlay that you would like to
-# have used when building this target.
+# This option specifies the location of the ebuild repositories that you would
+# like to have used when building this target. It takes a space-separated list
+# of directory names.
# example:
-# portage_overlay: /usr/local/portage
-portage_overlay:
+# repos: /usr/local/portage
+repos:
# This allows the optional directory containing the output packages for
# catalyst. Mainly used as a way for different spec files to access the same
@@ -83,27 +77,20 @@ kerncache_path:
# The fstype is used to determine what sort of CD we should build. This is
# used to set the type of loopback filesystem that we will use on our CD.
-# Possible options are as follows:
-# squashfs - This gives the best compression, but requires a kernel patch.
-# zisofs - This uses in-kernel compression and is supported on all platforms.
-# normal - This creates a loop without compression.
-# noloop - This copies the files to the CD directly, without using a loopback.
+# Possible options are as follows: squashfs
# example:
# livecd/fstype: squashfs
livecd/fstype:
# The fsops are a list of optional parameters that can be passed to the tool
# which will create the filesystem specified in livecd/fstype. It is valid for
-# the following fstypes: squashfs, jffs, jffs2, cramfs
-# example:
-# livecd/fsops: -root-owned
+# the following fstypes: squashfs
livecd/fsops:
# The cdtar is essentially the bootloader for the CD. It also holds the main
-# configuration for the bootloader. On x86/amd64, it also can include a small
-# memory testing application, called memtest86+.
+# configuration for the bootloader.
# example:
-# livecd/cdtar: /usr/share/catalyst/livecd/cdtar/isolinux-2.13-memtest86+-cdtar.tar.bz2
+# livecd/cdtar: /usr/share/catalyst/livecd/cdtar/[...].cdtar.tar.bz2
livecd/cdtar:
# This is the full path and filename to the ISO image that the livecd-stage2
@@ -124,12 +111,6 @@ livecd/iso:
# livecd/fsscript:
livecd/fsscript:
-# This is where you set the splash theme. This theme must be present in
-# /etc/splash, before the kernel has completed building.
-# example:
-# livecd/splash_theme: livecd-2006.1
-livecd/splash_theme:
-
# This is a set of arguments that get passed to the bootloader for your CD. It
# is used on the x86/amd64 release media to enable keymap selection.
# example:
@@ -154,9 +135,7 @@ livecd/linuxrc:
# This option controls quite a bit of catalyst internals and sets up several
# defaults. Each type behaves slightly differently and is explained below.
# gentoo-release-minimal - This creates an official minimal InstallCD.
-# gentoo-release-universal - This creates an official universal InstallCD.
# gentoo-release-livecd - This creates an official LiveCD environment.
-# gentoo-gamecd - This creates an official Gentoo GameCD.
# generic-livecd - This should be used for all non-official media.
# example:
# livecd/type: gentoo-release-minimal
@@ -188,7 +167,7 @@ livecd/modblacklist:
# This is for adding init scripts to runlevels. The syntax for the init script
# is the script name, followed by a pipe, followed by the runlevel in which you
-# want the script to run. It looks like spind|default and is space delimited.
+# want the script to run. It looks like acpid|default and is space delimited.
# We do not use this on the official media, as catalyst sets up the runlevels
# correctly for us. Since we do not use this, it is left blank below.
# This option will automatically create missing runlevels
@@ -222,30 +201,6 @@ livecd/overlay:
# livecd/root_overlay:
livecd/root_overlay:
-# This is used by catalyst to copy the specified file to /etc/X11/xinit/xinitrc
-# and is used by the livecd/type gentoo-gamecd and generic-livecd. While the
-# file will still be copied for any livecd/type, catalyst will only create the
-# necessary /etc/startx for those types, so X will not be automatically started.
-# This is useful also for setting up X on a CD where you do not wish X to start
-# automatically. We do not use this on the release media, so it is left blank.
-# example:
-# livecd/xinitrc:
-livecd/xinitrc:
-
-# This is used by catalyst to determine which display manager you wish to
-# become the default. This is used on the official Gentoo LiveCD and is valid
-# for any livecd/type.
-# example:
-# livecd/xdm: gdm
-livecd/xdm:
-
-# This is used by catalyst to determine which X session should be started by
-# default by the display manager. This is used on the official Gentoo LiveCD
-# and is valid for any livecd/type.
-# example:
-# livecd/xsession: gnome
-livecd/xsession:
-
# This option is used to create non-root users on your CD. It takes a space
# separated list of user names. These users will be added to the following
# groups: users,wheel,audio,games,cdrom,usb
@@ -260,14 +215,6 @@ livecd/users:
# livecd/volid: Gentoo Linux 2006.1 X86
livecd/volid:
-# This option is only used when creating a GameCD. This specifies the file that
-# contains the definitions for GAME_NAME and GAME_EXECUTABLE, which are used by
-# the GameCD scripts to set some specific options for the game. This is not
-# used on the release media, and is therefore blank.
-# example:
-# gamecd/conf:
-gamecd/conf:
-
# This option is used to specify the number of kernels to build and also the
# labels that will be used by the CD bootloader to refer to each kernel image.
# example:
@@ -317,12 +264,6 @@ boot/kernel/gentoo/extraversion:
# boot/kernel/gentoo/packages: pcmcia-cs speedtouch slmodem globespan-adsl hostap-driver hostap-utils ipw2100 ipw2200 fritzcapi fcdsl cryptsetup
boot/kernel/gentoo/packages:
-# This option is only for ppc64 machines. If used it will create the /etc/yaboot.conf
-# entry used for booting a ibm powerpc machine.
-# example:
-# boot/kernel/gentoo/machine_type: ibm
-boot/kernel/gentoo/machine_type:
-
# This is only supported on ppc64 currently. This entry sets up the console=
# boot parameters required for sending the output to the appropriate console.
# example:
@@ -331,10 +272,11 @@ boot/kernel/gentoo/machine_type:
# boot/kernel/gentoo/console: tty0 ttyS0
boot/kernel/gentoo/console:
-# This feature will make sha512 checksums for every file in the iso (including files provided by livecd/overlay
-# These checksums can be verified at boot using the genkernel option "verify" added to the kernel line.
-# Currently this feature will be enabled if livecd/verify is defined to *any* value, leave commented to disable.
-#livecd/verify: true
+# Enables the generation of a isoroot_b2sums file containing a BLAKE2 digest of
+# each file in the ISO. When 'livecd/bootargs' contains 'verify' this feature
+# will be used to verify the contents of the ISO at boot time.
+# No checksums are generated if this is left commented.
+#livecd/verify: blake2
# This feature controls the depclean run after fsscript and before unmerge.
# The default is unset, and will run emerge --depclean --with-bdeps=n which results
@@ -354,11 +296,11 @@ livecd/unmerge:
# rid of files that don't belong to a particular package, or removing files from
# a package that you wish to keep, but won't need the full functionality.
# example:
-# livecd/empty: /var/tmp /var/cache /var/db /var/empty /var/lock /var/log /var/run /var/spool /var/state /tmp /usr/portage /usr/share/man /usr/share/info /usr/share/unimaps /usr/include /usr/share/zoneinfo /usr/share/dict /usr/share/doc /usr/share/ss /usr/share/state /usr/share/texinfo /usr/lib/python2.2 /usr/lib/portage /usr/share/gettext /usr/share/i18n /usr/share/rfc /usr/lib/X11/config /usr/lib/X11/etc /usr/lib/X11/doc /usr/src /usr/share/doc /usr/share/man /etc/cron.daily /etc/cron.hourly /etc/cron.monthly /etc/cron.weekly /etc/logrotate.d /etc/rsync /usr/lib/awk /usr/lib/ccache /usr/lib/gcc-config /usr/lib/nfs /usr/local /usr/diet/include /usr/diet/man /usr/share/consolefonts/partialfonts /usr/share/consoletrans /usr/share/emacs /usr/share/gcc-data /usr/share/genkernel /etc/bootsplash/gentoo /etc/bootsplash/gentoo-highquality /etc/splash/gentoo /etc/splash/emergence /usr/share/gnuconfig /usr/share/lcms /usr/share/locale /etc/skel
+# livecd/empty: /var/tmp /var/cache /var/db /var/empty /var/lock /var/log /var/run /var/spool /var/state /tmp /var/db/repos/gentoo /usr/share/man /usr/share/info /usr/share/unimaps /usr/include /usr/share/zoneinfo /usr/share/dict /usr/share/doc /usr/share/ss /usr/share/state /usr/share/texinfo /usr/lib/python2.2 /usr/lib/portage /usr/share/gettext /usr/share/i18n /usr/share/rfc /usr/lib/X11/config /usr/lib/X11/etc /usr/lib/X11/doc /usr/src /usr/share/doc /usr/share/man /etc/cron.daily /etc/cron.hourly /etc/cron.monthly /etc/cron.weekly /etc/logrotate.d /etc/rsync /usr/lib/awk /usr/lib/ccache /usr/lib/gcc-config /usr/lib/nfs /usr/local /usr/diet/include /usr/diet/man /usr/share/consolefonts/partialfonts /usr/share/consoletrans /usr/share/emacs /usr/share/gcc-data /usr/share/genkernel /usr/share/gnuconfig /usr/share/lcms /usr/share/locale /etc/skel
livecd/empty:
# This option tells catalyst to clean specific files from the filesystem and is
# very useful in cleaning up stray files in /etc left over after livecd/unmerge.
# example:
-# livecd/rm: /lib/*.a /usr/lib/*.a /usr/lib/gcc-lib/*/*/libgcj* /etc/dispatch-conf.conf /etc/etc-update.conf /etc/*- /etc/issue* /etc/portage/make.conf /etc/man.conf /etc/*.old /root/.viminfo /usr/sbin/bootsplash* /usr/sbin/fb* /usr/sbin/fsck.cramfs /usr/sbin/fsck.minix /usr/sbin/mkfs.minix /usr/sbin/mkfs.bfs /usr/sbin/mkfs.cramfs /lib/security/pam_access.so /lib/security/pam_chroot.so /lib/security/pam_debug.so /lib/security/pam_ftp.so /lib/security/pam_issue.so /lib/security/pam_mail.so /lib/security/pam_motd.so /lib/security/pam_mkhomedir.so /lib/security/pam_postgresok.so /lib/security/pam_rhosts_auth.so /lib/security/pam_userdb.so /usr/share/consolefonts/1* /usr/share/consolefonts/7* /usr/share/consolefonts/8* /usr/share/consolefonts/9* /usr/share/consolefonts/A* /usr/share/consolefonts/C* /usr/share/consolefonts/E* /usr/share/consolefonts/G* /usr/share/consolefonts/L* /usr/share/consolefonts/M* /usr/share/consolefonts/R* /usr/share/consolefonts/a* /usr/share/consolefonts/c* /usr/share/consolefonts/dr* /usr/share/consolefonts/g* /usr/share/consolefonts/i* /usr/share/consolefonts/k* /usr/share/consolefonts/l* /usr/share/consolefonts/r* /usr/share/consolefonts/s* /usr/share/consolefonts/t* /usr/share/consolefonts/v* /etc/splash/livecd-2006.1/16* /etc/splash/livecd-2006.1/12* /etc/splash/livecd-2006.1/6* /etc/splash/livecd-2006.1/8* /etc/splash/livecd-2006.1/images/silent-16* /etc/splash/livecd-2006.1/images/silent-12* /etc/splash/livecd-2006.1/images/silent-6* /etc/splash/livecd-2006.1/images/silent-8* /etc/splash/livecd-2006.1/images/verbose-16* /etc/splash/livecd-2006.1/images/verbose-12* /etc/splash/livecd-2006.1/images/verbose-6* /etc/splash/livecd-2006.1/images/verbose-8* /etc/portage/make.conf.example /etc/make.globals /etc/resolv.conf
+# livecd/rm: /lib/*.a /usr/lib/*.a /usr/lib/gcc-lib/*/*/libgcj* /etc/dispatch-conf.conf /etc/etc-update.conf /etc/*- /etc/issue* /etc/portage/make.conf /etc/man.conf /etc/*.old /root/.viminfo /usr/sbin/fb* /usr/sbin/fsck.cramfs /usr/sbin/fsck.minix /usr/sbin/mkfs.minix /usr/sbin/mkfs.bfs /usr/sbin/mkfs.cramfs /lib/security/pam_access.so /lib/security/pam_chroot.so /lib/security/pam_debug.so /lib/security/pam_ftp.so /lib/security/pam_issue.so /lib/security/pam_mail.so /lib/security/pam_motd.so /lib/security/pam_mkhomedir.so /lib/security/pam_postgresok.so /lib/security/pam_rhosts_auth.so /lib/security/pam_userdb.so /usr/share/consolefonts/1* /usr/share/consolefonts/7* /usr/share/consolefonts/8* /usr/share/consolefonts/9* /usr/share/consolefonts/A* /usr/share/consolefonts/C* /usr/share/consolefonts/E* /usr/share/consolefonts/G* /usr/share/consolefonts/L* /usr/share/consolefonts/M* /usr/share/consolefonts/R* /usr/share/consolefonts/a* /usr/share/consolefonts/c* /usr/share/consolefonts/dr* /usr/share/consolefonts/g* /usr/share/consolefonts/i* /usr/share/consolefonts/k* /usr/share/consolefonts/l* /usr/share/consolefonts/r* /usr/share/consolefonts/s* /usr/share/consolefonts/t* /usr/share/consolefonts/v* /etc/portage/make.conf.example /etc/make.globals /etc/resolv.conf
livecd/rm:
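To illustrate how the new livecd/verify option above interacts with the boot arguments: the generated isoroot_b2sums file is only checked at boot when 'verify' appears in livecd/bootargs. A hypothetical fragment (dokeymap stands in for whatever other bootargs a spec already uses) might look like:

    livecd/verify: blake2
    livecd/bootargs: dokeymap verify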
diff --git a/examples/netboot2_template.spec b/examples/netboot2_template.spec
deleted file mode 100644
index 987cf9ed..00000000
--- a/examples/netboot2_template.spec
+++ /dev/null
@@ -1,301 +0,0 @@
-subarch: mips3
-version_stamp: 2006.0
-target: netboot2
-rel_type: default
-profile: uclibc/mips
-snapshot: 20060107
-source_subpath: default/stage3-mips-uclibc-mips3-2006.126
-
-# This option specifies the location to a portage overlay that you would like to
-# have used when building this target.
-# example:
-# portage_overlay: /usr/local/portage
-portage_overlay:
-
-boot/kernel: ip22r4k ip22r5k ip27r10k ip28r10k ip30r10k ip32r5k
-boot/kernel/ip22r4k/sources: =mips-sources-2.6.14.5
-boot/kernel/ip22r5k/sources: =mips-sources-2.6.14.5
-boot/kernel/ip27r10k/sources: =mips-sources-2.6.14.5
-boot/kernel/ip28r10k/sources: =mips-sources-2.6.14.5
-boot/kernel/ip30r10k/sources: =mips-sources-2.6.14.5
-boot/kernel/ip32r5k/sources: =mips-sources-2.6.14.5
-
-boot/kernel/ip22r4k/config: /usr/share/genkernel/mips/ip22r4k-2006_0.cf
-boot/kernel/ip22r5k/config: /usr/share/genkernel/mips/ip22r5k-2006_0.cf
-boot/kernel/ip27r10k/config: /usr/share/genkernel/mips/ip27r10k-2006_0.cf
-boot/kernel/ip28r10k/config: /usr/share/genkernel/mips/ip28r10k-2006_0.cf
-boot/kernel/ip30r10k/config: /usr/share/genkernel/mips/ip30r10k-2006_0.cf
-boot/kernel/ip32r5k/config: /usr/share/genkernel/mips/ip32r5k-2006_0.cf
-
-boot/kernel/ip22r4k/use: -doc
-boot/kernel/ip22r5k/use: -doc
-boot/kernel/ip27r10k/use: -doc ip27
-boot/kernel/ip28r10k/use: -doc ip28
-boot/kernel/ip30r10k/use: -doc ip30
-boot/kernel/ip32r5k/use: -doc
-
-boot/kernel/ip22r4k/gk_kernargs: --kernel-cross-compile=mips-unknown-linux-gnu- --makeopts=-j2
-boot/kernel/ip22r5k/gk_kernargs: --kernel-cross-compile=mips-unknown-linux-gnu- --makeopts=-j2
-boot/kernel/ip27r10k/gk_kernargs: --kernel-cross-compile=mips64-unknown-linux-gnu- --makeopts=-j2
-boot/kernel/ip28r10k/gk_kernargs: --kernel-cross-compile=mips64-unknown-linux-gnu- --makeopts=-j2
-boot/kernel/ip30r10k/gk_kernargs: --kernel-cross-compile=mips64-unknown-linux-gnu- --makeopts=-j2
-boot/kernel/ip32r5k/gk_kernargs: --kernel-cross-compile=mips64-unknown-linux-gnu- --makeopts=-j2
-
-netboot2/builddate: 20060107
-netboot2/busybox_config: /usr/share/genkernel/mips/nb-busybox.cf
-
-netboot2/use:
- -*
- multicall
- readline
- ssl
-
-netboot2/packages:
- com_err
- dropbear
- dvhtool
- e2fsprogs
- gcc-mips64
- jfsutils
- mdadm
- nano
- ncurses
- openssl
- popt
- portmap
- reiserfsprogs
- rsync
- sdparm
- ss
- ttcp
- uclibc
- util-linux
- wget
- xfsprogs
-
-netboot2/packages/com_err/files:
- /lib/libcom_err.so
- /lib/libcom_err.so.2
- /lib/libcom_err.so.2.1
- /usr/bin/compile_et
- /usr/lib/libcom_err.so
-
-netboot2/packages/dropbear/files:
- /usr/bin/dbclient
- /usr/bin/dbscp
- /usr/bin/dropbearconvert
- /usr/bin/dropbearkey
- /usr/bin/dropbearmulti
- /usr/sbin/dropbear
-
-netboot2/packages/dvhtool/files:
- /usr/sbin/dvhtool
-
-netboot2/packages/e2fsprogs/files:
- /bin/chattr
- /bin/lsattr
- /bin/uuidgen
- /lib/libblkid.so
- /lib/libblkid.so.1
- /lib/libblkid.so.1.0
- /lib/libe2p.so
- /lib/libe2p.so.2
- /lib/libe2p.so.2.3
- /lib/libext2fs.so
- /lib/libext2fs.so.2
- /lib/libext2fs.so.2.4
- /lib/libuuid.so
- /lib/libuuid.so.1
- /lib/libuuid.so.1.2
- /sbin/badblocks
- /sbin/blkid
- /sbin/debugfs
- /sbin/dumpe2fs
- /sbin/e2fsck
- /sbin/e2image
- /sbin/e2label
- /sbin/filefrag
- /sbin/findfs
- /sbin/fsck
- /sbin/fsck.ext2
- /sbin/fsck.ext3
- /sbin/logsave
- /sbin/mke2fs
- /sbin/mkfs.ext2
- /sbin/mkfs.ext3
- /sbin/resize2fs
- /sbin/tune2fs
- /usr/lib/e2initrd_helper
- /usr/lib/libblkid.so
- /usr/lib/libe2p.so
- /usr/lib/libext2fs.so
- /usr/lib/libuuid.so
- /usr/sbin/mklost+found
-
-netboot2/packages/jfsutils/files:
- /sbin/fsck.jfs
- /sbin/jfs_fsck
- /sbin/jfs_mkfs
- /sbin/jfs_tune
- /sbin/mkfs.jfs
-
-netboot2/packages/mdadm/files:
- /etc/mdadm.conf
- /sbin/mdadm
-
-netboot2/packages/nano/files:
- /bin/nano
- /bin/rnano
- /usr/bin/nano
-
-netboot2/packages/ncurses/files:
- /etc/terminfo
- /lib/libcurses.so
- /lib/libncurses.so
- /lib/libncurses.so.5
- /lib/libncurses.so.5.4
- /usr/bin/toe
- /usr/lib/libcurses.so
- /usr/lib/libform.so
- /usr/lib/libform.so.5
- /usr/lib/libform.so.5.4
- /usr/lib/libmenu.so
- /usr/lib/libmenu.so.5
- /usr/lib/libmenu.so.5.4
- /usr/lib/libncurses.so
- /usr/lib/libpanel.so
- /usr/lib/libpanel.so.5
- /usr/lib/libpanel.so.5.4
- /usr/lib/terminfo
- /usr/share/tabset/std
- /usr/share/tabset/stdcrt
- /usr/share/tabset/vt100
- /usr/share/tabset/vt300
- /usr/share/terminfo/a/ansi
- /usr/share/terminfo/d/dumb
- /usr/share/terminfo/e/eterm
- /usr/share/terminfo/l/linux
- /usr/share/terminfo/r/rxvt
- /usr/share/terminfo/s/screen
- /usr/share/terminfo/s/sun
- /usr/share/terminfo/v/vt100
- /usr/share/terminfo/v/vt102
- /usr/share/terminfo/v/vt200
- /usr/share/terminfo/v/vt220
- /usr/share/terminfo/v/vt52
- /usr/share/terminfo/x/xterm
- /usr/share/terminfo/x/xterm-color
- /usr/share/terminfo/x/xterm-xfree86
-
-netboot2/packages/openssl/files:
- /usr/lib/libcrypto.so
- /usr/lib/libcrypto.so.0
- /usr/lib/libcrypto.so.0.9.7
- /usr/lib/libssl.so
- /usr/lib/libssl.so.0
- /usr/lib/libssl.so.0.9.7
-
-netboot2/packages/popt/files:
- /usr/lib/libpopt.so
- /usr/lib/libpopt.so.0
- /usr/lib/libpopt.so.0.0.0
-
-netboot2/packages/portmap/files:
- /sbin/portmap
-
-netboot2/packages/reiserfsprogs/files:
- /sbin/fsck.reiserfs
- /sbin/mkfs.reiserfs
- /sbin/mkreiserfs
- /sbin/reiserfsck
- /sbin/reiserfstune
-
-netboot2/packages/rsync/files:
- /usr/bin/rsync
-
-netboot2/packages/sdparm/files:
- /usr/bin/sdparm
-
-netboot2/packages/ss/files:
- /lib/libss.so
- /lib/libss.so.2
- /lib/libss.so.2.0
- /usr/bin/mk_cmds
- /usr/lib/libss.so
-
-netboot2/packages/ttcp/files:
- /usr/bin/ttcp
-
-netboot2/packages/uclibc/files:
- /etc/ld.so.cache
- /lib/ld-uClibc-0.9.27.so
- /lib/ld-uClibc.so.0
- /lib/libc.so.0
- /lib/libcrypt-0.9.27.so
- /lib/libcrypt.so.0
- /lib/libdl-0.9.27.so
- /lib/libdl.so.0
- /lib/libm-0.9.27.so
- /lib/libm.so.0
- /lib/libnsl-0.9.27.so
- /lib/libnsl.so.0
- /lib/libpthread-0.9.27.so
- /lib/libpthread.so.0
- /lib/libresolv-0.9.27.so
- /lib/libresolv.so.0
- /lib/librt-0.9.27.so
- /lib/librt.so.0
- /lib/libthread_db-0.9.27.so
- /lib/libthread_db.so.1
- /lib/libuClibc-0.9.27.so
- /lib/libutil-0.9.27.so
- /lib/libutil.so.0
- /sbin/ldconfig
- /usr/bin/getent
- /usr/bin/ldd
- /usr/lib/Scrt1.o
- /usr/lib/crt0.o
- /usr/lib/crt1.o
- /usr/lib/crti.o
- /usr/lib/crtn.o
- /usr/lib/libc.so
- /usr/lib/libcrypt.so
- /usr/lib/libdl.so
- /usr/lib/libm.so
- /usr/lib/libnsl.so
- /usr/lib/libpthread.so
- /usr/lib/libresolv.so
- /usr/lib/librt.so
- /usr/lib/libthread_db.so
- /usr/lib/libutil.so
-
-netboot2/packages/util-linux/files:
- /sbin/fdisk
- /sbin/mkfs
- /sbin/mkswap
- /sbin/swapoff
- /sbin/swapon
- /usr/bin/ddate
- /usr/bin/setterm
- /usr/bin/whereis
-
-netboot2/packages/wget/files:
- /usr/bin/wget
-
-netboot2/packages/xfsprogs/files:
- /bin/xfs_copy
- /bin/xfs_growfs
- /bin/xfs_info
- /lib/libhandle.so
- /lib/libhandle.so.1
- /lib/libhandle.so.1.0.3
- /sbin/fsck.xfs
- /sbin/mkfs.xfs
- /sbin/xfs_repair
-
-# Setting the option overrides the location of the pkgcache
-pkgcache_path:
-
-# Setting the option overrides the location of the kerncache
-kerncache_path:
-
diff --git a/examples/netboot_template.spec b/examples/netboot_template.spec
index 0cffc661..07b3b53f 100644
--- a/examples/netboot_template.spec
+++ b/examples/netboot_template.spec
@@ -1,152 +1,302 @@
-# generic netboot image specfile
-# used to build a network bootable image
+subarch: mips3
+version_stamp: 2006.0
+target: netboot
+rel_type: default
+profile: uclibc/mips
+snapshot: 20060107
+source_subpath: default/stage3-mips-uclibc-mips3-2006.126
-# The subarch can be any of the supported catalyst subarches (like athlon-xp).
-# Refer to "man catalyst" or <https://wiki.gentoo.org/wiki/Catalyst>
-# for supported subarches
+# This option specifies the location of the ebuild repositories that you would
+# like to have used when building this target. It takes a space-separated list
+# of directory names.
# example:
-# subarch: athlon-xp
-subarch:
+# repos: /usr/local/portage
+repos:
-# The version stamp is an identifier for the build. It can be anything you wish
-# it to be, but it is usually a date.
-# example:
-# version_stamp: 2006.1
-version_stamp:
+boot/kernel: ip22r4k ip22r5k ip27r10k ip28r10k ip30r10k ip32r5k
+boot/kernel/ip22r4k/sources: =mips-sources-2.6.14.5
+boot/kernel/ip22r5k/sources: =mips-sources-2.6.14.5
+boot/kernel/ip27r10k/sources: =mips-sources-2.6.14.5
+boot/kernel/ip28r10k/sources: =mips-sources-2.6.14.5
+boot/kernel/ip30r10k/sources: =mips-sources-2.6.14.5
+boot/kernel/ip32r5k/sources: =mips-sources-2.6.14.5
-# The target specifies what target we want catalyst to do. For building a
-# netboot image, we use the netboot target.
-# example:
-# target: netboot
-target:
+boot/kernel/ip22r4k/config: /usr/share/genkernel/mips/ip22r4k-2006_0.cf
+boot/kernel/ip22r5k/config: /usr/share/genkernel/mips/ip22r5k-2006_0.cf
+boot/kernel/ip27r10k/config: /usr/share/genkernel/mips/ip27r10k-2006_0.cf
+boot/kernel/ip28r10k/config: /usr/share/genkernel/mips/ip28r10k-2006_0.cf
+boot/kernel/ip30r10k/config: /usr/share/genkernel/mips/ip30r10k-2006_0.cf
+boot/kernel/ip32r5k/config: /usr/share/genkernel/mips/ip32r5k-2006_0.cf
-# The rel_type defines what kind of build we are doing. This is merely another
-# identifier, but it is useful for allowing multiple concurrent builds. Usually,
-# default will suffice.
-# example:
-# rel_type: default
-rel_type:
+boot/kernel/ip22r4k/use: -doc
+boot/kernel/ip22r5k/use: -doc
+boot/kernel/ip27r10k/use: -doc ip27
+boot/kernel/ip28r10k/use: -doc ip28
+boot/kernel/ip30r10k/use: -doc ip30
+boot/kernel/ip32r5k/use: -doc
-# This is the system profile to be used by catalyst to build this target. It is
-# specified as a relative path from /usr/portage/profiles.
-# example:
-# profile: default-linux/x86/2006.1
-profile:
+boot/kernel/ip22r4k/gk_kernargs: --kernel-cross-compile=mips-unknown-linux-gnu- --makeopts=-j2
+boot/kernel/ip22r5k/gk_kernargs: --kernel-cross-compile=mips-unknown-linux-gnu- --makeopts=-j2
+boot/kernel/ip27r10k/gk_kernargs: --kernel-cross-compile=mips64-unknown-linux-gnu- --makeopts=-j2
+boot/kernel/ip28r10k/gk_kernargs: --kernel-cross-compile=mips64-unknown-linux-gnu- --makeopts=-j2
+boot/kernel/ip30r10k/gk_kernargs: --kernel-cross-compile=mips64-unknown-linux-gnu- --makeopts=-j2
+boot/kernel/ip32r5k/gk_kernargs: --kernel-cross-compile=mips64-unknown-linux-gnu- --makeopts=-j2
-# This specifies which snapshot to use for building this target.
-# example:
-# snapshot: 2006.1
-snapshot:
+netboot/builddate: 20060107
+netboot/busybox_config: /usr/share/genkernel/mips/nb-busybox.cf
-# This specifies where the seed stage comes from for this target. The path is
-# relative to $clst_sharedir/builds. The rel_type is also used as a path prefix
-# for the seed.
-# example:
-# default/stage3-x86-2006.1
-source_subpath:
+netboot/use:
+ -*
+ multicall
+ readline
+ ssl
-# These are the hosts used as distcc slaves when distcc is enabled in your
-# catalyst.conf. It follows the same syntax as distcc-config --set-hosts and
-# is entirely optional.
-# example:
-# distcc_hosts: 127.0.0.1 192.168.0.1
-distcc_hosts:
+netboot/packages:
+ com_err
+ dropbear
+ dvhtool
+ e2fsprogs
+ gcc-mips64
+ jfsutils
+ mdadm
+ nano
+ ncurses
+ openssl
+ popt
+ portmap
+ reiserfsprogs
+ rsync
+ sdparm
+ ss
+ ttcp
+ uclibc
+ util-linux
+ wget
+ xfsprogs
-# This is an optional directory containing portage configuration files. It
-# follows the same syntax as /etc/portage and should be consistent across all
-# targets to minimize problems.
-# example:
-# portage_confdir: /etc/portage
-portage_confdir:
+netboot/packages/com_err/files:
+ /lib/libcom_err.so
+ /lib/libcom_err.so.2
+ /lib/libcom_err.so.2.1
+ /usr/bin/compile_et
+ /usr/lib/libcom_err.so
-# This option specifies the location to a portage overlay that you would like to
-# have used when building this target.
-# example:
-# portage_overlay: /usr/local/portage
-portage_overlay:
+netboot/packages/dropbear/files:
+ /usr/bin/dbclient
+ /usr/bin/dbscp
+ /usr/bin/dropbearconvert
+ /usr/bin/dropbearkey
+ /usr/bin/dropbearmulti
+ /usr/sbin/dropbear
-# This allows the optional directory containing the output packages for
-# catalyst. Mainly used as a way for different spec files to access the same
-# cache directory. Default behavior is for this location to be autogenerated
-# by catalyst based on the spec file.
-# example:
-# pkgcache_path: /tmp/packages
-pkgcache_path:
+netboot/packages/dvhtool/files:
+ /usr/sbin/dvhtool
-# This allows the optional directory containing the output packages for kernel
-# builds. Mainly used as a way for different spec files to access the same
-# cache directory. Default behavior is for this location to be autogenerated
-# by catalyst based on the spec file.
-# example:
-# kerncache_path: /tmp/kernel
-kerncache_path:
+netboot/packages/e2fsprogs/files:
+ /bin/chattr
+ /bin/lsattr
+ /bin/uuidgen
+ /lib/libblkid.so
+ /lib/libblkid.so.1
+ /lib/libblkid.so.1.0
+ /lib/libe2p.so
+ /lib/libe2p.so.2
+ /lib/libe2p.so.2.3
+ /lib/libext2fs.so
+ /lib/libext2fs.so.2
+ /lib/libext2fs.so.2.4
+ /lib/libuuid.so
+ /lib/libuuid.so.1
+ /lib/libuuid.so.1.2
+ /sbin/badblocks
+ /sbin/blkid
+ /sbin/debugfs
+ /sbin/dumpe2fs
+ /sbin/e2fsck
+ /sbin/e2image
+ /sbin/e2label
+ /sbin/filefrag
+ /sbin/findfs
+ /sbin/fsck
+ /sbin/fsck.ext2
+ /sbin/fsck.ext3
+ /sbin/logsave
+ /sbin/mke2fs
+ /sbin/mkfs.ext2
+ /sbin/mkfs.ext3
+ /sbin/resize2fs
+ /sbin/tune2fs
+ /usr/lib/e2initrd_helper
+ /usr/lib/libblkid.so
+ /usr/lib/libe2p.so
+ /usr/lib/libext2fs.so
+ /usr/lib/libuuid.so
+ /usr/sbin/mklost+found
-# This option tells catalyst which kernel sources to merge for building this
-# image. This can use normal portage atoms to specify a specific version.
-# example:
-# netboot/kernel/sources: gentoo-sources
-netboot/kernel/sources:
+netboot/packages/jfsutils/files:
+ /sbin/fsck.jfs
+ /sbin/jfs_fsck
+ /sbin/jfs_mkfs
+ /sbin/jfs_tune
+ /sbin/mkfs.jfs
-# This option is the full path and filename to a kernel .config file that is
-# used by genkernel to compile the kernel for this image.
-# example:
-# netboot/kernel/config: /tmp/2.6.11-netboot.config
-netboot/kernel/config:
+netboot/packages/mdadm/files:
+ /etc/mdadm.conf
+ /sbin/mdadm
-# This option sets the USE flags used to build the kernel. These USE flags are
-# additive from the default USE for the specified profile.
-# example:
-# netboot/kernel/use: ultra1
-netboot/kernel/use:
+netboot/packages/nano/files:
+ /bin/nano
+ /bin/rnano
+ /usr/bin/nano
-# This option sets the USE flags with which the optional packages below are
-# built. Like the kernel USE, they are additive.
-# example:
-# netboot/use:
-netboot/use:
+netboot/packages/ncurses/files:
+ /etc/terminfo
+ /lib/libcurses.so
+ /lib/libncurses.so
+ /lib/libncurses.so.5
+ /lib/libncurses.so.5.4
+ /usr/bin/toe
+ /usr/lib/libcurses.so
+ /usr/lib/libform.so
+ /usr/lib/libform.so.5
+ /usr/lib/libform.so.5.4
+ /usr/lib/libmenu.so
+ /usr/lib/libmenu.so.5
+ /usr/lib/libmenu.so.5.4
+ /usr/lib/libncurses.so
+ /usr/lib/libpanel.so
+ /usr/lib/libpanel.so.5
+ /usr/lib/libpanel.so.5.4
+ /usr/lib/terminfo
+ /usr/share/tabset/std
+ /usr/share/tabset/stdcrt
+ /usr/share/tabset/vt100
+ /usr/share/tabset/vt300
+ /usr/share/terminfo/a/ansi
+ /usr/share/terminfo/d/dumb
+ /usr/share/terminfo/e/eterm
+ /usr/share/terminfo/l/linux
+ /usr/share/terminfo/r/rxvt
+ /usr/share/terminfo/s/screen
+ /usr/share/terminfo/s/sun
+ /usr/share/terminfo/v/vt100
+ /usr/share/terminfo/v/vt102
+ /usr/share/terminfo/v/vt200
+ /usr/share/terminfo/v/vt220
+ /usr/share/terminfo/v/vt52
+ /usr/share/terminfo/x/xterm
+ /usr/share/terminfo/x/xterm-color
+ /usr/share/terminfo/x/xterm-xfree86
-# The netboot target builds busybox for its root filesystem. This option is
-# where you specify the full path and filename to your busybox configuration.
-# example
-# netboot/busybox_config: /tmp/busybox.config
-netboot/busybox_config:
+netboot/packages/openssl/files:
+ /usr/lib/libcrypto.so
+ /usr/lib/libcrypto.so.0
+ /usr/lib/libcrypto.so.0.9.7
+ /usr/lib/libssl.so
+ /usr/lib/libssl.so.0
+ /usr/lib/libssl.so.0.9.7
-# This is the full path and filename to the tarball to use as the base for the
-# netboot image.
-# example:
-# netboot/base_tarball: /usr/share/catalyst/netboot/netboot-base.tar.bz2
-netboot/base_tarball:
+netboot/packages/popt/files:
+ /usr/lib/libpopt.so
+ /usr/lib/libpopt.so.0
+ /usr/lib/libpopt.so.0.0.0
-# These are the packages that will be built for your netboot image using the USE
-# flags set in netboot/use. These package names are also labels used later when
-# determining what files to copy into your netboot image.
-# example:
-# netboot/packages: raidtools xfsprogs e2fsprogs reiserfsprogs
+netboot/packages/portmap/files:
+ /sbin/portmap
-# This is where you tell catalyst which files from each package to copy into the
-# netboot image.
-# example:
-# netboot/packages/raidtools/files: /sbin/raidstart /sbin/mkraid /sbin/detect_multipath /sbin/raidreconf /sbin/raidstop /sbin/raidhotadd /sbin/raidhotremove /sbin/raidsetfaulty /sbin/raid0run
-netboot/packages/raidtools/files:
+netboot/packages/reiserfsprogs/files:
+ /sbin/fsck.reiserfs
+ /sbin/mkfs.reiserfs
+ /sbin/mkreiserfs
+ /sbin/reiserfsck
+ /sbin/reiserfstune
-# Here is the same thing for xfsprogs.
-# example:
-# netboot/packages/xfsprogs/files: /sbin/mkfs.xfs /sbin/xfs_repair /bin/xfs_check
-netboot/packages/xfsprogs/files:
+netboot/packages/rsync/files:
+ /usr/bin/rsync
-# Here is the same thing for e2fsprogs.
-# example:
-# netboot/packages/e2fsprogs/files: /sbin/mke2fs
-netboot/packages/e2fsprogs/files:
+netboot/packages/sdparm/files:
+ /usr/bin/sdparm
-# Here is the same thing for reiserfsprogs.
-# example:
-# netboot/packages/reiserfsprogs/files: /sbin/mkreiserfs
-netboot/packages/reiserfsprogs/files:
+netboot/packages/ss/files:
+ /lib/libss.so
+ /lib/libss.so.2
+ /lib/libss.so.2.0
+ /usr/bin/mk_cmds
+ /usr/lib/libss.so
-# This is a list of any other files, not belonging to the above packages, that
-# you would wish to have copied into your netboot image.
-# example:
-# netboot/extra_files: /lib/libresolv.so.2 /lib/libnss_compat.so.2 /lib/libnss_dns.so.2 /lib/libnss_files.so.2 /sbin/consoletype
-netboot/extra_files:
+netboot/packages/ttcp/files:
+ /usr/bin/ttcp
+
+netboot/packages/uclibc/files:
+ /etc/ld.so.cache
+ /lib/ld-uClibc-0.9.27.so
+ /lib/ld-uClibc.so.0
+ /lib/libc.so.0
+ /lib/libcrypt-0.9.27.so
+ /lib/libcrypt.so.0
+ /lib/libdl-0.9.27.so
+ /lib/libdl.so.0
+ /lib/libm-0.9.27.so
+ /lib/libm.so.0
+ /lib/libnsl-0.9.27.so
+ /lib/libnsl.so.0
+ /lib/libpthread-0.9.27.so
+ /lib/libpthread.so.0
+ /lib/libresolv-0.9.27.so
+ /lib/libresolv.so.0
+ /lib/librt-0.9.27.so
+ /lib/librt.so.0
+ /lib/libthread_db-0.9.27.so
+ /lib/libthread_db.so.1
+ /lib/libuClibc-0.9.27.so
+ /lib/libutil-0.9.27.so
+ /lib/libutil.so.0
+ /sbin/ldconfig
+ /usr/bin/getent
+ /usr/bin/ldd
+ /usr/lib/Scrt1.o
+ /usr/lib/crt0.o
+ /usr/lib/crt1.o
+ /usr/lib/crti.o
+ /usr/lib/crtn.o
+ /usr/lib/libc.so
+ /usr/lib/libcrypt.so
+ /usr/lib/libdl.so
+ /usr/lib/libm.so
+ /usr/lib/libnsl.so
+ /usr/lib/libpthread.so
+ /usr/lib/libresolv.so
+ /usr/lib/librt.so
+ /usr/lib/libthread_db.so
+ /usr/lib/libutil.so
+
+netboot/packages/util-linux/files:
+ /sbin/fdisk
+ /sbin/mkfs
+ /sbin/mkswap
+ /sbin/swapoff
+ /sbin/swapon
+ /usr/bin/ddate
+ /usr/bin/setterm
+ /usr/bin/whereis
+
+netboot/packages/wget/files:
+ /usr/bin/wget
+
+netboot/packages/xfsprogs/files:
+ /bin/xfs_copy
+ /bin/xfs_growfs
+ /bin/xfs_info
+ /lib/libhandle.so
+ /lib/libhandle.so.1
+ /lib/libhandle.so.1.0.3
+ /sbin/fsck.xfs
+ /sbin/mkfs.xfs
+ /sbin/xfs_repair
+
+# Setting the option overrides the location of the pkgcache
+pkgcache_path:
+
+# Setting the option overrides the location of the kerncache
+kerncache_path:
diff --git a/examples/stage4_template.spec b/examples/stage4_template.spec
index 4066bf5b..02910c88 100644
--- a/examples/stage4_template.spec
+++ b/examples/stage4_template.spec
@@ -28,7 +28,7 @@ target:
rel_type:
# This is the system profile to be used by catalyst to build this target. It is
-# specified as a relative path from /usr/portage/profiles.
+# specified as a relative path from /var/db/repos/gentoo/profiles.
# example:
# profile: default-linux/x86/2006.1
profile:
@@ -45,13 +45,6 @@ snapshot:
# default/stage3-x86-2006.1
source_subpath:
-# These are the hosts used as distcc slaves when distcc is enabled in your
-# catalyst.conf. It follows the same syntax as distcc-config --set-hosts and
-# is entirely optional.
-# example:
-# distcc_hosts: 127.0.0.1 192.168.0.1
-distcc_hosts:
-
# This is an optional directory containing portage configuration files. It
# follows the same syntax as /etc/portage and should be consistent across all
# targets to minimize problems.
@@ -59,11 +52,12 @@ distcc_hosts:
# portage_confdir: /etc/portage
portage_confdir:
-# This option specifies the location to a portage overlay that you would like to
-# have used when building this target.
+# This option specifies the location of the ebuild repositories that you would
+# like to have used when building this target. It takes a space-separated list
+# of directory names.
# example:
-# portage_overlay: /usr/local/portage
-portage_overlay:
+# repos: /usr/local/portage
+repos:
# This allows the optional directory containing the output packages for
# catalyst. Mainly used as a way for different spec files to access the same
@@ -109,12 +103,6 @@ stage4/packages:
# stage4/fsscript:
stage4/fsscript:
-# This is where you set the splash theme. This theme must be present in
-# /etc/splash, before the kernel has completed building.
-# example:
-# stage4/splash_theme: livecd-2006.1
-stage4/splash_theme:
-
# This is a set of arguments that will be passed to genkernel for all kernels
# defined in this target. It is useful for passing arguments to genkernel that
# are not otherwise available via the stage4-stage2 spec file.
@@ -141,14 +129,14 @@ stage4/motd:
# This is for blacklisting modules from being hotplugged that are known to cause
# problems. Putting a module name here will keep it from being auto-loaded,
-# even if ti is detected by hotplug.
+# even if it is detected by hotplug.
# example:
# stage4/modblacklist: 8139cp
stage4/modblacklist:
# This is for adding init scripts to runlevels. The syntax for the init script
# is the script name, followed by a pipe, followed by the runlevel in which you
-# want the script to run. It looks like spind|default and is space delimited.
+# want the script to run. It looks like acpid|default and is space delimited.
# We do not use this on the official media, as catalyst sets up the runlevels
# correctly for us. Since we do not use this, it is left blank below.
# example:
@@ -173,25 +161,37 @@ stage4/rcdel:
# stage4/root_overlay:
stage4/root_overlay:
-# This is used by catalyst to copy the specified file to /etc/X11/xinit/xinitrc
-# and is used by the stage4/type gentoo-gamecd and generic-livecd. While the
-# file will still be copied for any stage4/type, catalyst will only create the
-# necessary /etc/startx for those types, so X will not be automatically started.
-# This is useful also for setting up X on a CD where you do not wish X to start
-# automatically. We do not use this on the release media, so it is left blank.
-# example:
-# stage4/xinitrc:
-stage4/xinitrc:
-
-# This option is used to create non-root users on your CD. It takes a space
-# separated list of user names. These users will be added to the following
-# groups: users,wheel,audio,games,cdrom,usb
-# If this is specified in your spec file, then the first user is also the user
-# used to start X. Since this is not used on the release media, it is blank.
-# example:
+# This option is used to create groups. It takes a carriage-return separated
+# list of group names. For instance:
+# stage4/groups:
+# admin
+# web_group
+# sudo_group
+stage4/groups:
+
+# This option is used to create non-root users. It takes a carriage-return
+# separated list of user names. For instance:
# stage4/users:
+# john.doe
+# foo.bar
+#
+# These users are NOT added to any specific group. You can specify one
+# or more groups to add the user(s) to using an equal sign followed by a comma
+# separated list. For instance:
+# stage4/users:
+# john.doe=wheel,audio,cdrom
+# foo.bar=www,audio
stage4/users:
+# This option is used to copy an SSH public key into a user's .ssh directory.
+# Catalyst will copy the SSH public key in the ~/.ssh/authorized_keys file and
+# set the file permission to 0644. It takes a carriage-return separated list of
+# users with an equal sign followed by the SSH public key path. For instance:
+# stage4/ssh_public_keys:
+# john.doe=/path/to/johns/public/key/id_rsa.pub
+# foo.bar=/path/to/foos/public/key/id_ed25519.pub
+stage4/ssh_public_keys:
+
# This option is used to specify the number of kernels to build and also the
# labels that will be used by the CD bootloader to refer to each kernel image.
# example:
@@ -252,11 +252,11 @@ stage4/unmerge:
# rid of files that don't belong to a particular package, or removing files from
# a package that you wish to keep, but won't need the full functionality.
# example:
-# stage4/empty: /var/tmp /var/cache /var/db /var/empty /var/lock /var/log /var/run /var/spool /var/state /tmp /usr/portage /usr/share/man /usr/share/info /usr/share/unimaps /usr/include /usr/share/zoneinfo /usr/share/dict /usr/share/doc /usr/share/ss /usr/share/state /usr/share/texinfo /usr/lib/python2.2 /usr/lib/portage /usr/share/gettext /usr/share/i18n /usr/share/rfc /usr/lib/X11/config /usr/lib/X11/etc /usr/lib/X11/doc /usr/src /usr/share/doc /usr/share/man /etc/cron.daily /etc/cron.hourly /etc/cron.monthly /etc/cron.weekly /etc/logrotate.d /etc/rsync /usr/lib/awk /usr/lib/ccache /usr/lib/gcc-config /usr/lib/nfs /usr/local /usr/diet/include /usr/diet/man /usr/share/consolefonts/partialfonts /usr/share/consoletrans /usr/share/emacs /usr/share/gcc-data /usr/share/genkernel /etc/splash/gentoo /etc/splash/emergence /usr/share/gnuconfig /usr/share/lcms /usr/share/locale /etc/skel
+# stage4/empty: /var/tmp /var/cache /var/db /var/empty /var/lock /var/log /var/run /var/spool /var/state /tmp /var/db/repos/gentoo /usr/share/man /usr/share/info /usr/share/unimaps /usr/include /usr/share/zoneinfo /usr/share/dict /usr/share/doc /usr/share/ss /usr/share/state /usr/share/texinfo /usr/lib/python2.2 /usr/lib/portage /usr/share/gettext /usr/share/i18n /usr/share/rfc /usr/lib/X11/config /usr/lib/X11/etc /usr/lib/X11/doc /usr/src /usr/share/doc /usr/share/man /etc/cron.daily /etc/cron.hourly /etc/cron.monthly /etc/cron.weekly /etc/logrotate.d /etc/rsync /usr/lib/awk /usr/lib/ccache /usr/lib/gcc-config /usr/lib/nfs /usr/local /usr/diet/include /usr/diet/man /usr/share/consolefonts/partialfonts /usr/share/consoletrans /usr/share/emacs /usr/share/gcc-data /usr/share/genkernel /usr/share/gnuconfig /usr/share/lcms /usr/share/locale /etc/skel
stage4/empty:
# This option tells catalyst to clean specific files from the filesystem and is
# very useful in cleaning up stray files in /etc left over after stage4/unmerge.
# example:
-# stage4/rm: /lib/*.a /usr/lib/*.a /usr/lib/gcc-lib/*/*/libgcj* /etc/dispatch-conf.conf /etc/etc-update.conf /etc/*- /etc/issue* /etc/portage/make.conf /etc/man.conf /etc/*.old /root/.viminfo /usr/sbin/fb* /usr/sbin/fsck.cramfs /usr/sbin/fsck.minix /usr/sbin/mkfs.minix /usr/sbin/mkfs.bfs /usr/sbin/mkfs.cramfs /lib/security/pam_access.so /lib/security/pam_chroot.so /lib/security/pam_debug.so /lib/security/pam_ftp.so /lib/security/pam_issue.so /lib/security/pam_mail.so /lib/security/pam_motd.so /lib/security/pam_mkhomedir.so /lib/security/pam_postgresok.so /lib/security/pam_rhosts_auth.so /lib/security/pam_userdb.so /usr/share/consolefonts/1* /usr/share/consolefonts/7* /usr/share/consolefonts/8* /usr/share/consolefonts/9* /usr/share/consolefonts/A* /usr/share/consolefonts/C* /usr/share/consolefonts/E* /usr/share/consolefonts/G* /usr/share/consolefonts/L* /usr/share/consolefonts/M* /usr/share/consolefonts/R* /usr/share/consolefonts/a* /usr/share/consolefonts/c* /usr/share/consolefonts/dr* /usr/share/consolefonts/g* /usr/share/consolefonts/i* /usr/share/consolefonts/k* /usr/share/consolefonts/l* /usr/share/consolefonts/r* /usr/share/consolefonts/s* /usr/share/consolefonts/t* /usr/share/consolefonts/v* /etc/splash/livecd-2006.1/16* /etc/splash/livecd-2006.1/12* /etc/splash/livecd-2006.1/6* /etc/splash/livecd-2006.1/8* /etc/splash/livecd-2006.1/images/silent-16* /etc/splash/livecd-2006.1/images/silent-12* /etc/splash/livecd-2006.1/images/silent-6* /etc/splash/livecd-2006.1/images/silent-8* /etc/splash/livecd-2006.1/images/verbose-16* /etc/splash/livecd-2006.1/images/verbose-12* /etc/splash/livecd-2006.1/images/verbose-6* /etc/splash/livecd-2006.1/images/verbose-8* /etc/portage/make.conf.example /etc/make.globals /etc/resolv.conf
+# stage4/rm: /lib/*.a /usr/lib/*.a /usr/lib/gcc-lib/*/*/libgcj* /etc/dispatch-conf.conf /etc/etc-update.conf /etc/*- /etc/issue* /etc/portage/make.conf /etc/man.conf /etc/*.old /root/.viminfo /usr/sbin/fb* /usr/sbin/fsck.cramfs /usr/sbin/fsck.minix /usr/sbin/mkfs.minix /usr/sbin/mkfs.bfs /usr/sbin/mkfs.cramfs /lib/security/pam_access.so /lib/security/pam_chroot.so /lib/security/pam_debug.so /lib/security/pam_ftp.so /lib/security/pam_issue.so /lib/security/pam_mail.so /lib/security/pam_motd.so /lib/security/pam_mkhomedir.so /lib/security/pam_postgresok.so /lib/security/pam_rhosts_auth.so /lib/security/pam_userdb.so /usr/share/consolefonts/1* /usr/share/consolefonts/7* /usr/share/consolefonts/8* /usr/share/consolefonts/9* /usr/share/consolefonts/A* /usr/share/consolefonts/C* /usr/share/consolefonts/E* /usr/share/consolefonts/G* /usr/share/consolefonts/L* /usr/share/consolefonts/M* /usr/share/consolefonts/R* /usr/share/consolefonts/a* /usr/share/consolefonts/c* /usr/share/consolefonts/dr* /usr/share/consolefonts/g* /usr/share/consolefonts/i* /usr/share/consolefonts/k* /usr/share/consolefonts/l* /usr/share/consolefonts/r* /usr/share/consolefonts/s* /usr/share/consolefonts/t* /usr/share/consolefonts/v* /etc/portage/make.conf.example /etc/make.globals /etc/resolv.conf
stage4/rm:
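Taken together, the new stage4 group, user and SSH-key options documented above could be combined roughly as follows (the user name, group list and key path are invented for the example):

    stage4/groups:
        admin
    stage4/users:
        john.doe=admin,wheel,audio
    stage4/ssh_public_keys:
        john.doe=/root/keys/id_ed25519.pub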
diff --git a/examples/tinderbox_template.spec b/examples/tinderbox_template.spec
deleted file mode 100644
index f1af09c8..00000000
--- a/examples/tinderbox_template.spec
+++ /dev/null
@@ -1,93 +0,0 @@
-# generic tinderbox specfile
-
-# The subarch can be any of the supported catalyst subarches (like athlon-xp).
-# Refer to "man catalyst" or <https://wiki.gentoo.org/wiki/Catalyst>
-# for supported subarches
-# example:
-# subarch: athlon-xp
-subarch:
-
-# The version stamp is an identifier for the build. It can be anything you wish
-# it to be, but it is usually a date.
-# example:
-# version_stamp: 2006.1
-version_stamp:
-
-# The target specifies what target we want catalyst to do.
-# example:
-# target: tinderbox
-target: tinderbox
-
-# The rel_type defines what kind of build we are doing. This is merely another
-# identifier, but it is useful for allowing multiple concurrent builds. Usually,
-# default will suffice.
-# example:
-# rel_type: default
-rel_type:
-
-# This is the system profile to be used by catalyst to build this target. It is
-# specified as a relative path from /usr/portage/profiles.
-# example:
-# profile: default-linux/x86/2006.1
-profile:
-
-# This specifies which snapshot to use for building this target.
-# example:
-# snapshot: 2006.1
-snapshot:
-
-# This specifies where the seed stage comes from for this target. The path is
-# relative to $clst_sharedir/builds. The rel_type is also used as a path prefix
-# for the seed.
-# example:
-# default/stage3-x86-2006.1
-source_subpath:
-
-# These are the hosts used as distcc slaves when distcc is enabled in your
-# catalyst.conf. It follows the same syntax as distcc-config --set-hosts and
-# is entirely optional.
-# example:
-# distcc_hosts: 127.0.0.1 192.168.0.1
-distcc_hosts:
-
-# This is an optional directory containing portage configuration files. It
-# follows the same syntax as /etc/portage and should be consistent across all
-# targets to minimize problems.
-# example:
-# portage_confdir: /etc/portage
-portage_confdir:
-
-# This option specifies the location to a portage overlay that you would like to
-# have used when building this target.
-# example:
-# portage_overlay: /usr/local/portage
-portage_overlay:
-
-# This allows the optional directory containing the output packages for
-# catalyst. Mainly used as a way for different spec files to access the same
-# cache directory. Default behavior is for this location to be autogenerated
-# by catalyst based on the spec file.
-# example:
-# pkgcache_path: /tmp/packages
-pkgcache_path:
-
-# The tinderbox target can build packages with any USE settings. However, it
-# should be noted that these settings are additive to the settings in the
-# chosen profile. This is extremely useful when testing possible changes to a
-# profile or package.
-# example:
-# tinderbox/use: gtk2 gnome kde qt bonobo cdr esd gtkhtml mozilla mysql perl ruby tcltk cups ldap ssl tcpd -svga
-tinderbox/use:
-
-# This is the list of packages that will be built by the tinderbox target.
-# Each of these is considered a separate target to test, and catalyst will use
-# rsync to reset the build area to the default from the source_subpath before
-# each package. This allows for testing USE changes on individual packages as
-# well as for dependency issues.
-# example:
-# tinderbox/packages: dante tsocks sys-apps/eject minicom links acpid apmd parted whois tcpdump cvs zip unzip netcat partimage app-admin/sudo app-cdr/cdrtools gnome emacs dev-lang/ruby enlightenment kde mozilla-firefox mozilla-thunderbird xfce4 openbox fluxbox sylpheed openoffice-bin gimp xemacs xmms abiword gaim xchat pan tetex xcdroast k3b samba nmap gradm ettercap ethereal mplayer
-tinderbox/packages:
-
-# Setting the option overrides the location of the kerncache
-kerncache_path:
-
diff --git a/livecd/cdtar/elilo-3.6-cdtar.tar.bz2 b/livecd/cdtar/elilo-3.6-cdtar.tar.bz2
deleted file mode 100644
index 6d8da372..00000000
--- a/livecd/cdtar/elilo-3.6-cdtar.tar.bz2
+++ /dev/null
Binary files differ
diff --git a/livecd/cdtar/grub-memtest86+-cdtar.tar.bz2 b/livecd/cdtar/grub-memtest86+-cdtar.tar.bz2
deleted file mode 100644
index e5736ccb..00000000
--- a/livecd/cdtar/grub-memtest86+-cdtar.tar.bz2
+++ /dev/null
Binary files differ
diff --git a/livecd/cdtar/isolinux-3.72-cdtar.tar.bz2 b/livecd/cdtar/isolinux-3.72-cdtar.tar.bz2
deleted file mode 100644
index ceb9bd8b..00000000
--- a/livecd/cdtar/isolinux-3.72-cdtar.tar.bz2
+++ /dev/null
Binary files differ
diff --git a/livecd/cdtar/isolinux-3.72-memtest86+-cdtar.tar.bz2 b/livecd/cdtar/isolinux-3.72-memtest86+-cdtar.tar.bz2
deleted file mode 100644
index 8df43494..00000000
--- a/livecd/cdtar/isolinux-3.72-memtest86+-cdtar.tar.bz2
+++ /dev/null
Binary files differ
diff --git a/livecd/cdtar/silo-1.4.13-sparc-cdtar.tar.bz2 b/livecd/cdtar/silo-1.4.13-sparc-cdtar.tar.bz2
deleted file mode 100644
index 3d24672e..00000000
--- a/livecd/cdtar/silo-1.4.13-sparc-cdtar.tar.bz2
+++ /dev/null
Binary files differ
diff --git a/livecd/cdtar/yaboot-1.3.13-cdtar.tar.bz2 b/livecd/cdtar/yaboot-1.3.13-cdtar.tar.bz2
deleted file mode 100644
index a4bec73b..00000000
--- a/livecd/cdtar/yaboot-1.3.13-cdtar.tar.bz2
+++ /dev/null
Binary files differ
diff --git a/livecd/files/README.txt b/livecd/files/README.txt
index 1be3778d..ebf67691 100644
--- a/livecd/files/README.txt
+++ b/livecd/files/README.txt
@@ -99,9 +99,9 @@ passwd=foo Sets whatever follows the equals as the root password, which
noload=X This causes the initial ramdisk to skip the loading of a
specific module that may be causing a problem. Syntax matches
that of doload.
+nogui This causes an X-enabled LiveCD to not automatically start X,
+ but rather, to drop to the command line instead.
nonfs Disables the starting of portmap/nfsmount on boot.
-nox This causes an X-enabled LiveCD to not automatically start X,
- but rather, to drop to the command line instead.
scandelay This causes the CD to pause for 10 seconds during certain
portions the boot process to allow for devices that are slow to
initialize to be ready for use.
diff --git a/livecd/files/gamecd.motd.txt b/livecd/files/gamecd.motd.txt
deleted file mode 100644
index 7ee2d9c2..00000000
--- a/livecd/files/gamecd.motd.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-
-To (re)start ##GAME_NAME, please type "startx" at the prompt below.
-
-Please report any bugs you find to https://bugs.gentoo.org. Be sure to include
-detailed information about how to reproduce the bug you are reporting.
-
-Thank you for using Gentoo Linux!
-
diff --git a/livecd/files/livecd-bashrc b/livecd/files/livecd-bashrc
index 18b8f1d8..a9bf588e 100644
--- a/livecd/files/livecd-bashrc
+++ b/livecd/files/livecd-bashrc
@@ -1,14 +1 @@
#!/bin/bash
-
-if [ ! "$(grep nox /proc/cmdline)" ]
-then
- if [ -x /usr/bin/X ]
- then
- if [ -e /etc/startx -a $(tty) = "/dev/tty1" ];
- then
- rm -f /etc/startx
- ##STARTX
- [ -f /etc/motd ] && cat /etc/motd
- fi
- fi
-fi
diff --git a/livecd/files/livecd-local.start b/livecd/files/livecd-local.start
index 11a7d28a..a7bb2bef 100644
--- a/livecd/files/livecd-local.start
+++ b/livecd/files/livecd-local.start
@@ -4,11 +4,6 @@
# This is a good place to load any misc.
# programs on startup ( 1>&2 )
-#if [ -d /usr/livecd/gconf ]
-#then
-# ln -sf /usr/livecd/gconf /etc/gconf
-#fi
-
#if [ -d /usr/livecd/db ]
#then
# ln -sf /usr/livecd/db /var/db
@@ -16,10 +11,10 @@
if [ -d /usr/livecd/profiles ]
then
- ln -sf /usr/livecd/profiles /usr/portage/profiles
+ ln -sf /usr/livecd/profiles /var/db/repos/gentoo/profiles
fi
if [ -d /usr/livecd/eclass ]
then
- ln -sf /usr/livecd/eclass /usr/portage/eclass
+ ln -sf /usr/livecd/eclass /var/db/repos/gentoo/eclass
fi
diff --git a/livecd/files/livecd.motd.txt b/livecd/files/livecd.motd.txt
index fe4c0918..029c2f6e 100644
--- a/livecd/files/livecd.motd.txt
+++ b/livecd/files/livecd.motd.txt
@@ -1,10 +1,11 @@
-To (re)start X Windows, please type "##DISPLAY_MANAGER" at the prompt below.
-There is also a rescue session for X using twm if you simply use "startx".
+The latest version of the Handbook is always available from the Gentoo web
+site by typing "links https://wiki.gentoo.org/wiki/Handbook".
-You can start the installer by typing "installer" at the prompt below.
+To start an ssh server on this system, type "/etc/init.d/sshd start". If you
+need to log in remotely as root, type "passwd root" to reset root's password
+to a known value.
Please report any bugs you find to https://bugs.gentoo.org. Be sure to include
detailed information about how to reproduce the bug you are reporting.
Thank you for using Gentoo Linux!
-
diff --git a/livecd/files/minimal.motd.txt b/livecd/files/minimal.motd.txt
index 3058b854..029c2f6e 100644
--- a/livecd/files/minimal.motd.txt
+++ b/livecd/files/minimal.motd.txt
@@ -7,5 +7,5 @@ to a known value.
Please report any bugs you find to https://bugs.gentoo.org. Be sure to include
detailed information about how to reproduce the bug you are reporting.
-Thank you for using Gentoo Linux!
+Thank you for using Gentoo Linux!
diff --git a/livecd/files/universal.motd.txt b/livecd/files/universal.motd.txt
deleted file mode 100644
index 403b8729..00000000
--- a/livecd/files/universal.motd.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-
-Stage tarball(s), distfiles and packages can be found in /mnt/cdrom/.
-
-You can view the networkless installation instructions for this release by
-typing "links /mnt/cdrom/docs/handbook/html".
diff --git a/livecd/files/x86-F2.msg b/livecd/files/x86-F2.msg
deleted file mode 100644
index eb1638b8..00000000
--- a/livecd/files/x86-F2.msg
+++ /dev/null
@@ -1,22 +0,0 @@
-Gentoo Linux LiveCD boot options - [F1 to display available kernels]
-
-Please hit F1 to see the available kernels on this livecd. Please note that
-the -nofb counterparts to each kernel disable the framebuffer
-and splash images. Additionally, the memtest86 boot option is available
-to test local RAM for errors. To use memtest86, just type 'memtest86'.
-
-This lists the possible command line options that can be used to tweak the boot
-process of this CD. This lists the Gentoo-specific options, along with a few
-options that are built-in to the kernel, but that have been proven very useful
-to our users. Also, all options that start with "do" have a "no" inverse, that
-does the opposite. For example, "doscsi" enables SCSI support in the initial
-ramdisk boot, while "noscsi" disables it.
-
-To list the options, please press keys from F3 through F7.
-
-F3: Hardware (Page 1)
-F4: Hardware (Page 2)
-F5: Hardware (Page 3)
-F6: Volume Management
-F7: Misc.
-
diff --git a/livecd/files/x86-F3.msg b/livecd/files/x86-F3.msg
deleted file mode 100644
index e0ec2bd8..00000000
--- a/livecd/files/x86-F3.msg
+++ /dev/null
@@ -1,22 +0,0 @@
-Hardware options (Page 1):
-acpi=on This loads support for ACPI and also causes the acpid daemon to
- be started by the CD on boot. This is only needed if your
- system requires ACPI to function properly. This is not
- required for Hyperthreading support.
-acpi=off Completely disables ACPI. This is useful on some older systems
- and is also a requirement for using APM. This will disable any
- Hyperthreading support of your processor.
-console=X This sets up serial console access for the CD. The first
- option is the device, usually ttyS0 on x86, followed by any
- connection options, which are comma separated. The default
- options are 9600,8,n,1.
-dmraid=X This allows for passing options to the device-mapper RAID
- subsystem. Options should be encapsulated in quotes.
-doapm This loads APM driver support. This requires you to also use
- acpi=off.
-dopcmcia This loads support for PCMCIA and Cardbus hardware and also
- causes the pcmcia cardmgr to be started by the CD on boot.
- This is only required when booting from PCMCIA/Cardbus devices.
-doscsi This loads support for most SCSI controllers. This is also a
- requirement for booting most USB devices, as they use the SCSI
- subsystem of the kernel.
diff --git a/livecd/files/x86-F4.msg b/livecd/files/x86-F4.msg
deleted file mode 100644
index 77ded0e3..00000000
--- a/livecd/files/x86-F4.msg
+++ /dev/null
@@ -1,20 +0,0 @@
-Hardware options (Page 2):
-hda=stroke This allows you to partition the whole hard disk even when your
- BIOS is unable to handle large disks. This option is only used
- on machines with an older BIOS. Replace hda with the device
- that is requiring this option.
-ide=nodma This forces the disabling of DMA in the kernel and is required
- by some IDE chipsets and also by some CDROM drives. If your
- system is having trouble reading from your IDE CDROM, try this
- option. This also disables the default hdparm settings from
- being executed.
-noapic This disables the Advanced Programmable Interrupt Controller
- that is present on newer motherboards. It has been known to
- cause some problems on older hardware.
-nodetect This disables all of the autodetection done by the CD,
- including device autodetection and DHCP probing. This is
- useful for doing debugging of a failing CD or driver.
-nodhcp This disables DHCP probing on detected network cards. This is
- useful on networks with only static addresses.
-nodmraid Disables support for device-mapper RAID, such as that used for
- on-board IDE/SATA RAID controllers.
diff --git a/livecd/files/x86-F5.msg b/livecd/files/x86-F5.msg
deleted file mode 100644
index adfb0197..00000000
--- a/livecd/files/x86-F5.msg
+++ /dev/null
@@ -1,22 +0,0 @@
-Hardware options (Page 3):
-nofirewire This disables the loading of Firewire modules. This should
- only be necessary if your Firewire hardware is causing
- a problem with booting the CD.
-nogpm This diables gpm console mouse support.
-nohotplug This disables the loading of the hotplug and coldplug init
- scripts at boot. This is useful for doing debugging of a
- failing CD or driver.
-nokeymap This disables the keymap selection used to select non-US
- keyboard layouts.
-nolapic This disables the local APIC on Uniprocessor kernels.
-nosata This disables the loading of Serial ATA modules. This is used
- if your system is having problems with the SATA subsystem.
-nosmp This disables SMP, or Symmetric Multiprocessing, on SMP-enabled
- kernels. This is useful for debugging SMP-related issues with
- certain drivers and motherboards.
-nosound This disables sound support and volume setting. This is useful
- for systems where sound support causes problems.
-nousb This disables the autoloading of USB modules. This is useful
- for debugging USB issues.
-slowusb This adds some extra pauses into the boot process for slow
- USB CDROMs, like in the IBM BladeCenter.
diff --git a/livecd/files/x86-F6.msg b/livecd/files/x86-F6.msg
deleted file mode 100644
index b61ee9c9..00000000
--- a/livecd/files/x86-F6.msg
+++ /dev/null
@@ -1,14 +0,0 @@
-Volume/Device Management:
-doevms This enables support for IBM's pluggable EVMS, or Enterprise
- Volume Management System. This is not safe to use with lvm2.
-dolvm This enables support for Linux's Logical Volume Management.
- This is not safe to use with evms2.
-Screen reader access:
-speakup.synth=synth starts speakup using a given synthesizer.
- supported synths are acntpc, acntsa, apollo, audptr, bns,
- decext, dectlk, dtlk, keypc, ltlk, spkout and txprt.
- Also, soft is supported for software speech and dummy is
- supported for testing.
-speakup.quiet=1 sets the synthesizer not to speak until a key is pressed.
-speakup_SYNTH.port=n sets the port for internal synthesizers.
-speakup_SYNTH.ser=n sets the serial port for external synthesizers.
diff --git a/livecd/files/x86-F7.msg b/livecd/files/x86-F7.msg
deleted file mode 100644
index 82306245..00000000
--- a/livecd/files/x86-F7.msg
+++ /dev/null
@@ -1,22 +0,0 @@
-Other options:
-debug Enables debugging code. This might get messy, as it displays
- a lot of data to the screen.
-docache This caches the entire runtime portion of the CD into RAM,
- which allows you to umount /mnt/cdrom and mount another CDROM.
- This option requires that you have at least twice as much
- available RAM as the size of the CD.
-doload=X This causes the initial ramdisk to load any module listed, as
- well as dependencies. Replace X with the module name.
- Multiple modules can be specified by a comma-separated list.
-noload=X This causes the initial ramdisk to skip the loading of a
- specific module that may be causing a problem. Syntax matches
- that of doload.
-nox This causes an X-enabled LiveCD to not automatically start X,
- but rather, to drop to the command line instead.
-scandelay This causes the CD to pause for 10 seconds during certain
- portions the boot process to allow for devices that are slow to
- initialize to be ready for use.
-scandelay=X This allows you to specify a given delay, in seconds, to be
- added to certain portions of the boot process to allow for
- devices that are slow to initialize to be ready for use.
- Replace X with the number of seconds to pause.
diff --git a/setup.py b/setup.py
index c6b52dcf..fc1ac005 100755
--- a/setup.py
+++ b/setup.py
@@ -1,8 +1,6 @@
#!/usr/bin/env python
"""Catalyst is a release building tool used by Gentoo Linux"""
-from __future__ import print_function
-
import codecs as _codecs
from distutils.core import setup as _setup, Command as _Command
from email.utils import parseaddr as _parseaddr
@@ -19,100 +17,101 @@ _maintainer_name, _maintainer_email = _parseaddr(__maintainer__)
def _posix_path(path):
- """Convert a native path to a POSIX path
+ """Convert a native path to a POSIX path
- Distutils wants all paths to be written in the Unix convention
- (i.e. slash-separated) [1], so that's what we'll do here.
+ Distutils wants all paths to be written in the Unix convention
+ (i.e. slash-separated) [1], so that's what we'll do here.
- [1]: https://docs.python.org/2/distutils/setupscript.html
- """
- if _os.path.sep != '/':
- return path.replace(_os.path.sep, '/')
- return path
+ [1]: https://docs.python.org/2/distutils/setupscript.html
+ """
+ if _os.path.sep != '/':
+ return path.replace(_os.path.sep, '/')
+ return path
def _files(prefix, root):
- """Iterate through all the file paths under `root`
-
- Yielding `(target_dir, (file_source_paths, ...))` tuples.
- """
- for dirpath, _dirnames, filenames in _os.walk(root):
- reldir = _os.path.relpath(dirpath, root)
- install_directory = _posix_path(
- _os.path.join(prefix, reldir))
- file_source_paths = [
- _posix_path(_os.path.join(dirpath, filename))
- for filename in filenames]
- yield (install_directory, file_source_paths)
-
-
-_data_files = [('/etc/catalyst', ['etc/catalyst.conf','etc/catalystrc']),
- ('/usr/share/man/man1', ['files/catalyst.1']),
- ('/usr/share/man/man5', ['files/catalyst-config.5', 'files/catalyst-spec.5'])
- ]
+ """Iterate through all the file paths under `root`
+
+ Yielding `(target_dir, (file_source_paths, ...))` tuples.
+ """
+ for dirpath, _dirnames, filenames in _os.walk(root):
+ reldir = _os.path.relpath(dirpath, root)
+ install_directory = _posix_path(
+ _os.path.join(prefix, reldir))
+ file_source_paths = [
+ _posix_path(_os.path.join(dirpath, filename))
+ for filename in filenames]
+ yield (install_directory, file_source_paths)
+
+
+_data_files = [('/etc/catalyst', ['etc/catalyst.conf', 'etc/catalystrc']),
+ ('/usr/share/man/man1', ['files/catalyst.1']),
+ ('/usr/share/man/man5',
+ ['files/catalyst-config.5', 'files/catalyst-spec.5'])
+ ]
+_data_files.extend(_files('share/catalyst/arch', 'arch'))
_data_files.extend(_files('share/catalyst/livecd', 'livecd'))
_data_files.extend(_files('share/catalyst/targets', 'targets'))
class set_version(_Command):
- '''Saves the specified release version information
- '''
- description = "hardcode script's version using VERSION from environment"
- user_options = [] # [(long_name, short_name, desc),]
-
- def initialize_options (self):
- pass
-
- def finalize_options (self):
- pass
-
- def run(self):
- # pylint: disable=global-statement
- global __version__
- try:
- version = _os.environ['VERSION']
- except KeyError:
- print("Try setting 'VERSION=x.y.z' on the command line... Aborting")
- return
- _set_release_version(version)
- __version__ = _get_version()
- print("Version set to:\n", __version__)
+ '''Saves the specified release version information
+ '''
+ description = "hardcode script's version using VERSION from environment"
+ user_options = [] # [(long_name, short_name, desc),]
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ # pylint: disable=global-statement
+ global __version__
+ try:
+ version = _os.environ['VERSION']
+ except KeyError:
+ print("Try setting 'VERSION=x.y.z' on the command line... Aborting")
+ return
+ _set_release_version(version)
+ __version__ = _get_version()
+ print("Version set to:\n", __version__)
_setup(
- name=_package_name,
- version=__version__,
- maintainer=_maintainer_name,
- maintainer_email=_maintainer_email,
- url='https://wiki.gentoo.org/wiki/Catalyst',
- download_url='http://distfiles.gentoo.org/distfiles/{0}-{1}.tar.bz2'.format(
- _package_name, __version__),
- license='GNU General Public License (GPL)',
- platforms=['all'],
- description=__doc__,
- long_description=_codecs.open(
- _os.path.join(_this_dir, 'README'), 'r', 'utf-8').read(),
- classifiers=[
- 'Development Status :: 5 - Production/Stable',
- 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
- 'Intended Audience :: System Administrators',
- 'Operating System :: POSIX',
- 'Topic :: System :: Archiving :: Packaging',
- 'Topic :: System :: Installation/Setup',
- 'Topic :: System :: Software Distribution',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.7',
- ],
- scripts=['bin/{0}'.format(_package_name)],
- packages=[
- _package_name,
- '{0}.arch'.format(_package_name),
- '{0}.base'.format(_package_name),
- '{0}.targets'.format(_package_name),
- ],
- data_files=_data_files,
- provides=[_package_name],
- cmdclass={
- 'set_version': set_version
- },
- )
+ name=_package_name,
+ version=__version__,
+ maintainer=_maintainer_name,
+ maintainer_email=_maintainer_email,
+ url='https://wiki.gentoo.org/wiki/Catalyst',
+ download_url='http://distfiles.gentoo.org/distfiles/{0}-{1}.tar.bz2'.format(
+ _package_name, __version__),
+ license='GNU General Public License (GPL)',
+ platforms=['all'],
+ description=__doc__,
+ long_description=_codecs.open(
+ _os.path.join(_this_dir, 'README'), 'r', 'utf-8').read(),
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
+ 'Intended Audience :: System Administrators',
+ 'Operating System :: POSIX',
+ 'Topic :: System :: Archiving :: Packaging',
+ 'Topic :: System :: Installation/Setup',
+ 'Topic :: System :: Software Distribution',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.8',
+ ],
+ scripts=['bin/{0}'.format(_package_name)],
+ packages=[
+ _package_name,
+ '{0}.base'.format(_package_name),
+ '{0}.targets'.format(_package_name),
+ ],
+ data_files=_data_files,
+ provides=[_package_name],
+ cmdclass={
+ 'set_version': set_version
+ },
+)
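
The rewritten setup.py keeps the custom set_version command, which reads the release number from the VERSION environment variable and aborts if it is missing. A minimal sketch of driving it from the shell; the version number below is only an example, not one taken from this commit:

    # Hardcode a release version, then build a source tarball as usual.
    VERSION=3.0.20 python3 setup.py set_version
    python3 setup.py sdist
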
diff --git a/targets/embedded/chroot.sh b/targets/embedded/chroot.sh
new file mode 100755
index 00000000..11068388
--- /dev/null
+++ b/targets/embedded/chroot.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+source /tmp/chroot-functions.sh
+
+echo "Installing dependencies..."
+ROOT=/ run_merge -o "${clst_embedded_packages}"
+
+export ROOT="${clst_root_path}"
+mkdir -p "$ROOT"
+
+INSTALL_MASK="${clst_install_mask}" \
+ run_merge -1 -O "${clst_embedded_packages}"
diff --git a/targets/embedded/embedded-controller.sh b/targets/embedded/controller.sh
index c0b62ea3..c2e5994c 100755
--- a/targets/embedded/embedded-controller.sh
+++ b/targets/embedded/controller.sh
@@ -1,17 +1,13 @@
#!/bin/bash
source ${clst_shdir}/support/functions.sh
-source ${clst_shdir}/support/filesystem-functions.sh
case ${1} in
- enter)
- ;;
-
build_packages)
shift
export clst_packages="$*"
exec_in_chroot \
- ${clst_shdir}/${clst_target}/${clst_target}-chroot.sh
+ ${clst_shdir}/${clst_target}/chroot.sh
;;
preclean)
@@ -21,7 +17,7 @@ case ${1} in
# export root_fs_path="${clst_chroot_path}/tmp/mergeroot"
# install -d ${clst_image_path}
-# ${clst_shdir}/embedded/embedded-fs-runscript.sh \
+# ${clst_shdir}/embedded/fs-runscript.sh \
# ${clst_embedded_fs_type} || exit 1
# imagesize=`du -sk ${clst_image_path}/root.img | cut -f1`
# echo "Created ${clst_embedded_fs_type} image at \
@@ -34,21 +30,14 @@ case ${1} in
exec_in_chroot ${clst_shdir}/support/pre-kmerge.sh
;;
- post-kmerge)
- # Cleans up the build environment after the kernels are compiled
- exec_in_chroot ${clst_shdir}/support/post-kmerge.sh
- ;;
-
kernel)
shift
- export clst_kname="${1}"
- # if we have our own linuxrc, copy it in
- if [ -n "${clst_linuxrc}" ]
- then
- cp -pPR ${clst_linuxrc} ${clst_chroot_path}/tmp/linuxrc
- fi
+ export kname="${1}"
+
+ [ -n "${clst_linuxrc}" ] && \
+ copy_to_chroot ${clst_linuxrc} /tmp/linuxrc
exec_in_chroot ${clst_shdir}/support/kmerge.sh
- delete_from_chroot tmp/linuxrc
+ delete_from_chroot /tmp/linuxrc
;;
target_image_setup)
diff --git a/targets/embedded/embedded-chroot.sh b/targets/embedded/embedded-chroot.sh
deleted file mode 100755
index 56d40a4e..00000000
--- a/targets/embedded/embedded-chroot.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-source /tmp/chroot-functions.sh
-
-# Setup the environment
-export DESTROOT="${clst_root_path}"
-export clst_root_path="/"
-
-setup_pkgmgr
-
-echo "Installing dependencies into ${DESTROOT}..."
-run_merge -o "${clst_embedded_packages}"
-
-export clst_root_path="${DESTROOT}"
-export INSTALL_MASK="${clst_install_mask}"
-
-run_merge -1 -O "${clst_embedded_packages}"
diff --git a/targets/embedded/embedded-fs-runscript.sh b/targets/embedded/embedded-fs-runscript.sh
deleted file mode 100755
index 8d5abab1..00000000
--- a/targets/embedded/embedded-fs-runscript.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/bash
-
-die() {
- echo "${1}"
- exit 1
-}
-
-# 1 = mkfs path, 2 = fs name, 3 = pkg name
-fs_check() {
- if [ ! -e ${1} ]; then
- die "You must install ${3} in order to produce ${2} images"
- fi
-}
-
-case ${1} in
- jffs)
- fs_check /usr/sbin/mkfs.jffs jffs sys-fs/mtd
- mkfs.jffs -d ${root_fs_path} -o ${clst_image_path}/root.img \
- ${clst_embedded_fs_ops} || die "Could not create a jffs filesystem"
- ;;
- jffs2)
- fs_check /usr/sbin/mkfs.jffs2 jffs2 sys-fs/mtd
- mkfs.jffs2 --root=${root_fs_path} --output=${clst_image_path}/root.img\
- ${clst_embedded_fs_ops} || die "Could not create a jffs2 filesystem"
- ;;
-
- cramfs)
- fs_check /sbin/mkcramfs cramfs sys-fs/cramfs
- mkcramfs ${clst_embedded_fs_ops} ${root_fs_path} \
- ${clst_image_path}/root.img || \
- die "Could not create a cramfs filesystem"
- ;;
-
- squashfs)
- fs_check /usr/bin/mksquashfs squashfs sys-fs/squashfs-tools
- mksquashfs ${root_fs_path} ${clst_image_path}/root.img \
- ${clst_embedded_fs_ops} || \
- die "Could not create a squashfs filesystem"
- ;;
-
- *)
- ;;
-esac
-exit $?
diff --git a/targets/embedded/fs-runscript.sh b/targets/embedded/fs-runscript.sh
new file mode 100755
index 00000000..dd7c7531
--- /dev/null
+++ b/targets/embedded/fs-runscript.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+die() {
+ echo "${1}"
+ exit 1
+}
+
+# 1 = mkfs path, 2 = fs name, 3 = pkg name
+fs_check() {
+ if [ ! -e ${1} ]; then
+ die "You must install ${3} in order to produce ${2} images"
+ fi
+}
+
+case ${1} in
+ jffs2)
+ fs_check /usr/sbin/mkfs.jffs2 jffs2 sys-fs/mtd
+ mkfs.jffs2 --root=${root_fs_path} --output=${clst_image_path}/root.img\
+ ${clst_embedded_fs_ops} || die "Could not create a jffs2 filesystem"
+ ;;
+
+ squashfs)
+ fs_check /usr/bin/gensquashfs squashfs sys-fs/squashfs-tools-ng
+ gensquashfs -k -D ${root_fs_path} -q ${clst_embedded_fs_ops} \
+ ${clst_image_path}/root.img ||
+ die "Could not create a squashfs filesystem"
+ ;;
+esac
+exit $?
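
The new fs-runscript.sh takes the filesystem type as its only argument and reads everything else from the environment (root_fs_path, clst_image_path, clst_embedded_fs_ops). A hedged invocation sketch mirroring the commented-out controller call above; the paths are placeholders, and clst_shdir is the usual catalyst scripts directory set by the controller:

    # Build a squashfs root image from an already-populated root_fs_path.
    root_fs_path=/tmp/mergeroot \
    clst_image_path=/var/tmp/catalyst/tmp/embedded \
    clst_embedded_fs_ops="" \
        ${clst_shdir}/embedded/fs-runscript.sh squashfs
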
diff --git a/targets/embedded/embedded-preclean-chroot.sh b/targets/embedded/preclean-chroot.sh
index 5353f67d..5353f67d 100755
--- a/targets/embedded/embedded-preclean-chroot.sh
+++ b/targets/embedded/preclean-chroot.sh
diff --git a/targets/grp/grp-chroot.sh b/targets/grp/grp-chroot.sh
deleted file mode 100755
index 6690c91b..00000000
--- a/targets/grp/grp-chroot.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-source /tmp/chroot-functions.sh
-
-## START BUILD
-setup_pkgmgr
-
-export DISTDIR="/tmp/grp/${clst_grp_target}"
-export PKGDIR="/tmp/grp/${clst_grp_target}"
-
-if [ "${clst_grp_type}" = "pkgset" ]
-then
- export clst_myemergeopts="${clst_myemergeopts} --noreplace"
-else
- export clst_FETCH=1
- # This is necessary since we're setting the above variable and the emerge
- # opts have already been set
- setup_myemergeopts
-fi
-
-run_merge ${clst_grp_packages} || exit 1
diff --git a/targets/grp/grp-controller.sh b/targets/grp/grp-controller.sh
deleted file mode 100755
index 0f1a5b5b..00000000
--- a/targets/grp/grp-controller.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-source ${clst_shdir}/support/functions.sh
-
-case $1 in
- enter)
- ${clst_CHROOT} ${clst_chroot_path}
- ;;
-
- run)
- shift
- export clst_grp_type=$1
- shift
- export clst_grp_target=$1
- shift
-
- export clst_grp_packages="$*"
- exec_in_chroot ${clst_shdir}/grp/grp-chroot.sh
- ;;
-
- preclean)
- exec_in_chroot ${clst_shdir}/grp/grp-preclean-chroot.sh
- ;;
-
- clean)
- exit 0
- ;;
-
- *)
- exit 1
- ;;
-
-esac
-exit $?
diff --git a/targets/grp/grp-preclean-chroot.sh b/targets/grp/grp-preclean-chroot.sh
deleted file mode 100755
index 98c166fe..00000000
--- a/targets/grp/grp-preclean-chroot.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-source /tmp/chroot-functions.sh
-
-cleanup_stages
-
-gconftool-2 --shutdown
diff --git a/targets/livecd-stage1/livecd-stage1-chroot.sh b/targets/livecd-stage1/chroot.sh
index 9ddf8d42..d143927e 100755
--- a/targets/livecd-stage1/livecd-stage1-chroot.sh
+++ b/targets/livecd-stage1/chroot.sh
@@ -2,7 +2,4 @@
source /tmp/chroot-functions.sh
-## START BUILD
-setup_pkgmgr
-
run_merge --update --deep --newuse "${clst_packages}"
diff --git a/targets/livecd-stage1/livecd-stage1-controller.sh b/targets/livecd-stage1/controller.sh
index 7bf3bce1..ae897da9 100755
--- a/targets/livecd-stage1/livecd-stage1-controller.sh
+++ b/targets/livecd-stage1/controller.sh
@@ -10,12 +10,7 @@ case $1 in
export clst_packages="$*"
mkdir -p ${clst_chroot_path}/usr/livecd ${clst_chroot_path}/tmp
exec_in_chroot \
- ${clst_shdir}/${clst_target}/${clst_target}-chroot.sh
- echo "${clst_packages}" > ${clst_chroot_path}/tmp/packages.txt
- ;;
-
- clean)
- find ${clst_chroot_path}/usr/lib -iname "*.pyc" -exec rm -f {} \;
+ ${clst_shdir}/${clst_target}/chroot.sh
;;
esac
exit $?
diff --git a/targets/livecd-stage1/livecd-stage1-preclean-chroot.sh b/targets/livecd-stage1/preclean-chroot.sh
index 5353f67d..5353f67d 100755
--- a/targets/livecd-stage1/livecd-stage1-preclean-chroot.sh
+++ b/targets/livecd-stage1/preclean-chroot.sh
diff --git a/targets/livecd-stage2/livecd-stage2-controller.sh b/targets/livecd-stage2/controller.sh
index 5834c837..57d947a1 100755
--- a/targets/livecd-stage2/livecd-stage2-controller.sh
+++ b/targets/livecd-stage2/controller.sh
@@ -1,7 +1,6 @@
#!/bin/bash
source ${clst_shdir}/support/functions.sh
-source ${clst_shdir}/support/filesystem-functions.sh
case $1 in
pre-kmerge)
@@ -9,27 +8,21 @@ case $1 in
exec_in_chroot ${clst_shdir}/support/pre-kmerge.sh
;;
- post-kmerge)
- # Cleans up the build environment after the kernels are compiled
- exec_in_chroot ${clst_shdir}/support/post-kmerge.sh
- ;;
-
kernel)
shift
- export clst_kname="$1"
+ export kname="$1"
- # if we have our own linuxrc, copy it in
- if [ -n "${clst_linuxrc}" ]
- then
- cp -pPR ${clst_linuxrc} ${clst_chroot_path}/tmp/linuxrc
- fi
+ [ -n "${clst_linuxrc}" ] && \
+ copy_to_chroot ${clst_linuxrc} /tmp/linuxrc
exec_in_chroot ${clst_shdir}/support/kmerge.sh
- delete_from_chroot tmp/linuxrc
+ delete_from_chroot /tmp/linuxrc
- extract_modules ${clst_chroot_path} ${clst_kname}
- #16:12 <@solar> kernel_name=foo
- #16:13 <@solar> eval clst_boot_kernel_${kernel_name}_config=bar
- #16:13 <@solar> eval echo \$clst_boot_kernel_${kernel_name}_config
+ extract_modules ${clst_chroot_path} ${kname}
+ ;;
+
+ pre-distkmerge)
+ # Install dracut
+ exec_in_chroot ${clst_shdir}/support/pre-distkmerge.sh
;;
preclean)
@@ -42,12 +35,7 @@ case $1 in
echo "${clst_livecd_type}. You should switch to using"
echo "generic-livecd instead."
fi
- cp -pPR ${clst_sharedir}/livecd/files/generic.motd.txt \
- ${clst_sharedir}/livecd/files/universal.motd.txt \
- ${clst_sharedir}/livecd/files/minimal.motd.txt \
- ${clst_sharedir}/livecd/files/livecd.motd.txt \
- ${clst_sharedir}/livecd/files/gamecd.motd.txt \
- ${clst_chroot_path}/etc
+ cp -pPR ${clst_sharedir}/livecd/files/*.motd.txt ${clst_chroot_path}/etc
;;
*)
if [ -n "${clst_livecd_motd}" ]
@@ -64,32 +52,11 @@ case $1 in
${clst_chroot_path}/root/.bash_profile
cp -f ${clst_sharedir}/livecd/files/livecd-local.start \
${clst_chroot_path}/etc/conf.d/local.start
-
- # execute copy gamecd.conf if we're a gamecd
- if [ "${clst_livecd_type}" = "gentoo-gamecd" ]
- then
- if [ -n "${clst_gamecd_conf}" ]
- then
- cp -f ${clst_gamecd_conf} ${clst_chroot_path}/tmp/gamecd.conf
- else
- echo "gamecd/conf is required for a gamecd!"
- exit 1
- fi
- fi
;;
livecd-update)
# Now, finalize and tweak the livecd fs (inside of the chroot)
exec_in_chroot ${clst_shdir}/support/livecdfs-update.sh
-
- # Move over the xinitrc (if applicable)
- # This is moved here, so we can override any default xinitrc
- if [ -n "${clst_livecd_xinitrc}" ]
- then
- mkdir -p ${clst_chroot_path}/etc/X11/xinit
- cp -f ${clst_livecd_xinitrc} \
- ${clst_chroot_path}/etc/X11/xinit/xinitrc
- fi
;;
rc-update)
@@ -101,16 +68,11 @@ case $1 in
;;
clean)
- if [ "${clst_livecd_type}" = "gentoo-gamecd" ] \
- || [ "${clst_livecd_type}" = "gentoo-release-minimal" ] \
- || [ "${clst_livecd_type}" = "gentoo-release-universal" ]
+ if [ "${clst_livecd_type}" = "gentoo-release-minimal" ]
then
# Clean out man, info and doc files
rm -rf ${clst_chroot_path}/usr/share/{man,doc,info}/*
- # Zap all .pyc and .pyo files
- find ${clst_chroot_path}/usr/lib* -iname "*.py[co]" -exec rm -f {} \;
fi
- rm -f ${clst_chroot_path}/tmp/packages.txt
;;
bootloader)
@@ -135,6 +97,10 @@ case $1 in
cp -f ${clst_sharedir}/livecd/files/README.txt $1
fi
+ if [ -e ${clst_chroot_path}/boot/memtest86plus/ ]; then
+ cp -rv ${clst_chroot_path}/boot/memtest86plus/* $1
+ fi
+
case ${clst_livecd_type} in
gentoo-release-livecd)
mkdir -p $1/snapshots
@@ -147,8 +113,7 @@ case $1 in
exit 1
fi
fi
- cp -f ${clst_snapshot_path} $1/snapshots
- cp -f ${clst_snapshot_path}.DIGESTS $1/snapshots
+ cp -f ${clst_snapshot_path}{,.DIGESTS} $1/snapshots
;;
gentoo-release-livedvd)
targets="distfiles snapshots stages"
@@ -171,8 +136,7 @@ case $1 in
continue
;;
snapshots)
- cp -f ${clst_snapshot_path} $1/snapshots
- cp -f ${clst_snapshot_path}.DIGESTS $1/snapshots
+ cp -f ${clst_snapshot_path}{,.DIGESTS} $1/snapshots
;;
stages)
### TODO: make this copy stages
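
The two snapshot copies were collapsed into a single cp using brace expansion: ${clst_snapshot_path}{,.DIGESTS} expands to the snapshot path and the same path with .DIGESTS appended. A minimal illustration with a made-up filename:

    clst_snapshot_path=/var/tmp/catalyst/snapshots/gentoo-20200101.tar.xz
    # echo shows the expansion: the snapshot and its .DIGESTS file, then the destination.
    echo cp -f ${clst_snapshot_path}{,.DIGESTS} /mnt/cdroot/snapshots
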
diff --git a/targets/netboot/controller.sh b/targets/netboot/controller.sh
new file mode 100755
index 00000000..cc946c2c
--- /dev/null
+++ b/targets/netboot/controller.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+source ${clst_shdir}/support/functions.sh
+
+case ${1} in
+ build_packages)
+ echo ">>> Building packages ..."
+ shift
+ ROOT="/" \
+ clst_packages="$*" \
+ exec_in_chroot \
+ ${clst_shdir}/${clst_target}/pkg.sh
+ ;;
+
+ pre-kmerge)
+ # Sets up the build environment before any kernels are compiled
+ exec_in_chroot ${clst_shdir}/support/pre-kmerge.sh
+ ;;
+
+ kernel)
+ shift
+ export kname="$1"
+
+ [ -n "${clst_linuxrc}" ] && \
+ copy_to_chroot ${clst_linuxrc} /tmp/linuxrc
+ [ -n "${clst_busybox_config}" ] && \
+ copy_to_chroot ${clst_busybox_config} /tmp/busy-config
+
+ exec_in_chroot ${clst_shdir}/support/kmerge.sh
+
+ delete_from_chroot /tmp/linuxrc
+ delete_from_chroot /tmp/busy-config
+
+ extract_modules ${clst_chroot_path} ${kname}
+ ;;
+
+ image)
+ # Creates the base initramfs image for the netboot
+ echo -e ">>> Preparing Image ..."
+ shift
+
+ # Copy remaining files over to the initramfs target
+ clst_files="${@}" \
+ exec_in_chroot \
+ ${clst_shdir}/${clst_target}/copyfile.sh
+ ;;
+
+ final)
+ # For each arch, fetch the kernel images and put them in builds/
+ echo -e ">>> Copying completed kernels to ${clst_target_path}/ ..."
+ ${clst_shdir}/support/netboot-final.sh
+ ;;
+
+ clean)
+ exit 0;;
+
+ *)
+ exit 1;;
+esac
+
+exit $?
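
The new controller relies on per-command environment assignments (VAR=value command) instead of exporting, as seen with ROOT, clst_packages and clst_files above. A tiny sketch of that shell idiom; the file list is invented and bash -c stands in for exec_in_chroot:

    # The assignment is visible only to the one command it prefixes.
    clst_files="/bin/busybox /etc/resolv.conf" \
        bash -c 'echo "files to copy: ${clst_files}"'
    echo "${clst_files:-unset outside the one-shot assignment}"
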
diff --git a/targets/netboot2/netboot2-copyfile.sh b/targets/netboot/copyfile.sh
index cc1a1181..cc1a1181 100755
--- a/targets/netboot2/netboot2-copyfile.sh
+++ b/targets/netboot/copyfile.sh
diff --git a/targets/netboot2/nb-busybox.cf b/targets/netboot/nb-busybox.cf
index db02678f..db02678f 100644
--- a/targets/netboot2/nb-busybox.cf
+++ b/targets/netboot/nb-busybox.cf
diff --git a/targets/netboot/netboot-chroot.sh b/targets/netboot/netboot-chroot.sh
deleted file mode 100755
index 3115cac8..00000000
--- a/targets/netboot/netboot-chroot.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-source /tmp/chroot-functions.sh
-
-# START BUILD
-run_merge "${clst_packages}"
diff --git a/targets/netboot/netboot-combine.sh b/targets/netboot/netboot-combine.sh
deleted file mode 100755
index 5eef0d01..00000000
--- a/targets/netboot/netboot-combine.sh
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/bin/bash
-
-source ${clst_shdir}/support/chroot-functions.sh
-source ${clst_shdir}/support/functions.sh
-source ${clst_shdir}/support/filesystem-functions.sh
-
-update_env_settings
-
-setup_myfeatures
-
-# Ssetup our environment
-export FEATURES="${clst_myfeatures}"
-
-# First install the boot package that we need
-booter=""
-case ${clst_hostarch} in
- alpha)
- booter=""
- ;;
- arm)
- booter=""
- ;;
- hppa)
- booter=palo
- ;;
- sparc*)
- booter=sparc-utils
- ;;
- x86|amd64)
- booter=netboot
- ;;
- *)
- exit 1
- ;;
-esac
-
-#if [ ! -z "${booter}" ] ; then
-# run_merge ${booter} || exit 1
-#fi
-
-extract_kernels ${clst_chroot_path}/tmp
-
-# Then generate the netboot image ! :D
-for kname in ${clst_boot_kernel}
-do
- mkdir -p ${clst_chroot_path}/tmp/staging/initrd-${kname}
- cp -r ${clst_chroot_path}/tmp/image ${clst_chroot_path}/tmp/staging/initrd-${kname}
- extract_modules ${clst_chroot_path}/tmp/staging/initrd-${kname} ${kname}
- create_normal_loop ${clst_chroot_path}/tmp/staging/initrd-${kname} ${clst_target_path}/ initrd-${kname}.igz
- rm -r ${clst_chroot_path}/tmp/staging/initrd-${kname}
-
- case ${clst_hostarch} in
- alpha)
- # Until aboot is patched this is broken currently.
- # please use catalyst 1.1.5 or older
-
- #TEST TEST TEST TEST
- #https://lists.debian.org/debian-alpha/2004/07/msg00094.html
- #make \
- # -C /usr/src/linux \
- # INITRD=/initrd.gz \
- # HPATH="/usr/src/linux/include" \
- # vmlinux bootpfile \
- # || exit 1
- #cp /usr/src/linux/arch/alpha/boot/bootpfile /netboot.alpha || exit 1
- ;;
- arm)
- #TEST TEST TEST TEST
- cp /${clst_chroot_path}/tmp/${kname} /netboot-${kname}.arm || exit 1
- cat /${clst_target_path}/initrd-${kname}.igz >> /${clst_target_path}/netboot-${kname}.arm || exit 1
- #make \
- # -C /usr/src/linux \
- # INITRD=/initrd.gz \
- # bootpImage \
- # || exit 1
- ;;
- hppa)
- # We have to remove the previous image because the file is
- # considered as a tape by palo and then not truncated but rewritten.
- #TEST TEST TEST TEST
- rm -f /netboot-${kname}.hppa
-
- palo \
- -k /${clst_chroot_path}/tmp/${kname} \
- -r /${clst_target_path}/initrd-${kname}.igz \
- -s /${clst_target_path}/netboot-${kname}.hppa \
- -f foo \
- -b /usr/share/palo/iplboot \
- -c "0/vmlinux root=/dev/ram0 ${cmdline_opts}" \
- || exit 1
- ;;
- sparc*)
- #TEST TEST TEST TEST
- #elftoaout -o /netboot-${kname}.${clst_hostarch} /usr/src/linux/vmlinux
- #elftoaout -o /netboot-${kname}.${clst_hostarch} /${kname}
- #piggy=${clst_hostarch/sparc/piggyback}
- #${piggy} /netboot-${kname}.${clst_hostarch} /usr/src/linux/System.map /initrd-${kname}.igz
- ;;
- x86)
- mknbi-linux \
- -k /${clst_chroot_path}/tmp/${kname} \
- -r /${clst_target_path}/initrd-${kname}.igz \
- -o /${clst_target_path}/netboot-${kname}.x86 \
- -x \
- -a "root=/dev/ram0 ${cmdline_opts}" \
- || exit 1
- ;;
- *)
- exit 1
- ;;
- esac
-done
diff --git a/targets/netboot/netboot-controller.sh b/targets/netboot/netboot-controller.sh
deleted file mode 100755
index 93feb266..00000000
--- a/targets/netboot/netboot-controller.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/bash
-
-source ${clst_shdir}/support/functions.sh
-source ${clst_shdir}/support/filesystem-functions.sh
-
-
-case ${1} in
- #### Couldnt busybox step be in packages ....
- build_packages)
- shift
- clst_root_path="/" \
- clst_packages="$*" \
- exec_in_chroot \
- ${clst_shdir}/${clst_target}/${clst_target}-chroot.sh
- ;;
-
- busybox)
- # Custom busybox config support
- if [ -f "${clst_netboot_busybox_config}" ]
- then
- mkdir -p ${clst_chroot_path}/etc/busybox/${clst_CHOST}
- cp -v ${clst_netboot_busybox_config} \
- ${clst_chroot_path}/etc/busybox/${clst_CHOST}/busybox.config
- clst_use="savedconfig"
- fi
-
- # Main Busybox emerge
- clst_root_path="/" \
- clst_use="${clst_use} netboot make-busybox-symlinks" \
- clst_myemergeopts="${clst_myemergeopts} -O" \
- clst_packages="busybox" \
- exec_in_chroot \
- ${clst_shdir}/${clst_target}/${clst_target}-chroot.sh
- ;;
-
- pre-kmerge)
- # Sets up the build environment before any kernels are compiled
- #exec_in_chroot ${clst_shdir}/support/pre-kmerge.sh
- ;;
-
- post-kmerge)
- # Cleans up the build environment after the kernels are compiled
- #exec_in_chroot ${clst_shdir}/support/post-kmerge.sh
- ;;
-
- kernel)
- shift
- export clst_kname="$1"
- export clst_root_path="/"
- #exec_in_chroot ${clst_shdir}/support/pre-kmerge.sh
- #exec_in_chroot ${clst_shdir}/support/kmerge.sh
- #exec_in_chroot ${clst_shdir}/support/post-kmerge.sh
- #extract_kernels kernels
- ;;
-
- image)
- #Creates the base initrd image for the netboot
- shift
- # Could this step be a parameter in case there is a different
- # baselayout to add???
- clst_myemergeopts="${clst_myemergeopts} --nodeps" \
- clst_packages="netboot-base" \
- exec_in_chroot \
- ${clst_shdir}/${clst_target}/${clst_target}-chroot.sh
-
- clst_files="${@}" \
- exec_in_chroot \
- ${clst_shdir}/${clst_target}/${clst_target}-image.sh
- ;;
-
- finish)
- ${clst_shdir}/${clst_target}/${clst_target}-combine.sh
- ;;
-
- clean)
- exit 0;;
-
- *)
- exit 1;;
-esac
-
-exit $?
diff --git a/targets/netboot/netboot-image.sh b/targets/netboot/netboot-image.sh
deleted file mode 100755
index 10bdaed3..00000000
--- a/targets/netboot/netboot-image.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-source /tmp/chroot-functions.sh
-
-update_env_settings
-
-echo "Copying files to ${clst_root_path}"
-clst_files="/bin/busybox ${clst_files} "
-for f in ${clst_files}
-do
- copy_file ${f}
-done
-echo "Done copying files"
diff --git a/targets/netboot2/netboot2-pkg.sh b/targets/netboot/pkg.sh
index 29da7134..2ad9491c 100755
--- a/targets/netboot2/netboot2-pkg.sh
+++ b/targets/netboot/pkg.sh
@@ -4,10 +4,10 @@ source /tmp/chroot-functions.sh
update_env_settings
-setup_myfeatures
+setup_features
show_debug
# START BUILD
-run_merge ${clst_packages}
+ROOT="$ROOT" run_merge ${clst_packages}
diff --git a/targets/netboot2/netboot2-controller.sh b/targets/netboot2/netboot2-controller.sh
deleted file mode 100755
index 322d9a86..00000000
--- a/targets/netboot2/netboot2-controller.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/bin/bash
-
-source ${clst_shdir}/support/functions.sh
-source ${clst_shdir}/support/filesystem-functions.sh
-
-case ${1} in
- build_packages)
- echo ">>> Building packages ..."
- shift
- clst_root_path="/" \
- clst_packages="$*" \
- exec_in_chroot \
- ${clst_shdir}/${clst_target}/${clst_target}-pkg.sh
- ;;
-
- pre-kmerge)
- # Sets up the build environment before any kernels are compiled
- exec_in_chroot ${clst_shdir}/support/pre-kmerge.sh
- ;;
-
- post-kmerge)
- # Cleans up the build environment after the kernels are compiled
- exec_in_chroot ${clst_shdir}/support/post-kmerge.sh
- ;;
-
- kernel)
- shift
- export clst_kname="$1"
-
- # if we have our own linuxrc, copy it in
- if [ -n "${clst_linuxrc}" ]
- then
- cp -pPR ${clst_linuxrc} ${clst_chroot_path}/tmp/linuxrc
- fi
- if [ -n "${clst_busybox_config}" ]
- then
- cp ${clst_busybox_config} ${clst_chroot_path}/tmp/busy-config
- fi
-
- exec_in_chroot ${clst_shdir}/support/kmerge.sh
-
- delete_from_chroot tmp/linuxrc
- delete_from_chroot tmp/busy-config
-
- extract_modules ${clst_chroot_path} ${clst_kname}
- #16:12 <@solar> kernel_name=foo
- #16:13 <@solar> eval clst_boot_kernel_${kernel_name}_config=bar
- #16:13 <@solar> eval echo \$clst_boot_kernel_${kernel_name}_config
- ;;
-
- image)
- # Creates the base initramfs image for the netboot
- echo -e ">>> Preparing Image ..."
- shift
-
- # Copy remaining files over to the initramfs target
- clst_files="${@}" \
- exec_in_chroot \
- ${clst_shdir}/${clst_target}/${clst_target}-copyfile.sh
- ;;
-
- final)
- # For each arch, fetch the kernel images and put them in builds/
- echo -e ">>> Copying completed kernels to ${clst_target_path}/ ..."
- ${clst_shdir}/support/netboot2-final.sh
- ;;
-
- clean)
- exit 0;;
-
- *)
- exit 1;;
-esac
-
-exit $?
diff --git a/targets/stage1/build.py b/targets/stage1/build.py
index fa4fd136..85675511 100755
--- a/targets/stage1/build.py
+++ b/targets/stage1/build.py
@@ -1,21 +1,19 @@
-#!/usr/bin/python
+#!/usr/bin/python3
import os
import sys
import portage
+from portage.dep import dep_getkey
+from portage.util import grabfile_package, stack_lists
# this loads files from the profiles ...
# wrap it here to take care of the different
# ways portage handles stacked profiles
-# last case is for portage-2.1_pre*
+
+
def scan_profile(path):
- if "grab_stacked" in dir(portage):
- return portage.grab_stacked(path, portage.settings.profiles, portage.grabfile, incremental_lines=1)
- else:
- if "grab_multiple" in dir(portage):
- return portage.stack_lists(portage.grab_multiple(path, portage.settings.profiles, portage.grabfile), incremental=1)
- else:
- return portage.stack_lists([portage.grabfile_package(os.path.join(x, path)) for x in portage.settings.profiles], incremental=1)
+ return stack_lists([grabfile_package(os.path.join(x, path)) for x in portage.settings.profiles], incremental=1)
+
# loaded the stacked packages / packages.build files
pkgs = scan_profile("packages")
@@ -27,14 +25,14 @@ buildpkgs = scan_profile("packages.build")
# we replace the buildpkg item with the one in the
# system profile (it may have <,>,=,etc... operators
# and version numbers)
-for idx in range(0, len(pkgs)):
- try:
- bidx = buildpkgs.index(portage.dep_getkey(pkgs[idx]))
- buildpkgs[bidx] = pkgs[idx]
- if buildpkgs[bidx][0:1] == "*":
- buildpkgs[bidx] = buildpkgs[bidx][1:]
- except Exception:
- pass
+for pkg in pkgs:
+ try:
+ bidx = buildpkgs.index(dep_getkey(pkg))
+ buildpkgs[bidx] = pkg
+ if buildpkgs[bidx][0:1] == "*":
+ buildpkgs[bidx] = buildpkgs[bidx][1:]
+ except Exception:
+ pass
for b in buildpkgs:
- sys.stdout.write(b + " ")
+ sys.stdout.write(b + " ")
diff --git a/targets/stage1/chroot.sh b/targets/stage1/chroot.sh
new file mode 100755
index 00000000..e0587b59
--- /dev/null
+++ b/targets/stage1/chroot.sh
@@ -0,0 +1,106 @@
+#!/bin/bash
+
+source /tmp/chroot-functions.sh
+
+for module_path in /usr/lib/*/site-packages/portage/__init__.py; do
+ # Find the python interpreter
+ interpreter=$(echo $module_path | cut -d/ -f4)
+
+ buildpkgs=($($interpreter /tmp/build.py 2>/dev/null))
+ [[ $? == 0 ]] && break
+done
+
+## Sanity check profile
+if [[ ${#buildpkgs[@]} -eq 0 ]]; then
+ echo "Your profile seems to be broken."
+ echo "Could not build a list of build packages."
+ echo "Double check your ${clst_port_conf}/make.profile link and the 'packages' files."
+ exit 1
+fi
+
+# Setup our environment
+[ -n "${clst_BINDIST}" ] && BINDIST="bindist"
+BOOTSTRAP_USE="$(portageq envvar BOOTSTRAP_USE)"
+
+FEATURES="${FEATURES} nodoc noman noinfo"
+
+sed -i -e 's:BINPKG_COMPRESS="bzip2":BINPKG_COMPRESS="zstd":' \
+ /usr/share/portage/config/make.globals
+
+# We need to ensure the base stage3 has USE="bindist"
+# if BINDIST is set to avoid issues with openssl / openssh
+[ -e ${clst_make_conf} ] && echo "USE=\"${BINDIST} ${USE}\"" >> ${clst_make_conf}
+
+# Update stage3
+if [ -n "${clst_update_seed}" ]; then
+ if [ "${clst_update_seed}" == "yes" ]; then
+ echo "Updating seed stage..."
+ if [ -n "${clst_update_seed_command}" ]; then
+ ROOT=/ run_merge --buildpkg=n "${clst_update_seed_command}"
+ elif grep -q '^\[changed-subslot\]' /usr/share/portage/config/sets/portage.conf; then
+ ROOT=/ run_merge --ignore-built-slot-operator-deps y @changed-subslot
+ else
+ ROOT=/ run_merge --update --deep --newuse --complete-graph --rebuild-if-new-ver gcc
+ fi
+ elif [ "${clst_update_seed}" != "no" ]; then
+ echo "Invalid setting for update_seed: ${clst_update_seed}"
+ exit 1
+ fi
+
+ # reset emerge options for the target
+ clst_update_seed=no setup_emerge_opts
+else
+ echo "Skipping seed stage update..."
+fi
+
+# Clear USE
+[ -e ${clst_make_conf} ] && sed -i -e "/^USE=\"${BINDIST} ${USE}\"/d" ${clst_make_conf}
+
+export ROOT="${clst_root_path}"
+mkdir -p "$ROOT"
+
+## START BUILD
+# First, we drop in a known-good baselayout
+[ -e ${clst_make_conf} ] && echo "USE=\"${USE} build\"" >> ${clst_make_conf}
+run_merge --oneshot --nodeps sys-apps/baselayout
+sed -i "/USE=\"${USE} build\"/d" ${clst_make_conf}
+
+echo "$locales" > /etc/locale.gen
+for etc in /etc "$ROOT"/etc; do
+ echo "LANG=C.UTF8" > ${etc}/env.d/02locale
+done
+update_env_settings
+
+# Now, we install our packages
+if [ -e ${clst_make_conf} ]; then
+ echo "CATALYST_USE=\"-* build ${BINDIST} ${clst_CATALYST_USE}\"" >> ${clst_make_conf}
+ echo "USE=\"\${CATALYST_USE} ${USE} ${BOOTSTRAP_USE} ${clst_HOSTUSE}\"" >> ${clst_make_conf}
+
+ for useexpand in ${clst_HOSTUSEEXPAND}; do
+ x="clst_${useexpand}"
+ echo "${useexpand}=\"${!x}\"" \
+ >> ${clst_make_conf}
+ done
+fi
+
+run_merge --implicit-system-deps=n --oneshot "${buildpkgs[@]}"
+
+# TODO: Drop this when locale-gen in stable glibc supports ROOT.
+#
+# locale-gen does not support the ROOT variable, and as such glibc simply does
+# not run locale-gen when ROOT is set. Since we've set LANG, we need to run
+# locale-gen explicitly.
+if [ -x "$(command -v locale-gen)" ]; then
+ locale-gen --destdir "$ROOT"/ || die "locale-gen failed"
+fi
+
+# Why are we removing these? Don't we need them for final make.conf?
+for useexpand in ${clst_HOSTUSEEXPAND}; do
+ x="clst_${useexpand}"
+ sed -i "/${useexpand}=\"${!x}\"/d" \
+ ${clst_make_conf}
+done
+
+# Clear USE
+[ -e ${clst_make_conf} ] && sed -i -e "/^CATALYST_USE/d" ${clst_make_conf}
+[ -e ${clst_make_conf} ] && sed -i -e "/^USE=\"/s/\${CATALYST_USE} ${USE} ${BOOTSTRAP_USE}//" ${clst_make_conf}
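
The interpreter-detection loop at the top of the new stage1 chroot.sh derives the python name from the path of an installed portage module and keeps the first interpreter for which build.py succeeds. A minimal sketch of the cut invocation it uses, with an example path:

    module_path=/usr/lib/python3.9/site-packages/portage/__init__.py
    interpreter=$(echo ${module_path} | cut -d/ -f4)
    echo "${interpreter}"   # -> python3.9
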
diff --git a/targets/stage1/stage1-controller.sh b/targets/stage1/controller.sh
index ac813de0..0db1614d 100755
--- a/targets/stage1/stage1-controller.sh
+++ b/targets/stage1/controller.sh
@@ -3,27 +3,24 @@
source "${clst_shdir}/support/functions.sh"
case "$1" in
- enter)
- ;;
-
run)
cp "${clst_shdir}/stage1/build.py" "${clst_chroot_path}/tmp"
# Setup "ROOT in chroot" dir
- install -d "${clst_chroot_path}/${clst_root_path}/etc"
- install -d "${clst_chroot_path}/${clst_root_path}${clst_port_conf}"
+ install -d "${clst_stage_path}/etc"
+ install -d "${clst_stage_path}/${clst_port_conf}"
# Setup make.conf and make.profile link in "ROOT in chroot":
copy_to_chroot "${clst_chroot_path}${clst_make_conf}" "${clst_root_path}${clst_port_conf}"
# Enter chroot, execute our build script
exec_in_chroot \
- "${clst_shdir}/${clst_target}/${clst_target}-chroot.sh" \
+ "${clst_shdir}/${clst_target}/chroot.sh" \
|| exit 1
;;
preclean)
- exec_in_chroot "${clst_shdir}/${clst_target}/${clst_target}-preclean-chroot.sh" || exit 1
+ exec_in_chroot "${clst_shdir}/${clst_target}/preclean-chroot.sh" || exit 1
;;
clean)
diff --git a/targets/stage1/stage1-preclean-chroot.sh b/targets/stage1/preclean-chroot.sh
index 1b623f18..2dc761e9 100755
--- a/targets/stage1/stage1-preclean-chroot.sh
+++ b/targets/stage1/preclean-chroot.sh
@@ -8,8 +8,6 @@ source /tmp/chroot-functions.sh
update_env_settings
show_debug
-# Right now these will parse the unpacked stage3 but change things
-# inside of /tmp/stage1root due to ROOT env variable
setup_gcc
setup_binutils
@@ -24,8 +22,6 @@ fi
# Clean out man, info and doc files
rm -rf "${ROOT}"/usr/share/{man,doc,info}/*
-# Zap all .pyc and .pyo files
-find "${ROOT}"/ -iname "*.py[co]" -exec rm -f {} \;
# unset ROOT for safety (even though cleanup_stages doesn't use it)
unset ROOT
diff --git a/targets/stage1/stage1-chroot.sh b/targets/stage1/stage1-chroot.sh
deleted file mode 100755
index 0caf49ee..00000000
--- a/targets/stage1/stage1-chroot.sh
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/bin/bash
-
-source /tmp/chroot-functions.sh
-
-# We do this first, so we know our package list for --debug
-export clst_buildpkgs="$(/tmp/build.py)"
-
-# Setup our environment
-[ -n "${clst_BINDIST}" ] && BINDIST="bindist"
-BOOTSTRAP_USE="$(portageq envvar BOOTSTRAP_USE)"
-
-FEATURES="${clst_myfeatures} nodoc noman noinfo -news"
-
-## Sanity check profile
-if [ -z "${clst_buildpkgs}" ]
-then
- echo "Your profile seems to be broken."
- echo "Could not build a list of build packages."
- echo "Double check your ${clst_port_conf}/make.profile link and the 'packages' files."
- exit 1
-fi
-
-## Setup seed pkgmgr to ensure latest
-clst_root_path=/ setup_pkgmgr "build"
-
-# We need to ensure the base stage3 has USE="bindist"
-# if BINDIST is set to avoid issues with openssl / openssh
-[ -e ${clst_make_conf} ] && echo "USE=\"${BINDIST} ${USE}\"" >> ${clst_make_conf}
-
-# Update stage3
-if [ -n "${clst_update_seed}" ]; then
- if [ "${clst_update_seed}" == "yes" ]; then
- echo "Updating seed stage..."
- if [ -n "${clst_update_seed_command}" ]; then
- clst_root_path=/ run_merge "--buildpkg=n ${clst_update_seed_command}"
- else
- clst_root_path=/ run_merge "--update --deep --newuse --complete-graph --rebuild-if-new-ver gcc"
- fi
- elif [ "${clst_update_seed}" != "no" ]; then
- echo "Invalid setting for update_seed: ${clst_update_seed}"
- exit 1
- fi
-
- # reset emerge options for the target
- clst_update_seed=no setup_myemergeopts
-else
- echo "Skipping seed stage update..."
-fi
-
-# Clear USE
-[ -e ${clst_make_conf} ] && ${clst_sed} -i -e "/^USE=\"${BINDIST} ${USE}\"/d" ${clst_make_conf}
-make_destpath /tmp/stage1root
-
-## START BUILD
-# First, we drop in a known-good baselayout
-[ -e ${clst_make_conf} ] && echo "USE=\"${USE} -build\"" >> ${clst_make_conf}
-run_merge "--oneshot --nodeps sys-apps/baselayout"
-${clst_sed} -i "/USE=\"${USE} -build\"/d" ${clst_make_conf}
-
-# Now, we install our packages
-if [ -e ${clst_make_conf} ]; then
- echo "CATALYST_USE=\"-* build ${BINDIST} ${clst_CATALYST_USE}\"" >> ${clst_make_conf}
- echo "USE=\"\${CATALYST_USE} ${USE} ${BOOTSTRAP_USE} ${clst_HOSTUSE}\"" >> ${clst_make_conf}
-
- for useexpand in ${clst_HOSTUSEEXPAND}; do
- x="clst_${useexpand}"
- echo "${useexpand}=\"${!x}\"" \
- >> ${clst_make_conf}
- done
-fi
-
-run_merge "--oneshot ${clst_buildpkgs}"
-
-# Why are we removing these? Don't we need them for final make.conf?
-for useexpand in ${clst_HOSTUSEEXPAND}; do
- x="clst_${useexpand}"
- ${clst_sed} -i "/${useexpand}=\"${!x}\"/d" \
- ${clst_make_conf}
-done
-
-# Clear USE
-[ -e ${clst_make_conf} ] && ${clst_sed} -i -e "/^CATALYST_USE/d" ${clst_make_conf}
-[ -e ${clst_make_conf} ] && ${clst_sed} -i -e "/^USE=\"/s/\${CATALYST_USE} ${USE} ${BOOTSTRAP_USE}//" ${clst_make_conf}
diff --git a/targets/stage2/chroot.sh b/targets/stage2/chroot.sh
new file mode 100755
index 00000000..4448239d
--- /dev/null
+++ b/targets/stage2/chroot.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+source /tmp/chroot-functions.sh
+
+# Setup the environment
+export FEATURES="${FEATURES} nodoc noman noinfo"
+export CONFIG_PROTECT="-* /etc/locale.gen"
+
+echo "$locales" > /etc/locale.gen
+
+## START BUILD
+${clst_repo_basedir}/${clst_repo_name}/scripts/bootstrap.sh ${bootstrap_opts[@]} || exit 1
+
+# Replace modified /etc/locale.gen with default
+etc-update --automode -5
diff --git a/targets/stage2/controller.sh b/targets/stage2/controller.sh
new file mode 100755
index 00000000..8ee51a5b
--- /dev/null
+++ b/targets/stage2/controller.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+source ${clst_shdir}/support/functions.sh
+
+case $1 in
+ run)
+ shift
+ export clst_packages="$*"
+ exec_in_chroot \
+ ${clst_shdir}/${clst_target}/chroot.sh
+ ;;
+
+ preclean)
+ exec_in_chroot ${clst_shdir}/${clst_target}/preclean-chroot.sh
+ ;;
+
+ clean)
+ exit 0
+ ;;
+
+ *)
+ exit 1
+ ;;
+esac
+exit $?
diff --git a/targets/stage2/stage2-preclean-chroot.sh b/targets/stage2/preclean-chroot.sh
index 3b693d81..84b267ec 100755
--- a/targets/stage2/stage2-preclean-chroot.sh
+++ b/targets/stage2/preclean-chroot.sh
@@ -11,15 +11,15 @@ cleanup_stages
if [ -n "${clst_CCACHE}" ]
then
- run_merge -C dev-util/ccache || exit 1
+ run_merge -C dev-util/ccache
fi
if [ -n "${clst_DISTCC}" ]
then
- run_merge -C sys-devel/distcc || exit 1
+ run_merge -C sys-devel/distcc
fi
if [ -n "${clst_ICECREAM}" ]
then
- run_merge -C sys-devel/icecream || exit 1
+ run_merge -C sys-devel/icecream
fi
diff --git a/targets/stage2/stage2-chroot.sh b/targets/stage2/stage2-chroot.sh
deleted file mode 100755
index 5fac858f..00000000
--- a/targets/stage2/stage2-chroot.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-source /tmp/chroot-functions.sh
-
-# Setup the environment
-export FEATURES="${clst_myfeatures} nodoc noman noinfo -news"
-
-## START BUILD
-${clst_repo_basedir}/${clst_repo_name}/scripts/bootstrap.sh ${bootstrap_opts} || exit 1
diff --git a/targets/stage2/stage2-controller.sh b/targets/stage2/stage2-controller.sh
deleted file mode 100755
index 25e51208..00000000
--- a/targets/stage2/stage2-controller.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-source ${clst_shdir}/support/functions.sh
-
-# Only put commands in this section that you want every target to execute.
-# This is a global default file and will affect every target
-case $1 in
- enter)
- ${clst_CHROOT} ${clst_chroot_path}
- ;;
-
- run)
- shift
- export clst_packages="$*"
- exec_in_chroot \
- ${clst_shdir}/${clst_target}/${clst_target}-chroot.sh
- ;;
-
- preclean)
- exec_in_chroot ${clst_shdir}/${clst_target}/${clst_target}-preclean-chroot.sh
- ;;
-
- clean)
- exit 0
- ;;
-
- *)
- exit 1
- ;;
-esac
-exit $?
diff --git a/targets/stage3/chroot.sh b/targets/stage3/chroot.sh
new file mode 100755
index 00000000..7dc1b6ea
--- /dev/null
+++ b/targets/stage3/chroot.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+source /tmp/chroot-functions.sh
+
+export CONFIG_PROTECT="-* /etc/locale.gen"
+
+echo "$locales" > /etc/locale.gen
+
+run_merge -e --update --deep --with-bdeps=y @system
+
+# Replace modified /etc/locale.gen with default
+etc-update --automode -5
diff --git a/targets/stage3/controller.sh b/targets/stage3/controller.sh
new file mode 100755
index 00000000..63f245a8
--- /dev/null
+++ b/targets/stage3/controller.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+source ${clst_shdir}/support/functions.sh
+
+case $1 in
+ run)
+ shift
+ export clst_packages="$*"
+ exec_in_chroot ${clst_shdir}/${clst_target}/chroot.sh
+ ;;
+
+ preclean)
+ exec_in_chroot ${clst_shdir}/${clst_target}/preclean-chroot.sh
+ ;;
+
+ clean)
+ exit 0
+ ;;
+
+ *)
+ exit 1
+ ;;
+esac
+exit $?
diff --git a/targets/stage3/stage3-preclean-chroot.sh b/targets/stage3/preclean-chroot.sh
index 33a700a6..1d801572 100755
--- a/targets/stage3/stage3-preclean-chroot.sh
+++ b/targets/stage3/preclean-chroot.sh
@@ -13,11 +13,11 @@ if [ -n "${clst_DISTCC}" ]
then
portageq has_version / sys-devel/distcc
if [ $? == 0 ]; then
- run_merge -C sys-devel/distcc || exit 1
+ run_merge -C sys-devel/distcc
fi
fi
if [ -n "${clst_ICECREAM}" ]
then
- run_merge -C sys-devel/icecream || exit 1
+ run_merge -C sys-devel/icecream
fi
diff --git a/targets/stage3/stage3-chroot.sh b/targets/stage3/stage3-chroot.sh
deleted file mode 100755
index 4f8bb0ee..00000000
--- a/targets/stage3/stage3-chroot.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-source /tmp/chroot-functions.sh
-
-## START BUILD
-setup_pkgmgr
-
-run_merge "-e --update --deep --with-bdeps=y @system"
diff --git a/targets/stage3/stage3-controller.sh b/targets/stage3/stage3-controller.sh
deleted file mode 100755
index df1479ea..00000000
--- a/targets/stage3/stage3-controller.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-source ${clst_shdir}/support/functions.sh
-
-# Only put commands in this section that you want every target to execute.
-# This is a global default file and will affect every target
-case $1 in
- enter)
- ${clst_CHROOT} ${clst_chroot_path}
- ;;
-
- run)
- shift
- export clst_packages="$*"
- exec_in_chroot ${clst_shdir}/${clst_target}/${clst_target}-chroot.sh
- ;;
-
- preclean)
- exec_in_chroot ${clst_shdir}/${clst_target}/${clst_target}-preclean-chroot.sh
- ;;
-
- clean)
- exit 0
- ;;
-
- *)
- exit 1
- ;;
-esac
-exit $?
diff --git a/targets/stage4/stage4-chroot.sh b/targets/stage4/chroot.sh
index d1838644..3b107e5c 100755
--- a/targets/stage4/stage4-chroot.sh
+++ b/targets/stage4/chroot.sh
@@ -2,9 +2,6 @@
source /tmp/chroot-functions.sh
-## START BUILD
-setup_pkgmgr
-
echo "Bringing system up to date using profile specific use flags"
run_merge -u @system
diff --git a/targets/stage4/controller.sh b/targets/stage4/controller.sh
new file mode 100755
index 00000000..9c1866e4
--- /dev/null
+++ b/targets/stage4/controller.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+source ${clst_shdir}/support/functions.sh
+
+case $1 in
+ pre-kmerge)
+ # Sets up the build environment before any kernels are compiled
+ exec_in_chroot ${clst_shdir}/support/pre-kmerge.sh
+ ;;
+
+ kernel)
+ shift
+ export kname="$1"
+
+ # If we have our own linuxrc, copy it in
+ [ -n "${clst_linuxrc}" ] && \
+ copy_to_chroot ${clst_linuxrc} /tmp/linuxrc
+ exec_in_chroot ${clst_shdir}/support/kmerge.sh
+ delete_from_chroot /tmp/linuxrc
+
+ extract_modules ${clst_chroot_path} ${kname}
+ ;;
+
+ build_packages)
+ shift
+ export clst_packages="$*"
+ exec_in_chroot ${clst_shdir}/${clst_target}/chroot.sh
+ ;;
+
+ preclean)
+ exec_in_chroot ${clst_shdir}/${clst_target}/preclean-chroot.sh
+ ;;
+
+ rc-update)
+ exec_in_chroot ${clst_shdir}/support/rc-update.sh
+ ;;
+
+ fsscript)
+ exec_in_chroot ${clst_fsscript}
+ ;;
+
+ livecd-update)
+ # Now, finalize and tweak the livecd fs (inside of the chroot)
+ exec_in_chroot ${clst_shdir}/support/livecdfs-update.sh
+ ;;
+
+ bootloader)
+ exit 0
+ ;;
+
+ target_image_setup)
+ shift
+ ${clst_shdir}/support/target_image_setup.sh $1
+ ;;
+
+ unmerge)
+ shift
+ export clst_packages="$*"
+ exec_in_chroot ${clst_shdir}/support/unmerge.sh
+ ;;
+
+ iso)
+ shift
+ ${clst_shdir}/support/create-iso.sh $1
+ ;;
+
+ clean)
+ exit 0
+ ;;
+
+ *)
+ exit 1
+ ;;
+esac
+exit $?
diff --git a/targets/stage4/stage4-preclean-chroot.sh b/targets/stage4/preclean-chroot.sh
index a22b6c1c..a22b6c1c 100755
--- a/targets/stage4/stage4-preclean-chroot.sh
+++ b/targets/stage4/preclean-chroot.sh
diff --git a/targets/stage4/stage4-controller.sh b/targets/stage4/stage4-controller.sh
deleted file mode 100755
index d42f302f..00000000
--- a/targets/stage4/stage4-controller.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/bash
-
-source ${clst_shdir}/support/functions.sh
-
-# Only put commands in this section that you want every target to execute.
-# This is a global default file and will affect every target
-case $1 in
- enter)
- ${clst_CHROOT} ${clst_chroot_path}
- ;;
-
- pre-kmerge)
- # Sets up the build environment before any kernels are compiled
- exec_in_chroot ${clst_shdir}/support/pre-kmerge.sh
- ;;
-
- post-kmerge)
- # Cleans up the build environment after the kernels are compiled
- exec_in_chroot ${clst_shdir}/support/post-kmerge.sh
- ;;
-
- kernel)
- shift
- export clst_kname="$1"
- # If we have our own linuxrc, copy it in
- if [ -n "${clst_linuxrc}" ]
- then
- cp -pPR ${clst_linuxrc} ${clst_chroot_path}/tmp/linuxrc
- fi
- exec_in_chroot ${clst_shdir}/support/kmerge.sh
- delete_from_chroot tmp/linuxrc
- extract_modules ${clst_chroot_path} ${clst_kname}
- # Do we need this one?
-# extract_kernel ${clst_chroot_path}/boot ${clst_kname}
- ;;
-
- build_packages)
- shift
- export clst_packages="$*"
- exec_in_chroot ${clst_shdir}/${clst_target}/${clst_target}-chroot.sh
- ;;
-
- preclean)
- exec_in_chroot ${clst_shdir}/${clst_target}/${clst_target}-preclean-chroot.sh ${clst_root_path}
- ;;
-
- rc-update)
- exec_in_chroot ${clst_shdir}/support/rc-update.sh
- ;;
-
- fsscript)
- exec_in_chroot ${clst_fsscript}
- ;;
-
- livecd-update)
- # Now, finalize and tweak the livecd fs (inside of the chroot)
- exec_in_chroot ${clst_shdir}/support/livecdfs-update.sh
-
- # Move over the xinitrc (if applicable)
- # This is moved here, so we can override any default xinitrc
- if [ -n "${clst_livecd_xinitrc}" ]
- then
- cp -f ${clst_livecd_xinitrc} \
- ${clst_chroot_path}/etc/X11/xinit/xinitrc
- fi
- ;;
-
- bootloader)
- exit 0
- ;;
-
- target_image_setup)
- shift
- ${clst_shdir}/support/target_image_setup.sh $1
- ;;
-
- unmerge)
- shift
- export clst_packages="$*"
- exec_in_chroot ${clst_shdir}/support/unmerge.sh
- ;;
-
- iso)
- shift
- ${clst_shdir}/support/create-iso.sh $1
- ;;
-
- clean)
- exit 0
- ;;
-
- *)
- exit 1
- ;;
-esac
-exit $?
diff --git a/targets/support/bootloader-setup.sh b/targets/support/bootloader-setup.sh
index e4735782..455a4f07 100755
--- a/targets/support/bootloader-setup.sh
+++ b/targets/support/bootloader-setup.sh
@@ -1,30 +1,60 @@
#!/bin/bash
source ${clst_shdir}/support/functions.sh
-source ${clst_shdir}/support/filesystem-functions.sh
# $1 is the destination root
-# We handle boot loader a little special. Most arches require a cdtar with bootloader files
-# but we can generate one for amd64/x86 now
-if [ -n "${clst_cdtar}" ]
-then
+if [[ -n ${clst_cdtar} ]]; then
extract_cdtar $1
-elif [ "${clst_buildarch}" = "x86" ] || [ "${clst_buildarch}" = "amd64" ]
-then
- #assume if there is no cdtar and we are on a supported arch that the user just wants us to handle this
- create_bootloader $1
-else
- #While this seems a little crazy, it's entirely possible the bootloader is just shoved in isoroot overlay
- echo "No cdtar and unable to auto generate boot loader files... good luck"
fi
extract_kernels $1/boot
-check_bootargs
-check_filesystem_type
-default_append_line="root=/dev/ram0 init=/linuxrc ${cmdline_opts} ${custom_kopts} cdroot"
-[ -n "${clst_splash_theme}" ] && default_append_line="${default_append_line} splash=silent,theme:${clst_livecd_splash_theme} CONSOLE=/dev/tty1 quiet"
+cmdline_opts=()
+
+# Add any additional options
+if [ -n "${clst_livecd_bootargs}" ]
+then
+ for x in ${clst_livecd_bootargs}
+ do
+ cmdline_opts+=(${x})
+ done
+fi
+
+case ${clst_fstype} in
+ squashfs)
+ cmdline_opts+=(looptype=squashfs loop=/image.squashfs)
+ ;;
+ jffs2)
+ cmdline_opts+=(looptype=jffs2 loop=/image.jffs2)
+ ;;
+esac
+
+# Optionally add Memtest86+ entries to the GRUB menu
+memtest_grub() {
+ if [[ -e $1/memtest64.bios ]]; then
+ echo 'if [ "x$grub_platform" = xpc ]; then'
+ echo ' menuentry "Memtest86+ 64bit BIOS" {'
+ echo ' linux "/memtest64.bios"'
+ echo ' }'
+ echo 'fi'
+ fi
+ if [[ -e $1/memtest.efi64 ]]; then
+ echo 'if [ "x$grub_platform" = xefi ]; then'
+ echo ' menuentry "Memtest86+ 64bit UEFI" {'
+ echo ' chainloader "/memtest.efi64"'
+ echo ' }'
+ echo 'fi'
+ fi
+ if [[ -e $1/memtest32.bios ]]; then
+ echo 'menuentry "Memtest86+ 32bit BIOS" {'
+ echo ' linux "/memtest32.bios"'
+ echo '}'
+ fi
+}
+
+default_append_line=(${cmdline_opts[@]} cdroot)
+default_dracut_append_line=(root=live:CDLABEL=ISOIMAGE rd.live.dir=/ rd.live.squashimg=image.squashfs)
case ${clst_hostarch} in
alpha)
@@ -35,17 +65,17 @@ case ${clst_hostarch} in
for x in ${clst_boot_kernel}
do
echo -n "${bctr}:/boot/${x} " >> ${acfg}
- echo -n "initrd=/boot/${x}.igz root=/dev/ram0 " >> ${acfg}
- echo "init=/linuxrc ${cmdline_opts} cdroot" >> ${acfg}
+ echo -n "initrd=/boot/${x}.igz " >> ${acfg}
+ echo "${cmdline_opts[@]} cdroot" >> ${acfg}
((bctr=${bctr}+1))
done
# Pass 2 is for serial
- cmdline_opts="${cmdline_opts} console=ttyS0"
+ cmdline_opts+=(console=ttyS0)
for x in ${clst_boot_kernel}
do
echo -n "${bctr}:/boot/${x} " >> ${acfg}
- echo -n "initrd=/boot/${x}.igz root=/dev/ram0 " >> ${acfg}
- echo "init=/linuxrc ${cmdline_opts} cdroot" >> ${acfg}
+ echo -n "initrd=/boot/${x}.igz " >> ${acfg}
+ echo "${cmdline_opts[@]} cdroot" >> ${acfg}
((bctr=${bctr}+1))
done
;;
@@ -63,16 +93,10 @@ case ${clst_hostarch} in
boot_kernel_common_name=${first/%32/}
boot_kernel_common_name=${boot_kernel_common_name/%64/}
- for x in ${clst_boot_kernel}
- do
- eval kopts=\$clst_boot_kernel_${x}_kernelopts
- my_kopts="${my_kopts} ${kopts}"
- done
-
# copy the bootloader for the final image
cp /usr/share/palo/iplboot $1/boot/
- echo "--commandline=0/${boot_kernel_common_name} initrd=${first}.igz ${default_append_line} ${my_kopts}" >> ${icfg}
+ echo "--commandline=0/${boot_kernel_common_name} initrd=${first}.igz ${default_append_line[@]}" >> ${icfg}
echo "--bootloader=boot/iplboot" >> ${icfg}
echo "--ramdisk=boot/${first}.igz" >> ${icfg}
for x in ${clst_boot_kernel}
@@ -80,8 +104,7 @@ case ${clst_hostarch} in
echo "--recoverykernel=boot/${x}" >> ${icfg}
done
;;
- ppc*|powerpc*)
- # GRUB2 Openfirmware
+ amd64|arm64|ia64|ppc*|powerpc*|sparc*|x86|i?86)
kern_subdir=/boot
iacfg=$1/boot/grub/grub.cfg
mkdir -p $1/boot/grub
@@ -92,26 +115,38 @@ case ${clst_hostarch} in
echo '' >> ${iacfg}
for x in ${clst_boot_kernel}
do
- eval "clst_kernel_console=\$clst_boot_kernel_${x}_console"
- eval "clst_kernel_machine_type=\$clst_boot_kernel_${x}_machine_type"
- eval custom_kopts=\$${x}_kernelopts
+ eval "kernel_console=\$clst_boot_kernel_${x}_console"
+ eval "distkernel=\$clst_boot_kernel_${x}_distkernel"
echo "menuentry 'Boot LiveCD (kernel: ${x})' --class gnu-linux --class os {" >> ${iacfg}
- echo " linux ${kern_subdir}/${x} ${default_append_line}" >> ${iacfg}
+ if [ "${distkernel}" = "yes" ]
+ then
+ echo " search --no-floppy --set=root -l 'ISOIMAGE'" >> ${iacfg}
+ echo " linux ${kern_subdir}/${x} ${default_dracut_append_line[@]}" >> ${iacfg}
+ else
+ echo " linux ${kern_subdir}/${x} ${default_append_line[@]}" >> ${iacfg}
+ fi
echo " initrd ${kern_subdir}/${x}.igz" >> ${iacfg}
echo "}" >> ${iacfg}
echo "" >> ${iacfg}
echo "menuentry 'Boot LiveCD (kernel: ${x}) (cached)' --class gnu-linux --class os {" >> ${iacfg}
- echo " linux ${kern_subdir}/${x} ${default_append_line} docache" >> ${iacfg}
+ if [ "${distkernel}" = "yes" ]
+ then
+ echo " search --no-floppy --set=root -l 'ISOIMAGE'" >> ${iacfg}
+ echo " linux ${kern_subdir}/${x} ${default_dracut_append_line[@]} rd.live.ram=1" >> ${iacfg}
+ else
+ echo " linux ${kern_subdir}/${x} ${default_append_line[@]} docache" >> ${iacfg}
+ fi
+
echo " initrd ${kern_subdir}/${x}.igz" >> ${iacfg}
echo "}" >> ${iacfg}
- if [ -n "${clst_kernel_console}" ]
+ if [ -n "${kernel_console}" ]
then
echo "submenu 'Special console options (kernel: ${x})' --class gnu-linux --class os {" >> ${iacfg}
- for y in ${clst_kernel_console}
+ for y in ${kernel_console}
do
echo "menuentry 'Boot LiveCD (kernel: ${x} console=${y})' --class gnu-linux --class os {" >> ${iacfg}
- echo " linux ${kern_subdir}/${x} ${default_append_line} console=${y}" >> ${iacfg}
+ echo " linux ${kern_subdir}/${x} ${default_append_line[@]} console=${y}" >> ${iacfg}
echo " initrd ${kern_subdir}/${x}.igz" >> ${iacfg}
echo "}" >> ${iacfg}
echo "" >> ${iacfg}
@@ -120,169 +155,7 @@ case ${clst_hostarch} in
fi
echo "" >> ${iacfg}
done
- ;;
- sparc*)
- # NO SOFTLEVEL SUPPORT YET
- scfg=$1/boot/silo.conf
- echo "default=\"help\"" > ${scfg}
- echo "message=\"/boot/boot.msg\"" >> ${scfg}
-
- for x in ${clst_boot_kernel}
- do
- echo >> ${icfg}
- echo "image=\"/boot/${x}\"" >> ${scfg}
- echo -e "\tlabel=\"${x}\"" >> ${scfg}
- echo -e "\tappend=\"initrd=/boot/${x}.igz root=/dev/ram0 init=/linuxrc ${cmdline_opts} cdroot\"" >> ${scfg}
-
- done
-
- echo "image=\"cat /boot/silo.conf\"" >> ${scfg}
- echo -e "label=\"config\"" >> ${scfg}
- echo "image=\"cat /boot/video.msg\"" >> ${scfg}
- echo -e "label=\"video\"" >> ${scfg}
- echo "image=\"cat /boot/help.msg\"" >> ${scfg}
- echo -e "label=\"help\"" >> ${scfg}
- echo "image=\"cat /boot/parameters.msg\"" >> ${scfg}
- echo -e "label=\"parameters\"" >> ${scfg}
- ;;
- ia64)
- # NO SOFTLEVEL SUPPORT YET
- iacfg=$1/boot/elilo.conf
- echo 'prompt' > ${iacfg}
- echo 'message=/efi/boot/elilo.msg' >> ${iacfg}
- echo 'chooser=simple' >> ${iacfg}
- echo 'timeout=50' >> ${iacfg}
- echo 'relocatable' >> ${iacfg}
- echo >> ${iacfg}
- for x in ${clst_boot_kernel}
- do
- echo "image=/efi/boot/${x}" >> ${iacfg}
- echo " label=${x}" >> ${iacfg}
- echo ' append="'initrd=${x}.igz ${default_append_line}'"' >> ${iacfg}
- echo " initrd=/efi/boot/${x}.igz" >> ${iacfg}
- echo >> ${iacfg}
- echo "image=/efi/boot/${x}" >> ${iacfg}
- echo " label=${x}-serial">> ${iacfg}
- echo ' append="'initrd=${x}.igz ${default_append_line}' console=tty0 console=ttyS0,9600"' >> ${iacfg}
- echo " initrd=/efi/boot/${x}.igz" >> ${iacfg}
- echo >> ${iacfg}
- echo "image=/efi/boot/${x}" >> ${iacfg}
- echo " label=${x}-ilo">> ${iacfg}
- echo ' append="'initrd=${x}.igz ${default_append_line}' console=tty0 console=ttyS3,9600"' >> ${iacfg}
- echo " initrd=/efi/boot/${x}.igz" >> ${iacfg}
- echo >> ${iacfg}
- echo "image=/efi/boot/${x}" >> ${iacfg}
- echo " label=${x}-sgi">> ${iacfg}
- echo ' append="'initrd=${x}.igz ${default_append_line}' console=tty0 console=ttySG0,115200"' >> ${iacfg}
- echo " initrd=/efi/boot/${x}.igz" >> ${iacfg}
- echo >> ${iacfg}
- mv $1/boot/${x}{,.igz} $1/boot/efi/boot
- done
- cp ${iacfg} $1/boot/efi/boot
- ;;
- x86|amd64)
- if [ -e $1/isolinux/isolinux.bin ]
- then
- # the rest of this function sets up the config file for isolinux
- icfg=$1/isolinux/isolinux.cfg
- kmsg=$1/isolinux/kernels.msg
- echo "default ${first}" > ${icfg}
- echo "timeout 150" >> ${icfg}
- echo "ontimeout localhost" >> ${icfg}
- echo "prompt 1" >> ${icfg}
- echo "display boot.msg" >> ${icfg}
- echo "F1 kernels.msg" >> ${icfg}
- echo "F2 F2.msg" >> ${icfg}
- echo "F3 F3.msg" >> ${icfg}
- echo "F4 F4.msg" >> ${icfg}
- echo "F5 F5.msg" >> ${icfg}
- echo "F6 F6.msg" >> ${icfg}
- echo "F7 F7.msg" >> ${icfg}
-
- echo "Available kernels:" > ${kmsg}
- for i in 2 3 4 5 6 7
- do
- cp ${clst_sharedir}/livecd/files/x86-F$i.msg \
- $1/isolinux/F$i.msg
- done
-
- for x in ${clst_boot_kernel}
- do
- eval custom_kopts=\$${x}_kernelopts
- echo "APPENDING CUSTOM KERNEL ARGS: ${custom_kopts}"
- echo >> ${icfg}
-
- eval "clst_kernel_softlevel=\$clst_boot_kernel_${x}_softlevel"
-
- if [ -n "${clst_kernel_softlevel}" ]
- then
- for y in ${clst_kernel_softlevel}
- do
- echo "label ${x}-${y}" >> ${icfg}
- echo " kernel /boot/${x}" >> ${icfg}
- echo " append ${default_append_line} softlevel=${y} initrd=/boot/${x}.igz vga=791" >> ${icfg}
-
- echo >> ${icfg}
- echo " ${x}" >> ${kmsg}
- echo "label ${x}-${y}-nofb" >> ${icfg}
- echo " kernel /boot/${x}" >> ${icfg}
- echo " append ${default_append_line} softlevel=${y} initrd=/boot/${x}.igz" >> ${icfg}
- echo >> ${icfg}
- echo " ${x}-nofb" >> ${kmsg}
- done
- else
- echo "label ${x}" >> ${icfg}
- echo " kernel /boot/${x}" >> ${icfg}
- echo " append ${default_append_line} initrd=/boot/${x}.igz vga=791" >> ${icfg}
- echo >> ${icfg}
- echo " ${x}" >> ${kmsg}
- echo "label ${x}-nofb" >> ${icfg}
- echo " kernel /boot/${x}" >> ${icfg}
- echo " append ${default_append_line} initrd=/boot/${x}.igz" >> ${icfg}
- echo >> ${icfg}
- echo " ${x}-nofb" >> ${kmsg}
- fi
- done
-
- if [ -f $1/isolinux/memtest86 ]
- then
- echo >> $icfg
- echo " memtest86" >> $kmsg
- echo "label memtest86" >> $icfg
- echo " kernel memtest86" >> $icfg
- fi
- echo >> $icfg
- echo "label localhost" >> $icfg
- echo " localboot -1" >> $icfg
- echo " MENU HIDE" >> $icfg
- fi
-
- # GRUB2
- if [ -d $1/grub ] || [ -f "$1/boot/EFI/BOOT/BOOTX64.EFI" ]
- then
- #the grub dir may not exist, better safe than sorry
- [ -d "$1/grub" ] || mkdir -p "$1/grub"
-
- iacfg=$1/grub/grub.cfg
- echo 'set default=0' > ${iacfg}
- echo 'set gfxpayload=keep' >> ${iacfg}
- echo 'set timeout=10' >> ${iacfg}
- echo 'insmod all_video' >> ${iacfg}
- echo '' >> ${iacfg}
- for x in ${clst_boot_kernel}
- do
- echo "menuentry 'Boot LiveCD (kernel: ${x})' --class gnu-linux --class os {" >> ${iacfg}
- echo " linux /boot/${x} ${default_append_line}" >> ${iacfg}
- echo " initrd /boot/${x}.igz" >> ${iacfg}
- echo "}" >> ${iacfg}
- echo "" >> ${iacfg}
- echo "menuentry 'Boot LiveCD (kernel: ${x}) (cached)' --class gnu-linux --class os {" >> ${iacfg}
- echo " linux /boot/${x} ${default_append_line} docache" >> ${iacfg}
- echo " initrd /boot/${x}.igz" >> ${iacfg}
- echo "}" >> ${iacfg}
- echo "" >> ${iacfg}
- done
- fi
+ memtest_grub $1 >> ${iacfg}
;;
mips)
# NO SOFTLEVEL SUPPORT YET
@@ -294,9 +167,7 @@ case ${clst_hostarch} in
# CD image, and then pass these components to the
# `sgibootcd` tool which outputs a final CD image
scratch="${1}"
- [ ! -d "${scratch}/kernels" ] && mkdir ${scratch}/kernels
- [ ! -d "${scratch}/kernels/misc" ] && mkdir ${scratch}/kernels/misc
- [ ! -d "${scratch}/arcload" ] && mkdir ${scratch}/arcload
+ mkdir -p ${scratch}/{kernels/misc,arcload}
echo "" > ${scratch}/arc.cf
# Move kernel binaries to ${scratch}/kernels, and
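
To make the unified GRUB branch in bootloader-setup.sh concrete, this is roughly the fragment the loop above appends to ${iacfg} for a single kernel named gentoo, with fstype squashfs, no extra bootargs, and distkernel unset (a sketch of the generated output, not part of the patch):

    menuentry 'Boot LiveCD (kernel: gentoo)' --class gnu-linux --class os {
        linux /boot/gentoo looptype=squashfs loop=/image.squashfs cdroot
        initrd /boot/gentoo.igz
    }

With distkernel set to yes, the entry instead gains a search --no-floppy --set=root -l 'ISOIMAGE' line and uses the dracut-style append (root=live:CDLABEL=ISOIMAGE rd.live.dir=/ rd.live.squashimg=image.squashfs), and memtest_grub appends the optional Memtest86+ entries at the end of the config.
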
diff --git a/targets/support/chroot-functions.sh b/targets/support/chroot-functions.sh
index 506d43f2..d8472d46 100755
--- a/targets/support/chroot-functions.sh
+++ b/targets/support/chroot-functions.sh
@@ -1,8 +1,5 @@
#!/bin/bash
-# Set the profile
-eselect profile set ${clst_target_profile}
-
# Trap these signals and kill ourselves if received
# Force ourselves to die if any of these signals are received
# most likely our controlling terminal is gone
@@ -29,29 +26,6 @@ if [[ -z "${clst_CHOST}" ]] ; then
fi
fi
-check_genkernel_version() {
- local version parts=() major minor
-
- version=$(genkernel --version)
- if [[ -z ${version} ]] ; then
- echo "ERROR: Could not detect genkernel version!"
- exit 1
- fi
- printf 'Genkernel version '%s' found ... ' "${version}"
-
- IFS='.' read -a parts <<<"${version}"
- major=${parts[0]}
- minor=${parts[1]}
- if [[ ${major} -gt 3 || ( ${major} -eq 3 && ${minor} -ge 3 ) ]] ; then
- echo "OK"
- else
- echo "FAIL"
- echo "ERROR: Your genkernel version is too low in your seed stage."
- echo " genkernel version 3.3.0 or greater is required."
- exit 1
- fi
-}
-
get_libdir() {
ABI=$(portageq envvar ABI)
DEFAULT_ABI=$(portageq envvar DEFAULT_ABI)
@@ -76,18 +50,19 @@ get_libdir() {
echo ${var}
}
-setup_myfeatures(){
- setup_myemergeopts
- export FEATURES="-news clean-logs"
+setup_features() {
+ setup_emerge_opts
+ local features=(-news binpkg-multi-instance clean-logs parallel-install)
+ export FEATURES="${features[@]}"
if [ -n "${clst_CCACHE}" ]
then
- export clst_myfeatures="${clst_myfeatures} ccache"
- clst_root_path=/ run_merge --oneshot --noreplace dev-util/ccache || exit 1
+ features+=(ccache)
+ ROOT=/ run_merge --oneshot --noreplace dev-util/ccache
fi
if [ -n "${clst_DISTCC}" ]
then
- export clst_myfeatures="${clst_myfeatures} distcc"
+ features+=(distcc)
export DISTCC_HOSTS="${clst_distcc_hosts}"
[ -e ${clst_make_conf} ] && \
echo 'USE="${USE} -avahi -gtk -gnome"' >> ${clst_make_conf}
@@ -97,28 +72,27 @@ setup_myfeatures(){
# reinstall if it isn't found.
if [ "$(getent passwd distcc | cut -d: -f1)" != "distcc" ]
then
- clst_root_path=/ run_merge --oneshot sys-devel/distcc || exit 1
+ ROOT=/ run_merge --oneshot sys-devel/distcc
else
- clst_root_path=/ run_merge --oneshot --noreplace sys-devel/distcc || exit 1
+ ROOT=/ run_merge --oneshot --noreplace sys-devel/distcc
fi
- ${clst_sed} -i '/USE="${USE} -avahi -gtk -gnome"/d' ${clst_make_conf}
+ sed -i '/USE="${USE} -avahi -gtk -gnome"/d' ${clst_make_conf}
mkdir -p /etc/distcc
echo "${clst_distcc_hosts}" > /etc/distcc/hosts
# This sets up automatic cross-distcc-fu according to
# https://wiki.gentoo.org/wiki/Distcc/Cross-Compiling
CHOST=$(portageq envvar CHOST)
- LIBDIR=$(get_libdir)
- cd /usr/${LIBDIR}/distcc/bin
+ cd /usr/lib/distcc/bin
rm cc gcc g++ c++ 2>/dev/null
- echo -e '#!/bin/bash\nexec /usr/'${LIBDIR}'/distcc/bin/'${CHOST}'-g${0:$[-2]} "$@"' > ${CHOST}-wrapper
- chmod a+x /usr/${LIBDIR}/distcc/bin/${CHOST}-wrapper
+ echo -e '#!/bin/bash\nexec /usr/lib/distcc/bin/'${CHOST}'-g${0:$[-2]} "$@"' > ${CHOST}-wrapper
+ chmod a+x /usr/lib/distcc/bin/${CHOST}-wrapper
for i in cc gcc g++ c++; do ln -s ${CHOST}-wrapper ${i}; done
fi
if [ -n "${clst_ICECREAM}" ]
then
- clst_root_path=/ run_merge --oneshot --noreplace sys-devel/icecream || exit 1
+ ROOT=/ run_merge --oneshot --noreplace sys-devel/icecream
# This sets up automatic cross-icecc-fu according to
# http://www.gentoo-wiki.info/HOWTO_Setup_An_ICECREAM_Compile_Cluster
@@ -132,34 +106,49 @@ setup_myfeatures(){
export PATH="/usr/lib/icecc/bin:${PATH}"
export PREROOTPATH="/usr/lib/icecc/bin"
fi
- export FEATURES="${clst_myfeatures} -news"
+ export FEATURES="${features[@]}"
}
-setup_myemergeopts(){
- if [[ "${clst_VERBOSE}" == "true" ]]
+setup_emerge_opts() {
+ emerge_opts=()
+ bootstrap_opts=()
+
+ if [ -n "${clst_VERBOSE}" ]
then
- clst_myemergeopts="--verbose"
- bootstrap_opts="${bootstrap_opts} -v"
+ emerge_opts+=(--verbose)
+ bootstrap_opts+=(-v)
else
- clst_myemergeopts="--quiet"
- bootstrap_opts="${bootstrap_opts} -q"
+ emerge_opts+=(--quiet)
+ bootstrap_opts+=(-q)
fi
if [ -n "${clst_FETCH}" ]
then
- export bootstrap_opts="${bootstrap_opts} -f"
- export clst_myemergeopts="${clst_myemergeopts} -f"
- # if we have PKGCACHE, and either update_seed is empty or 'no', make and use binpkgs
- elif [ -n "${clst_PKGCACHE}" ] && [ -z "${clst_update_seed}" -o "${clst_update_seed}" = "no" ]
+ emerge_opts+=(--fetchonly)
+ bootstrap_opts+=(-f)
+ fi
+ if [ -n "${clst_jobs}" ]
+ then
+ emerge_opts+=(--jobs "${clst_jobs}")
+ fi
+ if [ -n "${clst_load_average}" ]
+ then
+ emerge_opts+=(--load-average "${clst_load_average}")
+ fi
+
+ if [ -n "${clst_PKGCACHE}" ] && [ -z "${clst_update_seed}" -o "${clst_update_seed}" = "no" ]
then
- export clst_myemergeopts="${clst_myemergeopts} --usepkg --buildpkg --binpkg-respect-use=y --newuse"
- export bootstrap_opts="${bootstrap_opts} -r"
+ emerge_opts+=(--usepkg --buildpkg --binpkg-respect-use=y --newuse)
+ bootstrap_opts+=(-r)
fi
+
+ export emerge_opts
+ export bootstrap_opts
}
setup_binutils(){
if [ -x /usr/bin/binutils-config ]
then
- my_binutils=$( cd /etc/env.d/binutils; ls ${clst_CHOST}-* | head -n 1 )
+ my_binutils=$( cd ${ROOT}/etc/env.d/binutils; ls ${clst_CHOST}-* | head -n 1 )
if [ -z "${my_binutils}" ]
then
my_binutils=1
@@ -171,7 +160,7 @@ setup_binutils(){
setup_gcc(){
if [ -x /usr/bin/gcc-config ]
then
- my_gcc=$( cd /etc/env.d/gcc; ls ${clst_CHOST}-* | head -n 1 )
+ my_gcc=$( cd ${ROOT}/etc/env.d/gcc; ls ${clst_CHOST}-* | head -n 1 )
if [ -z "${my_gcc}" ]
then
my_gcc=1
@@ -180,41 +169,13 @@ setup_gcc(){
fi
}
-setup_pkgmgr(){
- # Set bindist USE flag if clst_BINDIST is set
- # this is handled independantly in stage2, changes here should be mirrored there
- #if [ "${clst_target}" != "stage1" ] && [ -e "${clst_make_conf}" ] \
- # && [ -n "${clst_BINDIST}" ]; then
- # if grep -q ^USE "${clst_make_conf}"; then
- # echo "USE=\"\${USE} bindist\"" >> "${clst_make_conf}"
- # else
- # echo "USE=\"bindist\"" >> "${clst_make_conf}"
- # fi
- #fi
-
- # We need to merge our package manager with USE="build" set in case it is
- # portage to avoid frying our /etc/portage/make.conf file. Otherwise, we could
- # just let emerge @system could merge it.
- # Use --update or portage might just waste time/cycles and reinstall the same version.
- # Use --newuse to make sure it rebuilds with any changed use flags.
- if [ -n "$1" ];then
- echo "Adding USE=\"\${USE} $1\" to make.conf for portage build"
- [ -e "${clst_make_conf}" ] && echo "USE=\"\${USE} $1\"" >> "${clst_make_conf}"
- run_merge --oneshot --update --newuse sys-apps/portage
- ${clst_sed} -i "/USE=\"\${USE} $1\"/d" "${clst_make_conf}"
- else
- run_merge --oneshot --update --newuse sys-apps/portage
- fi
-}
-
cleanup_distcc() {
- LIBDIR=$(get_libdir)
- rm -rf /etc/distcc/hosts
+ rm -f /etc/distcc/hosts
for i in cc gcc c++ g++; do
- rm -f /usr/${LIBDIR}/distcc/bin/${i}
- ln -s /usr/bin/distcc /usr/${LIBDIR}/distcc/bin/${i}
+ rm -f /usr/lib/distcc/bin/${i}
+ ln -s /usr/bin/distcc /usr/lib/distcc/bin/${i}
done
- rm -f /usr/${LIBDIR}/distcc/bin/*-wrapper
+ rm -f /usr/lib/distcc/bin/*-wrapper
}
cleanup_icecream() {
@@ -227,7 +188,6 @@ cleanup_icecream() {
}
cleanup_stages() {
- make_destpath
if [ -n "${clst_DISTCC}" ]
then
cleanup_distcc
@@ -237,7 +197,7 @@ cleanup_stages() {
cleanup_icecream
fi
case ${clst_target} in
- stage3|system)
+ stage3)
run_merge --depclean --with-bdeps=y
;;
*)
@@ -245,7 +205,7 @@ cleanup_stages() {
;;
esac
case ${clst_target} in
- stage1|stage2|stage3|system)
+ stage1|stage2|stage3)
rm -f /var/lib/portage/world
touch /var/lib/portage/world
;;
@@ -255,11 +215,10 @@ cleanup_stages() {
esac
# Remove bindist from use
- # this is handled independantly in stage2, changes here should be mirrored there
- ${clst_sed} -i "/USE=\"\${USE} bindist\"/d" "${clst_make_conf}"
- ${clst_sed} -i "/USE=\"bindist\"/d" "${clst_make_conf}"
+ sed -i "/USE=\"\${USE} bindist\"/d" "${clst_make_conf}"
+ sed -i "/USE=\"bindist\"/d" "${clst_make_conf}"
- [ "${clst_target}" != "tinderbox" ] && rm -f /var/log/emerge.log /var/log/portage/elog/*
+ rm -f /var/log/emerge.log /var/log/portage/elog/*
}
update_env_settings(){
@@ -273,47 +232,25 @@ die() {
exit 1
}
-make_destpath() {
- # ROOT is / by default, so remove any ROOT= settings from make.conf
- ${clst_sed} -i '/ROOT=/d' ${clst_make_conf}
- export ROOT=/
- if [ "${1}" != "/" -a -n "${1}" ]
- then
- echo "ROOT=\"${1}\"" >> ${clst_make_conf}
- export ROOT=${1}
- fi
- if [ ! -d ${ROOT} ]
- then
- install -d ${ROOT}
- fi
-}
-
run_merge() {
- # Sets up the ROOT= parameter
- # with no options ROOT=/
- make_destpath ${clst_root_path}
-
export EMERGE_WARNING_DELAY=0
export CLEAN_DELAY=0
- export EBEEP_IGNORE=0
- export EPAUSE_IGNORE=0
- export CONFIG_PROTECT="-*"
+ [[ $CONFIG_PROTECT != "-*"* ]] && export CONFIG_PROTECT="-*"
- if [[ "${clst_VERBOSE}" == "true" ]]
+ if [ -n "${clst_VERBOSE}" ]
then
- echo "ROOT=${ROOT} emerge ${clst_myemergeopts} -pt $@" || exit 1
- emerge ${clst_myemergeopts} -pt $@ || exit 3
+ echo "ROOT=${ROOT} emerge ${emerge_opts[@]} -pt $@" || exit 1
+ ROOT="$ROOT" emerge ${emerge_opts[@]} -pt $@ || exit 3
fi
- echo "emerge ${clst_myemergeopts} $@" || exit 1
+ echo "emerge ${emerge_opts[@]} $@" || exit 1
- emerge ${clst_myemergeopts} $@ || exit 1
+ ROOT="$ROOT" emerge ${emerge_opts[@]} $@ || exit 1
}
show_debug() {
- if [ "${clst_DEBUG}" = "1" ]
+ if [ -n "${clst_DEBUG}" ]
then
- unset PACKAGES
echo "DEBUG:"
echo "Profile/target info:"
echo "Profile inheritance:"
@@ -346,103 +283,13 @@ show_debug() {
fi
}
-run_default_funcs() {
- if [ "${RUN_DEFAULT_FUNCS}" != "no" ]
- then
- update_env_settings
- setup_myfeatures
- show_debug
- fi
-}
-
-# Functions
-# Copy libs of a executable in the chroot
-function copy_libs() {
- # Check if it's a dynamix exec
- ldd ${1} > /dev/null 2>&1 || return
-
- for lib in `ldd ${1} | awk '{ print $3 }'`
- do
- echo ${lib}
- if [ -e ${lib} ]
- then
- if [ ! -e ${clst_root_path}/${lib} ]
- then
- copy_file ${lib}
- [ -e "${clst_root_path}/${lib}" ] && \
- strip -R .comment -R .note ${clst_root_path}/${lib} \
- || echo "WARNING : Cannot strip lib ${clst_root_path}/${lib} !"
- fi
- else
- echo "WARNING : Some library was not found for ${lib} !"
- fi
- done
-}
-
-function copy_symlink() {
- STACK=${2}
- [ "${STACK}" = "" ] && STACK=16 || STACK=$((${STACK} - 1 ))
-
- if [ ${STACK} -le 0 ]
- then
- echo "WARNING : ${TARGET} : too many levels of symbolic links !"
- return
- fi
-
- [ ! -e ${clst_root_path}/`dirname ${1}` ] && \
- mkdir -p ${clst_root_path}/`dirname ${1}`
- [ ! -e ${clst_root_path}/${1} ] && \
- cp -vfdp ${1} ${clst_root_path}/${1}
-
- if [[ -n $(type -p realpath) ]]; then
- TARGET=`realpath ${1}`
- else
- TARGET=`readlink -f ${1}`
- fi
- if [ -h ${TARGET} ]
- then
- copy_symlink ${TARGET} ${STACK}
- else
- copy_file ${TARGET}
- fi
-}
-
-function copy_file() {
- f="${1}"
-
- if [ ! -e "${f}" ]
- then
- echo "WARNING : File not found : ${f}"
- continue
- fi
-
- [ ! -e ${clst_root_path}/`dirname ${f}` ] && \
- mkdir -p ${clst_root_path}/`dirname ${f}`
- [ ! -e ${clst_root_path}/${f} ] && \
- cp -vfdp ${f} ${clst_root_path}/${f}
- if [ -x ${f} -a ! -h ${f} ]
- then
- copy_libs ${f}
- strip -R .comment -R .note ${clst_root_path}/${f} > /dev/null 2>&1
- elif [ -h ${f} ]
- then
- copy_symlink ${f}
- fi
-}
-
-create_handbook_icon() {
- # This function creates a local icon to the Gentoo Handbook
- echo "[Desktop Entry]
-Encoding=UTF-8
-Version=1.0
-Type=Link
-URL=file:///mnt/cdrom/docs/handbook/html/index.html
-Terminal=false
-Name=Gentoo Linux Handbook
-GenericName=Gentoo Linux Handbook
-Comment=This is a link to the local copy of the Gentoo Linux Handbook.
-Icon=text-editor" > /usr/share/applications/gentoo-handbook.desktop
-}
+readonly locales="
+C.UTF8 UTF-8
+"
-# We do this everywhere, so why not put it in this script
-run_default_funcs
+if [[ ${RUN_DEFAULT_FUNCS} != no ]]
+then
+ update_env_settings
+ setup_features
+ show_debug
+fi
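
The distcc wrapper generation above is terse; for an example CHOST of x86_64-pc-linux-gnu, the generated /usr/lib/distcc/bin/x86_64-pc-linux-gnu-wrapper contains:

    #!/bin/bash
    exec /usr/lib/distcc/bin/x86_64-pc-linux-gnu-g${0:$[-2]} "$@"

Because cc, gcc, c++ and g++ are all symlinked to this wrapper and ${0:$[-2]} expands to the last two characters of the invoked name, cc and gcc resolve to x86_64-pc-linux-gnu-gcc while c++ and g++ resolve to x86_64-pc-linux-gnu-g++.
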
diff --git a/targets/support/create-iso.sh b/targets/support/create-iso.sh
index 1b0f05e1..74c24a1d 100755
--- a/targets/support/create-iso.sh
+++ b/targets/support/create-iso.sh
@@ -1,7 +1,6 @@
#!/bin/bash
source ${clst_shdir}/support/functions.sh
-source ${clst_shdir}/support/filesystem-functions.sh
## START RUNSCRIPT
@@ -12,16 +11,24 @@ case ${clst_hostarch} in
cdmakerpkg="dev-libs/libisoburn"
;;
mips)
- cdmaker="sgibootcd"
+ cdmaker="sgibootcd"
cdmakerpkg="sys-boot/sgibootcd"
;;
- ppc*)
- cdmaker="grub-mkrescue"
- cdmakerpkg="dev-libs/libisoburn and sys-boot/grub:2"
- ;;
+ ppc*|powerpc*|sparc*)
+ cdmaker="grub-mkrescue"
+ cdmakerpkg="dev-libs/libisoburn and sys-boot/grub:2"
+ ;;
+ amd64|arm64|ia64|x86|i?86)
+ cdmaker="grub-mkrescue"
+ # grub-mkrescue requires:
+ # xorriso from libisoburn
+ # mkisofs from cdrtools
+ # mformat from mtools
+ cdmakerpkg="sys-fs/mtools, dev-libs/libisoburn, sys-boot/grub:2, and app-cdr/cdrtools"
+ ;;
*)
cdmaker="mkisofs"
- cdmakerpkg="app-cdr/cdrkit or app-cdr/cdrtools"
+ cdmakerpkg="app-cdr/cdrtools"
;;
esac
@@ -45,6 +52,9 @@ then
arm)
clst_iso_volume_id="Gentoo Linux - ARM"
;;
+ arm64)
+ clst_iso_volume_id="Gentoo Linux - ARM64"
+ ;;
hppa)
clst_iso_volume_id="Gentoo Linux - HPPA"
;;
@@ -87,23 +97,23 @@ if [ "${#clst_iso_volume_id}" -gt 32 ]; then
echo "new: '${clst_iso_volume_id}'" 1>&2
fi
-if [ "${clst_fstype}" == "zisofs" ]
-then
- mkisofs_zisofs_opts="-z"
-else
- mkisofs_zisofs_opts=""
-fi
-
-#we want to create a sha512sum for every file on the iso so we can verify it
-#from genkernel during boot. Here we make a function to create the sha512sums
+# Generate list of checksums that genkernel can use to verify the contents of
+# the ISO
isoroot_checksum() {
- echo "Creating checksums for all files included in the iso, please wait..."
- find "${clst_target_path}" -type f ! -name 'isoroot_checksums' ! -name 'isolinux.bin' -exec sha512sum {} + > "${clst_target_path}"/isoroot_checksums
- ${clst_sed} -i "s#${clst_target_path}/\?##" "${clst_target_path}"/isoroot_checksums
+ [ -z "${clst_livecd_verify}" ] && return
+
+ echo ">> Creating checksums for all files included in the ISO"
+
+ pushd "${clst_target_path}"
+ find -type f -exec b2sum {} + > /tmp/isoroot_b2sums
+ popd
+
+ mv /tmp/isoroot_b2sums "${clst_target_path}"/
}
run_mkisofs() {
- [ -n "${clst_livecd_verify}" ] && isoroot_checksum
+ isoroot_checksum
+
echo "Running \"mkisofs ${@}\""
mkisofs "${@}" || die "Cannot make ISO image"
}
@@ -111,192 +121,86 @@ run_mkisofs() {
# Here we actually create the ISO images for each architecture
case ${clst_hostarch} in
alpha)
- echo ">> xorriso -as genisofs -alpha-boot boot/bootlx -R -l -J ${mkisofs_zisofs_opts} -V \"${clst_iso_volume_id}\" -o \"${1}\" \"${clst_target_path}\""
- xorriso -as genisofs -alpha-boot boot/bootlx -R -l -J ${mkisofs_zisofs_opts} -V "${clst_iso_volume_id}" -o "${1}" "${clst_target_path}" || die "Cannot make ISO image"
+ isoroot_checksum
+
+ echo ">> xorriso -as genisofs -alpha-boot boot/bootlx -R -l -J -V \"${clst_iso_volume_id}\" -o \"${1}\" \"${clst_target_path}\""
+ xorriso -as genisofs -alpha-boot boot/bootlx -R -l -J -V "${clst_iso_volume_id}" -o "${1}" "${clst_target_path}" || die "Cannot make ISO image"
;;
arm)
;;
hppa)
echo ">> Running mkisofs to create iso image...."
- run_mkisofs -R -l -J ${mkisofs_zisofs_opts} -V "${clst_iso_volume_id}" -o "${1}" "${clst_target_path}"/
+ run_mkisofs -R -l -J -V "${clst_iso_volume_id}" -o "${1}" "${clst_target_path}"/
pushd "${clst_target_path}/"
palo -f boot/palo.conf -C "${1}"
popd
;;
- ia64)
- if [ ! -e "${clst_target_path}/gentoo.efimg" ]
- then
- iaSizeTemp=$(du -sk --apparent-size "${clst_target_path}/boot" 2>/dev/null)
- iaSizeB=$(echo ${iaSizeTemp} | cut '-d ' -f1)
- iaSize=$((${iaSizeB}+64)) # Add slack
-
- dd if=/dev/zero of="${clst_target_path}/gentoo.efimg" bs=1k \
- count=${iaSize}
- mkfs.vfat -F 16 -n GENTOO "${clst_target_path}/gentoo.efimg"
-
- mkdir "${clst_target_path}/gentoo.efimg.mountPoint"
- mount -t vfat -o loop "${clst_target_path}/gentoo.efimg" \
- "${clst_target_path}/gentoo.efimg.mountPoint"
-
- echo '>> Populating EFI image...'
- cp -rv "${clst_target_path}"/boot/* \
- "${clst_target_path}/gentoo.efimg.mountPoint" || die "Failed to populate EFI image"
-
- umount "${clst_target_path}/gentoo.efimg.mountPoint"
- rmdir "${clst_target_path}/gentoo.efimg.mountPoint"
- else
- echo ">> Found populated EFI image at \
- ${clst_target_path}/gentoo.efimg"
- fi
- echo '>> Removing /boot...'
- rm -rf "${clst_target_path}/boot"
-
- echo ">> Running mkisofs to create iso image...."
- run_mkisofs -R -l -b gentoo.efimg -c boot.cat -no-emul-boot -J ${mkisofs_zisofs_opts} -V "${clst_iso_volume_id}" -o "${1}" "${clst_target_path}"/
- ;;
mips)
- case ${clst_fstype} in
- squashfs)
- # $clst_target_path/[kernels|arcload] already exists, create loopback and sgibootcd
- [ ! -d "${clst_target_path}/loopback" ] && mkdir "${clst_target_path}/loopback"
- [ ! -d "${clst_target_path}/sgibootcd" ] && mkdir "${clst_target_path}/sgibootcd"
-
- # Setup variables
- [ -f "${clst_target_path}/livecd" ] && rm -f "${clst_target_path}/livecd"
- img="${clst_target_path}/loopback/image.squashfs"
- knl="${clst_target_path}/kernels"
- arc="${clst_target_path}/arcload"
- cfg="${clst_target_path}/sgibootcd/sgibootcd.cfg"
- echo "" > "${cfg}"
-
- # If the image file exists in $clst_target_path, move it to the loopback dir
- [ -e "${clst_target_path}/image.squashfs" ] \
- && mv -f "${clst_target_path}/image.squashfs" "${clst_target_path}/loopback"
-
- # An sgibootcd config is essentially a collection of commandline params
- # stored in a text file. We could pass these on the command line, but it's
- # far easier to generate a config file and pass it to sgibootcd versus using a
- # ton of commandline params.
- #
- # f= indicates files to go into DVH (disk volume header) in an SGI disklabel
- # format: f=</path/to/file>@<DVH name>
- # p0= the first partition holds the LiveCD rootfs image
- # format: p0=</path/to/image>
- # p8= the eighth partition is the DVH partition
- # p10= the tenth partition is the disk volume partition
- # format: p8= is always "#dvh" and p10= is always "#volume"
-
- # Add the kernels to the sgibootcd config
- for x in ${clst_boot_kernel}; do
- echo -e "f=${knl}/${x}@${x}" >> ${cfg}
- done
-
- # Next, the bootloader binaries and config
- echo -e "f=${arc}/sash64@sash64" >> ${cfg}
- echo -e "f=${arc}/sashARCS@sashARCS" >> ${cfg}
- echo -e "f=${arc}/arc.cf@arc.cf" >> ${cfg}
-
- # Next, the Loopback Image
- echo -e "p0=${img}" >> ${cfg}
-
- # Finally, the required SGI Partitions (dvh, volume)
- echo -e "p8=#dvh" >> ${cfg}
- echo -e "p10=#volume" >> ${cfg}
-
- # All done; feed the config to sgibootcd and end up with an image
- # c= the config file
- # o= output image (burnable to CD; readable by fdisk)
- /usr/bin/sgibootcd c=${cfg} o=${clst_iso}
- ;;
- *) die "SGI LiveCD(s) only support the 'squashfs' fstype!" ;;
- esac
- ;;
- ppc*|powerpc*)
- echo ">> Running grub-mkrescue to create iso image...."
- grub-mkrescue -o "${1}" "${clst_target_path}"
- ;;
- sparc*)
- # Old silo (<=1.2.6) requires a specially built mkisofs
- # We try to autodetect this in a simple way, said mkisofs
- # should be in the cdtar, otherwise use the new style.
- if [ -x "${clst_target_path}/boot/mkisofs.sparc.fu" ]
- then
- mv "${clst_target_path}/boot/mkisofs.sparc.fu" /tmp
- echo "Running mkisofs.sparc.fu to create iso image...."
- echo "/tmp/mkisofs.sparc.fu ${mkisofs_zisofs_opts} -o ${1} -D -r -pad -quiet -S 'boot/cd.b' -B '/boot/second.b' -s '/boot/silo.conf' -V \"${clst_iso_volume_id}\" ${clst_target_path}/"
- /tmp/mkisofs.sparc.fu ${mkisofs_zisofs_opts} -o "${1}" -D -r -pad -quiet -S 'boot/cd.b' -B '/boot/second.b' -s '/boot/silo.conf' -V "${clst_iso_volume_id}" "${clst_target_path}"/ || die "Cannot make ISO image"
- rm /tmp/mkisofs.sparc.fu
- else
- echo "Running mkisofs to create iso image...."
- run_mkisofs -J -R -l ${mkisofs_zisofs_opts} -V "${clst_iso_volume_id}" -o "${1}" -G "${clst_target_path}/boot/isofs.b" -B ... "${clst_target_path}"/
+ if [[ ${clst_fstype} != squashfs ]]; then
+ die "SGI LiveCD(s) only support the 'squashfs' fstype!"
fi
+ # $clst_target_path/[kernels|arcload] already exists, create loopback and sgibootcd
+ [ ! -d "${clst_target_path}/loopback" ] && mkdir "${clst_target_path}/loopback"
+ [ ! -d "${clst_target_path}/sgibootcd" ] && mkdir "${clst_target_path}/sgibootcd"
+
+ # Setup variables
+ [ -f "${clst_target_path}/livecd" ] && rm -f "${clst_target_path}/livecd"
+ img="${clst_target_path}/loopback/image.squashfs"
+ knl="${clst_target_path}/kernels"
+ arc="${clst_target_path}/arcload"
+ cfg="${clst_target_path}/sgibootcd/sgibootcd.cfg"
+ echo "" > "${cfg}"
+
+ # If the image file exists in $clst_target_path, move it to the loopback dir
+ [ -e "${clst_target_path}/image.squashfs" ] \
+ && mv -f "${clst_target_path}/image.squashfs" "${clst_target_path}/loopback"
+
+ # An sgibootcd config is essentially a collection of commandline params
+ # stored in a text file. We could pass these on the command line, but it's
+ # far easier to generate a config file and pass it to sgibootcd versus using a
+ # ton of commandline params.
+ #
+ # f= indicates files to go into DVH (disk volume header) in an SGI disklabel
+ # format: f=</path/to/file>@<DVH name>
+ # p0= the first partition holds the LiveCD rootfs image
+ # format: p0=</path/to/image>
+ # p8= the eighth partition is the DVH partition
+ # p10= the tenth partition is the disk volume partition
+ # format: p8= is always "#dvh" and p10= is always "#volume"
+
+ # Add the kernels to the sgibootcd config
+ for x in ${clst_boot_kernel}; do
+ echo -e "f=${knl}/${x}@${x}" >> ${cfg}
+ done
+
+ # Next, the bootloader binaries and config
+ echo -e "f=${arc}/sash64@sash64" >> ${cfg}
+ echo -e "f=${arc}/sashARCS@sashARCS" >> ${cfg}
+ echo -e "f=${arc}/arc.cf@arc.cf" >> ${cfg}
+
+ # Next, the Loopback Image
+ echo -e "p0=${img}" >> ${cfg}
+
+ # Finally, the required SGI Partitions (dvh, volume)
+ echo -e "p8=#dvh" >> ${cfg}
+ echo -e "p10=#volume" >> ${cfg}
+
+ # All done; feed the config to sgibootcd and end up with an image
+ # c= the config file
+ # o= output image (burnable to CD; readable by fdisk)
+ /usr/bin/sgibootcd c=${cfg} o=${clst_iso}
;;
- x86|amd64)
- # detect if an EFI bootloader is desired
- if [ -d "${clst_target_path}/boot/efi" ] || \
- [ -d "${clst_target_path}/boot/EFI" ] || \
- [ -e "${clst_target_path}/gentoo.efimg" ]
- then
- if [ -e "${clst_target_path}/gentoo.efimg" ]
- then
- echo "Found prepared EFI boot image at \
- ${clst_target_path}/gentoo.efimg"
- else
- echo "Preparing EFI boot image"
- if [ -d "${clst_target_path}/boot/efi" ] && [ ! -d "${clst_target_path}/boot/EFI" ]; then
- echo "Moving /boot/efi to /boot/EFI"
- mv "${clst_target_path}/boot/efi" "${clst_target_path}/boot/EFI"
- fi
- # prepare gentoo.efimg from clst_target_path /boot/EFI dir
- iaSizeTemp=$(du -sk "${clst_target_path}/boot/EFI" 2>/dev/null)
- iaSizeB=$(echo ${iaSizeTemp} | cut '-d ' -f1)
- iaSize=$((${iaSizeB}+64)) # add slack, tested near minimum for overhead
- echo "Creating loopback file of size ${iaSize}kB"
- dd if=/dev/zero of="${clst_target_path}/gentoo.efimg" bs=1k \
- count=${iaSize}
- echo "Formatting loopback file with FAT16 FS"
- mkfs.vfat -F 16 -n GENTOOLIVE "${clst_target_path}/gentoo.efimg"
-
- mkdir "${clst_target_path}/gentoo.efimg.mountPoint"
- echo "Mounting FAT16 loopback file"
- mount -t vfat -o loop "${clst_target_path}/gentoo.efimg" \
- "${clst_target_path}/gentoo.efimg.mountPoint"
-
- echo "Populating EFI image file from ${clst_target_path}/boot/EFI"
- cp -rv "${clst_target_path}"/boot/EFI/ \
- "${clst_target_path}/gentoo.efimg.mountPoint" || die "Failed to populate EFI image file"
-
- umount "${clst_target_path}/gentoo.efimg.mountPoint"
- rmdir "${clst_target_path}/gentoo.efimg.mountPoint"
+ amd64|arm64|ia64|ppc*|powerpc*|sparc*|x86|i?86)
+ isoroot_checksum
- echo "Copying /boot/EFI to /EFI for rufus compatability"
- cp -rv "${clst_target_path}"/boot/EFI/ "${clst_target_path}"
- fi
- fi
+ extra_opts=("-joliet" "-iso-level" "3")
+ case ${clst_hostarch} in
+ sparc*) extra_opts+=("--sparc-boot") ;;
+ esac
- if [ -e "${clst_target_path}/isolinux/isolinux.bin" ]; then
- echo '** Found ISOLINUX bootloader'
- if [ -e "${clst_target_path}/gentoo.efimg" ]; then
- # have BIOS isolinux, plus an EFI loader image
- echo '** Found GRUB2 EFI bootloader'
- echo 'Creating ISO using both ISOLINUX and EFI bootloader'
- run_mkisofs -J -R -l ${mkisofs_zisofs_opts} -V "${clst_iso_volume_id}" -o "${1}" -b isolinux/isolinux.bin -c isolinux/boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table -eltorito-alt-boot -eltorito-platform efi -b gentoo.efimg -no-emul-boot -z "${clst_target_path}"/
- isohybrid --uefi "${1}"
- else
- echo 'Creating ISO using ISOLINUX bootloader'
- run_mkisofs -J -R -l ${mkisofs_zisofs_opts} -V "${clst_iso_volume_id}" -o "${1}" -b isolinux/isolinux.bin -c isolinux/boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table "${clst_target_path}"/
- isohybrid "${1}"
- fi
- elif [ -e "${clst_target_path}/gentoo.efimg" ]; then
- echo '** Found GRUB2 EFI bootloader'
- echo 'Creating ISO using EFI bootloader'
- run_mkisofs -J -R -l ${mkisofs_zisofs_opts} -V "${clst_iso_volume_id}" -o "${1}" -b gentoo.efimg -c boot.cat -no-emul-boot "${clst_target_path}"/
- else
- echo '** Found no known bootloader'
- echo 'Creating ISO with fingers crossed that you know what you are doing...'
- run_mkisofs -J -R -l ${mkisofs_zisofs_opts} -V "${clst_iso_volume_id}" -o "${1}" "${clst_target_path}"/
- fi
+ echo ">> Running grub-mkrescue to create iso image...."
+ grub-mkrescue "${extra_opts[@]}" -o "${1}" "${clst_target_path}"
;;
esac
exit $?
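
For the mips branch of create-iso.sh, the sgibootcd.cfg written above is simply the concatenation of the echoed lines; with a single kernel named ip30 (an example name) and ${clst_target_path} left unexpanded for readability, it would look roughly like:

    f=${clst_target_path}/kernels/ip30@ip30
    f=${clst_target_path}/arcload/sash64@sash64
    f=${clst_target_path}/arcload/sashARCS@sashARCS
    f=${clst_target_path}/arcload/arc.cf@arc.cf
    p0=${clst_target_path}/loopback/image.squashfs
    p8=#dvh
    p10=#volume
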
diff --git a/targets/support/depclean.sh b/targets/support/depclean.sh
index 56f9a669..f99134e0 100755
--- a/targets/support/depclean.sh
+++ b/targets/support/depclean.sh
@@ -2,8 +2,6 @@
source /tmp/chroot-functions.sh
-# If the user enabled PRESERVE_LIBS in options, tell portage to preserve them.
-[ -n "${clst_PRESERVE_LIBS}" ] && FEATURES="${clst_myfeatures} preserve-libs"
if [ "${clst_livecd_depclean}" = "keepbdeps" ]; then
run_merge --depclean --with-bdeps=y
else
diff --git a/targets/support/filesystem-functions.sh b/targets/support/filesystem-functions.sh
deleted file mode 100755
index 0c144ba8..00000000
--- a/targets/support/filesystem-functions.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/bin/bash
-
-# Dont forget to update functions.sh check_looptype
-# $1 is the target directory for the filesystem
-
-create_normal_loop() {
- export source_path="${clst_destpath}"
- export destination_path="$1"
- export loopname="image.loop"
-
- # We get genkernel-built kernels and initrds in place, create the loopback
- # file system on $clst_target_path, mount it, copy our bootable filesystem
- # over, umount it, and have a ready-to-burn ISO tree at $clst_target_path.
-
- echo "Calculating size of loopback filesystem..."
- loopsize=`du -ks ${source_path} | cut -f1`
- [ "${loopsize}" = "0" ] && loopsize=1
- # Add 4MB for filesystem slop
- loopsize=`expr ${loopsize} + 4096`
- echo "Creating loopback file..."
- dd if=/dev/zero of=${destination_path}/${loopname} bs=1k count=${loopsize} \
- || die "${loopname} creation failure"
- mke2fs -m 0 -F -q ${destination_path}/${loopname} \
- || die "Couldn't create ext2 filesystem"
- install -d ${destination_path}/loopmount
- sync; sync; sleep 3 # Try to work around 2.6.0+ loopback bug
- mount -t ext2 -o loop ${destination_path}/${loopname} \
- ${destination_path}/loopmount \
- || die "Couldn't mount loopback ext2 filesystem"
- sync; sync; sleep 3 # Try to work around 2.6.0+ loopback bug
- echo "cp -pPR ${source_path}/* ${destination_path}/loopmount"
- cp -pPR ${source_path}/* ${destination_path}/loopmount
- [ $? -ne 0 ] && { umount ${destination_path}/${loopname}; \
- die "Couldn't copy files to loopback ext2 filesystem"; }
- umount ${destination_path}/loopmount \
- || die "Couldn't unmount loopback ext2 filesystem"
- rm -rf ${destination_path}/loopmount
- # Now, $clst_target_path should contain a proper bootable image for our
- # ISO, including boot loader and loopback filesystem.
-}
-
-create_zisofs() {
- rm -rf "$1/zisofs" > /dev/null 2>&1
- echo "Creating zisofs..."
- mkzftree -z 9 -p2 "${clst_destpath}" "$1/zisofs" \
- || die "Could not run mkzftree, did you emerge zisofs"
-}
-
-create_noloop() {
- echo "Copying files for image (no loop)..."
- cp -pPR "${clst_destpath}"/* "$1" \
- || die "Could not copy files to image (no loop)"
-}
-
-create_squashfs() {
- echo "Creating squashfs..."
- export loopname="image.squashfs"
- mksquashfs "${clst_destpath}" "$1/${loopname}" ${clst_fsops} -noappend \
- || die "mksquashfs failed, did you emerge squashfs-tools?"
-}
-
-create_jffs() {
- echo "Creating jffs..."
- export loopname="image.jffs"
- # fs_check /usr/sbin/mkfs.jffs jffs sys-fs/mtd
- mkfs.jffs -d ${clst_destpath} -o $1/${loopname} ${clst_fsops} \
- || die "Could not create a jffs filesystem"
-}
-
-create_jffs2(){
- echo "Creating jffs2..."
- export loopname="image.jffs"
- # fs_check /usr/sbin/mkfs.jffs2 jffs2 sys-fs/mtd
- mkfs.jffs2 --root=${clst_destpath} --output=$1/${loopname} ${clst_fsops} \
- || die "Could not create a jffs2 filesystem"
-}
-
-create_cramfs(){
- echo "Creating cramfs..."
- export loopname="image.cramfs"
- #fs_check /sbin/mkcramfs cramfs sys-fs/cramfs
- mkcramfs ${clst_fsops} ${clst_destpath} $1/${loopname} \
- || die "Could not create a cramfs filesystem"
-}
diff --git a/targets/support/functions.sh b/targets/support/functions.sh
index 836dc9cc..4340a3c7 100755
--- a/targets/support/functions.sh
+++ b/targets/support/functions.sh
@@ -1,48 +1,35 @@
#!/bin/bash
copy_to_chroot() {
- local src_file=$1
- local dest_dir=${clst_chroot_path}${2:-/tmp}
- mkdir -p ${dest_dir}
- echo "copying ${src_file##*/} to ${dest_dir}"
- cp -pPR "${src_file}" "${dest_dir}"/
+ local src="${1}"
+ local dst="${clst_chroot_path}/${2:-/tmp}"
+ cp -pPR "${src}" "${dst}"
}
-delete_from_chroot(){
- if [ -e ${clst_chroot_path}${1} ]
- then
- echo "removing ${clst_chroot_path}${1} from the chroot"
- rm -f ${clst_chroot_path}${1}
- fi
+delete_from_chroot() {
+ rm -f "${clst_chroot_path}/${1}"
}
-exec_in_chroot(){
# Takes the full path to the source file as its argument
# copies the file to the /tmp directory of the chroot
# and executes it.
+exec_in_chroot() {
local file_name=$(basename ${1})
- local subdir=${2}
- local destdir="${subdir}/tmp"
-
- echo "Copying ${file_name} to ${destdir}"
- copy_to_chroot ${1} ${destdir}
- copy_to_chroot ${clst_shdir}/support/chroot-functions.sh \
- ${destdir}
- chroot_path=${clst_chroot_path}${subdir}
+ copy_to_chroot ${1}
+ copy_to_chroot ${clst_shdir}/support/chroot-functions.sh
- echo "Ensure the file has the executable bit set"
- chmod +x ${chroot_path}/${destdir}/${file_name}
+ # Ensure the file has the executable bit set
+ chmod +x ${clst_chroot_path}/tmp/${file_name}
echo "Running ${file_name} in chroot:"
- echo " ${clst_CHROOT} ${chroot_path} ${destdir}/${file_name}"
- ${clst_CHROOT} ${chroot_path} .${destdir}/${file_name} || exit 1
+ echo " ${clst_CHROOT} ${clst_chroot_path} /tmp/${file_name}"
+ ${clst_CHROOT} "${clst_chroot_path}" "/tmp/${file_name}" || exit 1
- delete_from_chroot ${destdir}/${file_name}
- delete_from_chroot ${destdir}/chroot-functions.sh
+ delete_from_chroot /tmp/${file_name}
+ delete_from_chroot /tmp/chroot-functions.sh
}
-#return codes
die() {
echo "$1"
exit 1
@@ -56,82 +43,13 @@ extract_cdtar() {
tar -I lbzip2 -xpf ${clst_cdtar} -C $1 || die "Couldn't extract cdtar ${cdtar}"
}
-create_bootloader() {
- # For amd64 and x86 we attempt to copy boot loader files from the live system and configure it right
- # this prevents (among other issues) needing to keep a cdtar up to date. All files are thrown into $clst_target_path
- # Future improvement may make bootloaders optional, but for now there is only one option
- if [ -x "/usr/bin/grub2-mkstandalone" ]; then
- grubmkstndaln="/usr/bin/grub2-mkstandalone"
- elif [ -x "/usr/bin/grub-mkstandalone" ]; then
- grubmkstndaln="/usr/bin/grub-mkstandalone"
- else
- die "Unable to find grub-mkstandalone"
- fi
-
- pushd "${1}" || die "Failed to enter livecd dir ${1}"
-
- # while $1/grub is unused here, it triggers grub config building in bootloader-setup.sh
- mkdir -p boot/EFI/BOOT isolinux
- #create boot.msg for isolinux
- echo "Gentoo Linux Installation LiveCD http://www.gentoo.org/" > isolinux/boot.msg
- echo "Enter to boot; F1 for kernels F2 for options." >> isolinux/boot.msg
- echo "Press any key in the next 15 seconds or we'll try to boot from disk." >> isolinux/boot.msg
- #install isolinux files
- if [ -f /usr/share/syslinux/isolinux.bin ]; then
- cp /usr/share/syslinux/isolinux.bin isolinux/
- #isolinux support files
- for i in libcom32.c32 libutil.c32 ldlinux.c32 reboot.c32 vesamenu.c32; do
- if [ -f "/usr/share/syslinux/${i}" ]; then
- cp "/usr/share/syslinux/${i}" isolinux/
- fi
- done
- #isolinux hardware detection toolkit, useful for system info and debugging
- if [ -f "/usr/share/syslinux/hdt.c32" ]; then
- cp /usr/share/syslinux/hdt.c32 isolinux/
- if [ -f "/usr/share/misc/pci.ids" ]; then
- cp /usr/share/misc/pci.ids isolinux/
- fi
- fi
- #memtest goes under isolinux since it doesn't work for uefi right now
- if [ -f /usr/share/memtest86+/memtest ]; then
- cp /usr/share/memtest86+/memtest.bin isolinux/memtest86
- else
- echo "Missing /usr/share/memtest86+/memtest.bin, this livecd will not have memtest86+ support. Enable USE=system-bootloader on catalyst to pull in the correct deps"
- fi
- else
- echo "Missing /usr/share/syslinux/isolinux.bin, this livecd will not bios boot. Enable USE=system-bootloader on catalyst to pull in the correct deps"
- fi
-
- #create grub-stub.cfg for embedding in grub-mkstandalone
- echo "insmod part_gpt" > grub-stub.cfg
- echo "insmod part_msdos" >> grub-stub.cfg
- echo "search --no-floppy --set=root --file /livecd" >> grub-stub.cfg
- echo "configfile /grub/grub.cfg" >> grub-stub.cfg
-
- # some 64 bit machines have 32 bit UEFI, and you might want to boot 32 bit on a 64 bit machine, so we take the safest path and include both
- # set up 32 bit uefi
- ${grubmkstndaln} /boot/grub/grub.cfg=./grub-stub.cfg --compress=xz -O i386-efi -o ./boot/EFI/BOOT/grubia32.efi --themes= || die "Failed to make grubia32.efi"
- #secure boot shim
- cp /usr/share/shim/BOOTIA32.EFI boot/EFI/BOOT/
- cp /usr/share/shim/mmia32.efi boot/EFI/BOOT/
-
- #set up 64 bit uefi
- ${grubmkstndaln} /boot/grub/grub.cfg=./grub-stub.cfg --compress=xz -O x86_64-efi -o ./boot/EFI/BOOT/grubx64.efi --themes= || die "Failed to make grubx64.efi"
- #secure boot shim
- cp /usr/share/shim/BOOTX64.EFI boot/EFI/BOOT/
- cp /usr/share/shim/mmx64.efi boot/EFI/BOOT/
-
- rm grub-stub.cfg || echo "Failed to remove grub-stub.cfg, but this hurts nothing"
- popd || die "Failed to leave livecd dir"
-}
-
extract_kernels() {
# extract multiple kernels
# $1 = Destination
# ${clst_target_path}/kernel is often a good choice for ${1}
# Takes the relative destination dir for the kernel as an argument
- # i.e boot or isolinux
+ # i.e. boot
[ -z "$clst_boot_kernel" ] && \
die "Required key boot/kernel not defined, exiting"
# install the kernels built in kmerge.sh
@@ -149,33 +67,34 @@ extract_kernels() {
mkdir -p ${1}/
tar -I lbzip2 -xf ${kbinary} -C ${1}/
- # change config name from "config-*" to "gentoo", for example
- #mv ${1}/config-* ${1}/${x}-config
- rm ${1}/config-*
+ # change config name from "config-*" to "gentoo-config", for example
+ mv ${1}/config-* ${1}/${x}-config
# change kernel name from "kernel" to "gentoo", for example
if [ -e ${1}/kernel-* ]
then
mv ${1}/kernel-* ${1}/${x}
fi
-
- # change kernel name from "kernelz" to "gentoo", for example
if [ -e ${1}/kernelz-* ]
then
mv ${1}/kernelz-* ${1}/${x}
fi
+ if [ -e ${1}/vmlinuz-* ]
+ then
+ mv ${1}/vmlinuz-* ${1}/${x}
+ fi
# change initrd name from "initrd" to "gentoo.igz", for example
if [ -e ${1}/initrd-* ]
then
mv ${1}/initrd-* ${1}/${x}.igz
fi
-
if [ -e ${1}/initramfs-* ]
then
mv ${1}/initramfs-* ${1}/${x}.igz
fi
+ # rename "System.map" to "System-gentoo.map", for example
if [ -e ${1}/System.map-* ]
then
mv ${1}/System.map-* ${1}/System-${x}.map
@@ -196,70 +115,3 @@ extract_modules() {
echo "Can't find kernel modules tarball at ${kmodules}. Skipping...."
fi
}
-extract_kernel() {
- # $1 = Destination
- # $2 = kname
-
- kbinary="${clst_chroot_path}/tmp/kerncache/${2}-kernel-initrd-${clst_version_stamp}.tar.bz2"
- [ ! -e "${kbinary}" ] && die "Can't find kernel tarball at ${kbinary}"
- mkdir -p ${1}/
- tar -I lbzip2 -xf ${kbinary} -C ${1}/
- # change config name from "config-*" to "gentoo", for example
- #mv ${1}/config-* ${1}/${2}-config
- rm ${1}/config-*
-
- # change kernel name from "kernel" to "gentoo", for example
- mv ${1}/kernel-* ${1}/${2}
-
- # change initrd name from "initrd" to "gentoo.igz", for example
- if [ -e ${1}/initrd-* ]
- then
- mv ${1}/initrd-* ${1}/${2}.igz
- fi
-
- # change initramfs name from "initramfs" to "gentoo.igz", for example
- if [ -e ${1}/initramfs-* ]
- then
- mv ${1}/initramfs-* ${1}/${2}.igz
- fi
-}
-
-check_bootargs(){
- # Add any additional options
- if [ -n "${clst_livecd_bootargs}" ]
- then
- for x in ${clst_livecd_bootargs}
- do
- cmdline_opts="${cmdline_opts} ${x}"
- done
- fi
-}
-
-check_filesystem_type(){
- case ${clst_fstype} in
- normal)
- cmdline_opts="${cmdline_opts} looptype=normal loop=/image.loop"
- ;;
- zisofs)
- cmdline_opts="${cmdline_opts} looptype=zisofs loop=/zisofs"
- ;;
- noloop)
- ;;
- squashfs)
- cmdline_opts="${cmdline_opts} looptype=squashfs loop=/image.squashfs"
- ;;
- jffs)
- cmdline_opts="${cmdline_opts} looptype=jffs loop=/image.jffs"
- ;;
- jffs2)
- cmdline_opts="${cmdline_opts} looptype=jffs2 loop=/image.jffs2"
- ;;
- cramfs)
- cmdline_opts="${cmdline_opts} looptype=cramfs loop=/image.cramfs"
- ;;
- esac
-}
-
-run_crossdev() {
- crossdev ${clst_CHOST}
-}
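
With the simplified helpers above, exec_in_chroot is now a fixed sequence rooted at the chroot's /tmp. A call such as exec_in_chroot ${clst_shdir}/support/rc-update.sh is roughly equivalent to the following (a sketch; the clst_* variables are supplied by catalyst):

    cp -pPR ${clst_shdir}/support/rc-update.sh ${clst_chroot_path}/tmp
    cp -pPR ${clst_shdir}/support/chroot-functions.sh ${clst_chroot_path}/tmp
    chmod +x ${clst_chroot_path}/tmp/rc-update.sh
    ${clst_CHROOT} ${clst_chroot_path} /tmp/rc-update.sh || exit 1
    rm -f ${clst_chroot_path}/tmp/rc-update.sh
    rm -f ${clst_chroot_path}/tmp/chroot-functions.sh
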
diff --git a/targets/support/kill-chroot-pids.sh b/targets/support/kill-chroot-pids.sh
deleted file mode 100755
index ea8ee402..00000000
--- a/targets/support/kill-chroot-pids.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/bash
-# Script to kill processes found running in the chroot.
-
-if [ "${clst_chroot_path}" == "/" ]
-then
- echo "Aborting .... clst_chroot_path is set to /"
- echo "This is very dangerous"
- exit 1
-fi
-
-if [ "${clst_chroot_path}" == "" ]
-then
- echo "Aborting .... clst_chroot_path is NOT set"
- echo "This is very dangerous"
- exit 1
-fi
-
-j=0
-declare -a pids
-# Get files and dirs in /proc
-for i in `ls /proc`
-do
- # Test for directories
- if [ -d /proc/$i ]
- then
- # Search for exe containing string inside ${clst_chroot_path}
- ls -la --color=never /proc/$i 2>&1 |grep exe|grep ${clst_chroot_path} > /dev/null
-
- # If found
- if [ $? == 0 ]
- then
- # Assign the pid into the pids array
- pids[$j]=$i
- j=$(($j+1))
- fi
- fi
-done
-
-if [ ${j} -gt 0 ]
-then
- echo
- echo "Killing process(es)"
- echo "pid: process name"
- for pid in ${pids[@]}
- do
- P_NAME=$(ls -la --color=never /proc/${pid} 2>&1 |grep exe|grep ${clst_chroot_path}|awk '{print $11}')
- echo ${pid}: ${P_NAME}
- done
- echo
- echo "Press Ctrl-C within 10 seconds to abort"
-
- sleep 10
-
- for pid in ${pids[@]}
- do
- kill -9 ${pid}
- done
-
- # Small sleep here to give the process(es) a chance to die before running unbind again.
- sleep 5
-
-fi
diff --git a/targets/support/kmerge.sh b/targets/support/kmerge.sh
index c43db8d1..41fac810 100755
--- a/targets/support/kmerge.sh
+++ b/targets/support/kmerge.sh
@@ -2,307 +2,211 @@
source /tmp/chroot-functions.sh
-check_genkernel_version
-
install -d /tmp/kerncache
-PKGDIR=/tmp/kerncache/${clst_kname}/ebuilds
-setup_gk_args() {
+distkmerge_get_image_path() {
+ case ${clst_basearch} in
+ amd64|x86)
+ echo arch/x86/boot/bzImage
+ ;;
+ arm64)
+ echo arch/arm64/boot/Image.gz
+ ;;
+ arm)
+ echo arch/arm/boot/zImage
+ ;;
+ hppa|ppc|ppc64)
+ echo ./vmlinux
+ ;;
+ riscv)
+ echo arch/riscv/boot/Image.gz
+ ;;
+ *)
+ die "unsupported ARCH=${clst_basearch}"
+ ;;
+ esac
+}
+
+genkernel_compile() {
# default genkernel args
GK_ARGS=(
- "${clst_kernel_gk_kernargs[@]}"
- --cachedir=/tmp/kerncache/${clst_kname}-genkernel_cache-${clst_version_stamp}
+ "${kernel_gk_kernargs[@]}"
+ --cachedir=/tmp/kerncache/${kname}-genkernel_cache-${clst_version_stamp}
--no-mountboot
--kerneldir=/usr/src/linux
- --modulespackage=/tmp/kerncache/${clst_kname}-modules-${clst_version_stamp}.tar.bz2
- --minkernpackage=/tmp/kerncache/${clst_kname}-kernel-initrd-${clst_version_stamp}.tar.bz2 all
+ --modulespackage=/tmp/kerncache/${kname}-modules-${clst_version_stamp}.tar.bz2
+ --minkernpackage=/tmp/kerncache/${kname}-kernel-initrd-${clst_version_stamp}.tar.bz2 all
)
# extra genkernel options that we have to test for
- if [ -n "${clst_gk_mainargs}" ]
- then
+ if [[ -n ${clst_gk_mainargs} ]]; then
GK_ARGS+=(${clst_gk_mainargs})
fi
- if [ -n "${clst_KERNCACHE}" ]
- then
- GK_ARGS+=(--kerncache=/tmp/kerncache/${clst_kname}-kerncache-${clst_version_stamp}.tar.bz2)
+ if [[ -n ${clst_KERNCACHE} ]]; then
+ GK_ARGS+=(--kerncache=/tmp/kerncache/${kname}-kerncache-${clst_version_stamp}.tar.bz2)
fi
- if [ -e /var/tmp/${clst_kname}.config ]
- then
- GK_ARGS+=(--kernel-config=/var/tmp/${clst_kname}.config)
+ if [[ -e /var/tmp/${kname}.config ]]; then
+ GK_ARGS+=(--kernel-config=/var/tmp/${kname}.config)
fi
-
- if [ -n "${clst_splash_theme}" ]
- then
- GK_ARGS+=(--splash=${clst_splash_theme})
- # Setup case structure for livecd_type
- case ${clst_livecd_type} in
- gentoo-release-minimal|gentoo-release-universal)
- case ${clst_hostarch} in
- amd64|x86)
- GK_ARGS+=(--splash-res=1024x768)
- ;;
- esac
- ;;
- esac
- fi
-
- if [ -d "/tmp/initramfs_overlay/${clst_initramfs_overlay}" ]
- then
- GK_ARGS+=(--initramfs-overlay=/tmp/initramfs_overlay/${clst_initramfs_overlay})
+ if [[ -d /tmp/initramfs_overlay/${initramfs_overlay} ]]; then
+ GK_ARGS+=(--initramfs-overlay=/tmp/initramfs_overlay/${initramfs_overlay})
fi
- if [ -n "${clst_CCACHE}" ]
- then
+ if [[ -n ${clst_CCACHE} ]]; then
GK_ARGS+=(--kernel-cc=/usr/lib/ccache/bin/gcc --utils-cc=/usr/lib/ccache/bin/gcc)
fi
-
- if [ -n "${clst_linuxrc}" ]
- then
+ if [[ -n ${clst_linuxrc} ]]; then
GK_ARGS+=(--linuxrc=/tmp/linuxrc)
fi
-
- if [ -n "${clst_busybox_config}" ]
- then
+ if [[ -n ${clst_busybox_config} ]]; then
GK_ARGS+=(--busybox-config=/tmp/busy-config)
fi
-
- if [ "${clst_target}" == "netboot2" ]
- then
+ if [[ ${clst_target} == netboot ]]; then
GK_ARGS+=(--netboot)
- if [ -n "${clst_merge_path}" ]
- then
+ if [[ -n ${clst_merge_path} ]]; then
GK_ARGS+=(--initramfs-overlay="${clst_merge_path}")
fi
fi
-
- if [[ "${clst_VERBOSE}" == "true" ]]
- then
+ if [[ -n ${clst_VERBOSE} ]]; then
GK_ARGS+=(--loglevel=2)
fi
-}
-
-genkernel_compile(){
- eval "clst_initramfs_overlay=\$clst_boot_kernel_${filtered_kname}_initramfs_overlay"
- eval "clst_kernel_merge=\$clst_boot_kernel_${filtered_kname}_packages"
- setup_gk_args
- #echo "The GK_ARGS are"
- #echo ${GK_ARGS[@]}
- export clst_kernel_merge
- export clst_initramfs_overlay
- # Build our list of kernel packages
- case ${clst_livecd_type} in
- gentoo-release-live*)
- if [ -n "${clst_kernel_merge}" ]
- then
- mkdir -p /usr/livecd
- echo "${clst_kernel_merge}" > /usr/livecd/kernelpkgs.txt
- fi
- ;;
- esac
- # Build with genkernel using the set options
- # callback is put here to avoid escaping issues
- if [[ "${clst_VERBOSE}" == "true" ]]
- then
- gk_callback_opts="-vN"
+ if [[ -n ${clst_VERBOSE} ]]; then
+ gk_callback_opts=(-vN)
else
- gk_callback_opts="-qN"
+ gk_callback_opts=(-qN)
fi
- PKGDIR=${PKGDIR}
- if [ -n "${clst_KERNCACHE}" ]
- then
- gk_callback_opts="${gk_callback_opts} -kb"
+ if [[ -n ${clst_KERNCACHE} ]]; then
+ gk_callback_opts+=(-kb)
fi
- if [ -n "${clst_FETCH}" ]
- then
- gk_callback_opts="${gk_callback_opts} -f"
+ if [[ -n ${clst_FETCH} ]]; then
+ gk_callback_opts+=(-f)
fi
- if [ "${clst_kernel_merge}" != "" ]
- then
- genkernel --callback="emerge ${gk_callback_opts} ${clst_kernel_merge}" \
+
+ if [[ -n ${kernel_merge} ]]; then
+ gk_callback=${gk_callback_opts[@]}
+ genkernel --callback="emerge ${gk_callback} ${kernel_merge}" \
"${GK_ARGS[@]}" || exit 1
else
genkernel "${GK_ARGS[@]}" || exit 1
fi
- if [ -n "${clst_KERNCACHE}" -a -e /var/tmp/${clst_kname}.config ]
- then
- md5sum /var/tmp/${clst_kname}.config | awk '{print $1}' > \
- /tmp/kerncache/${clst_kname}/${clst_kname}-${clst_version_stamp}.CONFIG
- fi
-}
-
-build_kernel() {
- genkernel_compile
}
[ -n "${clst_ENVSCRIPT}" ] && source /tmp/envscript
-export CONFIG_PROTECT="-*"
# Set the timezone for the kernel build
rm /etc/localtime
cp -f /usr/share/zoneinfo/UTC /etc/localtime
-filtered_kname=${clst_kname/-/_}
-filtered_kname=${clst_kname/\//_}
-filtered_kname=${filtered_kname/\./_}
-
-eval "clst_kernel_use=\$clst_boot_kernel_${filtered_kname}_use"
-eval eval clst_kernel_gk_kernargs=( \$clst_boot_kernel_${filtered_kname}_gk_kernargs )
-eval "clst_ksource=\$clst_boot_kernel_${filtered_kname}_sources"
+eval "initramfs_overlay=\$clst_boot_kernel_${kname}_initramfs_overlay"
+eval "kernel_merge=\$clst_boot_kernel_${kname}_packages"
+eval "kernel_use=\$clst_boot_kernel_${kname}_use"
+eval eval kernel_gk_kernargs=( \$clst_boot_kernel_${kname}_gk_kernargs )
+eval eval kernel_dracut_kernargs=( \$clst_boot_kernel_${kname}_dracut_args )
+eval "ksource=\$clst_boot_kernel_${kname}_sources"
+eval "distkernel=\$clst_boot_kernel_${kname}_distkernel"
-if [ -z "${clst_ksource}" ]
-then
- clst_ksource="virtual/linux-sources"
+if [[ ${distkernel} = "yes" ]] ; then
+ [[ -z ${ksource} ]] && ksource="sys-kernel/gentoo-kernel"
+else
+ [[ -z ${ksource} ]] && ksource="sys-kernel/gentoo-sources"
fi
-# Don't use pkgcache here, as the kernel source may get emerged with different
-# USE variables (and thus different patches enabled/disabled.) Also, there's no
-# real benefit in using the pkgcache for kernel source ebuilds.
+kernel_version=$(portageq best_visible / "${ksource}")
+if [[ -n ${clst_KERNCACHE} ]]; then
+ mkdir -p "/tmp/kerncache/${kname}"
+ pushd "/tmp/kerncache/${kname}" >/dev/null
-# Check if we have a match in kerncach
-
-if [ -n "${clst_KERNCACHE}" ]
-then
-
- USE_MATCH=0
- if [ -e /tmp/kerncache/${clst_kname}/${clst_kname}-${clst_version_stamp}.USE ]
- then
- STR1=$(for i in `cat /tmp/kerncache/${clst_kname}/${clst_kname}-${clst_version_stamp}.USE`; do echo $i; done|sort)
- STR2=$(for i in ${clst_kernel_use}; do echo $i; done|sort)
- if [ "${STR1}" = "${STR2}" ]
- then
- #echo "USE Flags match"
- USE_MATCH=1
- else
- [ -d /tmp/kerncache/${clst_kname}/ebuilds ] && \
- rm -r /tmp/kerncache/${clst_kname}/ebuilds
- [ -e /tmp/kerncache/${clst_kname}/usr/src/linux/.config ] && \
- rm /tmp/kerncache/${clst_kname}/usr/src/linux/.config
- fi
- fi
-
- EXTRAVERSION_MATCH=0
- if [ -e /tmp/kerncache/${clst_kname}/${clst_kname}-${clst_version_stamp}.EXTRAVERSION ]
- then
- STR1=`cat /tmp/kerncache/${clst_kname}/${clst_kname}-${clst_version_stamp}.EXTRAVERSION`
- STR2=${clst_kextraversion}
- if [ "${STR1}" = "${STR2}" ]
- then
- #echo "EXTRAVERSION match"
- EXTRAVERSION_MATCH=1
- fi
- fi
+ echo "${kernel_use}" > /tmp/USE
+ echo "${kernel_version}" > /tmp/VERSION
+ echo "${clst_kextraversion}" > /tmp/EXTRAVERSION
- CONFIG_MATCH=0
- if [ -e /tmp/kerncache/${clst_kname}/${clst_kname}-${clst_version_stamp}.CONFIG ]
- then
- if [ ! -e /var/tmp/${clst_kname}.config ]
- then
- CONFIG_MATCH=1
- else
- STR1=`cat /tmp/kerncache/${clst_kname}/${clst_kname}-${clst_version_stamp}.CONFIG`
- STR2=`md5sum /var/tmp/${clst_kname}.config|awk '{print $1}'`
- if [ "${STR1}" = "${STR2}" ]
- then
- CONFIG_MATCH=1
- fi
- fi
+ if cmp -s {/tmp/,}USE && \
+ cmp -s {/tmp/,}VERSION && \
+ cmp -s {/tmp/,}EXTRAVERSION && \
+ cmp -s /var/tmp/${kname}.config CONFIG; then
+ cached_kernel_found="true"
fi
- # install dependencies of kernel sources ahead of time in case
- # package.provided generated below causes them not to be (re)installed
- PKGDIR=${PKGDIR} clst_myemergeopts="--quiet --update --newuse --onlydeps" run_merge "${clst_ksource}" || exit 1
+ rm -f /tmp/{USE,VERSION,EXTRAVERSION}
+ popd >/dev/null
+fi
- # Create the kerncache directory if it doesn't exists
- mkdir -p /tmp/kerncache/${clst_kname}
+if [[ ! ${cached_kernel_found} ]]; then
+ if [[ ${distkernel} = "yes" ]] ; then
+USE="-initramfs" run_merge --update "${ksource}"
+ else
+USE="symlink" run_merge --update "${ksource}"
+ fi
+fi
- if [ -e /tmp/kerncache/${clst_kname}/${clst_kname}-${clst_version_stamp}.KERNELVERSION ]
- then
- KERNELVERSION=$(</tmp/kerncache/${clst_kname}/${clst_kname}-${clst_version_stamp}.KERNELVERSION)
- mkdir -p ${clst_port_conf}/profile
- echo "${KERNELVERSION}" > ${clst_port_conf}/profile/package.provided
- else
- rm -f ${clst_port_conf}/profile/package.provided
- fi
+if [[ -n ${clst_KERNCACHE} ]]; then
+ SOURCESDIR="/tmp/kerncache/${kname}/sources"
+ if [[ ! ${cached_kernel_found} ]]; then
+ echo "Moving kernel sources to ${SOURCESDIR} ..."
- # Don't use package.provided if there's a pending up/downgrade
- if [[ "$(portageq best_visible / ${clst_ksource})" == "${KERNELVERSION}" ]]; then
- echo "No pending updates for ${clst_ksource}"
- else
- echo "Pending updates for ${clst_ksource}, removing package.provided"
- rm ${clst_port_conf}/profile/package.provided
+ rm -rf "${SOURCESDIR}"
+ mv $(readlink -f /usr/src/linux) "${SOURCESDIR}"
fi
+ ln -snf "${SOURCESDIR}" /usr/src/linux
+fi
- [ -L /usr/src/linux ] && rm -f /usr/src/linux
-
- PKGDIR=${PKGDIR} clst_myemergeopts="--quiet --update --newuse" run_merge "${clst_ksource}" || exit 1
-
- SOURCESDIR="/tmp/kerncache/${clst_kname}/sources"
- if [ -L /usr/src/linux ]
- then
-
- # A kernel was merged, move it to $SOURCESDIR
- [ -e ${SOURCESDIR} ] && rm -Rf ${SOURCESDIR}
+if [[ ${distkernel} = "yes" ]] ; then
+ # Build external kernel modules
+ if [[ -n ${kernel_merge} ]]; then
+ run_merge ${kernel_merge}
+ fi
- KERNELVERSION=`portageq best_visible / "${clst_ksource}"`
- echo "${KERNELVERSION}" > /tmp/kerncache/${clst_kname}/${clst_kname}-${clst_version_stamp}.KERNELVERSION
+ # Kernel already built, let's run dracut to make initramfs
+ distkernel_source_path=$(equery -Cq f ${ksource} | grep "/usr/src/linux-" -m1)
+ distkernel_image_path=$(distkmerge_get_image_path)
+ distkernel_version=${distkernel_source_path##"/usr/src/linux-"}
- echo "Moving kernel sources to ${SOURCESDIR} ..."
- mv `readlink -f /usr/src/linux` ${SOURCESDIR}
+ DRACUT_ARGS=(
+ "${kernel_dracut_kernargs[@]}"
+ --force
+ --kernel-image="${distkernel_source_path}/${distkernel_image_path}"
+ --kver="${distkernel_version}"
+ )
- fi
- ln -sf ${SOURCESDIR} /usr/src/linux
+ dracut "${DRACUT_ARGS[@]}" || exit 1
- # If catalyst has set to a empty string, extraversion wasn't specified so we
- # skip this part
- if [ "${EXTRAVERSION_MATCH}" = "0" ]
- then
- if [ ! "${clst_kextraversion}" = "" ]
- then
- echo "Setting extraversion to ${clst_kextraversion}"
- ${clst_sed} -i -e "s:EXTRAVERSION \(=.*\):EXTRAVERSION \1-${clst_kextraversion}:" /usr/src/linux/Makefile
- echo ${clst_kextraversion} > /tmp/kerncache/${clst_kname}/${clst_kname}-${clst_version_stamp}.EXTRAVERSION
- else
- touch /tmp/kerncache/${clst_kname}/${clst_kname}-${clst_version_stamp}.EXTRAVERSION
- fi
- fi
+ # Create minkernel package to mimic genkernel's behaviour
+ cd /boot
+ tar jcvf /tmp/kerncache/${kname}-kernel-initrd-${clst_version_stamp}.tar.bz2 System.map* config* initramfs* vmlinuz*
+ cd /
+ tar jcvf /tmp/kerncache/${kname}-modules-${clst_version_stamp}.tar.bz2 lib/modules
else
- run_merge "${clst_ksource}" || exit 1
- #ensure that there is a /usr/src/linux symlink and it points to the sources we just installed
- echo "Adjusting /usr/src/linux to point to \
-$(portageq contents / $(portageq best_visible / "${clst_ksource}" 2>/dev/null) 2>/dev/null | grep --color=never '/usr/src/' | head -n1 2>/dev/null)"
- ln -snf $(portageq contents / $(portageq best_visible / "${clst_ksource}" 2>/dev/null) 2>/dev/null | grep --color=never '/usr/src/' | head -n1 2>/dev/null) \
- /usr/src/linux
- if [ ! "${clst_kextraversion}" = "" ]
- then
- echo "Setting extraversion to ${clst_kextraversion}"
- ${clst_sed} -i -e "s:EXTRAVERSION \(=.*\):EXTRAVERSION \1-${clst_kextraversion}:" /usr/src/linux/Makefile
- fi
+ if [[ -n ${clst_kextraversion} ]]; then
+ echo "Setting EXTRAVERSION to ${clst_kextraversion}"
+
+ if [[ -e /usr/src/linux/Makefile.bak ]]; then
+ cp /usr/src/linux/Makefile{.bak,}
+ else
+ cp /usr/src/linux/Makefile{,.bak}
+ fi
+ sed -i -e "s:EXTRAVERSION \(=.*\):EXTRAVERSION \1-${clst_kextraversion}:" \
+ /usr/src/linux/Makefile
+ fi
+
+ genkernel_compile
fi
+# Write out CONFIG, USE, VERSION, and EXTRAVERSION files
+if [[ -n ${clst_KERNCACHE} && ! ${cached_kernel_found} ]]; then
+ pushd "/tmp/kerncache/${kname}" >/dev/null
-# Update USE flag in make.conf
-[ -e ${clst_make_conf} ] && \
- echo "USE=\"\${USE} ${clst_kernel_use} build\"" >> ${clst_make_conf}
-
-make_destpath
-
-
-build_kernel
-${clst_sed} -i "/USE=\"\${USE} ${clst_kernel_use} \"/d" ${clst_make_conf}
-# grep out the kernel version so that we can do our modules magic
-VER=`grep ^VERSION\ \= /usr/src/linux/Makefile | awk '{ print $3 };'`
-PAT=`grep ^PATCHLEVEL\ \= /usr/src/linux/Makefile | awk '{ print $3 };'`
-SUB=`grep ^SUBLEVEL\ \= /usr/src/linux/Makefile | awk '{ print $3 };'`
-EXV=`grep ^EXTRAVERSION\ \= /usr/src/linux/Makefile | ${clst_sed} -e "s/EXTRAVERSION =//" -e "s/ //g"`
-clst_fudgeuname=${VER}.${PAT}.${SUB}${EXV}
-
-unset USE
+ cp /var/tmp/${kname}.config CONFIG
+ echo "${kernel_use}" > USE
+ echo "${kernel_version}" > VERSION
+ echo "${clst_kextraversion}" > EXTRAVERSION
+ popd >/dev/null
+fi
-if [ -n "${clst_KERNCACHE}" ]
-then
- echo ${clst_kernel_use} > /tmp/kerncache/${clst_kname}/${clst_kname}-${clst_version_stamp}.USE
+if [[ ! ${cached_kernel_found} ]]; then
+ run_merge --deselect "${ksource}"
+ # This was breaking multi-kernel iso builds, probably not needed
+ # rm /usr/src/linux
fi
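
The rewritten kmerge.sh resolves its per-kernel settings by variable indirection: catalyst exports each boot/kernel option for a kernel labelled <kname> as clst_boot_kernel_<kname>_<option>, and the script recovers the value for the current kernel with eval. The kerncache check above additionally leans on brace expansion, so "cmp -s {/tmp/,}USE" compares the freshly written /tmp/USE against the cached USE file under /tmp/kerncache/<kname>. A minimal sketch of the lookup, assuming a hypothetical kernel label "gentoo" and an example packages value:

# Hypothetical label and value; the real ones come from the spec file.
kname=gentoo
clst_boot_kernel_gentoo_packages="sys-fs/zfs-kmod"
# What the script does via eval:
eval "kernel_merge=\$clst_boot_kernel_${kname}_packages"
# The same lookup with bash indirect expansion:
var=clst_boot_kernel_${kname}_packages
kernel_merge=${!var}
echo "${kernel_merge}"
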
diff --git a/targets/support/livecdfs-update.sh b/targets/support/livecdfs-update.sh
index 2c551b42..687b9d4e 100755
--- a/targets/support/livecdfs-update.sh
+++ b/targets/support/livecdfs-update.sh
@@ -7,7 +7,8 @@ source /tmp/chroot-functions.sh
# Allow root logins to our CD by default
if [ -e /etc/ssh/sshd_config ]
then
- ${clst_sed} -i 's:^#PermitRootLogin\ yes:PermitRootLogin\ yes:' \
+ sed -i \
+ -e '/^#PermitRootLogin/c# Allow root login with password on livecds.\nPermitRootLogin Yes' \
/etc/ssh/sshd_config
fi
@@ -16,14 +17,8 @@ rm -rf /etc/localtime
cp /usr/share/zoneinfo/UTC /etc/localtime
# Setup the hostname
-if [ "${clst_livecd_type}" == "gentoo-gamecd" ]
-then
- echo 'HOSTNAME="gamecd"' > /etc/conf.d/hostname
- echo "127.0.0.1 gamecd.gentoo gamecd localhost" > /etc/hosts
-else
- echo 'HOSTNAME="livecd"' > /etc/conf.d/hostname
- echo "127.0.0.1 livecd.gentoo livecd localhost" > /etc/hosts
-fi
+echo 'hostname="livecd"' > /etc/conf.d/hostname
+echo "127.0.0.1 livecd.gentoo livecd localhost" > /etc/hosts
# Since we're an official Gentoo release, we do things the official Gentoo way.
# As such, we override livecd/users.
@@ -32,10 +27,6 @@ case ${clst_livecd_type} in
user_comment="Gentoo default user"
clst_livecd_users="gentoo"
;;
- gentoo-gamecd)
- user_comment="Gentoo GameCD default user"
- clst_livecd_users="gamecd"
- ;;
esac
# Add any users
@@ -45,7 +36,6 @@ then
default_comment="Default LiveCD User"
[ -z "${user_comment}" ] && user_comment=${default_comment}
- # Here we check to see if games exists for bug #125498
if [ "$(getent group games | cut -d: -f1)" != "games" ]
then
echo "Adding games group"
@@ -61,258 +51,79 @@ then
useradd -G users,wheel,audio,plugdev,games,cdrom,disk,floppy,usb \
-g 100 -c "${user_comment}" -m ${x}
chown -R ${x}:users /home/${x}
- if [ -n "${clst_livecd_xdm}" -a -n "${clst_livecd_xsession}" ]
- then
- echo "[Desktop]" > /home/${x}/.dmrc
- echo "Session=${clst_livecd_xsession}" >> /home/${x}/.dmrc
- chown -R ${x}:users /home/${x}
- fi
done
fi
# Setup sudoers
if [ -f /etc/sudoers ]
then
- ${clst_sed} -i '/NOPASSWD: ALL/ s/^# //' /etc/sudoers
+ sed -i '/NOPASSWD: ALL/ s/^# //' /etc/sudoers
fi
-# Setup links for ethernet devices
-cd /etc/init.d
-ln -sf net.lo net.eth1
-ln -sf net.lo net.eth2
-ln -sf net.lo net.eth3
-ln -sf net.lo net.eth4
-
# Add this for hwsetup/mkx86config
mkdir -p /etc/sysconfig
-# Tweak the livecd fstab so that users know not to edit it
-# https://bugs.gentoo.org/60887
-echo "####################################################" > /etc/fstab
-echo "## ATTENTION: THIS IS THE FSTAB ON THE LIVECD ##" >> /etc/fstab
-echo "## PLEASE EDIT THE FSTAB at /mnt/gentoo/etc/fstab ##" >> /etc/fstab
-echo "####################################################" >> /etc/fstab
+cat <<EOF > /etc/fstab
+####################################################
+## ATTENTION: THIS IS THE FSTAB ON THE LIVECD ##
+## PLEASE EDIT THE FSTAB at /mnt/gentoo/etc/fstab ##
+####################################################
# fstab tweaks
-echo "tmpfs / tmpfs defaults 0 0" >> /etc/fstab
-echo "tmpfs ${clst_repo_basedir}/${clst_repo_name} tmpfs defaults 0 0" >> /etc/fstab
-# If /usr/lib/X11/xkb/compiled then make it tmpfs
-if [ -d /usr/lib/X11/xkb/compiled ]
-then
- echo "tmpfs /usr/lib/X11/xkb/compiled tmpfs defaults 0 0" >> \
- /etc/fstab
-fi
+tmpfs / tmpfs defaults 0 0
+EOF
-# Tweak the livecd make.conf so that users know not to edit it
-# https://bugs.gentoo.org/144647
mv ${clst_make_conf} ${clst_make_conf}.old
-echo "####################################################" >> ${clst_make_conf}
-echo "## ATTENTION: THIS IS THE MAKE.CONF ON THE LIVECD ##" >> ${clst_make_conf}
-echo "## PLEASE EDIT /mnt/gentoo${clst_make_conf} INSTEAD ##" >> ${clst_make_conf}
-echo "####################################################" >> ${clst_make_conf}
+cat <<EOF > ${clst_make_conf}
+####################################################
+## ATTENTION: THIS IS THE MAKE.CONF ON THE LIVECD ##
+## PLEASE EDIT /mnt/gentoo${clst_make_conf} INSTEAD ##
+####################################################
+EOF
cat ${clst_make_conf}.old >> ${clst_make_conf}
-# devfs tweaks
-[ -e /etc/devfsd.conf ] && ${clst_sed} -i '/dev-state/ s:^:#:' /etc/devfsd.conf
-
# Add some helpful aliases
-echo "alias cp='cp -i'" >> /etc/profile
-echo "alias mv='mv -i'" >> /etc/profile
-echo "alias rm='rm -i'" >> /etc/profile
-echo "alias ls='ls --color=auto'" >> /etc/profile
-echo "alias ll='ls -l'" >> /etc/profile
-echo "alias grep='grep --color=auto'" >> /etc/profile
-
-# Make sure we have the latest pci,usb and hotplug ids. Older versions of
-# pciutils and usbutils used /sbin, where newer versions use /usr/sbin.
-[ -x /sbin/update-pciids ] && /sbin/update-pciids
-[ -x /sbin/update-usbids ] && /sbin/update-usbids
-[ -x /usr/sbin/update-pciids ] && /usr/sbin/update-pciids
-[ -x /usr/sbin/update-usbids ] && /usr/sbin/update-usbids
-if [ -d /usr/share/hwdata ]
-then
- # If we have uncompressed pci and usb ids files, symlink them.
- [ -f /usr/share/misc/pci.ids ] && [ -f /usr/share/hwdata/pci.ids ] && \
- rm -f /usr/share/hwdata/pci.ids && ln -s /usr/share/misc/pci.ids \
- /usr/share/hwdata/pci.ids
- [ -f /usr/share/misc/usb.ids ] && [ -f /usr/share/hwdata/usb.ids ] && \
- rm -f /usr/share/hwdata/usb.ids && ln -s /usr/share/misc/usb.ids \
- /usr/share/hwdata/usb.ids
- # If we have compressed pci and usb files, we download our own copies.
- [ -f /usr/share/misc/pci.ids.gz ] && [ -f /usr/share/hwdata/pci.ids ] && \
- rm -f /usr/share/hwdata/pci.ids && wget -O /usr/share/hwdata/pci.ids \
- http://pciids.sourceforge.net/v2.2/pci.ids
- [ -f /usr/share/misc/usb.ids.gz ] && [ -f /usr/share/hwdata/usb.ids ] && \
- rm -f /usr/share/hwdata/usb.ids && wget -O /usr/share/hwdata/usb.ids \
- http://www.linux-usb.org/usb.ids
-fi
-
-# Setup opengl in /etc (if configured)
-[ -x /usr/sbin/openglify ] && /usr/sbin/openglify
-
-# Setup configured display manager
-if [ -n "${clst_livecd_xdm}" ]
-then
- ${clst_sed} -i \
- -e "s:^#\\?DISPLAYMANAGER=.\+$:DISPLAYMANAGER=\"${clst_livecd_xdm}\":" \
- /etc/rc.conf
- ${clst_sed} -i \
- -e "s:^#\\?DISPLAYMANAGER=.\+$:DISPLAYMANAGER=\"${clst_livecd_xdm}\":" \
- /etc/conf.d/xdm
-fi
-
-# Setup configured default X Session
-if [ -n "${clst_livecd_xsession}" ]
-then
- echo "XSESSION=\"${clst_livecd_xsession}\"" > /etc/env.d/90xsession
-fi
-
-# touch /etc/asound.state
-touch /etc/asound.state
+cat <<EOF >> /etc/profile
+alias cp='cp -i'
+alias mv='mv -i'
+alias rm='rm -i'
+alias ls='ls --color=auto'
+alias ll='ls -l'
+alias grep='grep --color=auto'
+EOF
# Tweak the MOTD for Gentoo releases
case ${clst_livecd_type} in
- gentoo-release-universal)
- cat /etc/generic.motd.txt /etc/universal.motd.txt \
- /etc/minimal.motd.txt > /etc/motd
- ${clst_sed} -i 's:^##GREETING:Welcome to the Gentoo Linux Universal Installation CD!:' /etc/motd
- ;;
gentoo-release-minimal)
cat /etc/generic.motd.txt /etc/minimal.motd.txt > /etc/motd
- ${clst_sed} -i 's:^##GREETING:Welcome to the Gentoo Linux Minimal Installation CD!:' /etc/motd
+ sed -i 's:^##GREETING:Welcome to the Gentoo Linux Minimal Installation CD!:' /etc/motd
;;
gentoo-release-live*)
- cat /etc/generic.motd.txt \
- /etc/minimal.motd.txt /etc/livecd.motd.txt > /etc/motd
- ${clst_sed} -i -e 's:^##GREETING:Welcome to the Gentoo Linux LiveCD!:' \
- -e "s:##DISPLAY_MANAGER:${clst_livecd_xdm}:" /etc/motd
- ;;
- gentoo-gamecd)
- cat /etc/generic.motd.txt /etc/gamecd.motd.txt > /etc/motd
- ${clst_sed} -i 's:^##GREETING:Welcome to the Gentoo Linux ##GAME_NAME GameCD!:' /etc/motd
+ cat /etc/generic.motd.txt /etc/livecd.motd.txt > /etc/motd
+ sed -i -e 's:^##GREETING:Welcome to the Gentoo Linux LiveCD!:' /etc/motd
;;
esac
-rm -f /etc/generic.motd.txt /etc/universal.motd.txt /etc/minimal.motd.txt /etc/livecd.motd.txt /etc/gamecd.motd.txt
-
-# Setup splash (if called for)
-if [ -n "${clst_livecd_splash_theme}" ]
-then
- if [ -d /etc/splash/${clst_livecd_splash_theme} ]
- then
- ${clst_sed} -i \
- -e "s:# SPLASH_THEME=\"gentoo\":SPLASH_THEME=\"${clst_livecd_splash_theme}\":" \
- -e "/^# SPLASH_TTYS=/ s/^#//" \
- /etc/conf.d/splash
- rm -f /etc/splash/default
- ln -s /etc/splash/${clst_livecd_splash_theme} /etc/splash/default
- else
- echo "Error, cannot setup splash theme ${clst_livecd_splash_theme}"
- exit 1
- fi
-fi
-
-# Clear out locales
-case ${clst_livecd_type} in
- gentoo-release-minimal|gentoo-release-universal|gentoo-gamecd)
- rm -rf /usr/lib/locale/{a,b,c,d,e{l,n_{A,B,C,D,G,H,I,N,P,S,US.,Z},s,t,u},f,g,h,i,j,k,l,m,n,o,p,r,s,t,u,v,w,x,y,z}*
- ;;
-esac
+rm -f /etc/generic.motd.txt /etc/minimal.motd.txt /etc/livecd.motd.txt
# Post configuration
case ${clst_livecd_type} in
- gentoo-gamecd )
- # We grab our configuration
- if [ -e /tmp/gamecd.conf ]
- then
- source /tmp/gamecd.conf || exit 1
- rm /tmp/gamecd.conf
-
- # Here we replace out game information into several files
- ${clst_sed} -i -e "s:##GAME_NAME:${GAME_NAME}:" /etc/motd
-
- # Here we setup our xinitrc
- echo "exec ${GAME_EXECUTABLE}" > /etc/X11/xinit/xinitrc
- fi
-
- # This is my hack to reduce tmpfs usage
- mkdir -p /usr/livecd/db/pkg/x11-base
- mv -f /var/db/pkg/x11-base/xorg* /usr/livecd/db/pkg/x11-base
- rm -rf /var/db
-
- touch /etc/startx
- ;;
- gentoo-release-live*)
- # Setup Gnome theme
- if [ "${clst_livecd_xsession}" == "gnome" ]
- then
- gconftool-2 --direct \
- --config-source xml:readwrite:/etc/gconf/gconf.xml.defaults \
- --type string --set /desktop/gnome/interface/font_name "Sans 9"
- fi
-
- # Remove locking on screensaver
- gconftool-2 --direct \
- --config-source=xml:readwrite:/etc/gconf/gconf.xml.defaults -s \
- -t bool /apps/gnome-screensaver/lock_enabled false >/dev/null
-
- # Setup GDM
- if [ "${clst_livecd_xdm}" == "gdm" ]
- then
- if [ ! -e /etc/X11/gdm/gdm.conf ] && [ -e /usr/share/gdm/defaults.conf ]
- then
- if [ -n "${clst_livecd_users}" ] && [ -n "${first_user}" ]
- then
- sedxtra="\nTimedLogin=${first_user}"
- else
- sedxtra=""
- fi
-
- cp -f /etc/X11/gdm/custom.conf /etc/X11/gdm/custom.conf.old
-
- sed -i \
- -e "s:\(\[daemon\]\)$:\1\nTimedLoginEnable=true\nTimedLoginDelay=10${sedxtra}:" \
- -e 's:\(\[greeter\]\)$:\1\nGraphicalTheme=gentoo-emergence:' \
- /etc/X11/gdm/custom.conf
- else
- cp -f /etc/X11/gdm/gdm.conf /etc/X11/gdm/gdm.conf.old
- ${clst_sed} -i \
- -e 's:TimedLoginEnable=false:TimedLoginEnable=true:' \
- -e 's:TimedLoginDelay=30:TimedLoginDelay=10:' \
- -e 's:AllowRemoteRoot=true:AllowRemoteRoot=false:' \
- -e ':^#GraphicalTheme=: s:^#::' \
- -e 's:^GraphicalTheme=.*$:GraphicalTheme=gentoo-emergence:' \
- /etc/X11/gdm/gdm.conf
-
- if [ -n "${clst_livecd_users}" ] && [ -n "${first_user}" ]
- then
- ${clst_sed} -i \
- -e "s:TimedLogin=:TimedLogin=${first_user}:" \
- /etc/X11/gdm/gdm.conf
- fi
- fi
- fi
-
- # This gives us our list of system packages for the installer
- mkdir -p /usr/livecd
- ### XXX: Andrew says we don't need this anymore
- USE="-* $(cat /var/db/pkg/sys-libs/glibc*/USE)" emerge -eqp @system | grep -e '^\[ebuild' | ${clst_sed} -e 's:^\[ebuild .\+\] ::' -e 's: .\+$::' > /usr/livecd/systempkgs.txt
-
- # This is my hack to reduce tmpfs usage
- cp -r ${clst_repo_basedir}/${clst_repo_name}/profiles /usr/livecd
- cp -r ${clst_repo_basedir}/${clst_repo_name}/eclass /usr/livecd
- rm -rf /usr/livecd/profiles/{co*,default-{1*,a*,b*,d*,h*,i*,m*,p*,s*,x*},g*,hardened-*,n*,x*}
- mv -f /etc/gconf /usr/livecd
- ln -sf /usr/livecd/gconf /etc/gconf
- mv -f /var/db /usr/livecd
- ln -sf /usr/livecd/db /var/db
-
+ gentoo-release-*)
# Clear out lastlog
rm -f /var/log/lastlog && touch /var/log/lastlog
- # Create our Handbook icon
- [ -e /docs/handbook/index.html ] && create_handbook_icon
- [ -n "${clst_livecd_overlay}" ] && [ -e ${clst_livecd_overlay}/docs/handbook/index.html ] && create_handbook_icon
+ cat <<-EOF > /usr/share/applications/gentoo-handbook.desktop
+ [Desktop Entry]
+ Encoding=UTF-8
+ Version=1.0
+ Type=Link
+ URL=https://wiki.gentoo.org/wiki/Handbook:Main_Page
+ Terminal=false
+ Name=Gentoo Linux Handbook
+ GenericName=Gentoo Linux Handbook
+ Comment=This is a link to the Gentoo Linux Handbook.
+ Icon=text-editor
+ EOF
# Copy our icons into place and build home directories
if [ -n "${clst_livecd_users}" ]
@@ -324,57 +135,12 @@ case ${clst_livecd_type} in
[ -e /usr/share/applications/gentoo-handbook.desktop ] && \
cp -f /usr/share/applications/gentoo-handbook.desktop \
/home/${username}/Desktop
- # Copy our installer icons
- if [ -e /usr/share/applications/installer-gtk.desktop ]
- then
- cp -f /usr/share/applications/installer-gtk.desktop \
- /home/${username}/Desktop
- cp -f /usr/share/applications/installer-dialog.desktop \
- /home/${username}/Desktop
- ${clst_sed} -i -e \
- 's:Exec=installer-dialog:Exec=sudo installer-dialog:' \
- /home/${username}/Desktop/installer-dialog.desktop
- ${clst_sed} -i -e 's:Exec=installer-gtk:Exec=installer:' \
- /home/${username}/Desktop/installer-gtk.desktop
- fi
chown -R ${username}:100 /home/${username}
done
fi
;;
generic-livecd )
- # This is my hack to reduce tmpfs usage
- mkdir -p /usr/livecd
-
- if [ -d /etc/gconf ]
- then
- mv -f /etc/gconf /usr/livecd
- ln -sf /usr/livecd/gconf /etc/gconf
- fi
-
- if [ -e /usr/livecd/kernelpkgs.txt ]
- then
- rm -f /usr/livecd/kernelpkgs.txt
- fi
-
- touch /etc/startx
- ;;
- * )
- if [ -e /usr/livecd/kernelpkgs.txt ]
- then
- rm -f /usr/livecd/kernelpkgs.txt
- fi
;;
esac
-# We want the first user to be used when auto-starting X
-if [ -e /etc/startx ]
-then
- ${clst_sed} -i "s:##STARTX:echo startx | su - '${first_user}':" /root/.bashrc
-fi
-
-if [ -e /lib/rcscripts/addons/udev-start.sh ]
-then
- ${clst_sed} -i "s:\t\[\[ -x /sbin/evms_activate:\t\[\[ -x \${CDBOOT} \]\] \&\& \[\[ -x /sbin/evms_activate:" /lib/rcscripts/addons/udev-start.sh
-fi
-
env-update
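
livecdfs-update.sh now generates whole files with single here-documents instead of chains of echo appends; because the EOF delimiter is unquoted, variables such as ${clst_make_conf} still expand inside the block. A minimal sketch of the pattern, using a hypothetical target path:

# Hypothetical path; the script applies the same pattern to /etc/fstab,
# ${clst_make_conf} and /etc/profile.
target=/tmp/example-make.conf
cat <<EOF > "${target}"
####################################################
## ATTENTION: THIS FILE WAS GENERATED AT ${target} ##
####################################################
EOF
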
diff --git a/targets/support/netboot2-final.sh b/targets/support/netboot-final.sh
index ebd51878..fc0de880 100755
--- a/targets/support/netboot2-final.sh
+++ b/targets/support/netboot-final.sh
@@ -1,15 +1,12 @@
#!/bin/bash
source ${clst_shdir}/support/functions.sh
-source ${clst_shdir}/support/filesystem-functions.sh
-
extract_kernels ${clst_target_path}/boot
# Move kernel binaries to ${clst_target_path}/kernels, and
# move everything else to ${clst_target_path}/kernels/misc
-mkdir ${clst_target_path}/kernels
-mkdir ${clst_target_path}/kernels/misc
+mkdir -p ${clst_target_path}/kernels/misc
for x in ${clst_boot_kernel}; do
mv ${clst_target_path}/boot/${x} ${clst_target_path}/kernels
@@ -17,17 +14,12 @@ for x in ${clst_boot_kernel}; do
mv ${clst_target_path}/boot/System-${x}.map ${clst_target_path}/kernels/misc
done
+rm -f ${clst_target_path}/boot/gentoo-config
rmdir ${clst_target_path}/boot
# Any post-processing necessary for each architecture can be done here. This
# may include things like sparc's elftoaout, x86's PXE boot, etc.
case ${clst_hostarch} in
- alpha)
- sleep 0
- ;;
- arm)
- sleep 0
- ;;
hppa)
# Only one kernel should be there
kname=${clst_boot_kernel[0]}
@@ -39,7 +31,7 @@ case ${clst_hostarch} in
-s ${clst_target_path}/${kname}-hppa.lif \
-f /dev/null \
-b /usr/share/palo/iplboot \
- -c "0/vmlinux initrd=0/ramdisk root=/dev/ram0" \
+ -c "0/vmlinux initrd=0/ramdisk" \
|| exit 1
;;
@@ -51,14 +43,8 @@ case ${clst_hostarch} in
fi
for x in ${clst_boot_kernel}; do
elftoaout ${clst_target_path}/kernels/${x} -o ${clst_target_path}/${x}-a.out
- ${piggyback} ${clst_target_path}/${x}-a.out ${clst_target_path}/kernels/misc/System.map-${x} ${clst_target_path}/kernels/misc/${x}.igz
+ ${piggyback} ${clst_target_path}/${x}-a.out ${clst_target_path}/kernels/misc/System-${x}.map ${clst_target_path}/kernels/misc/${x}.igz
done
;;
- ia64)
- sleep 0
- ;;
- x86|amd64)
- sleep 0
- ;;
esac
exit $?
diff --git a/targets/support/post-kmerge.sh b/targets/support/post-kmerge.sh
deleted file mode 100755
index 58b2e08d..00000000
--- a/targets/support/post-kmerge.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-RUN_DEFAULT_FUNCS="no"
-
-source /tmp/chroot-functions.sh
-
-# Only run depscan.sh if modules exist
-if [ -n "$(ls /lib/modules)" ]
-then
- find /lib/modules -name modules.dep -exec touch {} \;
-fi
diff --git a/targets/support/pre-distkmerge.sh b/targets/support/pre-distkmerge.sh
new file mode 100644
index 00000000..08409a93
--- /dev/null
+++ b/targets/support/pre-distkmerge.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+RUN_DEFAULT_FUNCS="yes"
+
+source /tmp/chroot-functions.sh
+
+run_merge --oneshot sys-kernel/dracut
diff --git a/targets/support/pre-kmerge.sh b/targets/support/pre-kmerge.sh
index dd51ae9a..ed825f5f 100755
--- a/targets/support/pre-kmerge.sh
+++ b/targets/support/pre-kmerge.sh
@@ -1,33 +1,26 @@
#!/bin/bash
+RUN_DEFAULT_FUNCS="yes"
+
source /tmp/chroot-functions.sh
-case ${clst_hostarch} in
- hppa)
- got_32=0
- got_64=0
- for i in ${clst_boot_kernel}
- do
- if [ "${i: -2}" == "32" ]
- then
- if [ $got_32 -eq 1 ]
- then
- die "Only one 32 bit kernel can be configured"
- fi
- got_32=1
- elif [ "${i: -2}" == "64" ]
- then
- if [ $got_64 -eq 1 ]
- then
- die "Only one 64 bit kernel can be configured"
- fi
- got_64=1
- else
- die "Kernel names must end by either 32 or 64"
- fi
- done
- ;;
-esac
+if [[ ${clst_hostarch} == hppa ]]; then
+ num32=0 num64=0
+ for i in ${clst_boot_kernel}; do
+ case ${i} in
+ *32)
+ let num32++
+ ;;
+ *64)
+ let num64++
+ ;;
+ *)
+ die "Kernel names must end with either \"32\" or \"64\""
+ ;;
+ esac
+ done
+ [[ $num32 -gt 1 ]] && die "Only one 32-bit kernel can be configured"
+ [[ $num64 -gt 1 ]] && die "Only one 64-bit kernel can be configured"
+fi
-run_merge --oneshot genkernel
+run_merge --oneshot sys-kernel/genkernel
install -d /tmp/kerncache
diff --git a/targets/support/rc-update.sh b/targets/support/rc-update.sh
index 3941f2e0..0ebd9a48 100755
--- a/targets/support/rc-update.sh
+++ b/targets/support/rc-update.sh
@@ -9,8 +9,6 @@ then
rc-update --all del consolefont
# We need to add this one, unconditionally
rc-update add autoconfig default
- [[ -e /etc/init.d/splash ]] && rc-update add splash default
- [[ -e /etc/init.d/fbcondecor ]] && rc-update add fbcondecor default
[[ -e /etc/init.d/sysklogd ]] && rc-update add sysklogd default
[[ -e /etc/init.d/metalog ]] && rc-update add metalog default
[[ -e /etc/init.d/syslog-ng ]] && rc-update add syslog-ng default
@@ -18,16 +16,9 @@ then
# Do some livecd_type specific rc-update changes
case ${clst_livecd_type} in
- gentoo-gamecd)
- rc-update add spind default
- ;;
gentoo-release-live*)
- rc-update add spind default
rc-update add xdm default
;;
- generic-livecd)
- rc-update add spind default
- ;;
esac
fi
diff --git a/targets/support/target_image_setup.sh b/targets/support/target_image_setup.sh
index 559bc56c..b0e6546c 100755
--- a/targets/support/target_image_setup.sh
+++ b/targets/support/target_image_setup.sh
@@ -1,45 +1,17 @@
#!/bin/bash
source ${clst_shdir}/support/functions.sh
-source ${clst_shdir}/support/filesystem-functions.sh
-# Make the directory if it doesnt exist
-mkdir -p $1
+mkdir -p "${1}"
-loopret=1
+echo "Creating ${clst_fstype} filesystem"
case ${clst_fstype} in
- normal)
- create_normal_loop $1
- loopret=$?
- ;;
- zisofs)
- create_zisofs $1
- loopret=$?
- ;;
- noloop)
- create_noloop $1
- loopret=$?
- ;;
squashfs)
- create_squashfs $1
- loopret=$?
- ;;
- jffs)
- create_jffs $1
- loopret=$?
+ gensquashfs -k -D "${clst_stage_path}" -q ${clst_fsops} "${1}/image.squashfs" \
+ || die "Failed to create squashfs filesystem"
;;
jffs2)
- create_jffs2 $1
- loopret=$?
- ;;
- cramfs)
- create_cramfs $1
- loopret=$?
+ mkfs.jffs2 --root="${clst_stage_path}" --output="${1}/image.jffs" ${clst_fsops} \
+ || die "Failed to create jffs2 filesystem"
;;
esac
-
-if [ ${loopret} = "1" ]
-then
- die "Filesystem not setup"
-fi
-exit $loopret
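
target_image_setup.sh now calls the filesystem tools directly instead of the removed loop helpers. A rough standalone equivalent of the squashfs branch, reusing the flags the script passes, with hypothetical paths and ${clst_fsops} left empty (gensquashfs is provided by squashfs-tools-ng):

# Hypothetical paths; adjust to the actual stage and output locations.
stage=/var/tmp/catalyst/tmp/default/stage
out=/tmp/target
mkdir -p "${out}"
gensquashfs -k -D "${stage}" -q "${out}/image.squashfs" ||
    { echo "Failed to create squashfs filesystem" >&2; exit 1; }
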
diff --git a/targets/support/unmerge.sh b/targets/support/unmerge.sh
index 0c0c0374..9f112735 100755
--- a/targets/support/unmerge.sh
+++ b/targets/support/unmerge.sh
@@ -2,8 +2,6 @@
source /tmp/chroot-functions.sh
-# If the user enabled PRESERVE_LIBS in options, tell portage to preserve them.
-[ -n "${clst_PRESERVE_LIBS}" ] && FEATURES="${clst_myfeatures} preserve-libs"
run_merge -C ${clst_packages}
exit 0
diff --git a/targets/tinderbox/tinderbox-chroot.sh b/targets/tinderbox/tinderbox-chroot.sh
deleted file mode 100755
index 431912e4..00000000
--- a/targets/tinderbox/tinderbox-chroot.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-source /tmp/chroot-functions.sh
-
-# START THE BUILD
-setup_pkgmgr
-
-# Backup pristine system
-
-rsync -avx --exclude "/root/" --exclude "/tmp/" --exclude "${clst_repo_basedir}/${clst_repo_name}/" / \
- /tmp/rsync-bak/
-
-for x in ${clst_tinderbox_packages}
-do
- if [[ "${clst_VERBOSE}" == "true" ]]
- then
- run_merge --usepkg --buildpkg --newuse -vp $x
- fi
-
- mkdir -p /tmp/packages/$x
- export PORT_LOGDIR="/tmp/packages/$x"
- run_merge $x
-
- if [ "$?" != "0" ]
- then
- echo "! $x" >> /tmp/tinderbox.log
- else
- echo "$x" >> /tmp/tinderbox.log
- fi
- echo "Syncing from original pristine tinderbox snapshot..."
- rsync -avx --delete --exclude "/root/*" --exclude "/tmp/" --exclude \
- "${clst_repo_basedir}/${clst_repo_name}/" /tmp/rsync-bak/ /
-done
diff --git a/targets/tinderbox/tinderbox-controller.sh b/targets/tinderbox/tinderbox-controller.sh
deleted file mode 100755
index 3dbc76f1..00000000
--- a/targets/tinderbox/tinderbox-controller.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-source ${clst_shdir}/support/functions.sh
-
-case $1 in
- run)
- shift
- exec_in_chroot ${clst_shdir}/tinderbox/tinderbox-chroot.sh
- ;;
- preclean)
- #exec_in_chroot ${clst_shdir}/tinderbox/tinderbox-preclean-chroot.sh
- delete_from_chroot /tmp/chroot-functions.sh
- ;;
- clean)
- exit 0
- ;;
- *)
- exit 1
- ;;
-esac
-exit $?
diff --git a/targets/tinderbox/tinderbox-preclean-chroot.sh b/targets/tinderbox/tinderbox-preclean-chroot.sh
deleted file mode 100755
index 5353f67d..00000000
--- a/targets/tinderbox/tinderbox-preclean-chroot.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-
-source /tmp/chroot-functions.sh
-
-cleanup_stages