aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--grs/Daemon.py18
-rw-r--r--grs/Execute.py22
-rw-r--r--grs/ISOIt.py6
-rw-r--r--grs/Interpret.py122
-rw-r--r--grs/Kernel.py22
-rw-r--r--grs/Log.py4
-rw-r--r--grs/PivotChroot.py8
-rw-r--r--grs/Populate.py22
-rw-r--r--grs/Rotator.py16
-rw-r--r--grs/Seed.py4
-rw-r--r--grs/WorldConf.py40
11 files changed, 142 insertions, 142 deletions
diff --git a/grs/Daemon.py b/grs/Daemon.py
index 0cefaef..b97f4e0 100644
--- a/grs/Daemon.py
+++ b/grs/Daemon.py
@@ -67,23 +67,23 @@ class Daemon:
sys.exit(1)
# Dup stdin to /dev/null, and stdout and stderr to grs-daemon-<pid>.err
- si = open(os.devnull, 'r')
- os.dup2(si.fileno(), sys.stdin.fileno())
+ _si = open(os.devnull, 'r')
+ os.dup2(_si.fileno(), sys.stdin.fileno())
os.makedirs('/var/log/grs', mode=0o755, exist_ok=True)
- se = open('/var/log/grs/grs-daemon-%d.err' % os.getpid(), 'a+')
+ _se = open('/var/log/grs/grs-daemon-%d.err' % os.getpid(), 'a+')
sys.stdout.flush()
- os.dup2(se.fileno(), sys.stdout.fileno())
+ os.dup2(_se.fileno(), sys.stdout.fileno())
sys.stderr.flush()
- os.dup2(se.fileno(), sys.stderr.fileno())
+ os.dup2(_se.fileno(), sys.stderr.fileno())
# Use atexit to remove the pidfile when we shutdown.
# No matter where the exit is initiated, eg from Execute.py
# we are sure that atexit() will run and delete the pidfile.
atexit.register(self.delpid)
- with open(self.pidfile, 'w') as pf:
- pf.write('%d\n' % os.getpid())
+ with open(self.pidfile, 'w') as _pf:
+ _pf.write('%d\n' % os.getpid())
def delpid(self):
@@ -99,8 +99,8 @@ class Daemon:
# 1) If the pidfile is stale, remove it and startup as usual.
# 2) If we're already running, then don't start a second instance.
try:
- with open(self.pidfile, 'r') as pf:
- pid = int(pf.read().strip())
+ with open(self.pidfile, 'r') as _pf:
+ pid = int(_pf.read().strip())
except IOError:
pid = None
diff --git a/grs/Execute.py b/grs/Execute.py
index c42cab2..3afbecc 100644
--- a/grs/Execute.py
+++ b/grs/Execute.py
@@ -44,8 +44,8 @@ class Execute():
"""
def signalexit():
pid = os.getpid()
- f.write('SENDING SIGTERM to pid = %d\n' % pid)
- f.close()
+ _file.write('SENDING SIGTERM to pid = %d\n' % pid)
+ _file.close()
try:
for i in range(10):
os.kill(pid, signal.SIGTERM)
@@ -63,10 +63,10 @@ class Execute():
extra_env = dict(os.environ, **extra_env)
if logfile:
- f = open(logfile, 'a')
- proc = subprocess.Popen(args, stdout=f, stderr=f, env=extra_env, shell=shell)
+ _file = open(logfile, 'a')
+ proc = subprocess.Popen(args, stdout=_file, stderr=_file, env=extra_env, shell=shell)
else:
- f = sys.stderr
+ _file = sys.stderr
proc = subprocess.Popen(args, env=extra_env, shell=shell)
try:
@@ -77,18 +77,18 @@ class Execute():
timed_out = True
if not timed_out:
- # rc = None if we had a timeout
- rc = proc.returncode
- if rc:
- f.write('EXIT CODE: %d\n' % rc)
+ # _rc = None if we had a timeout
+ _rc = proc.returncode
+ if _rc:
+ _file.write('EXIT CODE: %d\n' % _rc)
if not failok:
signalexit()
if timed_out:
- f.write('TIMEOUT ERROR: %s\n' % cmd)
+ _file.write('TIMEOUT ERROR: %s\n' % cmd)
if not failok:
signalexit()
# Only close a logfile, don't close sys.stderr!
if logfile:
- f.close()
+ _file.close()
diff --git a/grs/ISOIt.py b/grs/ISOIt.py
index ae6b217..d24a542 100644
--- a/grs/ISOIt.py
+++ b/grs/ISOIt.py
@@ -76,9 +76,9 @@ class ISOIt(HashIt):
'bin', 'dev', 'etc', 'mnt/cdrom', 'mnt/squashfs', 'mnt/tmpfs', 'proc', 'sbin', 'sys',
'tmp', 'usr/bin', 'usr/sbin', 'var', 'var/run'
]
- for p in root_paths:
- d = os.path.join(initramfs_root, p)
- os.makedirs(d, mode=0o755, exist_ok=True)
+ for _path in root_paths:
+ _dir = os.path.join(initramfs_root, _path)
+ os.makedirs(_dir, mode=0o755, exist_ok=True)
# Copy the static busybox to the initramfs root.
# TODO: we are assuming a static busybox, so we should check.
diff --git a/grs/Interpret.py b/grs/Interpret.py
index db5bdd4..4e411fd 100644
--- a/grs/Interpret.py
+++ b/grs/Interpret.py
@@ -55,12 +55,12 @@ class Interpret(Daemon):
"""
mypid = os.getpid()
while True:
- with open(os.path.join(self.subcgroupdir, 'tasks'), 'r') as f:
- lines = f.readlines()
+ with open(os.path.join(self.subcgroupdir, 'tasks'), 'r') as _file:
+ lines = _file.readlines()
if len(lines) <= 1:
break
- for p in lines:
- pid = int(p.strip())
+ for _pid in lines:
+ pid = int(_pid.strip())
if mypid == pid:
continue
try:
@@ -73,13 +73,13 @@ class Interpret(Daemon):
except ProcessLookupError:
pass
try:
- md.umount_all()
+ _md.umount_all()
except NameError:
pass
sys.exit(signum + 128)
- def smartlog(l, obj, has_obj=True):
+ def smartlog(_log, obj, has_obj=True):
""" This logs whether or not we have a grammatically incorrect
directive, or we are doing a mock run, and returns whether
or not we should execute the directive:
@@ -87,10 +87,10 @@ class Interpret(Daemon):
False = don't skip it
"""
if (has_obj and not obj) or (not has_obj and obj):
- lo.log('Bad command: %s' % l)
+ _lo.log('Bad command: %s' % _log)
return True
if self.mock_run:
- lo.log(l)
+ _lo.log(_log)
return True
return False
@@ -121,59 +121,59 @@ class Interpret(Daemon):
# Initialize all the classes that will run the directives from
# the build script. Note that we expect these classes to just
# initialize some variables but not do any work in their initializers.
- lo = Log(logfile)
- sy = Synchronize(repo_uri, name, libdir, logfile)
- se = Seed(stage_uri, tmpdir, portage_configroot, package, logfile)
- md = MountDirectories(portage_configroot, package, logfile)
- po = Populate(libdir, workdir, portage_configroot, logfile)
- ru = RunScript(libdir, portage_configroot, logfile)
- pc = PivotChroot(tmpdir, portage_configroot, logfile)
- ke = Kernel(libdir, portage_configroot, kernelroot, package, logfile)
- bi = TarIt(name, portage_configroot, logfile)
- io = ISOIt(name, libdir, tmpdir, portage_configroot, logfile)
+ _lo = Log(logfile)
+ _sy = Synchronize(repo_uri, name, libdir, logfile)
+ _se = Seed(stage_uri, tmpdir, portage_configroot, package, logfile)
+ _md = MountDirectories(portage_configroot, package, logfile)
+ _po = Populate(libdir, workdir, portage_configroot, logfile)
+ _ru = RunScript(libdir, portage_configroot, logfile)
+ _pc = PivotChroot(tmpdir, portage_configroot, logfile)
+ _ke = Kernel(libdir, portage_configroot, kernelroot, package, logfile)
+ _bi = TarIt(name, portage_configroot, logfile)
+ _io = ISOIt(name, libdir, tmpdir, portage_configroot, logfile)
# Just in case /var/tmp/grs doesn't already exist.
os.makedirs(tmpdir, mode=0o755, exist_ok=True)
# Rotate any previously existing logs and unmount any existing
# bind mounts from a previous run that were not cleaned up.
- lo.rotate_logs()
- md.umount_all()
+ _lo.rotate_logs()
+ _md.umount_all()
# Both sync() + seed() do not need build script directives.
# sync() is done unconditionally for an update run.
progress = os.path.join(tmpdir, '.completed_sync')
if not os.path.exists(progress) or self.update_run:
- sy.sync()
+ _sy.sync()
stampit(progress)
# seed() is never done for an update run
progress = os.path.join(tmpdir, '.completed_seed')
if not os.path.exists(progress) and not self.update_run:
- se.seed()
+ _se.seed()
stampit(progress)
# Read the build script and execute a line at a time.
build_script = os.path.join(libdir, 'build')
- with open(build_script, 'r') as s:
+ with open(build_script, 'r') as _file:
line_number = 0
medium_type = None
- for l in s.readlines():
+ for _line in _file.readlines():
line_number += 1
# Skip lines with initial # as comments.
- m = re.search(r'^(#).*$', l)
- if m:
+ _match = re.search(r'^(#).*$', _line)
+ if _match:
continue
# For a release run, execute every line of the build script.
# For an update run, execute only lines with a leading +.
ignore_stamp = False
- m = re.search(r'^(\+)(.*)$', l)
- if m:
+ _match = re.search(r'^(\+)(.*)$', _line)
+ if _match:
# There is a leading +, so remove it and skip if doing an update run
ignore_stamp = self.update_run
- l = m.group(2)
+ _line = _match.group(2)
else:
# There is no leading +, so skip if this is an update run
if self.update_run:
@@ -187,11 +187,11 @@ class Interpret(Daemon):
# single 'verb', or a 'verb obj' pair. While restrictive,
# its good enough for now.
try:
- m = re.search(r'(\S+)\s+(\S+)', l)
- verb = m.group(1)
- obj = m.group(2)
+ _match = re.search(r'(\S+)\s+(\S+)', _line)
+ verb = _match.group(1)
+ obj = _match.group(2)
except AttributeError:
- verb = l.strip()
+ verb = _line.strip()
obj = None
# This long concatenated if is where the semantics of the
@@ -202,81 +202,81 @@ class Interpret(Daemon):
stampit(progress)
continue
if verb == 'log':
- if smartlog(l, obj):
+ if smartlog(_line, obj):
stampit(progress)
continue
if obj == 'stamp':
- lo.log('='*80)
+ _lo.log('='*80)
else:
- lo.log(obj)
+ _lo.log(obj)
elif verb == 'mount':
- if smartlog(l, obj, False):
+ if smartlog(_line, obj, False):
stampit(progress)
continue
- md.mount_all()
+ _md.mount_all()
elif verb == 'unmount':
- if smartlog(l, obj, False):
+ if smartlog(_line, obj, False):
stampit(progress)
continue
- md.umount_all()
+ _md.umount_all()
elif verb == 'populate':
- if smartlog(l, obj):
+ if smartlog(_line, obj):
stampit(progress)
continue
- po.populate(cycle=int(obj))
+ _po.populate(cycle=int(obj))
elif verb == 'runscript':
- if smartlog(l, obj):
+ if smartlog(_line, obj):
stampit(progress)
continue
- ru.runscript(obj)
+ _ru.runscript(obj)
elif verb == 'pivot':
- if smartlog(l, obj):
+ if smartlog(_line, obj):
stampit(progress)
continue
- pc.pivot(obj, md)
+ _pc.pivot(obj, _md)
elif verb == 'kernel':
- if smartlog(l, obj, False):
+ if smartlog(_line, obj, False):
stampit(progress)
continue
- ke.kernel()
+ _ke.kernel()
elif verb == 'tarit':
# 'tarit' can either be just a verb,
# or a 'verb obj' pair.
if obj:
- smartlog(l, obj, True)
- bi.tarit(obj)
+ smartlog(_line, obj, True)
+ _bi.tarit(obj)
else:
- smartlog(l, obj, False)
- bi.tarit()
+ smartlog(_line, obj, False)
+ _bi.tarit()
medium_type = 'tarit'
elif verb == 'isoit':
# 'isoit' can either be just a verb,
# or a 'verb obj' pair.
if obj:
- smartlog(l, obj, True)
- io.isoit(obj)
+ smartlog(_line, obj, True)
+ _io.isoit(obj)
else:
- smartlog(l, obj, False)
- io.isoit()
+ smartlog(_line, obj, False)
+ _io.isoit()
medium_type = 'isoit'
elif verb == 'hashit':
- if smartlog(l, obj, False):
+ if smartlog(_line, obj, False):
stampit(progress)
continue
if medium_type == 'tarit':
- bi.hashit()
+ _bi.hashit()
elif medium_type == 'isoit':
- io.hashit()
+ _io.hashit()
else:
raise Exception('Unknown medium to hash.')
else:
- lo.log('Bad command: %s' % l)
+ _lo.log('Bad command: %s' % _line)
stampit(progress)
# Just in case the build script lacks a final unmount, if we
# are done, then let's make sure we clean up after ourselves.
try:
- md.umount_all()
+ _md.umount_all()
except NameError:
pass
diff --git a/grs/Kernel.py b/grs/Kernel.py
index 326b56c..cd2ecee 100644
--- a/grs/Kernel.py
+++ b/grs/Kernel.py
@@ -41,8 +41,8 @@ class Kernel():
def parse_kernel_config(self):
""" Parse the version to be built/installed from the kernel-config file. """
- with open(self.kernel_config, 'r') as f:
- lines = f.readlines()
+ with open(self.kernel_config, 'r') as _file:
+ lines = _file.readlines()
# Are we building a modular kernel or statically linked?
has_modules = 'CONFIG_MODULES=y\n' in lines
# The third line is the version line in the kernel config file.
@@ -50,20 +50,20 @@ class Kernel():
# The version line looks like the following:
# Linux/x86 4.0.6-hardened-r2 Kernel Configuration
# The 2nd group contains the version.
- m = re.search(r'^#\s+(\S+)\s+(\S+).+$', version_line)
- gentoo_version = m.group(2)
+ _match = re.search(r'^#\s+(\S+)\s+(\S+).+$', version_line)
+ gentoo_version = _match.group(2)
try:
# Either the version is of the form '4.0.6-hardened-r2' with two -'s
- m = re.search(r'(\S+?)-(\S+?)-(\S+)', gentoo_version)
- vanilla_version = m.group(1)
- flavor = m.group(2)
- revision = m.group(3)
+ _match = re.search(r'(\S+?)-(\S+?)-(\S+)', gentoo_version)
+ vanilla_version = _match.group(1)
+ flavor = _match.group(2)
+ revision = _match.group(3)
pkg_name = flavor + '-sources-' + vanilla_version + '-' + revision
except AttributeError:
# Or the version is of the form '4.0.6-hardened' with one -
- m = re.search(r'(\S+?)-(\S+)', gentoo_version)
- vanilla_version = m.group(1)
- flavor = m.group(2)
+ _match = re.search(r'(\S+?)-(\S+)', gentoo_version)
+ vanilla_version = _match.group(1)
+ flavor = _match.group(2)
pkg_name = flavor + '-sources-' + vanilla_version
pkg_name = '=sys-kernel/' + pkg_name
return (gentoo_version, pkg_name, has_modules)
diff --git a/grs/Log.py b/grs/Log.py
index a7ac1a3..3a8ec8a 100644
--- a/grs/Log.py
+++ b/grs/Log.py
@@ -21,8 +21,8 @@ class Log(Rotator):
current_time = datetime.datetime.now(datetime.timezone.utc)
unix_timestamp = current_time.timestamp()
msg = '[%f] %s' % (unix_timestamp, msg)
- with open(self.logfile, 'a') as f:
- f.write('%s\n' % msg)
+ with open(self.logfile, 'a') as _file:
+ _file.write('%s\n' % msg)
def rotate_logs(self, upper_limit=20):
diff --git a/grs/PivotChroot.py b/grs/PivotChroot.py
index ce9a35c..8958c99 100644
--- a/grs/PivotChroot.py
+++ b/grs/PivotChroot.py
@@ -35,11 +35,11 @@ class PivotChroot(Rotator):
self.logfile = logfile
- def pivot(self, subchroot, md):
+ def pivot(self, subchroot, _md):
# If any directories are mounted, unmount them before pivoting.
- some_mounted, all_mounted = md.are_mounted()
+ some_mounted, all_mounted = _md.are_mounted()
if some_mounted:
- md.umount_all()
+ _md.umount_all()
# Move the system's portage configroot out of the way to system.0,
# then pivot the inner chroot to system.
@@ -50,4 +50,4 @@ class PivotChroot(Rotator):
# Be conservative: only if all the directories were mounted on the old
# system portage configroot do we remount on the newly pivoted root.
if all_mounted:
- md.mount_all()
+ _md.mount_all()
diff --git a/grs/Populate.py b/grs/Populate.py
index c6dce8d..8232c36 100644
--- a/grs/Populate.py
+++ b/grs/Populate.py
@@ -66,11 +66,11 @@ class Populate():
# { 1:['/path/to', 'a'], 1:['/path/to', 'b'], 2:...}
cycled_files = {}
for dirpath, dirnames, filenames in os.walk(self.workdir):
- for f in filenames:
- m = re.search(r'^(.+)\.CYCLE\.(\d+)', f)
- if m:
- filename = m.group(1)
- cycle_no = int(m.group(2))
+ for _file in filenames:
+ _match = re.search(r'^(.+)\.CYCLE\.(\d+)', _file)
+ if _match:
+ filename = _match.group(1)
+ cycle_no = int(_match.group(2))
cycled_files.setdefault(cycle_no, [])
cycled_files[cycle_no].append([dirpath, filename])
# If cycle is just a boolean, then default to the maximum cycle number.
@@ -81,14 +81,14 @@ class Populate():
# Go through cycled_files dictionary and either
# 1. rename the file if it matches the desired cycle number,
# 2. delete the file otherwise.
- for c in cycled_files:
- for f in cycled_files[c]:
- dirpath = f[0]
- filename = f[1]
+ for _cycle in cycled_files:
+ for _file in cycled_files[_cycle]:
+ dirpath = _file[0]
+ filename = _file[1]
new_file = os.path.join(dirpath, filename)
- old_file = "%s.CYCLE.%d" % (new_file, c)
+ old_file = "%s.CYCLE.%d" % (new_file, _cycle)
if os.path.isfile(old_file):
- if c == cycle_no:
+ if _cycle == cycle_no:
os.rename(old_file, new_file)
else:
os.remove(old_file)
diff --git a/grs/Rotator.py b/grs/Rotator.py
index 9daf7d2..a3684a9 100644
--- a/grs/Rotator.py
+++ b/grs/Rotator.py
@@ -44,22 +44,22 @@ class Rotator():
"""
objs = glob.glob('%s.*' % obj)
indexed_obj = {}
- for o in objs:
- m = re.search(r'^.+\.(\d+)$', o)
- indexed_obj[int(m.group(1))] = o
+ for _obj in objs:
+ _match = re.search(r'^.+\.(\d+)$', _obj)
+ indexed_obj[int(_match.group(1))] = _obj
count = list(indexed_obj.keys())
count.sort()
count.reverse()
- for c in count:
- current_obj = indexed_obj[c]
- if c >= upper_limit:
+ for _count in count:
+ current_obj = indexed_obj[_count]
+ if _count >= upper_limit:
try:
shutil.rmtree(current_obj)
except NotADirectoryError:
os.unlink(current_obj)
continue
- m = re.search(r'^(.+)\.\d+$', current_obj)
- next_obj = '%s.%d' % (m.group(1), c+1)
+ _match = re.search(r'^(.+)\.\d+$', current_obj)
+ next_obj = '%s.%d' % (_match.group(1), _count+1)
shutil.move(current_obj, next_obj)
diff --git a/grs/Seed.py b/grs/Seed.py
index 2bc429f..f610228 100644
--- a/grs/Seed.py
+++ b/grs/Seed.py
@@ -49,8 +49,8 @@ class Seed(Rotator):
# Download a stage tarball if we don't have one
if not os.path.isfile(self.filepath):
request = urllib.request.urlopen(self.stage_uri)
- with open(self.filepath, 'wb') as f:
- shutil.copyfileobj(request, f)
+ with open(self.filepath, 'wb') as _file:
+ shutil.copyfileobj(request, _file)
# Because python's tarfile sucks
cmd = 'tar --xattrs -xf %s -C %s' % (self.filepath, self.portage_configroot)
diff --git a/grs/WorldConf.py b/grs/WorldConf.py
index d28f300..bad4f83 100644
--- a/grs/WorldConf.py
+++ b/grs/WorldConf.py
@@ -42,8 +42,8 @@ class WorldConf():
dpath = os.path.join(CONST.PORTAGE_CONFIGDIR, directory)
if not os.path.isdir(dpath):
continue
- for f in os.listdir(dpath):
- fpath = os.path.join(dpath, f)
+ for _file in os.listdir(dpath):
+ fpath = os.path.join(dpath, _file)
if os.path.isfile(fpath):
os.remove(fpath)
@@ -52,14 +52,14 @@ class WorldConf():
delimiters=':', allow_no_value=True, comment_prefixes=None
)
config.read(CONST.WORLD_CONFIG)
- for s in config.sections():
+ for _section in config.sections():
for (directory, value) in config[s].items():
- p_slot_atom = re.sub(r'[/:]', '_', s)
+ p_slot_atom = re.sub(r'[/:]', '_', _section)
dpath = os.path.join(CONST.PORTAGE_CONFIGDIR, directory)
fpath = os.path.join(dpath, p_slot_atom)
os.makedirs(dpath, mode=0o755, exist_ok=True)
- with open(fpath, 'w') as g:
- g.write('%s\n' % value)
+ with open(fpath, 'w') as _file:
+ _file.write('%s\n' % value)
@staticmethod
@@ -73,23 +73,23 @@ class WorldConf():
# Remove all installed pkgs from the set of all portage packages.
uninstalled = portdb.cp_all()
- for p in vardb.cp_all():
+ for _cp in vardb.cp_all():
try:
- uninstalled.remove(p)
+ uninstalled.remove(_cp)
except ValueError:
- print('%s installed on local system, but not in portage repo anymore.' % p)
+ print('%s installed on local system, but not in portage repo anymore.' % _cp)
# Construct a list of canonical named files for uninstalled pkgs.
slot_atoms = []
- for p in uninstalled:
- cpv = portdb.cp_list(p)[0]
- slotvar = portdb.aux_get(cpv, ['SLOT'])[0]
+ for _cp in uninstalled:
+ _cpv = portdb.cp_list(_cp)[0]
+ slotvar = portdb.aux_get(_cpv, ['SLOT'])[0]
try:
- m = re.search(r'(.+?)\/(.+)', slotvar)
- slot = m.group(1)
+ _match = re.search(r'(.+?)\/(.+)', slotvar)
+ slot = _match.group(1)
except AttributeError:
slot = slotvar
- slot_atoms.append(re.sub(r'[/:]', '_', '%s:%s' % (p, slot)))
+ slot_atoms.append(re.sub(r'[/:]', '_', '%s:%s' % (_cp, slot)))
# Also let's get a list of all the possible canonical file names
config = configparser.RawConfigParser(
@@ -97,8 +97,8 @@ class WorldConf():
)
config.read(CONST.WORLD_CONFIG)
canon = []
- for s in config.sections():
- p_slot_atom = re.sub(r'[/:]', '_', s)
+ for _section in config.sections():
+ p_slot_atom = re.sub(r'[/:]', '_', _section)
canon.append(p_slot_atom)
# Walk through all files in /etc/portage and remove any files for uninstalled pkgs.
@@ -108,7 +108,7 @@ class WorldConf():
continue
# Remove all filenames that match uninstalled slot_atoms or are not in the canon
- for f in filenames:
- fpath = os.path.realpath(os.path.join(dirpath, f))
- if f in slot_atoms or not f in canon:
+ for _file in filenames:
+ fpath = os.path.realpath(os.path.join(dirpath, _file))
+ if _file in slot_atoms or not _file in canon:
os.remove(fpath)