-import optparse
import logging
import os
import os.path
-import pwd
-import hashlib
-import errno
-import time
+import itertools
-import wizard
-from wizard import deploy, util, shell, sset, command
-from wizard.command import migrate
+from wizard import deploy, shell, sset, command
def main(argv, baton):
options, args = parse_args(argv, baton)
app = args[0]
base_args = calculate_base_args(options)
- sh = make_shell(options)
- seen = make_serialized_set(options)
- my_uid = os.getuid() # to see if we have root
- if options.log_dir:
- # must not be on AFS
- try:
- os.mkdir(options.log_dir)
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise
- if options.force:
- options.log_dir = os.path.join(options.log_dir, str(int(time.time())))
- os.mkdir(options.log_dir) # if fails, be fatal
- os.chmod(options.log_dir, 0o777)
+ sh = shell.ParallelShell.make(options.no_parallelize, options.max_processes)
+ command.create_logdir(options.log_dir)
+ seen = sset.make(options.seen)
+ is_root = not os.getuid()
+ report = command.open_reports(options.log_dir)
# loop stuff
errors = {}
i = 0
- for line in deploy.get_install_lines(options.versions_path):
- child_args = list(base_args)
- # validate and filter the deployments
- try:
- d = deploy.Deployment.parse(line)
- except deploy.DeploymentParseError, deploy.NoSuchApplication:
- continue
- name = d.application.name
- if name != app: continue
+ deploys = deploy.parse_install_lines(app, options.versions_path)
+ requested_deploys = itertools.islice(deploys, options.limit)
+ for i, d in enumerate(requested_deploys, 1):
+ # check if we want to punt due to --limit
if d.location in seen:
continue
- # security check: see if the user's directory is the prefix of
- # the deployment we're upgrading
- if not my_uid:
- uid = util.get_dir_uid(d.location)
- real = os.path.realpath(d.location)
- if not real.startswith(pwd.getpwuid(uid).pw_dir + "/"):
- logging.error("Security check failed, owner of deployment and owner of home directory mismatch for %s" % d.location)
- continue
+ if is_root and not command.security_check_homedir(d.location):
+ continue
+ logging.info("Processing %s" % d.location)
+ child_args = list(base_args)
# calculate the log file, if a log dir was specified
if options.log_dir:
- log_file = os.path.join(options.log_dir, shorten(d.location))
+ log_file = command.calculate_log_name(options.log_dir, i)
child_args.append("--log-file=" + log_file)
- # check if we want to punt due to --limit
- i += 1
- if i > options.limit:
- break
# actual meat
- def make_on_pair(d):
+ def make_on_pair(d, i):
+ # we need to make another stack frame so that d and i get specific bindings.
def on_success(stdout, stderr):
+ if stderr:
+ report.warnings.write("%s\n" % d.location) # pylint: disable-msg=E1101
+ logging.warning("Warnings [%04d] %s:\n%s" % (i, d.location, stderr))
seen.add(d.location)
def on_error(e):
if e.name == "wizard.command.migrate.AlreadyMigratedError" or \
name = e.name
if name not in errors: errors[name] = []
errors[name].append(d)
- logging.error("%s in %s" % (name, d.location))
+ logging.error("%s in [%04d] %s" % (name, i, d.location))
+ report.errors.write("%s\n" % d.location) # pylint: disable-msg=E1101
return (on_success, on_error)
- on_success, on_error = make_on_pair(d)
- sh.wait() # wait for a parallel processing slot to be available
+ on_success, on_error = make_on_pair(d, i)
sh.call("wizard", "migrate", d.location, *child_args,
on_success=on_success, on_error=on_error)
sh.join()
autoinstalls for a particular application found by parallel-find,
but with advanced reporting.
-When doing an actual run, it is recommended to use --seen to
-be able to resume gracefully (without it, mass-migrate must
-stat every install to find out if it migrated it yet).
-
This command is intended to be run as root on a server with
-the scripts AFS patch. You may run it as an unpriviledged
-user for testing purposes, but then you MUST NOT run this on
-untrusted repositories."""
+the scripts AFS patch."""
parser = command.WizardOptionParser(usage)
- parser.add_option("--no-parallelize", dest="no_parallelize", action="store_true",
- default=False, help="Turn off parallelization")
- parser.add_option("--dry-run", dest="dry_run", action="store_true",
- default=False, help="Print commands that would be run. Implies --no-parallelize")
- parser.add_option("--max", dest="max",
- default=10, help="Maximum subprocesses to run concurrently")
- parser.add_option("--seen", dest="seen",
- default=None, help="File to read/write paths of already processed installs. These will be skipped.")
+ baton.push(parser, "log_dir")
+ baton.push(parser, "seen")
+ baton.push(parser, "no_parallelize")
+ baton.push(parser, "dry_run")
+ baton.push(parser, "max_processes")
parser.add_option("--force", dest="force", action="store_true",
default=False, help="Force migrations to occur even if .scripts or .git exists.")
- parser.add_option("--limit", dest="limit", type="int",
- default=0, help="Limit the number of autoinstalls to look at.")
+    baton.push(parser, "limit")
baton.push(parser, "versions_path")
baton.push(parser, "srv_path")
- baton.push(parser, "log_dir")
options, args, = parser.parse_all(argv)
if len(args) > 1:
parser.error("too many arguments")
elif not args:
parser.error("must specify application to migrate")
- if options.dry_run:
- options.no_parallelize = True
return options, args
def calculate_base_args(options):
- base_args = command.makeBaseArgs(options, dry_run="--dry-run", srv_path="--srv-path", force="--force")
- return base_args
-
-def shorten(dir):
- hash = hashlib.sha1(dir).hexdigest()[0:7]
- return hash + dir.replace('/', '~')
-
-def make_shell(options):
- if options.no_parallelize:
- sh = shell.DummyParallelShell()
- else:
- sh = shell.ParallelShell(max=int(options.max))
- return sh
+ return command.make_base_args(options, dry_run="--dry-run", srv_path="--srv-path",
+ force="--force")
-def make_serialized_set(options):
- if options.seen:
- seen = sset.SerializedSet(options.seen)
- else:
- seen = sset.DummySerializedSet()
- return seen