scripts.mit.edu Git - wizard.git/blobdiff - wizard/command/mass_migrate.py
Rewrite parametrize to use new parametrizeWithVars
[wizard.git] / wizard / command / mass_migrate.py
index 88f008736d26599a27fc2b90326a61b95f9471df..956399a009d034a86a16e4ef5ea505981be4738b 100644 (file)
@@ -1,91 +1,54 @@
-import optparse
 import logging
 import os
 import os.path
-import pwd
-import hashlib
-import errno
-import time
+import itertools
 
-import wizard
-from wizard import deploy, util, shell, sset, command
-from wizard.command import migrate
+from wizard import deploy, report, shell, sset, command
 
 def main(argv, baton):
     options, args = parse_args(argv, baton)
     app = args[0]
     base_args = calculate_base_args(options)
-    sh = make_shell(options)
-    seen = make_serialized_set(options)
-    my_uid = os.getuid() # to see if we have root
-    warnings_logname = "/tmp/wizard-migrate-warnings.log"
-    errors_logname = "/tmp/wizard-migrate-errors.log"
-    if options.log_dir:
-        # must not be on AFS
-        try:
-            os.mkdir(options.log_dir)
-        except OSError as e:
-            if e.errno != errno.EEXIST:
-                raise
-            if options.force:
-                options.log_dir = os.path.join(options.log_dir, str(int(time.time())))
-                os.mkdir(options.log_dir) # if fails, be fatal
-        os.chmod(options.log_dir, 0o777)
-        warnings_logname = os.path.join(options.log_dir, "warnings.log")
-        errors_logname = os.path.join(options.log_dir, "errors.log")
-    warnings_log = open(warnings_logname, "a")
-    errors_log = open(errors_logname, "a")
+    sh = shell.ParallelShell.make(options.no_parallelize, options.max_processes)
+    command.create_logdir(options.log_dir)
+    seen = sset.make(options.seen)
+    is_root = not os.getuid()
+    runtime = report.make_fresh(options.log_dir, "success", "warnings", "errors")
     # loop stuff
     errors = {}
     i = 0
-    for line in deploy.get_install_lines(options.versions_path):
-        child_args = list(base_args)
-        # validate and filter the deployments
-        try:
-            d = deploy.Deployment.parse(line)
-        except deploy.DeploymentParseError, deploy.NoSuchApplication:
-            continue
-        name = d.application.name
-        if name != app: continue
+    deploys = deploy.parse_install_lines(app, options.versions_path, user=options.user)
+    requested_deploys = itertools.islice(deploys, options.limit)
+    for i, d in enumerate(requested_deploys, 1):
+        # check if we want to punt due to --limit
         if d.location in seen:
             continue
-        # security check: see if the user's directory is the prefix of
-        # the deployment we're upgrading
-        if not my_uid:
-            uid = util.get_dir_uid(d.location)
-            real = os.path.realpath(d.location)
-            if not real.startswith(pwd.getpwuid(uid).pw_dir + "/"):
-                logging.error("Security check failed, owner of deployment and owner of home directory mismatch for %s" % d.location)
-                continue
-        # check if we want to punt due to --limit
-        i += 1
-        if options.limit and i > options.limit:
-            break
+        if is_root and not command.security_check_homedir(d.location):
+            continue
+        logging.info("Processing %s" % d.location)
+        child_args = list(base_args)
         # calculate the log file, if a log dir was specified
         if options.log_dir:
-            log_file = os.path.join(options.log_dir, shorten(i, d.location))
+            log_file = command.calculate_log_name(options.log_dir, i)
             child_args.append("--log-file=" + log_file)
         # actual meat
         def make_on_pair(d, i):
+            # we need to make another stack frame so that d and i get specific bindings.
             def on_success(stdout, stderr):
                 if stderr:
-                    warnings_log.write("%s\n" % d.location)
-                    logging.warning("Warnings [%04d] %s:\n%s" % (d.location, i, stderr))
-                seen.add(d.location)
+                    logging.warning("Warnings [%04d] %s:\n%s" % (i, d.location, stderr))
+                    runtime.write("warnings", i, d.location)
+                runtime.write("success", i, d.location)
             def on_error(e):
                 if e.name == "wizard.command.migrate.AlreadyMigratedError" or \
                    e.name == "AlreadyMigratedError":
-                    seen.add(d.location)
                     logging.info("Skipped already migrated %s" % d.location)
                 else:
-                    name = e.name
-                    if name not in errors: errors[name] = []
-                    errors[name].append(d)
-                    logging.error("%s in [%04d] %s" % (name, i, d.location))
-                    errors_log.write("%s\n" % d.location)
+                    errors.setdefault(e.name, []).append(d)
+                    logging.error("%s in [%04d] %s", e.name, i, d.location)
+                    runtime.write("errors", i, d.location)
             return (on_success, on_error)
         on_success, on_error = make_on_pair(d, i)
-        sh.wait() # wait for a parallel processing slot to be available
         sh.call("wizard", "migrate", d.location, *child_args,
                       on_success=on_success, on_error=on_error)
     sh.join()
@@ -100,56 +63,28 @@ Essentially equivalent to running '%prog migrate' on all
 autoinstalls for a particular application found by parallel-find,
 but with advanced reporting.
 
-When doing an actual run, it is recommended to use --seen to
-be able to resume gracefully (without it, mass-migrate must
-stat every install to find out if it migrated it yet).
-
 This command is intended to be run as root on a server with
-the scripts AFS patch.  You may run it as an unpriviledged
-user for testing purposes, but then you MUST NOT run this on
-untrusted repositories."""
+the scripts AFS patch."""
     parser = command.WizardOptionParser(usage)
-    parser.add_option("--no-parallelize", dest="no_parallelize", action="store_true",
-            default=False, help="Turn off parallelization")
-    parser.add_option("--dry-run", dest="dry_run", action="store_true",
-            default=False, help="Print commands that would be run. Implies --no-parallelize")
-    parser.add_option("--max", dest="max",
-            default=10, help="Maximum subprocesses to run concurrently")
-    parser.add_option("--seen", dest="seen",
-            default=None, help="File to read/write paths of already processed installs. These will be skipped.")
+    baton.push(parser, "log_dir")
+    baton.push(parser, "seen")
+    baton.push(parser, "no_parallelize")
+    baton.push(parser, "dry_run")
+    baton.push(parser, "max_processes")
     parser.add_option("--force", dest="force", action="store_true",
             default=False, help="Force migrations to occur even if .scripts or .git exists.")
-    parser.add_option("--limit", dest="limit", type="int",
-            default=0, help="Limit the number of autoinstalls to look at.")
+    baton.push(parser, "limit")
     baton.push(parser, "versions_path")
     baton.push(parser, "srv_path")
-    baton.push(parser, "log_dir")
+    baton.push(parser, "user")
     options, args, = parser.parse_all(argv)
     if len(args) > 1:
         parser.error("too many arguments")
     elif not args:
         parser.error("must specify application to migrate")
-    if options.dry_run:
-        options.no_parallelize = True
     return options, args
 
 def calculate_base_args(options):
-    base_args = command.makeBaseArgs(options, dry_run="--dry-run", srv_path="--srv-path", force="--force")
-    return base_args
-
-def shorten(i, dir):
-    return "%04d" % i + dir.replace('/', '-') + ".log"
-
-def make_shell(options):
-    if options.no_parallelize:
-        sh = shell.DummyParallelShell()
-    else:
-        sh = shell.ParallelShell(max=int(options.max))
-    return sh
+    return command.make_base_args(options, dry_run="--dry-run", srv_path="--srv-path",
+            force="--force")
 
-def make_serialized_set(options):
-    if options.seen:
-        seen = sset.SerializedSet(options.seen)
-    else:
-        seen = sset.DummySerializedSet()
-    return seen