9 from wizard import deploy, report, scripts, shell, sset, command
10 from wizard.command import upgrade
# Entry point for 'wizard mass-upgrade': fans out 'wizard upgrade' over every
# autoinstall of the requested application via a ParallelShell, records
# per-install outcomes in persistent report files under the log directory,
# and prints a percentage summary at the end.
#
# NOTE(review): this capture is a line-numbered extract with gaps (original
# lines 11, 14, 17-18, 25-26, 30, 32-34, 36, 42-44, 52, 56-64, 71, 74, 78,
# 83, 97, 107, 116-117, 121, 125, 129 and 133 are absent), so several
# enclosing statements — try/except headers, the 'human_status = {' opener,
# 'def on_error', some if-conditions — are not visible here. Comments below
# describe only what the visible lines show; inferences are marked.
12 def main(argv, baton):
13 options, args = parse_args(argv, baton)
# Build the (possibly serialized) shell used to run child 'wizard upgrade'
# processes, honoring --no-parallelize and --max-processes.
15 sh = shell.ParallelShell.make(options.no_parallelize, options.max_processes)
# Ensure the log directory exists before any reports are written into it.
16 command.create_logdir(options.log_dir)
# Fragment of the human_status mapping: report key -> human-readable phrase
# used by the summary printing at the bottom (the 'human_status = {' opener
# is on a missing line).
19 'up_to_date': 'were up-to-date',
20 'not_migrated': 'were not migrated',
21 'merge': 'had merge failures',
22 'verify': 'had web verification errors',
23 'backup_failure': 'had a backup failure',
24 'blacklisted': 'were blacklisted',
# Delete the stale merge report — presumably guarded by options.remerge on a
# missing line; confirm against the full source.
27 os.unlink(os.path.join(options.log_dir, 'merge.txt'))
# Persistent per-status reports: --redo discards prior results (make_fresh),
# otherwise resume from the existing report files (make).
28 status = (report.make_fresh if options.redo else report.make)(options.log_dir, *human_status.keys())
# Per-run (always fresh) bookkeeping: successes, lookups, warnings, errors.
29 runtime = report.make_fresh(options.log_dir, 'success', 'lookup', 'warnings', 'errors')
# Shared cache directory passed to children via --rr-cache below; presumably
# a git rerere cache so conflict resolutions are reused — confirm.
31 rr_cache = os.path.join(options.log_dir, "rr-cache")
# Tail of a try/except around a directory creation (the try/os.mkdir lines
# are missing): an already-existing rr-cache dir is tolerated, any other
# OSError is presumably re-raised on the missing line.
35 if e.errno != errno.EEXIST:
# World-writable so child processes (which may run as other users — see the
# security_check_homedir/root handling below) can write into the cache.
37 os.chmod(rr_cache, 0o777)
38 # setup base arguments
39 base_args = calculate_base_args(options)
40 base_args.append("--non-interactive")
41 base_args.append("--rr-cache=" + rr_cache)
# Enumerate installs of the application ('app' is bound on a missing line,
# presumably from args) and cap the run at --limit via islice (limit=None
# means no cap).
45 deploys = deploy.parse_install_lines(app, options.versions_path, user=options.user)
46 requested_deploys = itertools.islice(deploys, options.limit)
47 # clean up /dev/shm/wizard
48 if os.path.exists("/dev/shm/wizard")
49 shutil.rmtree("/dev/shm/wizard")
# Recreate the scratch dir world-writable so children running as arbitrary
# users can use it.
50 os.mkdir("/dev/shm/wizard")
51 os.chmod("/dev/shm/wizard", 0o777)
# Main dispatch loop; i is 1-based so the summary percentages divide by the
# total number of installs considered.
53 for i, d in enumerate(requested_deploys, 1):
54 runtime.write("lookup", i, d.location)
# When running as root (getuid() == 0), skip installs whose home directory
# fails the security check (the skip/continue is on a missing line).
55 if not os.getuid() and not command.security_check_homedir(d.location):
# Scan existing per-status reports — the loop body is on missing lines;
# presumably used to punt installs already resolved in a prior run. Confirm.
59 for r in status.reports.values():
65 # XXX: we may be able to punt based on detected versions from d, which
66 # would be faster than spinning up a new process. On the other hand,
67 # our aggressive caching strategies using reports make this mostly not a problem
68 logging.info("[%04d] Processing %s", i, d.location)
69 child_args = list(base_args) # copy
70 # calculate the log file, if a log dir was specified
72 log_file = command.calculate_log_name(options.log_dir, i)
73 child_args.append("--log-file=" + log_file)
# Factory producing the (on_success, on_error) callback pair for one install.
# The extra stack frame is essential: without it the closures would share the
# loop's late-bound d and i.
75 def make_on_pair(d, i):
76 # we need to make another stack frame so that d and i get specific bindings.
77 def on_success(stdout, stderr):
# Non-empty stderr (condition on a missing line, presumably 'if stderr:') is
# recorded and logged as warnings, but the install still counts as a success.
79 runtime.write("warnings", i, d.location)
80 logging.warning("[%04d] Warnings at [%s]:\n%s", i, d.location, stderr)
81 runtime.write("success", i, d.location)
82 status.write("up_to_date", i, d.location)
# Error dispatch on the child's reported exception class name ('def
# on_error(e):' is on a missing line). Each known failure gets a status
# write so it will not be retried on the next run.
84 if e.name == "AlreadyUpgraded":
85 logging.info("[%04d] Skipped already upgraded %s" % (i, d.location))
86 status.write("up_to_date", i, d.location)
87 elif e.name == "MergeFailed":
# Child stdout is "<conflict-count> <tmpdir>"; partition splits on the first
# space.
88 conflicts, _, tmpdir = e.stdout.rstrip().partition(" ")
89 logging.warning("[%04d] Conflicts in %s files: resolve at [%s], source at [%s]",
90 i, conflicts, tmpdir, d.location)
91 status.write("merge", i, tmpdir, conflicts, d.location)
92 elif e.name == "BlacklistedError":
# Flatten the multi-line blacklist reason for one-line report storage.
93 reason = e.stdout.rstrip().replace("\n", " ")
94 logging.warning("[%04d] Blacklisted because of '%s' at %s", i, reason, d.location)
95 status.write("blacklisted", i, d.location, reason)
96 elif e.name == "WebVerificationError":
# 'url' is bound on a missing line, presumably derived from d — confirm.
98 # This should actually be a warning, but it's a really common error
99 logging.info("[%04d] Could not verify application at %s", i, url)
100 status.write("verify", i, url)
101 elif e.name == "NotMigratedError":
102 logging.info("[%04d] Application not migrated at %s", i, d.location)
103 status.write("not_migrated", i, d.location)
104 elif e.name == "BackupFailure":
105 logging.info("[%04d] Failed backups at %s", i, d.location)
106 status.write("backup_failure", i, d.location)
# Fallback (the 'else:' is on a missing line): unrecognized errors are
# accumulated per class name for the end-of-run summary and deliberately get
# no status write, so they are retried next time.
108 errors.setdefault(e.name, []).append(d)
109 logging.error("[%04d] %s in %s", i, e.name, d.location)
110 runtime.write("errors", i, e.name, d.location)
111 # lack of status write means that we'll always retry
112 return (on_success, on_error)
113 on_success, on_error = make_on_pair(d, i)
# Queue the child upgrade; the ParallelShell invokes the callbacks when the
# process completes (a join/wait presumably follows on a missing line).
114 sh.call("wizard", "upgrade", d.location, *child_args,
115 on_success=on_success, on_error=on_error)
# --- end-of-run summary ---
118 sys.stderr.write("\n")
119 for name, deploys in errors.items():
120 logging.warning("%s from %d installs", name, len(deploys))
# Python 2 print statement; i here is the final loop counter, i.e. the total
# number of installs processed (NameError/ZeroDivisionError if the loop never
# ran — presumably guarded elsewhere; confirm).
122 def printPercent(description, number):
123 print "% 4d out of % 4d installs (% 5.1f%%) %s" % (number, i, float(number)/i*100, description)
124 error_count = sum(len(e) for e in errors.values())
126 printPercent("had unusual errors", error_count)
127 for name, description in human_status.items():
128 values = status.reports[name].values
130 printPercent(description, len(values))
131 sys.stderr.write("\n")
132 print "%d installs were upgraded this run" % len(runtime.reports["success"].values)
# Build the option parser for 'wizard mass-upgrade' and parse argv.
# The shared 'baton' supplies option definitions reused across wizard
# subcommands (log_dir, no_parallelize, dry_run, max_processes, limit,
# versions_path, srv_path, user) — presumably a wizard.command construct;
# confirm against wizard/command/__init__.py.
#
# NOTE(review): original lines 136, 141, 160, 162 and 164-165 are missing
# from this extract, including the argument-count conditions and the
# return statement; presumably 'return options, args' — confirm.
134 def parse_args(argv, baton):
# Triple-quoted usage literal; the blank lines inside it fall on missing
# source lines. %prog is substituted by optparse.
135 usage = """usage: %prog mass-upgrade [ARGS] APPLICATION
137 Mass upgrades an application to the latest scripts version.
138 Essentially equivalent to running '%prog upgrade' on all
139 autoinstalls for a particular application found by parallel-find,
140 but with advanced reporting.
142 This command is intended to be run as root on a server with
143 the scripts AFS patch."""
144 parser = command.WizardOptionParser(usage)
145 baton.push(parser, "log_dir")
146 baton.push(parser, "no_parallelize")
147 baton.push(parser, "dry_run")
148 baton.push(parser, "max_processes")
# NOTE(review): stray comma spacing ('parser ,"limit"') — legal but
# inconsistent with the surrounding lines.
149 baton.push(parser ,"limit")
150 baton.push(parser, "versions_path")
151 baton.push(parser, "srv_path")
152 baton.push(parser, "user")
# Flags specific to mass-upgrade; all default to False.
153 parser.add_option("--force", dest="force", action="store_true",
154 default=False, help="Force running upgrade even if it's already at latest version.")
155 parser.add_option("--redo", dest="redo", action="store_true",
156 default=False, help="Redo all upgrades; use this if you updated Wizard's code.")
157 parser.add_option("--remerge", dest="remerge", action="store_true",
158 default=False, help="Redo all merges.")
# Trailing comma after 'args' is legal tuple-unpacking syntax, though unusual.
159 options, args, = parser.parse_all(argv)
# Argument-count validation; the 'if' conditions (presumably len(args) > 1
# and len(args) == 0) are on missing lines. parser.error exits the program.
161 parser.error("too many arguments")
163 parser.error("must specify application to upgrade")
def calculate_base_args(options):
    """Translate parsed options into the flag list shared by every child
    'wizard upgrade' invocation (--dry-run, --srv-path, --force)."""
    flag_names = {
        "dry_run": "--dry-run",
        "srv_path": "--srv-path",
        "force": "--force",
    }
    return command.make_base_args(options, **flag_names)