6 import wizard as _wizard
7 from wizard import util
9 """This is the path to the wizard executable as specified
10 by the caller; it lets us recursively invoke wizard"""
14 return args[0] == "python" or args[0] == wizard
17 """An advanced shell, with the ability to do dry-run and log commands"""
def __init__(self, dry = False):
    """Create a shell wrapper.

    `dry` -- don't run any commands, just print (log) them.

    NOTE(review): the visible code accepted `dry` but never stored it,
    so no method could honor the dry-run request; we now persist it as
    `self.dry` for `call()` and subclasses to consult.
    """
    self.dry = dry
def call(self, *args, **kwargs):
    """Run the command `args` as a subprocess, logging the invocation
    and capturing its output.

    Keyword arguments:
        `python` -- if None (the default), auto-detected with
            is_python(); when truthy, failures are reported as
            PythonCallError instead of CallError.

    Returns a (stdout, stderr) tuple on success.

    NOTE(review): as visible here, `eclass` is raised unconditionally
    after communicate(), which would make the trailing `return`
    unreachable; presumably a `proc.returncode` guard (and a dry-run
    short-circuit on `self.dry`) live on elided lines -- confirm
    against the full file.
    """
    kwargs.setdefault("python", None)
    logging.info("Running `" + ' '.join(args) + "`")
    # Auto-detect Python subprocesses so failures can be classified
    # as PythonCallError rather than plain CallError.
    if kwargs["python"] is None and is_python(args):
        kwargs["python"] = True
    # XXX: There is a possible problem here where we can fill up
    # the kernel buffer if we have 64KB of data. This shouldn't
    # be a problem, and the fix for such case would be to write to
    # temporary files instead of a pipe.
    # Another possible way of fixing this is converting from a
    # waitpid() pump to a select() pump, creating a pipe to
    # ourself, and then setting up a
    # SIGCHILD handler to write a single byte to the pipe to get
    # us out of select() when a subprocess exits.
    proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Subclasses (e.g. ParallelShell) provide an `async` hook to take
    # ownership of the process for deferred/parallel execution.
    # NOTE(review): "async" became a reserved word in Python 3.7, so
    # this attribute access only parses on Python 2 / <= 3.6.
    if hasattr(self, "async"):
        self.async(proc, args, **kwargs)
    stdout, stderr = proc.communicate()
    self.log(stdout, stderr)
    # Pick the error class matching how the command was classified.
    if kwargs["python"]: eclass = PythonCallError
    else: eclass = CallError
    raise eclass(proc.returncode, args, stdout, stderr)
    return (stdout, stderr)
def log(self, stdout, stderr):
    """Emit the captured stdout and stderr of a subprocess at DEBUG
    level, each under its own heading."""
    for heading, payload in (("STDOUT", stdout), ("STDERR", stderr)):
        logging.debug(heading + ":\n" + payload)
def callAsUser(self, *args, **kwargs):
    """Run a command as another user via sudo.

    Keyword arguments:
        `user` -- username to run as.
        `uid` -- numeric uid to run as; takes precedence over `user`.

    With neither given, the command runs as the current user.
    """
    target_user = kwargs.pop("user", None)
    target_uid = kwargs.pop("uid", None)
    kwargs.setdefault("python", is_python(args))
    # uid wins when both are supplied; plain call when neither is.
    if target_uid:
        return self.call("sudo", "-u", "#" + str(target_uid), *args, **kwargs)
    if target_user:
        return self.call("sudo", "-u", target_user, *args, **kwargs)
    return self.call(*args, **kwargs)
class ParallelShell(Shell):
    """Commands are queued here, and executed in parallel (with
    threading) in accordance with the maximum number of allowed
    subprocesses, and result in callback execution when they finish."""
    def __init__(self, dry = False, max = 10):
        # NOTE(review): self.running (the pid -> pending-command map
        # read by async() below) is not initialized in the visible
        # code; presumably assigned on an elided line -- confirm.
        super(ParallelShell, self).__init__(dry=dry)
        self.max = max # maximum of commands to run in parallel
    def async(self, proc, args, python, on_success, on_error):
        """Gets handed a subprocess.Popen object from our deferred
        caller; registers it as running and, once the parallelism cap
        is reached, reaps one finished child and fires its callback."""
        self.running[proc.pid] = (proc, args, python, on_success, on_error)
        # bail out immediately on initial ramp up
        if len(self.running) < self.max: return
        # now, wait for open pids.
        # NOTE(review): `e` below is unbound as written; an
        # `except OSError as e:` wrapper around waitpid() appears to
        # have been elided -- confirm against the full file.
        pid, status = os.waitpid(-1, 0)
        if e.errno == errno.ECHILD: return
        # ooh, zombie process. reap it
        proc, args, python, on_success, on_error = self.running.pop(pid)
        # XXX: this is slightly dangerous; should actually use
        # communicate() (or a select() pump) to avoid blocking if the
        # child filled its pipe buffer.
        stdout = proc.stdout.read()
        stderr = proc.stderr.read()
        self.log(stdout, stderr)
        # Dispatch failure to on_error with the matching error class,
        # or success to on_success with the captured output.
        # NOTE(review): no proc.returncode check is visible before
        # on_error fires; presumably guarded by an elided
        # `if proc.returncode:` -- confirm against the full file.
        if python: eclass = PythonCallError
        else: eclass = CallError
        on_error(eclass(proc.returncode, args, stdout, stderr))
        on_success(stdout, stderr)
        # NOTE(review): the docstring and loop below read as the body
        # of an elided `def join(self):`; its def line (and the
        # try/except binding `e`) are not visible here -- confirm.
        """Waits for all of our subprocesses to terminate."""
        while os.waitpid(-1, 0):
            if e.errno == errno.ECHILD: return
class DummyParallelShell(ParallelShell):
    """Same API as ParallelShell, but doesn't actually parallelize:
    the subprocess cap is pinned at one, so commands run serially."""
    def __init__(self, dry = False):
        # Explicit base-class call; equivalent to super() in this
        # single-inheritance hierarchy.
        ParallelShell.__init__(self, dry=dry, max=1)
class CallError(_wizard.Error):
    """Raised when a shell command fails; carries the exit code,
    the command arguments, and the captured stdout/stderr.

    NOTE(review): the visible __init__ body is only a `return` of a
    formatted string -- it reads as the body of an elided
    `def __str__(self):`, and the attribute assignments (self.code,
    self.args, self.stdout, self.stderr) that PythonCallError and the
    format below depend on appear to be on elided lines; confirm
    against the full file.
    """
    def __init__(self, code, args, stdout, stderr):
        return "CallError [%d]" % self.code
class PythonCallError(CallError):
    """CallError specialized for failed Python subprocesses; records
    the exception name extracted from the captured stderr."""
    def __init__(self, code, args, stdout, stderr):
        # Parse the Python traceback in stderr for its exception name.
        self.name = util.get_exception_name(stderr)
        CallError.__init__(self, code, args, stdout, stderr)
        # NOTE(review): the return below reads as the body of an elided
        # `def __str__(self):` -- confirm against the full file.
        return "PythonCallError [%s]" % self.name