2 Wrappers around subprocess functionality that simulate an actual shell.
6 from wizard.shell import *
16 from wizard import util
# Path to the wizard executable exactly as the caller invoked it
# (``sys.argv[0]``); recorded so that wizard can recursively invoke
# itself (e.g. when re-execing under sudo).
wizard_bin = sys.argv[0]
def is_python(args):
    """
    Detects whether or not an argument list invokes a Python program.

    :param args: argument list as passed to :meth:`Shell.call`; only
        ``args[0]`` (the program name) is inspected.
    :returns: ``True`` when the program is ``python`` or ``wizard``
        (wizard itself is a Python program), ``False`` otherwise.
    """
    # NOTE(review): the ``def`` line was lost from this excerpt; the
    # signature is reconstructed from the call sites ``is_python(args)``
    # in call() and callAsUser() -- TODO confirm against upstream.
    return args[0] == "python" or args[0] == "wizard"
# drop_priviledges: when running as root, re-exec ourselves under sudo as
# the uid that owns ``dir``, preserving selected environment variables.
# NOTE(review): this region is a numbered excerpt with gaps (original lines
# 29, 35-38, 40-42 and 46 are missing), so the verbatim fragments below do
# not form a complete function body -- do not rewrite without the upstream
# source.
28 def drop_priviledges(dir, log_file):
30     Checks if we are running as root. If we are, attempt to drop
31     priviledges to the user who owns ``dir``, by re-calling
32     itself using sudo with exec, such that the new process subsumes our
33     current one. If ``log_file`` is passed, the file is chown'ed
34     to the user we are dropping priviledges to, so the subprocess
# Target uid: the owner of ``dir`` (project helper util.get_dir_uid).
39 uid = util.get_dir_uid(dir)
# Carry wizard-specific and Kerberos-related variables across the sudo
# boundary by prefixing VAR=value words to the re-exec argument list.
43 for k,v in os.environ.items():
44 if k.startswith('WIZARD_') or k == "SSH_GSSAPI_NAME":
45 args.append("%s=%s" % (k,v))
47 logging.debug("Dropping priviledges")
# chown the log file so the de-priviledged process can keep writing to it;
# gid -1 leaves the group unchanged.
48 if log_file: os.chown(log_file, uid, -1)
# exec, not spawn: sudo replaces (subsumes) the current process.
# ``#uid`` is sudo's numeric-uid target syntax.
49 os.execlp('sudo', 'sudo', '-u', '#' + str(uid), *args)
# Shell class -- the ``class`` statement itself (original line ~52) is
# missing from this excerpt; the lines below are its docstring fragment
# and the constructor header.
53 An advanced shell that performs logging. If ``dry`` is ``True``,
54 no commands are actually run.
# Constructor header only: the body (original lines 57-58) is not visible.
# Presumably it stores ``dry`` and initialises ``self.cwd``, which call()
# reads when spawning subprocesses -- TODO confirm against upstream.
56 def __init__(self, dry = False):
# Shell.call -- the workhorse: spawn a subprocess (optionally dry-run,
# deferred via _async, or interactive), feed it input, log its output,
# and raise CallError/PythonCallError on nonzero exit.
# NOTE(review): numbered excerpt with gaps (missing original lines include
# 60, 64-65, 80-81, 85-86, 88, 90-92, 103-109, 113-114, 117-120, 124-125,
# 141, 145-148, 150, 152, 154 and 158); the verbatim fragments below are
# NOT a complete method body and must not be rewritten from this view.
59 def call(self, *args, **kwargs):
61 Performs a system call. The actual executable and options should
62 be passed as arguments to this function. It will magically
63 ensure that 'wizard' as a command works. Several keyword arguments
66 :param python: explicitly marks the subprocess as Python or not Python
67 for improved error reporting. By default, we use
68 :func:`is_python` to autodetect this.
69 :param input: input to feed the subprocess on standard input.
70 :param interactive: whether or not directly hook up all pipes
71 to the controlling terminal, to allow interaction with subprocess.
72 :param strip: if ``True``, instead of returning a tuple,
73 return the string stdout output of the command with trailing newlines
74 removed. This emulates the behavior of backticks and ``$()`` in Bash.
75 Prefer to use :meth:`eval` instead (you should only need to explicitly
76 specify this if you are using another wrapper around this function).
77 :param log: if True, we log the call as INFO, if False, we log the call
78 as DEBUG, otherwise, we detect based on ``strip``.
79 :param addenv: mapping of environment variables *to add*
82 :param stdin: a file-type object that will be written to or read from as a pipe.
83 :returns: a tuple of strings ``(stdout, stderr)``, or a string ``stdout``
84 if ``strip`` is specified.
87 >>> sh.call("echo", "Foobar")
89 >>> sh.call("cat", input='Foobar')
# Defaults: capture all three streams as pipes, non-interactive, tuple
# return, auto-detected log level, no extra environment.
93 kwargs.setdefault("interactive", False)
94 kwargs.setdefault("strip", False)
95 kwargs.setdefault("python", None)
96 kwargs.setdefault("log", None)
97 kwargs.setdefault("stdout", subprocess.PIPE)
98 kwargs.setdefault("stdin", subprocess.PIPE)
99 kwargs.setdefault("stderr", subprocess.PIPE)
100 kwargs.setdefault("addenv", None)
101 msg = "Running `" + ' '.join(args) + "`"
# Log-level selection.  NOTE(review): ``and`` binds tighter than ``or``
# here, so log=False always selects this branch, while strip=True selects
# it unless log=True -- verify this matches the documented contract.
102 if kwargs["strip"] and not kwargs["log"] is True or kwargs["log"] is False:
# Python autodetection, plus the magic 'wizard' substitution (the
# replacement with ``wizard_bin`` is in lines not visible here).
110 if kwargs["python"] is None and is_python(args):
111 kwargs["python"] = True
112 if args[0] == "wizard":
115 kwargs.setdefault("input", None)
# Interactive mode presumably hooks the child directly to our terminal;
# that branch's assignments (lines 117-120) are not visible.
116 if kwargs["interactive"]:
121 stdout=kwargs["stdout"]
122 stdin=kwargs["stdin"]
123 stderr=kwargs["stderr"]
# Merge addenv on top of our environment (Python 2 idiom: dict.items()
# returns lists, so ``+`` concatenates them).
126 env = dict(os.environ.items() + kwargs["addenv"].items())
127 # XXX: There is a possible problem here where we can fill up
128 # the kernel buffer if we have 64KB of data. This shouldn't
129 # normally be a problem, and the fix for such case would be to write to
130 # temporary files instead of a pipe.
132 # However, it *is* a problem when you do something silly, like
133 # pass --debug to mass-upgrade.
135 # Another possible way of fixing this is converting from a
136 # waitpid() pump to a select() pump, creating a pipe to
137 # ourself, and then setting up a SIGCHILD handler to write a single
138 # byte to the pipe to get us out of select() when a subprocess exits.
139 proc = subprocess.Popen(args, stdout=stdout, stderr=stderr, stdin=stdin, cwd=self.cwd, env=env)
# _async() returns True in ParallelShell (which takes ownership of proc);
# the synchronous shell falls through to communicate().
140 if self._async(proc, args, **kwargs):
142 stdout, stderr = proc.communicate(kwargs["input"])
143 # can occur if we were doing interactive communication; i.e.
144 # we didn't pass in PIPE.
# Log captured output; when interactive, stdout went straight to the
# terminal, so only stderr is passed to _log.
149 if not kwargs["interactive"]:
151 self._log(None, stderr)
153 self._log(stdout, stderr)
# Nonzero exit: PythonCallError for Python subprocesses (it can extract
# the exception name from stderr), plain CallError otherwise.
155 if kwargs["python"]: eclass = PythonCallError
156 else: eclass = CallError
157 raise eclass(proc.returncode, args, stdout, stderr)
# strip=True emulates backticks: return stdout minus trailing newlines.
159 return str(stdout).rstrip("\n")
160 return (stdout, stderr)
161 def _log(self, stdout, stderr):
162 """Logs the standard output and standard input from a command."""
164 logging.debug("STDOUT:\n" + stdout)
166 logging.debug("STDERR:\n" + stderr)
169 def _async(self, *args, **kwargs):
# Shell.callAsUser -- run a command as another user via sudo.
# NOTE(review): numbered excerpt with gaps (original lines 172, 176,
# 179-181, 185 and 192 are missing).  Line 192 presumably converts the
# ``args`` tuple to a list before the insert on line 193 -- ``insert``
# would fail on a tuple -- confirm against upstream before rewriting.
171 def callAsUser(self, *args, **kwargs):
173 Performs a system call as a different user. This is only possible
174 if you are running as root. Keyword arguments
175 are the same as :meth:`call` with the following additions:
177 :param user: name of the user to run command as.
178 :param uid: uid of the user to run command as.
182 The resulting system call internally uses :command:`sudo`,
183 and as such environment variables will get scrubbed. We
184 manually preserve :envvar:`SSH_GSSAPI_NAME`.
# Pop our extra kwargs so they are not forwarded to call().
186 user = kwargs.pop("user", None)
187 uid = kwargs.pop("uid", None)
# Detect Python-ness now, before sudo is prepended to the argument list.
188 kwargs.setdefault("python", is_python(args))
# Nothing to impersonate: behave exactly like call().
189 if not user and not uid: return self.call(*args, **kwargs)
# sudo scrubs the environment; re-inject the GSSAPI ticket name as a
# leading VAR=value word.
190 if os.getenv("SSH_GSSAPI_NAME"):
191 # This might be generalized as "preserve some environment"
193 args.insert(0, "SSH_GSSAPI_NAME=" + os.getenv("SSH_GSSAPI_NAME"))
# ``#uid`` is sudo's numeric-uid target syntax; uid takes precedence.
194 if uid: return self.call("sudo", "-u", "#" + str(uid), *args, **kwargs)
195 if user: return self.call("sudo", "-u", user, *args, **kwargs)
# Shell.safeCall -- like call(), but when the cwd belongs to a different
# user, impersonate that user (intended for Git and other cwd-sensitive
# tools run as root).
# NOTE(review): numbered excerpt with gaps (original lines 197, 205-206,
# 211 and 213 missing).  Line 206 is presumably a non-root early-return
# guard before line 207, and line 211 presumably sets kwargs["uid"]
# before the callAsUser on line 212 -- confirm against upstream before
# rewriting.
196 def safeCall(self, *args, **kwargs):
198 Checks if the owner of the current working directory is the same
199 as the current user, and if it isn't, attempts to sudo to be
200 that user. The intended use case is for calling Git commands
201 when running as root, but this method should be used when
202 interfacing with any moderately complex program that depends
203 on working directory context. Keyword arguments are the
204 same as :meth:`call`.
207 return self.call(*args, **kwargs)
# Owner of the cwd, compared against our effective uid.
208 uid = os.stat(os.getcwd()).st_uid
209 # consider also checking ruid?
210 if uid != os.geteuid():
212 return self.callAsUser(*args, **kwargs)
214 return self.call(*args, **kwargs)
def eval(self, *args, **kwargs):
    """
    Runs a command and returns its stdout with trailing newlines removed,
    mirroring Bash backticks / ``$()``.  Convenience wrapper equivalent
    to :meth:`call` with ``strip=True``; all other keyword arguments are
    forwarded unchanged.

    >>> sh.eval("echo", "Foobar")
    'Foobar'
    """
    forwarded = dict(kwargs)
    forwarded["strip"] = True
    return self.call(*args, **forwarded)
def setcwd(self, cwd):
    """
    Sets the directory processes are executed in.  This sets a value
    to be passed as the ``cwd`` argument to ``subprocess.Popen``.

    :param cwd: directory path, or ``None`` to inherit our own cwd.
    """
    # Body reconstructed -- the assignment line was not visible in this
    # excerpt, but call() passes ``cwd=self.cwd`` to Popen, so this
    # setter must record the value here.  TODO confirm against upstream.
    self.cwd = cwd
def interactive(self):
    """
    Hands the terminal over to the user's shell (``$SHELL``, defaulting
    to ``/bin/bash``) run interactively in the current working directory,
    and logs a warning if it exits with a nonzero status.

    NOTE(review): the ``def`` line and any docstring were missing from
    this excerpt; the method name is reconstructed from the module-level
    alias ``interactive = shell.interactive`` -- confirm upstream.
    """
    user_shell = os.getenv("SHELL")
    if not user_shell: user_shell = "/bin/bash"
    # XXX: scripts specific hack, since mbash doesn't respect the current working directory
    # When the revolution comes (i.e. $ATHENA_HOMEDIR/Scripts is your Scripts home
    # directory) this isn't strictly necessary, but we'll probably need to support
    # web_scripts directories ad infinitum.
    if user_shell == "/usr/local/bin/mbash": user_shell = "/bin/bash"
    try:
        self.call(user_shell, "-i", interactive=True)
    except shell.CallError as e:
        # NOTE(review): ``shell`` is referenced as a namespace here in the
        # original (line 244); the import providing it is not visible in
        # this excerpt -- confirm it resolves to this module's CallError.
        logging.warning("Shell returned non-zero exit code %d" % e.code)
# ParallelShell -- Shell variant that defers subprocesses via _async and
# pumps them with waitpid, firing callbacks on completion.
# NOTE(review): numbered excerpt with many gaps (missing original lines
# include 248, 252-253, 255, 259, 266, 268, 270, 275, 277, 281, 283,
# 286-288, 291, 293, 296, 298, 301, 304, 307-308, 311, 317, 319, 321-322,
# 324-325, 327, 329, 335, 339, 343, 345); several ``def`` headers and the
# try/except scaffolding around waitpid are absent, so the fragments
# below must not be rewritten from this view.
247 class ParallelShell(Shell):
249 Modifies the semantics of :class:`Shell` so that
250 commands are queued here, and executed in parallel using waitpid
251 with ``max`` subprocesses, and result in callback execution
254 .. method:: call(*args, **kwargs)
256 Enqueues a system call for parallel processing. If there are
257 no openings in the queue, this will block. Keyword arguments
258 are the same as :meth:`Shell.call` with the following additions:
260 :param on_success: Callback function for success (zero exit status).
261 The callback function should accept two arguments,
262 ``stdout`` and ``stderr``.
263 :param on_error: Callback function for failure (nonzero exit status).
264 The callback function should accept one argument, the
265 exception that would have been thrown by the synchronous
267 :return: The :class:`subprocess.Proc` object that was opened.
269 .. method:: callAsUser(*args, **kwargs)
271 Enqueues a system call under a different user for parallel
272 processing. Keyword arguments are the same as
273 :meth:`Shell.callAsUser` with the additions of keyword
274 arguments from :meth:`call`.
276 .. method:: safeCall(*args, **kwargs)
278 Enqueues a "safe" call for parallel processing. Keyword
279 arguments are the same as :meth:`Shell.safeCall` with the
280 additions of keyword arguments from :meth:`call`.
282 .. method:: eval(*args, **kwargs)
284 No difference from :meth:`call`. Consider having a
285 non-parallel shell if the program you are shelling out
# Constructor: chains to Shell and records the parallelism bound.
# Presumably also initialises ``self.running`` (pid -> pending tuple),
# which _async/reap use -- the line is not visible here.
289 def __init__(self, dry = False, max = 10):
290 super(ParallelShell, self).__init__(dry=dry)
292 self.max = max # maximum of commands to run in parallel
# Factory: a DummyParallelShell when parallelism is disabled, otherwise a
# real ParallelShell.  Presumably decorated @staticmethod (no ``self``),
# with the ``if no_parallelize:`` branch on a missing line -- confirm.
294 def make(no_parallelize, max):
295 """Convenience method oriented towards command modules."""
297 return DummyParallelShell()
299 return ParallelShell(max=max)
# Overrides Shell._async: take ownership of the Popen object and defer.
300 def _async(self, proc, args, python, on_success, on_error, **kwargs):
302 Gets handed a :class:`subprocess.Proc` object from our deferred
303 execution. See :meth:`Shell.call` source code for details.
305 self.running[proc.pid] = (proc, args, python, on_success, on_error)
306 return True # so that the parent function returns
# wait(): its ``def`` header (around line 307-308) is missing here.
309 Blocking call that waits for an open subprocess slot. This is
310 automatically called by :meth:`Shell.call`.
312 # XXX: This API sucks; the actual call/callAsUser call should
313 # probably block automatically (unless I have a good reason not to)
314 # bail out immediately on initial ramp up
315 if len(self.running) < self.max: return
316 # now, wait for open pids.
# waitpid(-1, 0): block for any child; reap() consumes (pid, status).
# The enclosing try/except OSError (ECHILD = no children left) is on
# missing lines.
318 self.reap(*os.waitpid(-1, 0))
320 if e.errno == errno.ECHILD: return
# join(): its ``def`` header (around line 321-322) is missing here.
323 """Waits for all of our subprocesses to terminate."""
326 self.reap(*os.waitpid(-1, 0))
328 if e.errno == errno.ECHILD: return
330 def reap(self, pid, status):
331 """Reaps a process."""
332 # ooh, zombie process. reap it
333 proc, args, python, on_success, on_error = self.running.pop(pid)
334 # XXX: this is slightly dangerous; should actually use
# Drain the pipes after exit (see the XXX above: large output could have
# filled the kernel pipe buffer before this point).
336 stdout = proc.stdout.read()
337 stderr = proc.stderr.read()
338 self._log(stdout, stderr)
# Dispatch to the error callback with the same exception the synchronous
# shell would have raised, or to the success callback.
340 if python: eclass = PythonCallError
341 else: eclass = CallError
342 on_error(eclass(proc.returncode, args, stdout, stderr))
344 on_success(stdout, stderr)
# interactive() override: its ``def`` header (line 345) is missing;
# interactive terminals cannot be multiplexed across parallel children.
346 raise Error("Cannot use interactive() on parallel shell")
# Module-level convenience aliases bound to a shared Shell instance, so
# callers can ``from wizard.shell import callAsUser`` etc.
# NOTE(review): the line constructing the instance named ``shell``
# (around original line 350) and the aliases on lines 353/355 (likely
# ``call`` and ``eval``) are missing from this excerpt.
348 # Setup a convenience global instance
351 callAsUser = shell.callAsUser
352 safeCall = shell.safeCall
354 interactive = shell.interactive
class DummyParallelShell(ParallelShell):
    """Degenerate :class:`ParallelShell` limited to a single subprocess
    slot, so every queued call effectively runs (and blocks) serially
    while presenting the same API."""
    def __init__(self, dry = False):
        ParallelShell.__init__(self, dry=dry, max=1)
class Error(wizard.Error):
    """Base exception for this module"""
    # Body reconstructed: the excerpt lost the line after the docstring;
    # an empty exception subclass needs only ``pass`` here.
    pass
class CallError(Error):
    """Indicates that a subprocess call returned a nonzero exit status."""
    #: The exit code of the failed subprocess.
    code = None
    #: List of the program and arguments that failed.
    args = None
    #: The stdout of the program.
    stdout = None
    #: The stderr of the program.
    stderr = None
    def __init__(self, code, args, stdout, stderr):
        # Body reconstructed -- the original __init__ assignments were not
        # visible in this excerpt; __str__ below reads self.code and
        # self.stderr, so plain attribute storage is assumed.  TODO
        # confirm against upstream (note: ``args`` shadows
        # Exception.args deliberately, matching the documented attribute).
        self.code = code
        self.args = args
        self.stdout = stdout
        self.stderr = stderr
    def __str__(self):
        # Summarise with the last line of stderr, then the exit code and
        # the full stderr for context.
        compact = self.stderr.rstrip().split("\n")[-1]
        return "%s (exited with %d)\n%s" % (compact, self.code, self.stderr)
class PythonCallError(CallError):
    """
    Indicates that a Python subprocess call had an uncaught exception.
    This exception also contains the attributes of :class:`CallError`.
    """
    #: Name of the uncaught exception, parsed from stderr (None if the
    #: subprocess produced no stderr).
    name = None
    def __init__(self, code, args, stdout, stderr):
        # util.get_exception_name extracts the exception class name from
        # a Python traceback on stderr (project helper).
        if stderr: self.name = util.get_exception_name(stderr)
        CallError.__init__(self, code, args, stdout, stderr)
    def __str__(self):
        # if/else scaffolding reconstructed -- the original ``def``/branch
        # lines were not visible in this excerpt; both return strings are
        # verbatim from the original.  TODO confirm against upstream.
        if self.name:
            return "PythonCallError [%s]\n%s" % (self.name, self.stderr)
        else:
            return "PythonCallError\n%s" % self.stderr