"""
http://amoffat.github.io/sh/
"""
#===============================================================================
# Copyright (C) 2011-2015 by Andrew Moffat
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#===============================================================================


__version__ = "1.11"
__project_url__ = "https://github.com/amoffat/sh"


import platform

if "windows" in platform.system().lower():
    raise ImportError("sh %s is currently only supported on linux and osx. \
please install pbs 0.110 (http://pypi.python.org/pypi/pbs) for windows \
support." % __version__)


import sys
IS_PY3 = sys.version_info[0] == 3

import traceback
import os
import re
from glob import glob as original_glob
import time
from types import ModuleType
from functools import partial
import inspect
from contextlib import contextmanager

from locale import getpreferredencoding
DEFAULT_ENCODING = getpreferredencoding() or "UTF-8"


if IS_PY3:
    from io import StringIO
    from io import BytesIO as cStringIO
    from queue import Queue, Empty

    # for some reason, python 3.1 removed the builtin "callable", wtf
    if not hasattr(__builtins__, "callable"):
        def callable(ob):
            return hasattr(ob, "__call__")
else:
    from StringIO import StringIO
    from cStringIO import OutputType as cStringIO
    from Queue import Queue, Empty

IS_OSX = platform.system() == "Darwin"
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
SH_LOGGER_NAME = "sh"


import errno
import warnings

import pty
import termios
import signal
import gc
import select
import threading
import tty
import fcntl
import struct
import resource
from collections import deque
import logging
import weakref

# TODO remove with contexts in next version
def with_context_warning():
    warnings.warn("""
with contexts are deprecated because they are not thread safe. they will be \
removed in the next version. use subcommands instead \
http://amoffat.github.io/sh/#sub-commands. see \
https://github.com/amoffat/sh/issues/195
""".strip(), stacklevel=3)



if IS_PY3:
    raw_input = input
    unicode = str
    basestring = str


_unicode_methods = set(dir(unicode()))


def encode_to_py3bytes_or_py2str(s):
    """ takes anything and attempts to return a py2 string or py3 bytes. this
    is typically used when creating command + arguments to be executed via
    os.exec* """

    fallback_encoding = "utf8"

    if IS_PY3:
        # if we're already bytes, do nothing
        if isinstance(s, bytes):
            pass
        else:
            s = str(s)
            try:
                s = bytes(s, DEFAULT_ENCODING)
            except UnicodeEncodeError:
                s = bytes(s, fallback_encoding)
    else:
        # attempt to convert the thing to unicode from the system's encoding
        try:
            s = unicode(s, DEFAULT_ENCODING)
        # if the thing is already unicode, or it's a number, it can't be
        # coerced to unicode with an encoding argument, but if we leave out
        # the encoding argument, it will convert it to a string, then to unicode
        except TypeError:
            s = unicode(s)

        # now that we have guaranteed unicode, encode to our system encoding,
        # but attempt to fall back to something
        try:
            s = s.encode(DEFAULT_ENCODING)
        except UnicodeEncodeError:
            s = s.encode(fallback_encoding)
    return s


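# illustrative sketch (not part of the library): encode_to_py3bytes_or_py2str
# normalizes whatever a caller passes as a program argument into the bytes/str
# type that os.exec* expects, e.g.:
#
#   encode_to_py3bytes_or_py2str("ls")   # b"ls" on py3, "ls" on py2
#   encode_to_py3bytes_or_py2str(42)     # b"42" / "42" (stringified first)

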
class ErrorReturnCode(Exception):
    """ base class for all exceptions as a result of a command's exit status
    being deemed an error. this base class is dynamically subclassed into
    derived classes with the format: ErrorReturnCode_NNN where NNN is the exit
    code number. the reason for this is it reduces boilerplate code when
    testing error return codes:

        try:
            some_cmd()
        except ErrorReturnCode_12:
            print("couldn't do X")

    vs:

        try:
            some_cmd()
        except ErrorReturnCode as e:
            if e.exit_code == 12:
                print("couldn't do X")

    it's not much of a savings, but i believe it makes the code easier to read """

    truncate_cap = 750

    def __init__(self, full_cmd, stdout, stderr):
        self.full_cmd = full_cmd
        self.stdout = stdout
        self.stderr = stderr

        # use a bytes placeholder so the .decode() below works on py3 even
        # when the stream was redirected and we captured no output
        if self.stdout is None:
            exc_stdout = b"<redirected>"
        else:
            exc_stdout = self.stdout[:self.truncate_cap]
            out_delta = len(self.stdout) - len(exc_stdout)
            if out_delta:
                exc_stdout += ("... (%d more, please see e.stdout)" % out_delta).encode()

        if self.stderr is None:
            exc_stderr = b"<redirected>"
        else:
            exc_stderr = self.stderr[:self.truncate_cap]
            err_delta = len(self.stderr) - len(exc_stderr)
            if err_delta:
                exc_stderr += ("... (%d more, please see e.stderr)" % err_delta).encode()

        msg = "\n\n  RAN: %r\n\n  STDOUT:\n%s\n\n  STDERR:\n%s" % \
            (full_cmd, exc_stdout.decode(DEFAULT_ENCODING, "replace"),
             exc_stderr.decode(DEFAULT_ENCODING, "replace"))
        super(ErrorReturnCode, self).__init__(msg)


class SignalException(ErrorReturnCode): pass

class TimeoutException(Exception):
    """ the exception thrown when a command is killed because a specified
    timeout (via _timeout) was hit """
    def __init__(self, exit_code):
        self.exit_code = exit_code
        super(TimeoutException, self).__init__()

SIGNALS_THAT_SHOULD_THROW_EXCEPTION = (
    signal.SIGABRT,
    signal.SIGBUS,
    signal.SIGFPE,
    signal.SIGILL,
    signal.SIGINT,
    signal.SIGKILL,
    signal.SIGPIPE,
    signal.SIGQUIT,
    signal.SIGSEGV,
    signal.SIGTERM,
    signal.SIGSYS,
)


# we subclass AttributeError because:
# https://github.com/ipython/ipython/issues/2577
# https://github.com/amoffat/sh/issues/97#issuecomment-10610629
class CommandNotFound(AttributeError): pass




rc_exc_regex = re.compile(r"(ErrorReturnCode|SignalException)_((\d+)|SIG\w+)")
rc_exc_cache = {}


def get_exc_from_name(name):
    """ takes an exception name, like:

        ErrorReturnCode_1
        SignalException_9
        SignalException_SIGHUP

    and returns the corresponding exception. this is primarily used for
    importing exceptions from sh into user code, for instance, to capture those
    exceptions """

    exc = None
    try:
        return rc_exc_cache[name]
    except KeyError:
        m = rc_exc_regex.match(name)
        if m:
            base = m.group(1)
            rc_or_sig_name = m.group(2)

            if base == "SignalException":
                try:
                    rc = -int(rc_or_sig_name)
                except ValueError:
                    rc = -getattr(signal, rc_or_sig_name)
            else:
                rc = int(rc_or_sig_name)

            exc = get_rc_exc(rc)
    return exc


def get_rc_exc(rc_or_sig_name):
    """ takes an exit code, signal number, or signal name, and produces an
    exception that corresponds to that return code. positive return codes
    yield an ErrorReturnCode exception, negative return codes yield a
    SignalException

    we also cache the generated exception so that only one signal of that type
    exists, preserving identity """

    try:
        rc = int(rc_or_sig_name)
    except ValueError:
        rc = -getattr(signal, rc_or_sig_name)

    try:
        return rc_exc_cache[rc]
    except KeyError:
        pass

    if rc > 0:
        name = "ErrorReturnCode_%d" % rc
        base = ErrorReturnCode
    else:
        name = "SignalException_%d" % abs(rc)
        base = SignalException

    exc = type(name, (base,), {"exit_code": rc})
    rc_exc_cache[rc] = exc
    return exc


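# illustrative sketch (not part of the library): because get_rc_exc caches the
# generated classes, the same exit code always maps to the identical class
# object, so except-clauses match reliably:
#
#   assert get_rc_exc(1) is get_rc_exc(1)            # ErrorReturnCode_1
#   assert get_rc_exc(-9) is get_rc_exc("SIGKILL")   # SignalException_9
#   assert issubclass(get_rc_exc(12), ErrorReturnCode)

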
def which(program):
    def is_exe(fpath):
        return (os.path.exists(fpath) and
                os.access(fpath, os.X_OK) and
                os.path.isfile(os.path.realpath(fpath)))

    fpath, fname = os.path.split(program)
    if fpath:
        if is_exe(program):
            return program
    else:
        if "PATH" not in os.environ:
            return None
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file

    return None

def resolve_program(program):
    path = which(program)
    if not path:
        # our actual command might have a dash in it, but we can't call
        # that from python (we have to use underscores), so we'll check
        # if a dash version of our underscore command exists and use that
        # if it does
        if "_" in program:
            path = which(program.replace("_", "-"))
        if not path:
            return None
    return path


# we add this thin wrapper to glob.glob because of a specific edge case where
# glob does not expand to anything. for example, if you try to do
# glob.glob("*.py") and there are no *.py files in the directory, glob.glob
# returns an empty list. this empty list gets passed to the command, and
# then the command fails with a misleading error message. this thin wrapper
# ensures that if there is no expansion, we pass in the original argument,
# so that when the command fails, the error message is clearer
def glob(arg):
    return original_glob(arg) or arg


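# illustrative sketch (not part of the library): the difference matters when a
# pattern matches nothing. assuming no *.xyz files exist in the cwd:
#
#   original_glob("*.xyz")   # -> [] (the command would receive no argument)
#   glob("*.xyz")            # -> "*.xyz" (the command sees the literal
#                            #    pattern and can report a sensible error)

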
class Logger(object):
    """ provides a memory-inexpensive logger. a gotcha about python's builtin
    logger is that logger objects are never garbage collected. if you create a
    thousand loggers with unique names, they'll sit there in memory until your
    script is done. with sh, it's easy to create loggers with unique names if
    we want our loggers to include our command arguments. for example, these
    are all unique loggers:

        ls -l
        ls -l /tmp
        ls /tmp

    so instead of creating unique loggers, and without sacrificing logging
    output, we use this class, which maintains, as part of its state, the
    logging "context", which carries the unique part of the name. this allows
    us to get a logger with a very general name, eg: "command", and have a
    unique name appended to it via the context, eg: "ls -l /tmp" """
    def __init__(self, name, context=None):
        self.name = name
        if context:
            context = context.replace("%", "%%")
        self.context = context
        self.log = logging.getLogger("%s.%s" % (SH_LOGGER_NAME, name))

    def _format_msg(self, msg, *args):
        if self.context:
            msg = "%s: %s" % (self.context, msg)
        return msg % args

    def get_child(self, name, context):
        new_name = self.name + "." + name
        new_context = self.context + "." + context
        return Logger(new_name, new_context)

    def info(self, msg, *args):
        self.log.info(self._format_msg(msg, *args))

    def debug(self, msg, *args):
        self.log.debug(self._format_msg(msg, *args))

    def error(self, msg, *args):
        self.log.error(self._format_msg(msg, *args))

    def exception(self, msg, *args):
        self.log.exception(self._format_msg(msg, *args))


def friendly_truncate(s, max_len):
    if len(s) > max_len:
        s = "%s...(%d more)" % (s[:max_len], len(s) - max_len)
    return s


class RunningCommand(object):
    """ this represents an executing Command object. it is returned as the
    result of __call__() being executed on a Command instance. this creates a
    reference to an OProc instance, which is a low-level wrapper around the
    process that was exec'd

    this is the class that gets manipulated the most by user code, and so it
    implements various convenience methods and logical mechanisms for the
    underlying process. for example, if a user tries to access a
    backgrounded-process's stdout/err, the RunningCommand object is smart enough
    to know to wait() on the process to finish first. and when the process
    finishes, RunningCommand is smart enough to translate exit codes to
    exceptions. """

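    # illustrative sketch (not part of the library): typical interactions with
    # a RunningCommand instance. "sleep" here is just an example program:
    #
    #   p = sh.sleep(3, _bg=True)   # returns a RunningCommand immediately
    #   p.pid                       # available right away
    #   p.wait()                    # blocks until the process exits
    #   str(p)                      # decoded stdout (waits first if needed)
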
    def __init__(self, cmd, call_args, stdin, stdout, stderr):
        # self.ran is used for auditing what actually ran. for example, in
        # exceptions, or if you just want to know what was run after the
        # command ran
        if IS_PY3:
            self.ran = " ".join([arg.decode(DEFAULT_ENCODING, "ignore") for arg in cmd])
        else:
            self.ran = " ".join(cmd)


        friendly_cmd = friendly_truncate(self.ran, 20)
        friendly_call_args = friendly_truncate(str(call_args), 20)

        # we're setting up the logger string here, instead of __repr__ because
        # we reserve __repr__ to behave as if it was evaluating the child
        # process's output
        logger_str = "<Command %r call_args %s>" % (friendly_cmd,
                friendly_call_args)

        self.log = Logger("command", logger_str)
        self.call_args = call_args
        self.cmd = cmd

        self.process = None
        self._process_completed = False
        should_wait = True
        spawn_process = True


        # with contexts shouldn't run at all yet, they prepend
        # to every command in the context
        if call_args["with"]:
            spawn_process = False
            Command._prepend_stack.append(self)


        if call_args["piped"] or call_args["iter"] or call_args["iter_noblock"]:
            should_wait = False

        # we're running in the background, return self and let us lazily
        # evaluate
        if call_args["bg"]:
            should_wait = False

        # redirection
        if call_args["err_to_out"]:
            stderr = OProc.STDOUT


        # set up which stream should write to the pipe
        # TODO, make pipe None by default and limit the size of the Queue
        # in oproc.OProc
        pipe = OProc.STDOUT
        if call_args["iter"] == "out" or call_args["iter"] is True:
            pipe = OProc.STDOUT
        elif call_args["iter"] == "err":
            pipe = OProc.STDERR

        if call_args["iter_noblock"] == "out" or call_args["iter_noblock"] is True:
            pipe = OProc.STDOUT
        elif call_args["iter_noblock"] == "err":
            pipe = OProc.STDERR


        # there's currently only one case where we wouldn't spawn a child
        # process, and that's if we're using a with-context with our command
        if spawn_process:
            self.log.info("starting process")
            self.process = OProc(self.log, cmd, stdin, stdout, stderr,
                    self.call_args, pipe)

            if should_wait:
                self.wait()

    def wait(self):
        if not self._process_completed:
            self._process_completed = True

            exit_code = self.process.wait()
            if self.process.timed_out:
                # if we timed out, our exit code represents a signal, which is
                # negative, so let's make it positive to store in our
                # TimeoutException
                raise TimeoutException(-exit_code)
            else:
                self.handle_command_exit_code(exit_code)

            # https://github.com/amoffat/sh/issues/185
            if self.call_args["done"]:
                self.call_args["done"](self)

        return self


    def handle_command_exit_code(self, code):
        """ here we determine if we had an exception, or an error code that we
        weren't expecting to see. if we did, we create and raise an exception
        """
        if (code not in self.call_args["ok_code"] and (code > 0 or -code in
            SIGNALS_THAT_SHOULD_THROW_EXCEPTION)):
            exc = get_rc_exc(code)
            raise exc(self.ran, self.process.stdout, self.process.stderr)



    @property
    def stdout(self):
        self.wait()
        return self.process.stdout

    @property
    def stderr(self):
        self.wait()
        return self.process.stderr

    @property
    def exit_code(self):
        self.wait()
        return self.process.exit_code

    @property
    def pid(self):
        return self.process.pid

    def __len__(self):
        return len(str(self))

    def __enter__(self):
        """ we don't actually do anything here because anything that should
        have been done would have been done in the Command.__call__ call.
        essentially all that has to happen is for the command to be pushed on
        the prepend stack. """
        with_context_warning()

    def __iter__(self):
        return self

    def next(self):
        """ allow us to iterate over the output of our command """

        # we do this because if get blocks, we can't catch a KeyboardInterrupt
        # so the slight timeout allows for that.
        while True:
            try:
                chunk = self.process._pipe_queue.get(True, 0.001)
            except Empty:
                if self.call_args["iter_noblock"]:
                    return errno.EWOULDBLOCK
            else:
                if chunk is None:
                    self.wait()
                    raise StopIteration()
                try:
                    return chunk.decode(self.call_args["encoding"],
                        self.call_args["decode_errors"])
                except UnicodeDecodeError:
                    return chunk

    # python 3
    __next__ = next

    def __exit__(self, typ, value, traceback):
        if self.call_args["with"] and Command._prepend_stack:
            Command._prepend_stack.pop()

    def __str__(self):
        """ in python3, should return unicode. in python2, should return a
        string of bytes """
        if IS_PY3:
            return self.__unicode__()
        else:
            return unicode(self).encode(self.call_args["encoding"])

    def __unicode__(self):
        """ a magic method defined for python2. calling unicode() on a
        RunningCommand object will call this """
        if self.process and self.stdout:
            return self.stdout.decode(self.call_args["encoding"],
                self.call_args["decode_errors"])
        elif IS_PY3:
            return ""
        else:
            return unicode("")

    def __eq__(self, other):
        return unicode(self) == unicode(other)
    __hash__ = None  # Avoid DeprecationWarning in Python < 3

    def __contains__(self, item):
        return item in str(self)

    def __getattr__(self, p):
        # let these three attributes pass through to the OProc object
        if p in ("signal", "terminate", "kill"):
            if self.process:
                return getattr(self.process, p)
            else:
                raise AttributeError

        # see if strings have what we're looking for. we're looking at the
        # method names explicitly because we don't want to evaluate self unless
        # we absolutely have to, the reason being, in python2, hasattr swallows
        # exceptions, and if we try to run hasattr on a command that failed and
        # is being run with _iter=True, the command will be evaluated, throw an
        # exception, but hasattr will discard it
        if p in _unicode_methods:
            return getattr(unicode(self), p)

        raise AttributeError

    def __repr__(self):
        """ in python3, should return unicode. in python2, should return a
        string of bytes """
        try:
            return str(self)
        except UnicodeDecodeError:
            if self.process:
                if self.stdout:
                    return repr(self.stdout)
            return repr("")

    def __long__(self):
        return long(str(self).strip())

    def __float__(self):
        return float(str(self).strip())

    def __int__(self):
        return int(str(self).strip())



def output_redirect_is_filename(out):
    return out \
        and not callable(out) \
        and not hasattr(out, "write") \
        and not isinstance(out, (cStringIO, StringIO))




class Command(object):
    """ represents an un-run system program, like "ls" or "cd". because it
    represents the program itself (and not a running instance of it), it should
    hold very little state. in fact, the only state it does hold is baked
    arguments.

    when a Command object is called, the result that is returned is a
    RunningCommand object, which represents the Command put into an execution
    state. """
    _prepend_stack = []

    _call_args = {
        # currently unsupported
        #"fg": False, # run command in foreground

        # run a command in the background. commands run in the background
        # ignore SIGHUP and do not automatically exit when the parent process
        # ends
        "bg": False,

        "with": False, # prepend the command to every command after it
        "in": None,
        "out": None, # redirect STDOUT
        "err": None, # redirect STDERR
        "err_to_out": None, # redirect STDERR to STDOUT

        # stdin buffer size
        # 1 for line, 0 for unbuffered, any other number for that amount
        "in_bufsize": 0,
        # stdout buffer size, same values as above
        "out_bufsize": 1,
        "err_bufsize": 1,

        # this is how big the output buffers will be for stdout and stderr.
        # this is essentially how much output they will store from the process.
        # we use a deque, so if it overflows past this amount, the first items
        # get pushed off as each new item gets added.
        #
        # NOTICE
        # this is not a *BYTE* size, this is a *CHUNK* size...meaning, that if
        # you're buffering out/err at 1024 bytes, the internal buffer size will
        # be "internal_bufsize" CHUNKS of 1024 bytes
        "internal_bufsize": 3 * 1024 ** 2,

        "env": None,
        "piped": None,
        "iter": None,
        "iter_noblock": None,
        "ok_code": 0,
        "cwd": None,

        # the separator delimiting between a long-argument's name and its value
        # for example, --arg=derp, '=' is the long_sep
        "long_sep": "=",

        # this is for programs that expect their input to be from a terminal.
        # ssh is one of those programs
        "tty_in": False,
        "tty_out": True,

        "encoding": DEFAULT_ENCODING,
        "decode_errors": "strict",

        # how long the process should run before it is auto-killed
        "timeout": 0,
        "timeout_signal": signal.SIGKILL,

        # TODO write some docs on "long-running processes"
        # these control whether or not stdout/err will get aggregated together
        # as the process runs. this has memory usage implications, so sometimes
        # with long-running processes with a lot of data, it makes sense to
        # set these to true
        "no_out": False,
        "no_err": False,
        "no_pipe": False,

        # if any redirection is used for stdout or stderr, internal buffering
        # of that data is not stored. this forces it to be stored, as if
        # the output is being T'd to both the redirected destination and our
        # internal buffers
        "tee": None,

        # will be called when a process terminates without exception. this
        # option also puts the command in the background, since it doesn't make
        # sense to have an un-backgrounded command with a done callback
        "done": None,

        # a tuple (rows, columns) of the desired size of both the stdout and
        # stdin ttys, if ttys are being used
        "tty_size": (20, 80),
    }
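
    # illustrative sketch (not part of the library): each entry above maps to
    # an underscore-prefixed keyword argument at call time, e.g.:
    #
    #   sh.ls("-l", _out="/tmp/listing.txt")           # "out": redirect stdout
    #   sh.sleep(10, _bg=True, _timeout=5)             # "bg" and "timeout"
    #   sh.grep("pattern", "f.txt", _ok_code=[0, 1])   # "ok_code" may be a list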

    # these are arguments that cannot be called together, because they wouldn't
    # make any sense
    _incompatible_call_args = (
        #("fg", "bg", "Command can't be run in the foreground and background"),
        ("err", "err_to_out", "Stderr is already being redirected"),
        ("piped", "iter", "You cannot iterate when this command is being piped"),
        ("piped", "no_pipe", "Using a pipe doesn't make sense if you've \
disabled the pipe"),
        ("no_out", "iter", "You cannot iterate over output if there is no \
output"),
    )


    # this method exists because of the need to have some way of letting
    # manual object instantiation not perform the underscore-to-dash command
    # conversion that resolve_program uses.
    #
    # there are 2 ways to create a Command object. using sh.Command(<program>)
    # or by using sh.<program>. the method fed into sh.Command must be taken
    # literally, and so no underscore-dash conversion is performed. the one
    # for sh.<program> must do the underscore-dash conversion, because we
    # can't type dashes in method names
    @classmethod
    def _create(cls, program, **default_kwargs):
        path = resolve_program(program)
        if not path:
            raise CommandNotFound(program)

        cmd = cls(path)
        if default_kwargs:
            cmd = cmd.bake(**default_kwargs)

        return cmd


    def __init__(self, path):
        found = which(path)
        if not found:
            raise CommandNotFound(path)

        self._path = encode_to_py3bytes_or_py2str(found)

        self._partial = False
        self._partial_baked_args = []
        self._partial_call_args = {}

        # bugfix for functools.wraps. issue #121
        self.__name__ = str(self)


    def __getattribute__(self, name):
        # convenience
        getattr = partial(object.__getattribute__, self)

        if name.startswith("_"):
            return getattr(name)
        if name == "bake":
            return getattr("bake")
        if name.endswith("_"):
            name = name[:-1]

        return getattr("bake")(name)


    @staticmethod
    def _extract_call_args(kwargs, to_override={}):
        kwargs = kwargs.copy()
        call_args = {}
        for parg, default in Command._call_args.items():
            key = "_" + parg

            if key in kwargs:
                call_args[parg] = kwargs[key]
                del kwargs[key]
            elif parg in to_override:
                call_args[parg] = to_override[parg]

        # test for incompatible call args
        s1 = set(call_args.keys())
        for args in Command._incompatible_call_args:
            args = list(args)
            error = args.pop()

            if s1.issuperset(args):
                raise TypeError("Invalid special arguments %r: %s" % (args, error))

        return call_args, kwargs


    def _aggregate_keywords(self, keywords, sep, raw=False):
        processed = []
        for k, v in keywords.items():
            # we're passing a short arg as a kwarg, example:
            # cut(d="\t")
            if len(k) == 1:
                if v is not False:
                    processed.append(encode_to_py3bytes_or_py2str("-" + k))
                    if v is not True:
                        processed.append(encode_to_py3bytes_or_py2str(v))

            # we're doing a long arg
            else:
                if not raw:
                    k = k.replace("_", "-")

                if v is True:
                    processed.append(encode_to_py3bytes_or_py2str("--" + k))
                elif v is False:
                    pass
                else:
                    arg = encode_to_py3bytes_or_py2str("--%s%s%s" % (k, sep, v))
                    processed.append(arg)
        return processed


    def _compile_args(self, args, kwargs, sep):
        processed_args = []

        # aggregate positional args
        for arg in args:
            if isinstance(arg, (list, tuple)):
                if not arg:
                    warnings.warn("Empty list passed as an argument to %r. \
If you're using glob.glob(), please use sh.glob() instead." % self._path, stacklevel=3)
                for sub_arg in arg:
                    processed_args.append(encode_to_py3bytes_or_py2str(sub_arg))
            elif isinstance(arg, dict):
                processed_args += self._aggregate_keywords(arg, sep, raw=True)
            else:
                processed_args.append(encode_to_py3bytes_or_py2str(arg))

        # aggregate the keyword arguments
        processed_args += self._aggregate_keywords(kwargs, sep)

        return processed_args


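    # illustrative sketch (not part of the library): how keyword arguments are
    # compiled into argv entries by the two methods above:
    #
    #   cut(d="\t")           ->  ["-d", "\t"]         (short arg with value)
    #   ls(all=True)          ->  ["--all"]            (long flag)
    #   curl(max_time=10)     ->  ["--max-time=10"]    (underscore -> dash)
    #   foo(some_flag=False)  ->  []                   (False omits the arg)
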
    # TODO needs documentation
    def bake(self, *args, **kwargs):
        fn = Command(self._path)
        fn._partial = True

        call_args, kwargs = self._extract_call_args(kwargs)

        pruned_call_args = call_args
        for k, v in Command._call_args.items():
            try:
                if pruned_call_args[k] == v:
                    del pruned_call_args[k]
            except KeyError:
                continue

        fn._partial_call_args.update(self._partial_call_args)
        fn._partial_call_args.update(pruned_call_args)
        fn._partial_baked_args.extend(self._partial_baked_args)
        sep = pruned_call_args.get("long_sep", self._call_args["long_sep"])
        fn._partial_baked_args.extend(self._compile_args(args, kwargs, sep))
        return fn

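    # illustrative sketch (not part of the library): bake() returns a new
    # Command with arguments and special kwargs frozen in, applied to every
    # subsequent call. "myserver" is a made-up host:
    #
    #   ls = sh.ls.bake("-l")
    #   ls("/tmp")                              # runs: ls -l /tmp
    #   myssh = sh.ssh.bake("myserver", p=1393)
    #   myssh("whoami")                         # runs: ssh myserver -p 1393 whoami
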
    def __str__(self):
        """ in python3, should return unicode. in python2, should return a
        string of bytes """
        if IS_PY3:
            return self.__unicode__()
        else:
            return self.__unicode__().encode(DEFAULT_ENCODING)


    def __eq__(self, other):
        try:
            return str(self) == str(other)
        except:
            return False
    __hash__ = None  # Avoid DeprecationWarning in Python < 3


    def __repr__(self):
        """ in python3, should return unicode. in python2, should return a
        string of bytes """
        return "<Command %r>" % str(self)


    def __unicode__(self):
        """ a magic method defined for python2. calling unicode() on
        self will call this """
        baked_args = " ".join(item.decode(DEFAULT_ENCODING) for item in self._partial_baked_args)
        if baked_args:
            baked_args = " " + baked_args
        return self._path.decode(DEFAULT_ENCODING) + baked_args

    def __enter__(self):
        with_context_warning()
        self(_with=True)

    def __exit__(self, typ, value, traceback):
        Command._prepend_stack.pop()


    def __call__(self, *args, **kwargs):
        kwargs = kwargs.copy()
        args = list(args)

        cmd = []

        # aggregate any 'with' contexts
        call_args = Command._call_args.copy()
        for prepend in self._prepend_stack:
            # don't pass the 'with' call arg
            pcall_args = prepend.call_args.copy()
            try:
                del pcall_args["with"]
            except KeyError:
                pass

            call_args.update(pcall_args)
            cmd.extend(prepend.cmd)

        cmd.append(self._path)

        # here we extract the special kwargs and override any
        # special kwargs from the possibly baked command
        tmp_call_args, kwargs = self._extract_call_args(kwargs, self._partial_call_args)
        call_args.update(tmp_call_args)

        if not getattr(call_args["ok_code"], "__iter__", None):
            call_args["ok_code"] = [call_args["ok_code"]]


        if call_args["done"]:
            call_args["bg"] = True

        # check if we're piping via composition
        stdin = call_args["in"]
        if args:
            first_arg = args.pop(0)
            if isinstance(first_arg, RunningCommand):
                # it makes sense that if the input pipe of a command is running
                # in the background, then this command should run in the
                # background as well
                if first_arg.call_args["bg"]:
                    call_args["bg"] = True

                if first_arg.call_args["piped"] == "direct":
                    stdin = first_arg.process
                else:
                    stdin = first_arg.process._pipe_queue

            else:
                args.insert(0, first_arg)

        processed_args = self._compile_args(args, kwargs, call_args["long_sep"])

        # makes sure our arguments are broken up correctly
        split_args = self._partial_baked_args + processed_args

        final_args = split_args

        cmd.extend(final_args)


        # stdout redirection
        stdout = call_args["out"]
        if output_redirect_is_filename(stdout):
            stdout = open(str(stdout), "wb")

        # stderr redirection
        stderr = call_args["err"]
        if output_redirect_is_filename(stderr):
            stderr = open(str(stderr), "wb")


        return RunningCommand(cmd, call_args, stdin, stdout, stderr)




def _start_daemon_thread(fn, *args):
    thrd = threading.Thread(target=fn, args=args)
    thrd.daemon = True
    thrd.start()
    return thrd


def setwinsize(fd, rows_cols):
    """ set the terminal size of a tty file descriptor. borrowed logic
    from pexpect.py """
    rows, cols = rows_cols
    TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)

    s = struct.pack('HHHH', rows, cols, 0, 0)
    fcntl.ioctl(fd, TIOCSWINSZ, s)

def construct_streamreader_callback(process, handler):
    """ here we're constructing a closure for our streamreader callback. this
    is used in the case that we pass a callback into _out or _err, meaning we
    want our callback to handle each bit of output

    we construct the closure based on how many arguments it takes. the reason
    for this is to make it as easy as possible for people to use, without
    limiting them. a new user will assume the callback takes 1 argument (the
    data). as they get more advanced, they may want to terminate the process,
    or pass some stdin back, and will realize that they can pass a callback of
    more args """


    # implied arg refers to the "self" that methods will pass in. we need to
    # account for this implied arg when figuring out what function the user
    # passed in based on number of args
    implied_arg = 0

    partial_args = 0
    handler_to_inspect = handler

    if isinstance(handler, partial):
        partial_args = len(handler.args)
        handler_to_inspect = handler.func

    if inspect.ismethod(handler_to_inspect):
        implied_arg = 1
        num_args = len(inspect.getargspec(handler_to_inspect).args)

    else:
        if inspect.isfunction(handler_to_inspect):
            num_args = len(inspect.getargspec(handler_to_inspect).args)

        # is an object instance with __call__ method
        else:
            implied_arg = 1
            num_args = len(inspect.getargspec(handler_to_inspect.__call__).args)


    net_args = num_args - implied_arg - partial_args

    handler_args = ()

    # just the chunk
    if net_args == 1:
        handler_args = ()

    # chunk, stdin
    if net_args == 2:
        handler_args = (process.stdin,)

    # chunk, stdin, process
    elif net_args == 3:
        # notice we're only storing a weakref, to prevent cyclic references
        # (where the process holds a streamreader, and a streamreader holds a
        # handler-closure with a reference to the process)
        handler_args = (process.stdin, weakref.ref(process))

    def fn(chunk):
        # this is pretty ugly, but we're evaluating the process at call-time,
        # because it's a weakref
        args = handler_args
        if len(args) == 2:
            args = (handler_args[0], handler_args[1]())
        return handler(chunk, *args)

    return fn



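# illustrative sketch (not part of the library): the three callback shapes the
# closure above supports, usable as _out/_err handlers:
#
#   def h1(chunk): ...                     # gets just the output chunk
#   def h2(chunk, stdin): ...              # can also feed the process's stdin
#   def h3(chunk, stdin, process): ...     # can also e.g. process.kill()
#
# a truthy return value from the handler tells the streamreader machinery to
# stop sending it data.

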
def handle_process_exit_code(exit_code):
    """ this should only ever be called once for each child process """
    # if we exited from a signal, let our exit code reflect that
    if os.WIFSIGNALED(exit_code):
        return -os.WTERMSIG(exit_code)
    # otherwise just give us a normal exit code
    elif os.WIFEXITED(exit_code):
        return os.WEXITSTATUS(exit_code)
    else:
        raise RuntimeError("Unknown child exit status!")



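# illustrative sketch (not part of the library): os.waitpid returns a 16-bit
# encoded status which the helper above normalizes, e.g.:
#
#   handle_process_exit_code(9)        # killed by SIGKILL -> returns -9
#   handle_process_exit_code(3 << 8)   # exited with code 3 -> returns 3

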
class OProc(object):
    """ this class is instantiated by RunningCommand for a command to be exec'd.
    it handles all the nasty business involved with correctly setting up the
    input/output to the child process. it gets its name from subprocess.Popen
    (process open) but we're calling ours OProc (open process) """

    _default_window_size = (24, 80)

    # used in redirecting
    STDOUT = -1
    STDERR = -2

    def __init__(self, parent_log, cmd, stdin, stdout, stderr, call_args, pipe):
        """
        cmd is the full string that will be exec'd. it includes the program
        name and all its arguments

        stdin, stdout, stderr are what the child will use for standard
        input/output/err

        call_args is a mapping of all the special keyword arguments to apply
        to the child process
        """

        self.call_args = call_args

        # I had issues with getting 'Input/Output error reading stdin' from dd,
        # until I set _tty_out=False
        if self.call_args["piped"] == "direct":
            self.call_args["tty_out"] = False

        self._single_tty = self.call_args["tty_in"] and self.call_args["tty_out"]

        # this logic is a little convoluted, but basically this top-level
        # if/else is for consolidating input and output TTYs into a single
        # TTY. this is the only way some secure programs like ssh will
        # output correctly (that is, if stdout and stdin are both the same TTY)
        if self._single_tty:
            self._stdin_fd, self._slave_stdin_fd = pty.openpty()

            self._stdout_fd = self._stdin_fd
            self._slave_stdout_fd = self._slave_stdin_fd

            self._stderr_fd = self._stdin_fd
            self._slave_stderr_fd = self._slave_stdin_fd

        # do not consolidate stdin and stdout. this is the most common use-
        # case
        else:
            # this check here is because we may be doing "direct" piping
            # (_piped="direct"), and so our stdin might be an instance of
            # OProc
            if isinstance(stdin, OProc):
                self._slave_stdin_fd = stdin._stdout_fd
                self._stdin_fd = None
            elif self.call_args["tty_in"]:
                self._slave_stdin_fd, self._stdin_fd = pty.openpty()
            # tty_in=False is the default
            else:
                self._slave_stdin_fd, self._stdin_fd = os.pipe()


            # tty_out=True is the default
            if self.call_args["tty_out"]:
                self._stdout_fd, self._slave_stdout_fd = pty.openpty()
            else:
                self._stdout_fd, self._slave_stdout_fd = os.pipe()

            # unless STDERR is going to STDOUT, it ALWAYS needs to be a pipe,
            # and never a PTY. the reason for this is not totally clear to me,
            # but it has to do with the fact that if STDERR isn't set as the
            # CTTY (because STDOUT is), the STDERR buffer won't always flush
            # by the time the process exits, and the data will be lost.
            # i've only seen this on OSX.
            if stderr is not OProc.STDOUT:
                self._stderr_fd, self._slave_stderr_fd = os.pipe()


        # this is a hack, but what we're doing here is intentionally throwing an
        # OSError exception if our child process's directory doesn't exist,
        # but we're doing it BEFORE we fork. the reason for before the fork is
        # error handling. i'm currently too lazy to implement what
        # subprocess.py did and set up an error pipe to handle exceptions that
        # happen in the child between fork and exec. it has only been seen in
        # the wild for a missing cwd, so we'll handle it here.
        cwd = self.call_args["cwd"]
        if cwd is not None and not os.path.exists(cwd):
            os.chdir(cwd)


        gc_enabled = gc.isenabled()
        if gc_enabled:
            gc.disable()
        self.pid = os.fork()


        # child
        if self.pid == 0: # pragma: no cover
            try:
                # ignoring SIGHUP lets us persist even after the parent process
                # exits. only ignore if we're backgrounded
                if self.call_args["bg"] is True:
                    signal.signal(signal.SIGHUP, signal.SIG_IGN)

                # this piece of ugliness is due to a bug where we can lose output
                # if we do os.close(self._slave_stdout_fd) in the parent after
                # the child starts writing.
                # see http://bugs.python.org/issue15898
                if IS_OSX:
                    time.sleep(0.01)

                os.setsid()

                if self.call_args["tty_out"]:
                    # set raw mode, so there isn't any weird translation of
                    # newlines to \r\n and other oddities. we're not outputting
                    # to a terminal anyways
                    #
                    # we HAVE to do this here, and not in the parent process,
                    # because we have to guarantee that this is set before the
                    # child process is run, and we can't do it twice.
                    tty.setraw(self._slave_stdout_fd)


                # if the parent-side fd for stdin exists, close it. the case
                # where it may not exist is if we're using piped="direct"
                if self._stdin_fd:
                    os.close(self._stdin_fd)

                if not self._single_tty:
                    os.close(self._stdout_fd)
                    if stderr is not OProc.STDOUT:
                        os.close(self._stderr_fd)


                if cwd:
                    os.chdir(cwd)

                os.dup2(self._slave_stdin_fd, 0)
                os.dup2(self._slave_stdout_fd, 1)

                # if we're directing stderr to stdout, dup the stdout slave
                # onto fd 2; otherwise hook up the stderr slave to fd 2, the
                # common stderr fd
                if stderr is OProc.STDOUT:
                    os.dup2(self._slave_stdout_fd, 2)
                else:
                    os.dup2(self._slave_stderr_fd, 2)

                # don't inherit file descriptors
                max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
                os.closerange(3, max_fd)


                # set our controlling terminal. tty_out defaults to true
                if self.call_args["tty_out"]:
                    tmp_fd = os.open(os.ttyname(1), os.O_RDWR)
                    os.close(tmp_fd)


                if self.call_args["tty_out"]:
                    setwinsize(1, self.call_args["tty_size"])

                # actually execute the process
                if self.call_args["env"] is None:
                    os.execv(cmd[0], cmd)
                else:
                    os.execve(cmd[0], cmd, self.call_args["env"])

            # we must ensure that we ALWAYS exit the child process, otherwise
            # the parent process code will be executed twice on exception
            # https://github.com/amoffat/sh/issues/202
            #
            # if your parent process experiences an exit code 255, it is most
            # likely that an exception occurred between the fork of the child
            # and the exec. this should be reported.
            finally:
                os._exit(255)

        # parent
        else:
            if gc_enabled:
                gc.enable()

            # used to determine what exception to raise. if our process was
            # killed via a timeout counter, we'll raise something different than
            # a SIGKILL exception
            self.timed_out = False

            self.started = time.time()
            self.cmd = cmd

            # exit code should only be manipulated from within self._wait_lock
            # to prevent race conditions
            self.exit_code = None

            self.stdin = stdin or Queue()

            # _pipe_queue is used internally to hand off stdout from one process
            # to another. by default, all stdout from a process gets dumped
            # into this pipe queue, to be consumed in real time (hence the
            # thread-safe Queue), or at a potentially later time
            self._pipe_queue = Queue()

            # this is used to prevent a race condition when we're waiting for
            # a process to end, and the OProc's internal threads are also
            # checking for the process's end
            self._wait_lock = threading.Lock()

            # these are for aggregating the stdout and stderr. we use a deque
            # because we don't want to overflow
            self._stdout = deque(maxlen=self.call_args["internal_bufsize"])
            self._stderr = deque(maxlen=self.call_args["internal_bufsize"])

            if self.call_args["tty_in"]:
                setwinsize(self._stdin_fd, self.call_args["tty_size"])


            self.log = parent_log.get_child("process", repr(self))

            os.close(self._slave_stdin_fd)
            if not self._single_tty:
                os.close(self._slave_stdout_fd)
                if stderr is not OProc.STDOUT:
                    os.close(self._slave_stderr_fd)

            self.log.debug("started process")


            if self.call_args["tty_in"]:
                attr = termios.tcgetattr(self._stdin_fd)
                attr[3] &= ~termios.ECHO
                termios.tcsetattr(self._stdin_fd, termios.TCSANOW, attr)

            # this represents the connection from a Queue object (or whatever
            # we're using to feed STDIN) to the process's STDIN fd
            self._stdin_stream = None
            if not isinstance(self.stdin, OProc):
                self._stdin_stream = \
                        StreamWriter(self.log.get_child("streamwriter",
                            "stdin"), self._stdin_fd, self.stdin,
                            self.call_args["in_bufsize"],
                            self.call_args["encoding"],
                            self.call_args["tty_in"])

            stdout_pipe = None
            if pipe is OProc.STDOUT and not self.call_args["no_pipe"]:
                stdout_pipe = self._pipe_queue


            # this represents the connection from a process's STDOUT fd to
            # wherever it has to go, sometimes a pipe Queue (that we will use
            # to pipe data to other processes), and also an internal deque
            # that we use to aggregate all the output
            save_stdout = not self.call_args["no_out"] and \
                (self.call_args["tee"] in (True, "out") or stdout is None)


            # if we're piping directly into another process's file descriptor,
            # we bypass reading from the stdout stream altogether, because
            # we've already hooked up this process's stdout fd to the other
            # process's stdin fd
            self._stdout_stream = None
            if self.call_args["piped"] != "direct":
                if callable(stdout):
                    stdout = construct_streamreader_callback(self, stdout)
                self._stdout_stream = \
                        StreamReader(self.log.get_child("streamreader",
                            "stdout"), self._stdout_fd, stdout, self._stdout,
                            self.call_args["out_bufsize"],
                            self.call_args["encoding"],
                            self.call_args["decode_errors"], stdout_pipe,
                            save_data=save_stdout)

            if stderr is OProc.STDOUT or self._single_tty:
                self._stderr_stream = None
            else:
                stderr_pipe = None
                if pipe is OProc.STDERR and not self.call_args["no_pipe"]:
                    stderr_pipe = self._pipe_queue

                save_stderr = not self.call_args["no_err"] and \
                    (self.call_args["tee"] in ("err",) or stderr is None)

                if callable(stderr):
                    stderr = construct_streamreader_callback(self, stderr)

                self._stderr_stream = StreamReader(Logger("streamreader"),
                        self._stderr_fd, stderr, self._stderr,
                        self.call_args["err_bufsize"], self.call_args["encoding"],
                        self.call_args["decode_errors"], stderr_pipe,
                        save_data=save_stderr)


            # start the main io threads. the stdin thread is not needed if we
            # are connecting from another process's stdout pipe
            self._input_thread = None
            if self._stdin_stream:
                self._input_thread = _start_daemon_thread(self.input_thread,
                        self._stdin_stream)

            self._output_thread = _start_daemon_thread(self.output_thread,
                    self._stdout_stream, self._stderr_stream,
                    self.call_args["timeout"], self.started,
                    self.call_args["timeout_signal"])


    def __repr__(self):
        return "<Process %d %r>" % (self.pid, self.cmd[:500])


    def change_in_bufsize(self, buf):
        self._stdin_stream.stream_bufferer.change_buffering(buf)

    def change_out_bufsize(self, buf):
        self._stdout_stream.stream_bufferer.change_buffering(buf)

    def change_err_bufsize(self, buf):
        self._stderr_stream.stream_bufferer.change_buffering(buf)


    def input_thread(self, stdin):
        """ this is run in a separate thread. it writes into our process's
        stdin (a streamwriter), and waits for the process to end AND for
        everything that can be written to be written """
        done = False
        while not done and self.is_alive():
            self.log.debug("%r ready for more input", stdin)
            done = stdin.write()

        stdin.close()


    def output_thread(self, stdout, stderr, timeout, started, timeout_exc):
        """ this function is run in a separate thread. it reads from the
        process's stdout stream (a streamreader), and waits for it to claim
        that it's done """

        readers = []
        errors = []

        if stdout is not None:
            readers.append(stdout)
            errors.append(stdout)
        if stderr is not None:
            readers.append(stderr)
            errors.append(stderr)

        # this is our select loop for polling stdout or stderr that is ready to
        # be read and processed. if one of those streamreaders indicates that
        # it is done altogether being read from, we remove it from our list of
        # things to poll. when no more things are left to poll, we leave this
        # loop and clean up
        while readers:
            outputs, inputs, err = select.select(readers, [], errors, 0.1)

            # stdout and stderr
            for stream in outputs:
                self.log.debug("%r ready to be read from", stream)
                done = stream.read()
                if done:
                    readers.remove(stream)

            for stream in err:
                pass

            # test if the process has been running too long
            if timeout:
                now = time.time()
                if now - started > timeout:
                    self.log.debug("we've been running too long")
                    self.timed_out = True
                    self.signal(timeout_exc)


        # this is here because stdout may be the controlling TTY, and
        # we can't close it until the process has ended, otherwise the
        # child will get SIGHUP. typically, if we've broken out of
        # the above loop, and we're here, the process is just about to
        # end, so it's probably ok to aggressively poll self.is_alive()
        #
        # the other option to this would be to do the CTTY close from
        # the method that does the actual os.waitpid() call, but the
        # problem with that is that the above loop might still be
        # running, and closing the fd will cause some operation to
        # fail. this is less complex than wrapping all the ops
        # in the above loop with out-of-band fd-close exceptions
        while self.is_alive():
            time.sleep(0.001)

        if stdout:
            stdout.close()

        if stderr:
            stderr.close()


    @property
    def stdout(self):
        return "".encode(self.call_args["encoding"]).join(self._stdout)

    @property
    def stderr(self):
        return "".encode(self.call_args["encoding"]).join(self._stderr)


    def signal(self, sig):
        self.log.debug("sending signal %d", sig)
        try:
            os.kill(self.pid, sig)
        except OSError:
            pass

    def kill(self):
        self.log.debug("killing")
        self.signal(signal.SIGKILL)

    def terminate(self):
        self.log.debug("terminating")
        self.signal(signal.SIGTERM)


    def is_alive(self):
        """ polls if our child process has completed, without blocking. this
        method has side-effects, such as setting our exit_code, if we happen to
        see our child exit while this is running """

        if self.exit_code is not None:
            return False

        # what we're doing here essentially is making sure that the main thread
        # (or another thread) isn't calling .wait() on the process. because
        # .wait() calls os.waitpid(self.pid, 0), we can't do an os.waitpid
        # here...because if we did, and the process exited while in this
        # thread, the main thread's os.waitpid(self.pid, 0) would raise OSError
        # (because the process ended in another thread).
        #
        # so essentially what we're doing is, using this lock, checking if
        # we're calling .wait(), and if we are, let .wait() get the exit code
        # and handle the status, otherwise let us do it.
        acquired = self._wait_lock.acquire(False)
        if not acquired:
            if self.exit_code is not None:
                return False
            return True

        try:
            # WNOHANG is just that...we're calling waitpid without hanging...
            # essentially polling the process. the return result is (0, 0) if
            # there's no process status, so we check that pid == self.pid below
            # in order to determine how to proceed
            pid, exit_code = os.waitpid(self.pid, os.WNOHANG)
            if pid == self.pid:
                self.exit_code = handle_process_exit_code(exit_code)
                return False

        # no child process
        except OSError:
            return False
        else:
            return True
        finally:
            self._wait_lock.release()


    def wait(self):
        """ waits for the process to complete, handles the exit code """

        self.log.debug("acquiring wait lock to wait for completion")
        # using the lock in a with-context blocks, which is what we want if
        # we're running wait()
        with self._wait_lock:
            self.log.debug("got wait lock")

            if self.exit_code is None:
                self.log.debug("exit code not set, waiting on pid")
                pid, exit_code = os.waitpid(self.pid, 0) # blocks
                self.exit_code = handle_process_exit_code(exit_code)
            else:
                self.log.debug("exit code already set (%d), no need to wait", self.exit_code)

            # we may not have a thread for stdin, if the pipe has been connected
            # via _piped="direct"
            if self._input_thread:
                self._input_thread.join()

            # wait for our stdout and stderr streamreaders to finish reading
            # and aggregating the process output
            self._output_thread.join()

            return self.exit_code




class DoneReadingForever(Exception): pass
class NotYetReadyToRead(Exception): pass


def determine_how_to_read_input(input_obj):
    """ given some kind of input object, return a function that knows how to
    read chunks of that input object.

    each reader function should return a chunk, and should raise
    DoneReadingForever (or return None) when there's no more data to read

    NOTE: the function returned does not need to care much about the requested
    buffering type (eg, unbuffered vs newline-buffered). the StreamBufferer
    will take care of that. these functions just need to return a
    reasonably-sized chunk of data. """

    get_chunk = None

    if isinstance(input_obj, Queue):
        log_msg = "queue"
        get_chunk = get_queue_chunk_reader(input_obj)

    elif callable(input_obj):
        log_msg = "callable"
        get_chunk = get_callable_chunk_reader(input_obj)

    # also handles stringio
    elif hasattr(input_obj, "read"):
        log_msg = "file descriptor"
        get_chunk = get_file_chunk_reader(input_obj)

    elif isinstance(input_obj, basestring):
        log_msg = "string"
        get_chunk = get_iter_string_reader(input_obj)

    else:
        log_msg = "general iterable"
        get_chunk = get_iter_chunk_reader(iter(input_obj))

    return get_chunk, log_msg


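# illustrative sketch (not part of the library): the kinds of _in values the
# dispatcher above accepts. "cat" is just an example program:
#
#   sh.cat(_in="hello")                    # string
#   sh.cat(_in=open("some_file"))          # file-like object (has "read")
#   sh.cat(_in=["line1\n", "line2\n"])     # general iterable
#   q = Queue()
#   sh.cat(_in=q, _bg=True)                # queue, fed incrementally; putting
#                                          # None into the queue signals EOF

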
def get_queue_chunk_reader(stdin):
    def fn():
        try:
            chunk = stdin.get(True, 0.01)
        except Empty:
            raise NotYetReadyToRead
        if chunk is None:
            raise DoneReadingForever
        return chunk
    return fn


def get_callable_chunk_reader(stdin):
    def fn():
        try:
            return stdin()
        except:
            raise DoneReadingForever
    return fn


def get_iter_string_reader(stdin):
    """ return a function that returns a chunk of the string every time it is
    called. notice that even though bufsize_type might be line buffered, we're
    not doing any line buffering here. that's because our StreamBufferer
    handles all buffering. we just need to return a reasonable-sized chunk. """
    bufsize = 1024
    iter_str = (stdin[i:i + bufsize] for i in range(0, len(stdin), bufsize))
    return get_iter_chunk_reader(iter_str)


def get_iter_chunk_reader(stdin):
    def fn():
        try:
            if IS_PY3:
                chunk = stdin.__next__()
            else:
                chunk = stdin.next()
            return chunk
        except StopIteration:
            raise DoneReadingForever
    return fn

def get_file_chunk_reader(stdin):
    bufsize = 1024

    def fn():
        chunk = stdin.read(bufsize)
        if not chunk:
            raise DoneReadingForever
        else:
            return chunk
    return fn


def bufsize_type_to_bufsize(bf_type):
    """ for a given bufsize type, return the actual bufsize we will read.
    notice that although 1 means "newline-buffered", we're reading a chunk size
    of 1024. this is because we have to read something. we let a
    StreamBufferer instance handle splitting our chunk on newlines """

    # newlines
    if bf_type == 1:
        bufsize = 1024
    # unbuffered
    elif bf_type == 0:
        bufsize = 1
    # or buffered by specific amount
    else:
        bufsize = bf_type

    return bufsize



1738class StreamWriter(object):
1739 """ StreamWriter reads from some input (the stdin param) and writes to a fd
1740 (the stream param). the stdin may be a Queue, a callable, something with
1741 the "read" method, a string, or an iterable """
1742
1743 def __init__(self, log, stream, stdin, bufsize_type, encoding, tty_in):
1744 self.stream = stream
1745 self.stdin = stdin
1746
1747 self.log = log
1748 self.encoding = encoding
1749 self.tty_in = tty_in
1750
1751
1752 self.stream_bufferer = StreamBufferer(bufsize_type, self.encoding)
1753 self.get_chunk, log_msg = determine_how_to_read_input(stdin)
1754 self.log.debug("parsed stdin as a %s", log_msg)
1755
1756
1757 def fileno(self):
1758 """ defining this allows us to do select.select on an instance of this
1759 class """
1760 return self.stream
1761
1762
1763
    def write(self):
        """ attempt to get a chunk of data to write to our child process's
        stdin, then write it.  the return value answers the question "are we
        done writing forever?" """

        # get_chunk may sometimes return bytes, and sometimes strings,
        # because of the nature of the different types of STDIN objects we
        # support
        try:
            chunk = self.get_chunk()
            if chunk is None:
                raise DoneReadingForever

        except DoneReadingForever:
            self.log.debug("done reading")

            if self.tty_in:
                # EOF time
                try:
                    char = termios.tcgetattr(self.stream)[6][termios.VEOF]
                except:
                    char = chr(4).encode()
                os.write(self.stream, char)

            return True

        except NotYetReadyToRead:
            self.log.debug("received no data")
            return False

        # if we're not bytes, make us bytes
        if IS_PY3 and hasattr(chunk, "encode"):
            chunk = chunk.encode(self.encoding)

        for proc_chunk in self.stream_bufferer.process(chunk):
            self.log.debug("got chunk size %d: %r", len(proc_chunk),
                    proc_chunk[:30])

            self.log.debug("writing chunk to process")
            try:
                os.write(self.stream, proc_chunk)
            except OSError:
                self.log.debug("OSError writing stdin chunk")
                return True

    def close(self):
        self.log.debug("closing, but flushing first")
        chunk = self.stream_bufferer.flush()
        self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30])
        try:
            if chunk:
                os.write(self.stream, chunk)

            if not self.tty_in:
                self.log.debug("we're not using a TTY, so closing the stream")
                os.close(self.stream)

        except OSError:
            pass


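# illustrative sketch (ours, not the library's): driving a StreamWriter by
# hand over a plain os.pipe().  we assume a stdlib logger is an acceptable
# stand-in for the module's Logger, since StreamWriter only calls .debug()
# on it
def _example_stream_writer():
    import logging
    read_fd, write_fd = os.pipe()
    writer = StreamWriter(
        log=logging.getLogger("sh.example"),
        stream=write_fd,
        stdin="hello world\n",
        bufsize_type=0,         # unbuffered
        encoding=DEFAULT_ENCODING,
        tty_in=False,
    )
    # write() returns True once the input source is exhausted
    while not writer.write():
        pass
    writer.close()              # flushes, then closes write_fd (no TTY here)
    assert os.read(read_fd, 1024) == "hello world\n".encode(DEFAULT_ENCODING)
    os.close(read_fd)
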

def determine_how_to_feed_output(handler, encoding, decode_errors):
    if callable(handler):
        process, finish = get_callback_chunk_consumer(handler, encoding,
                decode_errors)
    elif isinstance(handler, cStringIO):
        process, finish = get_cstringio_chunk_consumer(handler)
    elif isinstance(handler, StringIO):
        process, finish = get_stringio_chunk_consumer(handler, encoding,
                decode_errors)
    elif hasattr(handler, "write"):
        process, finish = get_file_chunk_consumer(handler)
    else:
        process = lambda chunk: False
        finish = lambda: None

    return process, finish


def get_file_chunk_consumer(handler):
    def process(chunk):
        handler.write(chunk)
        # we should flush on an fd.  chunk is already the correctly-buffered
        # size, so we don't need the fd buffering as well
        handler.flush()
        return False

    def finish():
        if hasattr(handler, "flush"):
            handler.flush()

    return process, finish

def get_callback_chunk_consumer(handler, encoding, decode_errors):
    def process(chunk):
        # try to use the encoding first, and if that doesn't work, send
        # the bytes, because it might be binary
        try:
            chunk = chunk.decode(encoding, decode_errors)
        except UnicodeDecodeError:
            pass
        return handler(chunk)

    def finish():
        pass

    return process, finish

def get_cstringio_chunk_consumer(handler):
    def process(chunk):
        handler.write(chunk)
        return False

    def finish():
        pass

    return process, finish


def get_stringio_chunk_consumer(handler, encoding, decode_errors):
    def process(chunk):
        handler.write(chunk.decode(encoding, decode_errors))
        return False

    def finish():
        pass

    return process, finish
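
# illustrative sketch (ours): feeding bytes through the dispatch above with
# a callable handler.  the handler's return value becomes the "should we
# stop processing output?" flag used by StreamReader below
def _example_output_consumer():
    received = []

    def handler(chunk):
        received.append(chunk)
        return False            # False means "keep going"

    process, finish = determine_how_to_feed_output(
            handler, DEFAULT_ENCODING, "strict")
    process("hi there\n".encode(DEFAULT_ENCODING))
    finish()
    assert received == ["hi there\n"]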


class StreamReader(object):
    """ reads from some output (the stream) and sends what it just read to the
    handler. """
    def __init__(self, log, stream, handler, buffer, bufsize_type, encoding,
            decode_errors, pipe_queue=None, save_data=True):
        self.stream = stream
        self.buffer = buffer
        self.save_data = save_data
        self.encoding = encoding
        self.decode_errors = decode_errors

        self.pipe_queue = None
        if pipe_queue:
            self.pipe_queue = weakref.ref(pipe_queue)

        self.log = log

        self.stream_bufferer = StreamBufferer(bufsize_type, self.encoding,
                self.decode_errors)
        self.bufsize = bufsize_type_to_bufsize(bufsize_type)

        self.process_chunk, self.finish_chunk_processor = \
                determine_how_to_feed_output(handler, encoding, decode_errors)

        self.should_quit = False


    def fileno(self):
        """ defining this allows us to do select.select on an instance of this
        class """
        return self.stream

    def close(self):
        chunk = self.stream_bufferer.flush()
        self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30])
        if chunk:
            self.write_chunk(chunk)

        self.finish_chunk_processor()

        if self.pipe_queue and self.save_data:
            self.pipe_queue().put(None)

        try:
            os.close(self.stream)
        except OSError:
            pass


    def write_chunk(self, chunk):
        # in PY3, the chunk coming in will be bytes, so keep that in mind

        if not self.should_quit:
            self.should_quit = self.process_chunk(chunk)


        if self.save_data:
            self.buffer.append(chunk)

            if self.pipe_queue:
                self.log.debug("putting chunk onto pipe: %r", chunk[:30])
                self.pipe_queue().put(chunk)


    def read(self):
        # if we're PY3, we're reading bytes, otherwise we're reading
        # str
        try:
            chunk = os.read(self.stream, self.bufsize)
        except OSError as e:
            self.log.debug("got errno %d, done reading", e.errno)
            return True
        if not chunk:
            self.log.debug("got no chunk, done reading")
            return True

        self.log.debug("got chunk size %d: %r", len(chunk), chunk[:30])
        for chunk in self.stream_bufferer.process(chunk):
            self.write_chunk(chunk)


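# illustrative sketch (ours): pairing a StreamReader with a plain os.pipe()
# and a line-buffered StreamBufferer.  again we assume a stdlib logger is a
# fine stand-in for the module's Logger
def _example_stream_reader():
    import logging
    lines, raw = [], []

    def handler(line):
        lines.append(line)
        return False

    read_fd, write_fd = os.pipe()
    reader = StreamReader(
        log=logging.getLogger("sh.example"),
        stream=read_fd,
        handler=handler,
        buffer=raw,
        bufsize_type=1,         # line buffered
        encoding=DEFAULT_ENCODING,
        decode_errors="strict",
    )
    os.write(write_fd, "one\ntwo\n".encode(DEFAULT_ENCODING))
    os.close(write_fd)
    while not reader.read():    # read() returns True at EOF
        pass
    reader.close()
    assert lines == ["one\n", "two\n"]
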

class StreamBufferer(object):
    """ this is used for feeding in chunks of stdout/stderr, and breaking it up
    into chunks that will actually be put into the internal buffers.  for
    example, if you have two processes, one being piped to the other, and you
    want the first process to feed lines of data (instead of the chunks
    however they come in), OProc will use an instance of this class to chop up
    the data and feed it as lines to be sent down the pipe """

    def __init__(self, buffer_type, encoding=DEFAULT_ENCODING,
            decode_errors="strict"):
        # 0 for unbuffered, 1 for line, everything else for that amount
        self.type = buffer_type
        self.buffer = []
        self.n_buffer_count = 0
        self.encoding = encoding
        self.decode_errors = decode_errors

        # this is for if we change buffering types.  if we change from line
        # buffered to unbuffered, it's very possible that our self.buffer list
        # has data that was being saved up (while we searched for a newline).
        # we need to use it up, so we don't lose it
        self._use_up_buffer_first = False

        # the buffering lock is used because we might change the buffering
        # type from a different thread.  for example, if we have a stdout
        # callback, we might use it to change the way stdin buffers.  so we
        # lock
        self._buffering_lock = threading.RLock()
        self.log = Logger("stream_bufferer")


    def change_buffering(self, new_type):
        # TODO, when we stop supporting 2.6, make this a with context
        self.log.debug("acquiring buffering lock for changing buffering")
        self._buffering_lock.acquire()
        self.log.debug("got buffering lock for changing buffering")
        try:
            if new_type == 0:
                self._use_up_buffer_first = True

            self.type = new_type
        finally:
            self._buffering_lock.release()
            self.log.debug("released buffering lock for changing buffering")


    def process(self, chunk):
        # MAKE SURE THAT THE INPUT IS PY3 BYTES
        # THE OUTPUT IS ALWAYS PY3 BYTES

        # TODO, when we stop supporting 2.6, make this a with context
        self.log.debug("acquiring buffering lock to process chunk (buffering: %d)", self.type)
        self._buffering_lock.acquire()
        self.log.debug("got buffering lock to process chunk (buffering: %d)", self.type)
        try:
            # if we've encountered binary data, permanently switch to N size
            # buffering, since matching on newline doesn't make sense anymore
            if self.type == 1:
                try:
                    chunk.decode(self.encoding, self.decode_errors)
                except:
                    self.log.debug("detected binary data, changing buffering")
                    self.change_buffering(1024)

            # unbuffered
            if self.type == 0:
                if self._use_up_buffer_first:
                    self._use_up_buffer_first = False
                    to_write = self.buffer
                    self.buffer = []
                    to_write.append(chunk)
                    return to_write

                return [chunk]

            # line buffered
            # we must decode the bytes before we try to match on newline
            elif self.type == 1:
                total_to_write = []
                chunk = chunk.decode(self.encoding, self.decode_errors)
                while True:
                    newline = chunk.find("\n")
                    if newline == -1:
                        break

                    chunk_to_write = chunk[:newline + 1]
                    if self.buffer:
                        # this is ugly, but it's designed to take the existing
                        # bytes buffer, join it together, tack on our latest
                        # chunk, then convert the whole thing to a string.
                        # it's necessary, i'm sure.  read the whole block to
                        # see why.
                        chunk_to_write = "".encode(self.encoding).join(self.buffer) \
                            + chunk_to_write.encode(self.encoding)
                        chunk_to_write = chunk_to_write.decode(self.encoding)

                        self.buffer = []
                        self.n_buffer_count = 0

                    chunk = chunk[newline + 1:]
                    total_to_write.append(chunk_to_write.encode(self.encoding))

                if chunk:
                    self.buffer.append(chunk.encode(self.encoding))
                    self.n_buffer_count += len(chunk)
                return total_to_write

            # N size buffered
            else:
                total_to_write = []
                while True:
                    overage = self.n_buffer_count + len(chunk) - self.type
                    if overage >= 0:
                        ret = "".encode(self.encoding).join(self.buffer) + chunk
                        chunk_to_write = ret[:self.type]
                        chunk = ret[self.type:]
                        total_to_write.append(chunk_to_write)
                        self.buffer = []
                        self.n_buffer_count = 0
                    else:
                        self.buffer.append(chunk)
                        self.n_buffer_count += len(chunk)
                        break
                return total_to_write
        finally:
            self._buffering_lock.release()
            self.log.debug("released buffering lock for processing chunk (buffering: %d)", self.type)


    def flush(self):
        self.log.debug("acquiring buffering lock for flushing buffer")
        self._buffering_lock.acquire()
        self.log.debug("got buffering lock for flushing buffer")
        try:
            ret = "".encode(self.encoding).join(self.buffer)
            self.buffer = []
            return ret
        finally:
            self._buffering_lock.release()
            self.log.debug("released buffering lock for flushing buffer")


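# illustrative sketch (ours): line buffering reassembles lines across chunk
# boundaries.  input and output are bytes; partial lines are held until the
# newline arrives, and flush() returns whatever is left over
def _example_stream_bufferer():
    b = StreamBufferer(1)       # 1 == line buffered
    assert b.process("foo\nba".encode(DEFAULT_ENCODING)) == \
            ["foo\n".encode(DEFAULT_ENCODING)]
    assert b.process("r\nba".encode(DEFAULT_ENCODING)) == \
            ["bar\n".encode(DEFAULT_ENCODING)]
    assert b.flush() == "ba".encode(DEFAULT_ENCODING)
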

@contextmanager
def pushd(path):
    """ pushd is just a specialized form of args, where we're passing in the
    current working directory """
    with args(_cwd=path):
        yield


@contextmanager
def args(*args, **kwargs):
    """ allows us to temporarily override all the special keyword parameters in
    a with context """
    call_args = Command._call_args
    old_args = call_args.copy()

    for key, value in kwargs.items():
        key = key.lstrip("_")
        call_args[key] = value

    yield
    call_args.update(old_args)


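# illustrative usage sketch (ours): these context managers temporarily
# override the default special keyword arguments for every command, e.g.
# running commands from a different working directory.  note the module
# itself warns that with-contexts are deprecated in favor of subcommands
def _example_pushd():
    with pushd("/tmp"):
        # every command launched inside this block gets _cwd="/tmp"
        print(Command._create("pwd")())
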

class Environment(dict):
    """ this allows names that aren't found in the global scope to be looked
    up as program names.  for example, if "ls" isn't found in this module's
    scope, we consider it a system program and try to find it.

    we use a dict instead of just a regular object as the base class because
    the exec() statement used in this file requires the "globals" argument to
    be a dictionary """


    # this is the set of all the names that the sh module exports that will
    # not resolve to functions.  we don't want to accidentally shadow real
    # commands with functions/imports that we define in sh.py.  for example,
    # "import time" may override the time system program
    whitelist = set([
        "Command",
        "CommandNotFound",
        "DEFAULT_ENCODING",
        "DoneReadingForever",
        "ErrorReturnCode",
        "NotYetReadyToRead",
        "SignalException",
        "TimeoutException",
        "__project_url__",
        "__version__",
        "args",
        "glob",
        "pushd",
    ])

    def __init__(self, globs, baked_args={}):
        self.globs = globs
        self.baked_args = baked_args
        self.disable_whitelist = False

    def __setitem__(self, k, v):
        self.globs[k] = v

    def __getitem__(self, k):
        # if we first import "_disable_whitelist" from sh, we can import
        # anything defined in the global scope of sh.py.  this is useful for
        # our tests
        if k == "_disable_whitelist":
            self.disable_whitelist = True
            return None

        # we're trying to import something real (maybe), see if it's in our
        # global scope
        if k in self.whitelist or self.disable_whitelist:
            try:
                return self.globs[k]
            except KeyError:
                pass

        # somebody tried to be funny and do "from sh import *"
        if k == "__all__":
            raise AttributeError("Cannot import * from sh. \
Please import sh or import programs individually.")


        # check if we're naming a dynamically generated ReturnCode exception
        exc = get_exc_from_name(k)
        if exc:
            return exc


        # https://github.com/ipython/ipython/issues/2577
        # https://github.com/amoffat/sh/issues/97#issuecomment-10610629
        if k.startswith("__") and k.endswith("__"):
            raise AttributeError

        # how about an environment variable?
        try:
            return os.environ[k]
        except KeyError:
            pass

        # is it a custom builtin?
        builtin = getattr(self, "b_" + k, None)
        if builtin:
            return builtin

        # it must be a command then.
        # we use _create instead of instantiating the class directly because
        # _create uses resolve_program, which will automatically do underscore-
        # to-dash conversions.  instantiating directly does not use that
        return Command._create(k, **self.baked_args)


    # methods that begin with "b_" are custom builtins and will override any
    # program that exists in our path.  this is useful for things like
    # common shell builtins that people are used to, but which aren't actually
    # full-fledged system binaries

    def b_cd(self, path):
        os.chdir(path)

    def b_which(self, program):
        return which(program)


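# illustrative sketch (ours): the lookup chain above, exercised directly.
# anything not whitelisted, not an environment variable, and not a "b_"
# builtin falls through to Command._create and resolves as a program (so
# env["ls"] raises CommandNotFound on a system without ls)
def _example_environment_lookup():
    env = Environment(globals())
    assert env["__version__"] == __version__    # whitelisted module global
    cd = env["cd"]                              # the "b_cd" custom builtin
    ls = env["ls"]                              # resolved as a system command
    print(cd, ls)
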


def run_repl(env): # pragma: no cover
    banner = "\n>> sh v{version}\n>> https://github.com/amoffat/sh\n"

    print(banner.format(version=__version__))
    while True:
        try:
            line = raw_input("sh> ")
        except (ValueError, EOFError):
            break

        try:
            exec(compile(line, "<dummy>", "single"), env, env)
        except SystemExit:
            break
        except:
            print(traceback.format_exc())

    # cleans up our last line
    print("")




# this is a thin wrapper around THIS module (we patch sys.modules[__name__]).
# this is in the case that the user does a "from sh import whatever".
# in other words, they only want to import certain programs, not the whole
# system PATH worth of commands.  in this case, we just proxy the
# import lookup to our Environment class
class SelfWrapper(ModuleType):
    def __init__(self, self_module, baked_args={}):
        # this is super ugly to have to copy attributes like this,
        # but it seems to be the only way to make reload() behave
        # nicely.  if i make these attributes dynamic lookups in
        # __getattr__, reload sometimes chokes in weird ways...
        for attr in ["__builtins__", "__doc__", "__name__", "__package__"]:
            setattr(self, attr, getattr(self_module, attr, None))

        # python 3.2 (2.7 and 3.3 work fine) breaks on osx (not ubuntu)
        # if we set this to None.  and 3.3 needs a value for __path__
        self.__path__ = []
        self.__self_module = self_module
        self.__env = Environment(globals(), baked_args)

    def __setattr__(self, name, value):
        if hasattr(self, "__env"):
            self.__env[name] = value
        else:
            ModuleType.__setattr__(self, name, value)

    def __getattr__(self, name):
        if name == "__env":
            raise AttributeError
        return self.__env[name]

    # accepts special keyword arguments that become the default call
    # arguments for every command created through the returned SelfWrapper
    def __call__(self, **kwargs):
        return SelfWrapper(self.__self_module, kwargs)


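# illustrative usage sketch (ours): because sys.modules[__name__] is replaced
# with a SelfWrapper below, the imported module itself can be called to bake
# in default special keyword arguments for every command it creates
def _example_baked_args():
    import sh
    sh2 = sh(_tty_out=False)    # every command from sh2 gets _tty_out=False
    print(sh2.ls("/tmp"))
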

# we're being run as a stand-alone script
if __name__ == "__main__": # pragma: no cover
    try:
        arg = sys.argv.pop(1)
    except:
        arg = None

    if arg == "test":
        import subprocess

        def run_test(version, locale):
            py_version = "python%s" % version
            py_bin = which(py_version)

            if py_bin:
                print("Testing %s, locale %r" % (py_version.capitalize(),
                        locale))

                env = os.environ.copy()
                env["LANG"] = locale
                p = subprocess.Popen([py_bin, os.path.join(THIS_DIR, "test.py")]
                        + sys.argv[1:], env=env)
                return_code = p.wait()

                if return_code != 0:
                    exit(1)
            else:
                print("Couldn't find %s, skipping" % py_version.capitalize())

        versions = ("2.6", "2.7", "3.1", "3.2", "3.3", "3.4")
        locales = ("en_US.UTF-8", "C")
        for locale in locales:
            for version in versions:
                run_test(version, locale)

    else:
        env = Environment(globals())
        run_repl(env)

# we're being imported from somewhere
else:
    self = sys.modules[__name__]
    sys.modules[__name__] = SelfWrapper(self)