first commit

Ayxan
2022-05-23 00:16:32 +04:00
commit d660f2a4ca
24786 changed files with 4428337 additions and 0 deletions

@@ -0,0 +1,331 @@
# -*- coding: utf-8 -*-
"""
Class and program to colorize python source code for ANSI terminals.
Based on an HTML code highlighter by Jurgen Hermann found at:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52298
Modifications by Fernando Perez (fperez@colorado.edu).
Information on the original HTML highlighter follows:
MoinMoin - Python Source Parser
Title: Colorize Python source using the built-in tokenizer
Submitter: Jurgen Hermann
Last Updated: 2001/04/06
Version no: 1.2
Description:
This code is part of MoinMoin (http://moin.sourceforge.net/) and converts
Python source code to HTML markup, rendering comments, keywords,
operators, numeric and string literals in different colors.
It shows how to use the built-in keyword, token and tokenize modules to
scan Python source code and re-emit it with no changes to its original
formatting (which is the hard part).
"""
__all__ = ['ANSICodeColors', 'Parser']
_scheme_default = 'Linux'
# Imports
import keyword
import os
import sys
import token
import tokenize
generate_tokens = tokenize.generate_tokens
from IPython.utils.coloransi import TermColors, InputTermColors, ColorScheme, ColorSchemeTable
from .colorable import Colorable
from io import StringIO
#############################################################################
### Python Source Parser (does Highlighting)
#############################################################################
_KEYWORD = token.NT_OFFSET + 1
_TEXT = token.NT_OFFSET + 2
#****************************************************************************
# Builtin color schemes
Colors = TermColors # just a shorthand
# Build a few color schemes
NoColor = ColorScheme(
'NoColor',{
'header' : Colors.NoColor,
token.NUMBER : Colors.NoColor,
token.OP : Colors.NoColor,
token.STRING : Colors.NoColor,
tokenize.COMMENT : Colors.NoColor,
token.NAME : Colors.NoColor,
token.ERRORTOKEN : Colors.NoColor,
_KEYWORD : Colors.NoColor,
_TEXT : Colors.NoColor,
'in_prompt' : InputTermColors.NoColor, # Input prompt
'in_number' : InputTermColors.NoColor, # Input prompt number
'in_prompt2' : InputTermColors.NoColor, # Continuation prompt
'in_normal' : InputTermColors.NoColor, # color off (usu. Colors.Normal)
'out_prompt' : Colors.NoColor, # Output prompt
'out_number' : Colors.NoColor, # Output prompt number
'normal' : Colors.NoColor # color off (usu. Colors.Normal)
} )
LinuxColors = ColorScheme(
'Linux',{
'header' : Colors.LightRed,
token.NUMBER : Colors.LightCyan,
token.OP : Colors.Yellow,
token.STRING : Colors.LightBlue,
tokenize.COMMENT : Colors.LightRed,
token.NAME : Colors.Normal,
token.ERRORTOKEN : Colors.Red,
_KEYWORD : Colors.LightGreen,
_TEXT : Colors.Yellow,
'in_prompt' : InputTermColors.Green,
'in_number' : InputTermColors.LightGreen,
'in_prompt2' : InputTermColors.Green,
'in_normal' : InputTermColors.Normal, # color off (usu. Colors.Normal)
'out_prompt' : Colors.Red,
'out_number' : Colors.LightRed,
'normal' : Colors.Normal # color off (usu. Colors.Normal)
} )
NeutralColors = ColorScheme(
'Neutral',{
'header' : Colors.Red,
token.NUMBER : Colors.Cyan,
token.OP : Colors.Blue,
token.STRING : Colors.Blue,
tokenize.COMMENT : Colors.Red,
token.NAME : Colors.Normal,
token.ERRORTOKEN : Colors.Red,
_KEYWORD : Colors.Green,
_TEXT : Colors.Blue,
'in_prompt' : InputTermColors.Blue,
'in_number' : InputTermColors.LightBlue,
'in_prompt2' : InputTermColors.Blue,
'in_normal' : InputTermColors.Normal, # color off (usu. Colors.Normal)
'out_prompt' : Colors.Red,
'out_number' : Colors.LightRed,
'normal' : Colors.Normal # color off (usu. Colors.Normal)
} )
# Hack: the 'neutral' colours are not very visible on a dark background on
# Windows. Since Windows command prompts have a dark background by default, and
# relatively few users are likely to alter that, we will use the 'Linux' colours,
# designed for a dark background, as the default on Windows. Changing it here
# avoids affecting the prompt colours rendered by prompt_toolkit, where the
# neutral defaults do work OK.
if os.name == 'nt':
NeutralColors = LinuxColors.copy(name='Neutral')
LightBGColors = ColorScheme(
'LightBG',{
'header' : Colors.Red,
token.NUMBER : Colors.Cyan,
token.OP : Colors.Blue,
token.STRING : Colors.Blue,
tokenize.COMMENT : Colors.Red,
token.NAME : Colors.Normal,
token.ERRORTOKEN : Colors.Red,
_KEYWORD : Colors.Green,
_TEXT : Colors.Blue,
'in_prompt' : InputTermColors.Blue,
'in_number' : InputTermColors.LightBlue,
'in_prompt2' : InputTermColors.Blue,
'in_normal' : InputTermColors.Normal, # color off (usu. Colors.Normal)
'out_prompt' : Colors.Red,
'out_number' : Colors.LightRed,
'normal' : Colors.Normal # color off (usu. Colors.Normal)
} )
# Build table of color schemes (needed by the parser)
ANSICodeColors = ColorSchemeTable([NoColor,LinuxColors,LightBGColors, NeutralColors],
_scheme_default)
Undefined = object()
class Parser(Colorable):
""" Format colored Python source.
"""
def __init__(self, color_table=None, out = sys.stdout, parent=None, style=None):
""" Create a parser with a specified color table and output channel.
Call format() to process code.
"""
super(Parser, self).__init__(parent=parent)
self.color_table = color_table if color_table else ANSICodeColors
self.out = out
self.pos = None
self.lines = None
self.raw = None
if not style:
self.style = self.default_style
else:
self.style = style
def format(self, raw, out=None, scheme=Undefined):
import warnings
if scheme is not Undefined:
warnings.warn('The `scheme` argument of IPython.utils.PyColorize:Parser.format is deprecated since IPython 6.0. '
'It will have no effect. Set the parser `style` directly.',
stacklevel=2)
return self.format2(raw, out)[0]
def format2(self, raw, out = None):
""" Parse and send the colored source.
If out is not specified, the default (given to the constructor) is
used.
out should be a file-like object. Optionally, out can be given as the
string 'str' and the parser will automatically return the output in a
string."""
string_output = 0
if out == 'str' or self.out == 'str' or \
isinstance(self.out, StringIO):
# XXX - I don't really like this state handling logic, but at this
# point I don't want to make major changes, so adding the
# isinstance() check is the simplest I can do to ensure correct
# behavior.
out_old = self.out
self.out = StringIO()
string_output = 1
elif out is not None:
self.out = out
else:
raise ValueError('`out` or `self.out` should be file-like or the value `"str"`')
# Fast return of the unmodified input for NoColor scheme
if self.style == 'NoColor':
error = False
self.out.write(raw)
if string_output:
return raw, error
return None, error
# local shorthands
colors = self.color_table[self.style].colors
self.colors = colors # put in object so __call__ sees it
# Remove trailing whitespace and normalize tabs
self.raw = raw.expandtabs().rstrip()
# store line offsets in self.lines
self.lines = [0, 0]
pos = 0
raw_find = self.raw.find
lines_append = self.lines.append
while True:
pos = raw_find('\n', pos) + 1
if not pos:
break
lines_append(pos)
lines_append(len(self.raw))
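        # For instance, if self.raw were "a\nbb" the loop above leaves
        # self.lines == [0, 0, 2, 4]: self.lines[srow] maps a token's 1-based
        # source row to the character offset where that row starts in self.raw.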
# parse the source and write it
self.pos = 0
text = StringIO(self.raw)
error = False
try:
for atoken in generate_tokens(text.readline):
self(*atoken)
except tokenize.TokenError as ex:
msg = ex.args[0]
line = ex.args[1][0]
self.out.write("%s\n\n*** ERROR: %s%s%s\n" %
(colors[token.ERRORTOKEN],
msg, self.raw[self.lines[line]:],
colors.normal)
)
error = True
self.out.write(colors.normal+'\n')
if string_output:
output = self.out.getvalue()
self.out = out_old
return (output, error)
return (None, error)
def _inner_call_(self, toktype, toktext, start_pos):
"""like call but write to a temporary buffer"""
buff = StringIO()
srow, scol = start_pos
colors = self.colors
owrite = buff.write
# line separator, so this works across platforms
linesep = os.linesep
# calculate new positions
oldpos = self.pos
newpos = self.lines[srow] + scol
self.pos = newpos + len(toktext)
# send the original whitespace, if needed
if newpos > oldpos:
owrite(self.raw[oldpos:newpos])
# skip indenting tokens
if toktype in [token.INDENT, token.DEDENT]:
self.pos = newpos
buff.seek(0)
return buff.read()
# map token type to a color group
if token.LPAR <= toktype <= token.OP:
toktype = token.OP
elif toktype == token.NAME and keyword.iskeyword(toktext):
toktype = _KEYWORD
color = colors.get(toktype, colors[_TEXT])
# Triple quoted strings must be handled carefully so that backtracking
# in pagers works correctly. We need color terminators on _each_ line.
if linesep in toktext:
toktext = toktext.replace(linesep, '%s%s%s' %
(colors.normal,linesep,color))
# send text
owrite('%s%s%s' % (color,toktext,colors.normal))
buff.seek(0)
return buff.read()
def __call__(self, toktype, toktext, start_pos, end_pos, line):
""" Token handler, with syntax highlighting."""
self.out.write(
self._inner_call_(toktype, toktext, start_pos))
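#############################################################################
# A minimal usage sketch of the Parser defined above, assuming the 'Linux'
# scheme registered in ANSICodeColors; the helper name below is illustrative.
def _demo_parser():
    source = "def add(a, b):\n    return a + b  # sum\n"
    parser = Parser(style='Linux')
    colored, had_error = parser.format2(source, out='str')
    print(colored)
    return had_error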

@@ -0,0 +1,69 @@
"""cli-specific implementation of process utilities.
cli - Common Language Infrastructure, the .NET runtime used by IronPython.
Code can run on any operating system; check os.name for os-specific settings.
This file is only meant to be imported by process.py, not by end-users.
This file is largely untested. Turning it into a full drop-in process
interface for IronPython will probably require help filling in the details.
"""
# Import cli libraries:
import clr
import System
# Import Python libraries:
import os
# Import IPython libraries:
from ._process_common import arg_split
def system(cmd):
"""
system(cmd) should work in a cli environment on Mac OSX, Linux,
and Windows
"""
psi = System.Diagnostics.ProcessStartInfo(cmd)
psi.RedirectStandardOutput = True
psi.RedirectStandardError = True
psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal
psi.UseShellExecute = False
# Start up process:
reg = System.Diagnostics.Process.Start(psi)
def getoutput(cmd):
"""
getoutput(cmd) should work in a cli environment on Mac OSX, Linux,
and Windows
"""
psi = System.Diagnostics.ProcessStartInfo(cmd)
psi.RedirectStandardOutput = True
psi.RedirectStandardError = True
psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal
psi.UseShellExecute = False
# Start up process:
reg = System.Diagnostics.Process.Start(psi)
myOutput = reg.StandardOutput
output = myOutput.ReadToEnd()
myError = reg.StandardError
error = myError.ReadToEnd()
return output
def check_pid(pid):
"""
Check if a process with the given PID (pid) exists
"""
try:
System.Diagnostics.Process.GetProcessById(pid)
# process with given pid is running
return True
except System.InvalidOperationException:
# process wasn't started by this object (but is running)
return True
except System.ArgumentException:
# process with given pid isn't running
return False

@@ -0,0 +1,210 @@
"""Common utilities for the various process_* implementations.
This file is only meant to be imported by the platform-specific implementations
of subprocess utilities, and it contains tools that are common to all of them.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import subprocess
import shlex
import sys
import os
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Function definitions
#-----------------------------------------------------------------------------
def read_no_interrupt(p):
"""Read from a pipe ignoring EINTR errors.
This is necessary because, when reading from pipes with GUI event loops
running in the background, interrupts are often raised that stop the
command from completing."""
import errno
try:
return p.read()
except IOError as err:
if err.errno != errno.EINTR:
raise
def process_handler(cmd, callback, stderr=subprocess.PIPE):
"""Open a command in a shell subprocess and execute a callback.
This function provides common scaffolding for creating subprocess.Popen()
calls. It creates a Popen object and then calls the callback with it.
Parameters
----------
cmd : str or list
A command to be executed by the system, using :class:`subprocess.Popen`.
If a string is passed, it will be run in the system shell. If a list is
passed, it will be used directly as arguments.
callback : callable
A one-argument function that will be called with the Popen object.
stderr : file descriptor number, optional
By default this is set to ``subprocess.PIPE``, but you can also pass the
value ``subprocess.STDOUT`` to force the subprocess' stderr to go into
the same file descriptor as its stdout. This is useful to read stdout
and stderr combined in the order they are generated.
Returns
-------
The return value of the provided callback is returned.
"""
sys.stdout.flush()
sys.stderr.flush()
# On win32, close_fds can't be true when using pipes for stdin/out/err
close_fds = sys.platform != 'win32'
# Determine if cmd should be run with system shell.
shell = isinstance(cmd, str)
# On POSIX systems run shell commands with user-preferred shell.
executable = None
if shell and os.name == 'posix' and 'SHELL' in os.environ:
executable = os.environ['SHELL']
p = subprocess.Popen(cmd, shell=shell,
executable=executable,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=stderr,
close_fds=close_fds)
try:
out = callback(p)
except KeyboardInterrupt:
print('^C')
sys.stdout.flush()
sys.stderr.flush()
out = None
finally:
# Make really sure that we don't leave processes behind, in case the
# call above raises an exception
# We start by assuming the subprocess finished (to avoid NameErrors
# later depending on the path taken)
if p.returncode is None:
try:
p.terminate()
p.poll()
except OSError:
pass
# One last try on our way out
if p.returncode is None:
try:
p.kill()
except OSError:
pass
return out
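# A minimal sketch of driving process_handler() with a custom callback. It
# assumes a shell with a POSIX-style 'echo' command; the helper name is
# illustrative only.
def _process_handler_example():
    # The callback receives the Popen object; whatever it returns is passed
    # straight back to the caller of process_handler().
    out = process_handler('echo hello', lambda p: p.communicate()[0])
    return py3compat.decode(out) if out is not None else ''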
def getoutput(cmd):
"""Run a command and return its stdout/stderr as a string.
Parameters
----------
cmd : str or list
A command to be executed in the system shell.
Returns
-------
output : str
A string containing the combination of stdout and stderr from the
subprocess, in whatever order the subprocess originally wrote to its
file descriptors (so the order of the information in this string is the
correct order as would be seen if running the command in a terminal).
"""
out = process_handler(cmd, lambda p: p.communicate()[0], subprocess.STDOUT)
if out is None:
return ''
return py3compat.decode(out)
def getoutputerror(cmd):
"""Return (standard output, standard error) of executing cmd in a shell.
Accepts the same arguments as os.system().
Parameters
----------
cmd : str or list
A command to be executed in the system shell.
Returns
-------
stdout : str
stderr : str
"""
return get_output_error_code(cmd)[:2]
def get_output_error_code(cmd):
"""Return (standard output, standard error, return code) of executing cmd
in a shell.
Accepts the same arguments as os.system().
Parameters
----------
cmd : str or list
A command to be executed in the system shell.
Returns
-------
stdout : str
stderr : str
returncode: int
"""
out_err, p = process_handler(cmd, lambda p: (p.communicate(), p))
if out_err is None:
return '', '', p.returncode
out, err = out_err
return py3compat.decode(out), py3compat.decode(err), p.returncode
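# For orientation, the three helpers above differ only in how much of the
# subprocess result they expose (assuming a shell with an 'echo' builtin):
#
#   getoutput('echo hi')              # -> 'hi\n'       (stdout+stderr merged)
#   getoutputerror('echo hi')         # -> ('hi\n', '')
#   get_output_error_code('echo hi')  # -> ('hi\n', '', 0)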
def arg_split(s, posix=False, strict=True):
"""Split a command line's arguments in a shell-like manner.
This is a modified version of the standard library's shlex.split()
function, but with a default of posix=False for splitting, so that quotes
in inputs are respected.
if strict=False, then any errors shlex.split would raise will result in the
unparsed remainder being the last element of the list, rather than raising.
This is because we sometimes use arg_split to parse things other than
command-line args.
"""
lex = shlex.shlex(s, posix=posix)
lex.whitespace_split = True
# Extract tokens, ensuring that things like leaving open quotes
# does not cause this to raise. This is important, because we
# sometimes pass Python source through this (e.g. %timeit f(" ")),
# and it shouldn't raise an exception.
# It may be a bad idea to parse things that are not command-line args
# through this function, but we do, so let's be safe about it.
lex.commenters='' #fix for GH-1269
tokens = []
while True:
try:
tokens.append(next(lex))
except StopIteration:
break
except ValueError:
if strict:
raise
# couldn't parse, get remaining blob as last token
tokens.append(lex.token)
break
return tokens
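# A brief illustration of the posix/strict switches above (expected results
# shown as comments):
#
#   arg_split('hello "there bob"')               # ['hello', '"there bob"']
#   arg_split('hello "there bob"', posix=True)   # ['hello', 'there bob']
#   arg_split('f("unclosed', strict=False)       # last element holds the
#                                                # unparsed remainder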

@@ -0,0 +1,216 @@
"""Posix-specific implementation of process utilities.
This file is only meant to be imported by process.py, not by end-users.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import errno
import os
import subprocess as sp
import sys
import pexpect
# Our own
from ._process_common import getoutput, arg_split
from IPython.utils.encoding import DEFAULT_ENCODING
#-----------------------------------------------------------------------------
# Function definitions
#-----------------------------------------------------------------------------
class ProcessHandler(object):
"""Execute subprocesses under the control of pexpect.
"""
# Timeout in seconds to wait on each reading of the subprocess' output.
# This should not be set too low, to avoid excessive CPU usage on our side,
# since we read in a loop whose period is controlled by this timeout.
read_timeout = 0.05
# Timeout to give a process if we receive SIGINT, between sending the
# SIGINT to the process and forcefully terminating it.
terminate_timeout = 0.2
# File object where stdout and stderr of the subprocess will be written
logfile = None
# Shell to call for subprocesses to execute
_sh = None
@property
def sh(self):
if self._sh is None:
shell_name = os.environ.get("SHELL", "sh")
self._sh = pexpect.which(shell_name)
if self._sh is None:
raise OSError('"{}" shell not found'.format(shell_name))
return self._sh
def __init__(self, logfile=None, read_timeout=None, terminate_timeout=None):
"""Arguments are used for pexpect calls."""
self.read_timeout = (ProcessHandler.read_timeout if read_timeout is
None else read_timeout)
self.terminate_timeout = (ProcessHandler.terminate_timeout if
terminate_timeout is None else
terminate_timeout)
self.logfile = sys.stdout if logfile is None else logfile
def getoutput(self, cmd):
"""Run a command and return its stdout/stderr as a string.
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
output : str
A string containing the combination of stdout and stderr from the
subprocess, in whatever order the subprocess originally wrote to its
file descriptors (so the order of the information in this string is the
correct order as would be seen if running the command in a terminal).
"""
try:
return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n')
except KeyboardInterrupt:
print('^C', file=sys.stderr, end='')
def getoutput_pexpect(self, cmd):
"""Run a command and return its stdout/stderr as a string.
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
output : str
A string containing the combination of stdout and stderr from the
subprocess, in whatever order the subprocess originally wrote to its
file descriptors (so the order of the information in this string is the
correct order as would be seen if running the command in a terminal).
"""
try:
return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n')
except KeyboardInterrupt:
print('^C', file=sys.stderr, end='')
def system(self, cmd):
"""Execute a command in a subshell.
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
int : child's exitstatus
"""
# Get likely encoding for the output.
enc = DEFAULT_ENCODING
# Patterns to match on the output, for pexpect. We read input and
# allow either a short timeout or EOF
patterns = [pexpect.TIMEOUT, pexpect.EOF]
# the index of the EOF pattern in the list.
# even though we know it's 1, this call means we don't have to worry if
# we change the above list, and forget to change this value:
EOF_index = patterns.index(pexpect.EOF)
# The size of the output stored so far in the process output buffer.
# Since pexpect only appends to this buffer, each time we print we
# record how far we've printed, so that next time we only print *new*
# content from the buffer.
out_size = 0
try:
# Since we're not really searching the buffer for text patterns, we
# can set pexpect's search window to be tiny and it won't matter.
# We only search for the 'patterns' timeout or EOF, which aren't in
# the text itself.
#child = pexpect.spawn(pcmd, searchwindowsize=1)
if hasattr(pexpect, 'spawnb'):
child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U
else:
child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect
flush = sys.stdout.flush
while True:
# res is the index of the pattern that caused the match, so we
# know whether we've finished (if we matched EOF) or not
res_idx = child.expect_list(patterns, self.read_timeout)
print(child.before[out_size:].decode(enc, 'replace'), end='')
flush()
if res_idx==EOF_index:
break
# Update the pointer to what we've already printed
out_size = len(child.before)
except KeyboardInterrupt:
# We need to send ^C to the process. The ascii code for '^C' is 3
# (the character is known as ETX for 'End of Text', see
# curses.ascii.ETX).
child.sendline(chr(3))
# Read and print any more output the program might produce on its
# way out.
try:
out_size = len(child.before)
child.expect_list(patterns, self.terminate_timeout)
print(child.before[out_size:].decode(enc, 'replace'), end='')
sys.stdout.flush()
except KeyboardInterrupt:
# Impatient users tend to type it multiple times
pass
finally:
# Ensure the subprocess really is terminated
child.terminate(force=True)
# add isalive check, to ensure exitstatus is set:
child.isalive()
# We follow the subprocess pattern, returning either the exit status
# as a positive number, or the terminating signal as a negative
# number.
# on Linux, sh returns 128+n for signals terminating child processes
# on BSD (OS X), the signal code is set instead
if child.exitstatus is None:
# on WIFSIGNALED, pexpect sets signalstatus, leaving exitstatus=None
if child.signalstatus is None:
# this condition may never occur,
# but let's be certain we always return an integer.
return 0
return -child.signalstatus
if child.exitstatus > 128:
return -(child.exitstatus - 128)
return child.exitstatus
# Expose system() through a functional interface for outside use. Note that we
# use getoutput() from the _common utils, which is built on top of popen().
# Using pexpect to get subprocess output produces hard-to-parse output, since
# programs think they are talking to a tty and emit highly formatted output
# (ls is a good example).
system = ProcessHandler().system
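# A minimal sketch of the resulting functional interface, assuming a POSIX
# shell is available:
#
#   status = system('ls -l > /dev/null')  # streams output, returns exit status
#   text = getoutput('uname -s')          # stdout+stderr captured as a string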
def check_pid(pid):
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
return False
elif err.errno == errno.EPERM:
# Don't have permission to signal the process - probably means it exists
return True
raise
else:
return True

@@ -0,0 +1,184 @@
"""Windows-specific implementation of process utilities.
This file is only meant to be imported by process.py, not by end-users.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import os
import sys
import ctypes
import time
from ctypes import c_int, POINTER
from ctypes.wintypes import LPCWSTR, HLOCAL
from subprocess import STDOUT, TimeoutExpired
from threading import Thread
# our own imports
from ._process_common import read_no_interrupt, process_handler, arg_split as py_arg_split
from . import py3compat
from .encoding import DEFAULT_ENCODING
#-----------------------------------------------------------------------------
# Function definitions
#-----------------------------------------------------------------------------
class AvoidUNCPath(object):
"""A context manager to protect command execution from UNC paths.
In the Win32 API, commands can't be invoked with the cwd being a UNC path.
This context manager temporarily changes directory to the 'C:' drive on
entering, and restores the original working directory on exit.
The context manager returns the starting working directory *if* it made a
change and None otherwise, so that users can apply the necessary adjustment
to their system calls in the event of a change.
Examples
--------
::
cmd = 'dir'
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
os.system(cmd)
"""
def __enter__(self):
self.path = os.getcwd()
self.is_unc_path = self.path.startswith(r"\\")
if self.is_unc_path:
# change to c drive (as cmd.exe cannot handle UNC addresses)
os.chdir("C:")
return self.path
else:
# We return None to signal that there was no change in the working
# directory
return None
def __exit__(self, exc_type, exc_value, traceback):
if self.is_unc_path:
os.chdir(self.path)
def _system_body(p):
"""Callback for _system."""
enc = DEFAULT_ENCODING
def stdout_read():
for line in read_no_interrupt(p.stdout).splitlines():
line = line.decode(enc, 'replace')
print(line, file=sys.stdout)
def stderr_read():
for line in read_no_interrupt(p.stderr).splitlines():
line = line.decode(enc, 'replace')
print(line, file=sys.stderr)
Thread(target=stdout_read).start()
Thread(target=stderr_read).start()
# Wait to finish for returncode. Unfortunately, Python has a bug where
# wait() isn't interruptible (https://bugs.python.org/issue28168) so poll in
# a loop instead of just doing `return p.wait()`.
while True:
result = p.poll()
if result is None:
time.sleep(0.01)
else:
return result
def system(cmd):
"""Win32 version of os.system() that works with network shares.
Note that this implementation streams the child's output to the console and returns its exit code.
Parameters
----------
cmd : str or list
A command to be executed in the system shell.
Returns
-------
int : child process' exit code.
"""
# The controller provides interactivity with both
# stdin and stdout
#import _process_win32_controller
#_process_win32_controller.system(cmd)
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
return process_handler(cmd, _system_body)
def getoutput(cmd):
"""Return standard output of executing cmd in a shell.
Accepts the same arguments as os.system().
Parameters
----------
cmd : str or list
A command to be executed in the system shell.
Returns
-------
stdout : str
"""
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
out = process_handler(cmd, lambda p: p.communicate()[0], STDOUT)
if out is None:
out = b''
return py3compat.decode(out)
try:
CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
CommandLineToArgvW.restype = POINTER(LPCWSTR)
LocalFree = ctypes.windll.kernel32.LocalFree
LocalFree.restype = HLOCAL
LocalFree.argtypes = [HLOCAL]
def arg_split(commandline, posix=False, strict=True):
"""Split a command line's arguments in a shell-like manner.
This is a special version for Windows that uses a ctypes call to CommandLineToArgvW
to do the argv splitting. The posix parameter is ignored.
If strict=False, process_common.arg_split(...strict=False) is used instead.
"""
#CommandLineToArgvW returns path to executable if called with empty string.
if commandline.strip() == "":
return []
if not strict:
# not really a cl-arg, fallback on _process_common
return py_arg_split(commandline, posix=posix, strict=strict)
argvn = c_int()
result_pointer = CommandLineToArgvW(py3compat.cast_unicode(commandline.lstrip()), ctypes.byref(argvn))
result_array_type = LPCWSTR * argvn.value
result = [arg for arg in result_array_type.from_address(ctypes.addressof(result_pointer.contents))]
retval = LocalFree(result_pointer)
return result
except AttributeError:
arg_split = py_arg_split
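# For reference, the Win32 splitter above follows Windows quoting rules rather
# than POSIX ones (expected results shown as comments):
#
#   arg_split('foo "bar baz" qux')  # ['foo', 'bar baz', 'qux']
#   arg_split('')                   # []  (guarded above; the raw API would
#                                   #      return the executable path instead)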
def check_pid(pid):
# OpenProcess returns 0 if no such process (of ours) exists
# positive int otherwise
return bool(ctypes.windll.kernel32.OpenProcess(1,0,pid))

@@ -0,0 +1,573 @@
"""Windows-specific implementation of process utilities with direct WinAPI.
This file is meant to be used by process.py
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
# stdlib
import os, sys, threading
import ctypes, msvcrt
# Win32 API types needed for the API calls
from ctypes import POINTER
from ctypes.wintypes import HANDLE, HLOCAL, LPVOID, WORD, DWORD, BOOL, \
ULONG, LPCWSTR
LPDWORD = POINTER(DWORD)
LPHANDLE = POINTER(HANDLE)
ULONG_PTR = POINTER(ULONG)
class SECURITY_ATTRIBUTES(ctypes.Structure):
_fields_ = [("nLength", DWORD),
("lpSecurityDescriptor", LPVOID),
("bInheritHandle", BOOL)]
LPSECURITY_ATTRIBUTES = POINTER(SECURITY_ATTRIBUTES)
class STARTUPINFO(ctypes.Structure):
_fields_ = [("cb", DWORD),
("lpReserved", LPCWSTR),
("lpDesktop", LPCWSTR),
("lpTitle", LPCWSTR),
("dwX", DWORD),
("dwY", DWORD),
("dwXSize", DWORD),
("dwYSize", DWORD),
("dwXCountChars", DWORD),
("dwYCountChars", DWORD),
("dwFillAttribute", DWORD),
("dwFlags", DWORD),
("wShowWindow", WORD),
("cbReserved2", WORD),
("lpReserved2", LPVOID),
("hStdInput", HANDLE),
("hStdOutput", HANDLE),
("hStdError", HANDLE)]
LPSTARTUPINFO = POINTER(STARTUPINFO)
class PROCESS_INFORMATION(ctypes.Structure):
_fields_ = [("hProcess", HANDLE),
("hThread", HANDLE),
("dwProcessId", DWORD),
("dwThreadId", DWORD)]
LPPROCESS_INFORMATION = POINTER(PROCESS_INFORMATION)
# Win32 API constants needed
ERROR_HANDLE_EOF = 38
ERROR_BROKEN_PIPE = 109
ERROR_NO_DATA = 232
HANDLE_FLAG_INHERIT = 0x0001
STARTF_USESTDHANDLES = 0x0100
CREATE_SUSPENDED = 0x0004
CREATE_NEW_CONSOLE = 0x0010
CREATE_NO_WINDOW = 0x08000000
STILL_ACTIVE = 259
WAIT_TIMEOUT = 0x0102
WAIT_FAILED = 0xFFFFFFFF
INFINITE = 0xFFFFFFFF
DUPLICATE_SAME_ACCESS = 0x00000002
ENABLE_ECHO_INPUT = 0x0004
ENABLE_LINE_INPUT = 0x0002
ENABLE_PROCESSED_INPUT = 0x0001
# Win32 API functions needed
GetLastError = ctypes.windll.kernel32.GetLastError
GetLastError.argtypes = []
GetLastError.restype = DWORD
CreateFile = ctypes.windll.kernel32.CreateFileW
CreateFile.argtypes = [LPCWSTR, DWORD, DWORD, LPVOID, DWORD, DWORD, HANDLE]
CreateFile.restype = HANDLE
CreatePipe = ctypes.windll.kernel32.CreatePipe
CreatePipe.argtypes = [POINTER(HANDLE), POINTER(HANDLE),
LPSECURITY_ATTRIBUTES, DWORD]
CreatePipe.restype = BOOL
CreateProcess = ctypes.windll.kernel32.CreateProcessW
CreateProcess.argtypes = [LPCWSTR, LPCWSTR, LPSECURITY_ATTRIBUTES,
LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPCWSTR, LPSTARTUPINFO,
LPPROCESS_INFORMATION]
CreateProcess.restype = BOOL
GetExitCodeProcess = ctypes.windll.kernel32.GetExitCodeProcess
GetExitCodeProcess.argtypes = [HANDLE, LPDWORD]
GetExitCodeProcess.restype = BOOL
GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
GetCurrentProcess.argtypes = []
GetCurrentProcess.restype = HANDLE
ResumeThread = ctypes.windll.kernel32.ResumeThread
ResumeThread.argtypes = [HANDLE]
ResumeThread.restype = DWORD
ReadFile = ctypes.windll.kernel32.ReadFile
ReadFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID]
ReadFile.restype = BOOL
WriteFile = ctypes.windll.kernel32.WriteFile
WriteFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID]
WriteFile.restype = BOOL
GetConsoleMode = ctypes.windll.kernel32.GetConsoleMode
GetConsoleMode.argtypes = [HANDLE, LPDWORD]
GetConsoleMode.restype = BOOL
SetConsoleMode = ctypes.windll.kernel32.SetConsoleMode
SetConsoleMode.argtypes = [HANDLE, DWORD]
SetConsoleMode.restype = BOOL
FlushConsoleInputBuffer = ctypes.windll.kernel32.FlushConsoleInputBuffer
FlushConsoleInputBuffer.argtypes = [HANDLE]
FlushConsoleInputBuffer.restype = BOOL
WaitForSingleObject = ctypes.windll.kernel32.WaitForSingleObject
WaitForSingleObject.argtypes = [HANDLE, DWORD]
WaitForSingleObject.restype = DWORD
DuplicateHandle = ctypes.windll.kernel32.DuplicateHandle
DuplicateHandle.argtypes = [HANDLE, HANDLE, HANDLE, LPHANDLE,
DWORD, BOOL, DWORD]
DuplicateHandle.restype = BOOL
SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
SetHandleInformation.argtypes = [HANDLE, DWORD, DWORD]
SetHandleInformation.restype = BOOL
CloseHandle = ctypes.windll.kernel32.CloseHandle
CloseHandle.argtypes = [HANDLE]
CloseHandle.restype = BOOL
CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(ctypes.c_int)]
CommandLineToArgvW.restype = POINTER(LPCWSTR)
LocalFree = ctypes.windll.kernel32.LocalFree
LocalFree.argtypes = [HLOCAL]
LocalFree.restype = HLOCAL
class AvoidUNCPath(object):
"""A context manager to protect command execution from UNC paths.
In the Win32 API, commands can't be invoked with the cwd being a UNC path.
This context manager temporarily changes directory to the 'C:' drive on
entering, and restores the original working directory on exit.
The context manager returns the starting working directory *if* it made a
change and None otherwise, so that users can apply the necessary adjustment
to their system calls in the event of a change.
Examples
--------
::
cmd = 'dir'
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
os.system(cmd)
"""
def __enter__(self):
self.path = os.getcwd()
self.is_unc_path = self.path.startswith(r"\\")
if self.is_unc_path:
# change to c drive (as cmd.exe cannot handle UNC addresses)
os.chdir("C:")
return self.path
else:
# We return None to signal that there was no change in the working
# directory
return None
def __exit__(self, exc_type, exc_value, traceback):
if self.is_unc_path:
os.chdir(self.path)
class Win32ShellCommandController(object):
"""Runs a shell command in a 'with' context.
This implementation is Win32-specific.
Example:
# Runs the command interactively with default console stdin/stdout
with ShellCommandController('python -i') as scc:
scc.run()
# Runs the command using the provided functions for stdin/stdout
def my_stdout_func(s):
# print or save the string 's'
write_to_stdout(s)
def my_stdin_func():
# If input is available, return it as a string.
if input_available():
return get_input()
# If no input available, return None after a short delay to
# keep from blocking.
else:
time.sleep(0.01)
return None
with ShellCommandController('python -i') as scc:
scc.run(my_stdout_func, my_stdin_func)
"""
def __init__(self, cmd, mergeout = True):
"""Initializes the shell command controller.
The cmd is the program to execute, and mergeout is
whether to blend stdout and stderr into one output
in stdout. Merging them in this fashion more reliably
keeps stdout and stderr in the correct order, especially
for interactive shell usage.
"""
self.cmd = cmd
self.mergeout = mergeout
def __enter__(self):
cmd = self.cmd
mergeout = self.mergeout
self.hstdout, self.hstdin, self.hstderr = None, None, None
self.piProcInfo = None
try:
p_hstdout, c_hstdout, p_hstderr, \
c_hstderr, p_hstdin, c_hstdin = [None]*6
# SECURITY_ATTRIBUTES with inherit handle set to True
saAttr = SECURITY_ATTRIBUTES()
saAttr.nLength = ctypes.sizeof(saAttr)
saAttr.bInheritHandle = True
saAttr.lpSecurityDescriptor = None
def create_pipe(uninherit):
"""Creates a Windows pipe, which consists of two handles.
The 'uninherit' parameter controls which handle is not
inherited by the child process.
"""
handles = HANDLE(), HANDLE()
if not CreatePipe(ctypes.byref(handles[0]),
ctypes.byref(handles[1]), ctypes.byref(saAttr), 0):
raise ctypes.WinError()
if not SetHandleInformation(handles[uninherit],
HANDLE_FLAG_INHERIT, 0):
raise ctypes.WinError()
return handles[0].value, handles[1].value
p_hstdout, c_hstdout = create_pipe(uninherit=0)
# 'mergeout' signals that stdout and stderr should be merged.
# We do that by using one pipe for both of them.
if mergeout:
c_hstderr = HANDLE()
if not DuplicateHandle(GetCurrentProcess(), c_hstdout,
GetCurrentProcess(), ctypes.byref(c_hstderr),
0, True, DUPLICATE_SAME_ACCESS):
raise ctypes.WinError()
else:
p_hstderr, c_hstderr = create_pipe(uninherit=0)
c_hstdin, p_hstdin = create_pipe(uninherit=1)
# Create the process object
piProcInfo = PROCESS_INFORMATION()
siStartInfo = STARTUPINFO()
siStartInfo.cb = ctypes.sizeof(siStartInfo)
siStartInfo.hStdInput = c_hstdin
siStartInfo.hStdOutput = c_hstdout
siStartInfo.hStdError = c_hstderr
siStartInfo.dwFlags = STARTF_USESTDHANDLES
dwCreationFlags = CREATE_SUSPENDED | CREATE_NO_WINDOW # | CREATE_NEW_CONSOLE
if not CreateProcess(None,
u"cmd.exe /c " + cmd,
None, None, True, dwCreationFlags,
None, None, ctypes.byref(siStartInfo),
ctypes.byref(piProcInfo)):
raise ctypes.WinError()
# Close this process's versions of the child handles
CloseHandle(c_hstdin)
c_hstdin = None
CloseHandle(c_hstdout)
c_hstdout = None
if c_hstderr is not None:
CloseHandle(c_hstderr)
c_hstderr = None
# Transfer ownership of the parent handles to the object
self.hstdin = p_hstdin
p_hstdin = None
self.hstdout = p_hstdout
p_hstdout = None
if not mergeout:
self.hstderr = p_hstderr
p_hstderr = None
self.piProcInfo = piProcInfo
finally:
if p_hstdin:
CloseHandle(p_hstdin)
if c_hstdin:
CloseHandle(c_hstdin)
if p_hstdout:
CloseHandle(p_hstdout)
if c_hstdout:
CloseHandle(c_hstdout)
if p_hstderr:
CloseHandle(p_hstderr)
if c_hstderr:
CloseHandle(c_hstderr)
return self
def _stdin_thread(self, handle, hprocess, func, stdout_func):
exitCode = DWORD()
bytesWritten = DWORD(0)
while True:
#print("stdin thread loop start")
# Get the input string (may be bytes or unicode)
data = func()
# None signals to poll whether the process has exited
if data is None:
#print("checking for process completion")
if not GetExitCodeProcess(hprocess, ctypes.byref(exitCode)):
raise ctypes.WinError()
if exitCode.value != STILL_ACTIVE:
return
# TESTING: Does zero-sized writefile help?
if not WriteFile(handle, "", 0,
ctypes.byref(bytesWritten), None):
raise ctypes.WinError()
continue
#print("\nGot str %s\n" % repr(data), file=sys.stderr)
# Encode the string to the console encoding
if isinstance(data, str):
data = data.encode('utf_8')
# What we have now must be a bytes object
if not isinstance(data, bytes):
raise RuntimeError("internal stdin function string error")
# An empty string signals EOF
if len(data) == 0:
return
# In a windows console, sometimes the input is echoed,
# but sometimes not. How do we determine when to do this?
stdout_func(data)
# WriteFile may not accept all the data at once.
# Loop until everything is processed
while len(data) != 0:
#print("Calling writefile")
if not WriteFile(handle, data, len(data),
ctypes.byref(bytesWritten), None):
# This occurs at exit
if GetLastError() == ERROR_NO_DATA:
return
raise ctypes.WinError()
#print("Called writefile")
data = data[bytesWritten.value:]
def _stdout_thread(self, handle, func):
# Allocate the output buffer
data = ctypes.create_string_buffer(4096)
while True:
bytesRead = DWORD(0)
if not ReadFile(handle, data, 4096,
ctypes.byref(bytesRead), None):
le = GetLastError()
if le == ERROR_BROKEN_PIPE:
return
else:
raise ctypes.WinError()
# FIXME: Python3
s = data.value[0:bytesRead.value]
#print("\nv: %s" % repr(s), file=sys.stderr)
func(s.decode('utf_8', 'replace'))
def run(self, stdout_func = None, stdin_func = None, stderr_func = None):
"""Runs the process, using the provided functions for I/O.
The function stdin_func should return strings whenever a
character or characters become available.
The functions stdout_func and stderr_func are called whenever
something is printed to stdout or stderr, respectively.
These functions are called from different threads (but not
concurrently, because of the GIL).
"""
if stdout_func is None and stdin_func is None and stderr_func is None:
return self._run_stdio()
if stderr_func is not None and self.mergeout:
raise RuntimeError("Shell command was initiated with "
"merged stdin/stdout, but a separate stderr_func "
"was provided to the run() method")
# Create a thread for each input/output handle
stdin_thread = None
threads = []
if stdin_func:
stdin_thread = threading.Thread(target=self._stdin_thread,
args=(self.hstdin, self.piProcInfo.hProcess,
stdin_func, stdout_func))
threads.append(threading.Thread(target=self._stdout_thread,
args=(self.hstdout, stdout_func)))
if not self.mergeout:
if stderr_func is None:
stderr_func = stdout_func
threads.append(threading.Thread(target=self._stdout_thread,
args=(self.hstderr, stderr_func)))
# Start the I/O threads and the process
if ResumeThread(self.piProcInfo.hThread) == 0xFFFFFFFF:
raise ctypes.WinError()
if stdin_thread is not None:
stdin_thread.start()
for thread in threads:
thread.start()
# Wait for the process to complete
if WaitForSingleObject(self.piProcInfo.hProcess, INFINITE) == \
WAIT_FAILED:
raise ctypes.WinError()
# Wait for the I/O threads to complete
for thread in threads:
thread.join()
# Wait for the stdin thread to complete
if stdin_thread is not None:
stdin_thread.join()
def _stdin_raw_nonblock(self):
"""Use the raw Win32 handle of sys.stdin to do non-blocking reads"""
# WARNING: This is experimental, and produces inconsistent results.
# It's possible for the handle not to be appropriate for use
# with WaitForSingleObject, among other things.
handle = msvcrt.get_osfhandle(sys.stdin.fileno())
result = WaitForSingleObject(handle, 100)
if result == WAIT_FAILED:
raise ctypes.WinError()
elif result == WAIT_TIMEOUT:
print(".", end='')
return None
else:
data = ctypes.create_string_buffer(256)
bytesRead = DWORD(0)
print('?', end='')
if not ReadFile(handle, data, 256,
ctypes.byref(bytesRead), None):
raise ctypes.WinError()
# This ensures the non-blocking works with an actual console
# Not checking the error, so the processing will still work with
# other handle types
FlushConsoleInputBuffer(handle)
data = data.value
data = data.replace('\r\n', '\n')
data = data.replace('\r', '\n')
print(repr(data) + " ", end='')
return data
def _stdin_raw_block(self):
"""Use a blocking stdin read"""
# The big problem with the blocking read is that it doesn't
# exit when it's supposed to in all contexts. An extra
# key-press may be required to trigger the exit.
try:
data = sys.stdin.read(1)
data = data.replace('\r', '\n')
return data
except WindowsError as we:
if we.winerror == ERROR_NO_DATA:
# This error occurs when the pipe is closed
return None
else:
# Otherwise let the error propagate
raise we
def _stdout_raw(self, s):
"""Writes the string to stdout"""
print(s, end='', file=sys.stdout)
sys.stdout.flush()
def _stderr_raw(self, s):
"""Writes the string to stdout"""
print(s, end='', file=sys.stderr)
sys.stderr.flush()
def _run_stdio(self):
"""Runs the process using the system standard I/O.
IMPORTANT: stdin needs to be asynchronous, so the Python
sys.stdin object is not used. Instead,
msvcrt.kbhit/getwch are used asynchronously.
"""
# Disable Line and Echo mode
#lpMode = DWORD()
#handle = msvcrt.get_osfhandle(sys.stdin.fileno())
#if GetConsoleMode(handle, ctypes.byref(lpMode)):
# set_console_mode = True
# if not SetConsoleMode(handle, lpMode.value &
# ~(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT)):
# raise ctypes.WinError()
if self.mergeout:
return self.run(stdout_func = self._stdout_raw,
stdin_func = self._stdin_raw_block)
else:
return self.run(stdout_func = self._stdout_raw,
stdin_func = self._stdin_raw_block,
stderr_func = self._stderr_raw)
# Restore the previous console mode
#if set_console_mode:
# if not SetConsoleMode(handle, lpMode.value):
# raise ctypes.WinError()
def __exit__(self, exc_type, exc_value, traceback):
if self.hstdin:
CloseHandle(self.hstdin)
self.hstdin = None
if self.hstdout:
CloseHandle(self.hstdout)
self.hstdout = None
if self.hstderr:
CloseHandle(self.hstderr)
self.hstderr = None
if self.piProcInfo is not None:
CloseHandle(self.piProcInfo.hProcess)
CloseHandle(self.piProcInfo.hThread)
self.piProcInfo = None
def system(cmd):
"""Win32 version of os.system() that works with network shares.
Note that this implementation returns None, as meant for use in IPython.
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
None : we explicitly do NOT return the subprocess status code, as this
utility is meant to be used extensively in IPython, where any return value
would trigger :func:`sys.displayhook` calls.
"""
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
with Win32ShellCommandController(cmd) as scc:
scc.run()
if __name__ == "__main__":
print("Test starting!")
#system("cmd")
system("python -i")
print("Test finished!")

@@ -0,0 +1,2 @@
# GENERATED BY setup.py
commit = u"55e81b920"

@@ -0,0 +1,170 @@
# encoding: utf-8
"""IO capturing utilities."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from io import StringIO
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class RichOutput(object):
def __init__(self, data=None, metadata=None, transient=None, update=False):
self.data = data or {}
self.metadata = metadata or {}
self.transient = transient or {}
self.update = update
def display(self):
from IPython.display import publish_display_data
publish_display_data(data=self.data, metadata=self.metadata,
transient=self.transient, update=self.update)
def _repr_mime_(self, mime):
if mime not in self.data:
return
data = self.data[mime]
if mime in self.metadata:
return data, self.metadata[mime]
else:
return data
def _repr_mimebundle_(self, include=None, exclude=None):
return self.data, self.metadata
def _repr_html_(self):
return self._repr_mime_("text/html")
def _repr_latex_(self):
return self._repr_mime_("text/latex")
def _repr_json_(self):
return self._repr_mime_("application/json")
def _repr_javascript_(self):
return self._repr_mime_("application/javascript")
def _repr_png_(self):
return self._repr_mime_("image/png")
def _repr_jpeg_(self):
return self._repr_mime_("image/jpeg")
def _repr_svg_(self):
return self._repr_mime_("image/svg+xml")
class CapturedIO(object):
"""Simple object for containing captured stdout/err and rich display StringIO objects
Each instance `c` has three attributes:
- ``c.stdout`` : standard output as a string
- ``c.stderr`` : standard error as a string
- ``c.outputs``: a list of rich display outputs
Additionally, there's a ``c.show()`` method which will print all of the
above in the same order, and can be invoked simply via ``c()``.
"""
def __init__(self, stdout, stderr, outputs=None):
self._stdout = stdout
self._stderr = stderr
if outputs is None:
outputs = []
self._outputs = outputs
def __str__(self):
return self.stdout
@property
def stdout(self):
"Captured standard output"
if not self._stdout:
return ''
return self._stdout.getvalue()
@property
def stderr(self):
"Captured standard error"
if not self._stderr:
return ''
return self._stderr.getvalue()
@property
def outputs(self):
"""A list of the captured rich display outputs, if any.
If you have a CapturedIO object ``c``, these can be displayed in IPython
using::
from IPython.display import display
for o in c.outputs:
display(o)
"""
return [ RichOutput(**kargs) for kargs in self._outputs ]
def show(self):
"""write my output to sys.stdout/err as appropriate"""
sys.stdout.write(self.stdout)
sys.stderr.write(self.stderr)
sys.stdout.flush()
sys.stderr.flush()
for kargs in self._outputs:
RichOutput(**kargs).display()
__call__ = show
class capture_output(object):
"""context manager for capturing stdout/err"""
stdout = True
stderr = True
display = True
def __init__(self, stdout=True, stderr=True, display=True):
self.stdout = stdout
self.stderr = stderr
self.display = display
self.shell = None
def __enter__(self):
from IPython.core.getipython import get_ipython
from IPython.core.displaypub import CapturingDisplayPublisher
from IPython.core.displayhook import CapturingDisplayHook
self.sys_stdout = sys.stdout
self.sys_stderr = sys.stderr
if self.display:
self.shell = get_ipython()
if self.shell is None:
self.save_display_pub = None
self.display = False
stdout = stderr = outputs = None
if self.stdout:
stdout = sys.stdout = StringIO()
if self.stderr:
stderr = sys.stderr = StringIO()
if self.display:
self.save_display_pub = self.shell.display_pub
self.shell.display_pub = CapturingDisplayPublisher()
outputs = self.shell.display_pub.outputs
self.save_display_hook = sys.displayhook
sys.displayhook = CapturingDisplayHook(shell=self.shell,
outputs=outputs)
return CapturedIO(stdout, stderr, outputs)
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout = self.sys_stdout
sys.stderr = self.sys_stderr
if self.display and self.shell:
self.shell.display_pub = self.save_display_pub
sys.displayhook = self.save_display_hook
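# A minimal sketch of capturing plain stdout with the context manager above.
# No active IPython shell is needed when display=False; the helper name is
# illustrative only.
def _capture_example():
    with capture_output(display=False) as captured:
        print("hello")
    return captured.stdout  # -> 'hello\n'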

@@ -0,0 +1,25 @@
#*****************************************************************************
# Copyright (C) 2016 The IPython Team <ipython-dev@scipy.org>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
"""
Color managing related utilities
"""
import pygments.styles
from traitlets.config import Configurable
from traitlets import Unicode
available_themes = lambda : [s for s in pygments.styles.get_all_styles()]+['NoColor','LightBG','Linux', 'Neutral']
class Colorable(Configurable):
"""
A subclass of Configurable for all the classes that have a ``default_style``.
"""
default_style=Unicode('LightBG').tag(config=True)
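# A short sketch of the intended use, with `MyWidget` as a purely hypothetical
# consumer class:
#
#   class MyWidget(Colorable):
#       pass
#
#   print(available_themes())           # pygments styles + IPython's own schemes
#   w = MyWidget(default_style='Linux')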

@@ -0,0 +1,187 @@
# -*- coding: utf-8 -*-
"""Tools for coloring text in ANSI terminals.
"""
#*****************************************************************************
# Copyright (C) 2002-2006 Fernando Perez. <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
__all__ = ['TermColors','InputTermColors','ColorScheme','ColorSchemeTable']
import os
from IPython.utils.ipstruct import Struct
color_templates = (
# Dark colors
("Black" , "0;30"),
("Red" , "0;31"),
("Green" , "0;32"),
("Brown" , "0;33"),
("Blue" , "0;34"),
("Purple" , "0;35"),
("Cyan" , "0;36"),
("LightGray" , "0;37"),
# Light colors
("DarkGray" , "1;30"),
("LightRed" , "1;31"),
("LightGreen" , "1;32"),
("Yellow" , "1;33"),
("LightBlue" , "1;34"),
("LightPurple" , "1;35"),
("LightCyan" , "1;36"),
("White" , "1;37"),
# Blinking colors. Probably should not be used in anything serious.
("BlinkBlack" , "5;30"),
("BlinkRed" , "5;31"),
("BlinkGreen" , "5;32"),
("BlinkYellow" , "5;33"),
("BlinkBlue" , "5;34"),
("BlinkPurple" , "5;35"),
("BlinkCyan" , "5;36"),
("BlinkLightGray", "5;37"),
)
def make_color_table(in_class):
"""Build a set of color attributes in a class.
Helper function for building the :class:`TermColors` and
:class:`InputTermColors` classes.
"""
for name,value in color_templates:
setattr(in_class,name,in_class._base % value)
class TermColors:
"""Color escape sequences.
This class defines the escape sequences for all the standard (ANSI?)
colors in terminals. Also defines a NoColor escape which is just the null
string, suitable for defining 'dummy' color schemes in terminals which get
confused by color escapes.
This class should be used as a mixin for building color schemes."""
NoColor = '' # for color schemes in color-less terminals.
Normal = '\033[0m' # Reset normal coloring
_base = '\033[%sm' # Template for all other colors
# Build the actual color table as a set of class attributes:
make_color_table(TermColors)
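# After the call above, TermColors carries one escape string per entry in
# color_templates, e.g. (expected values):
#
#   TermColors.Red        == '\033[0;31m'
#   TermColors.LightGreen == '\033[1;32m'
#   TermColors.Normal     == '\033[0m'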
class InputTermColors:
"""Color escape sequences for input prompts.
This class is similar to TermColors, but the escapes are wrapped in \001
and \002 so that readline can properly know the length of each line and
can wrap lines accordingly. Use this class for any colored text which
needs to be used in input prompts, such as in calls to raw_input().
This class defines the escape sequences for all the standard (ANSI?)
colors in terminals. Also defines a NoColor escape which is just the null
string, suitable for defining 'dummy' color schemes in terminals which get
confused by color escapes.
This class should be used as a mixin for building color schemes."""
NoColor = '' # for color schemes in color-less terminals.
if os.name == 'nt' and os.environ.get('TERM','dumb') == 'emacs':
# (X)emacs on W32 gets confused with \001 and \002 so we remove them
Normal = '\033[0m' # Reset normal coloring
_base = '\033[%sm' # Template for all other colors
else:
Normal = '\001\033[0m\002' # Reset normal coloring
_base = '\001\033[%sm\002' # Template for all other colors
# Build the actual color table as a set of class attributes:
make_color_table(InputTermColors)
class NoColors:
"""This defines all the same names as the colour classes, but maps them to
empty strings, so it can easily be substituted to turn off colours."""
NoColor = ''
Normal = ''
for name, value in color_templates:
setattr(NoColors, name, '')
class ColorScheme:
"""Generic color scheme class. Just a name and a Struct."""
def __init__(self,__scheme_name_,colordict=None,**colormap):
self.name = __scheme_name_
if colordict is None:
self.colors = Struct(**colormap)
else:
self.colors = Struct(colordict)
def copy(self,name=None):
"""Return a full copy of the object, optionally renaming it."""
if name is None:
name = self.name
return ColorScheme(name, self.colors.dict())
class ColorSchemeTable(dict):
"""General class to handle tables of color schemes.
It's basically a dict of color schemes with a couple of shorthand
attributes and some convenient methods.
active_scheme_name -> name of the currently active scheme
active_colors -> color table (Struct) of the active scheme"""
def __init__(self, scheme_list=None, default_scheme=''):
"""Create a table of color schemes.
The table can be created empty and manually filled or it can be
created with a list of valid color schemes AND the specification for
the default active scheme.
"""
# create object attributes to be set later
self.active_scheme_name = ''
self.active_colors = None
if scheme_list:
if default_scheme == '':
raise ValueError('you must specify the default color scheme')
for scheme in scheme_list:
self.add_scheme(scheme)
self.set_active_scheme(default_scheme)
def copy(self):
"""Return full copy of object"""
return ColorSchemeTable(self.values(),self.active_scheme_name)
def add_scheme(self,new_scheme):
"""Add a new color scheme to the table."""
if not isinstance(new_scheme,ColorScheme):
raise ValueError('ColorSchemeTable only accepts ColorScheme instances')
self[new_scheme.name] = new_scheme
def set_active_scheme(self,scheme,case_sensitive=0):
"""Set the currently active scheme.
Names are by default compared in a case-insensitive way, but this can
be changed by setting the parameter case_sensitive to true."""
scheme_names = list(self.keys())
if case_sensitive:
valid_schemes = scheme_names
scheme_test = scheme
else:
valid_schemes = [s.lower() for s in scheme_names]
scheme_test = scheme.lower()
try:
scheme_idx = valid_schemes.index(scheme_test)
except ValueError as e:
raise ValueError('Unrecognized color scheme: ' + scheme + \
'\nValid schemes: '+str(scheme_names).replace("'', ",'')) from e
else:
active = scheme_names[scheme_idx]
self.active_scheme_name = active
self.active_colors = self[active].colors
# Now allow using '' as an index for the current active scheme
self[''] = self[active]
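# A minimal sketch of wiring the classes above together; the scheme names and
# contents are illustrative only.
def _scheme_table_example():
    plain = ColorScheme('plain', {'header': TermColors.NoColor})
    loud = ColorScheme('loud', {'header': TermColors.LightRed})
    table = ColorSchemeTable([plain, loud], default_scheme='plain')
    table.set_active_scheme('LOUD')        # case-insensitive lookup
    return table.active_colors['header']   # -> '\x1b[1;31m'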

@@ -0,0 +1,60 @@
# encoding: utf-8
"""Miscellaneous context managers.
"""
import warnings
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
class preserve_keys(object):
"""Preserve a set of keys in a dictionary.
Upon entering the context manager the current values of the keys
will be saved. Upon exiting, the dictionary will be updated to
restore the original value of the preserved keys. Preserved keys
which did not exist when entering the context manager will be
deleted.
Examples
--------
>>> d = {'a': 1, 'b': 2, 'c': 3}
>>> with preserve_keys(d, 'b', 'c', 'd'):
... del d['a']
... del d['b'] # will be reset to 2
... d['c'] = None # will be reset to 3
... d['d'] = 4 # will be deleted
... d['e'] = 5
... print(sorted(d.items()))
...
[('c', None), ('d', 4), ('e', 5)]
>>> print(sorted(d.items()))
[('b', 2), ('c', 3), ('e', 5)]
"""
def __init__(self, dictionary, *keys):
self.dictionary = dictionary
self.keys = keys
def __enter__(self):
# Actions to perform upon exiting.
to_delete = []
to_update = {}
d = self.dictionary
for k in self.keys:
if k in d:
to_update[k] = d[k]
else:
to_delete.append(k)
self.to_delete = to_delete
self.to_update = to_update
def __exit__(self, *exc_info):
d = self.dictionary
for k in self.to_delete:
d.pop(k, None)
d.update(self.to_update)

View File

@@ -0,0 +1,4 @@
from warnings import warn
warn("IPython.utils.daemonize has moved to ipyparallel.apps.daemonize since IPython 4.0", DeprecationWarning, stacklevel=2)
from ipyparallel.apps.daemonize import daemonize

View File

@@ -0,0 +1,30 @@
# encoding: utf-8
"""Utilities for working with data structures like lists, dicts and tuples.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
def uniq_stable(elems):
"""uniq_stable(elems) -> list
Return, from an iterable, a list of all the unique elements in the input,
maintaining the order in which they first appear.
Note: All elements in the input must be hashable for this routine
to work, as it internally uses a set for efficiency reasons.
"""
seen = set()
return [x for x in elems if x not in seen and not seen.add(x)]
def chop(seq, size):
"""Chop a sequence into chunks of the given size."""
return [seq[i:i+size] for i in range(0,len(seq),size)]
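# Usage sketch: uniq_stable keeps the first occurrence of each element in order;
# chop splits a sequence into fixed-size chunks (the last chunk may be shorter).
if __name__ == '__main__':
    print(uniq_stable([3, 1, 3, 2, 1]))    # -> [3, 1, 2]
    print(chop(list(range(7)), 3))         # -> [[0, 1, 2], [3, 4, 5], [6]]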

View File

@@ -0,0 +1,58 @@
# encoding: utf-8
"""Decorators that don't go anywhere else.
This module contains misc. decorators that don't really go with another module
in :mod:`IPython.utils`. Before putting something here please see if it should
go into another topical module in :mod:`IPython.utils`.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def flag_calls(func):
"""Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded."""
# don't wrap twice
if hasattr(func, 'called'):
return func
def wrapper(*args,**kw):
wrapper.called = False
out = func(*args,**kw)
wrapper.called = True
return out
wrapper.called = False
wrapper.__doc__ = func.__doc__
return wrapper
def undoc(func):
"""Mark a function or class as undocumented.
This is found by inspecting the AST, so for now it must be used directly
as @undoc, not as e.g. @decorators.undoc
"""
return func
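# Usage sketch: flag_calls records whether the wrapped function has completed
# at least one call.
if __name__ == '__main__':
    @flag_calls
    def greet(name):
        return 'hello ' + name

    print(greet.called)     # -> False (not called yet)
    greet('world')
    print(greet.called)     # -> True (the call completed)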

View File

@@ -0,0 +1,84 @@
# encoding: utf-8
"""A fancy version of Python's builtin :func:`dir` function.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import inspect
import types
def safe_hasattr(obj, attr):
"""In recent versions of Python, hasattr() only catches AttributeError.
This catches all errors.
"""
try:
getattr(obj, attr)
return True
except:
return False
def dir2(obj):
"""dir2(obj) -> list of strings
Extended version of the Python builtin dir(), which does a few extra
checks.
This version is guaranteed to return only a list of true strings, whereas
dir() returns anything that objects inject into themselves, even if they
are later not really valid for attribute access (many extension libraries
have such bugs).
"""
# Start building the attribute list via dir(), and then complete it
# with a few extra special-purpose calls.
try:
words = set(dir(obj))
except Exception:
# TypeError: dir(obj) does not return a list
words = set()
if safe_hasattr(obj, '__class__'):
words |= set(dir(obj.__class__))
# filter out non-string attributes which may be stuffed by dir() calls
# and poor coding in third-party modules
words = [w for w in words if isinstance(w, str)]
return sorted(words)
def get_real_method(obj, name):
"""Like getattr, but with a few extra sanity checks:
- If obj is a class, ignore everything except class methods
- Check if obj is a proxy that claims to have all attributes
- Catch attribute access failing with any exception
- Check that the attribute is a callable object
Returns the method or None.
"""
try:
canary = getattr(obj, '_ipython_canary_method_should_not_exist_', None)
except Exception:
return None
if canary is not None:
# It claimed to have an attribute it should never have
return None
try:
m = getattr(obj, name, None)
except Exception:
return None
if inspect.isclass(obj) and not isinstance(m, types.MethodType):
return None
if callable(m):
return m
return None
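# Usage sketch: get_real_method refuses objects that claim to have every
# attribute (the canary check) and only returns callables; dir2 always returns
# a sorted list of plain strings. The Proxy class below is made up.
if __name__ == '__main__':
    class Proxy:
        def __getattr__(self, name):       # pretends every attribute exists
            return lambda: name

    print(get_real_method(Proxy(), 'do_thing'))  # -> None, the canary attribute "exists"
    print(get_real_method([], 'append'))         # -> the bound list.append method
    print(dir2(42)[:3])                          # e.g. ['__abs__', '__add__', '__and__']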

View File

@@ -0,0 +1,71 @@
# coding: utf-8
"""
Utilities for dealing with text encodings
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import locale
import warnings
# to deal with the possibility of sys.std* not being a stream at all
def get_stream_enc(stream, default=None):
"""Return the given stream's encoding or a default.
There are cases where ``sys.std*`` might not actually be a stream, so
check for the encoding attribute prior to returning it, and return
a default if it doesn't exist or evaluates as False. ``default``
is None if not provided.
"""
if not hasattr(stream, 'encoding') or not stream.encoding:
return default
else:
return stream.encoding
# Less conservative replacement for sys.getdefaultencoding, that will try
# to match the environment.
# Defined here as central function, so if we find better choices, we
# won't need to make changes all over IPython.
def getdefaultencoding(prefer_stream=True):
"""Return IPython's guess for the default encoding for bytes as text.
If prefer_stream is True (default), asks for stdin.encoding first,
to match the calling Terminal, but that is often None for subprocesses.
Then fall back on locale.getpreferredencoding(),
which should be a sensible platform default (that respects LANG environment),
and finally to sys.getdefaultencoding() which is the most conservative option,
and usually UTF8 as of Python 3.
"""
enc = None
if prefer_stream:
enc = get_stream_enc(sys.stdin)
if not enc or enc=='ascii':
try:
# There are reports of getpreferredencoding raising errors
# in some cases, which may well be fixed, but let's be conservative here.
enc = locale.getpreferredencoding()
except Exception:
pass
enc = enc or sys.getdefaultencoding()
# On windows `cp0` can be returned to indicate that there is no code page.
# Since cp0 is an invalid encoding return instead cp1252 which is the
# Western European default.
if enc == 'cp0':
warnings.warn(
"Invalid code page cp0 detected - using cp1252 instead."
"If cp1252 is incorrect please ensure a valid code page "
"is defined for the process.", RuntimeWarning)
return 'cp1252'
return enc
DEFAULT_ENCODING = getdefaultencoding()
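# Usage sketch: DEFAULT_ENCODING is what most callers use; get_stream_enc falls
# back to the given default when the "stream" has no usable encoding attribute.
if __name__ == '__main__':
    import io
    print(DEFAULT_ENCODING)                                 # e.g. 'utf-8'
    print(get_stream_enc(io.StringIO(), default='utf-8'))   # -> 'utf-8'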

View File

@@ -0,0 +1,6 @@
from warnings import warn
warn("IPython.utils.eventful has moved to traitlets.eventful", stacklevel=2)
from traitlets.eventful import *

View File

@@ -0,0 +1,92 @@
# encoding: utf-8
"""
Utilities for working with stack frames.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def extract_vars(*names,**kw):
"""Extract a set of variables by name from another frame.
Parameters
----------
*names : str
One or more variable names which will be extracted from the caller's
frame.
**kw : integer, optional
How many frames in the stack to walk when looking for your variables.
The default is 0, which will use the frame where the call was made.
Examples
--------
::
In [2]: def func(x):
...: y = 1
...: print(sorted(extract_vars('x','y').items()))
...:
In [3]: func('hello')
[('x', 'hello'), ('y', 1)]
"""
depth = kw.get('depth',0)
callerNS = sys._getframe(depth+1).f_locals
return dict((k,callerNS[k]) for k in names)
def extract_vars_above(*names):
"""Extract a set of variables by name from another frame.
Similar to extract_vars(), but with a fixed depth of 1, so that names
are extracted exactly from above the caller.
This is simply a convenience function so that the very common case (for us)
of skipping exactly 1 frame doesn't have to construct a special dict for
keyword passing."""
callerNS = sys._getframe(2).f_locals
return dict((k,callerNS[k]) for k in names)
def debugx(expr,pre_msg=''):
"""Print the value of an expression from the caller's frame.
Takes an expression, evaluates it in the caller's frame and prints both
the given expression and the resulting value (as well as a debug mark
indicating the name of the calling function). The input must be of a form
suitable for eval().
An optional message can be passed, which will be prepended to the printed
expr->value pair."""
cf = sys._getframe(1)
print('[DBG:%s] %s%s -> %r' % (cf.f_code.co_name,pre_msg,expr,
eval(expr,cf.f_globals,cf.f_locals)))
# deactivate it by uncommenting the following line, which makes it a no-op
#def debugx(expr,pre_msg=''): pass
def extract_module_locals(depth=0):
"""Returns (module, locals) of the function `depth` frames away from the caller"""
f = sys._getframe(depth + 1)
global_ns = f.f_globals
module = sys.modules[global_ns['__name__']]
return (module, f.f_locals)
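# Usage sketch: debugx prints "[DBG:<caller>] expr -> value"; extract_module_locals
# pairs the caller's module with its local namespace.
if __name__ == '__main__':
    def demo():
        x = 41
        debugx('x + 1')                    # -> [DBG:demo] x + 1 -> 42
        mod, loc = extract_module_locals()
        print(mod.__name__, sorted(loc))   # caller's module name and local names

    demo()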

View File

@@ -0,0 +1,29 @@
# encoding: utf-8
"""Generic functions for extending IPython.
"""
from IPython.core.error import TryNext
from functools import singledispatch
@singledispatch
def inspect_object(obj):
"""Called when you do obj?"""
raise TryNext
@singledispatch
def complete_object(obj, prev_completions):
"""Custom completer dispatching for python objects.
Parameters
----------
obj : object
The object to complete.
prev_completions : list
List of attributes discovered so far.
This should return the list of attributes in obj. If you only wish to
add to the attributes already discovered normally, return
own_attrs + prev_completions.
"""
raise TryNext
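# Usage sketch: registering a completer for one type via singledispatch; the
# Table class is made up. Unregistered types keep raising TryNext, so IPython
# falls back to its default completion machinery.
if __name__ == '__main__':
    class Table:
        columns = ('id', 'name')

    @complete_object.register(Table)
    def _complete_table(obj, prev_completions):
        return list(obj.columns) + prev_completions

    print(complete_object(Table(), ['shape']))   # -> ['id', 'name', 'shape']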

View File

@@ -0,0 +1,39 @@
# encoding: utf-8
"""
A simple utility to import something by its string name.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
def import_item(name):
"""Import and return ``bar`` given the string ``foo.bar``.
Calling ``bar = import_item("foo.bar")`` is the functional equivalent of
executing the code ``from foo import bar``.
Parameters
----------
name : string
The fully qualified name of the module/package being imported.
Returns
-------
mod : module object
The module that was imported.
"""
parts = name.rsplit('.', 1)
if len(parts) == 2:
# called with 'foo.bar....'
package, obj = parts
module = __import__(package, fromlist=[obj])
try:
pak = getattr(module, obj)
except AttributeError as e:
raise ImportError('No module named %s' % obj) from e
return pak
else:
# called with un-dotted string
return __import__(parts[0])
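# Usage sketch: with a dotted name import_item returns the attribute, with a
# bare name it returns the module itself.
if __name__ == '__main__':
    dumps = import_item('json.dumps')     # equivalent to: from json import dumps
    print(dumps({'a': 1}))                # -> {"a": 1}
    print(import_item('json').__name__)   # -> json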

View File

@@ -0,0 +1,157 @@
# encoding: utf-8
"""
IO related utilities.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import atexit
import os
import sys
import tempfile
import warnings
from pathlib import Path
from warnings import warn
from IPython.utils.decorators import undoc
from .capture import CapturedIO, capture_output
# setup stdin/stdout/stderr to sys.stdin/sys.stdout/sys.stderr
devnull = open(os.devnull, "w", encoding="utf-8")
atexit.register(devnull.close)
class Tee(object):
"""A class to duplicate an output stream to stdout/err.
This works in a manner very similar to the Unix 'tee' command.
When the object is closed or deleted, it closes the original file given to
it for duplication.
"""
# Inspired by:
# http://mail.python.org/pipermail/python-list/2007-May/442737.html
def __init__(self, file_or_name, mode="w", channel='stdout'):
"""Construct a new Tee object.
Parameters
----------
file_or_name : filename or open filehandle (writable)
File that will be duplicated
mode : optional, valid mode for open().
If a filename was given, open with this mode.
channel : str, one of ['stdout', 'stderr']
"""
if channel not in ['stdout', 'stderr']:
raise ValueError('Invalid channel spec %s' % channel)
if hasattr(file_or_name, 'write') and hasattr(file_or_name, 'seek'):
self.file = file_or_name
else:
encoding = None if "b" in mode else "utf-8"
self.file = open(file_or_name, mode, encoding=encoding)
self.channel = channel
self.ostream = getattr(sys, channel)
setattr(sys, channel, self)
self._closed = False
def close(self):
"""Close the file and restore the channel."""
self.flush()
setattr(sys, self.channel, self.ostream)
self.file.close()
self._closed = True
def write(self, data):
"""Write data to both channels."""
self.file.write(data)
self.ostream.write(data)
self.ostream.flush()
def flush(self):
"""Flush both channels."""
self.file.flush()
self.ostream.flush()
def __del__(self):
if not self._closed:
self.close()
def ask_yes_no(prompt, default=None, interrupt=None):
"""Asks a question and returns a boolean (y/n) answer.
If default is given (one of 'y','n'), it is used if the user input is
empty. If interrupt is given (one of 'y','n'), it is used if the user
presses Ctrl-C. Otherwise the question is repeated until an answer is
given.
An EOF is treated as the default answer. If there is no default, an
exception is raised to prevent infinite loops.
Valid answers are: y/yes/n/no (match is not case sensitive)."""
answers = {'y':True,'n':False,'yes':True,'no':False}
ans = None
while ans not in answers.keys():
try:
ans = input(prompt+' ').lower()
if not ans: # response was an empty string
ans = default
except KeyboardInterrupt:
if interrupt:
ans = interrupt
print("\r")
except EOFError:
if default in answers.keys():
ans = default
print()
else:
raise
return answers[ans]
def temp_pyfile(src, ext='.py'):
"""Make a temporary python file, return filename and filehandle.
Parameters
----------
src : string or list of strings (no need for ending newlines if list)
Source code to be written to the file.
ext : optional, string
Extension for the generated file.
Returns
-------
(filename, open filehandle)
It is the caller's responsibility to close the open file and unlink it.
"""
fname = tempfile.mkstemp(ext)[1]
with open(Path(fname), "w", encoding="utf-8") as f:
f.write(src)
f.flush()
return fname
@undoc
def raw_print(*args, **kw):
"""DEPRECATED: Raw print to sys.__stdout__, otherwise identical interface to print()."""
warn("IPython.utils.io.raw_print has been deprecated since IPython 7.0", DeprecationWarning, stacklevel=2)
print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
file=sys.__stdout__)
sys.__stdout__.flush()
@undoc
def raw_print_err(*args, **kw):
"""DEPRECATED: Raw print to sys.__stderr__, otherwise identical interface to print()."""
warn("IPython.utils.io.raw_print_err has been deprecated since IPython 7.0", DeprecationWarning, stacklevel=2)
print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
file=sys.__stderr__)
sys.__stderr__.flush()
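# Usage sketch: Tee mirrors stdout into a log file until closed, then restores
# the original stream; the log path below is created just for the example.
if __name__ == "__main__":
    import tempfile
    log_path = os.path.join(tempfile.mkdtemp(), "session.log")
    tee = Tee(log_path, channel="stdout")
    print("this line goes to the console and to session.log")
    tee.close()                                # restores sys.stdout
    with open(log_path, encoding="utf-8") as f:
        sys.stderr.write(f.read())             # show what was captured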

View File

@@ -0,0 +1,379 @@
# encoding: utf-8
"""A dict subclass that supports attribute style access.
Authors:
* Fernando Perez (original)
* Brian Granger (refactoring to a dict subclass)
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
__all__ = ['Struct']
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class Struct(dict):
"""A dict subclass with attribute style access.
This dict subclass has a few extra features:
* Attribute style access.
* Protection of class members (like keys, items) when using attribute
style access.
* The ability to restrict assignment to only existing keys.
* Intelligent merging.
* Overloaded operators.
"""
_allownew = True
def __init__(self, *args, **kw):
"""Initialize with a dictionary, another Struct, or data.
Parameters
----------
*args : dict, Struct
Initialize with one dict or Struct
**kw : dict
Initialize with key, value pairs.
Examples
--------
>>> s = Struct(a=10,b=30)
>>> s.a
10
>>> s.b
30
>>> s2 = Struct(s,c=30)
>>> sorted(s2.keys())
['a', 'b', 'c']
"""
object.__setattr__(self, '_allownew', True)
dict.__init__(self, *args, **kw)
def __setitem__(self, key, value):
"""Set an item with check for allownew.
Examples
--------
>>> s = Struct()
>>> s['a'] = 10
>>> s.allow_new_attr(False)
>>> s['a'] = 10
>>> s['a']
10
>>> try:
... s['b'] = 20
... except KeyError:
... print('this is not allowed')
...
this is not allowed
"""
if not self._allownew and key not in self:
raise KeyError(
"can't create new attribute %s when allow_new_attr(False)" % key)
dict.__setitem__(self, key, value)
def __setattr__(self, key, value):
"""Set an attr with protection of class members.
This calls :meth:`self.__setitem__` but convert :exc:`KeyError` to
:exc:`AttributeError`.
Examples
--------
>>> s = Struct()
>>> s.a = 10
>>> s.a
10
>>> try:
... s.get = 10
... except AttributeError:
... print("you can't set a class member")
...
you can't set a class member
"""
# If key is an str it might be a class member or instance var
if isinstance(key, str):
# I can't simply call hasattr here because it calls getattr, which
# calls self.__getattr__, which returns True for keys in
# self._data. But I only want keys in the class and in
# self.__dict__
if key in self.__dict__ or hasattr(Struct, key):
raise AttributeError(
'attr %s is a protected member of class Struct.' % key
)
try:
self.__setitem__(key, value)
except KeyError as e:
raise AttributeError(e) from e
def __getattr__(self, key):
"""Get an attr by calling :meth:`dict.__getitem__`.
Like :meth:`__setattr__`, this method converts :exc:`KeyError` to
:exc:`AttributeError`.
Examples
--------
>>> s = Struct(a=10)
>>> s.a
10
>>> type(s.get)
<...method'>
>>> try:
... s.b
... except AttributeError:
... print("I don't have that key")
...
I don't have that key
"""
try:
result = self[key]
except KeyError as e:
raise AttributeError(key) from e
else:
return result
def __iadd__(self, other):
"""s += s2 is a shorthand for s.merge(s2).
Examples
--------
>>> s = Struct(a=10,b=30)
>>> s2 = Struct(a=20,c=40)
>>> s += s2
>>> sorted(s.keys())
['a', 'b', 'c']
"""
self.merge(other)
return self
def __add__(self,other):
"""s + s2 -> New Struct made from s.merge(s2).
Examples
--------
>>> s1 = Struct(a=10,b=30)
>>> s2 = Struct(a=20,c=40)
>>> s = s1 + s2
>>> sorted(s.keys())
['a', 'b', 'c']
"""
sout = self.copy()
sout.merge(other)
return sout
def __sub__(self,other):
"""s1 - s2 -> remove keys in s2 from s1.
Examples
--------
>>> s1 = Struct(a=10,b=30)
>>> s2 = Struct(a=40)
>>> s = s1 - s2
>>> s
{'b': 30}
"""
sout = self.copy()
sout -= other
return sout
def __isub__(self,other):
"""Inplace remove keys from self that are in other.
Examples
--------
>>> s1 = Struct(a=10,b=30)
>>> s2 = Struct(a=40)
>>> s1 -= s2
>>> s1
{'b': 30}
"""
for k in other.keys():
if k in self:
del self[k]
return self
def __dict_invert(self, data):
"""Helper function for merge.
Takes a dictionary whose values are lists and returns a dict with
the elements of each list as keys and the original keys as values.
"""
outdict = {}
for k,lst in data.items():
if isinstance(lst, str):
lst = lst.split()
for entry in lst:
outdict[entry] = k
return outdict
def dict(self):
return self
def copy(self):
"""Return a copy as a Struct.
Examples
--------
>>> s = Struct(a=10,b=30)
>>> s2 = s.copy()
>>> type(s2) is Struct
True
"""
return Struct(dict.copy(self))
def hasattr(self, key):
"""hasattr function available as a method.
Implemented like has_key.
Examples
--------
>>> s = Struct(a=10)
>>> s.hasattr('a')
True
>>> s.hasattr('b')
False
>>> s.hasattr('get')
False
"""
return key in self
def allow_new_attr(self, allow = True):
"""Set whether new attributes can be created in this Struct.
This can be used to catch typos by verifying that the attribute user
tries to change already exists in this Struct.
"""
object.__setattr__(self, '_allownew', allow)
def merge(self, __loc_data__=None, __conflict_solve=None, **kw):
"""Merge two Structs with customizable conflict resolution.
This is similar to :meth:`update`, but much more flexible. First, a
dict is made from data+key=value pairs. When merging this dict with
the Struct S, the optional dictionary `__conflict_solve` is used to decide
what to do.
If conflict is not given, the default behavior is to preserve any keys
with their current value (the opposite of the :meth:`update` method's
behavior).
Parameters
----------
__loc_data__ : dict, Struct
The data to merge into self
__conflict_solve : dict
The conflict policy dict. The keys are binary functions used to
resolve the conflict and the values are lists of strings naming
the keys the conflict resolution function applies to. Instead of
a list of strings a space separated string can be used, like
'a b c'.
**kw : dict
Additional key, value pairs to merge in
Notes
-----
The `__conflict_solve` dict is a dictionary of binary functions which will be used to
solve key conflicts. Here is an example::
__conflict_solve = dict(
func1=['a','b','c'],
func2=['d','e']
)
In this case, the function :func:`func1` will be used to resolve
keys 'a', 'b' and 'c' and the function :func:`func2` will be used for
keys 'd' and 'e'. This could also be written as::
__conflict_solve = dict(func1='a b c',func2='d e')
These functions will be called for each key they apply to with the
form::
func1(self['a'], other['a'])
The return value is used as the final merged value.
As a convenience, merge() provides five (the most commonly needed)
pre-defined policies: preserve, update, add, add_flip and add_s. The
easiest explanation is their implementation::
preserve = lambda old,new: old
update = lambda old,new: new
add = lambda old,new: old + new
add_flip = lambda old,new: new + old # note change of order!
add_s = lambda old,new: old + ' ' + new # only for str!
You can use those five words (as strings) as keys instead
of defining them as functions, and the merge method will substitute
the appropriate functions for you.
For more complicated conflict resolution policies, you still need to
construct your own functions.
Examples
--------
This show the default policy:
>>> s = Struct(a=10,b=30)
>>> s2 = Struct(a=20,c=40)
>>> s.merge(s2)
>>> sorted(s.items())
[('a', 10), ('b', 30), ('c', 40)]
Now, show how to specify a conflict dict:
>>> s = Struct(a=10,b=30)
>>> s2 = Struct(a=20,b=40)
>>> conflict = {'update':'a','add':'b'}
>>> s.merge(s2,conflict)
>>> sorted(s.items())
[('a', 20), ('b', 70)]
"""
data_dict = dict(__loc_data__,**kw)
# policies for conflict resolution: two argument functions which return
# the value that will go in the new struct
preserve = lambda old,new: old
update = lambda old,new: new
add = lambda old,new: old + new
add_flip = lambda old,new: new + old # note change of order!
add_s = lambda old,new: old + ' ' + new
# default policy is to keep current keys when there's a conflict
conflict_solve = dict.fromkeys(self, preserve)
# the conflict_solve dictionary is given by the user 'inverted': we
# need a name-function mapping, it comes as a function -> names
# dict. Make a local copy (b/c we'll make changes), replace user
# strings for the five builtin policies and invert it.
if __conflict_solve:
inv_conflict_solve_user = __conflict_solve.copy()
for name, func in [('preserve',preserve), ('update',update),
('add',add), ('add_flip',add_flip),
('add_s',add_s)]:
if name in inv_conflict_solve_user.keys():
inv_conflict_solve_user[func] = inv_conflict_solve_user[name]
del inv_conflict_solve_user[name]
conflict_solve.update(self.__dict_invert(inv_conflict_solve_user))
for key in data_dict:
if key not in self:
self[key] = data_dict[key]
else:
self[key] = conflict_solve[key](self[key],data_dict[key])

View File

@@ -0,0 +1,5 @@
from warnings import warn
warn("IPython.utils.jsonutil has moved to jupyter_client.jsonutil", stacklevel=2)
from jupyter_client.jsonutil import *

View File

@@ -0,0 +1,5 @@
from warnings import warn
warn("IPython.utils.localinterfaces has moved to jupyter_client.localinterfaces", stacklevel=2)
from jupyter_client.localinterfaces import *

View File

@@ -0,0 +1,6 @@
from warnings import warn
warn("IPython.utils.log has moved to traitlets.log", stacklevel=2)
from traitlets.log import *

View File

@@ -0,0 +1,71 @@
"""Utility functions for finding modules
Utility functions for finding modules on sys.path.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2011, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib imports
import importlib
import os
import sys
# Third-party imports
# Our own imports
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def find_mod(module_name):
"""
Find module `module_name` on sys.path, and return the path to module `module_name`.
- If `module_name` refers to a module directory, then return path to __init__ file.
- If `module_name` is a directory without an __init__ file, return None.
- If the module is missing or does not have a `.py` or `.pyw` extension, return None.
- Note that we are not interested in running bytecode.
- Otherwise, return the full path of the module.
Parameters
----------
module_name : str
Returns
-------
module_path : str
Path to module `module_name`, its __init__.py, or None,
depending on above conditions.
"""
spec = importlib.util.find_spec(module_name)
module_path = spec.origin
if module_path is None:
if spec.loader in sys.meta_path:
return spec.loader
return None
else:
split_path = module_path.split(".")
if split_path[-1] in ["py", "pyw"]:
return module_path
else:
return None
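# Usage sketch: find_mod returns the source path of a pure-Python module and
# None for built-in/extension modules; exact paths depend on the installation.
if __name__ == '__main__':
    print(find_mod('os'))      # e.g. '/usr/lib/python3.x/os.py'
    print(find_mod('math'))    # usually None: math is a built-in/extension module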

View File

@@ -0,0 +1,105 @@
"""
Tools to open .py files as Unicode, using the encoding specified within the file,
as per PEP 263.
Much of the code is taken from the tokenize module in Python 3.2.
"""
import io
from io import TextIOWrapper, BytesIO
from pathlib import Path
import re
from tokenize import open, detect_encoding
cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)", re.UNICODE)
cookie_comment_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE)
def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
"""Converts a bytes string with python source code to unicode.
Unicode strings are passed through unchanged. Byte strings are checked
for the python source file encoding cookie to determine encoding.
txt can be either a bytes buffer or a string containing the source
code.
"""
if isinstance(txt, str):
return txt
if isinstance(txt, bytes):
buffer = BytesIO(txt)
else:
buffer = txt
try:
encoding, _ = detect_encoding(buffer.readline)
except SyntaxError:
encoding = "ascii"
buffer.seek(0)
with TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True) as text:
text.mode = 'r'
if skip_encoding_cookie:
return u"".join(strip_encoding_cookie(text))
else:
return text.read()
def strip_encoding_cookie(filelike):
"""Generator to pull lines from a text-mode file, skipping the encoding
cookie if it is found in the first two lines.
"""
it = iter(filelike)
try:
first = next(it)
if not cookie_comment_re.match(first):
yield first
second = next(it)
if not cookie_comment_re.match(second):
yield second
except StopIteration:
return
for line in it:
yield line
def read_py_file(filename, skip_encoding_cookie=True):
"""Read a Python file, using the encoding declared inside the file.
Parameters
----------
filename : str
The path to the file to read.
skip_encoding_cookie : bool
If True (the default), and the encoding declaration is found in the first
two lines, that line will be excluded from the output.
Returns
-------
A unicode string containing the contents of the file.
"""
filepath = Path(filename)
with open(filepath) as f: # the open function defined in this module.
if skip_encoding_cookie:
return "".join(strip_encoding_cookie(f))
else:
return f.read()
def read_py_url(url, errors='replace', skip_encoding_cookie=True):
"""Read a Python file from a URL, using the encoding declared inside the file.
Parameters
----------
url : str
The URL from which to fetch the file.
errors : str
How to handle decoding errors in the file. Options are the same as for
bytes.decode(), but here 'replace' is the default.
skip_encoding_cookie : bool
If True (the default), and the encoding declaration is found in the first
two lines, that line will be excluded from the output.
Returns
-------
A unicode string containing the contents of the file.
"""
# Deferred import for faster start
from urllib.request import urlopen
response = urlopen(url)
buffer = io.BytesIO(response.read())
return source_to_unicode(buffer, errors, skip_encoding_cookie)
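# Usage sketch: source_to_unicode decodes bytes using a PEP 263 coding cookie
# and, by default, drops the cookie line itself.
if __name__ == '__main__':
    src = "# -*- coding: latin-1 -*-\nname = 'caf\xe9'\n".encode('latin-1')
    print(source_to_unicode(src))                           # cookie line stripped
    print(source_to_unicode(src, skip_encoding_cookie=False))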

View File

@@ -0,0 +1,392 @@
# encoding: utf-8
"""
Utilities for path handling.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import sys
import errno
import shutil
import random
import glob
from warnings import warn
from IPython.utils.process import system
from IPython.utils.decorators import undoc
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
fs_encoding = sys.getfilesystemencoding()
def _writable_dir(path):
"""Whether `path` is a directory, to which the user has write access."""
return os.path.isdir(path) and os.access(path, os.W_OK)
if sys.platform == 'win32':
def _get_long_path_name(path):
"""Get a long path name (expand ~) on Windows using ctypes.
Examples
--------
>>> get_long_path_name('c:\\\\docume~1')
'c:\\\\Documents and Settings'
"""
try:
import ctypes
except ImportError as e:
raise ImportError('you need to have ctypes installed for this to work') from e
_GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
_GetLongPathName.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p,
ctypes.c_uint ]
buf = ctypes.create_unicode_buffer(260)
rv = _GetLongPathName(path, buf, 260)
if rv == 0 or rv > 260:
return path
else:
return buf.value
else:
def _get_long_path_name(path):
"""Dummy no-op."""
return path
def get_long_path_name(path):
"""Expand a path into its long form.
On Windows this expands short (8.3) path components containing '~' into their long form. On other platforms, it is
a null operation.
"""
return _get_long_path_name(path)
def compress_user(path):
"""Reverse of :func:`os.path.expanduser`
"""
home = os.path.expanduser('~')
if path.startswith(home):
path = "~" + path[len(home):]
return path
def get_py_filename(name):
"""Return a valid python filename in the current directory.
If the given name is not a file, it adds '.py' and searches again.
Raises IOError with an informative message if the file isn't found.
"""
name = os.path.expanduser(name)
if not os.path.isfile(name) and not name.endswith('.py'):
name += '.py'
if os.path.isfile(name):
return name
else:
raise IOError('File `%r` not found.' % name)
def filefind(filename: str, path_dirs=None) -> str:
"""Find a file by looking through a sequence of paths.
This iterates through a sequence of paths looking for a file and returns
the full, absolute path of the first occurrence of the file. If no set of
path dirs is given, the filename is tested as is, after running through
:func:`expandvars` and :func:`expanduser`. Thus a simple call::
filefind('myfile.txt')
will find the file in the current working dir, but::
filefind('~/myfile.txt')
Will find the file in the users home directory. This function does not
automatically try any paths, such as the cwd or the user's home directory.
Parameters
----------
filename : str
The filename to look for.
path_dirs : str, None or sequence of str
The sequence of paths to look for the file in. If None, the filename
needs to be absolute or be in the cwd. If a string, the string is
put into a sequence and then searched. If a sequence, walk through
each element and join with ``filename``, calling :func:`expandvars`
and :func:`expanduser` before testing for existence.
Returns
-------
path : str
returns absolute path to file.
Raises
------
IOError
"""
# If paths are quoted, abspath gets confused, strip them...
filename = filename.strip('"').strip("'")
# If the input is an absolute path, just check it exists
if os.path.isabs(filename) and os.path.isfile(filename):
return filename
if path_dirs is None:
path_dirs = ("",)
elif isinstance(path_dirs, str):
path_dirs = (path_dirs,)
for path in path_dirs:
if path == '.': path = os.getcwd()
testname = expand_path(os.path.join(path, filename))
if os.path.isfile(testname):
return os.path.abspath(testname)
raise IOError("File %r does not exist in any of the search paths: %r" %
(filename, path_dirs) )
class HomeDirError(Exception):
pass
def get_home_dir(require_writable=False) -> str:
"""Return the 'home' directory, as a unicode string.
Uses os.path.expanduser('~'), and checks for writability.
See stdlib docs for how this is determined.
For Python <3.8, $HOME is first priority on *ALL* platforms.
For Python >=3.8 on Windows, %HOME% is no longer considered.
Parameters
----------
require_writable : bool [default: False]
if True:
guarantees the return value is a writable directory, otherwise
raises HomeDirError
if False:
The path is resolved, but it is not guaranteed to exist or be writable.
"""
homedir = os.path.expanduser('~')
# Next line will make things work even when /home/ is a symlink to
# /usr/home as it is on FreeBSD, for example
homedir = os.path.realpath(homedir)
if not _writable_dir(homedir) and os.name == 'nt':
# expanduser failed, use the registry to get the 'My Documents' folder.
try:
import winreg as wreg
with wreg.OpenKey(
wreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
) as key:
homedir = wreg.QueryValueEx(key,'Personal')[0]
except:
pass
if (not require_writable) or _writable_dir(homedir):
assert isinstance(homedir, str), "Homedir should be unicode not bytes"
return homedir
else:
raise HomeDirError('%s is not a writable dir, '
'set $HOME environment variable to override' % homedir)
def get_xdg_dir():
"""Return the XDG_CONFIG_HOME, if it is defined and exists, else None.
This is only for non-OS X posix (Linux,Unix,etc.) systems.
"""
env = os.environ
if os.name == "posix":
# Linux, Unix, AIX, etc.
# use ~/.config if empty OR not set
xdg = env.get("XDG_CONFIG_HOME", None) or os.path.join(get_home_dir(), '.config')
if xdg and _writable_dir(xdg):
assert isinstance(xdg, str)
return xdg
return None
def get_xdg_cache_dir():
"""Return the XDG_CACHE_HOME, if it is defined and exists, else None.
This is only for non-OS X posix (Linux,Unix,etc.) systems.
"""
env = os.environ
if os.name == "posix":
# Linux, Unix, AIX, etc.
# use ~/.cache if empty OR not set
xdg = env.get("XDG_CACHE_HOME", None) or os.path.join(get_home_dir(), '.cache')
if xdg and _writable_dir(xdg):
assert isinstance(xdg, str)
return xdg
return None
def expand_path(s):
"""Expand $VARS and ~names in a string, like a shell
:Examples:
In [2]: os.environ['FOO']='test'
In [3]: expand_path('variable FOO is $FOO')
Out[3]: 'variable FOO is test'
"""
# This is a pretty subtle hack. When expanduser is given a UNC path
# on Windows (\\server\share$\%username%), os.path.expandvars removes
# the $ to get (\\server\share\%username%). I think it considers $
# alone an empty var. But we need the $ to remain there (it indicates
# a hidden share).
if os.name=='nt':
s = s.replace('$\\', 'IPYTHON_TEMP')
s = os.path.expandvars(os.path.expanduser(s))
if os.name=='nt':
s = s.replace('IPYTHON_TEMP', '$\\')
return s
def unescape_glob(string):
"""Unescape glob pattern in `string`."""
def unescape(s):
for pattern in '*[]!?':
s = s.replace(r'\{0}'.format(pattern), pattern)
return s
return '\\'.join(map(unescape, string.split('\\\\')))
def shellglob(args):
"""
Do glob expansion for each element in `args` and return a flattened list.
Unmatched glob pattern will remain as-is in the returned list.
"""
expanded = []
# Do not unescape backslash in Windows as it is interpreted as
# path separator:
unescape = unescape_glob if sys.platform != 'win32' else lambda x: x
for a in args:
expanded.extend(glob.glob(a) or [unescape(a)])
return expanded
def target_outdated(target,deps):
"""Determine whether a target is out of date.
target_outdated(target,deps) -> 1/0
deps: list of filenames which MUST exist.
target: single filename which may or may not exist.
If target doesn't exist or is older than any file listed in deps, return
true, otherwise return false.
"""
try:
target_time = os.path.getmtime(target)
except os.error:
return 1
for dep in deps:
dep_time = os.path.getmtime(dep)
if dep_time > target_time:
#print "For target",target,"Dep failed:",dep # dbg
#print "times (dep,tar):",dep_time,target_time # dbg
return 1
return 0
def target_update(target,deps,cmd):
"""Update a target with a given command given a list of dependencies.
target_update(target,deps,cmd) -> runs cmd if target is outdated.
This is just a wrapper around target_outdated() which calls the given
command if target is outdated."""
if target_outdated(target,deps):
system(cmd)
ENOLINK = 1998
def link(src, dst):
"""Hard links ``src`` to ``dst``, returning 0 or errno.
Note that the special errno ``ENOLINK`` will be returned if ``os.link`` isn't
supported by the operating system.
"""
if not hasattr(os, "link"):
return ENOLINK
link_errno = 0
try:
os.link(src, dst)
except OSError as e:
link_errno = e.errno
return link_errno
def link_or_copy(src, dst):
"""Attempts to hardlink ``src`` to ``dst``, copying if the link fails.
Attempts to maintain the semantics of ``shutil.copy``.
Because ``os.link`` does not overwrite files, a unique temporary file
will be used if the target already exists, then that file will be moved
into place.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
link_errno = link(src, dst)
if link_errno == errno.EEXIST:
if os.stat(src).st_ino == os.stat(dst).st_ino:
# dst is already a hard link to the correct file, so we don't need
# to do anything else. If we try to link and rename the file
# anyway, we get duplicate files - see http://bugs.python.org/issue21876
return
new_dst = dst + "-temp-%04X" %(random.randint(1, 16**4), )
try:
link_or_copy(src, new_dst)
except:
try:
os.remove(new_dst)
except OSError:
pass
raise
os.rename(new_dst, dst)
elif link_errno != 0:
# Either link isn't supported, or the filesystem doesn't support
# linking, or 'src' and 'dst' are on different filesystems.
shutil.copy(src, dst)
def ensure_dir_exists(path, mode=0o755):
"""ensure that a directory exists
If it doesn't exist, try to create it and protect against a race condition
if another process is doing the same.
The default permissions are 755, which differ from os.makedirs default of 777.
"""
if not os.path.exists(path):
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno != errno.EEXIST:
raise
elif not os.path.isdir(path):
raise IOError("%r exists but is not a directory" % path)
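# Usage sketch: ensure_dir_exists is a race-safe mkdir -p, filefind searches a
# sequence of directories, and compress_user abbreviates the home directory as ~.
# The temporary layout below is created just for the example.
if __name__ == '__main__':
    import tempfile
    base = tempfile.mkdtemp()
    ensure_dir_exists(os.path.join(base, "conf"))
    cfg = os.path.join(base, "conf", "app.cfg")
    with open(cfg, "w") as f:
        f.write("x = 1\n")
    print(filefind("app.cfg", [os.path.join(base, "conf")]))  # absolute path to app.cfg
    print(compress_user(os.path.expanduser("~/.config")))     # -> '~/.config'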

View File

@@ -0,0 +1,69 @@
# encoding: utf-8
"""
Utilities for working with external processes.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import shutil
import sys
if sys.platform == 'win32':
from ._process_win32 import system, getoutput, arg_split, check_pid
elif sys.platform == 'cli':
from ._process_cli import system, getoutput, arg_split, check_pid
else:
from ._process_posix import system, getoutput, arg_split, check_pid
from ._process_common import getoutputerror, get_output_error_code, process_handler
class FindCmdError(Exception):
pass
def find_cmd(cmd):
"""Find absolute path to executable cmd in a cross platform manner.
This function tries to determine the full path to a command line program
using `which` on Unix/Linux/OS X and `win32api` on Windows. Most of the
time it will use the version that is first on the users `PATH`.
Warning, don't use this to find IPython command line programs as there
is a risk you will find the wrong one. Instead find those using the
following code and looking for the application itself::
import sys
argv = [sys.executable, '-m', 'IPython']
Parameters
----------
cmd : str
The command line program to look for.
"""
path = shutil.which(cmd)
if path is None:
raise FindCmdError('command could not be found: %s' % cmd)
return path
def abbrev_cwd():
""" Return abbreviated version of cwd, e.g. d:mydir """
cwd = os.getcwd().replace('\\','/')
drivepart = ''
tail = cwd
if sys.platform == 'win32':
if len(cwd) < 4:
return cwd
drivepart,tail = os.path.splitdrive(cwd)
parts = tail.split('/')
if len(parts) > 2:
tail = '/'.join(parts[-2:])
return (drivepart + (
cwd == '/' and '/' or tail))
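# Usage sketch: find_cmd wraps shutil.which but raises FindCmdError instead of
# returning None when the program cannot be found.
if __name__ == '__main__':
    print(find_cmd(sys.executable))        # absolute path of the running Python
    try:
        find_cmd('definitely-not-a-command')
    except FindCmdError as e:
        print('not found:', e)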

View File

@@ -0,0 +1,67 @@
# coding: utf-8
"""Compatibility tricks for Python 3. Mainly to do with unicode.
This file is deprecated and will be removed in a future version.
"""
import platform
import builtins as builtin_mod
from .encoding import DEFAULT_ENCODING
def decode(s, encoding=None):
encoding = encoding or DEFAULT_ENCODING
return s.decode(encoding, "replace")
def encode(u, encoding=None):
encoding = encoding or DEFAULT_ENCODING
return u.encode(encoding, "replace")
def cast_unicode(s, encoding=None):
if isinstance(s, bytes):
return decode(s, encoding)
return s
def safe_unicode(e):
"""unicode(e) with various fallbacks. Used for exceptions, which may not be
safe to call unicode() on.
"""
try:
return str(e)
except UnicodeError:
pass
try:
return repr(e)
except UnicodeError:
pass
return "Unrecoverably corrupt evalue"
# keep reference to builtin_mod because the kernel overrides that value
# to forward requests to a frontend.
def input(prompt=""):
return builtin_mod.input(prompt)
def execfile(fname, glob, loc=None, compiler=None):
loc = loc if (loc is not None) else glob
with open(fname, "rb") as f:
compiler = compiler or compile
exec(compiler(f.read(), fname, "exec"), glob, loc)
PYPY = platform.python_implementation() == "PyPy"
# Cython still relies on this as of Dec 28 2019
# See https://github.com/cython/cython/pull/3291 and
# https://github.com/ipython/ipython/issues/12068
def no_code(x, encoding=None):
return x
unicode_to_str = cast_bytes_py2 = no_code

View File

@@ -0,0 +1,17 @@
"""Sentinel class for constants with useful reprs"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
class Sentinel(object):
def __init__(self, name, module, docstring=None):
self.name = name
self.module = module
if docstring:
self.__doc__ = docstring
def __repr__(self):
return str(self.module)+'.'+self.name
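# Usage sketch: a Sentinel works well as a "no argument given" default whose
# repr reads like a qualified constant; UNSET and get() are made up for the demo.
if __name__ == '__main__':
    UNSET = Sentinel('UNSET', __name__, 'Marker for "no value passed".')

    def get(mapping, key, default=UNSET):
        if default is UNSET:
            return mapping[key]            # no fallback given: raise KeyError as usual
        return mapping.get(key, default)

    print(repr(UNSET))                     # -> __main__.UNSET
    print(get({'a': 1}, 'a'))              # -> 1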

View File

@@ -0,0 +1,89 @@
"""A shim module for deprecated imports
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import importlib.abc
import importlib.util
import sys
import types
from importlib import import_module
from .importstring import import_item
class ShimWarning(Warning):
"""A warning to show when a module has moved, and a shim is in its place."""
class ShimImporter(importlib.abc.MetaPathFinder):
"""Import hook for a shim.
This ensures that submodule imports return the real target module,
not a clone that will confuse `is` and `isinstance` checks.
"""
def __init__(self, src, mirror):
self.src = src
self.mirror = mirror
def _mirror_name(self, fullname):
"""get the name of the mirrored module"""
return self.mirror + fullname[len(self.src) :]
def find_spec(self, fullname, path, target=None):
if fullname.startswith(self.src + "."):
mirror_name = self._mirror_name(fullname)
return importlib.util.find_spec(mirror_name)
class ShimModule(types.ModuleType):
def __init__(self, *args, **kwargs):
self._mirror = kwargs.pop("mirror")
src = kwargs.pop("src", None)
if src:
kwargs['name'] = src.rsplit('.', 1)[-1]
super(ShimModule, self).__init__(*args, **kwargs)
# add import hook for descendent modules
if src:
sys.meta_path.append(
ShimImporter(src=src, mirror=self._mirror)
)
@property
def __path__(self):
return []
@property
def __spec__(self):
"""Don't produce __spec__ until requested"""
return import_module(self._mirror).__spec__
def __dir__(self):
return dir(import_module(self._mirror))
@property
def __all__(self):
"""Ensure __all__ is always defined"""
mod = import_module(self._mirror)
try:
return mod.__all__
except AttributeError:
return [name for name in dir(mod) if not name.startswith('_')]
def __getattr__(self, key):
# Use the equivalent of import_item(name), see below
name = "%s.%s" % (self._mirror, key)
try:
return import_item(name)
except ImportError as e:
raise AttributeError(key) from e
def __repr__(self):
# repr on a module can be called during error handling; make sure
# it does not fail, even if the import fails
try:
return self.__getattr__("__repr__")()
except AttributeError:
return f"<ShimModule for {self._mirror!r}>"

View File

@@ -0,0 +1,12 @@
"""DEPRECATED: Function signature objects for callables.
Use the standard library version if available, as it is more up to date.
Fallback on backport otherwise.
"""
import warnings
warnings.warn("{} backport for Python 2 is deprecated in IPython 6, which only supports "
"Python 3. Import directly from standard library `inspect`".format(__name__),
DeprecationWarning, stacklevel=2)
from inspect import BoundArguments, Parameter, Signature, signature

View File

@@ -0,0 +1,68 @@
"""String dispatch class to match regexps and dispatch commands.
"""
# Stdlib imports
import re
# Our own modules
from IPython.core.hooks import CommandChainDispatcher
# Code begins
class StrDispatch(object):
"""Dispatch (lookup) a set of strings / regexps for match.
Example:
>>> dis = StrDispatch()
>>> dis.add_s('hei',34, priority = 4)
>>> dis.add_s('hei',123, priority = 2)
>>> dis.add_re('h.i', 686)
>>> print(list(dis.flat_matches('hei')))
[123, 34, 686]
"""
def __init__(self):
self.strs = {}
self.regexs = {}
def add_s(self, s, obj, priority= 0 ):
""" Adds a target 'string' for dispatching """
chain = self.strs.get(s, CommandChainDispatcher())
chain.add(obj,priority)
self.strs[s] = chain
def add_re(self, regex, obj, priority= 0 ):
""" Adds a target regexp for dispatching """
chain = self.regexs.get(regex, CommandChainDispatcher())
chain.add(obj,priority)
self.regexs[regex] = chain
def dispatch(self, key):
""" Get a seq of Commandchain objects that match key """
if key in self.strs:
yield self.strs[key]
for r, obj in self.regexs.items():
if re.match(r, key):
yield obj
else:
#print "nomatch",key # dbg
pass
def __repr__(self):
return "<Strdispatch %s, %s>" % (self.strs, self.regexs)
def s_matches(self, key):
if key not in self.strs:
return
for el in self.strs[key]:
yield el[1]
def flat_matches(self, key):
""" Yield all 'value' targets, without priority """
for val in self.dispatch(key):
for el in val:
yield el[1] # only value, no priority
return

View File

@@ -0,0 +1,142 @@
# encoding: utf-8
"""
Utilities for getting information about IPython and the system it's running in.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import platform
import pprint
import sys
import subprocess
from IPython.core import release
from IPython.utils import _sysinfo, encoding
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def pkg_commit_hash(pkg_path):
"""Get short form of commit hash given directory `pkg_path`
We get the commit hash from (in order of preference):
* IPython.utils._sysinfo.commit
* git output, if we are in a git repository
If these fail, we return a not-found placeholder tuple
Parameters
----------
pkg_path : str
directory containing package
only used for getting commit from active repo
Returns
-------
hash_from : str
Where we got the hash from - description
hash_str : str
short form of hash
"""
# Try and get commit from written commit text file
if _sysinfo.commit:
return "installation", _sysinfo.commit
# maybe we are in a repository
proc = subprocess.Popen('git rev-parse --short HEAD'.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=pkg_path)
repo_commit, _ = proc.communicate()
if repo_commit:
return 'repository', repo_commit.strip().decode('ascii')
return '(none found)', '<not found>'
def pkg_info(pkg_path):
"""Return dict describing the context of this package
Parameters
----------
pkg_path : str
path containing __init__.py for package
Returns
-------
context : dict
with named parameters of interest
"""
src, hsh = pkg_commit_hash(pkg_path)
return dict(
ipython_version=release.version,
ipython_path=pkg_path,
commit_source=src,
commit_hash=hsh,
sys_version=sys.version,
sys_executable=sys.executable,
sys_platform=sys.platform,
platform=platform.platform(),
os_name=os.name,
default_encoding=encoding.DEFAULT_ENCODING,
)
def get_sys_info():
"""Return useful information about IPython and the system, as a dict."""
p = os.path
path = p.realpath(p.dirname(p.abspath(p.join(__file__, '..'))))
return pkg_info(path)
def sys_info():
"""Return useful information about IPython and the system, as a string.
Examples
--------
::
In [2]: print(sys_info())
{'commit_hash': '144fdae', # random
'commit_source': 'repository',
'ipython_path': '/home/fperez/usr/lib/python2.6/site-packages/IPython',
'ipython_version': '0.11.dev',
'os_name': 'posix',
'platform': 'Linux-2.6.35-22-generic-i686-with-Ubuntu-10.10-maverick',
'sys_executable': '/usr/bin/python',
'sys_platform': 'linux2',
'sys_version': '2.6.6 (r266:84292, Sep 15 2010, 15:52:39) \\n[GCC 4.4.5]'}
"""
return pprint.pformat(get_sys_info())
def num_cpus():
"""DEPRECATED
Return the effective number of CPUs in the system as an integer.
This cross-platform function makes an attempt at finding the total number of
available CPUs in the system, as returned by various underlying system and
python calls.
If it can't find a sensible answer, it returns 1 (though an error *may* make
it return a large positive number that's actually incorrect).
"""
import warnings
warnings.warn(
"`num_cpus` is deprecated since IPython 8.0. Use `os.cpu_count` instead.",
DeprecationWarning,
stacklevel=2,
)
return os.cpu_count() or 1

View File

@@ -0,0 +1,71 @@
# encoding: utf-8
"""
Context managers for adding things to sys.path temporarily.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import warnings
class appended_to_syspath(object):
"""
Deprecated since IPython 8.1, no replacements.
A context for appending a directory to sys.path for a second."""
def __init__(self, dir):
warnings.warn(
"`appended_to_syspath` is deprecated since IPython 8.1, and has no replacements",
DeprecationWarning,
stacklevel=2,
)
self.dir = dir
def __enter__(self):
if self.dir not in sys.path:
sys.path.append(self.dir)
self.added = True
else:
self.added = False
def __exit__(self, type, value, traceback):
if self.added:
try:
sys.path.remove(self.dir)
except ValueError:
pass
# Returning False causes any exceptions to be re-raised.
return False
class prepended_to_syspath(object):
"""A context for prepending a directory to sys.path for a second."""
def __init__(self, dir):
self.dir = dir
def __enter__(self):
if self.dir not in sys.path:
sys.path.insert(0,self.dir)
self.added = True
else:
self.added = False
def __exit__(self, type, value, traceback):
if self.added:
try:
sys.path.remove(self.dir)
except ValueError:
pass
# Returning False causes any exceptions to be re-raised.
return False
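# Usage sketch: prepended_to_syspath makes modules in a directory importable
# only inside the with-block; scratch_mod.py is created just for the example.
if __name__ == '__main__':
    import os
    import tempfile
    d = tempfile.mkdtemp()
    with open(os.path.join(d, "scratch_mod.py"), "w") as f:
        f.write("ANSWER = 42\n")
    with prepended_to_syspath(d):
        import scratch_mod
        print(scratch_mod.ANSWER)          # -> 42
    print(d in sys.path)                   # -> False, the entry was removed again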

View File

@@ -0,0 +1,58 @@
""" This module contains classes - NamedFileInTemporaryDirectory, TemporaryWorkingDirectory.
These classes add extra features such as creating a named file in a temporary directory and
providing a context manager that uses a temporary directory as the working directory.
"""
import os as _os
from pathlib import Path
from tempfile import TemporaryDirectory
class NamedFileInTemporaryDirectory(object):
def __init__(self, filename, mode="w+b", bufsize=-1, add_to_syspath=False, **kwds):
"""
Open a file named `filename` in a temporary directory.
This context manager is preferred over `NamedTemporaryFile` in
stdlib `tempfile` when one needs to reopen the file.
Arguments `mode` and `bufsize` are passed to `open`.
Rest of the arguments are passed to `TemporaryDirectory`.
"""
self._tmpdir = TemporaryDirectory(**kwds)
path = Path(self._tmpdir.name) / filename
encoding = None if "b" in mode else "utf-8"
self.file = open(path, mode, bufsize, encoding=encoding)
def cleanup(self):
self.file.close()
self._tmpdir.cleanup()
__del__ = cleanup
def __enter__(self):
return self.file
def __exit__(self, type, value, traceback):
self.cleanup()
class TemporaryWorkingDirectory(TemporaryDirectory):
"""
Creates a temporary directory and sets the cwd to that directory.
Automatically reverts to previous cwd upon cleanup.
Usage example:
with TemporaryWorkingDirectory() as tmpdir:
...
"""
def __enter__(self):
self.old_wd = Path.cwd()
_os.chdir(self.name)
return super(TemporaryWorkingDirectory, self).__enter__()
def __exit__(self, exc, value, tb):
_os.chdir(self.old_wd)
return super(TemporaryWorkingDirectory, self).__exit__(exc, value, tb)
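# A minimal usage sketch for the two context managers above; the filename
# "data.bin" is hypothetical.
if __name__ == "__main__":
    with NamedFileInTemporaryDirectory("data.bin") as f:
        # The default mode is "w+b", so bytes can be written and the file
        # reopened by name for as long as the directory exists.
        f.write(b"example bytes")
        print(f.name)
    with TemporaryWorkingDirectory() as wd:
        # Inside the block the current working directory is the temp dir.
        print(Path.cwd(), wd)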

View File

@@ -0,0 +1,129 @@
# encoding: utf-8
"""
Utilities for working with terminals.
Authors:
* Brian E. Granger
* Fernando Perez
* Alexander Belchenko (e-mail: bialix AT ukr.net)
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import sys
import warnings
from shutil import get_terminal_size as _get_terminal_size
# This variable is part of the expected API of the module:
ignore_termtitle = True
if os.name == 'posix':
def _term_clear():
os.system('clear')
elif sys.platform == 'win32':
def _term_clear():
os.system('cls')
else:
def _term_clear():
pass
def toggle_set_term_title(val):
"""Control whether set_term_title is active or not.
set_term_title() allows writing to the console titlebar. In embedded
widgets this can cause problems, so this call can be used to toggle it on
or off as needed.
The default state of the module is for the function to be disabled.
Parameters
----------
val : bool
If True, set_term_title() actually writes to the terminal (using the
appropriate platform-specific module). If False, it is a no-op.
"""
global ignore_termtitle
ignore_termtitle = not val
def _set_term_title(*args,**kw):
"""Dummy no-op."""
pass
def _restore_term_title():
pass
def _set_term_title_xterm(title):
""" Change virtual terminal title in xterm-workalikes """
# save the current title to the xterm "stack"
sys.stdout.write('\033[22;0t')
sys.stdout.write('\033]0;%s\007' % title)
def _restore_term_title_xterm():
sys.stdout.write('\033[23;0t')
if os.name == 'posix':
TERM = os.environ.get('TERM','')
if TERM.startswith('xterm'):
_set_term_title = _set_term_title_xterm
_restore_term_title = _restore_term_title_xterm
elif sys.platform == 'win32':
try:
import ctypes
SetConsoleTitleW = ctypes.windll.kernel32.SetConsoleTitleW
SetConsoleTitleW.argtypes = [ctypes.c_wchar_p]
def _set_term_title(title):
"""Set terminal title using ctypes to access the Win32 APIs."""
SetConsoleTitleW(title)
except ImportError:
def _set_term_title(title):
"""Set terminal title using the 'title' command."""
global ignore_termtitle
try:
# Cannot be on network share when issuing system commands
curr = os.getcwd()
os.chdir("C:")
ret = os.system("title " + title)
finally:
os.chdir(curr)
if ret:
# non-zero return code signals error, don't try again
ignore_termtitle = True
def set_term_title(title):
"""Set terminal title using the necessary platform-dependent calls."""
if ignore_termtitle:
return
_set_term_title(title)
def restore_term_title():
"""Restore, if possible, terminal title to the original state"""
if ignore_termtitle:
return
_restore_term_title()
def freeze_term_title():
warnings.warn("This function is deprecated, use toggle_set_term_title()")
global ignore_termtitle
ignore_termtitle = True
def get_terminal_size(defaultx=80, defaulty=25):
return _get_terminal_size((defaultx, defaulty))
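# A minimal usage sketch: titles are only written after
# toggle_set_term_title(True), since the module starts disabled. The title
# string below is purely illustrative.
if __name__ == "__main__":
    toggle_set_term_title(True)
    set_term_title("terminal utils demo")
    restore_term_title()
    print(get_terminal_size())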

View File

@@ -0,0 +1,166 @@
# encoding: utf-8
"""Tests for IPython.utils.capture"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import pytest
from IPython.utils import capture
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
_mime_map = dict(
_repr_png_="image/png",
_repr_jpeg_="image/jpeg",
_repr_svg_="image/svg+xml",
_repr_html_="text/html",
_repr_json_="application/json",
_repr_javascript_="application/javascript",
)
basic_data = {
'image/png' : b'binarydata',
'text/html' : "<b>bold</b>",
}
basic_metadata = {
'image/png' : {
'width' : 10,
'height' : 20,
},
}
full_data = {
'image/png' : b'binarydata',
'image/jpeg' : b'binarydata',
'image/svg+xml' : "<svg>",
'text/html' : "<b>bold</b>",
'application/javascript' : "alert();",
'application/json' : "{}",
}
full_metadata = {
'image/png' : {"png" : "exists"},
'image/jpeg' : {"jpeg" : "exists"},
'image/svg+xml' : {"svg" : "exists"},
'text/html' : {"html" : "exists"},
'application/javascript' : {"js" : "exists"},
'application/json' : {"json" : "exists"},
}
hello_stdout = "hello, stdout"
hello_stderr = "hello, stderr"
#-----------------------------------------------------------------------------
# Test Functions
#-----------------------------------------------------------------------------
@pytest.mark.parametrize("method_mime", _mime_map.items())
def test_rich_output_empty(method_mime):
"""RichOutput with no args"""
rich = capture.RichOutput()
method, mime = method_mime
assert getattr(rich, method)() is None
def test_rich_output():
"""test RichOutput basics"""
data = basic_data
metadata = basic_metadata
rich = capture.RichOutput(data=data, metadata=metadata)
assert rich._repr_html_() == data["text/html"]
assert rich._repr_png_() == (data["image/png"], metadata["image/png"])
assert rich._repr_latex_() is None
assert rich._repr_javascript_() is None
assert rich._repr_svg_() is None
@pytest.mark.parametrize("method_mime", _mime_map.items())
def test_rich_output_no_metadata(method_mime):
"""test RichOutput with no metadata"""
data = full_data
rich = capture.RichOutput(data=data)
method, mime = method_mime
assert getattr(rich, method)() == data[mime]
@pytest.mark.parametrize("method_mime", _mime_map.items())
def test_rich_output_metadata(method_mime):
"""test RichOutput with metadata"""
data = full_data
metadata = full_metadata
rich = capture.RichOutput(data=data, metadata=metadata)
method, mime = method_mime
assert getattr(rich, method)() == (data[mime], metadata[mime])
def test_rich_output_display():
"""test RichOutput.display
This is a bit circular, because we are actually using the capture code we are testing
to test itself.
"""
data = full_data
rich = capture.RichOutput(data=data)
with capture.capture_output() as cap:
rich.display()
assert len(cap.outputs) == 1
rich2 = cap.outputs[0]
assert rich2.data == rich.data
assert rich2.metadata == rich.metadata
def test_capture_output():
"""capture_output works"""
rich = capture.RichOutput(data=full_data)
with capture.capture_output() as cap:
print(hello_stdout, end="")
print(hello_stderr, end="", file=sys.stderr)
rich.display()
assert hello_stdout == cap.stdout
assert hello_stderr == cap.stderr
def test_capture_output_no_stdout():
"""test capture_output(stdout=False)"""
rich = capture.RichOutput(data=full_data)
with capture.capture_output(stdout=False) as cap:
print(hello_stdout, end="")
print(hello_stderr, end="", file=sys.stderr)
rich.display()
assert "" == cap.stdout
assert hello_stderr == cap.stderr
assert len(cap.outputs) == 1
def test_capture_output_no_stderr():
"""test capture_output(stderr=False)"""
rich = capture.RichOutput(data=full_data)
# add nested capture_output so stderr doesn't make it to the test output
with capture.capture_output(), capture.capture_output(stderr=False) as cap:
print(hello_stdout, end="")
print(hello_stderr, end="", file=sys.stderr)
rich.display()
assert hello_stdout == cap.stdout
assert "" == cap.stderr
assert len(cap.outputs) == 1
def test_capture_output_no_display():
"""test capture_output(display=False)"""
rich = capture.RichOutput(data=full_data)
with capture.capture_output(display=False) as cap:
print(hello_stdout, end="")
print(hello_stderr, end="", file=sys.stderr)
rich.display()
assert hello_stdout == cap.stdout
assert hello_stderr == cap.stderr
assert cap.outputs == []

View File

@@ -0,0 +1,10 @@
from IPython.utils import decorators
def test_flag_calls():
@decorators.flag_calls
def f():
pass
assert not f.called
f()
assert f.called

View File

@@ -0,0 +1,7 @@
from IPython.utils.syspathcontext import appended_to_syspath
import pytest
def test_append_deprecated():
with pytest.warns(DeprecationWarning):
appended_to_syspath(".")

View File

@@ -0,0 +1,67 @@
from IPython.utils.dir2 import dir2
import pytest
class Base(object):
x = 1
z = 23
def test_base():
res = dir2(Base())
assert "x" in res
assert "z" in res
assert "y" not in res
assert "__class__" in res
assert res.count("x") == 1
assert res.count("__class__") == 1
def test_SubClass():
class SubClass(Base):
y = 2
res = dir2(SubClass())
assert "y" in res
assert res.count("y") == 1
assert res.count("x") == 1
def test_SubClass_with_trait_names_attr():
# usecase: trait_names is used in a class describing psychological classification
class SubClass(Base):
y = 2
trait_names = 44
res = dir2(SubClass())
assert "trait_names" in res
def test_misbehaving_object_without_trait_names():
# dir2 shouldn't raise even when objects are dumb and raise
# something other than AttributeError on bad getattr.
class MisbehavingGetattr:
def __getattr__(self, attr):
raise KeyError("I should be caught")
def some_method(self):
return True
class SillierWithDir(MisbehavingGetattr):
def __dir__(self):
return ['some_method']
for bad_klass in (MisbehavingGetattr, SillierWithDir):
obj = bad_klass()
assert obj.some_method()
with pytest.raises(KeyError):
obj.other_method()
res = dir2(obj)
assert "some_method" in res

View File

@@ -0,0 +1,20 @@
# encoding: utf-8
def test_import_coloransi():
from IPython.utils import coloransi
def test_import_generics():
from IPython.utils import generics
def test_import_ipstruct():
from IPython.utils import ipstruct
def test_import_PyColorize():
from IPython.utils import PyColorize
def test_import_strdispatch():
from IPython.utils import strdispatch
def test_import_wildcard():
from IPython.utils import wildcard

View File

@@ -0,0 +1,40 @@
"""Tests for IPython.utils.importstring."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import pytest
from IPython.utils.importstring import import_item
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_import_plain():
"Test simple imports"
import os
os2 = import_item("os")
assert os is os2
def test_import_nested():
"Test nested imports from the stdlib"
from os import path
path2 = import_item("os.path")
assert path is path2
def test_import_raises():
"Test that failing imports raise the right exception"
pytest.raises(ImportError, import_item, "IPython.foobar")

View File

@@ -0,0 +1,61 @@
# encoding: utf-8
"""Tests for io.py"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from io import StringIO
from subprocess import Popen, PIPE
import unittest
from IPython.utils.io import Tee, capture_output
def test_tee_simple():
"Very simple check with stdout only"
chan = StringIO()
text = 'Hello'
tee = Tee(chan, channel='stdout')
print(text, file=chan)
assert chan.getvalue() == text + "\n"
class TeeTestCase(unittest.TestCase):
def tchan(self, channel):
trap = StringIO()
chan = StringIO()
text = 'Hello'
std_ori = getattr(sys, channel)
setattr(sys, channel, trap)
tee = Tee(chan, channel=channel)
print(text, end='', file=chan)
trap_val = trap.getvalue()
self.assertEqual(chan.getvalue(), text)
tee.close()
setattr(sys, channel, std_ori)
assert getattr(sys, channel) == std_ori
def test(self):
for chan in ['stdout', 'stderr']:
self.tchan(chan)
class TestIOStream(unittest.TestCase):
def test_capture_output(self):
"""capture_output() context works"""
with capture_output() as io:
print("hi, stdout")
print("hi, stderr", file=sys.stderr)
self.assertEqual(io.stdout, "hi, stdout\n")
self.assertEqual(io.stderr, "hi, stderr\n")

View File

@@ -0,0 +1,109 @@
# encoding: utf-8
"""Tests for IPython.utils.module_paths.py"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import shutil
import sys
import tempfile
from pathlib import Path
from IPython.testing.tools import make_tempfile
import IPython.utils.module_paths as mp
TEST_FILE_PATH = Path(__file__).resolve().parent
TMP_TEST_DIR = Path(tempfile.mkdtemp(suffix="with.dot"))
#
# Setup/teardown functions/decorators
#
old_syspath = sys.path
def make_empty_file(fname):
open(fname, "w", encoding="utf-8").close()
def setup_module():
"""Setup testenvironment for the module:
"""
# Do not mask exceptions here. In particular, catching WindowsError is a
# problem because that exception is only defined on Windows...
Path(TMP_TEST_DIR / "xmod").mkdir(parents=True)
Path(TMP_TEST_DIR / "nomod").mkdir(parents=True)
make_empty_file(TMP_TEST_DIR / "xmod/__init__.py")
make_empty_file(TMP_TEST_DIR / "xmod/sub.py")
make_empty_file(TMP_TEST_DIR / "pack.py")
make_empty_file(TMP_TEST_DIR / "packpyc.pyc")
sys.path = [str(TMP_TEST_DIR)]
def teardown_module():
"""Teardown testenvironment for the module:
- Remove tempdir
- restore sys.path
"""
# Note: we remove the parent test dir, which is the root of all test
# subdirs we may have created. Use shutil instead of os.removedirs, so
# that non-empty directories are all recursively removed.
shutil.rmtree(TMP_TEST_DIR)
sys.path = old_syspath
def test_tempdir():
"""
Ensure the tests are done with a temporary directory that has a dot somewhere in its name.
"""
assert "." in str(TMP_TEST_DIR)
def test_find_mod_1():
"""
Search for a directory's file path.
Expected output: a path to that directory's __init__.py file.
"""
modpath = TMP_TEST_DIR / "xmod" / "__init__.py"
assert Path(mp.find_mod("xmod")) == modpath
def test_find_mod_2():
"""
Search for a directory's file path.
Expected output: a path to that directory's __init__.py file.
TODO: Confirm why this is a duplicate test.
"""
modpath = TMP_TEST_DIR / "xmod" / "__init__.py"
assert Path(mp.find_mod("xmod")) == modpath
def test_find_mod_3():
"""
Search for a directory + a filename without its .py extension
Expected output: full path with .py extension.
"""
modpath = TMP_TEST_DIR / "xmod" / "sub.py"
assert Path(mp.find_mod("xmod.sub")) == modpath
def test_find_mod_4():
"""
Search for a filename without its .py extension
Expected output: full path with .py extension
"""
modpath = TMP_TEST_DIR / "pack.py"
assert Path(mp.find_mod("pack")) == modpath
def test_find_mod_5():
"""
Search for a filename with a .pyc extension
Expected output: TODO: do we exclude or include .pyc files?
"""
assert mp.find_mod("packpyc") is None

View File

@@ -0,0 +1,38 @@
import io
import os.path
from IPython.utils import openpy
mydir = os.path.dirname(__file__)
nonascii_path = os.path.join(mydir, "../../core/tests/nonascii.py")
def test_detect_encoding():
with open(nonascii_path, "rb") as f:
enc, lines = openpy.detect_encoding(f.readline)
assert enc == "iso-8859-5"
def test_read_file():
with io.open(nonascii_path, encoding="iso-8859-5") as f:
read_specified_enc = f.read()
read_detected_enc = openpy.read_py_file(nonascii_path, skip_encoding_cookie=False)
assert read_detected_enc == read_specified_enc
assert "coding: iso-8859-5" in read_detected_enc
read_strip_enc_cookie = openpy.read_py_file(
nonascii_path, skip_encoding_cookie=True
)
assert "coding: iso-8859-5" not in read_strip_enc_cookie
def test_source_to_unicode():
with io.open(nonascii_path, "rb") as f:
source_bytes = f.read()
assert (
openpy.source_to_unicode(source_bytes, skip_encoding_cookie=False).splitlines()
== source_bytes.decode("iso-8859-5").splitlines()
)
source_no_cookie = openpy.source_to_unicode(source_bytes, skip_encoding_cookie=True)
assert "coding: iso-8859-5" not in source_no_cookie

View File

@@ -0,0 +1,509 @@
# encoding: utf-8
"""Tests for IPython.utils.path.py"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import shutil
import sys
import tempfile
import unittest
from contextlib import contextmanager
from importlib import reload
from os.path import abspath, join
from unittest.mock import patch
import pytest
from tempfile import TemporaryDirectory
import IPython
from IPython import paths
from IPython.testing import decorators as dec
from IPython.testing.decorators import (
onlyif_unicode_paths,
skip_if_not_win32,
skip_win32,
)
from IPython.testing.tools import make_tempfile
from IPython.utils import path
# Platform-dependent imports
try:
import winreg as wreg
except ImportError:
# Fake _winreg module on non-Windows platforms
import types
wr_name = "winreg"
sys.modules[wr_name] = types.ModuleType(wr_name)
try:
import winreg as wreg
except ImportError:
import _winreg as wreg
# Add entries that need to be stubbed by the testing code
(wreg.OpenKey, wreg.QueryValueEx,) = (None, None)
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
env = os.environ
TMP_TEST_DIR = tempfile.mkdtemp()
HOME_TEST_DIR = join(TMP_TEST_DIR, "home_test_dir")
#
# Setup/teardown functions/decorators
#
def setup_module():
"""Setup testenvironment for the module:
- Adds dummy home dir tree
"""
# Do not mask exceptions here. In particular, catching WindowsError is a
# problem because that exception is only defined on Windows...
os.makedirs(os.path.join(HOME_TEST_DIR, 'ipython'))
def teardown_module():
"""Teardown testenvironment for the module:
- Remove dummy home dir tree
"""
# Note: we remove the parent test dir, which is the root of all test
# subdirs we may have created. Use shutil instead of os.removedirs, so
# that non-empty directories are all recursively removed.
shutil.rmtree(TMP_TEST_DIR)
def setup_environment():
"""Setup testenvironment for some functions that are tested
in this module. In particular this functions stores attributes
and other things that we need to stub in some test functions.
This needs to be done on a function level and not module level because
each testfunction needs a pristine environment.
"""
global oldstuff, platformstuff
oldstuff = (env.copy(), os.name, sys.platform, path.get_home_dir, IPython.__file__, os.getcwd())
def teardown_environment():
"""Restore things that were remembered by the setup_environment function
"""
(oldenv, os.name, sys.platform, path.get_home_dir, IPython.__file__, old_wd) = oldstuff
os.chdir(old_wd)
reload(path)
for key in list(env):
if key not in oldenv:
del env[key]
env.update(oldenv)
if hasattr(sys, 'frozen'):
del sys.frozen
# Build a fixture and decorator that use setup_environment/teardown_environment
@pytest.fixture
def environment():
setup_environment()
yield
teardown_environment()
with_environment = pytest.mark.usefixtures("environment")
@skip_if_not_win32
@with_environment
def test_get_home_dir_1():
"""Testcase for py2exe logic, un-compressed lib
"""
unfrozen = path.get_home_dir()
sys.frozen = True
#fake filename for IPython.__init__
IPython.__file__ = abspath(join(HOME_TEST_DIR, "Lib/IPython/__init__.py"))
home_dir = path.get_home_dir()
assert home_dir == unfrozen
@skip_if_not_win32
@with_environment
def test_get_home_dir_2():
"""Testcase for py2exe logic, compressed lib
"""
unfrozen = path.get_home_dir()
sys.frozen = True
#fake filename for IPython.__init__
IPython.__file__ = abspath(join(HOME_TEST_DIR, "Library.zip/IPython/__init__.py")).lower()
home_dir = path.get_home_dir(True)
assert home_dir == unfrozen
@skip_win32
@with_environment
def test_get_home_dir_3():
"""get_home_dir() uses $HOME if set"""
env["HOME"] = HOME_TEST_DIR
home_dir = path.get_home_dir(True)
# get_home_dir expands symlinks
assert home_dir == os.path.realpath(env["HOME"])
@with_environment
def test_get_home_dir_4():
"""get_home_dir() still works if $HOME is not set"""
if 'HOME' in env: del env['HOME']
# this should still succeed, but we don't care what the answer is
home = path.get_home_dir(False)
@skip_win32
@with_environment
def test_get_home_dir_5():
"""raise HomeDirError if $HOME is specified, but not a writable dir"""
env['HOME'] = abspath(HOME_TEST_DIR+'garbage')
# set os.name = posix, to prevent My Documents fallback on Windows
os.name = 'posix'
pytest.raises(path.HomeDirError, path.get_home_dir, True)
# Should we stub wreg fully so we can run the test on all platforms?
@skip_if_not_win32
@with_environment
def test_get_home_dir_8():
"""Using registry hack for 'My Documents', os=='nt'
HOMESHARE, HOMEDRIVE, HOMEPATH, USERPROFILE and others are missing.
"""
os.name = 'nt'
# Remove from stub environment all keys that may be set
for key in ['HOME', 'HOMESHARE', 'HOMEDRIVE', 'HOMEPATH', 'USERPROFILE']:
env.pop(key, None)
class key:
def __enter__(self):
pass
def Close(self):
pass
def __exit__(*args, **kwargs):
pass
with patch.object(wreg, 'OpenKey', return_value=key()), \
patch.object(wreg, 'QueryValueEx', return_value=[abspath(HOME_TEST_DIR)]):
home_dir = path.get_home_dir()
assert home_dir == abspath(HOME_TEST_DIR)
@with_environment
def test_get_xdg_dir_0():
"""test_get_xdg_dir_0, check xdg_dir"""
reload(path)
path._writable_dir = lambda path: True
path.get_home_dir = lambda : 'somewhere'
os.name = "posix"
sys.platform = "linux2"
env.pop('IPYTHON_DIR', None)
env.pop('IPYTHONDIR', None)
env.pop('XDG_CONFIG_HOME', None)
assert path.get_xdg_dir() == os.path.join("somewhere", ".config")
@with_environment
def test_get_xdg_dir_1():
"""test_get_xdg_dir_1, check nonexistent xdg_dir"""
reload(path)
path.get_home_dir = lambda : HOME_TEST_DIR
os.name = "posix"
sys.platform = "linux2"
env.pop('IPYTHON_DIR', None)
env.pop('IPYTHONDIR', None)
env.pop('XDG_CONFIG_HOME', None)
assert path.get_xdg_dir() is None
@with_environment
def test_get_xdg_dir_2():
"""test_get_xdg_dir_2, check xdg_dir default to ~/.config"""
reload(path)
path.get_home_dir = lambda : HOME_TEST_DIR
os.name = "posix"
sys.platform = "linux2"
env.pop('IPYTHON_DIR', None)
env.pop('IPYTHONDIR', None)
env.pop('XDG_CONFIG_HOME', None)
cfgdir=os.path.join(path.get_home_dir(), '.config')
if not os.path.exists(cfgdir):
os.makedirs(cfgdir)
assert path.get_xdg_dir() == cfgdir
@with_environment
def test_get_xdg_dir_3():
"""test_get_xdg_dir_3, check xdg_dir not used on non-posix systems"""
reload(path)
path.get_home_dir = lambda : HOME_TEST_DIR
os.name = "nt"
sys.platform = "win32"
env.pop('IPYTHON_DIR', None)
env.pop('IPYTHONDIR', None)
env.pop('XDG_CONFIG_HOME', None)
cfgdir=os.path.join(path.get_home_dir(), '.config')
os.makedirs(cfgdir, exist_ok=True)
assert path.get_xdg_dir() is None
def test_filefind():
"""Various tests for filefind"""
f = tempfile.NamedTemporaryFile()
# print 'fname:',f.name
alt_dirs = paths.get_ipython_dir()
t = path.filefind(f.name, alt_dirs)
# print 'found:',t
@dec.skip_if_not_win32
def test_get_long_path_name_win32():
with TemporaryDirectory() as tmpdir:
# Make a long path. Expand the path of tmpdir up front, as it may already have a long
# path component, so ensure we include the long form of it.
long_path = os.path.join(path.get_long_path_name(tmpdir), 'this is my long path name')
os.makedirs(long_path)
# Test to see if the short path evaluates correctly.
short_path = os.path.join(tmpdir, 'THISIS~1')
evaluated_path = path.get_long_path_name(short_path)
assert evaluated_path.lower() == long_path.lower()
@dec.skip_win32
def test_get_long_path_name():
p = path.get_long_path_name("/usr/local")
assert p == "/usr/local"
class TestRaiseDeprecation(unittest.TestCase):
@dec.skip_win32 # can't create not-user-writable dir on win
@with_environment
def test_not_writable_ipdir(self):
tmpdir = tempfile.mkdtemp()
os.name = "posix"
env.pop('IPYTHON_DIR', None)
env.pop('IPYTHONDIR', None)
env.pop('XDG_CONFIG_HOME', None)
env['HOME'] = tmpdir
ipdir = os.path.join(tmpdir, '.ipython')
os.mkdir(ipdir, 0o555)
try:
open(os.path.join(ipdir, "_foo_"), "w", encoding="utf-8").close()
except IOError:
pass
else:
# I can still write to an unwritable dir,
# assume I'm root and skip the test
pytest.skip("I can't create directories that I can't write to")
with self.assertWarnsRegex(UserWarning, 'is not a writable location'):
ipdir = paths.get_ipython_dir()
env.pop('IPYTHON_DIR', None)
@with_environment
def test_get_py_filename():
os.chdir(TMP_TEST_DIR)
with make_tempfile("foo.py"):
assert path.get_py_filename("foo.py") == "foo.py"
assert path.get_py_filename("foo") == "foo.py"
with make_tempfile("foo"):
assert path.get_py_filename("foo") == "foo"
pytest.raises(IOError, path.get_py_filename, "foo.py")
pytest.raises(IOError, path.get_py_filename, "foo")
pytest.raises(IOError, path.get_py_filename, "foo.py")
true_fn = "foo with spaces.py"
with make_tempfile(true_fn):
assert path.get_py_filename("foo with spaces") == true_fn
assert path.get_py_filename("foo with spaces.py") == true_fn
pytest.raises(IOError, path.get_py_filename, '"foo with spaces.py"')
pytest.raises(IOError, path.get_py_filename, "'foo with spaces.py'")
@onlyif_unicode_paths
def test_unicode_in_filename():
"""When a file doesn't exist, the exception raised should be safe to call
str() on - i.e. in Python 2 it must only have ASCII characters.
https://github.com/ipython/ipython/issues/875
"""
try:
# these calls should not throw unicode encode exceptions
path.get_py_filename('fooéè.py')
except IOError as ex:
str(ex)
class TestShellGlob(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.filenames_start_with_a = ['a0', 'a1', 'a2']
cls.filenames_end_with_b = ['0b', '1b', '2b']
cls.filenames = cls.filenames_start_with_a + cls.filenames_end_with_b
cls.tempdir = TemporaryDirectory()
td = cls.tempdir.name
with cls.in_tempdir():
# Create empty files
for fname in cls.filenames:
open(os.path.join(td, fname), "w", encoding="utf-8").close()
@classmethod
def tearDownClass(cls):
cls.tempdir.cleanup()
@classmethod
@contextmanager
def in_tempdir(cls):
save = os.getcwd()
try:
os.chdir(cls.tempdir.name)
yield
finally:
os.chdir(save)
def check_match(self, patterns, matches):
with self.in_tempdir():
# glob returns unordered list. that's why sorted is required.
assert sorted(path.shellglob(patterns)) == sorted(matches)
def common_cases(self):
return [
(['*'], self.filenames),
(['a*'], self.filenames_start_with_a),
(['*c'], ['*c']),
(['*', 'a*', '*b', '*c'], self.filenames
+ self.filenames_start_with_a
+ self.filenames_end_with_b
+ ['*c']),
(['a[012]'], self.filenames_start_with_a),
]
@skip_win32
def test_match_posix(self):
for (patterns, matches) in self.common_cases() + [
([r'\*'], ['*']),
([r'a\*', 'a*'], ['a*'] + self.filenames_start_with_a),
([r'a\[012]'], ['a[012]']),
]:
self.check_match(patterns, matches)
@skip_if_not_win32
def test_match_windows(self):
for (patterns, matches) in self.common_cases() + [
# In windows, backslash is interpreted as path
# separator. Therefore, you can't escape glob
# using it.
([r'a\*', 'a*'], [r'a\*'] + self.filenames_start_with_a),
([r'a\[012]'], [r'a\[012]']),
]:
self.check_match(patterns, matches)
@pytest.mark.parametrize(
"globstr, unescaped_globstr",
[
(r"\*\[\!\]\?", "*[!]?"),
(r"\\*", r"\*"),
(r"\\\*", r"\*"),
(r"\\a", r"\a"),
(r"\a", r"\a"),
],
)
def test_unescape_glob(globstr, unescaped_globstr):
assert path.unescape_glob(globstr) == unescaped_globstr
@onlyif_unicode_paths
def test_ensure_dir_exists():
with TemporaryDirectory() as td:
d = os.path.join(td, '∂ir')
path.ensure_dir_exists(d) # create it
assert os.path.isdir(d)
path.ensure_dir_exists(d) # no-op
f = os.path.join(td, "ƒile")
open(f, "w", encoding="utf-8").close() # touch
with pytest.raises(IOError):
path.ensure_dir_exists(f)
class TestLinkOrCopy(unittest.TestCase):
def setUp(self):
self.tempdir = TemporaryDirectory()
self.src = self.dst("src")
with open(self.src, "w", encoding="utf-8") as f:
f.write("Hello, world!")
def tearDown(self):
self.tempdir.cleanup()
def dst(self, *args):
return os.path.join(self.tempdir.name, *args)
def assert_inode_not_equal(self, a, b):
assert (
os.stat(a).st_ino != os.stat(b).st_ino
), "%r and %r do reference the same indoes" % (a, b)
def assert_inode_equal(self, a, b):
assert (
os.stat(a).st_ino == os.stat(b).st_ino
), "%r and %r do not reference the same indoes" % (a, b)
def assert_content_equal(self, a, b):
with open(a, "rb") as a_f:
with open(b, "rb") as b_f:
assert a_f.read() == b_f.read()
@skip_win32
def test_link_successful(self):
dst = self.dst("target")
path.link_or_copy(self.src, dst)
self.assert_inode_equal(self.src, dst)
@skip_win32
def test_link_into_dir(self):
dst = self.dst("some_dir")
os.mkdir(dst)
path.link_or_copy(self.src, dst)
expected_dst = self.dst("some_dir", os.path.basename(self.src))
self.assert_inode_equal(self.src, expected_dst)
@skip_win32
def test_target_exists(self):
dst = self.dst("target")
open(dst, "w", encoding="utf-8").close()
path.link_or_copy(self.src, dst)
self.assert_inode_equal(self.src, dst)
@skip_win32
def test_no_link(self):
real_link = os.link
try:
del os.link
dst = self.dst("target")
path.link_or_copy(self.src, dst)
self.assert_content_equal(self.src, dst)
self.assert_inode_not_equal(self.src, dst)
finally:
os.link = real_link
@skip_if_not_win32
def test_windows(self):
dst = self.dst("target")
path.link_or_copy(self.src, dst)
self.assert_content_equal(self.src, dst)
def test_link_twice(self):
# Linking the same file twice shouldn't leave duplicates around.
# See https://github.com/ipython/ipython/issues/6450
dst = self.dst('target')
path.link_or_copy(self.src, dst)
path.link_or_copy(self.src, dst)
self.assert_inode_equal(self.src, dst)
assert sorted(os.listdir(self.tempdir.name)) == ["src", "target"]

View File

@@ -0,0 +1,188 @@
"""
Tests for platutils.py
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import signal
import os
import time
from _thread import interrupt_main # Py 3
import threading
import pytest
from IPython.utils.process import (find_cmd, FindCmdError, arg_split,
system, getoutput, getoutputerror,
get_output_error_code)
from IPython.utils.capture import capture_output
from IPython.testing import decorators as dec
from IPython.testing import tools as tt
python = os.path.basename(sys.executable)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
@dec.skip_win32
def test_find_cmd_ls():
"""Make sure we can find the full path to ls."""
path = find_cmd("ls")
assert path.endswith("ls")
@dec.skip_if_not_win32
def test_find_cmd_pythonw():
"""Try to find pythonw on Windows."""
path = find_cmd('pythonw')
assert path.lower().endswith('pythonw.exe'), path
def test_find_cmd_fail():
"""Make sure that FindCmdError is raised if we can't find the cmd."""
pytest.raises(FindCmdError, find_cmd, "asdfasdf")
@dec.skip_win32
@pytest.mark.parametrize(
"argstr, argv",
[
("hi", ["hi"]),
("hello there", ["hello", "there"]),
# \u01ce == \N{LATIN SMALL LETTER A WITH CARON}
# Do not use \N because the tests crash with syntax error in
# some cases, for example Windows Python 2.6.
("h\u01cello", ["h\u01cello"]),
('something "with quotes"', ["something", '"with quotes"']),
],
)
def test_arg_split(argstr, argv):
"""Ensure that argument lines are correctly split like in a shell."""
assert arg_split(argstr) == argv
@dec.skip_if_not_win32
@pytest.mark.parametrize(
"argstr,argv",
[
("hi", ["hi"]),
("hello there", ["hello", "there"]),
("h\u01cello", ["h\u01cello"]),
('something "with quotes"', ["something", "with quotes"]),
],
)
def test_arg_split_win32(argstr, argv):
"""Ensure that argument lines are correctly split like in a shell."""
assert arg_split(argstr) == argv
class SubProcessTestCase(tt.TempFileMixin):
def setUp(self):
"""Make a valid python temp file."""
lines = [ "import sys",
"print('on stdout', end='', file=sys.stdout)",
"print('on stderr', end='', file=sys.stderr)",
"sys.stdout.flush()",
"sys.stderr.flush()"]
self.mktmp('\n'.join(lines))
def test_system(self):
status = system(f'{python} "{self.fname}"')
self.assertEqual(status, 0)
def test_system_quotes(self):
status = system('%s -c "import sys"' % python)
self.assertEqual(status, 0)
def assert_interrupts(self, command):
"""
Interrupt a subprocess after a second.
"""
if threading.main_thread() != threading.current_thread():
raise pytest.skip("Can't run this test if not in main thread.")
# Some tests can overwrite SIGINT handler (by using pdb for example),
# which then breaks this test, so just make sure it's operating
# normally.
signal.signal(signal.SIGINT, signal.default_int_handler)
def interrupt():
# Wait for subprocess to start:
time.sleep(0.5)
interrupt_main()
threading.Thread(target=interrupt).start()
start = time.time()
try:
result = command()
except KeyboardInterrupt:
# Success!
pass
end = time.time()
self.assertTrue(
end - start < 2, "Process didn't die quickly: %s" % (end - start)
)
return result
def test_system_interrupt(self):
"""
When interrupted in the way ipykernel interrupts IPython, the
subprocess is interrupted.
"""
def command():
return system('%s -c "import time; time.sleep(5)"' % python)
status = self.assert_interrupts(command)
self.assertNotEqual(
status, 0, f"The process wasn't interrupted. Status: {status}"
)
def test_getoutput(self):
out = getoutput(f'{python} "{self.fname}"')
# we can't rely on the order in which the line-buffered streams are flushed
try:
self.assertEqual(out, 'on stderron stdout')
except AssertionError:
self.assertEqual(out, 'on stdouton stderr')
def test_getoutput_quoted(self):
out = getoutput('%s -c "print (1)"' % python)
self.assertEqual(out.strip(), '1')
# Invalid quoting on Windows
@dec.skip_win32
def test_getoutput_quoted2(self):
out = getoutput("%s -c 'print (1)'" % python)
self.assertEqual(out.strip(), '1')
out = getoutput("%s -c 'print (\"1\")'" % python)
self.assertEqual(out.strip(), '1')
def test_getoutput_error(self):
out, err = getoutputerror(f'{python} "{self.fname}"')
self.assertEqual(out, 'on stdout')
self.assertEqual(err, 'on stderr')
def test_get_output_error_code(self):
quiet_exit = '%s -c "import sys; sys.exit(1)"' % python
out, err, code = get_output_error_code(quiet_exit)
self.assertEqual(out, '')
self.assertEqual(err, '')
self.assertEqual(code, 1)
out, err, code = get_output_error_code(f'{python} "{self.fname}"')
self.assertEqual(out, 'on stdout')
self.assertEqual(err, 'on stderr')
self.assertEqual(code, 0)

View File

@@ -0,0 +1,69 @@
# coding: utf-8
"""Test suite for our color utilities.
Authors
-------
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# our own
from IPython.utils.PyColorize import Parser
import io
import pytest
@pytest.fixture(scope="module", params=("Linux", "NoColor", "LightBG", "Neutral"))
def style(request):
yield request.param
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
sample = """
def function(arg, *args, kwarg=True, **kwargs):
'''
this is docs
'''
pass is True
False == None
with io.open(ru'unicode', encoding='utf-8'):
raise ValueError("\n escape \r sequence")
print("wěird ünicoðe")
class Bar(Super):
def __init__(self):
super(Bar, self).__init__(1**2, 3^4, 5 or 6)
"""
def test_parse_sample(style):
"""and test writing to a buffer"""
buf = io.StringIO()
p = Parser(style=style)
p.format(sample, buf)
buf.seek(0)
f1 = buf.read()
assert "ERROR" not in f1
def test_parse_error(style):
p = Parser(style=style)
f1 = p.format(")", "str")
if style != "NoColor":
assert "ERROR" in f1

View File

@@ -0,0 +1,12 @@
from IPython.utils.shimmodule import ShimModule
import IPython
def test_shimmodule_repr_does_not_fail_on_import_error():
shim_module = ShimModule("shim_module", mirror="mirrored_module_does_not_exist")
repr(shim_module)
def test_shimmodule_repr_forwards_to_module():
shim_module = ShimModule("shim_module", mirror="IPython")
assert repr(shim_module) == repr(IPython)

View File

@@ -0,0 +1,22 @@
# coding: utf-8
"""Test suite for our sysinfo utilities."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import pytest
from IPython.utils import sysinfo
def test_json_getsysinfo():
"""
Test that the result is easily JSON-serializable and doesn't contain bytes anywhere.
"""
json.dumps(sysinfo.get_sys_info())
def test_num_cpus():
with pytest.deprecated_call():
sysinfo.num_cpus()

View File

@@ -0,0 +1,29 @@
#-----------------------------------------------------------------------------
# Copyright (C) 2012- The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
from pathlib import Path
from IPython.utils.tempdir import NamedFileInTemporaryDirectory
from IPython.utils.tempdir import TemporaryWorkingDirectory
def test_named_file_in_temporary_directory():
with NamedFileInTemporaryDirectory('filename') as file:
name = file.name
assert not file.closed
assert Path(name).exists()
file.write(b'test')
assert file.closed
assert not Path(name).exists()
def test_temporary_working_directory():
with TemporaryWorkingDirectory() as directory:
directory_path = Path(directory).resolve()
assert directory_path.exists()
assert Path.cwd().resolve() == directory_path
assert not directory_path.exists()
assert Path.cwd().resolve() != directory_path

View File

@@ -0,0 +1,208 @@
# encoding: utf-8
"""Tests for IPython.utils.text"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import math
import random
import sys
from pathlib import Path
import pytest
from IPython.utils import text
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
def test_columnize():
"""Basic columnize tests."""
size = 5
items = [l*size for l in 'abcd']
out = text.columnize(items, displaywidth=80)
assert out == "aaaaa bbbbb ccccc ddddd\n"
out = text.columnize(items, displaywidth=25)
assert out == "aaaaa ccccc\nbbbbb ddddd\n"
out = text.columnize(items, displaywidth=12)
assert out == "aaaaa ccccc\nbbbbb ddddd\n"
out = text.columnize(items, displaywidth=10)
assert out == "aaaaa\nbbbbb\nccccc\nddddd\n"
out = text.columnize(items, row_first=True, displaywidth=80)
assert out == "aaaaa bbbbb ccccc ddddd\n"
out = text.columnize(items, row_first=True, displaywidth=25)
assert out == "aaaaa bbbbb\nccccc ddddd\n"
out = text.columnize(items, row_first=True, displaywidth=12)
assert out == "aaaaa bbbbb\nccccc ddddd\n"
out = text.columnize(items, row_first=True, displaywidth=10)
assert out == "aaaaa\nbbbbb\nccccc\nddddd\n"
out = text.columnize(items, displaywidth=40, spread=True)
assert out == "aaaaa bbbbb ccccc ddddd\n"
out = text.columnize(items, displaywidth=20, spread=True)
assert out == "aaaaa ccccc\nbbbbb ddddd\n"
out = text.columnize(items, displaywidth=12, spread=True)
assert out == "aaaaa ccccc\nbbbbb ddddd\n"
out = text.columnize(items, displaywidth=10, spread=True)
assert out == "aaaaa\nbbbbb\nccccc\nddddd\n"
def test_columnize_random():
"""Test with random input to hopefully catch edge case """
for row_first in [True, False]:
for nitems in [random.randint(2,70) for i in range(2,20)]:
displaywidth = random.randint(20,200)
rand_len = [random.randint(2,displaywidth) for i in range(nitems)]
items = ['x'*l for l in rand_len]
out = text.columnize(items, row_first=row_first, displaywidth=displaywidth)
longer_line = max([len(x) for x in out.split('\n')])
longer_element = max(rand_len)
assert longer_line <= displaywidth, (
f"Columnize displayed something lager than displaywidth : {longer_line}\n"
f"longer element : {longer_element}\n"
f"displaywidth : {displaywidth}\n"
f"number of element : {nitems}\n"
f"size of each element : {rand_len}\n"
f"row_first={row_first}\n"
)
@pytest.mark.parametrize("row_first", [True, False])
def test_columnize_medium(row_first):
"""Test with inputs than shouldn't be wider than 80"""
size = 40
items = [l*size for l in 'abc']
out = text.columnize(items, row_first=row_first, displaywidth=80)
assert out == "\n".join(items + [""]), "row_first={0}".format(row_first)
@pytest.mark.parametrize("row_first", [True, False])
def test_columnize_long(row_first):
"""Test columnize with inputs longer than the display window"""
size = 11
items = [l*size for l in 'abc']
out = text.columnize(items, row_first=row_first, displaywidth=size - 1)
assert out == "\n".join(items + [""]), "row_first={0}".format(row_first)
def eval_formatter_check(f):
ns = dict(n=12, pi=math.pi, stuff='hello there', os=os, u=u"café", b="café")
s = f.format("{n} {n//4} {stuff.split()[0]}", **ns)
assert s == "12 3 hello"
s = f.format(" ".join(["{n//%i}" % i for i in range(1, 8)]), **ns)
assert s == "12 6 4 3 2 2 1"
s = f.format("{[n//i for i in range(1,8)]}", **ns)
assert s == "[12, 6, 4, 3, 2, 2, 1]"
s = f.format("{stuff!s}", **ns)
assert s == ns["stuff"]
s = f.format("{stuff!r}", **ns)
assert s == repr(ns["stuff"])
# Check with unicode:
s = f.format("{u}", **ns)
assert s == ns["u"]
# This decodes in a platform dependent manner, but it shouldn't error out
s = f.format("{b}", **ns)
pytest.raises(NameError, f.format, "{dne}", **ns)
def eval_formatter_slicing_check(f):
ns = dict(n=12, pi=math.pi, stuff='hello there', os=os)
s = f.format(" {stuff.split()[:]} ", **ns)
assert s == " ['hello', 'there'] "
s = f.format(" {stuff.split()[::-1]} ", **ns)
assert s == " ['there', 'hello'] "
s = f.format("{stuff[::2]}", **ns)
assert s == ns["stuff"][::2]
pytest.raises(SyntaxError, f.format, "{n:x}", **ns)
def eval_formatter_no_slicing_check(f):
ns = dict(n=12, pi=math.pi, stuff="hello there", os=os)
s = f.format("{n:x} {pi**2:+f}", **ns)
assert s == "c +9.869604"
s = f.format("{stuff[slice(1,4)]}", **ns)
assert s == "ell"
s = f.format("{a[:]}", a=[1, 2])
assert s == "[1, 2]"
def test_eval_formatter():
f = text.EvalFormatter()
eval_formatter_check(f)
eval_formatter_no_slicing_check(f)
def test_full_eval_formatter():
f = text.FullEvalFormatter()
eval_formatter_check(f)
eval_formatter_slicing_check(f)
def test_dollar_formatter():
f = text.DollarFormatter()
eval_formatter_check(f)
eval_formatter_slicing_check(f)
ns = dict(n=12, pi=math.pi, stuff='hello there', os=os)
s = f.format("$n", **ns)
assert s == "12"
s = f.format("$n.real", **ns)
assert s == "12"
s = f.format("$n/{stuff[:5]}", **ns)
assert s == "12/hello"
s = f.format("$n $$HOME", **ns)
assert s == "12 $HOME"
s = f.format("${foo}", foo="HOME")
assert s == "$HOME"
def test_strip_email():
src = """\
>> >>> def f(x):
>> ... return x+1
>> ...
>> >>> zz = f(2.5)"""
cln = """\
>>> def f(x):
... return x+1
...
>>> zz = f(2.5)"""
assert text.strip_email_quotes(src) == cln
def test_strip_email2():
src = "> > > list()"
cln = "list()"
assert text.strip_email_quotes(src) == cln
def test_LSString():
lss = text.LSString("abc\ndef")
assert lss.l == ["abc", "def"]
assert lss.s == "abc def"
lss = text.LSString(os.getcwd())
assert isinstance(lss.p[0], Path)
def test_SList():
sl = text.SList(["a 11", "b 1", "a 2"])
assert sl.n == "a 11\nb 1\na 2"
assert sl.s == "a 11 b 1 a 2"
assert sl.grep(lambda x: x.startswith("a")) == text.SList(["a 11", "a 2"])
assert sl.fields(0) == text.SList(["a", "b", "a"])
assert sl.sort(field=1, nums=True) == text.SList(["b 1", "a 2", "a 11"])

View File

@@ -0,0 +1,141 @@
"""Tests for tokenutil"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import pytest
from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
def expect_token(expected, cell, cursor_pos):
token = token_at_cursor(cell, cursor_pos)
offset = 0
for line in cell.splitlines():
if offset + len(line) >= cursor_pos:
break
else:
offset += len(line)+1
column = cursor_pos - offset
line_with_cursor = "%s|%s" % (line[:column], line[column:])
assert token == expected, "Expected %r, got %r in: %r (pos %i)" % (
expected,
token,
line_with_cursor,
cursor_pos,
)
def test_simple():
cell = "foo"
for i in range(len(cell)):
expect_token("foo", cell, i)
def test_function():
cell = "foo(a=5, b='10')"
expected = 'foo'
# up to `foo(|a=`
for i in range(cell.find('a=') + 1):
expect_token("foo", cell, i)
# find foo after `=`
for i in [cell.find('=') + 1, cell.rfind('=') + 1]:
expect_token("foo", cell, i)
# in between `5,|` and `|b=`
for i in range(cell.find(','), cell.find('b=')):
expect_token("foo", cell, i)
def test_multiline():
cell = '\n'.join([
'a = 5',
'b = hello("string", there)'
])
expected = 'hello'
start = cell.index(expected) + 1
for i in range(start, start + len(expected)):
expect_token(expected, cell, i)
expected = 'hello'
start = cell.index(expected) + 1
for i in range(start, start + len(expected)):
expect_token(expected, cell, i)
def test_multiline_token():
cell = '\n'.join([
'"""\n\nxxxxxxxxxx\n\n"""',
'5, """',
'docstring',
'multiline token',
'""", [',
'2, 3, "complicated"]',
'b = hello("string", there)'
])
expected = 'hello'
start = cell.index(expected) + 1
for i in range(start, start + len(expected)):
expect_token(expected, cell, i)
expected = 'hello'
start = cell.index(expected) + 1
for i in range(start, start + len(expected)):
expect_token(expected, cell, i)
def test_nested_call():
cell = "foo(bar(a=5), b=10)"
expected = 'foo'
start = cell.index('bar') + 1
for i in range(start, start + 3):
expect_token(expected, cell, i)
expected = 'bar'
start = cell.index('a=')
for i in range(start, start + 3):
expect_token(expected, cell, i)
expected = 'foo'
start = cell.index(')') + 1
for i in range(start, len(cell)-1):
expect_token(expected, cell, i)
def test_attrs():
cell = "a = obj.attr.subattr"
expected = 'obj'
idx = cell.find('obj') + 1
for i in range(idx, idx + 3):
expect_token(expected, cell, i)
idx = cell.find('.attr') + 2
expected = 'obj.attr'
for i in range(idx, idx + 4):
expect_token(expected, cell, i)
idx = cell.find('.subattr') + 2
expected = 'obj.attr.subattr'
for i in range(idx, len(cell)):
expect_token(expected, cell, i)
def test_line_at_cursor():
cell = ""
(line, offset) = line_at_cursor(cell, cursor_pos=11)
assert line == ""
assert offset == 0
# The position after a newline should be the start of the following line.
cell = "One\nTwo\n"
(line, offset) = line_at_cursor(cell, cursor_pos=4)
assert line == "Two\n"
assert offset == 4
# The end of a cell should be on the last line
cell = "pri\npri"
(line, offset) = line_at_cursor(cell, cursor_pos=7)
assert line == "pri"
assert offset == 4
@pytest.mark.parametrize(
"c, token",
zip(
list(range(16, 22)) + list(range(22, 28)),
["int"] * (22 - 16) + ["map"] * (28 - 22),
),
)
def test_multiline_statement(c, token):
cell = """a = (1,
3)
int()
map()
"""
expect_token(token, cell, c)

View File

@@ -0,0 +1,143 @@
"""Some tests for the wildcard utilities."""
#-----------------------------------------------------------------------------
# Library imports
#-----------------------------------------------------------------------------
# Stdlib
import unittest
# Our own
from IPython.utils import wildcard
#-----------------------------------------------------------------------------
# Globals for test
#-----------------------------------------------------------------------------
class obj_t(object):
pass
root = obj_t()
l = ["arna","abel","ABEL","active","bob","bark","abbot"]
q = ["kate","loop","arne","vito","lucifer","koppel"]
for x in l:
o = obj_t()
setattr(root,x,o)
for y in q:
p = obj_t()
setattr(o,y,p)
root._apan = obj_t()
root._apan.a = 10
root._apan._a = 20
root._apan.__a = 20
root.__anka = obj_t()
root.__anka.a = 10
root.__anka._a = 20
root.__anka.__a = 20
root._APAN = obj_t()
root._APAN.a = 10
root._APAN._a = 20
root._APAN.__a = 20
root.__ANKA = obj_t()
root.__ANKA.a = 10
root.__ANKA._a = 20
root.__ANKA.__a = 20
#-----------------------------------------------------------------------------
# Test cases
#-----------------------------------------------------------------------------
class Tests (unittest.TestCase):
def test_case(self):
ns=root.__dict__
tests=[
("a*", ["abbot","abel","active","arna",]),
("?b*.?o*",["abbot.koppel","abbot.loop","abel.koppel","abel.loop",]),
("_a*", []),
("_*anka", ["__anka",]),
("_*a*", ["__anka",]),
]
for pat,res in tests:
res.sort()
a=sorted(wildcard.list_namespace(ns,"all",pat,ignore_case=False,
show_all=False).keys())
self.assertEqual(a,res)
def test_case_showall(self):
ns=root.__dict__
tests=[
("a*", ["abbot","abel","active","arna",]),
("?b*.?o*",["abbot.koppel","abbot.loop","abel.koppel","abel.loop",]),
("_a*", ["_apan"]),
("_*anka", ["__anka",]),
("_*a*", ["__anka","_apan",]),
]
for pat,res in tests:
res.sort()
a=sorted(wildcard.list_namespace(ns,"all",pat,ignore_case=False,
show_all=True).keys())
self.assertEqual(a,res)
def test_nocase(self):
ns=root.__dict__
tests=[
("a*", ["abbot","abel","ABEL","active","arna",]),
("?b*.?o*",["abbot.koppel","abbot.loop","abel.koppel","abel.loop",
"ABEL.koppel","ABEL.loop",]),
("_a*", []),
("_*anka", ["__anka","__ANKA",]),
("_*a*", ["__anka","__ANKA",]),
]
for pat,res in tests:
res.sort()
a=sorted(wildcard.list_namespace(ns,"all",pat,ignore_case=True,
show_all=False).keys())
self.assertEqual(a,res)
def test_nocase_showall(self):
ns=root.__dict__
tests=[
("a*", ["abbot","abel","ABEL","active","arna",]),
("?b*.?o*",["abbot.koppel","abbot.loop","abel.koppel","abel.loop",
"ABEL.koppel","ABEL.loop",]),
("_a*", ["_apan","_APAN"]),
("_*anka", ["__anka","__ANKA",]),
("_*a*", ["__anka","__ANKA","_apan","_APAN"]),
]
for pat,res in tests:
res.sort()
a=sorted(wildcard.list_namespace(ns,"all",pat,ignore_case=True,
show_all=True).keys())
a.sort()
self.assertEqual(a,res)
def test_dict_attributes(self):
"""Dictionaries should be indexed by attributes, not by keys. This was
causing GitHub issue 129."""
ns = {"az":{"king":55}, "pq":{1:0}}
tests = [
("a*", ["az"]),
("az.k*", ["az.keys"]),
("pq.k*", ["pq.keys"])
]
for pat, res in tests:
res.sort()
a = sorted(wildcard.list_namespace(ns, "all", pat, ignore_case=False,
show_all=True).keys())
self.assertEqual(a, res)
def test_dict_dir(self):
class A(object):
def __init__(self):
self.a = 1
self.b = 2
def __getattribute__(self, name):
if name=="a":
raise AttributeError
return object.__getattribute__(self, name)
a = A()
adict = wildcard.dict_dir(a)
assert "a" not in adict # change to assertNotIn method in >= 2.7
self.assertEqual(adict["b"], 2)

View File

@@ -0,0 +1,752 @@
# encoding: utf-8
"""
Utilities for working with strings and text.
Inheritance diagram:
.. inheritance-diagram:: IPython.utils.text
:parts: 3
"""
import os
import re
import string
import sys
import textwrap
from string import Formatter
from pathlib import Path
# datetime.strftime date format for ipython
if sys.platform == 'win32':
date_format = "%B %d, %Y"
else:
date_format = "%B %-d, %Y"
class LSString(str):
"""String derivative with a special access attributes.
These are normal strings, but with the special attributes:
.l (or .list) : value as list (split on newlines).
.n (or .nlstr): original value (the string itself).
.s (or .spstr): value as whitespace-separated string.
.p (or .paths): list of path objects (requires path.py package)
Any values which require transformations are computed only once and
cached.
Such strings are very useful to efficiently interact with the shell, which
typically only understands whitespace-separated options for commands."""
def get_list(self):
try:
return self.__list
except AttributeError:
self.__list = self.split('\n')
return self.__list
l = list = property(get_list)
def get_spstr(self):
try:
return self.__spstr
except AttributeError:
self.__spstr = self.replace('\n',' ')
return self.__spstr
s = spstr = property(get_spstr)
def get_nlstr(self):
return self
n = nlstr = property(get_nlstr)
def get_paths(self):
try:
return self.__paths
except AttributeError:
self.__paths = [Path(p) for p in self.split('\n') if os.path.exists(p)]
return self.__paths
p = paths = property(get_paths)
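# A minimal, hypothetical usage sketch of the special attributes described above.
def _lsstring_example():
    out = LSString("setup.py\nREADME.rst")
    # .l splits on newlines, .s joins with spaces, .p keeps only existing paths.
    return out.l, out.s, out.p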
# FIXME: We need to reimplement type specific displayhook and then add this
# back as a custom printer. This should also be moved outside utils into the
# core.
# def print_lsstring(arg):
# """ Prettier (non-repr-like) and more informative printer for LSString """
# print "LSString (.p, .n, .l, .s available). Value:"
# print arg
#
#
# print_lsstring = result_display.register(LSString)(print_lsstring)
class SList(list):
"""List derivative with a special access attributes.
These are normal lists, but with the special attributes:
* .l (or .list) : value as list (the list itself).
* .n (or .nlstr): value as a string, joined on newlines.
* .s (or .spstr): value as a string, joined on spaces.
* .p (or .paths): list of path objects (requires path.py package)
Any values which require transformations are computed only once and
cached."""
def get_list(self):
return self
l = list = property(get_list)
def get_spstr(self):
try:
return self.__spstr
except AttributeError:
self.__spstr = ' '.join(self)
return self.__spstr
s = spstr = property(get_spstr)
def get_nlstr(self):
try:
return self.__nlstr
except AttributeError:
self.__nlstr = '\n'.join(self)
return self.__nlstr
n = nlstr = property(get_nlstr)
def get_paths(self):
try:
return self.__paths
except AttributeError:
self.__paths = [Path(p) for p in self if os.path.exists(p)]
return self.__paths
p = paths = property(get_paths)
def grep(self, pattern, prune = False, field = None):
""" Return all strings matching 'pattern' (a regex or callable)
This is case-insensitive. If prune is true, return all items
NOT matching the pattern.
If field is specified, the match must occur in the specified
whitespace-separated field.
Examples::
a.grep( lambda x: x.startswith('C') )
a.grep('Cha.*log', prune=1)
a.grep('chm', field=-1)
"""
def match_target(s):
if field is None:
return s
parts = s.split()
try:
tgt = parts[field]
return tgt
except IndexError:
return ""
if isinstance(pattern, str):
pred = lambda x : re.search(pattern, x, re.IGNORECASE)
else:
pred = pattern
if not prune:
return SList([el for el in self if pred(match_target(el))])
else:
return SList([el for el in self if not pred(match_target(el))])
def fields(self, *fields):
""" Collect whitespace-separated fields from string list
Allows quick awk-like usage of string lists.
Example data (in var a, created by 'a = !ls -l')::
-rwxrwxrwx 1 ville None 18 Dec 14 2006 ChangeLog
drwxrwxrwx+ 6 ville None 0 Oct 24 18:05 IPython
* ``a.fields(0)`` is ``['-rwxrwxrwx', 'drwxrwxrwx+']``
* ``a.fields(1,0)`` is ``['1 -rwxrwxrwx', '6 drwxrwxrwx+']``
(note the joining by space).
* ``a.fields(-1)`` is ``['ChangeLog', 'IPython']``
IndexErrors are ignored.
Without args, fields() just split()'s the strings.
"""
if len(fields) == 0:
return [el.split() for el in self]
res = SList()
for el in [f.split() for f in self]:
lineparts = []
for fd in fields:
try:
lineparts.append(el[fd])
except IndexError:
pass
if lineparts:
res.append(" ".join(lineparts))
return res
def sort(self,field= None, nums = False):
""" sort by specified fields (see fields())
Example::
a.sort(1, nums = True)
Sorts a by second field, in numerical order (so that 21 > 3)
"""
#decorate, sort, undecorate
if field is not None:
dsu = [[SList([line]).fields(field), line] for line in self]
else:
dsu = [[line, line] for line in self]
if nums:
for i in range(len(dsu)):
numstr = "".join([ch for ch in dsu[i][0] if ch.isdigit()])
try:
n = int(numstr)
except ValueError:
n = 0
dsu[i][0] = n
dsu.sort()
return SList([t[1] for t in dsu])
# FIXME: We need to reimplement type specific displayhook and then add this
# back as a custom printer. This should also be moved outside utils into the
# core.
# def print_slist(arg):
# """ Prettier (non-repr-like) and more informative printer for SList """
# print "SList (.p, .n, .l, .s, .grep(), .fields(), sort() available):"
# if hasattr(arg, 'hideonce') and arg.hideonce:
# arg.hideonce = False
# return
#
# nlprint(arg) # This was a nested list printer, now removed.
#
# print_slist = result_display.register(SList)(print_slist)
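# A minimal usage sketch for SList (hypothetical 'ls -l'-style lines, for illustration only):
#
#   a = SList(['-rw-r--r-- 1 ville None 18 ChangeLog',
#              'drwxr-xr-x 6 ville None  0 IPython'])
#   a.fields(-1)          # ['ChangeLog', 'IPython']  -- last whitespace-separated field
#   a.grep('ipython')     # case-insensitive match -> only the IPython line survives
#   a.sort(4, nums=True)  # sort by the size field, numerically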
def indent(instr,nspaces=4, ntabs=0, flatten=False):
"""Indent a string a given number of spaces or tabstops.
indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.
Parameters
----------
instr : str
The string to be indented.
nspaces : int (default: 4)
The number of spaces to be indented.
ntabs : int (default: 0)
The number of tabs to be indented.
flatten : bool (default: False)
Whether to scrub existing indentation. If True, all lines will be
aligned to the same indentation. If False, existing indentation will
be strictly increased.
Returns
-------
str : string indented by ntabs and nspaces.
"""
if instr is None:
return
ind = '\t'*ntabs+' '*nspaces
if flatten:
pat = re.compile(r'^\s*', re.MULTILINE)
else:
pat = re.compile(r'^', re.MULTILINE)
outstr = re.sub(pat, ind, instr)
if outstr.endswith(os.linesep+ind):
return outstr[:-len(ind)]
else:
return outstr
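# A minimal sketch of indent() behaviour (illustrative strings only):
#
#   indent("a\nb")                        # '    a\n    b'  -- four spaces prepended per line
#   indent("  a\n      b", flatten=True)  # '    a\n    b'  -- existing indentation replaced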
def list_strings(arg):
"""Always return a list of strings, given a string or list of strings
as input.
Examples
--------
::
In [7]: list_strings('A single string')
Out[7]: ['A single string']
In [8]: list_strings(['A single string in a list'])
Out[8]: ['A single string in a list']
In [9]: list_strings(['A','list','of','strings'])
Out[9]: ['A', 'list', 'of', 'strings']
"""
if isinstance(arg, str):
return [arg]
else:
return arg
def marquee(txt='',width=78,mark='*'):
"""Return the input string centered in a 'marquee'.
Examples
--------
::
In [16]: marquee('A test',40)
Out[16]: '**************** A test ****************'
In [17]: marquee('A test',40,'-')
Out[17]: '---------------- A test ----------------'
In [18]: marquee('A test',40,' ')
Out[18]: ' A test '
"""
if not txt:
return (mark*width)[:width]
nmark = (width-len(txt)-2)//len(mark)//2
if nmark < 0: nmark = 0
marks = mark*nmark
return '%s %s %s' % (marks,txt,marks)
ini_spaces_re = re.compile(r'^(\s+)')
def num_ini_spaces(strng):
"""Return the number of initial spaces in a string"""
ini_spaces = ini_spaces_re.match(strng)
if ini_spaces:
return ini_spaces.end()
else:
return 0
def format_screen(strng):
"""Format a string for screen printing.
This removes some latex-type format codes."""
# Paragraph continue
par_re = re.compile(r'\\$',re.MULTILINE)
strng = par_re.sub('',strng)
return strng
def dedent(text):
"""Equivalent of textwrap.dedent that ignores unindented first line.
This means it will still dedent strings like:
'''foo
is a bar
'''
For use in wrap_paragraphs.
"""
if text.startswith('\n'):
# text starts with blank line, don't ignore the first line
return textwrap.dedent(text)
# split first line
splits = text.split('\n',1)
if len(splits) == 1:
# only one line
return textwrap.dedent(text)
first, rest = splits
# dedent everything but the first line
rest = textwrap.dedent(rest)
return '\n'.join([first, rest])
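# A minimal sketch of how this differs from textwrap.dedent (illustrative input):
#
#   dedent("first line\n    second\n    third")
#   # -> 'first line\nsecond\nthird'   -- flush first line kept, the rest dedented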
def wrap_paragraphs(text, ncols=80):
"""Wrap multiple paragraphs to fit a specified width.
This is equivalent to textwrap.wrap, but with support for multiple
paragraphs, as separated by empty lines.
Returns
-------
list of complete paragraphs, wrapped to fill `ncols` columns.
"""
paragraph_re = re.compile(r'\n(\s*\n)+', re.MULTILINE)
text = dedent(text).strip()
paragraphs = paragraph_re.split(text)[::2] # every other entry is space
out_ps = []
indent_re = re.compile(r'\n\s+', re.MULTILINE)
for p in paragraphs:
# presume indentation that survives dedent is meaningful formatting,
# so don't fill unless text is flush.
if indent_re.search(p) is None:
# wrap paragraph
p = textwrap.fill(p, ncols)
out_ps.append(p)
return out_ps
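# A minimal sketch (illustrative text; paragraphs are separated by a blank line):
#
#   wrap_paragraphs("one two three four\n\nfive six", ncols=10)
#   # -> ['one two\nthree four', 'five six']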
def strip_email_quotes(text):
"""Strip leading email quotation characters ('>').
Removes any combination of leading '>' interspersed with whitespace that
appears *identically* in all lines of the input text.
Parameters
----------
text : str
Examples
--------
Simple uses::
In [2]: strip_email_quotes('> > text')
Out[2]: 'text'
In [3]: strip_email_quotes('> > text\\n> > more')
Out[3]: 'text\\nmore'
Note how only the common prefix that appears in all lines is stripped::
In [4]: strip_email_quotes('> > text\\n> > more\\n> more...')
Out[4]: '> text\\n> more\\nmore...'
So if any line has no quote marks ('>'), then none are stripped from any
of them ::
In [5]: strip_email_quotes('> > text\\n> > more\\nlast different')
Out[5]: '> > text\\n> > more\\nlast different'
"""
lines = text.splitlines()
strip_len = 0
for characters in zip(*lines):
# Check if all characters in this position are the same
if len(set(characters)) > 1:
break
prefix_char = characters[0]
if prefix_char in string.whitespace or prefix_char == ">":
strip_len += 1
else:
break
text = "\n".join([ln[strip_len:] for ln in lines])
return text
def strip_ansi(source):
"""
Remove ansi escape codes from text.
Parameters
----------
source : str
Source to remove the ansi from
"""
return re.sub(r'\033\[(\d|;)+?m', '', source)
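# A minimal sketch (a hypothetical red-colored string):
#
#   strip_ansi('\x1b[31mred\x1b[0m')   # -> 'red'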
class EvalFormatter(Formatter):
"""A String Formatter that allows evaluation of simple expressions.
Note that this version interprets a `:` as specifying a format string (as per
standard string formatting), so if slicing is required, you must explicitly
create a slice.
This is to be used in templating cases, such as the parallel batch
script templates, where simple arithmetic on arguments is useful.
Examples
--------
::
In [1]: f = EvalFormatter()
In [2]: f.format('{n//4}', n=8)
Out[2]: '2'
In [3]: f.format("{greeting[slice(2,4)]}", greeting="Hello")
Out[3]: 'll'
"""
def get_field(self, name, args, kwargs):
v = eval(name, kwargs)
return v, name
#XXX: As of Python 3.4, the format string parsing no longer splits on a colon
# inside [], so EvalFormatter can handle slicing. Once we only support 3.4 and
# above, it should be possible to remove FullEvalFormatter.
class FullEvalFormatter(Formatter):
"""A String Formatter that allows evaluation of simple expressions.
Any time a format key is not found in the kwargs,
it will be tried as an expression in the kwargs namespace.
Note that this version allows slicing using [1:2], so you cannot specify
a format string. Use :class:`EvalFormatter` to permit format strings.
Examples
--------
::
In [1]: f = FullEvalFormatter()
In [2]: f.format('{n//4}', n=8)
Out[2]: '2'
In [3]: f.format('{list(range(5))[2:4]}')
Out[3]: '[2, 3]'
In [4]: f.format('{3*2}')
Out[4]: '6'
"""
# copied from Formatter._vformat with minor changes to allow eval
# and replace the format_spec code with slicing
def vformat(self, format_string:str, args, kwargs)->str:
result = []
for literal_text, field_name, format_spec, conversion in \
self.parse(format_string):
# output the literal text
if literal_text:
result.append(literal_text)
# if there's a field, output it
if field_name is not None:
# this is some markup, find the object and do
# the formatting
if format_spec:
# override format spec, to allow slicing:
field_name = ':'.join([field_name, format_spec])
# eval the contents of the field for the object
# to be formatted
obj = eval(field_name, kwargs)
# do any conversion on the resulting object
obj = self.convert_field(obj, conversion)
# format the object and append to the result
result.append(self.format_field(obj, ''))
return ''.join(result)
class DollarFormatter(FullEvalFormatter):
"""Formatter allowing Itpl style $foo replacement, for names and attribute
access only. Standard {foo} replacement also works, and allows full
evaluation of its arguments.
Examples
--------
::
In [1]: f = DollarFormatter()
In [2]: f.format('{n//4}', n=8)
Out[2]: '2'
In [3]: f.format('23 * 76 is $result', result=23*76)
Out[3]: '23 * 76 is 1748'
In [4]: f.format('$a or {b}', a=1, b=2)
Out[4]: '1 or 2'
"""
_dollar_pattern_ignore_single_quote = re.compile(r"(.*?)\$(\$?[\w\.]+)(?=([^']*'[^']*')*[^']*$)")
def parse(self, fmt_string):
for literal_txt, field_name, format_spec, conversion \
in Formatter.parse(self, fmt_string):
# Find $foo patterns in the literal text.
continue_from = 0
txt = ""
for m in self._dollar_pattern_ignore_single_quote.finditer(literal_txt):
new_txt, new_field = m.group(1,2)
# $$foo --> $foo
if new_field.startswith("$"):
txt += new_txt + new_field
else:
yield (txt + new_txt, new_field, "", None)
txt = ""
continue_from = m.end()
# Re-yield the {foo} style pattern
yield (txt + literal_txt[continue_from:], field_name, format_spec, conversion)
def __repr__(self):
return "<DollarFormatter>"
#-----------------------------------------------------------------------------
# Utils to columnize a list of string
#-----------------------------------------------------------------------------
def _col_chunks(l, max_rows, row_first=False):
"""Yield successive max_rows-sized column chunks from l."""
if row_first:
ncols = (len(l) // max_rows) + (len(l) % max_rows > 0)
for i in range(ncols):
yield [l[j] for j in range(i, len(l), ncols)]
else:
for i in range(0, len(l), max_rows):
yield l[i:(i + max_rows)]
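# A minimal sketch of the two layouts (illustrative list):
#
#   list(_col_chunks(['a', 'b', 'c', 'd', 'e'], 2))
#   # column-first -> [['a', 'b'], ['c', 'd'], ['e']]
#   list(_col_chunks(['a', 'b', 'c', 'd', 'e'], 2, row_first=True))
#   # row-first    -> [['a', 'd'], ['b', 'e'], ['c']]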
def _find_optimal(rlist, row_first=False, separator_size=2, displaywidth=80):
"""Calculate optimal info to columnize a list of string"""
for max_rows in range(1, len(rlist) + 1):
col_widths = list(map(max, _col_chunks(rlist, max_rows, row_first)))
sumlength = sum(col_widths)
ncols = len(col_widths)
if sumlength + separator_size * (ncols - 1) <= displaywidth:
break
return {'num_columns': ncols,
'optimal_separator_width': (displaywidth - sumlength) // (ncols - 1) if (ncols - 1) else 0,
'max_rows': max_rows,
'column_widths': col_widths
}
def _get_or_default(mylist, i, default=None):
"""return list item number, or default if don't exist"""
if i >= len(mylist):
return default
else :
return mylist[i]
def compute_item_matrix(items, row_first=False, empty=None, *args, **kwargs) :
"""Returns a nested list, and info to columnize items
Parameters
----------
items
list of strings to columnize
row_first : (default False)
Whether to compute columns for a row-first matrix instead of
column-first (default).
empty : (default None)
default value to fill list if needed
separator_size : int (default=2)
How many characters will be used as separation between columns.
displaywidth : int (default=80)
The width of the area into which the columns should fit.
Returns
-------
strings_matrix
nested list of strings; the outermost list contains as many lists as
rows, and each innermost list has as many elements as columns. If the
total number of elements in `items` does not equal the product
rows*columns, the last elements of some lists are filled with `None`.
dict_info
some info to make columnize easier:
num_columns
number of columns
max_rows
maximum number of rows (final number may be less)
column_widths
list of widths of each column
optimal_separator_width
best separator width between columns
Examples
--------
::
In [1]: l = ['aaa','b','cc','d','eeeee','f','g','h','i','j','k','l']
In [2]: list, info = compute_item_matrix(l, displaywidth=12)
In [3]: list
Out[3]: [['aaa', 'f', 'k'], ['b', 'g', 'l'], ['cc', 'h', None], ['d', 'i', None], ['eeeee', 'j', None]]
In [4]: ideal = {'num_columns': 3, 'column_widths': [5, 1, 1], 'optimal_separator_width': 2, 'max_rows': 5}
In [5]: all((info[k] == ideal[k] for k in ideal.keys()))
Out[5]: True
"""
info = _find_optimal(list(map(len, items)), row_first, *args, **kwargs)
nrow, ncol = info['max_rows'], info['num_columns']
if row_first:
return ([[_get_or_default(items, r * ncol + c, default=empty) for c in range(ncol)] for r in range(nrow)], info)
else:
return ([[_get_or_default(items, c * nrow + r, default=empty) for c in range(ncol)] for r in range(nrow)], info)
def columnize(items, row_first=False, separator="  ", displaywidth=80, spread=False):
"""Transform a list of strings into a single string with columns.
Parameters
----------
items : sequence of strings
The strings to process.
row_first : (default False)
Whether to compute columns for a row-first matrix instead of
column-first (default).
separator : str, optional [default is two spaces]
The string that separates columns.
displaywidth : int, optional [default is 80]
Width of the display in number of characters.
Returns
-------
The formatted string.
"""
if not items:
return '\n'
matrix, info = compute_item_matrix(items, row_first=row_first, separator_size=len(separator), displaywidth=displaywidth)
if spread:
separator = separator.ljust(int(info['optimal_separator_width']))
fmatrix = [filter(None, x) for x in matrix]
sjoin = lambda x : separator.join([ y.ljust(w, ' ') for y, w in zip(x, info['column_widths'])])
return '\n'.join(map(sjoin, fmatrix))+'\n'
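# A minimal sketch (same illustrative list as in the compute_item_matrix docstring above):
#
#   print(columnize(['aaa', 'b', 'cc', 'd', 'eeeee', 'f', 'g', 'h', 'i', 'j', 'k', 'l'],
#                   displaywidth=12))
#   # prints a column-first layout, e.g.:
#   # aaa    f  k
#   # b      g  l
#   # cc     h
#   # d      i
#   # eeeee  j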
def get_text_list(list_, last_sep=' and ', sep=", ", wrap_item_with=""):
"""
Return a string with a natural enumeration of items
>>> get_text_list(['a', 'b', 'c', 'd'])
'a, b, c and d'
>>> get_text_list(['a', 'b', 'c'], ' or ')
'a, b or c'
>>> get_text_list(['a', 'b', 'c'], ', ')
'a, b, c'
>>> get_text_list(['a', 'b'], ' or ')
'a or b'
>>> get_text_list(['a'])
'a'
>>> get_text_list([])
''
>>> get_text_list(['a', 'b'], wrap_item_with="`")
'`a` and `b`'
>>> get_text_list(['a', 'b', 'c', 'd'], " = ", sep=" + ")
'a + b + c = d'
"""
if len(list_) == 0:
return ''
if wrap_item_with:
list_ = ['%s%s%s' % (wrap_item_with, item, wrap_item_with) for
item in list_]
if len(list_) == 1:
return list_[0]
return '%s%s%s' % (
sep.join(i for i in list_[:-1]),
last_sep, list_[-1])

View File

@@ -0,0 +1,123 @@
# encoding: utf-8
"""
Utilities for timing code execution.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import time
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# If possible (Unix), use the resource module instead of time.clock()
try:
import resource
except ImportError:
resource = None
# Some implementations (like jupyterlite) don't have getrusage
if resource is not None and hasattr(resource, "getrusage"):
def clocku():
"""clocku() -> floating point number
Return the *USER* CPU time in seconds since the start of the process.
This is done via a call to resource.getrusage, so it avoids the
wraparound problems in time.clock()."""
return resource.getrusage(resource.RUSAGE_SELF)[0]
def clocks():
"""clocks() -> floating point number
Return the *SYSTEM* CPU time in seconds since the start of the process.
This is done via a call to resource.getrusage, so it avoids the
wraparound problems in time.clock()."""
return resource.getrusage(resource.RUSAGE_SELF)[1]
def clock():
"""clock() -> floating point number
Return the *TOTAL USER+SYSTEM* CPU time in seconds since the start of
the process. This is done via a call to resource.getrusage, so it
avoids the wraparound problems in time.clock()."""
u,s = resource.getrusage(resource.RUSAGE_SELF)[:2]
return u+s
def clock2():
"""clock2() -> (t_user,t_system)
Similar to clock(), but return a tuple of user/system times."""
return resource.getrusage(resource.RUSAGE_SELF)[:2]
else:
# There is no distinction of user/system time under windows, so we just use
# time.process_time() for everything...
clocku = clocks = clock = time.process_time
def clock2():
"""Under windows, system CPU time can't be measured.
This just returns process_time() and zero."""
return time.process_time(), 0.0
def timings_out(reps,func,*args,**kw):
"""timings_out(reps,func,*args,**kw) -> (t_total,t_per_call,output)
Execute a function reps times, return a tuple with the elapsed total
CPU time in seconds, the time per call and the function's output.
Under Unix, the return value is the sum of user+system time consumed by
the process, computed via the resource module. This prevents problems
related to the wraparound effect which the time.clock() function has.
Under Windows the return value is in wall clock seconds. See the
documentation for the time module for more details."""
reps = int(reps)
assert reps >=1, 'reps must be >= 1'
if reps==1:
start = clock()
out = func(*args,**kw)
tot_time = clock()-start
else:
rng = range(reps-1) # the last time is executed separately to store output
start = clock()
for dummy in rng: func(*args,**kw)
out = func(*args,**kw) # one last time
tot_time = clock()-start
av_time = tot_time / reps
return tot_time,av_time,out
def timings(reps,func,*args,**kw):
"""timings(reps,func,*args,**kw) -> (t_total,t_per_call)
Execute a function reps times, return a tuple with the elapsed total CPU
time in seconds and the time per call. These are just the first two values
in timings_out()."""
return timings_out(reps,func,*args,**kw)[0:2]
def timing(func,*args,**kw):
"""timing(func,*args,**kw) -> t_total
Execute a function once, return the elapsed total CPU time in
seconds. This is just the first value in timings_out()."""
return timings_out(1,func,*args,**kw)[0]
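# A minimal sketch of the timing helpers (illustrative calls):
#
#   tot, per_call = timings(100, sum, range(10000))   # total and per-call CPU time
#   t_single = timing(sorted, [3, 1, 2])              # CPU time of a single call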

View File

@@ -0,0 +1,127 @@
"""Token-related utilities"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from collections import namedtuple
from io import StringIO
from keyword import iskeyword
import tokenize
Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
def generate_tokens(readline):
"""wrap generate_tokens to catch EOF errors"""
try:
for token in tokenize.generate_tokens(readline):
yield token
except tokenize.TokenError:
# catch EOF error
return
def line_at_cursor(cell, cursor_pos=0):
"""Return the line in a cell at a given cursor position
Used for calling line-based APIs that don't support multi-line input yet.
Parameters
----------
cell : str
multiline block of text
cursor_pos : integer
the cursor position
Returns
-------
(line, offset): (string, integer)
The line with the current cursor, and the character offset of the start of the line.
"""
offset = 0
lines = cell.splitlines(True)
for line in lines:
next_offset = offset + len(line)
if not line.endswith('\n'):
# If the last line doesn't have a trailing newline, treat it as if
# it does so that the cursor at the end of the line still counts
# as being on that line.
next_offset += 1
if next_offset > cursor_pos:
break
offset = next_offset
else:
line = ""
return (line, offset)
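# A minimal sketch (cursor position 5 falls on the second line of this cell):
#
#   line_at_cursor("abc\ndef", cursor_pos=5)   # -> ('def', 4)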
def token_at_cursor(cell, cursor_pos=0):
"""Get the token at a given cursor
Used for introspection.
Function calls are prioritized, so the token for the callable will be returned
if the cursor is anywhere inside the call.
Parameters
----------
cell : unicode
A block of Python code
cursor_pos : int
The location of the cursor in the block where the token should be found
"""
names = []
tokens = []
call_names = []
offsets = {1: 0} # lines start at 1
for tup in generate_tokens(StringIO(cell).readline):
tok = Token(*tup)
# token, text, start, end, line = tup
start_line, start_col = tok.start
end_line, end_col = tok.end
if end_line + 1 not in offsets:
# keep track of offsets for each line
lines = tok.line.splitlines(True)
for lineno, line in enumerate(lines, start_line + 1):
if lineno not in offsets:
offsets[lineno] = offsets[lineno-1] + len(line)
offset = offsets[start_line]
# allow '|foo' to find 'foo' at the beginning of a line
boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
if offset + start_col >= boundary:
# current token starts after the cursor,
# don't consume it
break
if tok.token == tokenize.NAME and not iskeyword(tok.text):
if names and tokens and tokens[-1].token == tokenize.OP and tokens[-1].text == '.':
names[-1] = "%s.%s" % (names[-1], tok.text)
else:
names.append(tok.text)
elif tok.token == tokenize.OP:
if tok.text == '=' and names:
# don't inspect the lhs of an assignment
names.pop(-1)
if tok.text == '(' and names:
# if we are inside a function call, inspect the function
call_names.append(names[-1])
elif tok.text == ')' and call_names:
call_names.pop(-1)
tokens.append(tok)
if offsets[end_line] + end_col > cursor_pos:
# we found the cursor, stop reading
break
if call_names:
return call_names[-1]
elif names:
return names[-1]
else:
return ''
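# A minimal sketch (hypothetical cell; the cursor sits on the call argument 'x',
# so the callable's dotted name is returned rather than 'x' itself):
#
#   token_at_cursor("a = np.sum(x)", cursor_pos=11)   # -> 'np.sum'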

View File

@@ -0,0 +1,6 @@
from warnings import warn
warn("IPython.utils.traitlets has moved to a top-level traitlets package.", stacklevel=2)
from traitlets import *

View File

@@ -0,0 +1,46 @@
# encoding: utf-8
"""
Timezone utilities
Just UTC-awareness right now
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from datetime import tzinfo, timedelta, datetime
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# constant for zero offset
ZERO = timedelta(0)
class tzUTC(tzinfo):
"""tzinfo object for UTC (zero offset)"""
def utcoffset(self, d):
return ZERO
def dst(self, d):
return ZERO
UTC = tzUTC()
def utc_aware(unaware):
"""decorator for adding UTC tzinfo to datetime's utcfoo methods"""
def utc_method(*args, **kwargs):
dt = unaware(*args, **kwargs)
return dt.replace(tzinfo=UTC)
return utc_method
utcfromtimestamp = utc_aware(datetime.utcfromtimestamp)
utcnow = utc_aware(datetime.utcnow)
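# A minimal sketch:
#
#   now = utcnow()      # timezone-aware datetime, tzinfo set to the UTC singleton
#   now.utcoffset()     # -> timedelta(0)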

View File

@@ -0,0 +1,21 @@
"""
This module has been deprecated since IPython 6.0.
Wrapper around linecache which decodes files to unicode according to PEP 263.
"""
import functools
import linecache
from warnings import warn
getline = linecache.getline
# getlines has to be looked up at runtime, because doctests monkeypatch it.
@functools.wraps(linecache.getlines)
def getlines(filename, module_globals=None):
"""
Deprecated since IPython 6.0
"""
warn(("`IPython.utils.ulinecache.getlines` is deprecated since"
" IPython 6.0 and will be removed in future versions."),
DeprecationWarning, stacklevel=2)
return linecache.getlines(filename, module_globals=module_globals)

View File

@@ -0,0 +1,43 @@
# encoding: utf-8
"""
Utilities for version comparison
It is a bit ridiculous that we need these.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
from warnings import warn
warn(
"The `IPython.utils.version` module has been deprecated since IPython 8.0.",
DeprecationWarning,
)
def check_version(v, check):
"""check version string v >= check
If dev/prerelease tags result in TypeError for string-number comparison,
it is assumed that the dependency is satisfied.
Users on dev branches are responsible for keeping their own packages up to date.
"""
warn(
"`check_version` function is deprecated as of IPython 8.0"
"and will be removed in future versions.",
DeprecationWarning,
stacklevel=2,
)
from distutils.version import LooseVersion
try:
return LooseVersion(v) >= LooseVersion(check)
except TypeError:
return True

View File

@@ -0,0 +1,111 @@
# -*- coding: utf-8 -*-
"""Support for wildcard pattern matching in object inspection.
Authors
-------
- Jörgen Stenarson <jorgen.stenarson@bostream.nu>
- Thomas Kluyver
"""
#*****************************************************************************
# Copyright (C) 2005 Jörgen Stenarson <jorgen.stenarson@bostream.nu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
import re
import types
from IPython.utils.dir2 import dir2
def create_typestr2type_dicts(dont_include_in_type2typestr=["lambda"]):
"""Return dictionaries mapping lower case typename (e.g. 'tuple') to type
objects from the types package, and vice versa."""
typenamelist = [tname for tname in dir(types) if tname.endswith("Type")]
typestr2type, type2typestr = {}, {}
for tname in typenamelist:
name = tname[:-4].lower() # Cut 'Type' off the end of the name
obj = getattr(types, tname)
typestr2type[name] = obj
if name not in dont_include_in_type2typestr:
type2typestr[obj] = name
return typestr2type, type2typestr
typestr2type, type2typestr = create_typestr2type_dicts()
def is_type(obj, typestr_or_type):
"""is_type(obj, typestr_or_type) verifies if obj is of a certain type. It
can take strings or actual python types for the second argument, i.e.
'tuple'<->TupleType. 'all' matches all types.
TODO: Should be extended for choosing more than one type."""
if typestr_or_type == "all":
return True
if type(typestr_or_type) == type:
test_type = typestr_or_type
else:
test_type = typestr2type.get(typestr_or_type, False)
if test_type:
return isinstance(obj, test_type)
return False
def show_hidden(str, show_all=False):
"""Return true for strings starting with single _ if show_all is true."""
return show_all or str.startswith("__") or not str.startswith("_")
def dict_dir(obj):
"""Produce a dictionary of an object's attributes. Builds on dir2 by
checking that a getattr() call actually succeeds."""
ns = {}
for key in dir2(obj):
# This seemingly unnecessary try/except is actually needed
# because there is code out there with metaclasses that
# create 'write only' attributes, where a getattr() call
# will fail even if the attribute appears listed in the
# object's dictionary. Properties can actually do the same
# thing. In particular, Traits use this pattern
try:
ns[key] = getattr(obj, key)
except AttributeError:
pass
return ns
def filter_ns(ns, name_pattern="*", type_pattern="all", ignore_case=True,
show_all=True):
"""Filter a namespace dictionary by name pattern and item type."""
pattern = name_pattern.replace("*",".*").replace("?",".")
if ignore_case:
reg = re.compile(pattern+"$", re.I)
else:
reg = re.compile(pattern+"$")
# Check each one matches regex; shouldn't be hidden; of correct type.
return dict((key,obj) for key, obj in ns.items() if reg.match(key) \
and show_hidden(key, show_all) \
and is_type(obj, type_pattern) )
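# A minimal sketch (POSIX assumed, so os.path resolves to the posixpath module):
#
#   import os
#   ns = dict_dir(os)
#   filter_ns(ns, name_pattern='path*', type_pattern='module')
#   # -> {'path': <module 'posixpath' ...>}  -- only module-typed matches survive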
def list_namespace(namespace, type_pattern, filter, ignore_case=False, show_all=False):
"""Return dictionary of all objects in a namespace dictionary that match
type_pattern and filter."""
pattern_list=filter.split(".")
if len(pattern_list) == 1:
return filter_ns(namespace, name_pattern=pattern_list[0],
type_pattern=type_pattern,
ignore_case=ignore_case, show_all=show_all)
else:
# This is where we can change if all objects should be searched or
# only modules. Just change the type_pattern to module to search only
# modules
filtered = filter_ns(namespace, name_pattern=pattern_list[0],
type_pattern="all",
ignore_case=ignore_case, show_all=show_all)
results = {}
for name, obj in filtered.items():
ns = list_namespace(dict_dir(obj), type_pattern,
".".join(pattern_list[1:]),
ignore_case=ignore_case, show_all=show_all)
for inner_name, inner_obj in ns.items():
results["%s.%s"%(name,inner_name)] = inner_obj
return results