first commit

Ayxan committed 2022-05-23 00:16:32 +04:00
commit d660f2a4ca
24786 changed files with 4428337 additions and 0 deletions

@@ -0,0 +1,11 @@
# encoding: utf-8
"""
Extra capabilities for IPython
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------

@@ -0,0 +1,491 @@
# -*- coding: utf-8 -*-
"""Manage background (threaded) jobs conveniently from an interactive shell.
This module provides a BackgroundJobManager class. This is the main class
meant for public usage, it implements an object which can create and manage
new background jobs.
It also provides the actual job classes managed by these BackgroundJobManager
objects, see their docstrings below.
This system was inspired by discussions with B. Granger and the
BackgroundCommand class described in the book Python Scripting for
Computational Science, by H. P. Langtangen:
http://folk.uio.no/hpl/scripting
(although ultimately no code from this text was used, as IPython's system is a
separate implementation).
An example notebook is provided in our documentation illustrating interactive
use of the system.
"""
#*****************************************************************************
# Copyright (C) 2005-2006 Fernando Perez <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
# Code begins
import sys
import threading
from IPython import get_ipython
from IPython.core.ultratb import AutoFormattedTB
from logging import error, debug
class BackgroundJobManager(object):
"""Class to manage a pool of backgrounded threaded jobs.
Below, we assume that 'jobs' is a BackgroundJobManager instance.
Usage summary (see the method docstrings for details):
jobs.new(...) -> start a new job
jobs() or jobs.status() -> print status summary of all jobs
jobs[N] -> returns job number N.
foo = jobs[N].result -> assign to variable foo the result of job N
jobs[N].traceback() -> print the traceback of dead job N
jobs.remove(N) -> remove (finished) job N
jobs.flush() -> remove all finished jobs
As a convenience feature, BackgroundJobManager instances provide the
utility result and traceback methods which retrieve the corresponding
information from the jobs list:
jobs.result(N) <--> jobs[N].result
jobs.traceback(N) <--> jobs[N].traceback()
While this appears minor, it allows you to use tab completion
interactively on the job manager instance.
"""
def __init__(self):
# Lists for job management, accessed via a property to ensure they're
# up to date.
self._running = []
self._completed = []
self._dead = []
# A dict of all jobs, so users can easily access any of them
self.all = {}
# For reporting
self._comp_report = []
self._dead_report = []
# Store status codes locally for fast lookups
self._s_created = BackgroundJobBase.stat_created_c
self._s_running = BackgroundJobBase.stat_running_c
self._s_completed = BackgroundJobBase.stat_completed_c
self._s_dead = BackgroundJobBase.stat_dead_c
self._current_job_id = 0
@property
def running(self):
self._update_status()
return self._running
@property
def dead(self):
self._update_status()
return self._dead
@property
def completed(self):
self._update_status()
return self._completed
def new(self, func_or_exp, *args, **kwargs):
"""Add a new background job and start it in a separate thread.
There are two types of jobs which can be created:
1. Jobs based on expressions which can be passed to an eval() call.
The expression must be given as a string. For example:
job_manager.new('myfunc(x,y,z=1)'[,glob[,loc]])
The given expression is passed to eval(), along with the optional
global/local dicts provided. If no dicts are given, they are
extracted automatically from the caller's frame.
A Python statement is NOT a valid eval() expression. Basically, you
can only use as an eval() argument something which can go on the right
of an '=' sign and be assigned to a variable.
For example,"print 'hello'" is not valid, but '2+3' is.
2. Jobs given a function object, optionally passing additional
positional arguments:
job_manager.new(myfunc, x, y)
The function is called with the given arguments.
If you need to pass keyword arguments to your function, you must
supply them as a dict named kw:
job_manager.new(myfunc, x, y, kw=dict(z=1))
The reason for this asymmetry is that the new() method needs to
maintain access to its own keywords, and this prevents name collisions
between arguments to new() and arguments to your own functions.
In both cases, the result is stored in the job.result field of the
background job object.
You can set `daemon` attribute of the thread by giving the keyword
argument `daemon`.
Notes and caveats:
1. All threads running share the same standard output. Thus, if your
background jobs generate output, it will come out on top of whatever
you are currently writing. For this reason, background jobs are best
used with silent functions which simply return their output.
2. Threads also all work within the same global namespace, and this
system does not lock interactive variables. So if you send a job to the
background which operates on a mutable object for a long time, and
start modifying that same mutable object interactively (or in another
backgrounded job), all sorts of bizarre behaviour will occur.
3. If a background job is spending a lot of time inside a C extension
module which does not release the Python Global Interpreter Lock
(GIL), this will block the IPython prompt. This is simply because the
Python interpreter can only switch between threads at Python
bytecodes. While the execution is inside C code, the interpreter must
simply wait unless the extension module releases the GIL.
4. There is no way, due to limitations in the Python threads library,
to kill a thread once it has started."""
if callable(func_or_exp):
kw = kwargs.get('kw',{})
job = BackgroundJobFunc(func_or_exp,*args,**kw)
elif isinstance(func_or_exp, str):
if not args:
frame = sys._getframe(1)
glob, loc = frame.f_globals, frame.f_locals
elif len(args)==1:
glob = loc = args[0]
elif len(args)==2:
glob,loc = args
else:
raise ValueError(
'Expression jobs take at most 2 args (globals,locals)')
job = BackgroundJobExpr(func_or_exp, glob, loc)
else:
raise TypeError('invalid args for new job')
if kwargs.get('daemon', False):
job.daemon = True
job.num = self._current_job_id
self._current_job_id += 1
self.running.append(job)
self.all[job.num] = job
debug('Starting job # %s in a separate thread.' % job.num)
job.start()
return job
def __getitem__(self, job_key):
num = job_key if isinstance(job_key, int) else job_key.num
return self.all[num]
def __call__(self):
"""An alias to self.status(),
This allows you to simply call a job manager instance much like the
Unix `jobs` shell command."""
return self.status()
def _update_status(self):
"""Update the status of the job lists.
This method moves finished jobs to one of two lists:
- self.completed: jobs which completed successfully
- self.dead: jobs which finished but died.
It also copies those jobs to corresponding _report lists. These lists
are used to report jobs completed/dead since the last update, and are
then cleared by the reporting function after each call."""
# Status codes
srun, scomp, sdead = self._s_running, self._s_completed, self._s_dead
# State lists, use the actual lists b/c the public names are properties
# that call this very function on access
running, completed, dead = self._running, self._completed, self._dead
# Now, update all state lists
for num, job in enumerate(running):
stat = job.stat_code
if stat == srun:
continue
elif stat == scomp:
completed.append(job)
self._comp_report.append(job)
running[num] = False
elif stat == sdead:
dead.append(job)
self._dead_report.append(job)
running[num] = False
# Remove dead/completed jobs from running list
running[:] = filter(None, running)
def _group_report(self,group,name):
"""Report summary for a given job group.
Return True if the group had any elements."""
if group:
print('%s jobs:' % name)
for job in group:
print('%s : %s' % (job.num,job))
print()
return True
def _group_flush(self,group,name):
"""Flush a given job group
Return True if the group had any elements."""
njobs = len(group)
if njobs:
plural = {1:''}.setdefault(njobs,'s')
print('Flushing %s %s job%s.' % (njobs,name,plural))
group[:] = []
return True
def _status_new(self):
"""Print the status of newly finished jobs.
Return True if any new jobs are reported.
This call resets its own state every time, so it only reports jobs
which have finished since the last time it was called."""
self._update_status()
new_comp = self._group_report(self._comp_report, 'Completed')
new_dead = self._group_report(self._dead_report,
'Dead, call jobs.traceback() for details')
self._comp_report[:] = []
self._dead_report[:] = []
return new_comp or new_dead
def status(self,verbose=0):
"""Print a status of all jobs currently being managed."""
self._update_status()
self._group_report(self.running,'Running')
self._group_report(self.completed,'Completed')
self._group_report(self.dead,'Dead')
# Also flush the report queues
self._comp_report[:] = []
self._dead_report[:] = []
def remove(self,num):
"""Remove a finished (completed or dead) job."""
try:
job = self.all[num]
except KeyError:
error('Job #%s not found' % num)
else:
stat_code = job.stat_code
if stat_code == self._s_running:
error('Job #%s is still running, it can not be removed.' % num)
return
elif stat_code == self._s_completed:
self.completed.remove(job)
elif stat_code == self._s_dead:
self.dead.remove(job)
def flush(self):
"""Flush all finished jobs (completed and dead) from lists.
Running jobs are never flushed.
It first calls _status_new(), to update info. If any jobs have
completed since the last _status_new() call, the flush operation
aborts."""
# Remove the finished jobs from the master dict
alljobs = self.all
for job in self.completed+self.dead:
del(alljobs[job.num])
# Now flush these lists completely
fl_comp = self._group_flush(self.completed, 'Completed')
fl_dead = self._group_flush(self.dead, 'Dead')
if not (fl_comp or fl_dead):
print('No jobs to flush.')
def result(self,num):
"""result(N) -> return the result of job N."""
try:
return self.all[num].result
except KeyError:
error('Job #%s not found' % num)
def _traceback(self, job):
num = job if isinstance(job, int) else job.num
try:
self.all[num].traceback()
except KeyError:
error('Job #%s not found' % num)
def traceback(self, job=None):
if job is None:
self._update_status()
for deadjob in self.dead:
print("Traceback for: %r" % deadjob)
self._traceback(deadjob)
print()
else:
self._traceback(job)
class BackgroundJobBase(threading.Thread):
"""Base class to build BackgroundJob classes.
The derived classes must implement:
- Their own __init__, since the one here raises NotImplementedError. The
derived constructor must call self._init() at the end, to provide common
initialization.
- A strform attribute used in calls to __str__.
- A call() method, which will make the actual execution call and must
return a value to be held in the 'result' field of the job object.
"""
# Class constants for status, in string and as numerical codes (when
# updating jobs lists, we don't want to do string comparisons). This will
# be done at every user prompt, so it has to be as fast as possible
stat_created = 'Created'; stat_created_c = 0
stat_running = 'Running'; stat_running_c = 1
stat_completed = 'Completed'; stat_completed_c = 2
stat_dead = 'Dead (Exception), call jobs.traceback() for details'
stat_dead_c = -1
def __init__(self):
"""Must be implemented in subclasses.
Subclasses must call :meth:`_init` for standard initialisation.
"""
raise NotImplementedError("This class can not be instantiated directly.")
def _init(self):
"""Common initialization for all BackgroundJob objects"""
for attr in ['call','strform']:
assert hasattr(self,attr), "Missing attribute <%s>" % attr
# The num tag can be set by an external job manager
self.num = None
self.status = BackgroundJobBase.stat_created
self.stat_code = BackgroundJobBase.stat_created_c
self.finished = False
self.result = '<BackgroundJob has not completed>'
# reuse the ipython traceback handler if we can get to it, otherwise
# make a new one
try:
make_tb = get_ipython().InteractiveTB.text
except:
make_tb = AutoFormattedTB(mode = 'Context',
color_scheme='NoColor',
tb_offset = 1).text
# Note that the actual API for text() requires the three args to be
# passed in, so we wrap it in a simple lambda.
self._make_tb = lambda : make_tb(None, None, None)
# Hold a formatted traceback if one is generated.
self._tb = None
threading.Thread.__init__(self)
def __str__(self):
return self.strform
def __repr__(self):
return '<BackgroundJob #%d: %s>' % (self.num, self.strform)
def traceback(self):
print(self._tb)
def run(self):
try:
self.status = BackgroundJobBase.stat_running
self.stat_code = BackgroundJobBase.stat_running_c
self.result = self.call()
except:
self.status = BackgroundJobBase.stat_dead
self.stat_code = BackgroundJobBase.stat_dead_c
self.finished = None
self.result = ('<BackgroundJob died, call jobs.traceback() for details>')
self._tb = self._make_tb()
else:
self.status = BackgroundJobBase.stat_completed
self.stat_code = BackgroundJobBase.stat_completed_c
self.finished = True
class BackgroundJobExpr(BackgroundJobBase):
"""Evaluate an expression as a background job (uses a separate thread)."""
def __init__(self, expression, glob=None, loc=None):
"""Create a new job from a string which can be fed to eval().
global/locals dicts can be provided, which will be passed to the eval
call."""
# fail immediately if the given expression can't be compiled
self.code = compile(expression,'<BackgroundJob compilation>','eval')
glob = {} if glob is None else glob
loc = {} if loc is None else loc
self.expression = self.strform = expression
self.glob = glob
self.loc = loc
self._init()
def call(self):
return eval(self.code,self.glob,self.loc)
class BackgroundJobFunc(BackgroundJobBase):
"""Run a function call as a background job (uses a separate thread)."""
def __init__(self, func, *args, **kwargs):
"""Create a new job from a callable object.
Any positional arguments and keyword args given to this constructor
after the initial callable are passed directly to it."""
if not callable(func):
raise TypeError(
'first argument to BackgroundJobFunc must be callable')
self.func = func
self.args = args
self.kwargs = kwargs
# The string form will only include the function passed, because
# generating string representations of the arguments is a potentially
# _very_ expensive operation (e.g. with large arrays).
self.strform = str(func)
self._init()
def call(self):
return self.func(*self.args, **self.kwargs)
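For reference, a minimal sketch of how the manager described above is typically driven from an interactive session. It assumes the module is importable as IPython.lib.backgroundjobs (its usual location in IPython); slow_add is a made-up stand-in for any long-running function.

    import time
    from IPython.lib.backgroundjobs import BackgroundJobManager

    def slow_add(x, y, delay=1.0):
        """Toy function standing in for any long-running call."""
        time.sleep(delay)
        return x + y

    jobs = BackgroundJobManager()
    j1 = jobs.new(slow_add, 2, 3)                      # function job, positional args
    j2 = jobs.new(slow_add, 2, 3, kw=dict(delay=0.2))  # keyword args go in a 'kw' dict
    j3 = jobs.new('sum(range(100))')                   # expression job, eval()'d in the caller's namespace

    time.sleep(2)                 # give the threads time to finish
    jobs.status()                 # summary of running/completed/dead jobs
    print(jobs[j1.num].result)    # 5
    print(jobs.result(j3.num))    # 4950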

@@ -0,0 +1,69 @@
""" Utilities for accessing the platform's clipboard.
"""
import subprocess
from IPython.core.error import TryNext
import IPython.utils.py3compat as py3compat
class ClipboardEmpty(ValueError):
pass
def win32_clipboard_get():
""" Get the current clipboard's text on Windows.
Requires Mark Hammond's pywin32 extensions.
"""
try:
import win32clipboard
except ImportError as e:
raise TryNext("Getting text from the clipboard requires the pywin32 "
"extensions: http://sourceforge.net/projects/pywin32/") from e
win32clipboard.OpenClipboard()
try:
text = win32clipboard.GetClipboardData(win32clipboard.CF_UNICODETEXT)
except (TypeError, win32clipboard.error):
try:
text = win32clipboard.GetClipboardData(win32clipboard.CF_TEXT)
text = py3compat.cast_unicode(text, py3compat.DEFAULT_ENCODING)
except (TypeError, win32clipboard.error) as e:
raise ClipboardEmpty from e
finally:
win32clipboard.CloseClipboard()
return text
def osx_clipboard_get() -> str:
""" Get the clipboard's text on OS X.
"""
p = subprocess.Popen(['pbpaste', '-Prefer', 'ascii'],
stdout=subprocess.PIPE)
bytes_, stderr = p.communicate()
# Text comes in with old Mac \r line endings. Change them to \n.
bytes_ = bytes_.replace(b'\r', b'\n')
text = py3compat.decode(bytes_)
return text
def tkinter_clipboard_get():
""" Get the clipboard's text using Tkinter.
This is the default on systems that are not Windows or OS X. It may
interfere with other UI toolkits and should be replaced with an
implementation that uses that toolkit.
"""
try:
from tkinter import Tk, TclError
except ImportError as e:
raise TryNext("Getting text from the clipboard on this platform requires tkinter.") from e
root = Tk()
root.withdraw()
try:
text = root.clipboard_get()
except TclError as e:
raise ClipboardEmpty from e
finally:
root.destroy()
text = py3compat.cast_unicode(text, py3compat.DEFAULT_ENCODING)
return text
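A small sketch of how these getters might be tied together, assuming the module is importable as IPython.lib.clipboard. The platform dispatch below is an illustration, not the hook wiring IPython itself uses; TryNext (a missing backend) is left to propagate.

    import sys
    from IPython.lib.clipboard import (ClipboardEmpty, osx_clipboard_get,
                                       tkinter_clipboard_get, win32_clipboard_get)

    def clipboard_get() -> str:
        """Return clipboard text, picking a backend for the current platform."""
        if sys.platform == 'win32':
            getter = win32_clipboard_get
        elif sys.platform == 'darwin':
            getter = osx_clipboard_get
        else:
            getter = tkinter_clipboard_get   # generic fallback
        try:
            return getter()
        except ClipboardEmpty:
            return ''   # nothing usable on the clipboard

    print(repr(clipboard_get()))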

@@ -0,0 +1,310 @@
# -*- coding: utf-8 -*-
"""
Provides a reload() function that acts recursively.
Python's normal :func:`python:reload` function only reloads the module that it's
passed. The :func:`reload` function in this module also reloads everything
imported from that module, which is useful when you're changing files deep
inside a package.
To use this as your default reload function, type this::
import builtins
from IPython.lib import deepreload
builtins.reload = deepreload.reload
A reference to the original :func:`python:reload` is stored in this module as
:data:`original_reload`, so you can restore it later.
This code is almost entirely based on knee.py, which is a Python
re-implementation of hierarchical module import.
"""
#*****************************************************************************
# Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
import builtins as builtin_mod
from contextlib import contextmanager
import importlib
import sys
from types import ModuleType
from warnings import warn
import types
original_import = builtin_mod.__import__
@contextmanager
def replace_import_hook(new_import):
saved_import = builtin_mod.__import__
builtin_mod.__import__ = new_import
try:
yield
finally:
builtin_mod.__import__ = saved_import
def get_parent(globals, level):
"""
parent, name = get_parent(globals, level)
Return the package that an import is being performed in. If globals comes
from the module foo.bar.bat (not itself a package), this returns the
sys.modules entry for foo.bar. If globals is from a package's __init__.py,
the package's entry in sys.modules is returned.
If globals doesn't come from a package or a module in a package, or a
corresponding entry is not found in sys.modules, None is returned.
"""
orig_level = level
if not level or not isinstance(globals, dict):
return None, ''
pkgname = globals.get('__package__', None)
if pkgname is not None:
# __package__ is set, so use it
if not hasattr(pkgname, 'rindex'):
raise ValueError('__package__ set to non-string')
if len(pkgname) == 0:
if level > 0:
raise ValueError('Attempted relative import in non-package')
return None, ''
name = pkgname
else:
# __package__ not set, so figure it out and set it
if '__name__' not in globals:
return None, ''
modname = globals['__name__']
if '__path__' in globals:
# __path__ is set, so modname is already the package name
globals['__package__'] = name = modname
else:
# Normal module, so work out the package name if any
lastdot = modname.rfind('.')
if lastdot < 0 < level:
raise ValueError("Attempted relative import in non-package")
if lastdot < 0:
globals['__package__'] = None
return None, ''
globals['__package__'] = name = modname[:lastdot]
dot = len(name)
for x in range(level, 1, -1):
try:
dot = name.rindex('.', 0, dot)
except ValueError as e:
raise ValueError("attempted relative import beyond top-level "
"package") from e
name = name[:dot]
try:
parent = sys.modules[name]
except BaseException as e:
if orig_level < 1:
warn("Parent module '%.200s' not found while handling absolute "
"import" % name)
parent = None
else:
raise SystemError("Parent module '%.200s' not loaded, cannot "
"perform relative import" % name) from e
# We expect, but can't guarantee, if parent != None, that:
# - parent.__name__ == name
# - parent.__dict__ is globals
# If this is violated... Who cares?
return parent, name
def load_next(mod, altmod, name, buf):
"""
mod, name, buf = load_next(mod, altmod, name, buf)
altmod is either None or same as mod
"""
if len(name) == 0:
# completely empty module name should only happen in
# 'from . import' (or '__import__("")')
return mod, None, buf
dot = name.find('.')
if dot == 0:
raise ValueError('Empty module name')
if dot < 0:
subname = name
next = None
else:
subname = name[:dot]
next = name[dot+1:]
if buf != '':
buf += '.'
buf += subname
result = import_submodule(mod, subname, buf)
if result is None and mod != altmod:
result = import_submodule(altmod, subname, subname)
if result is not None:
buf = subname
if result is None:
raise ImportError("No module named %.200s" % name)
return result, next, buf
# Need to keep track of what we've already reloaded to prevent cyclic evil
found_now = {}
def import_submodule(mod, subname, fullname):
"""m = import_submodule(mod, subname, fullname)"""
# Require:
# if mod == None: subname == fullname
# else: mod.__name__ + "." + subname == fullname
global found_now
if fullname in found_now and fullname in sys.modules:
m = sys.modules[fullname]
else:
print('Reloading', fullname)
found_now[fullname] = 1
oldm = sys.modules.get(fullname, None)
try:
if oldm is not None:
m = importlib.reload(oldm)
else:
m = importlib.import_module(subname, mod)
except:
# load_module probably removed name from modules because of
# the error. Put back the original module object.
if oldm:
sys.modules[fullname] = oldm
raise
add_submodule(mod, m, fullname, subname)
return m
def add_submodule(mod, submod, fullname, subname):
"""mod.{subname} = submod"""
if mod is None:
return #Nothing to do here.
if submod is None:
submod = sys.modules[fullname]
setattr(mod, subname, submod)
return
def ensure_fromlist(mod, fromlist, buf, recursive):
"""Handle 'from module import a, b, c' imports."""
if not hasattr(mod, '__path__'):
return
for item in fromlist:
if not hasattr(item, 'rindex'):
raise TypeError("Item in ``from list'' not a string")
if item == '*':
if recursive:
continue # avoid endless recursion
try:
all = mod.__all__
except AttributeError:
pass
else:
ret = ensure_fromlist(mod, all, buf, 1)
if not ret:
return 0
elif not hasattr(mod, item):
import_submodule(mod, item, buf + '.' + item)
def deep_import_hook(name, globals=None, locals=None, fromlist=None, level=-1):
"""Replacement for __import__()"""
parent, buf = get_parent(globals, level)
head, name, buf = load_next(parent, None if level < 0 else parent, name, buf)
tail = head
while name:
tail, name, buf = load_next(tail, tail, name, buf)
# If tail is None, both get_parent and load_next found
# an empty module name: someone called __import__("") or
# doctored faulty bytecode
if tail is None:
raise ValueError('Empty module name')
if not fromlist:
return head
ensure_fromlist(tail, fromlist, buf, 0)
return tail
modules_reloading = {}
def deep_reload_hook(m):
"""Replacement for reload()."""
# Hardcode this one as it would raise a NotImplementedError from the
# bowels of Python and screw up the import machinery afterwards.
# Unlike other imports, the `exclude` list already in place is not enough.
if m is types:
return m
if not isinstance(m, ModuleType):
raise TypeError("reload() argument must be module")
name = m.__name__
if name not in sys.modules:
raise ImportError("reload(): module %.200s not in sys.modules" % name)
global modules_reloading
try:
return modules_reloading[name]
except:
modules_reloading[name] = m
try:
newm = importlib.reload(m)
except:
sys.modules[name] = m
raise
finally:
modules_reloading.clear()
return newm
# Save the original hooks
original_reload = importlib.reload
# Replacement for reload()
def reload(
module,
exclude=(
*sys.builtin_module_names,
"sys",
"os.path",
"builtins",
"__main__",
"numpy",
"numpy._globals",
),
):
"""Recursively reload all modules used in the given module. Optionally
takes a list of modules to exclude from reloading. The default exclude
list contains modules listed in sys.builtin_module_names with additional
sys, os.path, builtins and __main__, to prevent, e.g., resetting
display, exception, and io hooks.
"""
global found_now
for i in exclude:
found_now[i] = 1
try:
with replace_import_hook(deep_import_hook):
return deep_reload_hook(module)
finally:
found_now = {}
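A short usage sketch, assuming the module is importable as IPython.lib.deepreload (as the docstring above suggests); mypkg is a hypothetical package being edited on disk, and the pandas entry is just an example of extending the exclude list.

    import sys
    from IPython.lib import deepreload
    import mypkg   # hypothetical package under development

    # Recursively reload mypkg and everything it imports. The default exclude
    # list already protects the builtin modules, sys, os.path, builtins,
    # __main__ and numpy; extend it for anything else that must not be reset.
    mypkg = deepreload.reload(
        mypkg,
        exclude=(*sys.builtin_module_names, "sys", "os.path", "builtins",
                 "__main__", "numpy", "numpy._globals", "pandas"),
    )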

@@ -0,0 +1,672 @@
"""Module for interactive demos using IPython.
This module implements a few classes for running Python scripts interactively
in IPython for demonstrations. With very simple markup (a few tags in
comments), you can control points where the script stops executing and returns
control to IPython.
Provided classes
----------------
The classes are (see their docstrings for further details):
- Demo: pure python demos
- IPythonDemo: demos with input to be processed by IPython as if it had been
typed interactively (so magics work, as well as any other special syntax you
may have added via input prefilters).
- LineDemo: single-line version of the Demo class. These demos are executed
one line at a time, and require no markup.
- IPythonLineDemo: IPython version of the LineDemo class (the demo is
executed a line at a time, but processed via IPython).
- ClearMixin: mixin to make Demo classes with less visual clutter. It
declares an empty marquee and a pre_cmd that clears the screen before each
block (see Subclassing below).
- ClearDemo, ClearIPDemo: mixin-enabled versions of the Demo and IPythonDemo
classes.
Inheritance diagram:
.. inheritance-diagram:: IPython.lib.demo
:parts: 3
Subclassing
-----------
The classes here all include a few methods meant to make customization by
subclassing more convenient. Their docstrings below have some more details:
- highlight(): format every block and optionally highlight comments and
docstring content.
- marquee(): generates a marquee to provide visible on-screen markers at each
block start and end.
- pre_cmd(): run right before the execution of each block.
- post_cmd(): run right after the execution of each block. If the block
raises an exception, this is NOT called.
Operation
---------
The file is run in its own empty namespace (though you can pass it a string of
arguments as if in a command line environment, and it will see those as
sys.argv). But at each stop, the global IPython namespace is updated with the
current internal demo namespace, so you can work interactively with the data
accumulated so far.
By default, each block of code is printed (with syntax highlighting) before
executing it and you have to confirm execution. This is intended to show the
code to an audience first so you can discuss it, and only proceed with
execution once you agree. There are a few tags which allow you to modify this
behavior.
The supported tags are:
# <demo> stop
Defines block boundaries, the points where IPython stops execution of the
file and returns to the interactive prompt.
You can optionally mark the stop tag with extra dashes before and after the
word 'stop', to help visually distinguish the blocks in a text editor:
# <demo> --- stop ---
# <demo> silent
Make a block execute silently (and hence automatically). Typically used in
cases where you have some boilerplate or initialization code which you need
executed but do not want to be seen in the demo.
# <demo> auto
Make a block execute automatically, but still being printed. Useful for
simple code which does not warrant discussion, since it avoids the extra
manual confirmation.
# <demo> auto_all
This tag can _only_ be in the first block, and if given it overrides the
individual auto tags to make the whole demo fully automatic (no block asks
for confirmation). It can also be given at creation time (or the attribute
set later) to override what's in the file.
While _any_ python file can be run as a Demo instance, if there are no stop
tags the whole file will run in a single block (no different than calling
first %pycat and then %run). The minimal markup to make this useful is to
place a set of stop tags; the other tags are only there to let you fine-tune
the execution.
This is probably best explained with the simple example file below. You can
copy this into a file named ex_demo.py, and try running it via::
from IPython.lib.demo import Demo
d = Demo('ex_demo.py')
d()
Each time you call the demo object, it runs the next block. The demo object
has a few useful methods for navigation, like again(), edit(), jump(), seek()
and back(). It can be reset for a new run via reset() or reloaded from disk
(in case you've edited the source) via reload(). See their docstrings below.
Note: To make this simpler to explore, a file called "demo-exercizer.py" has
been added to the "docs/examples/core" directory. Just cd to this directory in
an IPython session, and type::
%run demo-exercizer.py
and then follow the directions.
Example
-------
The following is a very simple example of a valid demo file.
::
#################### EXAMPLE DEMO <ex_demo.py> ###############################
'''A simple interactive demo to illustrate the use of IPython's Demo class.'''
print 'Hello, welcome to an interactive IPython demo.'
# The mark below defines a block boundary, which is a point where IPython will
# stop execution and return to the interactive prompt. The dashes are actually
# optional and used only as a visual aid to clearly separate blocks while
# editing the demo code.
# <demo> stop
x = 1
y = 2
# <demo> stop
# the mark below marks this block as silent
# <demo> silent
print 'This is a silent block, which gets executed but not printed.'
# <demo> stop
# <demo> auto
print 'This is an automatic block.'
print 'It is executed without asking for confirmation, but printed.'
z = x+y
print 'z=',z
# <demo> stop
# This is just another normal block.
print 'z is now:', z
print 'bye!'
################### END EXAMPLE DEMO <ex_demo.py> ############################
"""
#*****************************************************************************
# Copyright (C) 2005-2006 Fernando Perez. <Fernando.Perez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#
#*****************************************************************************
import os
import re
import shlex
import sys
import pygments
from pathlib import Path
from IPython.utils.text import marquee
from IPython.utils import openpy
from IPython.utils import py3compat
__all__ = ['Demo','IPythonDemo','LineDemo','IPythonLineDemo','DemoError']
class DemoError(Exception): pass
def re_mark(mark):
return re.compile(r'^\s*#\s+<demo>\s+%s\s*$' % mark,re.MULTILINE)
class Demo(object):
re_stop = re_mark(r'-*\s?stop\s?-*')
re_silent = re_mark('silent')
re_auto = re_mark('auto')
re_auto_all = re_mark('auto_all')
def __init__(self,src,title='',arg_str='',auto_all=None, format_rst=False,
formatter='terminal', style='default'):
"""Make a new demo object. To run the demo, simply call the object.
See the module docstring for full details and an example (you can use
IPython.Demo? in IPython to see it).
Inputs:
- src is either a file, or file-like object, or a
string that can be resolved to a filename.
Optional inputs:
- title: a string to use as the demo name. Of most use when the demo
you are making comes from an object that has no filename, or if you
want an alternate denotation distinct from the filename.
- arg_str(''): a string of arguments, internally converted to a list
just like sys.argv, so the demo script can see a similar
environment.
- auto_all(None): global flag to run all blocks automatically without
confirmation. This attribute overrides the block-level tags and
applies to the whole demo. It is an attribute of the object, and
can be changed at runtime simply by reassigning it to a boolean
value.
- format_rst(False): a bool to enable formatting of comments and
docstrings with the pygments rst lexer
- formatter('terminal'): the name of a pygments formatter to use.
Useful values for terminals: terminal, terminal256, terminal16m
- style('default'): the name of a pygments style to use.
"""
if hasattr(src, "read"):
# It seems to be a file or a file-like object
self.fname = "from a file-like object"
if title == '':
self.title = "from a file-like object"
else:
self.title = title
else:
# Assume it's a string or something that can be converted to one
self.fname = src
if title == '':
(filepath, filename) = os.path.split(src)
self.title = filename
else:
self.title = title
self.sys_argv = [src] + shlex.split(arg_str)
self.auto_all = auto_all
self.src = src
try:
ip = get_ipython() # this is in builtins whenever IPython is running
self.inside_ipython = True
except NameError:
self.inside_ipython = False
if self.inside_ipython:
# get a few things from ipython. While it's a bit ugly design-wise,
# it ensures that things like color scheme and the like are always in
# sync with the ipython mode being used. This class is only meant to
# be used inside ipython anyways, so it's OK.
self.ip_ns = ip.user_ns
self.ip_colorize = ip.pycolorize
self.ip_showtb = ip.showtraceback
self.ip_run_cell = ip.run_cell
self.shell = ip
self.formatter = pygments.formatters.get_formatter_by_name(formatter,
style=style)
self.python_lexer = pygments.lexers.get_lexer_by_name("py3")
self.format_rst = format_rst
if format_rst:
self.rst_lexer = pygments.lexers.get_lexer_by_name("rst")
# load user data and initialize data structures
self.reload()
def fload(self):
"""Load file object."""
# read data and parse into blocks
if hasattr(self, 'fobj') and self.fobj is not None:
self.fobj.close()
if hasattr(self.src, "read"):
# It seems to be a file or a file-like object
self.fobj = self.src
else:
# Assume it's a string or something that can be converted to one
self.fobj = openpy.open(self.fname)
def reload(self):
"""Reload source from disk and initialize state."""
self.fload()
self.src = "".join(openpy.strip_encoding_cookie(self.fobj))
src_b = [b.strip() for b in self.re_stop.split(self.src) if b]
self._silent = [bool(self.re_silent.findall(b)) for b in src_b]
self._auto = [bool(self.re_auto.findall(b)) for b in src_b]
# if auto_all is not given (def. None), we read it from the file
if self.auto_all is None:
self.auto_all = bool(self.re_auto_all.findall(src_b[0]))
else:
self.auto_all = bool(self.auto_all)
# Clean the sources from all markup so it doesn't get displayed when
# running the demo
src_blocks = []
auto_strip = lambda s: self.re_auto.sub('',s)
for i,b in enumerate(src_b):
if self._auto[i]:
src_blocks.append(auto_strip(b))
else:
src_blocks.append(b)
# remove the auto_all marker
src_blocks[0] = self.re_auto_all.sub('',src_blocks[0])
self.nblocks = len(src_blocks)
self.src_blocks = src_blocks
# also build syntax-highlighted source
self.src_blocks_colored = list(map(self.highlight,self.src_blocks))
# ensure clean namespace and seek offset
self.reset()
def reset(self):
"""Reset the namespace and seek pointer to restart the demo"""
self.user_ns = {}
self.finished = False
self.block_index = 0
def _validate_index(self,index):
if index<0 or index>=self.nblocks:
raise ValueError('invalid block index %s' % index)
def _get_index(self,index):
"""Get the current block index, validating and checking status.
Returns None if the demo is finished"""
if index is None:
if self.finished:
print('Demo finished. Use <demo_name>.reset() if you want to rerun it.')
return None
index = self.block_index
else:
self._validate_index(index)
return index
def seek(self,index):
"""Move the current seek pointer to the given block.
You can use negative indices to seek from the end, with identical
semantics to those of Python lists."""
if index<0:
index = self.nblocks + index
self._validate_index(index)
self.block_index = index
self.finished = False
def back(self,num=1):
"""Move the seek pointer back num blocks (default is 1)."""
self.seek(self.block_index-num)
def jump(self,num=1):
"""Jump a given number of blocks relative to the current one.
The offset can be positive or negative, defaults to 1."""
self.seek(self.block_index+num)
def again(self):
"""Move the seek pointer back one block and re-execute."""
self.back(1)
self()
def edit(self,index=None):
"""Edit a block.
If no number is given, use the last block executed.
This edits the in-memory copy of the demo, it does NOT modify the
original source file. If you want to do that, simply open the file in
an editor and use reload() when you make changes to the file. This
method is meant to let you change a block during a demonstration for
explanatory purposes, without damaging your original script."""
index = self._get_index(index)
if index is None:
return
# decrease the index by one (unless we're at the very beginning), so
# that the default demo.edit() call opens up the block we've last run
if index>0:
index -= 1
filename = self.shell.mktempfile(self.src_blocks[index])
self.shell.hooks.editor(filename, 1)
with open(Path(filename), "r", encoding="utf-8") as f:
new_block = f.read()
# update the source and colored block
self.src_blocks[index] = new_block
self.src_blocks_colored[index] = self.highlight(new_block)
self.block_index = index
# call to run with the newly edited index
self()
def show(self,index=None):
"""Show a single block on screen"""
index = self._get_index(index)
if index is None:
return
print(self.marquee('<%s> block # %s (%s remaining)' %
(self.title,index,self.nblocks-index-1)))
print(self.src_blocks_colored[index])
sys.stdout.flush()
def show_all(self):
"""Show entire demo on screen, block by block"""
fname = self.title
title = self.title
nblocks = self.nblocks
silent = self._silent
marquee = self.marquee
for index,block in enumerate(self.src_blocks_colored):
if silent[index]:
print(marquee('<%s> SILENT block # %s (%s remaining)' %
(title,index,nblocks-index-1)))
else:
print(marquee('<%s> block # %s (%s remaining)' %
(title,index,nblocks-index-1)))
print(block, end=' ')
sys.stdout.flush()
def run_cell(self,source):
"""Execute a string with one or more lines of code"""
exec(source, self.user_ns)
def __call__(self,index=None):
"""run a block of the demo.
If index is given, it should be an integer >=1 and <= nblocks. This
means that the calling convention is one off from typical Python
lists. The reason for the inconsistency is that the demo always
prints 'Block n/N', and N is the total, so it would be very odd to use
zero-indexing here."""
index = self._get_index(index)
if index is None:
return
try:
marquee = self.marquee
next_block = self.src_blocks[index]
self.block_index += 1
if self._silent[index]:
print(marquee('Executing silent block # %s (%s remaining)' %
(index,self.nblocks-index-1)))
else:
self.pre_cmd()
self.show(index)
if self.auto_all or self._auto[index]:
print(marquee('output:'))
else:
print(marquee('Press <q> to quit, <Enter> to execute...'), end=' ')
ans = py3compat.input().strip()
if ans:
print(marquee('Block NOT executed'))
return
try:
save_argv = sys.argv
sys.argv = self.sys_argv
self.run_cell(next_block)
self.post_cmd()
finally:
sys.argv = save_argv
except:
if self.inside_ipython:
self.ip_showtb(filename=self.fname)
else:
if self.inside_ipython:
self.ip_ns.update(self.user_ns)
if self.block_index == self.nblocks:
mq1 = self.marquee('END OF DEMO')
if mq1:
# avoid spurious print if empty marquees are used
print()
print(mq1)
print(self.marquee('Use <demo_name>.reset() if you want to rerun it.'))
self.finished = True
# These methods are meant to be overridden by subclasses who may wish to
# customize the behavior of their demos.
def marquee(self,txt='',width=78,mark='*'):
"""Return the input string centered in a 'marquee'."""
return marquee(txt,width,mark)
def pre_cmd(self):
"""Method called before executing each block."""
pass
def post_cmd(self):
"""Method called after executing each block."""
pass
def highlight(self, block):
"""Method called on each block to highlight it content"""
tokens = pygments.lex(block, self.python_lexer)
if self.format_rst:
from pygments.token import Token
toks = []
for token in tokens:
if token[0] == Token.String.Doc and len(token[1]) > 6:
toks += pygments.lex(token[1][:3], self.python_lexer)
# parse doc string content by rst lexer
toks += pygments.lex(token[1][3:-3], self.rst_lexer)
toks += pygments.lex(token[1][-3:], self.python_lexer)
elif token[0] == Token.Comment.Single:
toks.append((Token.Comment.Single, token[1][0]))
# parse comment content by rst lexer
# remove the extra newline added by rst lexer
toks += list(pygments.lex(token[1][1:], self.rst_lexer))[:-1]
else:
toks.append(token)
tokens = toks
return pygments.format(tokens, self.formatter)
class IPythonDemo(Demo):
"""Class for interactive demos with IPython's input processing applied.
This subclasses Demo, but instead of executing each block by the Python
interpreter (via exec), it actually calls IPython on it, so that any input
filters which may be in place are applied to the input block.
If you have an interactive environment which exposes special input
processing, you can use this class instead to write demo scripts which
operate exactly as if you had typed them interactively. The default Demo
class requires the input to be valid, pure Python code.
"""
def run_cell(self,source):
"""Execute a string with one or more lines of code"""
self.shell.run_cell(source)
class LineDemo(Demo):
"""Demo where each line is executed as a separate block.
The input script should be valid Python code.
This class doesn't require any markup at all, and it's meant for simple
scripts (with no nesting or any kind of indentation) which consist of
multiple lines of input to be executed, one at a time, as if they had been
typed in the interactive prompt.
Note: the input cannot have *any* indentation, which means that only
single lines of input are accepted; not even function definitions are
valid."""
def reload(self):
"""Reload source from disk and initialize state."""
# read data and parse into blocks
self.fload()
lines = self.fobj.readlines()
src_b = [l for l in lines if l.strip()]
nblocks = len(src_b)
self.src = ''.join(lines)
self._silent = [False]*nblocks
self._auto = [True]*nblocks
self.auto_all = True
self.nblocks = nblocks
self.src_blocks = src_b
# also build syntax-highlighted source
self.src_blocks_colored = list(map(self.highlight,self.src_blocks))
# ensure clean namespace and seek offset
self.reset()
class IPythonLineDemo(IPythonDemo,LineDemo):
"""Variant of the LineDemo class whose input is processed by IPython."""
pass
class ClearMixin(object):
"""Use this mixin to make Demo classes with less visual clutter.
Demos using this mixin will clear the screen before every block and use
blank marquees.
Note that in order for the methods defined here to actually override those
of the classes it's mixed with, it must go /first/ in the inheritance
tree. For example:
class ClearIPDemo(ClearMixin,IPythonDemo): pass
will provide an IPythonDemo class with the mixin's features.
"""
def marquee(self,txt='',width=78,mark='*'):
"""Blank marquee that returns '' no matter what the input."""
return ''
def pre_cmd(self):
"""Method called before executing each block.
This one simply clears the screen."""
from IPython.utils.terminal import _term_clear
_term_clear()
class ClearDemo(ClearMixin,Demo):
pass
class ClearIPDemo(ClearMixin,IPythonDemo):
pass
def slide(file_path, noclear=False, format_rst=True, formatter="terminal",
style="native", auto_all=False, delimiter='...'):
if noclear:
demo_class = Demo
else:
demo_class = ClearDemo
demo = demo_class(file_path, format_rst=format_rst, formatter=formatter,
style=style, auto_all=auto_all)
while not demo.finished:
demo()
try:
py3compat.input('\n' + delimiter)
except KeyboardInterrupt:
exit(1)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Run python demos')
parser.add_argument('--noclear', '-C', action='store_true',
help='Do not clear terminal on each slide')
parser.add_argument('--rst', '-r', action='store_true',
help='Highlight comments and docstrings as rst')
parser.add_argument('--formatter', '-f', default='terminal',
help='pygments formatter name could be: terminal, '
'terminal256, terminal16m')
parser.add_argument('--style', '-s', default='default',
help='pygments style name')
parser.add_argument('--auto', '-a', action='store_true',
help='Run all blocks automatically without '
'confirmation')
parser.add_argument('--delimiter', '-d', default='...',
help='slides delimiter added after each slide run')
parser.add_argument('file', nargs=1,
help='python demo file')
args = parser.parse_args()
slide(args.file[0], noclear=args.noclear, format_rst=args.rst,
formatter=args.formatter, style=args.style, auto_all=args.auto,
delimiter=args.delimiter)
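As a quick reference, a sketch of the interactive flow described in the module docstring, to be typed at an IPython prompt after saving the ex_demo.py example shown above.

    from IPython.lib.demo import ClearDemo

    d = ClearDemo('ex_demo.py')   # or Demo(...) to keep previous output on screen
    d()            # show and (after confirmation) run the first block
    d()            # next block; silent/auto blocks run without asking
    d.back(1)      # move the pointer back one block
    d.show(0)      # print a block without executing it
    d.show_all()   # print every block
    d.seek(-1)     # jump to the last block
    d.reset()      # rewind to run the whole demo again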

@@ -0,0 +1,675 @@
"""Various display related classes.
Authors : MinRK, gregcaporaso, dannystaple
"""
from html import escape as html_escape
from os.path import exists, isfile, splitext, abspath, join, isdir
from os import walk, sep, fsdecode
from IPython.core.display import DisplayObject, TextDisplayObject
from typing import Tuple, Iterable
__all__ = ['Audio', 'IFrame', 'YouTubeVideo', 'VimeoVideo', 'ScribdDocument',
'FileLink', 'FileLinks', 'Code']
class Audio(DisplayObject):
"""Create an audio object.
When this object is returned by an input cell or passed to the
display function, it will result in Audio controls being displayed
in the frontend (only works in the notebook).
Parameters
----------
data : numpy array, list, unicode, str or bytes
Can be one of
* Numpy 1d array containing the desired waveform (mono)
* Numpy 2d array containing waveforms for each channel.
Shape=(NCHAN, NSAMPLES). For the standard channel order, see
http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx
* List of float or integer representing the waveform (mono)
* String containing the filename
* Bytestring containing raw PCM data or
* URL pointing to a file on the web.
If the array option is used, the waveform will be normalized.
If a filename or url is used, the format support will be browser
dependent.
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
embed : boolean
Should the audio data be embedded using a data URI (True) or should
the original source be referenced. Set this to True if you want the
audio to be playable later with no internet connection in the notebook.
Default is `True`, unless the keyword argument `url` is set, then
default value is `False`.
rate : integer
The sampling rate of the raw data.
Only required when data parameter is being used as an array
autoplay : bool
Set to True if the audio should immediately start playing.
Default is `False`.
normalize : bool
Whether audio should be normalized (rescaled) to the maximum possible
range. Default is `True`. When set to `False`, `data` must be between
-1 and 1 (inclusive), otherwise an error is raised.
Applies only when `data` is a list or array of samples; other types of
audio are never normalized.
Examples
--------
>>> import pytest
>>> np = pytest.importorskip("numpy")
Generate a sound
>>> import numpy as np
>>> framerate = 44100
>>> t = np.linspace(0,5,framerate*5)
>>> data = np.sin(2*np.pi*220*t) + np.sin(2*np.pi*224*t)
>>> Audio(data, rate=framerate)
<IPython.lib.display.Audio object>
Can also do stereo or more channels
>>> dataleft = np.sin(2*np.pi*220*t)
>>> dataright = np.sin(2*np.pi*224*t)
>>> Audio([dataleft, dataright], rate=framerate)
<IPython.lib.display.Audio object>
From URL:
>>> Audio("http://www.nch.com.au/acm/8k16bitpcm.wav") # doctest: +SKIP
>>> Audio(url="http://www.w3schools.com/html/horse.ogg") # doctest: +SKIP
From a File:
>>> Audio('IPython/lib/tests/test.wav') # doctest: +SKIP
>>> Audio(filename='IPython/lib/tests/test.wav') # doctest: +SKIP
From Bytes:
>>> Audio(b'RAW_WAV_DATA..') # doctest: +SKIP
>>> Audio(data=b'RAW_WAV_DATA..') # doctest: +SKIP
See Also
--------
ipywidgets.Audio
Audio widget with more flexibility and options.
"""
_read_flags = 'rb'
def __init__(self, data=None, filename=None, url=None, embed=None, rate=None, autoplay=False, normalize=True, *,
element_id=None):
if filename is None and url is None and data is None:
raise ValueError("No audio data found. Expecting filename, url, or data.")
if embed is False and url is None:
raise ValueError("No url found. Expecting url when embed=False")
if url is not None and embed is not True:
self.embed = False
else:
self.embed = True
self.autoplay = autoplay
self.element_id = element_id
super(Audio, self).__init__(data=data, url=url, filename=filename)
if self.data is not None and not isinstance(self.data, bytes):
if rate is None:
raise ValueError("rate must be specified when data is a numpy array or list of audio samples.")
self.data = Audio._make_wav(data, rate, normalize)
def reload(self):
"""Reload the raw data from file or URL."""
import mimetypes
if self.embed:
super(Audio, self).reload()
if self.filename is not None:
self.mimetype = mimetypes.guess_type(self.filename)[0]
elif self.url is not None:
self.mimetype = mimetypes.guess_type(self.url)[0]
else:
self.mimetype = "audio/wav"
@staticmethod
def _make_wav(data, rate, normalize):
""" Transform a numpy array to a PCM bytestring """
from io import BytesIO
import wave
try:
scaled, nchan = Audio._validate_and_normalize_with_numpy(data, normalize)
except ImportError:
scaled, nchan = Audio._validate_and_normalize_without_numpy(data, normalize)
fp = BytesIO()
waveobj = wave.open(fp,mode='wb')
waveobj.setnchannels(nchan)
waveobj.setframerate(rate)
waveobj.setsampwidth(2)
waveobj.setcomptype('NONE','NONE')
waveobj.writeframes(scaled)
val = fp.getvalue()
waveobj.close()
return val
@staticmethod
def _validate_and_normalize_with_numpy(data, normalize) -> Tuple[bytes, int]:
import numpy as np
data = np.array(data, dtype=float)
if len(data.shape) == 1:
nchan = 1
elif len(data.shape) == 2:
# In wave files, channels are interleaved. E.g.,
# "L1R1L2R2..." for stereo. See
# http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx
# for channel ordering
nchan = data.shape[0]
data = data.T.ravel()
else:
raise ValueError('Array audio input must be a 1D or 2D array')
max_abs_value = np.max(np.abs(data))
normalization_factor = Audio._get_normalization_factor(max_abs_value, normalize)
scaled = data / normalization_factor * 32767
return scaled.astype("<h").tobytes(), nchan
@staticmethod
def _validate_and_normalize_without_numpy(data, normalize):
import array
import sys
data = array.array('f', data)
try:
max_abs_value = float(max([abs(x) for x in data]))
except TypeError as e:
raise TypeError('Only lists of mono audio are '
'supported if numpy is not installed') from e
normalization_factor = Audio._get_normalization_factor(max_abs_value, normalize)
scaled = array.array('h', [int(x / normalization_factor * 32767) for x in data])
if sys.byteorder == 'big':
scaled.byteswap()
nchan = 1
return scaled.tobytes(), nchan
@staticmethod
def _get_normalization_factor(max_abs_value, normalize):
if not normalize and max_abs_value > 1:
raise ValueError('Audio data must be between -1 and 1 when normalize=False.')
return max_abs_value if normalize else 1
def _data_and_metadata(self):
"""shortcut for returning metadata with url information, if defined"""
md = {}
if self.url:
md['url'] = self.url
if md:
return self.data, md
else:
return self.data
def _repr_html_(self):
src = """
<audio {element_id} controls="controls" {autoplay}>
<source src="{src}" type="{type}" />
Your browser does not support the audio element.
</audio>
"""
return src.format(src=self.src_attr(), type=self.mimetype, autoplay=self.autoplay_attr(),
element_id=self.element_id_attr())
def src_attr(self):
import base64
if self.embed and (self.data is not None):
data = base64.b64encode(self.data).decode('ascii')
return """data:{type};base64,{base64}""".format(type=self.mimetype,
base64=data)
elif self.url is not None:
return self.url
else:
return ""
def autoplay_attr(self):
if(self.autoplay):
return 'autoplay="autoplay"'
else:
return ''
def element_id_attr(self):
if (self.element_id):
return 'id="{element_id}"'.format(element_id=self.element_id)
else:
return ''
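# A small sketch of the normalize behaviour described in the Parameters
# section above, assuming numpy is installed and the class is importable as
# IPython.lib.display.Audio; the signal values are arbitrary.
#
#     import numpy as np
#     from IPython.lib.display import Audio
#
#     rate = 8000
#     t = np.linspace(0, 1, rate, endpoint=False)
#     quiet = 0.1 * np.sin(2 * np.pi * 440 * t)   # peak amplitude 0.1
#
#     Audio(quiet, rate=rate)                    # normalize=True: rescaled to full volume
#     Audio(quiet, rate=rate, normalize=False)   # kept at a tenth of full scale
#     Audio(20 * quiet, rate=rate, normalize=False)  # raises ValueError: with
#                                                    # normalize=False, samples must stay in [-1, 1]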
class IFrame(object):
"""
Generic class to embed an iframe in an IPython notebook
"""
iframe = """
<iframe
width="{width}"
height="{height}"
src="{src}{params}"
frameborder="0"
allowfullscreen
{extras}
></iframe>
"""
def __init__(self, src, width, height, extras: Iterable[str] = None, **kwargs):
if extras is None:
extras = []
self.src = src
self.width = width
self.height = height
self.extras = extras
self.params = kwargs
def _repr_html_(self):
"""return the embed iframe"""
if self.params:
from urllib.parse import urlencode
params = "?" + urlencode(self.params)
else:
params = ""
return self.iframe.format(
src=self.src,
width=self.width,
height=self.height,
params=params,
extras=" ".join(self.extras),
)
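# A brief sketch of how the generic IFrame is parameterised, assuming it is
# importable as IPython.lib.display.IFrame; the URL and the extra attribute
# are placeholders.
#
#     from IPython.lib.display import IFrame
#
#     # Keyword arguments become URL query parameters; `extras` entries are
#     # written verbatim into the <iframe> tag.
#     frame = IFrame("https://example.org/widget", width=600, height=400,
#                    extras=['loading="lazy"'], theme="dark")
#     print(frame._repr_html_())   # the HTML a notebook frontend would render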
class YouTubeVideo(IFrame):
"""Class for embedding a YouTube Video in an IPython session, based on its video id.
e.g. to embed the video from https://www.youtube.com/watch?v=foo, you would
do::
vid = YouTubeVideo("foo")
display(vid)
To start from 30 seconds::
vid = YouTubeVideo("abc", start=30)
display(vid)
To calculate seconds from time as hours, minutes, seconds use
:class:`datetime.timedelta`::
start=int(timedelta(hours=1, minutes=46, seconds=40).total_seconds())
Other parameters can be provided as documented at
https://developers.google.com/youtube/player_parameters#Parameters
When converting the notebook using nbconvert, a jpeg representation of the video
will be inserted in the document.
"""
def __init__(self, id, width=400, height=300, allow_autoplay=False, **kwargs):
self.id=id
src = "https://www.youtube.com/embed/{0}".format(id)
if allow_autoplay:
extras = list(kwargs.get("extras", [])) + ['allow="autoplay"']
kwargs.update(autoplay=1, extras=extras)
super(YouTubeVideo, self).__init__(src, width, height, **kwargs)
def _repr_jpeg_(self):
# Deferred import
from urllib.request import urlopen
try:
return urlopen("https://img.youtube.com/vi/{id}/hqdefault.jpg".format(id=self.id)).read()
except IOError:
return None
class VimeoVideo(IFrame):
"""
Class for embedding a Vimeo video in an IPython session, based on its video id.
"""
def __init__(self, id, width=400, height=300, **kwargs):
src="https://player.vimeo.com/video/{0}".format(id)
super(VimeoVideo, self).__init__(src, width, height, **kwargs)
class ScribdDocument(IFrame):
"""
Class for embedding a Scribd document in an IPython session
Use the start_page param to specify a starting point in the document.
Use the view_mode param to specify the display type: one of scroll | slideshow | book.
e.g. to display Wes' foundational paper about pandas in book mode from page 3:
ScribdDocument(71048089, width=800, height=400, start_page=3, view_mode="book")
"""
def __init__(self, id, width=400, height=300, **kwargs):
src="https://www.scribd.com/embeds/{0}/content".format(id)
super(ScribdDocument, self).__init__(src, width, height, **kwargs)
class FileLink(object):
"""Class for embedding a local file link in an IPython session, based on path
e.g. to embed a link that was generated in the IPython notebook as my/data.txt
you would do::
local_file = FileLink("my/data.txt")
display(local_file)
or in the HTML notebook, just::
FileLink("my/data.txt")
"""
html_link_str = "<a href='%s' target='_blank'>%s</a>"
def __init__(self,
path,
url_prefix='',
result_html_prefix='',
result_html_suffix='<br>'):
"""
Parameters
----------
path : str
path to the file or directory that should be formatted
url_prefix : str
prefix to be prepended to all files to form a working link [default:
'']
result_html_prefix : str
text to prepend to the link [default: '']
result_html_suffix : str
text to append at the end of link [default: '<br>']
"""
if isdir(path):
raise ValueError("Cannot display a directory using FileLink. "
"Use FileLinks to display '%s'." % path)
self.path = fsdecode(path)
self.url_prefix = url_prefix
self.result_html_prefix = result_html_prefix
self.result_html_suffix = result_html_suffix
def _format_path(self):
fp = ''.join([self.url_prefix, html_escape(self.path)])
return ''.join([self.result_html_prefix,
self.html_link_str % \
(fp, html_escape(self.path, quote=False)),
self.result_html_suffix])
def _repr_html_(self):
"""return html link to file
"""
if not exists(self.path):
return ("Path (<tt>%s</tt>) doesn't exist. "
"It may still be in the process of "
"being generated, or you may have the "
"incorrect path." % self.path)
return self._format_path()
def __repr__(self):
"""return absolute path to file
"""
return abspath(self.path)
class FileLinks(FileLink):
"""Class for embedding local file links in an IPython session, based on path
e.g. to embed links to files that were generated in the IPython notebook
under ``my/data``, you would do::
local_files = FileLinks("my/data")
display(local_files)
or in the HTML notebook, just::
FileLinks("my/data")
"""
def __init__(self,
path,
url_prefix='',
included_suffixes=None,
result_html_prefix='',
result_html_suffix='<br>',
notebook_display_formatter=None,
terminal_display_formatter=None,
recursive=True):
"""
See :class:`FileLink` for the ``path``, ``url_prefix``,
``result_html_prefix`` and ``result_html_suffix`` parameters.
included_suffixes : list
Filename suffixes to include when formatting output [default: include
all files]
notebook_display_formatter : function
Used to format links for display in the notebook. See discussion of
formatter functions below.
terminal_display_formatter : function
Used to format links for display in the terminal. See discussion of
formatter functions below.
Formatter functions must be of the form::
f(dirname, fnames, included_suffixes)
dirname : str
The name of a directory
fnames : list
The files in that directory
included_suffixes : list
The file suffixes that should be included in the output (passing None
means to include all suffixes in the output in the built-in formatters)
recursive : boolean
Whether to recurse into subdirectories. Default is True.
The formatter function should return a list of lines that will be printed in
the notebook (if passing notebook_display_formatter) or the terminal (if
passing terminal_display_formatter). It is called once for each directory in
self.path. Default formatters are provided; a custom function can be passed
here to support alternative formatting (an illustrative example formatter is
sketched after this class).
"""
if isfile(path):
raise ValueError("Cannot display a file using FileLinks. "
"Use FileLink to display '%s'." % path)
self.included_suffixes = included_suffixes
# remove trailing slashes for more consistent output formatting
path = path.rstrip('/')
self.path = path
self.url_prefix = url_prefix
self.result_html_prefix = result_html_prefix
self.result_html_suffix = result_html_suffix
self.notebook_display_formatter = \
notebook_display_formatter or self._get_notebook_display_formatter()
self.terminal_display_formatter = \
terminal_display_formatter or self._get_terminal_display_formatter()
self.recursive = recursive
def _get_display_formatter(
self, dirname_output_format, fname_output_format, fp_format, fp_cleaner=None
):
"""generate built-in formatter function
this is used to define both the notebook and terminal built-in
formatters as they only differ by some wrapper text for each entry
dirname_output_format: string to use for formatting directory
names, dirname will be substituted for a single "%s" which
must appear in this string
fname_output_format: string to use for formatting file names,
if a single "%s" appears in the string, fname will be substituted
if two "%s" appear in the string, the path to fname will be
substituted for the first and fname will be substituted for the
second
fp_format: string to use for formatting filepaths, must contain
exactly two "%s" and the dirname will be substituted for the first
and fname will be substituted for the second
"""
def f(dirname, fnames, included_suffixes=None):
result = []
# begin by figuring out which filenames, if any,
# are going to be displayed
display_fnames = []
for fname in fnames:
if (isfile(join(dirname,fname)) and
(included_suffixes is None or
splitext(fname)[1] in included_suffixes)):
display_fnames.append(fname)
if len(display_fnames) == 0:
# if there are no filenames to display, don't print anything
# (not even the directory name)
pass
else:
# otherwise print the formatted directory name followed by
# the formatted filenames
dirname_output_line = dirname_output_format % dirname
result.append(dirname_output_line)
for fname in display_fnames:
fp = fp_format % (dirname,fname)
if fp_cleaner is not None:
fp = fp_cleaner(fp)
try:
# output can include both a filepath and a filename...
fname_output_line = fname_output_format % (fp, fname)
except TypeError:
# ... or just a single filepath
fname_output_line = fname_output_format % fname
result.append(fname_output_line)
return result
return f
def _get_notebook_display_formatter(self,
spacer="&nbsp;&nbsp;"):
""" generate function to use for notebook formatting
"""
dirname_output_format = \
self.result_html_prefix + "%s/" + self.result_html_suffix
fname_output_format = \
self.result_html_prefix + spacer + self.html_link_str + self.result_html_suffix
fp_format = self.url_prefix + '%s/%s'
if sep == "\\":
# Working on a platform where the path separator is "\", so
# must convert these to "/" for generating a URI
def fp_cleaner(fp):
# Replace all occurrences of backslash ("\") with a forward
# slash ("/") - this is necessary on windows when a path is
# provided as input, but we must link to a URI
return fp.replace('\\','/')
else:
fp_cleaner = None
return self._get_display_formatter(dirname_output_format,
fname_output_format,
fp_format,
fp_cleaner)
def _get_terminal_display_formatter(self,
spacer=" "):
""" generate function to use for terminal formatting
"""
dirname_output_format = "%s/"
fname_output_format = spacer + "%s"
fp_format = '%s/%s'
return self._get_display_formatter(dirname_output_format,
fname_output_format,
fp_format)
def _format_path(self):
result_lines = []
if self.recursive:
walked_dir = list(walk(self.path))
else:
walked_dir = [next(walk(self.path))]
walked_dir.sort()
for dirname, subdirs, fnames in walked_dir:
result_lines += self.notebook_display_formatter(dirname, fnames, self.included_suffixes)
return '\n'.join(result_lines)
def __repr__(self):
"""return newline-separated absolute paths
"""
result_lines = []
if self.recursive:
walked_dir = list(walk(self.path))
else:
walked_dir = [next(walk(self.path))]
walked_dir.sort()
for dirname, subdirs, fnames in walked_dir:
result_lines += self.terminal_display_formatter(dirname, fnames, self.included_suffixes)
return '\n'.join(result_lines)
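# Illustrative usage sketch: a custom formatter matching the
# f(dirname, fnames, included_suffixes) signature documented in
# FileLinks.__init__, here rendering only directory names in bold.
def _example_dirs_only_formatter(dirname, fnames, included_suffixes):
    # One HTML line per directory; the individual files are suppressed.
    return ["<b>%s/</b>" % html_escape(dirname)]
# Placeholder path; pass the formatter via notebook_display_formatter:
#     FileLinks("my/data", notebook_display_formatter=_example_dirs_only_formatter)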
class Code(TextDisplayObject):
"""Display syntax-highlighted source code.
This uses Pygments to highlight the code for HTML and Latex output.
Parameters
----------
data : str
The code as a string
url : str
A URL to fetch the code from
filename : str
A local filename to load the code from
language : str
The short name of a Pygments lexer to use for highlighting.
If not specified, it will guess the lexer based on the filename
or the code. Available lexers: http://pygments.org/docs/lexers/
"""
def __init__(self, data=None, url=None, filename=None, language=None):
self.language = language
super().__init__(data=data, url=url, filename=filename)
def _get_lexer(self):
if self.language:
from pygments.lexers import get_lexer_by_name
return get_lexer_by_name(self.language)
elif self.filename:
from pygments.lexers import get_lexer_for_filename
return get_lexer_for_filename(self.filename)
else:
from pygments.lexers import guess_lexer
return guess_lexer(self.data)
def __repr__(self):
return self.data
def _repr_html_(self):
from pygments import highlight
from pygments.formatters import HtmlFormatter
fmt = HtmlFormatter()
style = '<style>{}</style>'.format(fmt.get_style_defs('.output_html'))
return style + highlight(self.data, self._get_lexer(), fmt)
def _repr_latex_(self):
from pygments import highlight
from pygments.formatters import LatexFormatter
return highlight(self.data, self._get_lexer(), LatexFormatter())
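# Illustrative usage sketch: displaying a highlighted snippet with an explicit
# Pygments lexer name. The snippet and the "python" language name are
# placeholders; display() is assumed to come from IPython.display.
def _example_code_display():
    from IPython.display import display
    display(Code("print('hello')", language="python"))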

View File

@ -0,0 +1,127 @@
""" 'editor' hooks for common editors that work well with ipython
They should honor the line number argument, at least.
Contributions are *very* welcome.
"""
import os
import shlex
import subprocess
import sys
from IPython import get_ipython
from IPython.core.error import TryNext
from IPython.utils import py3compat
def install_editor(template, wait=False):
"""Installs the editor that is called by IPython for the %edit magic.
This overrides the default editor, which is generally set by your EDITOR
environment variable, or is notepad (Windows) or vi (Linux). By supplying a
template string `template`, you can control how the editor is invoked by
IPython (e.g. the format in which it accepts command line options).
Parameters
----------
template : str
`template` acts as a template for how your editor is invoked by
the shell. It should contain '{filename}', which will be replaced on
invocation with the file name, and '{line}', which will be replaced by
the line number (or 0) at which to open the file.
wait : bool
If `wait` is true, wait until the user presses enter before returning,
to facilitate non-blocking editors that exit immediately after
the call.
"""
# not all editors support $line, so we'll leave out this check
# for substitution in ['$file', '$line']:
# if not substitution in run_template:
# raise ValueError(('run_template should contain %s'
# ' for string substitution. You supplied "%s"' % (substitution,
# run_template)))
def call_editor(self, filename, line=0):
if line is None:
line = 0
cmd = template.format(filename=shlex.quote(filename), line=line)
print(">", cmd)
# shlex.quote doesn't work right on Windows, but it does after splitting
if sys.platform.startswith('win'):
cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, shell=True)
if proc.wait() != 0:
raise TryNext()
if wait:
py3compat.input("Press Enter when done editing:")
get_ipython().set_hook('editor', call_editor)
get_ipython().editor = template
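# Illustrative usage sketch: wiring up an editor that is not covered by the
# helpers below. The "myeditor" command is hypothetical; only the {line} and
# {filename} placeholders documented above are assumed.
def _example_custom_editor():
    install_editor(u'myeditor +{line} {filename}', wait=True)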
# in these, exe is always the path/name of the executable. Useful
# if you don't have the editor directory in your path
def komodo(exe=u'komodo'):
""" Activestate Komodo [Edit] """
install_editor(exe + u' -l {line} {filename}', wait=True)
def scite(exe=u"scite"):
""" SciTE or Sc1 """
install_editor(exe + u' {filename} -goto:{line}')
def notepadplusplus(exe=u'notepad++'):
""" Notepad++ http://notepad-plus.sourceforge.net """
install_editor(exe + u' -n{line} {filename}')
def jed(exe=u'jed'):
""" JED, the lightweight emacsish editor """
install_editor(exe + u' +{line} {filename}')
def idle(exe=u'idle'):
""" Idle, the editor bundled with python
Parameters
----------
exe : str, None
If None, the idle.py script bundled with idlelib is used.
"""
if exe is None:
import idlelib
p = os.path.dirname(idlelib.__file__)
# i'm not sure if this actually works. Is this idle.py script
# guaranteed to be executable?
exe = os.path.join(p, 'idle.py')
install_editor(exe + u' {filename}')
def mate(exe=u'mate'):
""" TextMate, the missing editor"""
# wait=True is not required since we're using the -w flag to mate
install_editor(exe + u' -w -l {line} {filename}')
# ##########################################
# these are untested, report any problems
# ##########################################
def emacs(exe=u'emacs'):
install_editor(exe + u' +{line} {filename}')
def gnuclient(exe=u'gnuclient'):
install_editor(exe + u' -nw +{line} {filename}')
def crimson_editor(exe=u'cedt.exe'):
install_editor(exe + u' /L:{line} {filename}')
def kate(exe=u'kate'):
install_editor(exe + u' -u -l {line} {filename}')

View File

@ -0,0 +1,155 @@
# coding: utf-8
"""
Support for creating GUI apps and starting event loops.
IPython's GUI integration allows interactive plotting and GUI usage in an
IPython session. IPython has two different types of GUI integration:
1. The terminal based IPython supports GUI event loops through Python's
PyOS_InputHook. PyOS_InputHook is a hook that Python calls periodically
whenever raw_input is waiting for a user to type code. We implement GUI
support in the terminal by setting PyOS_InputHook to a function that
iterates the event loop for a short while. It is important to note that
in this situation, the real GUI event loop is NOT run in the normal
manner, so you can't use the normal means to detect that it is running.
2. In the two process IPython kernel/frontend, the GUI event loop is run in
the kernel. In this case, the event loop is run in the normal manner by
calling the function or method of the GUI toolkit that starts the event
loop.
In addition to starting the GUI event loops in one of these two ways, IPython
will *always* create an appropriate GUI application object when GUI
integration is enabled.
If you want your GUI apps to run in IPython you need to do two things:
1. Test to see if there is already an existing main application object. If
there is, you should use it. If there is not an existing application object
you should create one.
2. Test to see if the GUI event loop is running. If it is, you should not
start it. If the event loop is not running you may start it.
This module contains functions for each toolkit that perform these things
in a consistent manner. Because of how PyOS_InputHook runs the event loop
you cannot detect if the event loop is running using the traditional calls
(such as ``wx.GetApp().IsMainLoopRunning()`` in wxPython). If PyOS_InputHook is
set, these methods will return a false negative. That is, they will say the
event loop is not running when it actually is. To work around this limitation
we propose the following informal protocol:
* Whenever someone starts the event loop, they *must* set the ``_in_event_loop``
attribute of the main application object to ``True``. This should be done
regardless of how the event loop is actually run.
* Whenever someone stops the event loop, they *must* set the ``_in_event_loop``
attribute of the main application object to ``False``.
* If you want to see if the event loop is running, you *must* use ``hasattr``
to see if ``_in_event_loop`` attribute has been set. If it is set, you
*must* use its value. If it has not been set, you can query the toolkit
in the normal manner.
* If you want GUI support and no one else has created an application or
started the event loop you *must* do this. We don't want projects to
attempt to defer these things to someone else if they themselves need it.
The functions below implement this logic for each GUI toolkit. If you need
to create custom application subclasses, you will likely have to modify this
code for your own purposes. This code can be copied into your own project
so you don't have to depend on IPython.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from IPython.core.getipython import get_ipython
#-----------------------------------------------------------------------------
# wx
#-----------------------------------------------------------------------------
def get_app_wx(*args, **kwargs):
"""Create a new wx app or return an exiting one."""
import wx
app = wx.GetApp()
if app is None:
if 'redirect' not in kwargs:
kwargs['redirect'] = False
app = wx.PySimpleApp(*args, **kwargs)
return app
def is_event_loop_running_wx(app=None):
"""Is the wx event loop running."""
# New way: check attribute on shell instance
ip = get_ipython()
if ip is not None:
if ip.active_eventloop and ip.active_eventloop == 'wx':
return True
# Fall through to checking the application, because Wx has a native way
# to check if the event loop is running, unlike Qt.
# Old way: check Wx application
if app is None:
app = get_app_wx()
if hasattr(app, '_in_event_loop'):
return app._in_event_loop
else:
return app.IsMainLoopRunning()
def start_event_loop_wx(app=None):
"""Start the wx event loop in a consistent manner."""
if app is None:
app = get_app_wx()
if not is_event_loop_running_wx(app):
app._in_event_loop = True
app.MainLoop()
app._in_event_loop = False
else:
app._in_event_loop = True
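# Illustrative usage sketch: a script that follows the _in_event_loop protocol
# described in the module docstring. The wx.Frame is a placeholder window and
# the example assumes wxPython is installed.
def _example_run_wx_app():
    import wx
    app = get_app_wx()
    frame = wx.Frame(None, title="example")
    frame.Show()
    start_event_loop_wx(app)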
#-----------------------------------------------------------------------------
# qt4
#-----------------------------------------------------------------------------
def get_app_qt4(*args, **kwargs):
"""Create a new qt4 app or return an existing one."""
from IPython.external.qt_for_kernel import QtGui
app = QtGui.QApplication.instance()
if app is None:
if not args:
args = ([''],)
app = QtGui.QApplication(*args, **kwargs)
return app
def is_event_loop_running_qt4(app=None):
"""Is the qt4 event loop running."""
# New way: check attribute on shell instance
ip = get_ipython()
if ip is not None:
return ip.active_eventloop and ip.active_eventloop.startswith('qt')
# Old way: check attribute on QApplication singleton
if app is None:
app = get_app_qt4([''])
if hasattr(app, '_in_event_loop'):
return app._in_event_loop
else:
# Does qt4 provide another way to detect this?
return False
def start_event_loop_qt4(app=None):
"""Start the qt4 event loop in a consistent manner."""
if app is None:
app = get_app_qt4([''])
if not is_event_loop_running_qt4(app):
app._in_event_loop = True
app.exec_()
app._in_event_loop = False
else:
app._in_event_loop = True
#-----------------------------------------------------------------------------
# Tk
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# gtk
#-----------------------------------------------------------------------------

View File

@ -0,0 +1,246 @@
# -*- coding: utf-8 -*-
"""Tools for handling LaTeX."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import BytesIO, open
import os
import tempfile
import shutil
import subprocess
from base64 import encodebytes
import textwrap
from pathlib import Path, PurePath
from IPython.utils.process import find_cmd, FindCmdError
from traitlets.config import get_config
from traitlets.config.configurable import SingletonConfigurable
from traitlets import List, Bool, Unicode
from IPython.utils.py3compat import cast_unicode
class LaTeXTool(SingletonConfigurable):
"""An object to store configuration of the LaTeX tool."""
def _config_default(self):
return get_config()
backends = List(
Unicode(), ["matplotlib", "dvipng"],
help="Preferred backend to draw LaTeX math equations. "
"Backends in the list are checked one by one and the first "
"usable one is used. Note that `matplotlib` backend "
"is usable only for inline style equations. To draw "
"display style equations, `dvipng` backend must be specified. ",
# It is a List instead of Enum, to make configuration more
# flexible. For example, to use matplotlib mainly but dvipng
# for display style, the default ["matplotlib", "dvipng"] can
# be used. To NOT use dvipng so that other repr such as
# unicode pretty printing is used, you can use ["matplotlib"].
).tag(config=True)
use_breqn = Bool(
True,
help="Use breqn.sty to automatically break long equations. "
"This configuration takes effect only for dvipng backend.",
).tag(config=True)
packages = List(
['amsmath', 'amsthm', 'amssymb', 'bm'],
help="A list of packages to use for dvipng backend. "
"'breqn' will be automatically appended when use_breqn=True.",
).tag(config=True)
preamble = Unicode(
help="Additional preamble to use when generating LaTeX source "
"for dvipng backend.",
).tag(config=True)
def latex_to_png(s, encode=False, backend=None, wrap=False, color='Black',
scale=1.0):
"""Render a LaTeX string to PNG.
Parameters
----------
s : str
The raw string containing valid inline LaTeX.
encode : bool, optional
Whether to base64-encode the PNG data to make it JSON-able.
backend : {matplotlib, dvipng}
Backend for producing PNG data.
wrap : bool
If True, automatically wrap `s` as a LaTeX equation.
color : string
Foreground color name among dvipsnames, e.g. 'Maroon', or in hex RGB
format, e.g. '#AA20FA'.
scale : float
Scale factor for the resulting PNG.
None is returned when the backend cannot be used.
"""
s = cast_unicode(s)
allowed_backends = LaTeXTool.instance().backends
if backend is None:
backend = allowed_backends[0]
if backend not in allowed_backends:
return None
if backend == 'matplotlib':
f = latex_to_png_mpl
elif backend == 'dvipng':
f = latex_to_png_dvipng
if color.startswith('#'):
# Convert hex RGB color to LaTeX RGB color.
if len(color) == 7:
try:
color = "RGB {}".format(" ".join([str(int(x, 16)) for x in
textwrap.wrap(color[1:], 2)]))
except ValueError as e:
raise ValueError('Invalid color specification {}.'.format(color)) from e
else:
raise ValueError('Invalid color specification {}.'.format(color))
else:
raise ValueError('No such backend {0}'.format(backend))
bin_data = f(s, wrap, color, scale)
if encode and bin_data:
bin_data = encodebytes(bin_data)
return bin_data
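# Illustrative usage sketch: render an inline equation to a PNG file, assuming
# at least one configured backend is usable (otherwise None is returned and
# nothing is written). The output path is a placeholder.
def _example_render_equation(path="equation.png"):
    png = latex_to_png(r"\int_0^1 x^2\,dx", wrap=True)
    if png is not None:
        with open(path, "wb") as f:
            f.write(png)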
def latex_to_png_mpl(s, wrap, color='Black', scale=1.0):
try:
from matplotlib import figure, font_manager, mathtext
from matplotlib.backends import backend_agg
from pyparsing import ParseFatalException
except ImportError:
return None
# mpl mathtext doesn't support display math, force inline
s = s.replace('$$', '$')
if wrap:
s = u'${0}$'.format(s)
try:
prop = font_manager.FontProperties(size=12)
dpi = 120 * scale
buffer = BytesIO()
# Adapted from mathtext.math_to_image
parser = mathtext.MathTextParser("path")
width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop)
fig = figure.Figure(figsize=(width / 72, height / 72))
fig.text(0, depth / height, s, fontproperties=prop, color=color)
backend_agg.FigureCanvasAgg(fig)
fig.savefig(buffer, dpi=dpi, format="png", transparent=True)
return buffer.getvalue()
except (ValueError, RuntimeError, ParseFatalException):
return None
def latex_to_png_dvipng(s, wrap, color='Black', scale=1.0):
try:
find_cmd('latex')
find_cmd('dvipng')
except FindCmdError:
return None
try:
workdir = Path(tempfile.mkdtemp())
tmpfile = workdir.joinpath("tmp.tex")
dvifile = workdir.joinpath("tmp.dvi")
outfile = workdir.joinpath("tmp.png")
with tmpfile.open("w", encoding="utf8") as f:
f.writelines(genelatex(s, wrap))
with open(os.devnull, 'wb') as devnull:
subprocess.check_call(
["latex", "-halt-on-error", "-interaction", "batchmode", tmpfile],
cwd=workdir, stdout=devnull, stderr=devnull)
resolution = round(150*scale)
subprocess.check_call(
[
"dvipng",
"-T",
"tight",
"-D",
str(resolution),
"-z",
"9",
"-bg",
"Transparent",
"-o",
outfile,
dvifile,
"-fg",
color,
],
cwd=workdir,
stdout=devnull,
stderr=devnull,
)
with outfile.open("rb") as f:
return f.read()
except subprocess.CalledProcessError:
return None
finally:
shutil.rmtree(workdir)
def kpsewhich(filename):
"""Invoke kpsewhich command with an argument `filename`."""
try:
find_cmd("kpsewhich")
proc = subprocess.Popen(
["kpsewhich", filename],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
return stdout.strip().decode('utf8', 'replace')
except FindCmdError:
pass
def genelatex(body, wrap):
"""Generate LaTeX document for dvipng backend."""
lt = LaTeXTool.instance()
breqn = wrap and lt.use_breqn and kpsewhich("breqn.sty")
yield r'\documentclass{article}'
packages = lt.packages
if breqn:
packages = packages + ['breqn']
for pack in packages:
yield r'\usepackage{{{0}}}'.format(pack)
yield r'\pagestyle{empty}'
if lt.preamble:
yield lt.preamble
yield r'\begin{document}'
if breqn:
yield r'\begin{dmath*}'
yield body
yield r'\end{dmath*}'
elif wrap:
yield u'$${0}$$'.format(body)
else:
yield body
yield u'\\end{document}'
_data_uri_template_png = u"""<img src="data:image/png;base64,%s" alt=%s />"""
def latex_to_html(s, alt='image'):
"""Render LaTeX to HTML with embedded PNG data using data URIs.
Parameters
----------
s : str
The raw string containing valid inline LaTeX.
alt : str
The alt text to use for the HTML.
"""
bin_data = latex_to_png(s, encode=True)
if bin_data:
return _data_uri_template_png % (bin_data.decode('ascii'), alt)

View File

@ -0,0 +1,526 @@
# -*- coding: utf-8 -*-
"""
Defines a variety of Pygments lexers for highlighting IPython code.
This includes:
IPythonLexer, IPython3Lexer
Lexers for pure IPython (python + magic/shell commands)
IPythonPartialTracebackLexer, IPythonTracebackLexer
Supports 2.x and 3.x via keyword `python3`. The partial traceback
lexer reads everything but the Python code appearing in a traceback.
The full lexer combines the partial lexer with an IPython lexer.
IPythonConsoleLexer
A lexer for IPython console sessions, with support for tracebacks.
IPyLexer
A friendly lexer which examines the first line of text and from it,
decides whether to use an IPython lexer or an IPython console lexer.
This is probably the only lexer that needs to be explicitly added
to Pygments.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Standard library
import re
# Third party
from pygments.lexers import (
BashLexer, HtmlLexer, JavascriptLexer, RubyLexer, PerlLexer, PythonLexer,
Python3Lexer, TexLexer)
from pygments.lexer import (
Lexer, DelegatingLexer, RegexLexer, do_insertions, bygroups, using,
)
from pygments.token import (
Generic, Keyword, Literal, Name, Operator, Other, Text, Error,
)
from pygments.util import get_bool_opt
# Local
line_re = re.compile('.*?\n')
__all__ = ['build_ipy_lexer', 'IPython3Lexer', 'IPythonLexer',
'IPythonPartialTracebackLexer', 'IPythonTracebackLexer',
'IPythonConsoleLexer', 'IPyLexer']
def build_ipy_lexer(python3):
"""Builds IPython lexers depending on the value of `python3`.
The lexer inherits from an appropriate Python lexer and then adds
information about IPython specific keywords (i.e. magic commands,
shell commands, etc.)
Parameters
----------
python3 : bool
If `True`, then build an IPython lexer from a Python 3 lexer.
"""
# It would be nice to have a single IPython lexer class which takes
# a boolean `python3`. But since there are two Python lexer classes,
# we will also have two IPython lexer classes.
if python3:
PyLexer = Python3Lexer
name = 'IPython3'
aliases = ['ipython3']
doc = """IPython3 Lexer"""
else:
PyLexer = PythonLexer
name = 'IPython'
aliases = ['ipython2', 'ipython']
doc = """IPython Lexer"""
ipython_tokens = [
(r'(?s)(\s*)(%%capture)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
(r'(?s)(\s*)(%%debug)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
(r'(?is)(\s*)(%%html)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(HtmlLexer))),
(r'(?s)(\s*)(%%javascript)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))),
(r'(?s)(\s*)(%%js)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))),
(r'(?s)(\s*)(%%latex)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(TexLexer))),
(r'(?s)(\s*)(%%perl)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PerlLexer))),
(r'(?s)(\s*)(%%prun)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
(r'(?s)(\s*)(%%pypy)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
(r'(?s)(\s*)(%%python)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
(r'(?s)(\s*)(%%python2)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PythonLexer))),
(r'(?s)(\s*)(%%python3)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(Python3Lexer))),
(r'(?s)(\s*)(%%ruby)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(RubyLexer))),
(r'(?s)(\s*)(%%time)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
(r'(?s)(\s*)(%%timeit)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
(r'(?s)(\s*)(%%writefile)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
(r'(?s)(\s*)(%%file)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
(r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)),
(r'(?s)(^\s*)(%%!)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(BashLexer))),
(r"(%%?)(\w+)(\?\??)$", bygroups(Operator, Keyword, Operator)),
(r"\b(\?\??)(\s*)$", bygroups(Operator, Text)),
(r'(%)(sx|sc|system)(.*)(\n)', bygroups(Operator, Keyword,
using(BashLexer), Text)),
(r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, Text)),
(r'^(!!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
(r'(!)(?!=)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
(r'^(\s*)(\?\??)(\s*%{0,2}[\w\.\*]*)', bygroups(Text, Operator, Text)),
(r'(\s*%{0,2}[\w\.\*]*)(\?\??)(\s*)$', bygroups(Text, Operator, Text)),
]
tokens = PyLexer.tokens.copy()
tokens['root'] = ipython_tokens + tokens['root']
attrs = {'name': name, 'aliases': aliases, 'filenames': [],
'__doc__': doc, 'tokens': tokens}
return type(name, (PyLexer,), attrs)
IPython3Lexer = build_ipy_lexer(python3=True)
IPythonLexer = build_ipy_lexer(python3=False)
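# Illustrative usage sketch: highlighting an IPython snippet with the generated
# lexer through the standard Pygments API. The snippet and the terminal
# formatter are placeholders; any Pygments formatter works the same way.
def _example_highlight_ipython(code="%timeit sum(range(10))"):
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    return highlight(code, IPython3Lexer(), TerminalFormatter())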
class IPythonPartialTracebackLexer(RegexLexer):
"""
Partial lexer for IPython tracebacks.
Handles all the non-python output.
"""
name = 'IPython Partial Traceback'
tokens = {
'root': [
# Tracebacks for syntax errors have a different style.
# For both types of tracebacks, we mark the first line with
# Generic.Traceback. For syntax errors, we mark the filename
# as we mark the filenames for non-syntax tracebacks.
#
# These two regexps define how IPythonConsoleLexer finds a
# traceback.
#
## Non-syntax traceback
(r'^(\^C)?(-+\n)', bygroups(Error, Generic.Traceback)),
## Syntax traceback
(r'^( File)(.*)(, line )(\d+\n)',
bygroups(Generic.Traceback, Name.Namespace,
Generic.Traceback, Literal.Number.Integer)),
# (Exception Identifier)(Whitespace)(Traceback Message)
(r'(?u)(^[^\d\W]\w*)(\s*)(Traceback.*?\n)',
bygroups(Name.Exception, Generic.Whitespace, Text)),
# (Module/Filename)(Text)(Callee)(Function Signature)
# Better options for callee and function signature?
(r'(.*)( in )(.*)(\(.*\)\n)',
bygroups(Name.Namespace, Text, Name.Entity, Name.Tag)),
# Regular line: (Whitespace)(Line Number)(Python Code)
(r'(\s*?)(\d+)(.*?\n)',
bygroups(Generic.Whitespace, Literal.Number.Integer, Other)),
# Emphasized line: (Arrow)(Line Number)(Python Code)
# Using Exception token so arrow color matches the Exception.
(r'(-*>?\s?)(\d+)(.*?\n)',
bygroups(Name.Exception, Literal.Number.Integer, Other)),
# (Exception Identifier)(Message)
(r'(?u)(^[^\d\W]\w*)(:.*?\n)',
bygroups(Name.Exception, Text)),
# Tag everything else as Other, will be handled later.
(r'.*\n', Other),
],
}
class IPythonTracebackLexer(DelegatingLexer):
"""
IPython traceback lexer.
For doctests, the tracebacks can be snipped as much as desired with the
exception of the lines that designate a traceback. For non-syntax error
tracebacks, this is the line of hyphens. For syntax error tracebacks,
this is the line which lists the File and line number.
"""
# The lexer inherits from DelegatingLexer. The "root" lexer is an
# appropriate IPython lexer, which depends on the value of the boolean
# `python3`. First, we parse with the partial IPython traceback lexer.
# Then, any code marked with the "Other" token is delegated to the root
# lexer.
#
name = 'IPython Traceback'
aliases = ['ipythontb']
def __init__(self, **options):
self.python3 = get_bool_opt(options, 'python3', False)
if self.python3:
self.aliases = ['ipython3tb']
else:
self.aliases = ['ipython2tb', 'ipythontb']
if self.python3:
IPyLexer = IPython3Lexer
else:
IPyLexer = IPythonLexer
DelegatingLexer.__init__(self, IPyLexer,
IPythonPartialTracebackLexer, **options)
class IPythonConsoleLexer(Lexer):
"""
An IPython console lexer for IPython code-blocks and doctests, such as:
.. code-block:: rst
.. code-block:: ipythonconsole
In [1]: a = 'foo'
In [2]: a
Out[2]: 'foo'
In [3]: print(a)
foo
Support is also provided for IPython exceptions:
.. code-block:: rst
.. code-block:: ipythonconsole
In [1]: raise Exception
Traceback (most recent call last):
...
Exception
"""
name = 'IPython console session'
aliases = ['ipythonconsole']
mimetypes = ['text/x-ipython-console']
# The regexps used to determine what is input and what is output.
# The default prompts for IPython are:
#
# in = 'In [#]: '
# continuation = ' .D.: '
# template = 'Out[#]: '
#
# Where '#' is the 'prompt number' or 'execution count' and 'D' is a
# number of dots matching the width of the execution count.
#
in1_regex = r'In \[[0-9]+\]: '
in2_regex = r' \.\.+\.: '
out_regex = r'Out\[[0-9]+\]: '
#: The regex to determine when a traceback starts.
ipytb_start = re.compile(r'^(\^C)?(-+\n)|^( File)(.*)(, line )(\d+\n)')
def __init__(self, **options):
"""Initialize the IPython console lexer.
Parameters
----------
python3 : bool
If `True`, then the console inputs are parsed using a Python 3
lexer. Otherwise, they are parsed using a Python 2 lexer.
in1_regex : RegexObject
The compiled regular expression used to detect the start
of inputs. Although the IPython configuration setting may have a
trailing whitespace, do not include it in the regex. If `None`,
then the default input prompt is assumed.
in2_regex : RegexObject
The compiled regular expression used to detect the continuation
of inputs. Although the IPython configuration setting may have a
trailing whitespace, do not include it in the regex. If `None`,
then the default input prompt is assumed.
out_regex : RegexObject
The compiled regular expression used to detect outputs. If `None`,
then the default output prompt is assumed.
"""
self.python3 = get_bool_opt(options, 'python3', False)
if self.python3:
self.aliases = ['ipython3console']
else:
self.aliases = ['ipython2console', 'ipythonconsole']
in1_regex = options.get('in1_regex', self.in1_regex)
in2_regex = options.get('in2_regex', self.in2_regex)
out_regex = options.get('out_regex', self.out_regex)
# So that we can work with input and output prompts which have been
# rstrip'd (possibly by editors) we also need rstrip'd variants. If
# we do not do this, then such prompts will be tagged as 'output'.
# The reason we can't just use the rstrip'd variants instead is that
# we want any whitespace associated with the prompt to be inserted
# with the token. This allows formatted code to be modified so as to hide
# the appearance of prompts, with the whitespace included. One example
# use of this is in copybutton.js from the standard lib Python docs.
in1_regex_rstrip = in1_regex.rstrip() + '\n'
in2_regex_rstrip = in2_regex.rstrip() + '\n'
out_regex_rstrip = out_regex.rstrip() + '\n'
# Compile and save them all.
attrs = ['in1_regex', 'in2_regex', 'out_regex',
'in1_regex_rstrip', 'in2_regex_rstrip', 'out_regex_rstrip']
for attr in attrs:
self.__setattr__(attr, re.compile(locals()[attr]))
Lexer.__init__(self, **options)
if self.python3:
pylexer = IPython3Lexer
tblexer = IPythonTracebackLexer
else:
pylexer = IPythonLexer
tblexer = IPythonTracebackLexer
self.pylexer = pylexer(**options)
self.tblexer = tblexer(**options)
self.reset()
def reset(self):
self.mode = 'output'
self.index = 0
self.buffer = u''
self.insertions = []
def buffered_tokens(self):
"""
Generator of unprocessed tokens after doing insertions and before
changing to a new state.
"""
if self.mode == 'output':
tokens = [(0, Generic.Output, self.buffer)]
elif self.mode == 'input':
tokens = self.pylexer.get_tokens_unprocessed(self.buffer)
else: # traceback
tokens = self.tblexer.get_tokens_unprocessed(self.buffer)
for i, t, v in do_insertions(self.insertions, tokens):
# All token indexes are relative to the buffer.
yield self.index + i, t, v
# Clear it all
self.index += len(self.buffer)
self.buffer = u''
self.insertions = []
def get_mci(self, line):
"""
Parses the line and returns a 3-tuple: (mode, code, insertion).
`mode` is the next mode (or state) of the lexer, and is always equal
to 'input', 'output', or 'tb'.
`code` is a portion of the line that should be added to the buffer
corresponding to the next mode and eventually lexed by another lexer.
For example, `code` could be Python code if `mode` were 'input'.
`insertion` is a 3-tuple (index, token, text) representing an
unprocessed "token" that will be inserted into the stream of tokens
that are created from the buffer once we change modes. This is usually
the input or output prompt.
In general, the next mode depends on current mode and on the contents
of `line`.
"""
# To reduce the number of regex match checks, we have multiple
# 'if' blocks instead of 'if-elif' blocks.
# Check for possible end of input
in2_match = self.in2_regex.match(line)
in2_match_rstrip = self.in2_regex_rstrip.match(line)
if (in2_match and in2_match.group().rstrip() == line.rstrip()) or \
in2_match_rstrip:
end_input = True
else:
end_input = False
if end_input and self.mode != 'tb':
# Only look for an end of input when not in tb mode.
# An ellipsis could appear within the traceback.
mode = 'output'
code = u''
insertion = (0, Generic.Prompt, line)
return mode, code, insertion
# Check for output prompt
out_match = self.out_regex.match(line)
out_match_rstrip = self.out_regex_rstrip.match(line)
if out_match or out_match_rstrip:
mode = 'output'
if out_match:
idx = out_match.end()
else:
idx = out_match_rstrip.end()
code = line[idx:]
# Use the 'heading' token for output. We cannot use Generic.Error
# since it would conflict with exceptions.
insertion = (0, Generic.Heading, line[:idx])
return mode, code, insertion
# Check for input or continuation prompt (non stripped version)
in1_match = self.in1_regex.match(line)
if in1_match or (in2_match and self.mode != 'tb'):
# New input or when not in tb, continued input.
# We do not check for continued input when in tb since it is
# allowable to replace a long stack with an ellipsis.
mode = 'input'
if in1_match:
idx = in1_match.end()
else: # in2_match
idx = in2_match.end()
code = line[idx:]
insertion = (0, Generic.Prompt, line[:idx])
return mode, code, insertion
# Check for input or continuation prompt (stripped version)
in1_match_rstrip = self.in1_regex_rstrip.match(line)
if in1_match_rstrip or (in2_match_rstrip and self.mode != 'tb'):
# New input or when not in tb, continued input.
# We do not check for continued input when in tb since it is
# allowable to replace a long stack with an ellipsis.
mode = 'input'
if in1_match_rstrip:
idx = in1_match_rstrip.end()
else: # in2_match
idx = in2_match_rstrip.end()
code = line[idx:]
insertion = (0, Generic.Prompt, line[:idx])
return mode, code, insertion
# Check for traceback
if self.ipytb_start.match(line):
mode = 'tb'
code = line
insertion = None
return mode, code, insertion
# All other stuff...
if self.mode in ('input', 'output'):
# We assume all other text is output. Multiline input that
# does not use the continuation marker cannot be detected.
# For example, the 3 in the following is clearly output:
#
# In [1]: print 3
# 3
#
# But the following second line is part of the input:
#
# In [2]: while True:
# print True
#
# In both cases, the 2nd line will be 'output'.
#
mode = 'output'
else:
mode = 'tb'
code = line
insertion = None
return mode, code, insertion
def get_tokens_unprocessed(self, text):
self.reset()
for match in line_re.finditer(text):
line = match.group()
mode, code, insertion = self.get_mci(line)
if mode != self.mode:
# Yield buffered tokens before transitioning to new mode.
for token in self.buffered_tokens():
yield token
self.mode = mode
if insertion:
self.insertions.append((len(self.buffer), [insertion]))
self.buffer += code
for token in self.buffered_tokens():
yield token
class IPyLexer(Lexer):
r"""
Primary lexer for all IPython-like code.
This is a simple helper lexer. If the first line of the text begins with
"In \[[0-9]+\]:", then the entire text is parsed with an IPython console
lexer. If not, then the entire text is parsed with an IPython lexer.
The goal is to reduce the number of lexers that are registered
with Pygments.
"""
name = 'IPy session'
aliases = ['ipy']
def __init__(self, **options):
self.python3 = get_bool_opt(options, 'python3', False)
if self.python3:
self.aliases = ['ipy3']
else:
self.aliases = ['ipy2', 'ipy']
Lexer.__init__(self, **options)
self.IPythonLexer = IPythonLexer(**options)
self.IPythonConsoleLexer = IPythonConsoleLexer(**options)
def get_tokens_unprocessed(self, text):
# Search for the input prompt anywhere...this allows code blocks to
# begin with comments as well.
if re.match(r'.*(In \[[0-9]+\]:)', text.strip(), re.DOTALL):
lex = self.IPythonConsoleLexer
else:
lex = self.IPythonLexer
for token in lex.get_tokens_unprocessed(text):
yield token

View File

@ -0,0 +1,951 @@
# -*- coding: utf-8 -*-
"""
Python advanced pretty printer. This pretty printer is intended to
replace the old `pprint` python module which does not allow developers
to provide their own pretty print callbacks.
This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`.
Example Usage
-------------
To directly print the representation of an object use `pprint`::
from pretty import pprint
pprint(complex_object)
To get a string of the output use `pretty`::
from pretty import pretty
string = pretty(complex_object)
Extending
---------
The pretty library allows developers to add pretty printing rules for their
own objects. This process is straightforward. All you have to do is to
add a `_repr_pretty_` method to your object and call the methods on the
pretty printer passed::
class MyObject(object):
def _repr_pretty_(self, p, cycle):
...
Here's an example for a class with a simple constructor::
class MySimpleObject:
def __init__(self, a, b, *, c=None):
self.a = a
self.b = b
self.c = c
def _repr_pretty_(self, p, cycle):
ctor = CallExpression.factory(self.__class__.__name__)
if self.c is None:
p.pretty(ctor(self.a, self.b))
else:
p.pretty(ctor(self.a, self.b, c=self.c))
Here is an example implementation of a `_repr_pretty_` method for a list
subclass::
class MyList(list):
def _repr_pretty_(self, p, cycle):
if cycle:
p.text('MyList(...)')
else:
with p.group(8, 'MyList([', '])'):
for idx, item in enumerate(self):
if idx:
p.text(',')
p.breakable()
p.pretty(item)
The `cycle` parameter is `True` if pretty detected a cycle. You *have* to
react to that or the result is an infinite loop. `p.text()` just adds
non-breaking text to the output, `p.breakable()` either adds a space
or breaks here. If you pass it an argument it's used instead of the
default space. `p.pretty` prettyprints another object using the pretty print
method.
The first parameter to the `group` function specifies the extra indentation
of the next line. In this example the next item will either be on the same
line (if the items are short enough) or aligned with the right edge of the
opening bracket of `MyList`.
If you just want to indent something you can use the group function
without open / close parameters. You can also use this code::
with p.indent(2):
...
Inheritance diagram:
.. inheritance-diagram:: IPython.lib.pretty
:parts: 3
:copyright: 2007 by Armin Ronacher.
Portions (c) 2009 by Robert Kern.
:license: BSD License.
"""
from contextlib import contextmanager
import datetime
import os
import re
import sys
import types
from collections import deque
from inspect import signature
from io import StringIO
from warnings import warn
from IPython.utils.decorators import undoc
from IPython.utils.py3compat import PYPY
__all__ = ['pretty', 'pprint', 'PrettyPrinter', 'RepresentationPrinter',
'for_type', 'for_type_by_name', 'RawText', 'RawStringLiteral', 'CallExpression']
MAX_SEQ_LENGTH = 1000
_re_pattern_type = type(re.compile(''))
def _safe_getattr(obj, attr, default=None):
"""Safe version of getattr.
Same as getattr, but will return ``default`` on any Exception,
rather than raising.
"""
try:
return getattr(obj, attr, default)
except Exception:
return default
@undoc
class CUnicodeIO(StringIO):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warn(("CUnicodeIO is deprecated since IPython 6.0. "
"Please use io.StringIO instead."),
DeprecationWarning, stacklevel=2)
def _sorted_for_pprint(items):
"""
Sort the given items for pretty printing. Since some predictable
sorting is better than no sorting at all, we sort on the string
representation if normal sorting fails.
"""
items = list(items)
try:
return sorted(items)
except Exception:
try:
return sorted(items, key=str)
except Exception:
return items
def pretty(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
"""
Pretty print the object's representation.
"""
stream = StringIO()
printer = RepresentationPrinter(stream, verbose, max_width, newline, max_seq_length=max_seq_length)
printer.pretty(obj)
printer.flush()
return stream.getvalue()
def pprint(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
"""
Like `pretty` but print to stdout.
"""
printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length)
printer.pretty(obj)
printer.flush()
sys.stdout.write(newline)
sys.stdout.flush()
class _PrettyPrinterBase(object):
@contextmanager
def indent(self, indent):
"""with statement support for indenting/dedenting."""
self.indentation += indent
try:
yield
finally:
self.indentation -= indent
@contextmanager
def group(self, indent=0, open='', close=''):
"""like begin_group / end_group but for the with statement."""
self.begin_group(indent, open)
try:
yield
finally:
self.end_group(indent, close)
class PrettyPrinter(_PrettyPrinterBase):
"""
Base class for the `RepresentationPrinter` pretty printer that is used to
generate pretty reprs of objects. Contrary to the `RepresentationPrinter`
this printer knows nothing about the default pprinters or the `_repr_pretty_`
callback method.
"""
def __init__(self, output, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
self.output = output
self.max_width = max_width
self.newline = newline
self.max_seq_length = max_seq_length
self.output_width = 0
self.buffer_width = 0
self.buffer = deque()
root_group = Group(0)
self.group_stack = [root_group]
self.group_queue = GroupQueue(root_group)
self.indentation = 0
def _break_one_group(self, group):
while group.breakables:
x = self.buffer.popleft()
self.output_width = x.output(self.output, self.output_width)
self.buffer_width -= x.width
while self.buffer and isinstance(self.buffer[0], Text):
x = self.buffer.popleft()
self.output_width = x.output(self.output, self.output_width)
self.buffer_width -= x.width
def _break_outer_groups(self):
while self.max_width < self.output_width + self.buffer_width:
group = self.group_queue.deq()
if not group:
return
self._break_one_group(group)
def text(self, obj):
"""Add literal text to the output."""
width = len(obj)
if self.buffer:
text = self.buffer[-1]
if not isinstance(text, Text):
text = Text()
self.buffer.append(text)
text.add(obj, width)
self.buffer_width += width
self._break_outer_groups()
else:
self.output.write(obj)
self.output_width += width
def breakable(self, sep=' '):
"""
Add a breakable separator to the output. This does not mean that it
will automatically break here. If no breaking on this position takes
place, the `sep` is inserted, which defaults to one space.
"""
width = len(sep)
group = self.group_stack[-1]
if group.want_break:
self.flush()
self.output.write(self.newline)
self.output.write(' ' * self.indentation)
self.output_width = self.indentation
self.buffer_width = 0
else:
self.buffer.append(Breakable(sep, width, self))
self.buffer_width += width
self._break_outer_groups()
def break_(self):
"""
Explicitly insert a newline into the output, maintaining correct indentation.
"""
group = self.group_queue.deq()
if group:
self._break_one_group(group)
self.flush()
self.output.write(self.newline)
self.output.write(' ' * self.indentation)
self.output_width = self.indentation
self.buffer_width = 0
def begin_group(self, indent=0, open=''):
"""
Begin a group.
The first parameter specifies the indentation for the next line (usually
the width of the opening text), the second the opening text. All
parameters are optional.
"""
if open:
self.text(open)
group = Group(self.group_stack[-1].depth + 1)
self.group_stack.append(group)
self.group_queue.enq(group)
self.indentation += indent
def _enumerate(self, seq):
"""like enumerate, but with an upper limit on the number of items"""
for idx, x in enumerate(seq):
if self.max_seq_length and idx >= self.max_seq_length:
self.text(',')
self.breakable()
self.text('...')
return
yield idx, x
def end_group(self, dedent=0, close=''):
"""End a group. See `begin_group` for more details."""
self.indentation -= dedent
group = self.group_stack.pop()
if not group.breakables:
self.group_queue.remove(group)
if close:
self.text(close)
def flush(self):
"""Flush data that is left in the buffer."""
for data in self.buffer:
self.output_width += data.output(self.output, self.output_width)
self.buffer.clear()
self.buffer_width = 0
def _get_mro(obj_class):
""" Get a reasonable method resolution order of a class and its superclasses
for both old-style and new-style classes.
"""
if not hasattr(obj_class, '__mro__'):
# Old-style class. Mix in object to make a fake new-style class.
try:
obj_class = type(obj_class.__name__, (obj_class, object), {})
except TypeError:
# Old-style extension type that does not descend from object.
# FIXME: try to construct a more thorough MRO.
mro = [obj_class]
else:
mro = obj_class.__mro__[1:-1]
else:
mro = obj_class.__mro__
return mro
class RepresentationPrinter(PrettyPrinter):
"""
Special pretty printer that has a `pretty` method that calls the pretty
printer for a python object.
This class stores processing data on `self` so you must *never* use
this class in a threaded environment. Always lock it or reinstantiate
it.
Instances also have a verbose flag callbacks can access to control their
output. For example the default instance repr prints all attributes and
methods that are not prefixed by an underscore if the printer is in
verbose mode.
"""
def __init__(self, output, verbose=False, max_width=79, newline='\n',
singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None,
max_seq_length=MAX_SEQ_LENGTH):
PrettyPrinter.__init__(self, output, max_width, newline, max_seq_length=max_seq_length)
self.verbose = verbose
self.stack = []
if singleton_pprinters is None:
singleton_pprinters = _singleton_pprinters.copy()
self.singleton_pprinters = singleton_pprinters
if type_pprinters is None:
type_pprinters = _type_pprinters.copy()
self.type_pprinters = type_pprinters
if deferred_pprinters is None:
deferred_pprinters = _deferred_type_pprinters.copy()
self.deferred_pprinters = deferred_pprinters
def pretty(self, obj):
"""Pretty print the given object."""
obj_id = id(obj)
cycle = obj_id in self.stack
self.stack.append(obj_id)
self.begin_group()
try:
obj_class = _safe_getattr(obj, '__class__', None) or type(obj)
# First try to find registered singleton printers for the type.
try:
printer = self.singleton_pprinters[obj_id]
except (TypeError, KeyError):
pass
else:
return printer(obj, self, cycle)
# Next walk the mro and check for either:
# 1) a registered printer
# 2) a _repr_pretty_ method
for cls in _get_mro(obj_class):
if cls in self.type_pprinters:
# printer registered in self.type_pprinters
return self.type_pprinters[cls](obj, self, cycle)
else:
# deferred printer
printer = self._in_deferred_types(cls)
if printer is not None:
return printer(obj, self, cycle)
else:
# Finally look for special method names.
# Some objects automatically create any requested
# attribute. Try to ignore most of them by checking for
# callability.
if '_repr_pretty_' in cls.__dict__:
meth = cls._repr_pretty_
if callable(meth):
return meth(obj, self, cycle)
if cls is not object \
and callable(cls.__dict__.get('__repr__')):
return _repr_pprint(obj, self, cycle)
return _default_pprint(obj, self, cycle)
finally:
self.end_group()
self.stack.pop()
def _in_deferred_types(self, cls):
"""
Check if the given class is specified in the deferred type registry.
Returns the printer from the registry if it exists, and None if the
class is not in the registry. Successful matches will be moved to the
regular type registry for future use.
"""
mod = _safe_getattr(cls, '__module__', None)
name = _safe_getattr(cls, '__name__', None)
key = (mod, name)
printer = None
if key in self.deferred_pprinters:
# Move the printer over to the regular registry.
printer = self.deferred_pprinters.pop(key)
self.type_pprinters[cls] = printer
return printer
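# Illustrative usage sketch: registering a custom printer for one type when
# constructing a RepresentationPrinter directly. The Point class and its
# printer are placeholders; type_pprinters is the argument accepted by
# __init__ above. Note that passing type_pprinters replaces the default
# printer table, which this example does not rely on.
def _example_custom_type_printer():
    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y
    def _print_point(obj, p, cycle):
        p.text("Point(x=%r, y=%r)" % (obj.x, obj.y))
    out = StringIO()
    printer = RepresentationPrinter(out, type_pprinters={Point: _print_point})
    printer.pretty(Point(1, 2))
    printer.flush()
    return out.getvalue()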
class Printable(object):
def output(self, stream, output_width):
return output_width
class Text(Printable):
def __init__(self):
self.objs = []
self.width = 0
def output(self, stream, output_width):
for obj in self.objs:
stream.write(obj)
return output_width + self.width
def add(self, obj, width):
self.objs.append(obj)
self.width += width
class Breakable(Printable):
def __init__(self, seq, width, pretty):
self.obj = seq
self.width = width
self.pretty = pretty
self.indentation = pretty.indentation
self.group = pretty.group_stack[-1]
self.group.breakables.append(self)
def output(self, stream, output_width):
self.group.breakables.popleft()
if self.group.want_break:
stream.write(self.pretty.newline)
stream.write(' ' * self.indentation)
return self.indentation
if not self.group.breakables:
self.pretty.group_queue.remove(self.group)
stream.write(self.obj)
return output_width + self.width
class Group(Printable):
def __init__(self, depth):
self.depth = depth
self.breakables = deque()
self.want_break = False
class GroupQueue(object):
def __init__(self, *groups):
self.queue = []
for group in groups:
self.enq(group)
def enq(self, group):
depth = group.depth
while depth > len(self.queue) - 1:
self.queue.append([])
self.queue[depth].append(group)
def deq(self):
for stack in self.queue:
for idx, group in enumerate(reversed(stack)):
if group.breakables:
del stack[idx]
group.want_break = True
return group
for group in stack:
group.want_break = True
del stack[:]
def remove(self, group):
try:
self.queue[group.depth].remove(group)
except ValueError:
pass
class RawText:
""" Object such that ``p.pretty(RawText(value))`` is the same as ``p.text(value)``.
An example usage of this would be to show a list as binary numbers, using
``p.pretty([RawText(bin(i)) for i in integers])``.
"""
def __init__(self, value):
self.value = value
def _repr_pretty_(self, p, cycle):
p.text(self.value)
class CallExpression:
""" Object which emits a line-wrapped call expression in the form `__name(*args, **kwargs)` """
def __init__(__self, __name, *args, **kwargs):
# dunders are to avoid clashes with kwargs, as python's name mangling
# will kick in.
self = __self
self.name = __name
self.args = args
self.kwargs = kwargs
@classmethod
def factory(cls, name):
def inner(*args, **kwargs):
return cls(name, *args, **kwargs)
return inner
def _repr_pretty_(self, p, cycle):
# dunders are to avoid clashes with kwargs, as python's name mangling
# will kick in.
started = False
def new_item():
nonlocal started
if started:
p.text(",")
p.breakable()
started = True
prefix = self.name + "("
with p.group(len(prefix), prefix, ")"):
for arg in self.args:
new_item()
p.pretty(arg)
for arg_name, arg in self.kwargs.items():
new_item()
arg_prefix = arg_name + "="
with p.group(len(arg_prefix), arg_prefix):
p.pretty(arg)
class RawStringLiteral:
""" Wrapper that shows a string with a `r` prefix """
def __init__(self, value):
self.value = value
def _repr_pretty_(self, p, cycle):
base_repr = repr(self.value)
if base_repr[:1] in 'uU':
base_repr = base_repr[1:]
prefix = 'ur'
else:
prefix = 'r'
base_repr = prefix + base_repr.replace('\\\\', '\\')
p.text(base_repr)
def _default_pprint(obj, p, cycle):
"""
The default print function. Used if an object does not provide one and
it's none of the builtin objects.
"""
klass = _safe_getattr(obj, '__class__', None) or type(obj)
if _safe_getattr(klass, '__repr__', None) is not object.__repr__:
# A user-provided repr. Find newlines and replace them with p.break_()
_repr_pprint(obj, p, cycle)
return
p.begin_group(1, '<')
p.pretty(klass)
p.text(' at 0x%x' % id(obj))
if cycle:
p.text(' ...')
elif p.verbose:
first = True
for key in dir(obj):
if not key.startswith('_'):
try:
value = getattr(obj, key)
except AttributeError:
continue
if isinstance(value, types.MethodType):
continue
if not first:
p.text(',')
p.breakable()
p.text(key)
p.text('=')
step = len(key) + 1
p.indentation += step
p.pretty(value)
p.indentation -= step
first = False
p.end_group(1, '>')
def _seq_pprinter_factory(start, end):
"""
Factory that returns a pprint function useful for sequences. Used by
the default pprint for tuples and lists.
"""
def inner(obj, p, cycle):
if cycle:
return p.text(start + '...' + end)
step = len(start)
p.begin_group(step, start)
for idx, x in p._enumerate(obj):
if idx:
p.text(',')
p.breakable()
p.pretty(x)
if len(obj) == 1 and isinstance(obj, tuple):
# Special case for 1-item tuples.
p.text(',')
p.end_group(step, end)
return inner
def _set_pprinter_factory(start, end):
"""
Factory that returns a pprint function useful for sets and frozensets.
"""
def inner(obj, p, cycle):
if cycle:
return p.text(start + '...' + end)
if len(obj) == 0:
# Special case.
p.text(type(obj).__name__ + '()')
else:
step = len(start)
p.begin_group(step, start)
# Like dictionary keys, we will try to sort the items if there aren't too many
if not (p.max_seq_length and len(obj) >= p.max_seq_length):
items = _sorted_for_pprint(obj)
else:
items = obj
for idx, x in p._enumerate(items):
if idx:
p.text(',')
p.breakable()
p.pretty(x)
p.end_group(step, end)
return inner
def _dict_pprinter_factory(start, end):
"""
Factory that returns a pprint function used by the default pprint of
dicts and dict proxies.
"""
def inner(obj, p, cycle):
if cycle:
return p.text('{...}')
step = len(start)
p.begin_group(step, start)
keys = obj.keys()
for idx, key in p._enumerate(keys):
if idx:
p.text(',')
p.breakable()
p.pretty(key)
p.text(': ')
p.pretty(obj[key])
p.end_group(step, end)
return inner
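# For illustration, dict keys are shown in insertion order (no sorting here):
#
#     >>> pretty({'a': 1, 'b': 2})
#     "{'a': 1, 'b': 2}"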
def _super_pprint(obj, p, cycle):
"""The pprint for the super type."""
p.begin_group(8, '<super: ')
p.pretty(obj.__thisclass__)
p.text(',')
p.breakable()
if PYPY: # In PyPy, super() objects don't have __self__ attributes
dself = obj.__repr__.__self__
p.pretty(None if dself is obj else dself)
else:
p.pretty(obj.__self__)
p.end_group(8, '>')
class _ReFlags:
def __init__(self, value):
self.value = value
def _repr_pretty_(self, p, cycle):
done_one = False
for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL',
'UNICODE', 'VERBOSE', 'DEBUG'):
if self.value & getattr(re, flag):
if done_one:
p.text('|')
p.text('re.' + flag)
done_one = True
def _re_pattern_pprint(obj, p, cycle):
"""The pprint function for regular expression patterns."""
re_compile = CallExpression.factory('re.compile')
if obj.flags:
p.pretty(re_compile(RawStringLiteral(obj.pattern), _ReFlags(obj.flags)))
else:
p.pretty(re_compile(RawStringLiteral(obj.pattern)))
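# For illustration, a compiled pattern renders as a re.compile() call; note
# that plain str patterns carry the implicit re.UNICODE flag, so it shows up:
#
#     >>> import re
#     >>> print(pretty(re.compile(r'\s+')))
#     re.compile(r'\s+', re.UNICODE)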
def _types_simplenamespace_pprint(obj, p, cycle):
"""The pprint function for types.SimpleNamespace."""
namespace = CallExpression.factory('namespace')
if cycle:
p.pretty(namespace(RawText("...")))
else:
p.pretty(namespace(**obj.__dict__))
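# For illustration, SimpleNamespace instances render through the namespace()
# alias used by their own repr:
#
#     >>> from types import SimpleNamespace
#     >>> pretty(SimpleNamespace(x=SimpleNamespace()))
#     'namespace(x=namespace())'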
def _type_pprint(obj, p, cycle):
"""The pprint for classes and types."""
# Heap allocated types might not have the module attribute,
# and others may set it to None.
# Checks for a __repr__ override in the metaclass. Can't compare the
# type(obj).__repr__ directly because in PyPy the representation function
# inherited from type isn't the same type.__repr__
if [m for m in _get_mro(type(obj)) if "__repr__" in vars(m)][:1] != [type]:
_repr_pprint(obj, p, cycle)
return
mod = _safe_getattr(obj, '__module__', None)
try:
name = obj.__qualname__
if not isinstance(name, str):
# This can happen if the type implements __qualname__ as a property
# or other descriptor in Python 2.
raise Exception("Try __name__")
except Exception:
name = obj.__name__
if not isinstance(name, str):
name = '<unknown type>'
if mod in (None, '__builtin__', 'builtins', 'exceptions'):
p.text(name)
else:
p.text(mod + '.' + name)
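# For illustration, builtins keep their bare name while other classes are
# qualified with their module:
#
#     >>> pretty(int)
#     'int'
#     >>> from collections import OrderedDict
#     >>> pretty(OrderedDict)
#     'collections.OrderedDict'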
def _repr_pprint(obj, p, cycle):
"""A pprint that just redirects to the normal repr function."""
# Find newlines and replace them with p.break_()
output = repr(obj)
lines = output.splitlines()
with p.group():
for idx, output_line in enumerate(lines):
if idx:
p.break_()
p.text(output_line)
def _function_pprint(obj, p, cycle):
"""Base pprint for all functions and builtin functions."""
name = _safe_getattr(obj, '__qualname__', obj.__name__)
mod = obj.__module__
if mod and mod not in ('__builtin__', 'builtins', 'exceptions'):
name = mod + '.' + name
try:
func_def = name + str(signature(obj))
except ValueError:
func_def = name
p.text('<function %s>' % func_def)
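# For illustration (this matches the pretty test suite in this commit):
#
#     >>> import posixpath
#     >>> pretty(posixpath.join)
#     '<function posixpath.join(a, *p)>'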
def _exception_pprint(obj, p, cycle):
"""Base pprint for all exceptions."""
name = getattr(obj.__class__, '__qualname__', obj.__class__.__name__)
if obj.__class__.__module__ not in ('exceptions', 'builtins'):
name = '%s.%s' % (obj.__class__.__module__, name)
p.pretty(CallExpression(name, *getattr(obj, 'args', ())))
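# For illustration, exceptions render as a constructor call over their args:
#
#     >>> pretty(ValueError('boom'))
#     "ValueError('boom')"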
#: the exception base
try:
_exception_base = BaseException
except NameError:
_exception_base = Exception
#: printers for builtin types
_type_pprinters = {
int: _repr_pprint,
float: _repr_pprint,
str: _repr_pprint,
tuple: _seq_pprinter_factory('(', ')'),
list: _seq_pprinter_factory('[', ']'),
dict: _dict_pprinter_factory('{', '}'),
set: _set_pprinter_factory('{', '}'),
frozenset: _set_pprinter_factory('frozenset({', '})'),
super: _super_pprint,
_re_pattern_type: _re_pattern_pprint,
type: _type_pprint,
types.FunctionType: _function_pprint,
types.BuiltinFunctionType: _function_pprint,
types.MethodType: _repr_pprint,
types.SimpleNamespace: _types_simplenamespace_pprint,
datetime.datetime: _repr_pprint,
datetime.timedelta: _repr_pprint,
_exception_base: _exception_pprint
}
# render os.environ like a dict
_env_type = type(os.environ)
# future-proof in case os.environ becomes a plain dict?
if _env_type is not dict:
_type_pprinters[_env_type] = _dict_pprinter_factory('environ{', '}')
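# For illustration, os.environ then renders like a dict with an 'environ'
# prefix, e.g. environ{'HOME': '/home/user', ...} (contents obviously vary).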
try:
# In PyPy, types.DictProxyType is dict, setting the dictproxy printer
# using dict.setdefault avoids overwriting the dict printer
_type_pprinters.setdefault(types.DictProxyType,
_dict_pprinter_factory('dict_proxy({', '})'))
_type_pprinters[types.ClassType] = _type_pprint
_type_pprinters[types.SliceType] = _repr_pprint
except AttributeError: # Python 3
_type_pprinters[types.MappingProxyType] = \
_dict_pprinter_factory('mappingproxy({', '})')
_type_pprinters[slice] = _repr_pprint
_type_pprinters[range] = _repr_pprint
_type_pprinters[bytes] = _repr_pprint
#: printers for types specified by name
_deferred_type_pprinters = {
}
def for_type(typ, func):
"""
Add a pretty printer for a given type.
"""
oldfunc = _type_pprinters.get(typ, None)
if func is not None:
# To support easy restoration of old pprinters, we need to ignore Nones.
_type_pprinters[typ] = func
return oldfunc
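# For illustration, a sketch of registering a custom printer; the Fraction
# printer below is hypothetical and only meant to show the call shape:
#
#     >>> from fractions import Fraction
#     >>> from IPython.lib import pretty
#     >>> pretty.for_type(Fraction,
#     ...     lambda obj, p, cycle: p.text('%d/%d' % (obj.numerator, obj.denominator)))
#     >>> pretty.pretty(Fraction(3, 4))
#     '3/4'
#
# for_type returns any previously registered printer so it can be restored.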
def for_type_by_name(type_module, type_name, func):
"""
Add a pretty printer for a type specified by the module and name of a type
rather than the type object itself.
"""
key = (type_module, type_name)
oldfunc = _deferred_type_pprinters.get(key, None)
if func is not None:
# To support easy restoration of old pprinters, we need to ignore Nones.
_deferred_type_pprinters[key] = func
return oldfunc
#: printers for the default singletons
_singleton_pprinters = dict.fromkeys(map(id, [None, True, False, Ellipsis,
NotImplemented]), _repr_pprint)
def _defaultdict_pprint(obj, p, cycle):
cls_ctor = CallExpression.factory(obj.__class__.__name__)
if cycle:
p.pretty(cls_ctor(RawText("...")))
else:
p.pretty(cls_ctor(obj.default_factory, dict(obj)))
def _ordereddict_pprint(obj, p, cycle):
cls_ctor = CallExpression.factory(obj.__class__.__name__)
if cycle:
p.pretty(cls_ctor(RawText("...")))
elif len(obj):
p.pretty(cls_ctor(list(obj.items())))
else:
p.pretty(cls_ctor())
def _deque_pprint(obj, p, cycle):
cls_ctor = CallExpression.factory(obj.__class__.__name__)
if cycle:
p.pretty(cls_ctor(RawText("...")))
else:
p.pretty(cls_ctor(list(obj)))
def _counter_pprint(obj, p, cycle):
cls_ctor = CallExpression.factory(obj.__class__.__name__)
if cycle:
p.pretty(cls_ctor(RawText("...")))
elif len(obj):
p.pretty(cls_ctor(dict(obj)))
else:
p.pretty(cls_ctor())
def _userlist_pprint(obj, p, cycle):
cls_ctor = CallExpression.factory(obj.__class__.__name__)
if cycle:
p.pretty(cls_ctor(RawText("...")))
else:
p.pretty(cls_ctor(obj.data))
for_type_by_name('collections', 'defaultdict', _defaultdict_pprint)
for_type_by_name('collections', 'OrderedDict', _ordereddict_pprint)
for_type_by_name('collections', 'deque', _deque_pprint)
for_type_by_name('collections', 'Counter', _counter_pprint)
for_type_by_name("collections", "UserList", _userlist_pprint)
if __name__ == '__main__':
from random import randrange
class Foo(object):
def __init__(self):
self.foo = 1
self.bar = re.compile(r'\s+')
self.blub = dict.fromkeys(range(30), randrange(1, 40))
self.hehe = 23424.234234
self.list = ["blub", "blah", self]
def get_foo(self):
print("foo")
pprint(Foo(), verbose=True)

Binary file not shown.

View File

@ -0,0 +1,85 @@
"""Tests for pylab tools module.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2011, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib imports
import time
# Our own imports
from IPython.lib import backgroundjobs as bg
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
t_short = 0.0001 # very short interval to wait on jobs
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
def sleeper(interval=t_short, *a, **kw):
args = dict(interval=interval,
other_args=a,
kw_args=kw)
time.sleep(interval)
return args
def crasher(interval=t_short, *a, **kw):
time.sleep(interval)
raise Exception("Dead job with interval %s" % interval)
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def test_result():
"""Test job submission and result retrieval"""
jobs = bg.BackgroundJobManager()
j = jobs.new(sleeper)
j.join()
assert j.result["interval"] == t_short
def test_flush():
"""Test job control"""
jobs = bg.BackgroundJobManager()
j = jobs.new(sleeper)
j.join()
assert len(jobs.completed) == 1
assert len(jobs.dead) == 0
jobs.flush()
assert len(jobs.completed) == 0
def test_dead():
"""Test control of dead jobs"""
jobs = bg.BackgroundJobManager()
j = jobs.new(crasher)
j.join()
assert len(jobs.completed) == 0
assert len(jobs.dead) == 1
jobs.flush()
assert len(jobs.dead) == 0
def test_longer():
"""Test control of longer-running jobs"""
jobs = bg.BackgroundJobManager()
# Sleep for long enough for the following two checks to still report the
# job as running, but not so long that it makes the test suite noticeably
# slower.
j = jobs.new(sleeper, 0.1)
assert len(jobs.running) == 1
assert len(jobs.completed) == 0
j.join()
assert len(jobs.running) == 0
assert len(jobs.completed) == 1

View File

@ -0,0 +1,19 @@
from IPython.core.error import TryNext
from IPython.lib.clipboard import ClipboardEmpty
from IPython.testing.decorators import skip_if_no_x11
@skip_if_no_x11
def test_clipboard_get():
# Smoketest for clipboard access - we can't easily guarantee that the
# clipboard is accessible and has something on it, but this tries to
# exercise the relevant code anyway.
try:
a = get_ipython().hooks.clipboard_get()
except ClipboardEmpty:
# Nothing in clipboard to get
pass
except TryNext:
# No clipboard access API available
pass
else:
assert isinstance(a, str)

View File

@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
"""Test suite for the deepreload module."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import types
from pathlib import Path
import pytest
from tempfile import TemporaryDirectory
from IPython.lib.deepreload import modules_reloading
from IPython.lib.deepreload import reload as dreload
from IPython.utils.syspathcontext import prepended_to_syspath
def test_deepreload():
"Test that dreload does deep reloads and skips excluded modules."
with TemporaryDirectory() as tmpdir:
with prepended_to_syspath(tmpdir):
tmpdirpath = Path(tmpdir)
with open(tmpdirpath / "A.py", "w", encoding="utf-8") as f:
f.write("class Object:\n pass\nok = True\n")
with open(tmpdirpath / "B.py", "w", encoding="utf-8") as f:
f.write("import A\nassert A.ok, 'we are fine'\n")
import A
import B
# Test that A is not reloaded.
obj = A.Object()
dreload(B, exclude=["A"])
assert isinstance(obj, A.Object) is True
# Test that an import failure will not blow us up.
A.ok = False
with pytest.raises(AssertionError, match="we are fine"):
dreload(B, exclude=["A"])
assert len(modules_reloading) == 0
assert not A.ok
# Test that A is reloaded.
obj = A.Object()
A.ok = False
dreload(B)
assert A.ok
assert isinstance(obj, A.Object) is False
def test_not_module():
pytest.raises(TypeError, dreload, "modulename")
def test_not_in_sys_modules():
fake_module = types.ModuleType("fake_module")
with pytest.raises(ImportError, match="not in sys.modules"):
dreload(fake_module)

View File

@ -0,0 +1,272 @@
"""Tests for IPython.lib.display.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from tempfile import NamedTemporaryFile, mkdtemp
from os.path import split, join as pjoin, dirname
import pathlib
from unittest import TestCase, mock
import struct
import wave
from io import BytesIO
# Third-party imports
import pytest
try:
import numpy
except ImportError:
pass
# Our own imports
from IPython.lib import display
from IPython.testing.decorators import skipif_not_numpy
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
#--------------------------
# FileLink tests
#--------------------------
def test_instantiation_FileLink():
"""FileLink: Test class can be instantiated"""
fl = display.FileLink('example.txt')
# TODO: remove once only Python >= 3.6 is supported
fl = display.FileLink(pathlib.PurePath('example.txt'))
def test_warning_on_non_existent_path_FileLink():
"""FileLink: Calling _repr_html_ on non-existent files returns a warning"""
fl = display.FileLink("example.txt")
assert fl._repr_html_().startswith("Path (<tt>example.txt</tt>)")
def test_existing_path_FileLink():
"""FileLink: Calling _repr_html_ functions as expected on existing filepath
"""
tf = NamedTemporaryFile()
fl = display.FileLink(tf.name)
actual = fl._repr_html_()
expected = "<a href='%s' target='_blank'>%s</a><br>" % (tf.name, tf.name)
assert actual == expected
def test_existing_path_FileLink_repr():
"""FileLink: Calling repr() functions as expected on existing filepath
"""
tf = NamedTemporaryFile()
fl = display.FileLink(tf.name)
actual = repr(fl)
expected = tf.name
assert actual == expected
def test_error_on_directory_to_FileLink():
"""FileLink: Raises error when passed directory
"""
td = mkdtemp()
pytest.raises(ValueError, display.FileLink, td)
#--------------------------
# FileLinks tests
#--------------------------
def test_instantiation_FileLinks():
"""FileLinks: Test class can be instantiated
"""
fls = display.FileLinks('example')
def test_warning_on_non_existent_path_FileLinks():
"""FileLinks: Calling _repr_html_ on non-existent files returns a warning"""
fls = display.FileLinks("example")
assert fls._repr_html_().startswith("Path (<tt>example</tt>)")
def test_existing_path_FileLinks():
"""FileLinks: Calling _repr_html_ functions as expected on existing dir
"""
td = mkdtemp()
tf1 = NamedTemporaryFile(dir=td)
tf2 = NamedTemporaryFile(dir=td)
fl = display.FileLinks(td)
actual = fl._repr_html_()
actual = actual.split('\n')
actual.sort()
# the links should always have forward slashes, even on windows, so replace
# backslashes with forward slashes here
expected = ["%s/<br>" % td,
"&nbsp;&nbsp;<a href='%s' target='_blank'>%s</a><br>" %\
(tf2.name.replace("\\","/"),split(tf2.name)[1]),
"&nbsp;&nbsp;<a href='%s' target='_blank'>%s</a><br>" %\
(tf1.name.replace("\\","/"),split(tf1.name)[1])]
expected.sort()
# We compare the sorted list of links here as that's more reliable
assert actual == expected
def test_existing_path_FileLinks_alt_formatter():
"""FileLinks: Calling _repr_html_ functions as expected w/ an alt formatter
"""
td = mkdtemp()
tf1 = NamedTemporaryFile(dir=td)
tf2 = NamedTemporaryFile(dir=td)
def fake_formatter(dirname,fnames,included_suffixes):
return ["hello","world"]
fl = display.FileLinks(td,notebook_display_formatter=fake_formatter)
actual = fl._repr_html_()
actual = actual.split('\n')
actual.sort()
expected = ["hello","world"]
expected.sort()
# We compare the sorted list of links here as that's more reliable
assert actual == expected
def test_existing_path_FileLinks_repr():
"""FileLinks: Calling repr() functions as expected on existing directory """
td = mkdtemp()
tf1 = NamedTemporaryFile(dir=td)
tf2 = NamedTemporaryFile(dir=td)
fl = display.FileLinks(td)
actual = repr(fl)
actual = actual.split('\n')
actual.sort()
expected = ['%s/' % td, ' %s' % split(tf1.name)[1],' %s' % split(tf2.name)[1]]
expected.sort()
# We compare the sorted list of links here as that's more reliable
assert actual == expected
def test_existing_path_FileLinks_repr_alt_formatter():
"""FileLinks: Calling repr() functions as expected w/ alt formatter
"""
td = mkdtemp()
tf1 = NamedTemporaryFile(dir=td)
tf2 = NamedTemporaryFile(dir=td)
def fake_formatter(dirname,fnames,included_suffixes):
return ["hello","world"]
fl = display.FileLinks(td,terminal_display_formatter=fake_formatter)
actual = repr(fl)
actual = actual.split('\n')
actual.sort()
expected = ["hello","world"]
expected.sort()
# We compare the sorted list of links here as that's more reliable
assert actual == expected
def test_error_on_file_to_FileLinks():
"""FileLinks: Raises error when passed file
"""
td = mkdtemp()
tf1 = NamedTemporaryFile(dir=td)
pytest.raises(ValueError, display.FileLinks, tf1.name)
def test_recursive_FileLinks():
"""FileLinks: Does not recurse when recursive=False
"""
td = mkdtemp()
tf = NamedTemporaryFile(dir=td)
subtd = mkdtemp(dir=td)
subtf = NamedTemporaryFile(dir=subtd)
fl = display.FileLinks(td)
actual = str(fl)
actual = actual.split('\n')
assert len(actual) == 4, actual
fl = display.FileLinks(td, recursive=False)
actual = str(fl)
actual = actual.split('\n')
assert len(actual) == 2, actual
def test_audio_from_file():
path = pjoin(dirname(__file__), 'test.wav')
display.Audio(filename=path)
class TestAudioDataWithNumpy(TestCase):
@skipif_not_numpy
def test_audio_from_numpy_array(self):
test_tone = get_test_tone()
audio = display.Audio(test_tone, rate=44100)
assert len(read_wav(audio.data)) == len(test_tone)
@skipif_not_numpy
def test_audio_from_list(self):
test_tone = get_test_tone()
audio = display.Audio(list(test_tone), rate=44100)
assert len(read_wav(audio.data)) == len(test_tone)
@skipif_not_numpy
def test_audio_from_numpy_array_without_rate_raises(self):
self.assertRaises(ValueError, display.Audio, get_test_tone())
@skipif_not_numpy
def test_audio_data_normalization(self):
expected_max_value = numpy.iinfo(numpy.int16).max
for scale in [1, 0.5, 2]:
audio = display.Audio(get_test_tone(scale), rate=44100)
actual_max_value = numpy.max(numpy.abs(read_wav(audio.data)))
assert actual_max_value == expected_max_value
@skipif_not_numpy
def test_audio_data_without_normalization(self):
max_int16 = numpy.iinfo(numpy.int16).max
for scale in [1, 0.5, 0.2]:
test_tone = get_test_tone(scale)
test_tone_max_abs = numpy.max(numpy.abs(test_tone))
expected_max_value = int(max_int16 * test_tone_max_abs)
audio = display.Audio(test_tone, rate=44100, normalize=False)
actual_max_value = numpy.max(numpy.abs(read_wav(audio.data)))
assert actual_max_value == expected_max_value
def test_audio_data_without_normalization_raises_for_invalid_data(self):
self.assertRaises(
ValueError,
lambda: display.Audio([1.001], rate=44100, normalize=False))
self.assertRaises(
ValueError,
lambda: display.Audio([-1.001], rate=44100, normalize=False))
def simulate_numpy_not_installed():
try:
import numpy
return mock.patch('numpy.array', mock.MagicMock(side_effect=ImportError))
except ModuleNotFoundError:
return lambda x:x
@simulate_numpy_not_installed()
class TestAudioDataWithoutNumpy(TestAudioDataWithNumpy):
# All tests from `TestAudioDataWithNumpy` are inherited.
@skipif_not_numpy
def test_audio_raises_for_nested_list(self):
stereo_signal = [list(get_test_tone())] * 2
self.assertRaises(TypeError, lambda: display.Audio(stereo_signal, rate=44100))
@skipif_not_numpy
def get_test_tone(scale=1):
return numpy.sin(2 * numpy.pi * 440 * numpy.linspace(0, 1, 44100)) * scale
def read_wav(data):
with wave.open(BytesIO(data)) as wave_file:
wave_data = wave_file.readframes(wave_file.getnframes())
num_samples = wave_file.getnframes() * wave_file.getnchannels()
return struct.unpack('<%sh' % num_samples, wave_data)
def test_code_from_file():
c = display.Code(filename=__file__)
assert c._repr_html_().startswith('<style>')

View File

@ -0,0 +1,32 @@
"""Test installing editor hooks"""
import sys
from unittest import mock
from IPython import get_ipython
from IPython.lib import editorhooks
def test_install_editor():
called = []
def fake_popen(*args, **kwargs):
called.append({
'args': args,
'kwargs': kwargs,
})
return mock.MagicMock(**{'wait.return_value': 0})
editorhooks.install_editor('foo -l {line} -f {filename}', wait=False)
with mock.patch('subprocess.Popen', fake_popen):
get_ipython().hooks.editor('the file', 64)
assert len(called) == 1
args = called[0]["args"]
kwargs = called[0]["kwargs"]
assert kwargs == {"shell": True}
if sys.platform.startswith("win"):
expected = ["foo", "-l", "64", "-f", "the file"]
else:
expected = "foo -l 64 -f 'the file'"
cmd = args[0]
assert cmd == expected

View File

@ -0,0 +1,11 @@
# encoding: utf-8
from IPython.testing import decorators as dec
def test_import_backgroundjobs():
from IPython.lib import backgroundjobs
def test_import_deepreload():
from IPython.lib import deepreload
def test_import_demo():
from IPython.lib import demo

View File

@ -0,0 +1,192 @@
"""Tests for IPython.utils.path.py"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from contextlib import contextmanager
from unittest.mock import patch
import pytest
from IPython.lib import latextools
from IPython.testing.decorators import (
onlyif_cmds_exist,
skipif_not_matplotlib,
)
from IPython.utils.process import FindCmdError
@pytest.mark.parametrize('command', ['latex', 'dvipng'])
def test_check_latex_to_png_dvipng_fails_when_no_cmd(command):
def mock_find_cmd(arg):
if arg == command:
raise FindCmdError
with patch.object(latextools, "find_cmd", mock_find_cmd):
assert latextools.latex_to_png_dvipng("whatever", True) is None
@contextmanager
def no_op(*args, **kwargs):
yield
@onlyif_cmds_exist("latex", "dvipng")
@pytest.mark.parametrize("s, wrap", [("$$x^2$$", False), ("x^2", True)])
def test_latex_to_png_dvipng_runs(s, wrap):
"""
Test that latex_to_png_dvipng just runs without error.
"""
def mock_kpsewhich(filename):
assert filename == "breqn.sty"
return None
latextools.latex_to_png_dvipng(s, wrap)
with patch_latextool(mock_kpsewhich):
latextools.latex_to_png_dvipng(s, wrap)
def mock_kpsewhich(filename):
assert filename == "breqn.sty"
return None
@contextmanager
def patch_latextool(mock=mock_kpsewhich):
with patch.object(latextools, "kpsewhich", mock):
yield
@pytest.mark.parametrize('context', [no_op, patch_latextool])
@pytest.mark.parametrize('s_wrap', [("$x^2$", False), ("x^2", True)])
def test_latex_to_png_mpl_runs(s_wrap, context):
"""
Test that latex_to_png_mpl just runs without error.
"""
try:
import matplotlib
except ImportError:
pytest.skip("This needs matplotlib to be available")
return
s, wrap = s_wrap
with context():
latextools.latex_to_png_mpl(s, wrap)
@skipif_not_matplotlib
def test_latex_to_html():
img = latextools.latex_to_html("$x^2$")
assert "data:image/png;base64,iVBOR" in img
def test_genelatex_no_wrap():
"""
Test genelatex with wrap=False.
"""
def mock_kpsewhich(filename):
assert False, ("kpsewhich should not be called "
"(called with {0})".format(filename))
with patch_latextool(mock_kpsewhich):
assert '\n'.join(latextools.genelatex("body text", False)) == r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
body text
\end{document}'''
def test_genelatex_wrap_with_breqn():
"""
Test genelatex with wrap=True for the case breqn.sty is installed.
"""
def mock_kpsewhich(filename):
assert filename == "breqn.sty"
return "path/to/breqn.sty"
with patch_latextool(mock_kpsewhich):
assert '\n'.join(latextools.genelatex("x^2", True)) == r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{breqn}
\pagestyle{empty}
\begin{document}
\begin{dmath*}
x^2
\end{dmath*}
\end{document}'''
def test_genelatex_wrap_without_breqn():
"""
Test genelatex with wrap=True for the case breqn.sty is not installed.
"""
def mock_kpsewhich(filename):
assert filename == "breqn.sty"
return None
with patch_latextool(mock_kpsewhich):
assert '\n'.join(latextools.genelatex("x^2", True)) == r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
$$x^2$$
\end{document}'''
@skipif_not_matplotlib
@onlyif_cmds_exist('latex', 'dvipng')
def test_latex_to_png_color():
"""
Test color settings for latex_to_png.
"""
latex_string = "$x^2$"
default_value = latextools.latex_to_png(latex_string, wrap=False)
default_hexblack = latextools.latex_to_png(latex_string, wrap=False,
color='#000000')
dvipng_default = latextools.latex_to_png_dvipng(latex_string, False)
dvipng_black = latextools.latex_to_png_dvipng(latex_string, False, 'Black')
assert dvipng_default == dvipng_black
mpl_default = latextools.latex_to_png_mpl(latex_string, False)
mpl_black = latextools.latex_to_png_mpl(latex_string, False, 'Black')
assert mpl_default == mpl_black
assert default_value in [dvipng_black, mpl_black]
assert default_hexblack in [dvipng_black, mpl_black]
# Test that dvips name colors can be used without error
dvipng_maroon = latextools.latex_to_png_dvipng(latex_string, False,
'Maroon')
# And that it doesn't return the black one
assert dvipng_black != dvipng_maroon
mpl_maroon = latextools.latex_to_png_mpl(latex_string, False, 'Maroon')
assert mpl_black != mpl_maroon
mpl_white = latextools.latex_to_png_mpl(latex_string, False, 'White')
mpl_hexwhite = latextools.latex_to_png_mpl(latex_string, False, '#FFFFFF')
assert mpl_white == mpl_hexwhite
mpl_white_scale = latextools.latex_to_png_mpl(latex_string, False,
'White', 1.2)
assert mpl_white != mpl_white_scale
def test_latex_to_png_invalid_hex_colors():
"""
Test that invalid hex colors provided to dvipng gives an exception.
"""
latex_string = "$x^2$"
pytest.raises(
ValueError,
lambda: latextools.latex_to_png(
latex_string, backend="dvipng", color="#f00bar"
),
)
pytest.raises(
ValueError,
lambda: latextools.latex_to_png(latex_string, backend="dvipng", color="#f00"),
)

View File

@ -0,0 +1,176 @@
"""Test lexers module"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from unittest import TestCase
from pygments.token import Token
from pygments.lexers import BashLexer
from .. import lexers
class TestLexers(TestCase):
"""Collection of lexers tests"""
def setUp(self):
self.lexer = lexers.IPythonLexer()
self.bash_lexer = BashLexer()
def testIPythonLexer(self):
fragment = '!echo $HOME\n'
tokens = [
(Token.Operator, '!'),
]
tokens.extend(self.bash_lexer.get_tokens(fragment[1:]))
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
fragment_2 = '!' + fragment
tokens_2 = [
(Token.Operator, '!!'),
] + tokens[1:]
self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
fragment_2 = '\t %%!\n' + fragment[1:]
tokens_2 = [
(Token.Text, '\t '),
(Token.Operator, '%%!'),
(Token.Text, '\n'),
] + tokens[1:]
self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
fragment_2 = 'x = ' + fragment
tokens_2 = [
(Token.Name, 'x'),
(Token.Text, ' '),
(Token.Operator, '='),
(Token.Text, ' '),
] + tokens
self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
fragment_2 = 'x, = ' + fragment
tokens_2 = [
(Token.Name, 'x'),
(Token.Punctuation, ','),
(Token.Text, ' '),
(Token.Operator, '='),
(Token.Text, ' '),
] + tokens
self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
fragment_2 = 'x, = %sx ' + fragment[1:]
tokens_2 = [
(Token.Name, 'x'),
(Token.Punctuation, ','),
(Token.Text, ' '),
(Token.Operator, '='),
(Token.Text, ' '),
(Token.Operator, '%'),
(Token.Keyword, 'sx'),
(Token.Text, ' '),
] + tokens[1:]
self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
fragment_2 = 'f = %R function () {}\n'
tokens_2 = [
(Token.Name, 'f'),
(Token.Text, ' '),
(Token.Operator, '='),
(Token.Text, ' '),
(Token.Operator, '%'),
(Token.Keyword, 'R'),
(Token.Text, ' function () {}\n'),
]
self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
fragment_2 = '\t%%xyz\n$foo\n'
tokens_2 = [
(Token.Text, '\t'),
(Token.Operator, '%%'),
(Token.Keyword, 'xyz'),
(Token.Text, '\n$foo\n'),
]
self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
fragment_2 = '%system?\n'
tokens_2 = [
(Token.Operator, '%'),
(Token.Keyword, 'system'),
(Token.Operator, '?'),
(Token.Text, '\n'),
]
self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
fragment_2 = 'x != y\n'
tokens_2 = [
(Token.Name, 'x'),
(Token.Text, ' '),
(Token.Operator, '!='),
(Token.Text, ' '),
(Token.Name, 'y'),
(Token.Text, '\n'),
]
self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
fragment_2 = ' ?math.sin\n'
tokens_2 = [
(Token.Text, ' '),
(Token.Operator, '?'),
(Token.Text, 'math.sin'),
(Token.Text, '\n'),
]
self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
fragment = ' *int*?\n'
tokens = [
(Token.Text, ' *int*'),
(Token.Operator, '?'),
(Token.Text, '\n'),
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
fragment = '%%writefile -a foo.py\nif a == b:\n pass'
tokens = [
(Token.Operator, '%%writefile'),
(Token.Text, ' -a foo.py\n'),
(Token.Keyword, 'if'),
(Token.Text, ' '),
(Token.Name, 'a'),
(Token.Text, ' '),
(Token.Operator, '=='),
(Token.Text, ' '),
(Token.Name, 'b'),
(Token.Punctuation, ':'),
(Token.Text, '\n'),
(Token.Text, ' '),
(Token.Keyword, 'pass'),
(Token.Text, '\n'),
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
fragment = '%%timeit\nmath.sin(0)'
tokens = [
(Token.Operator, '%%timeit\n'),
(Token.Name, 'math'),
(Token.Operator, '.'),
(Token.Name, 'sin'),
(Token.Punctuation, '('),
(Token.Literal.Number.Integer, '0'),
(Token.Punctuation, ')'),
(Token.Text, '\n'),
]
fragment = '%%HTML\n<div>foo</div>'
tokens = [
(Token.Operator, '%%HTML'),
(Token.Text, '\n'),
(Token.Punctuation, '<'),
(Token.Name.Tag, 'div'),
(Token.Punctuation, '>'),
(Token.Text, 'foo'),
(Token.Punctuation, '<'),
(Token.Punctuation, '/'),
(Token.Name.Tag, 'div'),
(Token.Punctuation, '>'),
(Token.Text, '\n'),
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

View File

@ -0,0 +1,536 @@
# coding: utf-8
"""Tests for IPython.lib.pretty."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from collections import Counter, defaultdict, deque, OrderedDict, UserList
import os
import pytest
import types
import string
import sys
import unittest
import pytest
from IPython.lib import pretty
from io import StringIO
class MyList(object):
def __init__(self, content):
self.content = content
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MyList(...)")
else:
with p.group(3, "MyList(", ")"):
for (i, child) in enumerate(self.content):
if i:
p.text(",")
p.breakable()
else:
p.breakable("")
p.pretty(child)
class MyDict(dict):
def _repr_pretty_(self, p, cycle):
p.text("MyDict(...)")
class MyObj(object):
def somemethod(self):
pass
class Dummy1(object):
def _repr_pretty_(self, p, cycle):
p.text("Dummy1(...)")
class Dummy2(Dummy1):
_repr_pretty_ = None
class NoModule(object):
pass
NoModule.__module__ = None
class Breaking(object):
def _repr_pretty_(self, p, cycle):
with p.group(4,"TG: ",":"):
p.text("Breaking(")
p.break_()
p.text(")")
class BreakingRepr(object):
def __repr__(self):
return "Breaking(\n)"
class BadRepr(object):
def __repr__(self):
return 1/0
def test_indentation():
"""Test correct indentation in groups"""
count = 40
gotoutput = pretty.pretty(MyList(range(count)))
expectedoutput = "MyList(\n" + ",\n".join(" %d" % i for i in range(count)) + ")"
assert gotoutput == expectedoutput
def test_dispatch():
"""
Test correct dispatching: The _repr_pretty_ method for MyDict
must be found before the registered printer for dict.
"""
gotoutput = pretty.pretty(MyDict())
expectedoutput = "MyDict(...)"
assert gotoutput == expectedoutput
def test_callability_checking():
"""
Test that the _repr_pretty_ method is tested for callability and skipped if
not.
"""
gotoutput = pretty.pretty(Dummy2())
expectedoutput = "Dummy1(...)"
assert gotoutput == expectedoutput
@pytest.mark.parametrize(
"obj,expected_output",
zip(
[
set(),
frozenset(),
set([1]),
frozenset([1]),
set([1, 2]),
frozenset([1, 2]),
set([-1, -2, -3]),
],
[
"set()",
"frozenset()",
"{1}",
"frozenset({1})",
"{1, 2}",
"frozenset({1, 2})",
"{-3, -2, -1}",
],
),
)
def test_sets(obj, expected_output):
"""
Test that set and frozenset use Python 3 formatting.
"""
got_output = pretty.pretty(obj)
assert got_output == expected_output
def test_pprint_heap_allocated_type():
"""
Test that pprint works for heap allocated types.
"""
module_name = "xxlimited" if sys.version_info < (3, 10) else "xxlimited_35"
xxlimited = pytest.importorskip(module_name)
output = pretty.pretty(xxlimited.Null)
assert output == "xxlimited.Null"
def test_pprint_nomod():
"""
Test that pprint works for classes with no __module__.
"""
output = pretty.pretty(NoModule)
assert output == "NoModule"
def test_pprint_break():
"""
Test that p.break_ produces expected output
"""
output = pretty.pretty(Breaking())
expected = "TG: Breaking(\n ):"
assert output == expected
def test_pprint_break_repr():
"""
Test that p.break_ is used in repr
"""
output = pretty.pretty([[BreakingRepr()]])
expected = "[[Breaking(\n )]]"
assert output == expected
output = pretty.pretty([[BreakingRepr()]*2])
expected = "[[Breaking(\n ),\n Breaking(\n )]]"
assert output == expected
def test_bad_repr():
"""Don't catch bad repr errors"""
with pytest.raises(ZeroDivisionError):
pretty.pretty(BadRepr())
class BadException(Exception):
def __str__(self):
return -1
class ReallyBadRepr(object):
__module__ = 1
@property
def __class__(self):
raise ValueError("I am horrible")
def __repr__(self):
raise BadException()
def test_really_bad_repr():
with pytest.raises(BadException):
pretty.pretty(ReallyBadRepr())
class SA(object):
pass
class SB(SA):
pass
class TestsPretty(unittest.TestCase):
def test_super_repr(self):
# "<super: module_name.SA, None>"
output = pretty.pretty(super(SA))
self.assertRegex(output, r"<super: \S+.SA, None>")
# "<super: module_name.SA, <module_name.SB at 0x...>>"
sb = SB()
output = pretty.pretty(super(SA, sb))
self.assertRegex(output, r"<super: \S+.SA,\s+<\S+.SB at 0x\S+>>")
def test_long_list(self):
lis = list(range(10000))
p = pretty.pretty(lis)
last2 = p.rsplit('\n', 2)[-2:]
self.assertEqual(last2, [' 999,', ' ...]'])
def test_long_set(self):
s = set(range(10000))
p = pretty.pretty(s)
last2 = p.rsplit('\n', 2)[-2:]
self.assertEqual(last2, [' 999,', ' ...}'])
def test_long_tuple(self):
tup = tuple(range(10000))
p = pretty.pretty(tup)
last2 = p.rsplit('\n', 2)[-2:]
self.assertEqual(last2, [' 999,', ' ...)'])
def test_long_dict(self):
d = { n:n for n in range(10000) }
p = pretty.pretty(d)
last2 = p.rsplit('\n', 2)[-2:]
self.assertEqual(last2, [' 999: 999,', ' ...}'])
def test_unbound_method(self):
output = pretty.pretty(MyObj.somemethod)
self.assertIn('MyObj.somemethod', output)
class MetaClass(type):
def __new__(cls, name):
return type.__new__(cls, name, (object,), {'name': name})
def __repr__(self):
return "[CUSTOM REPR FOR CLASS %s]" % self.name
ClassWithMeta = MetaClass('ClassWithMeta')
def test_metaclass_repr():
output = pretty.pretty(ClassWithMeta)
assert output == "[CUSTOM REPR FOR CLASS ClassWithMeta]"
def test_unicode_repr():
u = u"üniçodé"
ustr = u
class C(object):
def __repr__(self):
return ustr
c = C()
p = pretty.pretty(c)
assert p == u
p = pretty.pretty([c])
assert p == "[%s]" % u
def test_basic_class():
def type_pprint_wrapper(obj, p, cycle):
if obj is MyObj:
type_pprint_wrapper.called = True
return pretty._type_pprint(obj, p, cycle)
type_pprint_wrapper.called = False
stream = StringIO()
printer = pretty.RepresentationPrinter(stream)
printer.type_pprinters[type] = type_pprint_wrapper
printer.pretty(MyObj)
printer.flush()
output = stream.getvalue()
assert output == "%s.MyObj" % __name__
assert type_pprint_wrapper.called is True
def test_collections_userlist():
# Create userlist with cycle
a = UserList()
a.append(a)
cases = [
(UserList(), "UserList([])"),
(
UserList(i for i in range(1000, 1020)),
"UserList([1000,\n"
" 1001,\n"
" 1002,\n"
" 1003,\n"
" 1004,\n"
" 1005,\n"
" 1006,\n"
" 1007,\n"
" 1008,\n"
" 1009,\n"
" 1010,\n"
" 1011,\n"
" 1012,\n"
" 1013,\n"
" 1014,\n"
" 1015,\n"
" 1016,\n"
" 1017,\n"
" 1018,\n"
" 1019])",
),
(a, "UserList([UserList(...)])"),
]
for obj, expected in cases:
assert pretty.pretty(obj) == expected
# TODO: pytest.mark.parametrize once nose is gone.
def test_collections_defaultdict():
# Create defaultdicts with cycles
a = defaultdict()
a.default_factory = a
b = defaultdict(list)
b['key'] = b
# Dictionary order cannot be relied on, test against single keys.
cases = [
(defaultdict(list), 'defaultdict(list, {})'),
(defaultdict(list, {'key': '-' * 50}),
"defaultdict(list,\n"
" {'key': '--------------------------------------------------'})"),
(a, 'defaultdict(defaultdict(...), {})'),
(b, "defaultdict(list, {'key': defaultdict(...)})"),
]
for obj, expected in cases:
assert pretty.pretty(obj) == expected
# TODO: pytest.mark.parametrize once nose is gone.
def test_collections_ordereddict():
# Create OrderedDict with cycle
a = OrderedDict()
a['key'] = a
cases = [
(OrderedDict(), 'OrderedDict()'),
(OrderedDict((i, i) for i in range(1000, 1010)),
'OrderedDict([(1000, 1000),\n'
' (1001, 1001),\n'
' (1002, 1002),\n'
' (1003, 1003),\n'
' (1004, 1004),\n'
' (1005, 1005),\n'
' (1006, 1006),\n'
' (1007, 1007),\n'
' (1008, 1008),\n'
' (1009, 1009)])'),
(a, "OrderedDict([('key', OrderedDict(...))])"),
]
for obj, expected in cases:
assert pretty.pretty(obj) == expected
# TODO: pytest.mark.parametrize once nose is gone.
def test_collections_deque():
# Create deque with cycle
a = deque()
a.append(a)
cases = [
(deque(), 'deque([])'),
(deque(i for i in range(1000, 1020)),
'deque([1000,\n'
' 1001,\n'
' 1002,\n'
' 1003,\n'
' 1004,\n'
' 1005,\n'
' 1006,\n'
' 1007,\n'
' 1008,\n'
' 1009,\n'
' 1010,\n'
' 1011,\n'
' 1012,\n'
' 1013,\n'
' 1014,\n'
' 1015,\n'
' 1016,\n'
' 1017,\n'
' 1018,\n'
' 1019])'),
(a, 'deque([deque(...)])'),
]
for obj, expected in cases:
assert pretty.pretty(obj) == expected
# TODO: pytest.mark.parametrize once nose is gone.
def test_collections_counter():
class MyCounter(Counter):
pass
cases = [
(Counter(), 'Counter()'),
(Counter(a=1), "Counter({'a': 1})"),
(MyCounter(a=1), "MyCounter({'a': 1})"),
]
for obj, expected in cases:
assert pretty.pretty(obj) == expected
# TODO: pytest.mark.parametrize once nose is gone.
def test_mappingproxy():
MP = types.MappingProxyType
underlying_dict = {}
mp_recursive = MP(underlying_dict)
underlying_dict[2] = mp_recursive
underlying_dict[3] = underlying_dict
cases = [
(MP({}), "mappingproxy({})"),
(MP({None: MP({})}), "mappingproxy({None: mappingproxy({})})"),
(MP({k: k.upper() for k in string.ascii_lowercase}),
"mappingproxy({'a': 'A',\n"
" 'b': 'B',\n"
" 'c': 'C',\n"
" 'd': 'D',\n"
" 'e': 'E',\n"
" 'f': 'F',\n"
" 'g': 'G',\n"
" 'h': 'H',\n"
" 'i': 'I',\n"
" 'j': 'J',\n"
" 'k': 'K',\n"
" 'l': 'L',\n"
" 'm': 'M',\n"
" 'n': 'N',\n"
" 'o': 'O',\n"
" 'p': 'P',\n"
" 'q': 'Q',\n"
" 'r': 'R',\n"
" 's': 'S',\n"
" 't': 'T',\n"
" 'u': 'U',\n"
" 'v': 'V',\n"
" 'w': 'W',\n"
" 'x': 'X',\n"
" 'y': 'Y',\n"
" 'z': 'Z'})"),
(mp_recursive, "mappingproxy({2: {...}, 3: {2: {...}, 3: {...}}})"),
(underlying_dict,
"{2: mappingproxy({2: {...}, 3: {...}}), 3: {...}}"),
]
for obj, expected in cases:
assert pretty.pretty(obj) == expected
# TODO: pytest.mark.parametrize once nose is gone.
def test_simplenamespace():
SN = types.SimpleNamespace
sn_recursive = SN()
sn_recursive.first = sn_recursive
sn_recursive.second = sn_recursive
cases = [
(SN(), "namespace()"),
(SN(x=SN()), "namespace(x=namespace())"),
(SN(a_long_name=[SN(s=string.ascii_lowercase)]*3, a_short_name=None),
"namespace(a_long_name=[namespace(s='abcdefghijklmnopqrstuvwxyz'),\n"
" namespace(s='abcdefghijklmnopqrstuvwxyz'),\n"
" namespace(s='abcdefghijklmnopqrstuvwxyz')],\n"
" a_short_name=None)"),
(sn_recursive, "namespace(first=namespace(...), second=namespace(...))"),
]
for obj, expected in cases:
assert pretty.pretty(obj) == expected
def test_pretty_environ():
dict_repr = pretty.pretty(dict(os.environ))
# reindent to align with 'environ' prefix
dict_indented = dict_repr.replace('\n', '\n' + (' ' * len('environ')))
env_repr = pretty.pretty(os.environ)
assert env_repr == "environ" + dict_indented
def test_function_pretty():
"Test pretty print of function"
# posixpath is a pure Python module, so its interface is consistent
# across Python distributions
import posixpath
assert pretty.pretty(posixpath.join) == "<function posixpath.join(a, *p)>"
# custom function
def meaning_of_life(question=None):
if question:
return 42
return "Don't panic"
assert "meaning_of_life(question=None)" in pretty.pretty(meaning_of_life)
class OrderedCounter(Counter, OrderedDict):
'Counter that remembers the order elements are first encountered'
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
def __reduce__(self):
return self.__class__, (OrderedDict(self),)
class MySet(set): # Override repr of a basic type
def __repr__(self):
return 'mine'
def test_custom_repr():
"""A custom repr should override a pretty printer for a parent type"""
oc = OrderedCounter("abracadabra")
assert "OrderedCounter(OrderedDict" in pretty.pretty(oc)
assert pretty.pretty(MySet()) == "mine"