first commit

This commit is contained in:
Ayxan
2022-05-23 00:16:32 +04:00
commit d660f2a4ca
24786 changed files with 4428337 additions and 0 deletions

View File

@ -0,0 +1,5 @@
"""Utilities for converting notebooks to and from different formats."""
from . import filters, postprocessors, preprocessors, writers
from ._version import __version__, version_info # noqa
from .exporters import *

View File

@ -0,0 +1,3 @@
from .nbconvertapp import main

# Run the application only when executed (``python -m nbconvert``); the guard
# keeps a plain ``import nbconvert.__main__`` free of side effects.
if __name__ == "__main__":
    main()

View File

@ -0,0 +1,67 @@
"""Version information, exposed as a PEP 440 compliant ``__version__`` string."""
import re

version_info = (6, 5, 0)
pre_info = ""
dev_info = ""

# Canonical public-version regex from the PEP 440 specification appendix.
# Compiled once at module level instead of on every call.
_CANONICAL_VERSION_RE = re.compile(
    r"^([1-9]\d*!)?(0|[1-9]\d*)"
    r"(\.(0|[1-9]\d*))*((a|b|rc)(0|[1-9]\d*))?"
    r"(\.post(0|[1-9]\d*))?(\.dev(0|[1-9]\d*))?$"
)


def create_valid_version(release_info, epoch=None, pre_input="", dev_input=""):
    """
    Creates a pep440 valid version of version number given a tuple integers
    and optional epoch, prerelease and developmental info.

    Parameters
    ----------
    release_info : Tuple(Int)
        Release numbers, e.g. ``(6, 5, 0)``.
    epoch : Int, default None
        Optional PEP 440 epoch; rendered as ``"<epoch>!"``.
    pre_input : Str, default ''
        Pre-release segment; must start with one of ``a``, ``b``, ``rc``.
    dev_input : Str, default ''
        Development segment; ``"devN"`` or ``".devN"``. A missing trailing
        number is normalized to ``0``.

    Returns
    -------
    str
        The assembled, PEP 440 canonical version string.

    Raises
    ------
    ValueError
        If any segment (or the assembled result) is not PEP 440 compliant.
    """
    pep440_err = "The version number is not a pep 440 compliant version number"
    epoch_seg = str(epoch) + "!" if epoch is not None else ""
    release_seg = ".".join(map(str, release_info))
    # str.startswith accepts a tuple of allowed prefixes.
    if pre_input and not pre_input.startswith(("a", "b", "rc")):
        raise ValueError(pep440_err + "\n please fix your prerelease segment.")
    pre_seg = pre_input
    # Normalize the dev segment to the canonical ".devN" form.
    if dev_input == "":
        dev_seg = dev_input
    elif dev_input.startswith(".dev"):
        dev_seg = dev_input
    elif dev_input.startswith("dev"):
        dev_seg = "." + dev_input
    else:
        raise ValueError(pep440_err + "\n please fix your development segment.")
    if dev_input and dev_seg[-1] not in "0123456789":
        # "dev" with no number means dev release zero.
        dev_seg += "0"
    out_version = "".join([epoch_seg, release_seg, pre_seg, dev_seg])
    # Final safety net: reject anything that is not canonical PEP 440.
    if _CANONICAL_VERSION_RE.match(out_version) is None:
        raise ValueError(pep440_err)
    return out_version


__version__ = create_valid_version(version_info, pre_input=pre_info, dev_input=dev_info)

View File

@ -0,0 +1,5 @@
import asyncio
import os

# On Windows, force the selector-based event loop.
# NOTE(review): presumably because the default Proactor loop lacks the
# add_reader/add_writer APIs that some dependencies (e.g. tornado/jupyter
# stack) rely on -- confirm against the packages importing this module.
if os.name == "nt":
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

View File

@ -0,0 +1,14 @@
from .asciidoc import ASCIIDocExporter
from .base import ExporterNameError, export, get_export_names, get_exporter
from .exporter import Exporter, FilenameExtension
from .html import HTMLExporter
from .latex import LatexExporter
from .markdown import MarkdownExporter
from .notebook import NotebookExporter
from .pdf import PDFExporter
from .python import PythonExporter
from .rst import RSTExporter
from .script import ScriptExporter
from .slides import SlidesExporter
from .templateexporter import TemplateExporter
from .webpdf import WebPDFExporter

View File

@ -0,0 +1,52 @@
"""ASCIIDoc Exporter class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import default
from traitlets.config import Config
from .templateexporter import TemplateExporter
class ASCIIDocExporter(TemplateExporter):
    """Exporter producing an ASCIIDoc document (``.asciidoc``)."""

    # MIME type reported for the produced document.
    output_mimetype = "text/asciidoc"
    # Friendly name shown in notebook front-end export menus.
    export_from_notebook = "AsciiDoc"

    @default("file_extension")
    def _file_extension_default(self):
        return ".asciidoc"

    @default("template_name")
    def _template_name_default(self):
        return "asciidoc"

    @default("raw_mimetypes")
    def _raw_mimetypes_default(self):
        # NOTE(review): the trailing slash in "text/asciidoc/" is preserved
        # from the original -- confirm whether it is intentional.
        return ["text/asciidoc/", "text/markdown", "text/html", ""]

    @property
    def default_config(self):
        overrides = {
            "NbConvertBase": {
                "display_data_priority": [
                    "text/html",
                    "text/markdown",
                    "image/svg+xml",
                    "image/png",
                    "image/jpeg",
                    "text/plain",
                    "text/latex",
                ]
            },
            "ExtractOutputPreprocessor": {"enabled": True},
            "HighlightMagicsPreprocessor": {"enabled": True},
        }
        merged = Config(overrides)
        # Parent defaults take precedence on conflicting keys.
        merged.merge(super().default_config)
        return merged

View File

@ -0,0 +1,154 @@
"""Module containing single call export functions."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import entrypoints
from nbformat import NotebookNode
from traitlets.config import get_config
from traitlets.log import get_logger
from traitlets.utils.importstring import import_item
from .exporter import Exporter
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
# Public API of this module (controls ``from ... import *`` and documents
# the supported entry points).
__all__ = [
    "export",
    "Exporter",
    "get_exporter",
    "get_export_names",
    "ExporterNameError",
]
class ExporterNameError(NameError):
    """Raised when a requested exporter name cannot be resolved."""
class ExporterDisabledError(ValueError):
    """Raised when the requested exporter exists but is disabled by config."""
def export(exporter, nb, **kw):
    """
    Export a notebook object using specific exporter class.

    Parameters
    ----------
    exporter : ``Exporter`` class or instance
        Class or instance of the exporter that should be used. If the
        method initializes its own instance of the class, it is ASSUMED that
        the class type provided exposes a constructor (``__init__``) with the same
        signature as the base Exporter class.
    nb : :class:`~nbformat.NotebookNode`
        The notebook to export.
    config : config (optional, keyword arg)
        User configuration instance.
    resources : dict (optional, keyword arg)
        Resources used in the conversion process.

    Returns
    -------
    tuple
        output : str
            The resulting converted notebook.
        resources : dictionary
            Dictionary of resources used prior to and during the conversion
            process.

    Raises
    ------
    TypeError
        If ``exporter`` or ``nb`` is None, or ``exporter`` is neither an
        ``Exporter`` subclass nor an ``Exporter`` instance.
    """
    # Check arguments
    if exporter is None:
        raise TypeError("Exporter is None")
    is_instance = isinstance(exporter, Exporter)
    # Guard issubclass with an isinstance(..., type) check: calling
    # issubclass on a non-class object would itself raise a confusing
    # TypeError instead of the intended message below.
    if not is_instance and not (isinstance(exporter, type) and issubclass(exporter, Exporter)):
        raise TypeError("exporter does not inherit from Exporter (base)")
    if nb is None:
        raise TypeError("nb is None")
    # Create the exporter; remaining keyword args configure the constructor.
    resources = kw.pop("resources", None)
    exporter_instance = exporter if is_instance else exporter(**kw)
    # Dispatch on the type of the notebook argument.
    if isinstance(nb, NotebookNode):
        output, resources = exporter_instance.from_notebook_node(nb, resources)
    elif isinstance(nb, str):
        output, resources = exporter_instance.from_filename(nb, resources)
    else:
        output, resources = exporter_instance.from_file(nb, resources)
    return output, resources
def get_exporter(name, config=get_config()):  # noqa
    """Given an exporter name or import path, return a class ready to be instantiated

    Raises ExporterNameError if exporter is not found or ExporterDisabledError
    if it is found but disabled in the configuration.

    NOTE: the ``config`` default is evaluated once at import time (hence the
    ``# noqa``); callers normally pass an explicit config.
    """
    # "ipynb" is accepted as an alias for the notebook exporter.
    if name == "ipynb":
        name = "notebook"
    try:
        # Exact entrypoint lookup first.
        exporter = entrypoints.get_single("nbconvert.exporters", name).load()
        if getattr(exporter(config=config), "enabled", True):
            return exporter
        else:
            raise ExporterDisabledError('Exporter "%s" disabled in configuration' % (name))
    except entrypoints.NoSuchEntryPoint:
        # Fall back to a case-insensitive entrypoint lookup.
        try:
            exporter = entrypoints.get_single("nbconvert.exporters", name.lower()).load()
            if getattr(exporter(config=config), "enabled", True):
                return exporter
            else:
                raise ExporterDisabledError('Exporter "%s" disabled in configuration' % (name))
        except entrypoints.NoSuchEntryPoint:
            pass
    # A dotted name is treated as an import path to the exporter class.
    if "." in name:
        try:
            exporter = import_item(name)
            if getattr(exporter(config=config), "enabled", True):
                return exporter
            else:
                raise ExporterDisabledError('Exporter "%s" disabled in configuration' % (name))
        except ImportError:
            log = get_logger()
            log.error("Error importing %s" % name, exc_info=True)
    # Nothing matched: report the known exporter names to the caller.
    raise ExporterNameError(
        'Unknown exporter "{}", did you mean one of: {}?'.format(
            name, ", ".join(get_export_names())
        )
    )
def get_export_names(config=get_config()):  # noqa
    """Return a list of the currently supported export targets

    Exporters can be found in external packages by registering
    them as an nbconvert.exporter entrypoint.

    NOTE: the ``config`` default is evaluated once at import time (hence the
    ``# noqa``); callers normally pass an explicit config.
    """
    exporters = sorted(entrypoints.get_group_named("nbconvert.exporters"))
    # Escape hatch: skip per-exporter instantiation entirely when disabled
    # via the environment.
    if os.environ.get("NBCONVERT_DISABLE_CONFIG_EXPORTERS"):
        get_logger().info(
            "Config exporter loading disabled, no additional exporters will be automatically included."
        )
        return exporters
    enabled_exporters = []
    for exporter_name in exporters:
        try:
            e = get_exporter(exporter_name)(config=config)
            if e.enabled:
                enabled_exporters.append(exporter_name)
        except (ExporterDisabledError, ValueError):
            # Disabled (or otherwise unconstructable) exporters are simply
            # omitted from the listing.
            pass
    return enabled_exporters

View File

@ -0,0 +1,342 @@
"""This module defines a base Exporter class. For Jinja template-based export,
see templateexporter.py.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import collections
import copy
import datetime
import os
import sys
from typing import Optional
import nbformat
from traitlets import Bool, HasTraits, List, TraitError, Unicode
from traitlets.config import Config
from traitlets.config.configurable import LoggingConfigurable
from traitlets.utils.importstring import import_item
class ResourcesDict(collections.defaultdict):
    """A defaultdict that yields an empty string for missing keys.

    Unlike a factory-based defaultdict, the missing key is NOT inserted
    into the mapping on lookup.
    """

    def __missing__(self, key):
        # Return a falsy placeholder without storing it.
        return ""
class FilenameExtension(Unicode):
    """A trait for filename extensions (must begin with a dot, or be empty)."""

    # Empty string means "no extension configured".
    default_value = ""
    info_text = "a filename extension, beginning with a dot"

    def validate(self, obj, value):
        """Return the validated value, or raise TraitError if it lacks a dot."""
        # cast to proper unicode
        value = super().validate(obj, value)
        # check that it starts with a dot
        if value and not value.startswith("."):
            msg = "FileExtension trait '{}' does not begin with a dot: {!r}"
            raise TraitError(msg.format(self.name, value))
        return value
class Exporter(LoggingConfigurable):
    """
    Class containing methods that sequentially run a list of preprocessors on a
    NotebookNode object and then return the modified NotebookNode object and
    accompanying resources dict.
    """

    enabled = Bool(True, help="Disable this exporter (and any exporters inherited from it).").tag(
        config=True
    )

    file_extension = FilenameExtension(
        help="Extension of the file that should be written to disk"
    ).tag(config=True)

    optimistic_validation = Bool(
        False,
        help="Reduces the number of validation steps so that it only occurs after all preprocesors have run.",
    ).tag(config=True)

    # MIME type of the result file, for HTTP response headers.
    # This is *not* a traitlet, because we want to be able to access it from
    # the class, not just on instances.
    output_mimetype = ""

    # Should this converter be accessible from the notebook front-end?
    # If so, should be a friendly name to display (and possibly translated).
    export_from_notebook = None

    # Configurability, allows the user to easily add filters and preprocessors.
    preprocessors = List(help="""List of preprocessors, by name or namespace, to enable.""").tag(
        config=True
    )
    # Resolved preprocessor callables; populated by _init_preprocessors().
    _preprocessors = List()

    default_preprocessors = List(
        [
            "nbconvert.preprocessors.TagRemovePreprocessor",
            "nbconvert.preprocessors.RegexRemovePreprocessor",
            "nbconvert.preprocessors.ClearOutputPreprocessor",
            "nbconvert.preprocessors.ExecutePreprocessor",
            "nbconvert.preprocessors.coalesce_streams",
            "nbconvert.preprocessors.SVG2PDFPreprocessor",
            "nbconvert.preprocessors.LatexPreprocessor",
            "nbconvert.preprocessors.HighlightMagicsPreprocessor",
            "nbconvert.preprocessors.ExtractOutputPreprocessor",
            "nbconvert.preprocessors.ClearMetadataPreprocessor",
        ],
        help="""List of preprocessors available by default, by name, namespace,
        instance, or type.""",
    ).tag(config=True)

    def __init__(self, config=None, **kw):
        """
        Public constructor

        Parameters
        ----------
        config : ``traitlets.config.Config``
            User configuration instance.
        `**kw`
            Additional keyword arguments passed to parent __init__
        """
        # Start from this exporter's defaults and merge the user config on
        # top, so user-provided values win on conflicting keys.
        with_default_config = self.default_config
        if config:
            with_default_config.merge(config)
        super().__init__(config=with_default_config, **kw)
        self._init_preprocessors()
        # Notebook-name -> notebook-metadata mapping, recorded by
        # from_notebook_node for later use by filters/consumers.
        self._nb_metadata = {}

    @property
    def default_config(self):
        # The base exporter contributes no defaults; subclasses override.
        return Config()

    def from_notebook_node(self, nb, resources=None, **kw):
        """
        Convert a notebook from a notebook node instance.

        Parameters
        ----------
        nb : :class:`~nbformat.NotebookNode`
            Notebook node (dict-like with attr-access)
        resources : dict
            Additional resources that can be accessed read/write by
            preprocessors and filters.
        `**kw`
            Ignored

        Returns
        -------
        tuple of (preprocessed notebook copy, resources dict)
        """
        # Deep-copy so preprocessors never mutate the caller's notebook.
        nb_copy = copy.deepcopy(nb)
        resources = self._init_resources(resources)
        if "language" in nb["metadata"]:
            resources["language"] = nb["metadata"]["language"].lower()
        # Preprocess
        nb_copy, resources = self._preprocess(nb_copy, resources)
        notebook_name = ""
        if resources is not None:
            name = resources.get("metadata", {}).get("name", "")
            path = resources.get("metadata", {}).get("path", "")
            notebook_name = os.path.join(path, name)
        # Record this notebook's metadata under its (possibly empty) name.
        self._nb_metadata[notebook_name] = nb_copy.metadata
        return nb_copy, resources

    def from_filename(self, filename: str, resources: Optional[dict] = None, **kw):
        """
        Convert a notebook from a notebook file.

        Parameters
        ----------
        filename : str
            Full filename of the notebook file to open and convert.
        resources : dict
            Additional resources that can be accessed read/write by
            preprocessors and filters.
        `**kw`
            Ignored
        """
        # Pull the metadata from the filesystem.
        if resources is None:
            resources = ResourcesDict()
        if "metadata" not in resources or resources["metadata"] == "":
            resources["metadata"] = ResourcesDict()
        path, basename = os.path.split(filename)
        notebook_name = os.path.splitext(basename)[0]
        resources["metadata"]["name"] = notebook_name
        resources["metadata"]["path"] = path
        # NOTE(review): naive local time; used only for a display string.
        modified_date = datetime.datetime.fromtimestamp(os.path.getmtime(filename))
        # datetime.strftime date format for ipython
        if sys.platform == "win32":
            date_format = "%B %d, %Y"
        else:
            # "%-d" (no zero padding) is a platform extension not available
            # on Windows, hence the branch above.
            date_format = "%B %-d, %Y"
        resources["metadata"]["modified_date"] = modified_date.strftime(date_format)
        with open(filename, encoding="utf-8") as f:
            return self.from_file(f, resources=resources, **kw)

    def from_file(self, file_stream, resources=None, **kw):
        """
        Convert a notebook from a notebook file.

        Parameters
        ----------
        file_stream : file-like object
            Notebook file-like object to convert.
        resources : dict
            Additional resources that can be accessed read/write by
            preprocessors and filters.
        `**kw`
            Ignored
        """
        # Always normalize to nbformat v4 before converting.
        return self.from_notebook_node(
            nbformat.read(file_stream, as_version=4), resources=resources, **kw
        )

    def register_preprocessor(self, preprocessor, enabled=False):
        """
        Register a preprocessor.
        Preprocessors are classes that act upon the notebook before it is
        passed into the Jinja templating engine. Preprocessors are also
        capable of passing additional information to the Jinja
        templating engine.

        Parameters
        ----------
        preprocessor : `nbconvert.preprocessors.Preprocessor`
            A dotted module name, a type, or an instance
        enabled : bool
            Mark the preprocessor as enabled

        Returns
        -------
        The registered preprocessor instance/callable (for the callable
        branch); recursive branches return None.
        """
        if preprocessor is None:
            raise TypeError("preprocessor must not be None")
        isclass = isinstance(preprocessor, type)
        constructed = not isclass
        # Handle preprocessor's registration based on it's type
        if constructed and isinstance(
            preprocessor,
            str,
        ):
            # Preprocessor is a string, import the namespace and recursively call
            # this register_preprocessor method
            preprocessor_cls = import_item(preprocessor)
            return self.register_preprocessor(preprocessor_cls, enabled)
        if constructed and hasattr(preprocessor, "__call__"):  # noqa
            # Preprocessor is a function, no need to construct it.
            # Register and return the preprocessor.
            if enabled:
                preprocessor.enabled = True
            self._preprocessors.append(preprocessor)
            return preprocessor
        elif isclass and issubclass(preprocessor, HasTraits):
            # Preprocessor is configurable.  Make sure to pass in new default for
            # the enabled flag if one was specified.
            self.register_preprocessor(preprocessor(parent=self), enabled)
        elif isclass:
            # Preprocessor is not configurable, construct it
            self.register_preprocessor(preprocessor(), enabled)
        else:
            # Preprocessor is an instance of something without a __call__
            # attribute.
            raise TypeError(
                "preprocessor must be callable or an importable constructor, got %r" % preprocessor
            )

    def _init_preprocessors(self):
        """
        Register all of the preprocessors needed for this exporter, disabled
        unless specified explicitly.
        """
        self._preprocessors = []
        # Load default preprocessors (not necessarily enabled by default).
        for preprocessor in self.default_preprocessors:
            self.register_preprocessor(preprocessor)
        # Load user-specified preprocessors.  Enable by default.
        for preprocessor in self.preprocessors:
            self.register_preprocessor(preprocessor, enabled=True)

    def _init_resources(self, resources):
        """Normalize *resources* to nested ResourcesDict with defaults set."""
        # Make sure the resources dict is of ResourcesDict type.
        if resources is None:
            resources = ResourcesDict()
        if not isinstance(resources, ResourcesDict):
            new_resources = ResourcesDict()
            new_resources.update(resources)
            resources = new_resources
        # Make sure the metadata extension exists in resources
        if "metadata" in resources:
            if not isinstance(resources["metadata"], ResourcesDict):
                new_metadata = ResourcesDict()
                new_metadata.update(resources["metadata"])
                resources["metadata"] = new_metadata
        else:
            resources["metadata"] = ResourcesDict()
        if not resources["metadata"]["name"]:
            resources["metadata"]["name"] = "Notebook"
        # Set the output extension
        resources["output_extension"] = self.file_extension
        return resources

    def _validate_preprocessor(self, nbc, preprocessor):
        # Log which preprocessor invalidated the notebook, then re-raise.
        try:
            nbformat.validate(nbc, relax_add_props=True)
        except nbformat.ValidationError:
            self.log.error("Notebook is invalid after preprocessor %s", preprocessor)
            raise

    def _preprocess(self, nb, resources):
        """
        Preprocess the notebook before passing it into the Jinja engine.
        To preprocess the notebook is to successively apply all the
        enabled preprocessors. Output from each preprocessor is passed
        along to the next one.

        Parameters
        ----------
        nb : notebook node
            notebook that is being exported.
        resources : a dict of additional resources that
            can be accessed read/write by preprocessors
        """
        # Do a copy.deepcopy first,
        # we are never safe enough with what the preprocessors could do.
        nbc = copy.deepcopy(nb)
        resc = copy.deepcopy(resources)
        # Run each preprocessor on the notebook.  Carry the output along
        # to each preprocessor
        for preprocessor in self._preprocessors:
            nbc, resc = preprocessor(nbc, resc)
            if not self.optimistic_validation:
                # Validate after every preprocessor (the default).
                self._validate_preprocessor(nbc, preprocessor)
        if self.optimistic_validation:
            # Validate only once, after all preprocessors have run.
            # NOTE(review): if the preprocessor list is empty, `preprocessor`
            # is unbound here -- confirm that cannot occur in practice.
            self._validate_preprocessor(nbc, preprocessor)
        return nbc, resc

View File

@ -0,0 +1,290 @@
"""HTML Exporter class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import base64
import json
import mimetypes
import os
from pathlib import Path
import jinja2
import markupsafe
from jupyter_core.paths import jupyter_path
from traitlets import Bool, Unicode, default
from traitlets.config import Config
if tuple(int(x) for x in jinja2.__version__.split(".")[:3]) < (3, 0, 0):
from jinja2 import contextfilter
else:
from jinja2 import pass_context as contextfilter
from jinja2.loaders import split_template_path
from nbconvert.filters.highlight import Highlight2HTML
from nbconvert.filters.markdown_mistune import IPythonRenderer, MarkdownWithMath
from nbconvert.filters.widgetsdatatypefilter import WidgetsDataTypeFilter
from .templateexporter import TemplateExporter
def find_lab_theme(theme_name):
    """
    Find a JupyterLab theme location by name.

    Parameters
    ----------
    theme_name : str
        The name of the labextension theme you want to find.

    Raises
    ------
    ValueError
        If the theme was not found, or if it was not specific enough.

    Returns
    -------
    theme_name : str
        Full theme name (with scope, if any)
    labextension_path : Path
        The path to the found labextension on the system.
    """
    paths = jupyter_path("labextensions")
    matching_themes = []
    # Initialize so a logic change can never leave these unbound.
    full_theme_name = ""
    theme_path = None
    for path in paths:
        for dirpath, dirnames, filenames in os.walk(path):
            # A federated labextension shipping themes has both a
            # package.json and a themes/ directory.
            if "package.json" in filenames and "themes" in dirnames:
                # TODO Find the theme name in the JS code instead?
                # TODO Find if it's a light or dark theme?
                with open(Path(dirpath) / "package.json", encoding="utf-8") as fobj:
                    # json.load reads directly from the file object.
                    labext_name = json.load(fobj)["name"]
                if labext_name == theme_name or theme_name in labext_name.split("/"):
                    matching_themes.append(labext_name)
                    full_theme_name = labext_name
                    theme_path = Path(dirpath) / "themes" / labext_name
    if len(matching_themes) == 0:
        raise ValueError(f'Could not find lab theme "{theme_name}"')
    if len(matching_themes) > 1:
        raise ValueError(
            f'Found multiple themes matching "{theme_name}": {matching_themes}. '
            "Please be more specific about which theme you want to use."
        )
    return full_theme_name, theme_path
class HTMLExporter(TemplateExporter):
    """
    Exports a basic HTML document.  This exporter assists with the export of
    HTML.  Inherit from it if you are writing your own HTML template and need
    custom preprocessors/filters.  If you don't need custom preprocessors/
    filters, just change the 'template_file' config option.
    """

    export_from_notebook = "HTML"

    anchor_link_text = Unicode("", help="The text used as the text for anchor links.").tag(
        config=True
    )

    exclude_anchor_links = Bool(False, help="If anchor links should be included or not.").tag(
        config=True
    )

    require_js_url = Unicode(
        "https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js",
        help="""
        URL to load require.js from.

        Defaults to loading from cdnjs.
        """,
    ).tag(config=True)

    mathjax_url = Unicode(
        "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/latest.js?config=TeX-AMS_CHTML-full,Safe",
        help="""
        URL to load Mathjax from.

        Defaults to loading from cdnjs.
        """,
    ).tag(config=True)

    jquery_url = Unicode(
        "https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js",
        help="""
        URL to load jQuery from.

        Defaults to loading from cdnjs.
        """,
    ).tag(config=True)

    jupyter_widgets_base_url = Unicode(
        "https://unpkg.com/", help="URL base for Jupyter widgets"
    ).tag(config=True)

    widget_renderer_url = Unicode("", help="Full URL for Jupyter widgets").tag(config=True)

    html_manager_semver_range = Unicode(
        "*", help="Semver range for Jupyter widgets HTML manager"
    ).tag(config=True)

    @default("file_extension")
    def _file_extension_default(self):
        return ".html"

    @default("template_name")
    def _template_name_default(self):
        return "lab"

    theme = Unicode(
        "light",
        help="Template specific theme(e.g. the name of a JupyterLab CSS theme distributed as prebuilt extension for the lab template)",
    ).tag(config=True)

    embed_images = Bool(
        False, help="Whether or not to embed images as base64 in markdown cells."
    ).tag(config=True)

    # MIME type of the produced document, e.g. for HTTP response headers.
    output_mimetype = "text/html"

    @property
    def default_config(self):
        # Display priority prefers interactive/rich representations first.
        c = Config(
            {
                "NbConvertBase": {
                    "display_data_priority": [
                        "application/vnd.jupyter.widget-view+json",
                        "application/javascript",
                        "text/html",
                        "text/markdown",
                        "image/svg+xml",
                        "text/latex",
                        "image/png",
                        "image/jpeg",
                        "text/plain",
                    ]
                },
                "HighlightMagicsPreprocessor": {"enabled": True},
            }
        )
        c.merge(super().default_config)
        return c

    @contextfilter
    def markdown2html(self, context, source):
        """Markdown to HTML filter respecting the anchor_link_text setting"""
        # Pull cell attachments and the notebook path from the Jinja context
        # so relative image references can be resolved (or embedded).
        cell = context.get("cell", {})
        attachments = cell.get("attachments", {})
        path = context.get("resources", {}).get("metadata", {}).get("path", "")
        renderer = IPythonRenderer(
            escape=False,
            attachments=attachments,
            embed_images=self.embed_images,
            path=path,
            anchor_link_text=self.anchor_link_text,
            exclude_anchor_links=self.exclude_anchor_links,
        )
        return MarkdownWithMath(renderer=renderer).render(source)

    def default_filters(self):
        # Parent filters plus the markdown renderer defined above.
        yield from super().default_filters()
        yield ("markdown2html", self.markdown2html)

    def from_notebook_node(self, nb, resources=None, **kw):
        """Convert a notebook node, registering HTML-specific filters first."""
        langinfo = nb.metadata.get("language_info", {})
        lexer = langinfo.get("pygments_lexer", langinfo.get("name", None))
        highlight_code = self.filters.get(
            "highlight_code", Highlight2HTML(pygments_lexer=lexer, parent=self)
        )
        filter_data_type = WidgetsDataTypeFilter(
            notebook_metadata=self._nb_metadata, parent=self, resources=resources
        )
        self.register_filter("highlight_code", highlight_code)
        self.register_filter("filter_data_type", filter_data_type)
        return super().from_notebook_node(nb, resources, **kw)

    def _init_resources(self, resources):
        # The closures below are exposed to templates via the resources dict.

        def resources_include_css(name):
            # Inline a CSS template file into a <style> tag.
            env = self.environment
            code = """<style type="text/css">\n%s</style>""" % (env.loader.get_source(env, name)[0])
            return markupsafe.Markup(code)

        def resources_include_lab_theme(name):
            # Inline a JupyterLab theme's CSS, embedding its assets as data URLs.
            # Try to find the theme with the given name, looking through the labextensions
            _, theme_path = find_lab_theme(name)
            with open(theme_path / "index.css") as file:
                data = file.read()
            # Embed assets (fonts, images...)
            for asset in os.listdir(theme_path):
                local_url = f"url({Path(asset).as_posix()})"
                if local_url in data:
                    mime_type = mimetypes.guess_type(asset)[0]
                    # Replace asset url by a base64 dataurl
                    with open(theme_path / asset, "rb") as assetfile:
                        base64_data = base64.b64encode(assetfile.read())
                        base64_data = base64_data.replace(b"\n", b"").decode("ascii")
                        data = data.replace(
                            local_url, f"url(data:{mime_type};base64,{base64_data})"
                        )
            code = """<style type="text/css">\n%s</style>""" % data
            return markupsafe.Markup(code)

        def resources_include_js(name):
            # Inline a JS template file into a <script> tag.
            env = self.environment
            code = """<script>\n%s</script>""" % (env.loader.get_source(env, name)[0])
            return markupsafe.Markup(code)

        def resources_include_url(name):
            # Return a base64 data: URL for a template resource (text or binary).
            env = self.environment
            mime_type, encoding = mimetypes.guess_type(name)
            try:
                # we try to load via the jinja loader, but that tries to load
                # as (encoded) text
                data = env.loader.get_source(env, name)[0].encode("utf8")
            except UnicodeDecodeError:
                # if that fails (for instance a binary file, png or ttf)
                # we mimic jinja2
                pieces = split_template_path(name)
                for searchpath in self.template_paths:
                    filename = os.path.join(searchpath, *pieces)
                    if os.path.exists(filename):
                        with open(filename, "rb") as f:
                            data = f.read()
                        break
                else:
                    # for/else: no search path contained the file
                    raise ValueError(f"No file {name!r} found in {searchpath!r}")
            data = base64.b64encode(data)
            data = data.replace(b"\n", b"").decode("ascii")
            src = f"data:{mime_type};base64,{data}"
            return markupsafe.Markup(src)

        resources = super()._init_resources(resources)
        resources["theme"] = self.theme
        resources["include_css"] = resources_include_css
        resources["include_lab_theme"] = resources_include_lab_theme
        resources["include_js"] = resources_include_js
        resources["include_url"] = resources_include_url
        resources["require_js_url"] = self.require_js_url
        resources["mathjax_url"] = self.mathjax_url
        resources["jquery_url"] = self.jquery_url
        resources["jupyter_widgets_base_url"] = self.jupyter_widgets_base_url
        resources["widget_renderer_url"] = self.widget_renderer_url
        resources["html_manager_semver_range"] = self.html_manager_semver_range
        return resources

View File

@ -0,0 +1,87 @@
"""LaTeX Exporter class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import default
from traitlets.config import Config
from nbconvert.filters.filter_links import resolve_references
from nbconvert.filters.highlight import Highlight2Latex
from .templateexporter import TemplateExporter
class LatexExporter(TemplateExporter):
    """
    Exports to a Latex template. Inherit from this class if your template is
    LaTeX based and you need custom transformers/filters.
    If you don't need custom transformers/filters, just change the
    'template_file' config option. Place your template in the special "/latex"
    subfolder of the "../templates" folder.
    """

    export_from_notebook = "LaTeX"
    output_mimetype = "text/latex"

    @default("file_extension")
    def _file_extension_default(self):
        return ".tex"

    @default("template_name")
    def _template_name_default(self):
        return "latex"

    def default_filters(self):
        # Everything the base exporter provides, plus LaTeX link resolution.
        yield from super().default_filters()
        yield ("resolve_references", resolve_references)

    @property
    def default_config(self):
        overrides = {
            "NbConvertBase": {
                "display_data_priority": [
                    "text/latex",
                    "application/pdf",
                    "image/png",
                    "image/jpeg",
                    "image/svg+xml",
                    "text/markdown",
                    "text/plain",
                ]
            },
            "ExtractOutputPreprocessor": {"enabled": True},
            "SVG2PDFPreprocessor": {"enabled": True},
            "LatexPreprocessor": {"enabled": True},
            "SphinxPreprocessor": {"enabled": True},
            "HighlightMagicsPreprocessor": {"enabled": True},
        }
        merged = Config(overrides)
        # Parent defaults take precedence on conflicting keys.
        merged.merge(super().default_config)
        return merged

    def from_notebook_node(self, nb, resources=None, **kw):
        """Register a LaTeX code highlighter for the notebook's language, then convert."""
        lang_info = nb.metadata.get("language_info", {})
        notebook_lexer = lang_info.get("pygments_lexer", lang_info.get("name", None))
        code_highlighter = self.filters.get(
            "highlight_code", Highlight2Latex(pygments_lexer=notebook_lexer, parent=self)
        )
        self.register_filter("highlight_code", code_highlighter)
        return super().from_notebook_node(nb, resources, **kw)

    def _create_environment(self):
        environment = super()._create_environment()
        # Use Jinja2 delimiters that cannot collide with LaTeX markup.
        latex_safe_delimiters = {
            "block_start_string": "((*",
            "block_end_string": "*))",
            "variable_start_string": "(((",
            "variable_end_string": ")))",
            "comment_start_string": "((=",
            "comment_end_string": "=))",
        }
        for attribute, marker in latex_safe_delimiters.items():
            setattr(environment, attribute, marker)
        return environment

View File

@ -0,0 +1,53 @@
"""Markdown Exporter class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import default
from traitlets.config import Config
from .templateexporter import TemplateExporter
class MarkdownExporter(TemplateExporter):
    """Exporter that writes the notebook out as a markdown document (.md)."""

    export_from_notebook = "Markdown"
    output_mimetype = "text/markdown"

    @default("file_extension")
    def _file_extension_default(self):
        return ".md"

    @default("template_name")
    def _template_name_default(self):
        return "markdown"

    @default("raw_mimetypes")
    def _raw_mimetypes_default(self):
        return ["text/markdown", "text/html", ""]

    @property
    def default_config(self):
        overrides = {
            "ExtractOutputPreprocessor": {"enabled": True},
            "NbConvertBase": {
                "display_data_priority": [
                    "text/html",
                    "text/markdown",
                    "image/svg+xml",
                    "text/latex",
                    "image/png",
                    "image/jpeg",
                    "text/plain",
                ]
            },
            "HighlightMagicsPreprocessor": {"enabled": True},
        }
        merged = Config(overrides)
        # Parent defaults take precedence on conflicting keys.
        merged.merge(super().default_config)
        return merged

View File

@ -0,0 +1,43 @@
"""NotebookExporter class"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import nbformat
from traitlets import Enum, default
from .exporter import Exporter
class NotebookExporter(Exporter):
    """Exports to an IPython notebook.

    This is useful when you want to use nbconvert's preprocessors to operate on
    a notebook (e.g. to execute it) and then write it back to a notebook file.
    """

    nbformat_version = Enum(
        list(nbformat.versions),
        default_value=nbformat.current_nbformat,
        help="""The nbformat version to write.
        Use this to downgrade notebooks.
        """,
    ).tag(config=True)

    output_mimetype = "application/json"
    export_from_notebook = "Notebook"

    @default("file_extension")
    def _file_extension_default(self):
        return ".ipynb"

    def from_notebook_node(self, nb, resources=None, **kw):
        """Preprocess, then serialize the notebook back to JSON text."""
        node, resources = super().from_notebook_node(nb, resources, **kw)
        # A version-converted notebook gets a ".vN" suffix; otherwise mark
        # the output as produced by nbconvert.
        if self.nbformat_version == node.nbformat:
            resources["output_suffix"] = ".nbconvert"
        else:
            resources["output_suffix"] = ".v%i" % self.nbformat_version
        serialized = nbformat.writes(node, version=self.nbformat_version)
        # Guarantee a trailing newline in the written file.
        if not serialized.endswith("\n"):
            serialized = serialized + "\n"
        return serialized, resources

View File

@ -0,0 +1,212 @@
"""Export to PDF via latex"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import shutil
import subprocess
import sys
from tempfile import TemporaryDirectory
from traitlets import Bool, Instance, Integer, List, Unicode, default
from ..utils import _contextlib_chdir
from .latex import LatexExporter
class LatexFailed(IOError):
    """Exception for failed latex run

    Captured latex output is in error.output.
    """

    def __init__(self, output):
        # Keep the raw LaTeX log so callers can display or inspect it.
        self.output = output

    def __str__(self):
        # Define __str__ directly (Python 3); previously the message lived in
        # the Python 2-era __unicode__ hook.
        return "PDF creating failed, captured latex output:\n%s" % self.output

    # Kept for backward compatibility with callers of the old API.
    def __unicode__(self):
        return self.__str__()
def prepend_to_env_search_path(varname, value, envdict):
    """Prepend *value* to the search-path variable *varname* in *envdict*.

    e.g. prepend_to_env_search_path('BIBINPUTS', '/home/sally/foo', os.environ)
    """
    if not value:
        # Nothing to add.
        return
    previous = envdict.get(varname, "")
    # The new entry goes first; a trailing separator is kept even when the
    # variable was previously unset (meaningful to TeX search paths).
    envdict[varname] = os.pathsep.join([value, previous])
class PDFExporter(LatexExporter):
    """Writer designed to write to PDF files.

    This inherits from `LatexExporter`. It creates a LaTeX file in
    a temporary directory using the template machinery, and then runs LaTeX
    to create a pdf.
    """

    export_from_notebook = "PDF via LaTeX"

    latex_count = Integer(3, help="How many times latex will be called.").tag(config=True)

    # NOTE: "{filename}" is a placeholder interpolated per-run in run_command;
    # it had been corrupted to a literal "(unknown)" which broke compilation.
    latex_command = List(
        ["xelatex", "{filename}", "-quiet"], help="Shell command used to compile latex."
    ).tag(config=True)

    bib_command = List(["bibtex", "{filename}"], help="Shell command used to run bibtex.").tag(
        config=True
    )

    verbose = Bool(False, help="Whether to display the output of latex commands.").tag(config=True)

    texinputs = Unicode(help="texinputs dir. A notebook's directory is added")

    writer = Instance("nbconvert.writers.FilesWriter", args=(), kw={"build_directory": "."})

    output_mimetype = "application/pdf"

    # Output captured from failed latex/bibtex runs; joined into the
    # LatexFailed message if no PDF is produced.
    _captured_output = List()

    @default("file_extension")
    def _file_extension_default(self):
        return ".pdf"

    @default("template_extension")
    def _template_extension_default(self):
        return ".tex.j2"

    def run_command(self, command_list, filename, count, log_function, raise_on_failure=None):
        """Run command_list count times.

        Parameters
        ----------
        command_list : list
            A list of args to provide to Popen. Each element of this
            list will be interpolated with the filename to convert.
        filename : unicode
            The name of the file to convert.
        count : int
            How many times to run the command.
        log_function : callable
            Called as ``log_function(command, out)`` when a run fails.
        raise_on_failure : Exception class (default None)
            If provided, will raise the given exception on command failure
            instead of returning False.

        Returns
        -------
        success : bool
            A boolean indicating if the command was successful (True)
            or failed (False).
        """
        command = [c.format(filename=filename) for c in command_list]

        # This will throw a clearer error if the command is not found
        cmd = shutil.which(command_list[0])
        if cmd is None:
            link = "https://nbconvert.readthedocs.io/en/latest/install.html#installing-tex"
            raise OSError(
                "{formatter} not found on PATH, if you have not installed "
                "{formatter} you may need to do so. Find further instructions "
                "at {link}.".format(formatter=command_list[0], link=link)
            )

        times = "time" if count == 1 else "times"
        self.log.info("Running %s %i %s: %s", command_list[0], count, times, command)

        # On Windows the command must be a single string handed to the shell.
        shell = sys.platform == "win32"
        if shell:
            command = subprocess.list2cmdline(command)

        # Make local notebook resources (images, .bib files, styles) visible
        # to the TeX toolchain.
        env = os.environ.copy()
        prepend_to_env_search_path("TEXINPUTS", self.texinputs, env)
        prepend_to_env_search_path("BIBINPUTS", self.texinputs, env)
        prepend_to_env_search_path("BSTINPUTS", self.texinputs, env)

        with open(os.devnull, "rb") as null:
            stdout = subprocess.PIPE if not self.verbose else None
            for _ in range(count):
                p = subprocess.Popen(
                    command,
                    stdout=stdout,
                    stderr=subprocess.STDOUT,
                    stdin=null,
                    shell=shell,
                    env=env,
                )
                out, _ = p.communicate()
                if p.returncode:
                    if self.verbose:
                        # verbose means I didn't capture stdout with PIPE,
                        # so it's already been displayed and `out` is None.
                        out = ""
                    else:
                        out = out.decode("utf-8", "replace")
                    log_function(command, out)
                    self._captured_output.append(out)
                    if raise_on_failure:
                        raise raise_on_failure(
                            'Failed to run "{command}" command:\n{output}'.format(
                                command=command, output=out
                            )
                        )
                    return False  # failure
        return True  # success

    def run_latex(self, filename, raise_on_failure=LatexFailed):
        """Run xelatex self.latex_count times."""

        def log_error(command, out):
            self.log.critical("%s failed: %s\n%s", command[0], command, out)

        return self.run_command(
            self.latex_command, filename, self.latex_count, log_error, raise_on_failure
        )

    def run_bib(self, filename, raise_on_failure=False):
        """Run bibtex one time."""
        # bibtex wants the basename without the .tex extension.
        filename = os.path.splitext(filename)[0]

        def log_error(command, out):
            self.log.warning(
                "%s had problems, most likely because there were no citations", command[0]
            )
            self.log.debug("%s output: %s\n%s", command[0], command, out)

        return self.run_command(self.bib_command, filename, 1, log_error, raise_on_failure)

    def from_notebook_node(self, nb, resources=None, **kw):
        """Convert the notebook to LaTeX, compile it, and return the PDF bytes."""
        latex, resources = super().from_notebook_node(nb, resources=resources, **kw)
        # set texinputs directory, so that local files will be found
        if resources and resources.get("metadata", {}).get("path"):
            self.texinputs = resources["metadata"]["path"]
        else:
            self.texinputs = os.getcwd()
        # Reset captured output from any previous conversion so a failure only
        # reports this run's output.  (Previously assigned to the misspelled
        # `_captured_outputs`, which never cleared the attribute actually read
        # below.)
        self._captured_output = []
        with TemporaryDirectory() as td, _contextlib_chdir.chdir(td):
            notebook_name = "notebook"
            resources["output_extension"] = ".tex"
            tex_file = self.writer.write(latex, resources, notebook_name=notebook_name)
            self.log.info("Building PDF")
            self.run_latex(tex_file)
            if self.run_bib(tex_file):
                # Re-run latex so bibliography references resolve.
                self.run_latex(tex_file)
            pdf_file = notebook_name + ".pdf"
            if not os.path.isfile(pdf_file):
                raise LatexFailed("\n".join(self._captured_output))
            self.log.info("PDF successfully created")
            with open(pdf_file, "rb") as f:
                pdf_data = f.read()
        # convert output extension to pdf
        # the writer above required it to be tex
        resources["output_extension"] = ".pdf"
        # clear figure outputs, extracted by latex export,
        # so we don't claim to be a multi-file export.
        resources.pop("outputs", None)
        return pdf_data, resources

View File

@ -0,0 +1,26 @@
"""Python script Exporter class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import default
from .templateexporter import TemplateExporter
class PythonExporter(TemplateExporter):
    """
    Exports a Python code file.

    Note that the file produced will have a shebang of '#!/usr/bin/env python'
    regardless of the actual python version used in the notebook.
    """

    output_mimetype = "text/x-python"

    @default("file_extension")
    def _file_extension_default(self):
        return ".py"

    @default("template_name")
    def _template_name_default(self):
        return "python"

View File

@ -0,0 +1,37 @@
"""reStructuredText Exporter class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import default
from traitlets.config import Config
from .templateexporter import TemplateExporter
class RSTExporter(TemplateExporter):
    """
    Exports reStructuredText documents.
    """

    output_mimetype = "text/restructuredtext"
    export_from_notebook = "reST"

    @default("file_extension")
    def _file_extension_default(self):
        return ".rst"

    @default("template_name")
    def _template_name_default(self):
        return "rst"

    @property
    def default_config(self):
        # On top of the inherited defaults, extract outputs to files and
        # highlight cell magics.
        overrides = {
            "ExtractOutputPreprocessor": {"enabled": True},
            "HighlightMagicsPreprocessor": {"enabled": True},
        }
        c = Config(overrides)
        c.merge(super().default_config)
        return c

View File

@ -0,0 +1,68 @@
"""Generic script exporter class for any kernel language"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import entrypoints
from traitlets import Dict, default
from .base import get_exporter
from .templateexporter import TemplateExporter
class ScriptExporter(TemplateExporter):
    """Exports a script in the notebook's kernel language."""

    # Caches of already looked-up and instantiated exporters for delegation:
    _exporters = Dict()
    _lang_exporters = Dict()

    export_from_notebook = "Script"

    @default("template_file")
    def _template_file_default(self):
        return "script.j2"

    @default("template_name")
    def _template_name_default(self):
        return "script"

    def _get_language_exporter(self, lang_name):
        """Find an exporter for the language name from notebook metadata.

        Uses the nbconvert.exporters.script group of entry points.
        Returns None if no exporter is found.
        """
        if lang_name not in self._lang_exporters:
            try:
                exporter_class = entrypoints.get_single(
                    "nbconvert.exporters.script", lang_name
                ).load()
            except entrypoints.NoSuchEntryPoint:
                # Cache the miss so the lookup is not repeated.
                self._lang_exporters[lang_name] = None
            else:
                # TODO: passing config is wrong, but changing this revealed more complicated issues
                self._lang_exporters[lang_name] = exporter_class(config=self.config, parent=self)
        return self._lang_exporters[lang_name]

    def from_notebook_node(self, nb, resources=None, **kw):
        """Export the notebook, delegating to a language-specific exporter when one exists."""
        langinfo = nb.metadata.get("language_info", {})

        # delegate to custom exporter, if specified
        exporter_name = langinfo.get("nbconvert_exporter")
        if exporter_name and exporter_name != "script":
            self.log.debug("Loading script exporter: %s", exporter_name)
            if exporter_name not in self._exporters:
                exporter_class = get_exporter(exporter_name)
                # TODO: passing config is wrong, but changing this revealed more complicated issues
                self._exporters[exporter_name] = exporter_class(config=self.config, parent=self)
            return self._exporters[exporter_name].from_notebook_node(nb, resources, **kw)

        # Look up a script exporter for this notebook's language
        lang_name = langinfo.get("name")
        if lang_name:
            self.log.debug("Using script exporter for language: %s", lang_name)
            delegate = self._get_language_exporter(lang_name)
            if delegate is not None:
                return delegate.from_notebook_node(nb, resources, **kw)

        # Fall back to plain script export
        self.file_extension = langinfo.get("file_extension", ".txt")
        self.output_mimetype = langinfo.get("mimetype", "text/plain")
        return super().from_notebook_node(nb, resources, **kw)

View File

@ -0,0 +1,187 @@
"""HTML slide show Exporter class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from copy import deepcopy
from warnings import warn
from traitlets import Bool, Unicode, default
from ..preprocessors.base import Preprocessor
from .html import HTMLExporter
class _RevealMetadataPreprocessor(Preprocessor):
    # A custom preprocessor adding convenience metadata to cells

    def preprocess(self, nb, resources=None):
        """Annotate each cell with slide boundary metadata for the reveal template.

        Works on a deep copy of the notebook and sets, per cell:
        ``slide_type`` (normalized), plus boolean ``slide_start``/``slide_end``,
        ``subslide_start``/``subslide_end`` and ``fragment_start``/``fragment_end``
        flags used by the reveal template to open/close <section> and fragment
        <div> elements.

        Raises
        ------
        ValueError
            If every cell is marked 'notes' or 'skip' (nothing to show).
        """
        nb = deepcopy(nb)
        for cell in nb.cells:
            # Make sure every cell has a slide_type
            try:
                slide_type = cell.metadata.get("slideshow", {}).get("slide_type", "-")
            except AttributeError:
                # metadata is not dict-like; fall back to the default type
                slide_type = "-"
            cell.metadata.slide_type = slide_type

        # Find the first visible cell
        for index, cell in enumerate(nb.cells):
            if cell.metadata.slide_type not in {"notes", "skip"}:
                # The first visible cell always opens the first slide/subslide.
                cell.metadata.slide_type = "slide"
                cell.metadata.slide_start = True
                cell.metadata.subslide_start = True
                first_slide_ix = index
                break
        else:
            raise ValueError("All cells are hidden, cannot create slideshow")

        # Walk the remaining cells, marking where slides/subslides/fragments
        # open and close.  `in_fragment` tracks whether a fragment <div> is
        # currently open.
        in_fragment = False

        for index, cell in enumerate(nb.cells[first_slide_ix + 1 :], start=(first_slide_ix + 1)):

            previous_cell = nb.cells[index - 1]

            # Slides are <section> elements in the HTML, subslides (the vertically
            # stacked slides) are also <section> elements inside the slides,
            # and fragments are <div>s within subslides. Subslide and fragment
            # elements can contain content:
            # <section>
            #     <section>
            #         (content)
            #         <div class="fragment">(content)</div>
            #     </section>
            # </section>

            # Get the slide type. If type is subslide or slide,
            # end the last slide/subslide/fragment as applicable.
            if cell.metadata.slide_type == "slide":
                previous_cell.metadata.slide_end = True
                cell.metadata.slide_start = True
            if cell.metadata.slide_type in {"subslide", "slide"}:
                previous_cell.metadata.fragment_end = in_fragment
                previous_cell.metadata.subslide_end = True
                cell.metadata.subslide_start = True
                in_fragment = False

            elif cell.metadata.slide_type == "fragment":
                cell.metadata.fragment_start = True
                if in_fragment:
                    # Close the previous fragment before opening this one.
                    previous_cell.metadata.fragment_end = True
                else:
                    in_fragment = True

        # The last cell will always be the end of a slide
        nb.cells[-1].metadata.fragment_end = in_fragment
        nb.cells[-1].metadata.subslide_end = True
        nb.cells[-1].metadata.slide_end = True

        return nb, resources
class SlidesExporter(HTMLExporter):
    """Exports HTML slides with reveal.js"""

    # Overrides from HTMLExporter
    #################################
    export_from_notebook = "Reveal.js slides"

    @default("template_name")
    def _template_name_default(self):
        return "reveal"

    @default("file_extension")
    def _file_extension_default(self):
        return ".slides.html"

    @default("template_extension")
    def _template_extension_default(self):
        return ".html.j2"

    # Extra resources
    #################################
    reveal_url_prefix = Unicode(
        help="""The URL prefix for reveal.js (version 3.x).
        This defaults to the reveal CDN, but can be any url pointing to a copy
        of reveal.js.
        For speaker notes to work, this must be a relative path to a local
        copy of reveal.js: e.g., "reveal.js".
        If a relative path is given, it must be a subdirectory of the
        current directory (from which the server is run).
        See the usage documentation
        (https://nbconvert.readthedocs.io/en/latest/usage.html#reveal-js-html-slideshow)
        for more details.
        """
    ).tag(config=True)

    @default("reveal_url_prefix")
    def _reveal_url_prefix_default(self):
        # Honour the deprecated 5.x config location before falling back to the CDN.
        if "RevealHelpPreprocessor.url_prefix" in self.config:
            warn(
                "Please update RevealHelpPreprocessor.url_prefix to "
                "SlidesExporter.reveal_url_prefix in config files."
            )
            return self.config.RevealHelpPreprocessor.url_prefix
        return "https://unpkg.com/reveal.js@4.0.2"

    reveal_theme = Unicode(
        "simple",
        help="""
        Name of the reveal.js theme to use.
        We look for a file with this name under
        ``reveal_url_prefix``/css/theme/``reveal_theme``.css.
        https://github.com/hakimel/reveal.js/tree/master/css/theme has
        list of themes that ship by default with reveal.js.
        """,
    ).tag(config=True)

    reveal_transition = Unicode(
        "slide",
        help="""
        Name of the reveal.js transition to use.
        The list of transitions that ships by default with reveal.js are:
        none, fade, slide, convex, concave and zoom.
        """,
    ).tag(config=True)

    reveal_scroll = Bool(
        False,
        help="""
        If True, enable scrolling within each slide
        """,
    ).tag(config=True)

    reveal_number = Unicode(
        "",
        help="""
        slide number format (e.g. 'c/t'). Choose from:
        'c': current, 't': total, 'h': horizontal, 'v': vertical
        """,
    ).tag(config=True)

    font_awesome_url = Unicode(
        "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.css",
        help="""
        URL to load font awesome from.
        Defaults to loading from cdnjs.
        """,
    ).tag(config=True)

    def _init_resources(self, resources):
        """Expose the reveal_* traits to templates under resources['reveal']."""
        resources = super()._init_resources(resources)
        reveal_cfg = resources.setdefault("reveal", {})
        reveal_cfg["url_prefix"] = self.reveal_url_prefix
        reveal_cfg["theme"] = self.reveal_theme
        reveal_cfg["transition"] = self.reveal_transition
        reveal_cfg["scroll"] = self.reveal_scroll
        reveal_cfg["number"] = self.reveal_number
        return resources

View File

@ -0,0 +1,648 @@
"""This module defines TemplateExporter, a highly configurable converter
that uses Jinja2 to export notebook files into different formats.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import os
import uuid
import warnings
from pathlib import Path
from jinja2 import (
BaseLoader,
ChoiceLoader,
DictLoader,
Environment,
FileSystemLoader,
TemplateNotFound,
)
from jupyter_core.paths import jupyter_path
from traitlets import Bool, Dict, HasTraits, List, Unicode, default, observe, validate
from traitlets.config import Config
from traitlets.utils.importstring import import_item
from nbconvert import filters
from .exporter import Exporter
# Jinja2 extensions to load.
JINJA_EXTENSIONS = ["jinja2.ext.loopcontrols"]

# Directory containing this module; used below to detect a development
# (in-repo) checkout by the presence of sibling setup.py and share dirs.
ROOT = os.path.dirname(__file__)
DEV_MODE = os.path.exists(os.path.join(ROOT, "../../setup.py")) and os.path.exists(
    os.path.join(ROOT, "../../share")
)

# Filters registered on every TemplateExporter's Jinja environment, keyed by
# the name templates use to invoke them.  Values are plain functions or
# configurable filter classes from nbconvert.filters.
default_filters = {
    "indent": filters.indent,
    "markdown2html": filters.markdown2html,
    "markdown2asciidoc": filters.markdown2asciidoc,
    "ansi2html": filters.ansi2html,
    "filter_data_type": filters.DataTypeFilter,
    "get_lines": filters.get_lines,
    "highlight2html": filters.Highlight2HTML,
    "highlight2latex": filters.Highlight2Latex,
    "ipython2python": filters.ipython2python,
    "posix_path": filters.posix_path,
    "markdown2latex": filters.markdown2latex,
    "markdown2rst": filters.markdown2rst,
    "comment_lines": filters.comment_lines,
    "strip_ansi": filters.strip_ansi,
    "strip_dollars": filters.strip_dollars,
    "strip_files_prefix": filters.strip_files_prefix,
    "html2text": filters.html2text,
    "add_anchor": filters.add_anchor,
    "ansi2latex": filters.ansi2latex,
    "wrap_text": filters.wrap_text,
    "escape_latex": filters.escape_latex,
    "citation2latex": filters.citation2latex,
    "path2url": filters.path2url,
    "add_prompts": filters.add_prompts,
    "ascii_only": filters.ascii_only,
    "prevent_list_blocks": filters.prevent_list_blocks,
    "get_metadata": filters.get_metadata,
    "convert_pandoc": filters.convert_pandoc,
    "json_dumps": json.dumps,
    # browsers will parse </script>, closing a script tag early
    # Since JSON allows escaping forward slash, this will still be parsed by JSON
    "escape_html_script": lambda x: x.replace("</script>", "<\\/script>"),
    "strip_trailing_newline": filters.strip_trailing_newline,
    "text_base64": filters.text_base64,
}
# copy of https://github.com/jupyter/jupyter_server/blob/b62458a7f5ad6b5246d2f142258dedaa409de5d9/jupyter_server/config_manager.py#L19
def recursive_update(target, new):
    """Recursively update one dictionary using another.

    None values will delete their keys.
    """
    for key, value in new.items():
        if isinstance(value, dict):
            # Merge nested dicts in place, creating the subtree if needed.
            node = target.setdefault(key, {})
            recursive_update(node, value)
            if not node:
                # Prune empty subdicts
                del target[key]
        elif value is None:
            # None is a deletion marker.
            target.pop(key, None)
        else:
            target[key] = value
    return target  # return for convenience
# define function at the top level to avoid pickle errors
def deprecated(msg):
    """Emit *msg* as a ``DeprecationWarning``."""
    warnings.warn(msg, category=DeprecationWarning)
class ExtensionTolerantLoader(BaseLoader):
    """A template loader which optionally adds a given extension when searching.

    Constructor takes two arguments: *loader* is another Jinja loader instance
    to wrap. *extension* is the extension, which will be added to the template
    name if finding the template without it fails. This should include the dot,
    e.g. '.tpl'.
    """

    def __init__(self, loader, extension):
        self.loader = loader
        self.extension = extension

    def get_source(self, environment, template):
        """Return the template source, retrying with the extension appended."""
        try:
            return self.loader.get_source(environment, template)
        except TemplateNotFound:
            if template.endswith(self.extension):
                # Already tried the suffixed name; give up.
                raise TemplateNotFound(template)
            return self.loader.get_source(environment, template + self.extension)

    def list_templates(self):
        """Delegate to the wrapped loader."""
        return self.loader.list_templates()
class TemplateExporter(Exporter):
"""
Exports notebooks into other file formats. Uses Jinja 2 templating engine
to output new formats. Inherit from this class if you are creating a new
template type along with new filters/preprocessors. If the filters/
preprocessors provided by default suffice, there is no need to inherit from
this class. Instead, override the template_file and file_extension
traits via a config file.
Filters available by default for templates:
{filters}
"""
# finish the docstring
__doc__ = __doc__.format(filters="- " + "\n - ".join(sorted(default_filters.keys())))
    # Lazily-built Jinja template object; cleared by _invalidate_template_cache
    # whenever a trait tagged affects_template changes.
    _template_cached = None

    def _invalidate_template_cache(self, change=None):
        """Drop the cached template so the next access reloads it."""
        self._template_cached = None

    @property
    def template(self):
        """The compiled Jinja template, loaded on first access."""
        if self._template_cached is None:
            self._template_cached = self._load_template()
        return self._template_cached
    # Lazily-built Jinja Environment; cleared (together with the template
    # cache) whenever a trait tagged affects_environment changes.
    _environment_cached = None

    def _invalidate_environment_cache(self, change=None):
        """Drop the cached environment; the template depends on it, so drop that too."""
        self._environment_cached = None
        self._invalidate_template_cache()

    @property
    def environment(self):
        """The Jinja Environment, created on first access."""
        if self._environment_cached is None:
            self._environment_cached = self._create_environment()
        return self._environment_cached
@property
def default_config(self):
c = Config(
{
"RegexRemovePreprocessor": {"enabled": True},
"TagRemovePreprocessor": {"enabled": True},
}
)
c.merge(super().default_config)
return c
template_name = Unicode(help="Name of the template to use").tag(
config=True, affects_template=True
)
template_file = Unicode(None, allow_none=True, help="Name of the template file to use").tag(
config=True, affects_template=True
)
raw_template = Unicode("", help="raw template string").tag(affects_environment=True)
enable_async = Bool(False, help="Enable Jinja async template execution").tag(
affects_environment=True
)
_last_template_file = ""
_raw_template_key = "<memory>"
    @validate("template_name")
    def _template_name_validate(self, change):
        """Translate 5.x-style '.tpl' template names into the 6.x layout.

        A legacy name like ``/abs/dir/name/file.tpl`` is split so that
        ``file.tpl`` becomes template_file, ``name`` becomes the template
        name, and an absolute leading directory becomes an extra template
        base dir.
        """
        template_name = change["value"]
        if template_name and template_name.endswith(".tpl"):
            warnings.warn(
                f"5.x style template name passed '{self.template_name}'. Use --template-name for the template directory with a index.<ext>.j2 file and/or --template-file to denote a different template.",
                DeprecationWarning,
            )
            directory, self.template_file = os.path.split(self.template_name)
            if directory:
                directory, template_name = os.path.split(directory)
                if directory:
                    # Only an absolute remainder can serve as a search base dir.
                    if os.path.isabs(directory):
                        self.extra_template_basedirs = [directory]
        return template_name
    @observe("template_file")
    def _template_file_changed(self, change):
        """Normalize template_file assignments.

        'default' is resolved to the exporter's default template; a path to an
        existing file is split into a directory (added to the search path) and
        a bare file name; legacy '.tpl' names trigger a deprecation warning.
        """
        new = change["new"]
        if new == "default":
            self.template_file = self.default_template
            return
        # check if template_file is a file path
        # rather than a name already on template_path
        full_path = os.path.abspath(new)
        if os.path.isfile(full_path):
            directory, self.template_file = os.path.split(full_path)
            self.extra_template_paths = [directory] + self.extra_template_paths
        # While not strictly an invalid template file name, the extension hints that there isn't a template directory involved
        if self.template_file.endswith(".tpl"):
            warnings.warn(
                f"5.x style template file passed '{new}'. Use --template-name for the template directory with a index.<ext>.j2 file and/or --template-file to denote a different template.",
                DeprecationWarning,
            )
@default("template_file")
def _template_file_default(self):
if self.template_extension:
return "index" + self.template_extension
    @observe("raw_template")
    def _raw_template_changed(self, change):
        """When raw_template is cleared, restore the previously active template file."""
        if not change["new"]:
            self.template_file = self._last_template_file
        self._invalidate_template_cache()
template_paths = List(["."]).tag(config=True, affects_environment=True)
extra_template_basedirs = List().tag(config=True, affects_environment=True)
extra_template_paths = List([]).tag(config=True, affects_environment=True)
@default("extra_template_basedirs")
def _default_extra_template_basedirs(self):
return [os.getcwd()]
# Extension that the template files use.
template_extension = Unicode().tag(config=True, affects_environment=True)
template_data_paths = List(
jupyter_path("nbconvert", "templates"), help="Path where templates can be installed too."
).tag(affects_environment=True)
# Extension that the template files use.
template_extension = Unicode().tag(config=True, affects_environment=True)
@default("template_extension")
def _template_extension_default(self):
if self.file_extension:
return self.file_extension + ".j2"
else:
return self.file_extension
    # Content switches: each flag excludes one kind of cell content from the
    # rendered output (consumed via resources['global_content_filter']).
    exclude_input = Bool(
        False, help="This allows you to exclude code cell inputs from all templates if set to True."
    ).tag(config=True)

    exclude_input_prompt = Bool(
        False, help="This allows you to exclude input prompts from all templates if set to True."
    ).tag(config=True)

    exclude_output = Bool(
        False,
        help="This allows you to exclude code cell outputs from all templates if set to True.",
    ).tag(config=True)

    exclude_output_prompt = Bool(
        False, help="This allows you to exclude output prompts from all templates if set to True."
    ).tag(config=True)

    exclude_output_stdin = Bool(
        True,
        help="This allows you to exclude output of stdin stream from lab template if set to True.",
    ).tag(config=True)

    exclude_code_cell = Bool(
        False, help="This allows you to exclude code cells from all templates if set to True."
    ).tag(config=True)

    exclude_markdown = Bool(
        False, help="This allows you to exclude markdown cells from all templates if set to True."
    ).tag(config=True)

    exclude_raw = Bool(
        False, help="This allows you to exclude raw cells from all templates if set to True."
    ).tag(config=True)

    exclude_unknown = Bool(
        False, help="This allows you to exclude unknown cells from all templates if set to True."
    ).tag(config=True)

    extra_loaders = List(
        help="Jinja loaders to find templates. Will be tried in order "
        "before the default FileSystem ones.",
    ).tag(affects_environment=True)

    filters = Dict(
        help="""Dictionary of filters, by name and namespace, to add to the Jinja
        environment."""
    ).tag(config=True, affects_environment=True)

    raw_mimetypes = List(
        help="""formats of raw cells to be included in this Exporter's output."""
    ).tag(config=True)

    @default("raw_mimetypes")
    def _raw_mimetypes_default(self):
        # By default, pass through raw cells matching this exporter's own
        # mimetype, or raw cells with no mimetype at all.
        return [self.output_mimetype, ""]
    # TODO: passing config is wrong, but changing this revealed more complicated issues
    def __init__(self, config=None, **kw):
        """
        Public constructor

        Parameters
        ----------
        config : config
            User configuration instance.
        extra_loaders : list[of Jinja Loaders]
            ordered list of Jinja loader to find templates. Will be tried in order
            before the default FileSystem ones.
        template_file : str (optional, kw arg)
            Template to use when exporting.
        """
        super().__init__(config=config, **kw)

        # Wire cache invalidation to the traits that affect the Jinja
        # environment and the compiled template respectively.
        self.observe(
            self._invalidate_environment_cache, list(self.traits(affects_environment=True))
        )
        self.observe(self._invalidate_template_cache, list(self.traits(affects_template=True)))
    def _load_template(self):
        """Load the Jinja template object from the template file

        This is triggered by various trait changes that would change the template.

        Returns
        -------
        jinja2.Template
            The compiled template, looked up through self.environment.
        """
        # this gives precedence to a raw_template if present
        with self.hold_trait_notifications():
            # Remember the real file name so it can be restored if
            # raw_template is later cleared (see _raw_template_changed).
            if self.template_file != self._raw_template_key:
                self._last_template_file = self.template_file
            if self.raw_template:
                self.template_file = self._raw_template_key

        if not self.template_file:
            raise ValueError("No template_file specified!")

        # First try to load the
        # template by name with extension added, then try loading the template
        # as if the name is explicitly specified.
        template_file = self.template_file
        self.log.debug("Attempting to load template %s", template_file)
        self.log.debug("    template_paths: %s", os.pathsep.join(self.template_paths))
        return self.environment.get_template(template_file)
    def from_notebook_node(self, nb, resources=None, **kw):
        """
        Convert a notebook from a notebook node instance.

        Parameters
        ----------
        nb : :class:`~nbformat.NotebookNode`
            Notebook node
        resources : dict
            Additional resources that can be accessed read/write by
            preprocessors and filters.

        Returns
        -------
        output : str
            The rendered template output.
        resources : dict
            The (possibly updated) resources dict.
        """
        nb_copy, resources = super().from_notebook_node(nb, resources, **kw)
        resources.setdefault("raw_mimetypes", self.raw_mimetypes)
        # Expose the exclude_* switches to templates as a single mapping.
        resources["global_content_filter"] = {
            "include_code": not self.exclude_code_cell,
            "include_markdown": not self.exclude_markdown,
            "include_raw": not self.exclude_raw,
            "include_unknown": not self.exclude_unknown,
            "include_input": not self.exclude_input,
            "include_output": not self.exclude_output,
            "include_output_stdin": not self.exclude_output_stdin,
            "include_input_prompt": not self.exclude_input_prompt,
            "include_output_prompt": not self.exclude_output_prompt,
            "no_prompt": self.exclude_input_prompt and self.exclude_output_prompt,
        }

        # Top level variables are passed to the template_exporter here.
        output = self.template.render(nb=nb_copy, resources=resources)
        # Drop leading blank lines produced by template block boundaries.
        output = output.lstrip("\r\n")
        return output, resources
    def _register_filter(self, environ, name, jinja_filter):
        """
        Register a filter.
        A filter is a function that accepts and acts on one string.
        The filters are accessible within the Jinja templating engine.

        Parameters
        ----------
        environ : jinja2.Environment
            environment whose ``filters`` mapping receives the filter
        name : str
            name to give the filter in the Jinja engine
        jinja_filter : str, callable, or HasTraits subclass
            dotted import string, ready callable, or filter class to
            instantiate; anything else raises TypeError
        """
        if jinja_filter is None:
            raise TypeError("filter")
        isclass = isinstance(jinja_filter, type)
        constructed = not isclass

        # Handle filter's registration based on it's type
        if constructed and isinstance(jinja_filter, (str,)):
            # filter is a string, import the namespace and recursively call
            # this register_filter method
            filter_cls = import_item(jinja_filter)
            return self._register_filter(environ, name, filter_cls)

        if constructed and hasattr(jinja_filter, "__call__"):  # noqa
            # filter is a function, no need to construct it.
            environ.filters[name] = jinja_filter
            return jinja_filter

        elif isclass and issubclass(jinja_filter, HasTraits):
            # filter is configurable. Make sure to pass in new default for
            # the enabled flag if one was specified.
            # Recurse so the constructed instance goes through the callable path.
            filter_instance = jinja_filter(parent=self)
            self._register_filter(environ, name, filter_instance)

        elif isclass:
            # filter is not configurable, construct it
            filter_instance = jinja_filter()
            self._register_filter(environ, name, filter_instance)

        else:
            # filter is an instance of something without a __call__
            # attribute.
            raise TypeError("filter")
def register_filter(self, name, jinja_filter):
"""
Register a filter.
A filter is a function that accepts and acts on one string.
The filters are accessible within the Jinja templating engine.
Parameters
----------
name : str
name to give the filter in the Jinja engine
filter : filter
"""
return self._register_filter(self.environment, name, jinja_filter)
def default_filters(self):
"""Override in subclasses to provide extra filters.
This should return an iterable of 2-tuples: (name, class-or-function).
You should call the method on the parent class and include the filters
it provides.
If a name is repeated, the last filter provided wins. Filters from
user-supplied config win over filters provided by classes.
"""
return default_filters.items()
    def _create_environment(self):
        """
        Create the Jinja templating environment.

        Builds the loader chain (user extra loaders first, then the
        extension-tolerant filesystem loader, then the in-memory raw
        template), then registers default and user filters.
        """
        paths = self.template_paths
        self.log.debug("Template paths:\n\t%s", "\n\t".join(paths))

        loaders = self.extra_loaders + [
            ExtensionTolerantLoader(FileSystemLoader(paths), self.template_extension),
            DictLoader({self._raw_template_key: self.raw_template}),
        ]
        environment = Environment(
            loader=ChoiceLoader(loaders),
            extensions=JINJA_EXTENSIONS,
            enable_async=self.enable_async,
        )

        environment.globals["uuid4"] = uuid.uuid4

        # Add default filters to the Jinja2 environment
        for key, value in self.default_filters():
            self._register_filter(environment, key, value)

        # Load user filters. Overwrite existing filters if need be.
        if self.filters:
            for key, user_filter in self.filters.items():
                self._register_filter(environment, key, user_filter)

        return environment
    def _init_preprocessors(self):
        """Register preprocessors declared in the template's conf.json files."""
        super()._init_preprocessors()
        conf = self._get_conf()
        preprocessors = conf.get("preprocessors", {})
        # preprocessors is a dict for three reasons
        # * We rely on recursive_update, which can only merge dicts, lists will be overwritten
        # * We can use the key with numerical prefixing to guarantee ordering (/etc/*.d/XY-file style)
        # * We can disable preprocessors by overwriting the value with None
        for _, preprocessor in sorted(preprocessors.items(), key=lambda x: x[0]):
            if preprocessor is not None:
                kwargs = preprocessor.copy()
                preprocessor_cls = kwargs.pop("type")
                preprocessor_cls = import_item(preprocessor_cls)
                # User config for the class (keyed by class name) overrides
                # the conf.json-provided kwargs.
                if preprocessor_cls.__name__ in self.config:
                    kwargs.update(self.config[preprocessor_cls.__name__])
                preprocessor = preprocessor_cls(**kwargs)
                self.register_preprocessor(preprocessor)
def _get_conf(self):
conf = {} # the configuration once all conf files are merged
for path in map(Path, self.template_paths):
conf_path = path / "conf.json"
if conf_path.exists():
with conf_path.open() as f:
conf = recursive_update(conf, json.load(f))
return conf
    @default("template_paths")
    def _template_paths(self, prune=True, root_dirs=None):
        """Build the ordered list of directories searched for templates.

        Order: template dirs under extra_template_basedirs and the prefix
        root dirs (most-specific template name first), then the root/base/
        compatibility dirs, then extra_template_paths and template_data_paths.

        NOTE(review): the ``root_dirs`` parameter is immediately shadowed by
        ``self.get_prefix_root_dirs()`` below and is therefore ignored —
        presumably a leftover; confirm before relying on it.
        """
        paths = []
        root_dirs = self.get_prefix_root_dirs()
        template_names = self.get_template_names()
        for template_name in template_names:
            for base_dir in self.extra_template_basedirs:
                path = os.path.join(base_dir, template_name)
                if not prune or os.path.exists(path):
                    paths.append(path)
            for root_dir in root_dirs:
                base_dir = os.path.join(root_dir, "nbconvert", "templates")
                path = os.path.join(base_dir, template_name)
                if not prune or os.path.exists(path):
                    paths.append(path)

        for root_dir in root_dirs:
            # we include root_dir for when we want to be very explicit, e.g.
            # {% extends 'nbconvert/templates/classic/base.html' %}
            paths.append(root_dir)
            # we include base_dir for when we want to be explicit, but less than root_dir, e.g.
            # {% extends 'classic/base.html' %}
            base_dir = os.path.join(root_dir, "nbconvert", "templates")
            paths.append(base_dir)

            compatibility_dir = os.path.join(root_dir, "nbconvert", "templates", "compatibility")
            paths.append(compatibility_dir)

        additional_paths = []
        for path in self.template_data_paths:
            if not prune or os.path.exists(path):
                additional_paths.append(path)

        return paths + self.extra_template_paths + additional_paths
@classmethod
def get_compatibility_base_template_conf(cls, name):
    """Return the hard-coded base-template conf for a 5.x-only template name.

    Kept for backwards compatibility with nbconvert 5.x template names;
    returns ``None`` when *name* has no known compatibility mapping.
    """
    # Hard-coded base template confs to use for backwards compatibility
    # for 5.x-only templates.
    compatibility_confs = {
        "display_priority": {"base_template": "base"},
        "full": {"base_template": "classic", "mimetypes": {"text/html": True}},
    }
    return compatibility_confs.get(name)
def get_template_names(self):
    """Return the template inheritance chain, most specific first.

    Starting from ``self.template_name``, each template's ``conf.json``
    may name a ``base_template``; the walk continues until a template
    declares no base.  Side effects: for 5.x-style ``.tpl`` names this
    sets ``self.template_file`` / ``self.template_name`` and emits a
    DeprecationWarning.  Raises ``ValueError`` when a template directory
    cannot be found or the merged conf does not support
    ``self.output_mimetype``.
    """
    # finds a list of template names where each successive template name is the base template
    template_names = []
    root_dirs = self.get_prefix_root_dirs()
    base_template = self.template_name
    merged_conf = {}  # the configuration once all conf files are merged
    while base_template is not None:
        template_names.append(base_template)
        conf = {}
        found_at_least_one = False
        # User-supplied extra base dirs are consulted first; a conf found
        # here is merged *under* nothing yet, but later prefix-dir confs
        # are merged on top of it via recursive_update(json, conf).
        for base_dir in self.extra_template_basedirs:
            template_dir = os.path.join(base_dir, base_template)
            if os.path.exists(template_dir):
                found_at_least_one = True
                conf_file = os.path.join(template_dir, "conf.json")
                if os.path.exists(conf_file):
                    with open(conf_file) as f:
                        conf = recursive_update(json.load(f), conf)
        for root_dir in root_dirs:
            template_dir = os.path.join(root_dir, "nbconvert", "templates", base_template)
            if os.path.exists(template_dir):
                found_at_least_one = True
                conf_file = os.path.join(template_dir, "conf.json")
                if os.path.exists(conf_file):
                    with open(conf_file) as f:
                        conf = recursive_update(json.load(f), conf)
        if not found_at_least_one:
            # Check for backwards compatibility template names
            # (5.x ".tpl" files shipped in the "compatibility" directory).
            for root_dir in root_dirs:
                compatibility_file = base_template + ".tpl"
                compatibility_path = os.path.join(
                    root_dir, "nbconvert", "templates", "compatibility", compatibility_file
                )
                if os.path.exists(compatibility_path):
                    found_at_least_one = True
                    warnings.warn(
                        f"5.x template name passed '{self.template_name}'. Use 'lab' or 'classic' for new template usage.",
                        DeprecationWarning,
                    )
                    # Redirect the exporter to the legacy file and restart
                    # the walk from its hard-coded base template.
                    self.template_file = compatibility_file
                    conf = self.get_compatibility_base_template_conf(base_template)
                    self.template_name = conf.get("base_template")
                    break
        if not found_at_least_one:
            paths = "\n\t".join(root_dirs)
            raise ValueError(
                "No template sub-directory with name %r found in the following paths:\n\t%s"
                % (base_template, paths)
            )
        # More specific templates (earlier iterations) win over their bases.
        merged_conf = recursive_update(dict(conf), merged_conf)
        base_template = conf.get("base_template")
    conf = merged_conf
    # Only mimetypes explicitly enabled (truthy) in the merged conf count.
    mimetypes = [mimetype for mimetype, enabled in conf.get("mimetypes", {}).items() if enabled]
    if self.output_mimetype and self.output_mimetype not in mimetypes and mimetypes:
        supported_mimetypes = "\n\t".join(mimetypes)
        raise ValueError(
            "Unsupported mimetype %r for template %r, mimetypes supported are: \n\t%s"
            % (self.output_mimetype, self.template_name, supported_mimetypes)
        )
    return template_names
def get_prefix_root_dirs(self):
    """Return the prefix directories that may hold template trees."""
    # We look at the usual jupyter locations, and for development purposes also
    # relative to the package directory (first entry, meaning with highest precedence)
    if DEV_MODE:
        dev_share = os.path.abspath(os.path.join(ROOT, "..", "..", "share", "jupyter"))
        return [dev_share, *jupyter_path()]
    return list(jupyter_path())
def _init_resources(self, resources):
    """Initialize conversion resources, exposing the ``deprecated`` helper.

    Delegates to the parent implementation, then adds the ``deprecated``
    callable so templates can emit deprecation notices.
    """
    prepared = super()._init_resources(resources)
    prepared["deprecated"] = deprecated
    return prepared

View File

@ -0,0 +1,39 @@
"""Base TestCase class for testing Exporters"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from ...tests.base import TestsBase
# Every raw-cell mimetype an exporter may be asked to include or exclude.
all_raw_mimetypes = {
    f"text/{subtype}"
    for subtype in ("x-python", "markdown", "html", "restructuredtext", "latex")
}
class ExportersTestsBase(TestsBase):
    """Contains base test functions for exporters"""

    # Exporter class under test; subclasses must override.
    exporter_class = None
    # Raw-cell mimetypes the exporter is expected to include;
    # None disables test_raw_cell_inclusion entirely.
    should_include_raw = None

    def _get_notebook(self, nb_name="notebook2.ipynb"):
        """Return the path of a fixture notebook shipped with the tests."""
        return os.path.join(self._get_files_path(), nb_name)

    def test_raw_cell_inclusion(self):
        """test raw cell inclusion based on raw_mimetype metadata"""
        if self.should_include_raw is None:
            return
        exporter = self.exporter_class()
        output, _ = exporter.from_filename(self._get_notebook("rawtest.ipynb"))
        expected = set(self.should_include_raw)
        for mimetype in expected:
            self.assertIn("raw %s" % mimetype, output, "should include %s" % mimetype)
        self.assertIn("no raw_mimetype metadata", output)
        for mimetype in all_raw_mimetypes - expected:
            self.assertNotIn("raw %s" % mimetype, output, "should exclude %s" % mimetype)
        self.assertNotIn("never be included", output)

View File

@ -0,0 +1,47 @@
"""
Contains CheesePreprocessor
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from ...preprocessors.base import Preprocessor
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class CheesePreprocessor(Preprocessor):
    """
    Test fixture preprocessor that adds a cheese tag to the resources object.
    """

    def __init__(self, **kw):
        """
        Public constructor; simply forwards keyword arguments to the base
        Preprocessor.
        """
        super().__init__(**kw)

    def preprocess(self, nb, resources):
        """
        Preprocessing applied to each notebook: sets
        ``resources["cheese"] = "real"`` so tests can verify that this
        preprocessor actually ran.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being converted
        resources : dictionary
            Additional resources used in the conversion process.  Allows
            preprocessors to pass variables into the Jinja engine.
        """
        resources["cheese"] = "real"
        return nb, resources

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,4 @@
{#- Test template: extends the lab index template and overrides the
    body_footer block with a UNIQUE marker that tests can search for. -#}
{%- extends 'lab/index.html.j2' -%}
{%- block body_footer -%}
UNIQUE
{%- endblock body_footer -%}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,59 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
"test input: input value\n"
]
},
{
"data": {
"text/plain": [
"'input value'"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"input(\"test input:\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@ -0,0 +1,240 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"[<matplotlib.lines.Line2D at 0x10f695240>]"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
},
{
"data": {
"image/png": [
"iVBORw0KGgoAAAANSUhEUgAAAu0AAAH/CAYAAADjSONqAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\n",
"AAAWJQAAFiUBSVIk8AAAIABJREFUeJzt3X/M7ndd3/HXezmJG7QpEdqyrbD21OYgLMbNmRK6aM+w\n",
"DKMhwDS4RNnmkM0xLIqMyTY5JSEwNy2jiJOxOu1MiEQLW8xoQTBDKttcIJlz/YE9BaFWZdACtoVp\n",
"3/vjum56zn3u69w/rl/f73U9HsmVb3t9r/tzf+/e33Puz/3s5/p+q7sDAAAM159Z9wEAAADnZ9IO\n",
"AAADZ9IOAAADZ9IOAAADZ9IOAAADZ9IOAAADZ9IOAAADZ9IOAAADZ9IOAAADZ9IOAAADZ9IOAAAD\n",
"Z9IOAAADZ9IOAAADZ9IOAAADN9ekvaq+q6puqqoPV9UXquqxqrrliGNdVlU3V9X9VfVoVZ2uqhur\n",
"6knzHCMAAIzdsTk//p8n+YYkX0zy6STPSNKHHaSqrkxyR5KLk7wnyZ1Jrk5yfZLnV9U13f25OY8V\n",
"AABGad7lMa9KclV3X5TkB+cY5+2ZTNhf2d0v7u7Xdfdzk9yY5ESSN855nAAAMFrVfegwvvdAVdcm\n",
"+WCS/9jdLz3Ex12Z5J4kp7v7yl37LkjyQCb1/tLufnghBwsAACMyhDeinpxub9+9o7u/lOQjSZ6Y\n",
"5NmrPCgAABiKIUzaT0y3d8/Yf890e9UKjgUAAAZnCJP2i6bbh2bs33neVWQAANhK8149ZlCqajEL\n",
"9AEAYB/dXav6XEMo7Tsl/aIZ+3eef3AFxwIAAIMzhNJ+53R7Ysb+nbXss9a876GTybXjX53knd2H\n",
"v3Y8m6WqepW/DTN8zgn24rxgL84LdlvH6o4hlPYPTbfXVdVZfyCq6sIk1yT54yQfPcSYtya5MMk7\n",
"ktxWlacv4kABAGAdVjZpr6pjVfWMqjp+5vPdfW8ml3u8Iskrdn3YDUmekOSW7n7kEJ/ubyX520n+\n",
"b5Lrkvx2VX6gKn5LBgBgdOa6uVJVvTDJC6f/+tQkz0tyb5LfmD73R939mulrL5/u+2R3X7FrnONJ\n",
"7khySZL3ZrJk5uok1ya5K8lzuvvzBzieTh5/U0BVLk3yM0leNH3J+5O8rDufOvQXy6j5X5vs5pxg\n",
"L84L9uK8YLfdc86VfM45J+2vT/L65Jw14ztfwH3dfXz62sszmbR/9bldY12W5A1Jnp/kyUnuz2SZ\n",
"yw3dPetykLvHOOc/4LSuvyTJ26bjWuu+hfyFy27OCfbivGAvzgt2G92kfWjO9x9Qdd9u/sJlN+cE\n",
"e3FesBfnBbutY9I+hDeirkR3/iDWugMAMEJbM2lPku50d96V5FlxhZltc8O6D4DBcU6wF+cFe3Fe\n",
"sHZbszzm3Nda6w4AwOFZ0z6no/wHtNYdAIDDsKZ9Dax1BwBg6La+tJ/98ao7AADnp7SvmeoOAMAQ\n",
"Ke0zx1LdAQA4l9I+IKo7AABDobQfaFzVHQCACaV9oFR3AADWSWk/9OdQ3QEAtpnSPgKqOwAAq6a0\n",
"z/X5VHcAgG2jtI+M6g4AwCoo7Qv73Ko7AMA2UNpHTHUHAGBZlPalHIfqDgCwqZT2DaG6AwCwSEr7\n",
"kqnuAACbRWnfQKo7AADzUtpXSHUHABg/pX3Dqe4AAByF0r4mqjsAwDgp7VtEdQcA4KCU9gFQ3QEA\n",
"xkNp31KqOwAA56O0D4zqDgAwbEo7qjsAAOdQ2gdMdQcAGB6lnbOo7gAAJEr7aKjuAADDoLQzk+oO\n",
"ALC9lPYRUt0BANZHaedAVHcAgO2itI+c6g4AsFpKO4emugMAbD6lfYOo7gAAy6e0MxfVHQBgMynt\n",
"G0p1BwBYDqWdhVHdAQA2h9K+BVR3AIDFUdpZCtUdAGDclPYto7oDAMxHaWfpVHcAgPFR2reY6g4A\n",
"cHhKOyulugMAjIPSThLVHQDgoJR21kZ1BwAYLqWdc6juAACzKe0MguoOADAsSjvnpboDAJxNaWdw\n",
"VHcAgPVT2jkw1R0AQGln4FR3AID1UNo5EtUdANhWSjujoboDAKyO0s7cVHcAYJso7YyS6g4AsFxK\n",
"OwulugMAm05pZ/RUdwCAxVPaWRrVHQDYREo7G0V1BwBYDKWdlVDdAYBNobSzsVR3AICjU9pZOdUd\n",
"ABgzpZ2toLoDAByO0s5aqe4AwNgo7Wwd1R0AYH9KO4OhugMAY6C0s9VUdwCAvSntDJLqDgAMldIO\n",
"U6o7AMDjlHYGT3UHAIZEaYc9qO4AwLZbyKS9qi6rqpur6v6qerSqTlfVjVX1pEOO8x1VdXtVfbqq\n",
"Hq6q362qX6qqZy/iOBmv7nR33pXkWUluTXJhknckua0qT1/rwQEALNncy2Oq6sokdyS5OMl7ktyZ\n",
"5OokJ5PcleSa7v7cAcb5l0lek+Sz03E+m+SqJC9IcizJS7v7F/cZw/KYLTCt6y9J8rYkT07yxSSv\n",
"TvLO7mzOei8AYJDWMedcxKT9tkyWK7yyu3/6jOd/MskPJ/nZ7v7BfcZ4apLPJPmDJN/Q3Z89Y9+1\n",
"ST6Y5HR3X7nPOCbtW8RadwBgHUY3aZ9W9nuyx4S6qi5I8kCSTnJpdz98nnGuTvKbSd7b3S/aY/8X\n",
"knR3X7TP8Zi0bxnVHQBYtTG+EfXkdHv77h3d/aUkH0nyxCT7rUm/O8lXklxdVU8+c0dVfUuSC5J8\n",
"YM5jZQNZ6w4AbIN5J+0nptu7Z+y/Z7q96nyDdPfnk7w2yaVJfqeq3lFVb6qqX0pyWya/FPyDOY+V\n",
"DeYKMwDAJpt30r6zXOWhGft3nt/3KjLd/W8ymXQdS/KyTCbx35Xk95L8/Jnr3GEvqjsAsKkGc532\n",
"qvonSd6d5OeSHE/yhCTflOTeJL84vbrMQcfq8zxOLeP4GQ7VHQCYV1WdmjWfXMfxzDtp3ynps94g\n",
"uvP8g+cbZHqFmDdn8kbUH+3u+7r70e7+WCZXBvlMkldX1RUHOajurvM8Th1kDMZNdQcA5tHdp2bN\n",
"J9dxPPNO2u+cbk/M2L+zln3Wmvcd3zndfmj3ju5+JMn/yORYv/GwB8h2U90BgE0w76R9Z5J9XVWd\n",
"NQGqqguTXJPkj5N8dJ9xvma6vWTG/oun268c5SDZbqo7ADB2c03au/veTK7sckWSV+zafUMm69Jv\n",
"mdbyVNWxqnpGVR3f9dr/Ot2+vKr+wpk7qurbM5n8P5LJnVfhSFR3AGCsFnFH1OOZTKYvSfLeTJbM\n",
"XJ3k2iR3JXnO9JKOqarLM3lj6Se7+4ozxqhMLu34bZncHOfWTO6O+vWZLJ3pJK/q7pv2ORY3V+JA\n",
"3E0VADiq0d0R9auDVF2W5A1Jnp/JXSnvz2TifUN3P3TG6y7PZNJ+X3cf3zXGsUxq/fckeWYmlf7/\n",
"JvnvSd7a3fveXMmkncNwN1UA4ChGO2kfCpN2jkJ1BwAOYx1zzsFcpx3WxVp3AGDolHY4g+oOAOxH\n",
"aYc1U90BgCFS2mEG1R0A2IvSDgOiugMAQ6G0wwGo7gDADqUdBkp1BwDWSWmHQ1LdAWC7Ke0wAqo7\n",
"ALBqSjvMQXUHgO2jtMPIqO4AwCoo7bAgqjsAbAelHUZMdQcAlkVphyVQ3QFgcyntsCFUdwBgkZR2\n",
"WDLVHQA2i9IOG0h1BwDmpbTDCqnuADB+SjtsONUdADgKpR3WRHUHgHFS2mGLqO4AwEEp7TAAqjsA\n",
"jIfSDltKdQcAzkdph4FR3QFg2JR2QHUHAM6htMOAqe4AMDxKO3AW1R0ASJR2GA3VHQCGQWkHZlLd\n",
"AWB7Ke0wQqo7AKyP0g4ciOoOANtFaYeRU90BYLWUduDQVHcA2HxKO2wQ1R0Alk9pB+aiugPAZlLa\n",
"YUOp7gCwHEo7sDCqOwBsDqUdtoDqDgCLo7QDS6G6A8C4Ke2wZVR3AJiP0g4sneoOAOOjtMMWU90B\n",
"4PCUdmClVHcAGAelHUiiugPAQSntwNqo7gAwXEo7cA7VHQBmU9qBQVDdAWBYlHbgvFR3ADib0g4M\n",
"juoOAOuntAMHproDgNIODJzqDgDrobQDR6K6A7CtlHZgNFR3AFgdpR2Ym+oOwDZR2oFRUt0BYLmU\n",
"dmChVHcANp3SDoye6g4Ai6e0A0ujugOwiZR2YKOo7gCwGEo7sBKqOwCbQmkHNpbqDgBHp7QDK6e6\n",
"AzBmSjuwFVR3ADgcpR1YK9UdgLFR2oGto7oDwP6UdmAwVHcAxkBpB7aa6g4Ae1PagUFS3QEYKqUd\n",
"YEp1B4DHKe3A4KnuAAyJ0g6wB9UdgG2ntAOjoroDsG5KO8A+VHcAtpHSDoyW6g7AOoyytFfVZVV1\n",
"c1XdX1WPVtXpqrqxqp50hLGeW1W3VtUD07E+U1Xvq6pvn/c4gc2jugOwLeYq7VV1ZZI7klyc5D1J\n",
"7kxydZKTSe5Kck13f+6AY/1Ekh9N8ntJ/kuSzya5JMlfTfKB7v6nBxhDaYctpboDsCrrmHPOO2m/\n",
"LZOy9cru/ukznv/JJD+c5Ge7+wcPMM4PJPnZJP8hycu7+0927T+2+7kZ45i0wxab1vWXJHlbkicn\n",
"+WKSVyd5Z3c2Zy0gAGs1qkn7tLLfk+R0d1+5a98FSR5I0kku7e6HzzPO12RS1/84yVUHmZyfZyyT\n",
"dkB1B2Cpxram/eR0e/vuHd39pSQfSfLEJM/eZ5zrkjwlya8k6ar6jqp6bVVdX1X7fSzAOax1B2DT\n",
"zDNpPzHd3j1j/z3T7VX7jPPN0+2Xk3w8yX9O8qYkNya5o6p+vaqeMsdxAluoO92ddyV5VpJbk1yY\n",
"5B1JbqvK09d6cABwSPNM2i+abh+asX/n+f2uInPJdPuaJH+a5K8nuSDJN2RS8b8lybuPfpjANlPd\n",
"AdgEQ7i50s4x/L8kL+juO7r74e7+7UzWo346ybdaKgMcleoOwNjNM2nfKekXzdi/8/yD+4yzs/9j\n",
"3X3Wm8S6+5Ekt03/9ZtzQFXV53mcOug4wGZR3QE4qKo6NWs+uY7jmWfSfud0e2LG/p217LPWvO8e\n",
"Z9bkfuf5P3fA40p313kepw46DrB5VHcADqK7T82aT67jeOaZtH9our2uqs46+Kq6MMk1mVzG8aP7\n",
"jPNrmVwa8pm7x5n6y9Pt6TmOFeAsqjsAY3LkSXt335vJG0WvSPKKXbtvSPKEJLdMl7ikqo5V1TOq\n",
"6viucT6VyRVj/lKS68/cV1XPS/I3k3w+yfuOeqwAe1HdARiLee+IejzJHZlcAea9mSx1uTrJtUnu\n",
"SvKc7v789LWXJ7k3ySe7+4pd4/zF6ThPy6S8fzyTXwZemMkVZb6nu289wPG4uRJwJO6mCsBBjeqO\n",
"qF8doOqyJG9I8vxMftDdn0mxuqG7HzrjdZdnMmm/r7uP7zHOU5L8eJIXJPnzmbzR9cNJ3tTdv3XA\n",
"YzFpB+bibqoA7GeUk/YhMWkHFkF1B+B8TNrnZNIOLJLqDsBe1jHnHMLNlQAGyRVmABgKpR3gAFR3\n",
"AHYo7QADpboDsE5KO8Ahqe4A201pBxgB1R2AVVPaAeagugNsH6UdYGRUdwBWQWkHWBDVHWA7KO0A\n",
"I6a6A7AsSjvAEqjuAJtLaQfYEKo7AIuktAMsmeoOsFmUdoANpLoDMC+lHWCFVHeA8VPaATac6g7A\n",
"USjtAGuiugOMk9IOsEVUdwAOSmkHGADVHWA8lHaALaW6A3A+SjvAwKjuAMOmtAOgugNwDqUdYMBU\n",
"d4DhUdoBOIvqDkCitAOMhuoOMAxKOwAzqe4A20tpBxgh1R1gfZR2AA5EdQfYLko7wMip7gCrpbQD\n",
"cGiqO8DmU9oBNojqDrB8SjsAc1HdATaT0g6woVR3gOVQ2gFYGNUdYHMo7QBbQHUHWBylHYClUN0B\n",
"xk1pB9gyqjvAfJR2AJZOdQcYH6UdYIup7gCHp7QDsFKqO8A4KO0AJFHdAQ5KaQdgbVR3gOFS2gE4\n",
"h+oOMJvSDsAgqO4Aw6K0A3BeqjvA2ZR2AAZHdQdYP6UdgANT3QGUdgAGTnUHWA+lHYAjUd2BbaW0\n",
"AzAaqjvA6ijtAMxNdQe2idIOwCip7gDLpbQDsFCqO7DplHYARk91B1g8pR2ApVHdgU2ktAOwUVR3\n",
"gMVQ2gFYCdUd2BRKOwAbS3UHODqlHYCVU92BMVPaAdgKqjvA4SjtAKyV6g6MjdIOwNZR3QH2p7QD\n",
"MBiqOzAGSjsAW011B9ib0g7AIKnuwFAp7QAwpboDPE5pB2DwVHdgSJR2ANiD6g5sO6UdgFFR3YF1\n",
"U9oBYB+qO7CNlHYARkt1B9ZBaQeAQ1DdgW2htAOwEVR3YFWUdgA4ItUd2GRzT9qr6rKqurmq7q+q\n",
"R6vqdFXdWFVPmmPM762qx6aPvz/vMQKwHbrT3XlXkmcluTXJhUnekeS2qjx9rQcHMIe5Ju1VdWWS\n",
"/5nk7yb5aJKfSnJvkuuT/GZVfe0Rxnxakrcl+dL0qc1ZvwPASqjuwKaZt7S/PcnFSV7Z3S/u7td1\n",
"93OT3JjkRJI3HmawqqokP5fkj5L82zmPDYAtproDm+TIk/ZpZb8uyenu/uldu1+f5OEk31tVTzjE\n",
"sD+U5GSSvzf9eACYi+oObIJ5SvvJ6fb23Tu6+0tJPpLkiUmefZDBqurrk7w5yVu6+zfmOC4AOIvq\n",
"DozdPJP2E9Pt3TP23zPdXrXfQFV1LMktSe5L8ro5jgkAZlLdgbGaZ9J+0XT70Iz9O88f5CoyP57k\n",
"G5P83e7+8hzHBADnpboDY7T267RX1dVJfizJv+ru/7bu4wFgO6juwJjMM2nfKekXzdi/8/yDswaY\n",
"Lov5hSR3ZfLm1T1fdtgDq6o+z+PUYccDYDOp7sAsVXVq1nxyHcczz6T9zun2xIz9O2vZZ615T5IL\n",
"pq97ZpJHz7ih0mOZLJlJkn83fe7Ggx5Yd9d5HqcOOg4A20F1B3br7lOz5pPrOJ7qPtovC1V1PMkn\n",
"kpxO8nV9xkBVdWGS38/kxkiXdPcjM8b4s0luyt43UPqmJH8lyYczKfHv7+5373NMnUwm7Yf+ggAg\n",
"SVUuTfIzSV40fer9SV7WnU+t76iAIVnHnPPIk/Ykqar3JXlekh/q7red8fxPJXlVkn/b3f9o+tyx\n",
"JF+X5Cvdfe8Bxj6VSW1/WXfffMDjMWkHYG7Tuv6STO7Q/eQkX0zy6iTv7Hanbth265hzzvtG1H+U\n",
"5A+TvLWqbq2qN1XVBzOZsN+V5J+d8drLkvxOkl+b83MCwFJZ6w4MzVyT9mkx/2tJ/kOSq5P8SJIr\n",
"krwlybO7+/N7fdhBhz/EawFg4ax1B4ZiruUxQ2N5DADLYq07sGOMy2MAYCuo7sA6Ke0AcEiqO2w3\n",
"pR0ARkB1B1ZNaQeAOajusH2UdgAYGdUdWAWlHQAWRHWH7aC0A8CIqe7AsijtALAEqjtsLqUdADaE\n",
"6g4sktIOAEumusNmUdoBYAOp7sC8lHYAWCHVHcZPaQeADae6A0ehtAPAmqjuME5KOwBsEdUdOCil\n",
"HQAGQHWH8VDaAWBLqe7A+SjtADAwqjsMm9IOAKjuwDmUdgAYMNUdhkdpBwDOoroDidIOAKOhusMw\n",
"KO0AwEyqO2wvpR0ARkh1h/VR2gGAA1HdYbso7QAwcqo7rJbSDgAcmuoOm09pB4ANorrD8intAMBc\n",
"VHfYTEo7AGwo1R2WQ2kHABZGdYfNobQDwBZQ3WFxlHYAYClUdxg3pR0AtozqDvNR2gGApVPdYXyU\n",
"dgDYYqo7HJ7SDgCslOoO46C0AwBJVHc4KKUdAFgb1R2GS2kHAM6husNsSjsAMAiqOwyL0g4AnJfq\n",
"DmdT2gGAwVHdYf2UdgDgwFR3UNoBgIFT3WE9lHYA4EhUd7aV0g4AjIbqDqujtAMAc1Pd2SZKOwAw\n",
"Sqo7LJfSDgAslOrOplPaAYDRU91h8ZR2AGBpVHc2kdIOAGwU1R0WQ2kHAFZCdWdTKO0AwMZS3eHo\n",
"lHYAYOVUd8ZMaQcAtoLqDoejtAMAa6W6MzZKOwCwdVR32J/SDgAMhurOGCjtAMBWU91hb0o7ADBI\n",
"qjtDpbQDAEyp7vA4pR0AGDzVnSFR2gEA9qC6s+2UdgBgVFR31k1pBwDYh+rONlLaAYDRUt1ZB6Ud\n",
"AOAQVHe2hdIOAGwE1Z1VUdoBAI5IdWeTKe0AwMZR3VkmpR0AYAFUdzaN0g4AbDTVnUVT2gEAFkx1\n",
"ZxMsZNJeVZdV1c1VdX9VPVpVp6vqxqp60gE//mur6mVVdWtVfaKqHq6qB6vqw1X1/VXlDxQAcGTd\n",
"6e68K8mzktya5MIk70hyW1WevtaDgwOYe3lMVV2Z5I4kFyd5T5I7k1yd5GSSu5Jc092f22eMf5jk\n",
"7UnuT/KhJJ9K8tQkL05yUZJf7u7vPsCxWB4DAJzXtK6/JMnbkjw5yReTvDrJO7uzOeuGWZp1zDkX\n",
"MWm/LZP/zfTK7v7pM57/ySQ/nORnu/sH9xnjZJIndPev7nr+0iT/PcnTknxXd//KPuOYtAMAB2Kt\n",
"O0c1ukn7tLLfk+R0d1+5a98FSR5I0kku7e6Hj/g5fizJG5Pc1N3X7/Nak3YA4MBUd45ijG9EPTnd\n",
"3r57R3d/KclHkjwxybPn+Bx/smsLALAQ1rozFvNO2k9Mt3fP2H/PdHvVUQavqmNJXjr91/cdZQwA\n",
"gP24wgxDN++k/aLp9qEZ+3eeP9BVZPbw5kx+8/3V7n7/EccAANiX6s6QDfY67VX1Q0l+JMn/SfJ9\n",
"h/zYPs/j1DKOFwDYDKo7SVJVp2bNJ9dxPPNO2ndK+kUz9u88/+BhBq2qf5zkLUn+d5KT3X2oj+/u\n",
"Os/j1GHGAgC2j+pOd5+aNZ9cx/HMO2m/c7o9MWP/zlr2WWvez1FVr0ry1iT/K5MJ+x8e/fAAAI5O\n",
"dWco5r3k4/Ekn0hyOsnX9RmDVdWFSX4/k0s+XtLdjxxgvNcmeVOSjyW5br+bMu3x8S75CAAsheu6\n",
"s2N0l3zs7nszudzjFUlesWv3DUmekOSWnQl7VR2rqmdMJ/tnqap/kcmE/beSPPewE3YAgGVS3Vmn\n",
"RdwR9XiSO5JckuS9mSyZuTrJtUnuSvKc7v789LWXJ7k3ySe7+4ozxvg7SX4uyZ8muSnJF/b4VKe7\n",
"++f3ORalHQBYOtV9u43ujqhfHaTqsiRvSPL8TO4mdn8mb9q4obsfOuN1l2cyab+vu4+f8fzrk7w+\n",
"k6U0s774X+/uv7HPcZi0AwAr4W6q22u0k/ahMGkHAFZNdd8+o1vTDgCw7ax1ZxWUdgCABVHdt4PS\n",
"DgAwYqo7y6K0AwAsgeq+uZR2AIANobqzSEo7AMCSqe6bRWkHANhAqjvzUtoBAFZIdR8/pR0AYMOp\n",
"7hyF0g4AsCaq+zgp7QAAW0R156CUdgCAAVDdx0NpBwDYUqo756O0AwAMjOo+bEo7AACqO+dQ2gEA\n",
"Bkx1Hx6lHQCAs6juJEo7AMBoqO7DoLQDADCT6r69lHYAgBFS3ddHaQcA4EBU9+2itAMAjJzqvlpK\n",
"OwAAh6a6bz6lHQBgg6juy6e0AwAwF9V9MyntAAAbSnVfDqUdAICFUd03h9IOALAFVPfFUdoBAFgK\n",
"1X3clHYAgC2jus9HaQcAYOlU9/FR2gEAtpjqfnhKOwAAK6W6j4PSDgBAEtX9oJR2AADWRnUfLqUd\n",
"AIBzqO6zKe0AAAyC6j4sSjsAAOelup9NaQcAYHBU9/VT2gEAODDVXWkHAGDgVPf1UNoBADiSba3u\n",
"SjsAAKOhuq+O0g4AwNy2qbor7QAAjJLqvlxKOwAAC7Xp1V1pBwBg9FT3xVPaAQBYmk2s7ko7AAAb\n",
"RXVfDKUdAICV2JTqrrQDALCxVPejU9oBAFi5MVd3pR0AgK2guh+O0g4AwFqNrbor7QAAbB3VfX9K\n",
"OwAAgzGG6q60AwCw1VT3vSntAAAM0lCru9IOAABTqvvjlHYAAAZvSNVdaQcAgD1se3VX2gEAGJV1\n",
"V3elHQAA9rGN1V1pBwBgtNZR3ZV2AAA4hG2p7ko7AAAbYVXVXWkHAIAj2uTqrrQDALBxllndlXYA\n",
"AFiATavuSjsAABtt0dVdaQcAgAXbhOqutAMAsDUWUd2VdgAAWKKxVnelHQCArXTU6j7K0l5Vl1XV\n",
"zVV1f1U9WlWnq+rGqnrSOsaBvVTVqXUfA8PinGAvzgv24rzYXGOq7nOV9qq6MskdSS5O8p4kdya5\n",
"OsnJJHcluaa7P7fCcZR29lRV7bzgTM4J9uK8YC/Oi+1wmOo+xtL+9kwm2q/s7hd39+u6+7lJbkxy\n",
"IskbVzwOAAAc2tCr+5FL+7SO35PkdHdfuWvfBUkeSNJJLu3uh5c9zvT1Sjt7UknYzTnBXpwX7MV5\n",
"sX32q+5jK+0np9vbd+/o7i8l+UiSJyZ59orGAQCAuQ2xus8zaT8x3d49Y/890+1VKxoHAAAWojvd\n",
"nXcleVaSW5NcmOQdSW5bx/HMM2m/aLp9aMb+nef3u/rLosYBAICFmlHdV+7YOj7psu2sM4IzOS/Y\n",
"zTnBXpwX7MV5wbrNU9p3CvhFM/bvPP/gisYBAICNNE9pv3O6PTFj/84a9Flr1Rc9jqvGAACwkea5\n",
"5OPxJJ9IcjrJ1/UZA1XVhUl+P5NLNV7S3Y8sexwAANhUR14e0933ZnKZxiuSvGLX7huSPCHJLTsT\n",
"7ao6VlXPmE7SjzwOAABsmyOX9uSrlfyOJJckeW8mS12uTnJtkruSPKe7Pz997eVJ7k3yye6+4qjj\n",
"AADAtplr0p4kVXVZkjckeX6SJye5P5NrWd7Q3Q+d8brLM5m039fdx486DgAAbJu5J+0AAMByzXPJ\n",
"RwAAYAVM2gEAYOAGPWmvqsuq6uaqur+qHq2q01V1Y1U9aR3jMAzzfj+r6mur6mVVdWtVfaKqHq6q\n",
"B6vqw1X1/VXlev8jtIw/51X1vVX12PTx9xd5vKzGIs+Lqnru9O+NB6Zjfaaq3ldV376MY2d5Fji/\n",
"+I6qur2qPj39WfK7VfVLVfXsZR07i1dV31VVN03nAV+Y/p1/yxHHWtqcc7Br2qvqykyuKHNxkvfk\n",
"8SvKnMzkijLXdPfnVjUOw7CI72dV/cMkb8/kzc4fSvKpJE9N8uJM7sD7y9393cv6Gli8Zfw5r6qn\n",
"JflfmcSNC5K8rLtvXuRxs1yLPC+q6ieS/GiS30vyX5J8NpMrnv3VJB/o7n+68C+ApVjg/OJfJnlN\n",
"JufCe6ZH+b3pAAAGc0lEQVTbq5K8IJObV760u39xGV8Di1VVH0/yDUm+mOQzSZ6R5D9290sPOc5y\n",
"55zdPchHktuSPJbkFbue/8np8z+zynE8hvFYxPdz+ofnO/Z4/tIkn5yO8+J1f60eqz0vdn1cJflA\n",
"knuS/MR0jO9f99fpsZ7zIskPTF9/c5Jje+w/5zmP4T4W9HPkqUn+NJP485Rd+66djvO76/5aPQ58\n",
"Tlyb5MrpP3/r9Pv3C0cYZ6lzzkGW9ulvKvckOd3dV+7ad0GSBzK5S+ql3f3wssdhGFbx/ayqH0vy\n",
"xiQ3dff1cx4yK7CM86Kqrk/yU5n85f1tSX48SvuoLPDnyNdkUtf/OMlV3f0nyztqlm2B58XVSX4z\n",
"yXu7+0V77P9Cku7uixZ5/CxfVV2b5IM5ZGlfxRxlqGvaT063t+/e0d1fSvKRJE9Mst+asUWNwzCs\n",
"4vv5J7u2DN9Cz4uq+vokb07ylu7+jUUdJCu3qPPiuiRPSfIrSXq6hvm1VXW9dcujtKjz4u4kX0ly\n",
"dVU9+cwdVfUtmSyp+8DcR8uYLH2OMtRJ+4np9u4Z+++Zbq9a0TgMw1K/n1V1LMnOb9XvO8oYrMXC\n",
"zovpOXBLkvuSvG7uI2OdFnVefPN0++UkH0/yn5O8KcmNSe6oql+vqqfMc6Cs1ELOi57cpf21mSyr\n",
"/J2qekdVvamqfimTJRK3J/kHCzhexmPpc85jR/3AJdv530mz7oS68/x+78Rd1DgMw7K/n29O8qwk\n",
"v9rd7z/iGKzeIs+LH0/yjZm8WejL8x4Ya7Wo8+KS6fY1Sf53kr+eyeT9eJJ/neR5Sd6dxysbw7aw\n",
"vy+6+99U1SeT/PskLztj1yeS/Hx3f/bIR8kYLX3OOdTSDitVVT+U5EeS/J8k37fmw2ENpmtUfyzJ\n",
"v+ru/7bu42Ewdn5O/r8kL+juO7r74e7+7SQvSvLpJN9qqcz2qap/kskvbD+XyS9xT0jyTUnuTfKL\n",
"06vLwMIMddK+89vIrDdw7Dz/4IrGYRiW8v2sqn+c5C2ZVLST3e18GJe5z4vpsphfyOSSXK+f9bIj\n",
"HR3rsqi/L3b2f6y7P3Xmju5+JJOlEMnjy2gYtoWcF9M3K745kzei/mh339fdj3b3xzL5Ze4zSV5d\n",
"VVcs4JgZh6XPOYc6ab9zuj0xY//OeqBZ64YWPQ7DsPDvZ1W9KslbM7ke98nu/sOjHx5rsojz4oLp\n",
"656Z5NEzbqj0WCZLZpLk302fu3HuI2YVFv1zZNYP2p3n/9wBj4v1WtR58Z3T7Yd275j+Mvc/Mplj\n",
"feNhD5DRWvqcc6hr2nf+EFxXVdVnXJeyqi5Mck0ml9/66IrGYRgW+v2sqtdm8oayjyW5rt1ka6wW\n",
"cV48msm61L2ugftNSf5Kkg9nUuLvWMRBs3SL+vvi1zI5L565e5ypvzzdnl7AMbN8izovvma6vWTG\n",
"/oun268c9UAZnaXPOQdZ2rv73kzeeX1Fklfs2n1DJuvGbpn+NpuqOlZVz6iq4/OMw7At6ryY7vsX\n",
"mUzYfyvJc03Yx2sR58X0f2v/QHe/fPcjk6uFJJM3lr28u9+9/K+KeS3w58inMjkH/lKSs+7dUFXP\n",
"S/I3k3w+rjg1Cgv8OfJfp9uXV9VfOHNHVX17JhO0R+KX/I2zzjnnIG+ulCTT/xh3ZPJb7Hvz+K1g\n",
"r82kdj1nesmlVNXlmbzx45PdfcVRx2H4FnFeVNXfyeSNQ3+a5KYkX9jjU53u7p9f1tfBYi3q74sZ\n",
"Y5+KmyuN0gJ/jvzF6ThPy6S8fzyTH8wvzOTvke/p7luX/gWxEAv6OVKZvJ/h25J8McmtSf4gyddn\n",
"snSmk7yqu29axdfEfKrqhZn8eU4md7t9Xibf9517dfxRd79m+trLs64552Fun7rqR5LLMrlt9P2Z\n",
"XCP3dCZ3Kbxo1+suz+T2sPfOM47HOB7znheZvNHwsUx+2D424/HBdX+dHqs9L84z7uun58r3r/tr\n",
"9FjfeZHJDZbemsk1/L+c5A+T/HKSv7bur9FjPedFJkuMr8/kzqgPZXKFoQeS/Kck37bur9HjUOfD\n",
"mfOCMx+P7f7+r3POOdjSDgAATAxyTTsAAPA4k3YAABg4k3YAABg4k3YAABg4k3YAABg4k3YAABg4\n",
"k3YAABg4k3YAABg4k3YAABg4k3YAABg4k3YAABg4k3YAABg4k3YAABg4k3YAABg4k3YAABg4k3YA\n",
"ABg4k3YAABi4/w/Y3UZ5IHmVbAAAAABJRU5ErkJggg==\n"
],
"text/plain": [
"<matplotlib.figure.Figure at 0x10d0da080>"
]
},
"metadata": {
"image/png": {
"height": 255,
"width": 374
}
},
"output_type": "display_data"
}
],
"source": [
"%matplotlib inline\n",
"%config InlineBackend.figure_formats = set(['retina'])\n",
"import matplotlib.pyplot as plt\n",
"plt.plot([0,1],[1,0])"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "IPython mydev (Python 3)",
"name": "python3_mydev"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.4.2"
}
},
"nbformat": 4,
"nbformat_minor": 0
}

View File

@ -0,0 +1,90 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(100,)"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"evs = np.zeros(100)\n",
"evs.shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
" "
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
" "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.1"
}
},
"nbformat": 4,
"nbformat_minor": 1
}

View File

@ -0,0 +1,77 @@
{
"cells": [
{
"cell_type": "raw",
"metadata": {
"raw_mimetype": "text/html"
},
"source": [
"<b>raw html</b>"
]
},
{
"cell_type": "raw",
"metadata": {
"raw_mimetype": "text/markdown"
},
"source": [
"* raw markdown\n",
"* bullet\n",
"* list"
]
},
{
"cell_type": "raw",
"metadata": {
"raw_mimetype": "text/restructuredtext"
},
"source": [
"``raw rst``\n",
"\n",
".. sourcecode:: python\n",
"\n",
" def foo(): pass\n"
]
},
{
"cell_type": "raw",
"metadata": {
"raw_mimetype": "text/x-python"
},
"source": [
"def bar():\n",
" \"\"\"raw python\"\"\"\n",
" pass"
]
},
{
"cell_type": "raw",
"metadata": {
"raw_mimetype": "text/latex"
},
"source": [
"\\LaTeX\n",
"% raw latex"
]
},
{
"cell_type": "raw",
"metadata": {},
"source": [
"# no raw_mimetype metadata, should be included by default"
]
},
{
"cell_type": "raw",
"metadata": {
"raw_mimetype": "doesnotexist"
},
"source": [
"garbage format defined, should never be included"
]
}
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 0
}

View File

@ -0,0 +1,504 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[<matplotlib.lines.Line2D at 0x7f4c63ec5518>]"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
},
{
"data": {
"image/svg+xml": [
"<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"no\"?>\n",
"<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\"\n",
" \"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n",
"<!-- Created with matplotlib (http://matplotlib.org/) -->\n",
"<svg height=\"252.018125pt\" version=\"1.1\" viewBox=\"0 0 375.603125 252.018125\" width=\"375.603125pt\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n",
" <defs>\n",
" <style type=\"text/css\">\n",
"*{stroke-linecap:butt;stroke-linejoin:round;}\n",
" </style>\n",
" </defs>\n",
" <g id=\"figure_1\">\n",
" <g id=\"patch_1\">\n",
" <path d=\"M 0 252.018125 \n",
"L 375.603125 252.018125 \n",
"L 375.603125 0 \n",
"L 0 0 \n",
"z\n",
"\" style=\"fill:none;\"/>\n",
" </g>\n",
" <g id=\"axes_1\">\n",
" <g id=\"patch_2\">\n",
" <path d=\"M 30.103125 228.14 \n",
"L 364.903125 228.14 \n",
"L 364.903125 10.7 \n",
"L 30.103125 10.7 \n",
"z\n",
"\" style=\"fill:#ffffff;\"/>\n",
" </g>\n",
" <g id=\"matplotlib.axis_1\">\n",
" <g id=\"xtick_1\">\n",
" <g id=\"line2d_1\">\n",
" <defs>\n",
" <path d=\"M 0 0 \n",
"L 0 3.5 \n",
"\" id=\"mbaa5d3ac27\" style=\"stroke:#000000;stroke-width:0.8;\"/>\n",
" </defs>\n",
" <g>\n",
" <use style=\"stroke:#000000;stroke-width:0.8;\" x=\"45.321307\" xlink:href=\"#mbaa5d3ac27\" y=\"228.14\"/>\n",
" </g>\n",
" </g>\n",
" <g id=\"text_1\">\n",
" <!-- 0 -->\n",
" <defs>\n",
" <path d=\"M 31.78125 66.40625 \n",
"Q 24.171875 66.40625 20.328125 58.90625 \n",
"Q 16.5 51.421875 16.5 36.375 \n",
"Q 16.5 21.390625 20.328125 13.890625 \n",
"Q 24.171875 6.390625 31.78125 6.390625 \n",
"Q 39.453125 6.390625 43.28125 13.890625 \n",
"Q 47.125 21.390625 47.125 36.375 \n",
"Q 47.125 51.421875 43.28125 58.90625 \n",
"Q 39.453125 66.40625 31.78125 66.40625 \n",
"z\n",
"M 31.78125 74.21875 \n",
"Q 44.046875 74.21875 50.515625 64.515625 \n",
"Q 56.984375 54.828125 56.984375 36.375 \n",
"Q 56.984375 17.96875 50.515625 8.265625 \n",
"Q 44.046875 -1.421875 31.78125 -1.421875 \n",
"Q 19.53125 -1.421875 13.0625 8.265625 \n",
"Q 6.59375 17.96875 6.59375 36.375 \n",
"Q 6.59375 54.828125 13.0625 64.515625 \n",
"Q 19.53125 74.21875 31.78125 74.21875 \n",
"z\n",
"\" id=\"DejaVuSans-30\"/>\n",
" </defs>\n",
" <g transform=\"translate(42.140057 242.738437)scale(0.1 -0.1)\">\n",
" <use xlink:href=\"#DejaVuSans-30\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <g id=\"xtick_2\">\n",
" <g id=\"line2d_2\">\n",
" <g>\n",
" <use style=\"stroke:#000000;stroke-width:0.8;\" x=\"106.194034\" xlink:href=\"#mbaa5d3ac27\" y=\"228.14\"/>\n",
" </g>\n",
" </g>\n",
" <g id=\"text_2\">\n",
" <!-- 1 -->\n",
" <defs>\n",
" <path d=\"M 12.40625 8.296875 \n",
"L 28.515625 8.296875 \n",
"L 28.515625 63.921875 \n",
"L 10.984375 60.40625 \n",
"L 10.984375 69.390625 \n",
"L 28.421875 72.90625 \n",
"L 38.28125 72.90625 \n",
"L 38.28125 8.296875 \n",
"L 54.390625 8.296875 \n",
"L 54.390625 0 \n",
"L 12.40625 0 \n",
"z\n",
"\" id=\"DejaVuSans-31\"/>\n",
" </defs>\n",
" <g transform=\"translate(103.012784 242.738437)scale(0.1 -0.1)\">\n",
" <use xlink:href=\"#DejaVuSans-31\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <g id=\"xtick_3\">\n",
" <g id=\"line2d_3\">\n",
" <g>\n",
" <use style=\"stroke:#000000;stroke-width:0.8;\" x=\"167.066761\" xlink:href=\"#mbaa5d3ac27\" y=\"228.14\"/>\n",
" </g>\n",
" </g>\n",
" <g id=\"text_3\">\n",
" <!-- 2 -->\n",
" <defs>\n",
" <path d=\"M 19.1875 8.296875 \n",
"L 53.609375 8.296875 \n",
"L 53.609375 0 \n",
"L 7.328125 0 \n",
"L 7.328125 8.296875 \n",
"Q 12.9375 14.109375 22.625 23.890625 \n",
"Q 32.328125 33.6875 34.8125 36.53125 \n",
"Q 39.546875 41.84375 41.421875 45.53125 \n",
"Q 43.3125 49.21875 43.3125 52.78125 \n",
"Q 43.3125 58.59375 39.234375 62.25 \n",
"Q 35.15625 65.921875 28.609375 65.921875 \n",
"Q 23.96875 65.921875 18.8125 64.3125 \n",
"Q 13.671875 62.703125 7.8125 59.421875 \n",
"L 7.8125 69.390625 \n",
"Q 13.765625 71.78125 18.9375 73 \n",
"Q 24.125 74.21875 28.421875 74.21875 \n",
"Q 39.75 74.21875 46.484375 68.546875 \n",
"Q 53.21875 62.890625 53.21875 53.421875 \n",
"Q 53.21875 48.921875 51.53125 44.890625 \n",
"Q 49.859375 40.875 45.40625 35.40625 \n",
"Q 44.1875 33.984375 37.640625 27.21875 \n",
"Q 31.109375 20.453125 19.1875 8.296875 \n",
"z\n",
"\" id=\"DejaVuSans-32\"/>\n",
" </defs>\n",
" <g transform=\"translate(163.885511 242.738437)scale(0.1 -0.1)\">\n",
" <use xlink:href=\"#DejaVuSans-32\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <g id=\"xtick_4\">\n",
" <g id=\"line2d_4\">\n",
" <g>\n",
" <use style=\"stroke:#000000;stroke-width:0.8;\" x=\"227.939489\" xlink:href=\"#mbaa5d3ac27\" y=\"228.14\"/>\n",
" </g>\n",
" </g>\n",
" <g id=\"text_4\">\n",
" <!-- 3 -->\n",
" <defs>\n",
" <path d=\"M 40.578125 39.3125 \n",
"Q 47.65625 37.796875 51.625 33 \n",
"Q 55.609375 28.21875 55.609375 21.1875 \n",
"Q 55.609375 10.40625 48.1875 4.484375 \n",
"Q 40.765625 -1.421875 27.09375 -1.421875 \n",
"Q 22.515625 -1.421875 17.65625 -0.515625 \n",
"Q 12.796875 0.390625 7.625 2.203125 \n",
"L 7.625 11.71875 \n",
"Q 11.71875 9.328125 16.59375 8.109375 \n",
"Q 21.484375 6.890625 26.8125 6.890625 \n",
"Q 36.078125 6.890625 40.9375 10.546875 \n",
"Q 45.796875 14.203125 45.796875 21.1875 \n",
"Q 45.796875 27.640625 41.28125 31.265625 \n",
"Q 36.765625 34.90625 28.71875 34.90625 \n",
"L 20.21875 34.90625 \n",
"L 20.21875 43.015625 \n",
"L 29.109375 43.015625 \n",
"Q 36.375 43.015625 40.234375 45.921875 \n",
"Q 44.09375 48.828125 44.09375 54.296875 \n",
"Q 44.09375 59.90625 40.109375 62.90625 \n",
"Q 36.140625 65.921875 28.71875 65.921875 \n",
"Q 24.65625 65.921875 20.015625 65.03125 \n",
"Q 15.375 64.15625 9.8125 62.3125 \n",
"L 9.8125 71.09375 \n",
"Q 15.4375 72.65625 20.34375 73.4375 \n",
"Q 25.25 74.21875 29.59375 74.21875 \n",
"Q 40.828125 74.21875 47.359375 69.109375 \n",
"Q 53.90625 64.015625 53.90625 55.328125 \n",
"Q 53.90625 49.265625 50.4375 45.09375 \n",
"Q 46.96875 40.921875 40.578125 39.3125 \n",
"z\n",
"\" id=\"DejaVuSans-33\"/>\n",
" </defs>\n",
" <g transform=\"translate(224.758239 242.738437)scale(0.1 -0.1)\">\n",
" <use xlink:href=\"#DejaVuSans-33\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <g id=\"xtick_5\">\n",
" <g id=\"line2d_5\">\n",
" <g>\n",
" <use style=\"stroke:#000000;stroke-width:0.8;\" x=\"288.812216\" xlink:href=\"#mbaa5d3ac27\" y=\"228.14\"/>\n",
" </g>\n",
" </g>\n",
" <g id=\"text_5\">\n",
" <!-- 4 -->\n",
" <defs>\n",
" <path d=\"M 37.796875 64.3125 \n",
"L 12.890625 25.390625 \n",
"L 37.796875 25.390625 \n",
"z\n",
"M 35.203125 72.90625 \n",
"L 47.609375 72.90625 \n",
"L 47.609375 25.390625 \n",
"L 58.015625 25.390625 \n",
"L 58.015625 17.1875 \n",
"L 47.609375 17.1875 \n",
"L 47.609375 0 \n",
"L 37.796875 0 \n",
"L 37.796875 17.1875 \n",
"L 4.890625 17.1875 \n",
"L 4.890625 26.703125 \n",
"z\n",
"\" id=\"DejaVuSans-34\"/>\n",
" </defs>\n",
" <g transform=\"translate(285.630966 242.738437)scale(0.1 -0.1)\">\n",
" <use xlink:href=\"#DejaVuSans-34\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <g id=\"xtick_6\">\n",
" <g id=\"line2d_6\">\n",
" <g>\n",
" <use style=\"stroke:#000000;stroke-width:0.8;\" x=\"349.684943\" xlink:href=\"#mbaa5d3ac27\" y=\"228.14\"/>\n",
" </g>\n",
" </g>\n",
" <g id=\"text_6\">\n",
" <!-- 5 -->\n",
" <defs>\n",
" <path d=\"M 10.796875 72.90625 \n",
"L 49.515625 72.90625 \n",
"L 49.515625 64.59375 \n",
"L 19.828125 64.59375 \n",
"L 19.828125 46.734375 \n",
"Q 21.96875 47.46875 24.109375 47.828125 \n",
"Q 26.265625 48.1875 28.421875 48.1875 \n",
"Q 40.625 48.1875 47.75 41.5 \n",
"Q 54.890625 34.8125 54.890625 23.390625 \n",
"Q 54.890625 11.625 47.5625 5.09375 \n",
"Q 40.234375 -1.421875 26.90625 -1.421875 \n",
"Q 22.3125 -1.421875 17.546875 -0.640625 \n",
"Q 12.796875 0.140625 7.71875 1.703125 \n",
"L 7.71875 11.625 \n",
"Q 12.109375 9.234375 16.796875 8.0625 \n",
"Q 21.484375 6.890625 26.703125 6.890625 \n",
"Q 35.15625 6.890625 40.078125 11.328125 \n",
"Q 45.015625 15.765625 45.015625 23.390625 \n",
"Q 45.015625 31 40.078125 35.4375 \n",
"Q 35.15625 39.890625 26.703125 39.890625 \n",
"Q 22.75 39.890625 18.8125 39.015625 \n",
"Q 14.890625 38.140625 10.796875 36.28125 \n",
"z\n",
"\" id=\"DejaVuSans-35\"/>\n",
" </defs>\n",
" <g transform=\"translate(346.503693 242.738437)scale(0.1 -0.1)\">\n",
" <use xlink:href=\"#DejaVuSans-35\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <g id=\"matplotlib.axis_2\">\n",
" <g id=\"ytick_1\">\n",
" <g id=\"line2d_7\">\n",
" <defs>\n",
" <path d=\"M 0 0 \n",
"L -3.5 0 \n",
"\" id=\"m7fb83757f4\" style=\"stroke:#000000;stroke-width:0.8;\"/>\n",
" </defs>\n",
" <g>\n",
" <use style=\"stroke:#000000;stroke-width:0.8;\" x=\"30.103125\" xlink:href=\"#m7fb83757f4\" y=\"218.256364\"/>\n",
" </g>\n",
" </g>\n",
" <g id=\"text_7\">\n",
" <!-- 0.0 -->\n",
" <defs>\n",
" <path d=\"M 10.6875 12.40625 \n",
"L 21 12.40625 \n",
"L 21 0 \n",
"L 10.6875 0 \n",
"z\n",
"\" id=\"DejaVuSans-2e\"/>\n",
" </defs>\n",
" <g transform=\"translate(7.2 222.055582)scale(0.1 -0.1)\">\n",
" <use xlink:href=\"#DejaVuSans-30\"/>\n",
" <use x=\"63.623047\" xlink:href=\"#DejaVuSans-2e\"/>\n",
" <use x=\"95.410156\" xlink:href=\"#DejaVuSans-30\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <g id=\"ytick_2\">\n",
" <g id=\"line2d_8\">\n",
" <g>\n",
" <use style=\"stroke:#000000;stroke-width:0.8;\" x=\"30.103125\" xlink:href=\"#m7fb83757f4\" y=\"193.547273\"/>\n",
" </g>\n",
" </g>\n",
" <g id=\"text_8\">\n",
" <!-- 0.5 -->\n",
" <g transform=\"translate(7.2 197.346491)scale(0.1 -0.1)\">\n",
" <use xlink:href=\"#DejaVuSans-30\"/>\n",
" <use x=\"63.623047\" xlink:href=\"#DejaVuSans-2e\"/>\n",
" <use x=\"95.410156\" xlink:href=\"#DejaVuSans-35\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <g id=\"ytick_3\">\n",
" <g id=\"line2d_9\">\n",
" <g>\n",
" <use style=\"stroke:#000000;stroke-width:0.8;\" x=\"30.103125\" xlink:href=\"#m7fb83757f4\" y=\"168.838182\"/>\n",
" </g>\n",
" </g>\n",
" <g id=\"text_9\">\n",
" <!-- 1.0 -->\n",
" <g transform=\"translate(7.2 172.637401)scale(0.1 -0.1)\">\n",
" <use xlink:href=\"#DejaVuSans-31\"/>\n",
" <use x=\"63.623047\" xlink:href=\"#DejaVuSans-2e\"/>\n",
" <use x=\"95.410156\" xlink:href=\"#DejaVuSans-30\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <g id=\"ytick_4\">\n",
" <g id=\"line2d_10\">\n",
" <g>\n",
" <use style=\"stroke:#000000;stroke-width:0.8;\" x=\"30.103125\" xlink:href=\"#m7fb83757f4\" y=\"144.129091\"/>\n",
" </g>\n",
" </g>\n",
" <g id=\"text_10\">\n",
" <!-- 1.5 -->\n",
" <g transform=\"translate(7.2 147.92831)scale(0.1 -0.1)\">\n",
" <use xlink:href=\"#DejaVuSans-31\"/>\n",
" <use x=\"63.623047\" xlink:href=\"#DejaVuSans-2e\"/>\n",
" <use x=\"95.410156\" xlink:href=\"#DejaVuSans-35\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <g id=\"ytick_5\">\n",
" <g id=\"line2d_11\">\n",
" <g>\n",
" <use style=\"stroke:#000000;stroke-width:0.8;\" x=\"30.103125\" xlink:href=\"#m7fb83757f4\" y=\"119.42\"/>\n",
" </g>\n",
" </g>\n",
" <g id=\"text_11\">\n",
" <!-- 2.0 -->\n",
" <g transform=\"translate(7.2 123.219219)scale(0.1 -0.1)\">\n",
" <use xlink:href=\"#DejaVuSans-32\"/>\n",
" <use x=\"63.623047\" xlink:href=\"#DejaVuSans-2e\"/>\n",
" <use x=\"95.410156\" xlink:href=\"#DejaVuSans-30\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <g id=\"ytick_6\">\n",
" <g id=\"line2d_12\">\n",
" <g>\n",
" <use style=\"stroke:#000000;stroke-width:0.8;\" x=\"30.103125\" xlink:href=\"#m7fb83757f4\" y=\"94.710909\"/>\n",
" </g>\n",
" </g>\n",
" <g id=\"text_12\">\n",
" <!-- 2.5 -->\n",
" <g transform=\"translate(7.2 98.510128)scale(0.1 -0.1)\">\n",
" <use xlink:href=\"#DejaVuSans-32\"/>\n",
" <use x=\"63.623047\" xlink:href=\"#DejaVuSans-2e\"/>\n",
" <use x=\"95.410156\" xlink:href=\"#DejaVuSans-35\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <g id=\"ytick_7\">\n",
" <g id=\"line2d_13\">\n",
" <g>\n",
" <use style=\"stroke:#000000;stroke-width:0.8;\" x=\"30.103125\" xlink:href=\"#m7fb83757f4\" y=\"70.001818\"/>\n",
" </g>\n",
" </g>\n",
" <g id=\"text_13\">\n",
" <!-- 3.0 -->\n",
" <g transform=\"translate(7.2 73.801037)scale(0.1 -0.1)\">\n",
" <use xlink:href=\"#DejaVuSans-33\"/>\n",
" <use x=\"63.623047\" xlink:href=\"#DejaVuSans-2e\"/>\n",
" <use x=\"95.410156\" xlink:href=\"#DejaVuSans-30\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <g id=\"ytick_8\">\n",
" <g id=\"line2d_14\">\n",
" <g>\n",
" <use style=\"stroke:#000000;stroke-width:0.8;\" x=\"30.103125\" xlink:href=\"#m7fb83757f4\" y=\"45.292727\"/>\n",
" </g>\n",
" </g>\n",
" <g id=\"text_14\">\n",
" <!-- 3.5 -->\n",
" <g transform=\"translate(7.2 49.091946)scale(0.1 -0.1)\">\n",
" <use xlink:href=\"#DejaVuSans-33\"/>\n",
" <use x=\"63.623047\" xlink:href=\"#DejaVuSans-2e\"/>\n",
" <use x=\"95.410156\" xlink:href=\"#DejaVuSans-35\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <g id=\"ytick_9\">\n",
" <g id=\"line2d_15\">\n",
" <g>\n",
" <use style=\"stroke:#000000;stroke-width:0.8;\" x=\"30.103125\" xlink:href=\"#m7fb83757f4\" y=\"20.583636\"/>\n",
" </g>\n",
" </g>\n",
" <g id=\"text_15\">\n",
" <!-- 4.0 -->\n",
" <g transform=\"translate(7.2 24.382855)scale(0.1 -0.1)\">\n",
" <use xlink:href=\"#DejaVuSans-34\"/>\n",
" <use x=\"63.623047\" xlink:href=\"#DejaVuSans-2e\"/>\n",
" <use x=\"95.410156\" xlink:href=\"#DejaVuSans-30\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <g id=\"line2d_16\">\n",
" <path clip-path=\"url(#pf878579141)\" d=\"M 45.321307 218.256364 \n",
"L 106.194034 70.001818 \n",
"L 167.066761 20.583636 \n",
"L 227.939489 20.583636 \n",
"L 288.812216 70.001818 \n",
"L 349.684943 218.256364 \n",
"\" style=\"fill:none;stroke:#1f77b4;stroke-linecap:square;stroke-width:1.5;\"/>\n",
" </g>\n",
" <g id=\"patch_3\">\n",
" <path d=\"M 30.103125 228.14 \n",
"L 30.103125 10.7 \n",
"\" style=\"fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;\"/>\n",
" </g>\n",
" <g id=\"patch_4\">\n",
" <path d=\"M 364.903125 228.14 \n",
"L 364.903125 10.7 \n",
"\" style=\"fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;\"/>\n",
" </g>\n",
" <g id=\"patch_5\">\n",
" <path d=\"M 30.103125 228.14 \n",
"L 364.903125 228.14 \n",
"\" style=\"fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;\"/>\n",
" </g>\n",
" <g id=\"patch_6\">\n",
" <path d=\"M 30.103125 10.7 \n",
"L 364.903125 10.7 \n",
"\" style=\"fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;\"/>\n",
" </g>\n",
" </g>\n",
" </g>\n",
" <defs>\n",
" <clipPath id=\"pf878579141\">\n",
" <rect height=\"217.44\" width=\"334.8\" x=\"30.103125\" y=\"10.7\"/>\n",
" </clipPath>\n",
" </defs>\n",
"</svg>\n"
],
"text/plain": [
"<Figure size 432x288 with 1 Axes>"
]
},
"metadata": {
"needs_background": "light"
},
"output_type": "display_data"
}
],
"source": [
"%matplotlib inline\n",
"%config InlineBackend.figure_formats = ['svg'] \n",
"import matplotlib.pyplot as plt\n",
"plt.plot((0,1,2,3,4,5),(0,3,4,4,3,0))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -0,0 +1,70 @@
"""Tests for ASCIIDocExporter`"""
# -----------------------------------------------------------------------------
# Copyright (c) 2016, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import re
from traitlets.config import Config
from ...tests.utils import onlyif_cmds_exist
from ..asciidoc import ASCIIDocExporter
from .base import ExportersTestsBase
# -----------------------------------------------------------------------------
# Class
# -----------------------------------------------------------------------------
in_regex = r"In\[(.*)\]:"
out_regex = r"Out\[(.*)\]:"
class TestASCIIDocExporter(ExportersTestsBase):
    """Test suite for the ASCIIDoc exporter."""

    exporter_class = ASCIIDocExporter

    def test_constructor(self):
        """An ASCIIDocExporter can be instantiated without arguments."""
        ASCIIDocExporter()

    @onlyif_cmds_exist("pandoc")
    def test_export(self):
        """Exporting a notebook yields non-empty output containing prompts."""
        exporter = ASCIIDocExporter()
        output, _ = exporter.from_filename(self._get_notebook())
        assert len(output) > 0
        assert re.findall(in_regex, output)
        assert re.findall(out_regex, output)

    @onlyif_cmds_exist("pandoc")
    def test_export_no_prompt(self):
        """With prompt exclusion configured, no prompts appear in the output."""
        config = Config(
            {
                "TemplateExporter": {
                    "exclude_input_prompt": True,
                    "exclude_output_prompt": True,
                }
            }
        )
        output, _ = ASCIIDocExporter(config=config).from_filename(
            self._get_notebook(nb_name="prompt_numbers.ipynb")
        )
        assert not re.findall(in_regex, output)
        assert not re.findall(out_regex, output)

View File

@ -0,0 +1,116 @@
"""
Module with tests for export.py
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import sys
import nbformat
import pytest
from traitlets.config import Config
import nbconvert.tests
from ..base import (
ExporterDisabledError,
ExporterNameError,
export,
get_export_names,
get_exporter,
)
from ..exporter import Exporter
from ..python import PythonExporter
from .base import ExportersTestsBase
class TestExport(ExportersTestsBase):
    """Contains test functions for export.py"""

    def test_export_wrong_name(self):
        """
        Is the right error thrown when a bad template name is used?
        """
        # Fix: the previous try/except-pass let this test succeed even when
        # no exception was raised at all; pytest.raises asserts it is raised.
        with pytest.raises(ExporterNameError):
            exporter = get_exporter("not_a_name")
            export(exporter, self._get_notebook())

    def test_export_disabled(self):
        """
        Trying to use a disabled exporter should raise ExporterDisabledError
        """
        config = Config({"NotebookExporter": {"enabled": False}})
        with pytest.raises(ExporterDisabledError):
            get_exporter("notebook", config=config)

    def test_export_filename(self):
        """
        Can a notebook be exported by filename?
        """
        exporter = get_exporter("python")
        (output, resources) = export(exporter, self._get_notebook())
        assert len(output) > 0

    def test_export_nbnode(self):
        """
        Can a notebook be exported by a notebook node handle?
        """
        with open(self._get_notebook()) as f:
            notebook = nbformat.read(f, 4)
        exporter = get_exporter("python")
        (output, resources) = export(exporter, notebook)
        assert len(output) > 0

    def test_export_filestream(self):
        """
        Can a notebook be exported by a filestream?
        """
        with open(self._get_notebook()) as f:
            exporter = get_exporter("python")
            (output, resources) = export(exporter, f)
            assert len(output) > 0

    def test_export_using_exporter(self):
        """
        Can a notebook be exported using an instantiated exporter?
        """
        (output, resources) = export(PythonExporter(), self._get_notebook())
        assert len(output) > 0

    def test_export_using_exporter_class(self):
        """
        Can a notebook be exported using an exporter class type?
        """
        (output, resources) = export(PythonExporter, self._get_notebook())
        assert len(output) > 0

    def test_export_resources(self):
        """
        Can a notebook be exported along with a custom resources dict?
        """
        (output, resources) = export(PythonExporter, self._get_notebook(), resources={})
        assert len(output) > 0

    def test_no_exporter(self):
        """
        Is the right error thrown if no exporter is provided?
        """
        # Fix: same silent-pass pattern as above — require the TypeError.
        with pytest.raises(TypeError):
            export(None, self._get_notebook())
def test_get_exporter_entrypoint():
    """Exporters registered via an entry point are discoverable and loadable."""
    p = os.path.join(os.path.dirname(nbconvert.tests.__file__), "exporter_entrypoint")
    sys.path.insert(0, p)
    try:
        # Fix: this assertion previously ran before the try block, so a
        # failure would leak the extra sys.path entry into later tests.
        assert "entrypoint_test" in get_export_names()
        cls = get_exporter("entrypoint_test")
        assert issubclass(cls, Exporter), cls
    finally:
        # Remove the exact entry we added (consistent with test_script.py),
        # rather than assuming it is still at index 0.
        sys.path.remove(p)

View File

@ -0,0 +1,89 @@
"""
Module with tests for exporter.py
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
from unittest.mock import patch
from traitlets.config import Config
from ...preprocessors.base import Preprocessor
from ..base import get_export_names
from ..exporter import Exporter
from .base import ExportersTestsBase
# -----------------------------------------------------------------------------
# Class
# -----------------------------------------------------------------------------
class PizzaPreprocessor(Preprocessor):
    """Simple preprocessor that adds a 'pizza' entry to the NotebookNode. Used
    to test Exporter.
    """

    def preprocess(self, nb, resources):
        # Tag the notebook so tests can verify this preprocessor actually ran.
        nb["pizza"] = "cheese"
        return nb, resources
class TestExporter(ExportersTestsBase):
    """Contains test functions for exporter.py"""

    def test_constructor(self):
        """Can an Exporter be constructed?"""
        Exporter()

    def test_export(self):
        """Can an Exporter export something?"""
        exporter = Exporter()
        (notebook, resources) = exporter.from_filename(self._get_notebook())
        assert isinstance(notebook, dict)

    def test_preprocessor(self):
        """Do preprocessors work?"""
        config = Config({"Exporter": {"preprocessors": [PizzaPreprocessor()]}})
        exporter = Exporter(config=config)
        (notebook, resources) = exporter.from_filename(self._get_notebook())
        self.assertEqual(notebook["pizza"], "cheese")

    def test_get_export_names_disable(self):
        """Can we disable all exporters then enable a single one"""
        config = Config({"Exporter": {"enabled": False}, "NotebookExporter": {"enabled": True}})
        export_names = get_export_names(config=config)
        self.assertEqual(export_names, ["notebook"])

    def test_get_exporter_disable_config_exporters(self):
        """
        Does get_export_names behave correctly with respect to
        NBCONVERT_DISABLE_CONFIG_EXPORTERS being set in the
        environment?
        """
        config = Config({"Exporter": {"enabled": False}, "NotebookExporter": {"enabled": True}})
        os.environ["NBCONVERT_DISABLE_CONFIG_EXPORTERS"] = "1"
        # Fix: use try/finally so a failing assertion cannot leak the
        # environment variable into the rest of the test run.
        try:
            with patch("nbconvert.exporters.base.get_exporter") as exp:
                export_names = get_export_names(config=config)
                # get_export_names should not call get_exporter for
                # any of the entry points because we return before then.
                exp.assert_not_called()
                # We should have all exporters, not just the ones
                # enabled in the config
                self.assertNotEqual(export_names, ["notebook"])
        finally:
            del os.environ["NBCONVERT_DISABLE_CONFIG_EXPORTERS"]
        # In the absence of this variable we should revert to
        # the normal behavior.
        export_names = get_export_names(config=config)
        self.assertEqual(export_names, ["notebook"])

View File

@ -0,0 +1,136 @@
"""Tests for HTMLExporter"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import re
from nbformat import v4
from traitlets.config import Config
from ..html import HTMLExporter
from .base import ExportersTestsBase
class TestHTMLExporter(ExportersTestsBase):
    """Tests for HTMLExporter"""

    exporter_class = HTMLExporter
    should_include_raw = ["html"]

    def test_constructor(self):
        """
        Can a HTMLExporter be constructed?
        """
        HTMLExporter()

    def test_export(self):
        """
        Can a HTMLExporter export something?
        """
        (output, resources) = HTMLExporter().from_filename(self._get_notebook())
        assert len(output) > 0

    def test_export_classic(self):
        """
        Can a HTMLExporter export using the 'classic' template?
        """
        (output, resources) = HTMLExporter(template_name="classic").from_filename(
            self._get_notebook()
        )
        assert len(output) > 0

    def test_export_notebook(self):
        """
        Can a HTMLExporter export using the 'lab' template?
        """
        (output, resources) = HTMLExporter(template_name="lab").from_filename(self._get_notebook())
        assert len(output) > 0

    def test_prompt_number(self):
        """
        Does HTMLExporter properly format input and output prompts?
        """
        no_prompt_conf = Config(
            {
                "TemplateExporter": {
                    "exclude_input_prompt": True,
                    "exclude_output_prompt": True,
                }
            }
        )
        exporter = HTMLExporter(config=no_prompt_conf, template_name="lab")
        (output, resources) = exporter.from_filename(
            self._get_notebook(nb_name="prompt_numbers.ipynb")
        )
        # With prompts excluded, neither prompt pattern may appear in the HTML.
        in_regex = r"In&nbsp;\[(.*)\]:"
        out_regex = r"Out\[(.*)\]:"
        assert not re.findall(in_regex, output)
        assert not re.findall(out_regex, output)

    def test_png_metadata(self):
        """
        Does HTMLExporter with the 'classic' template treat pngs with width/height metadata correctly?
        """
        (output, resources) = HTMLExporter(template_name="classic").from_filename(
            self._get_notebook(nb_name="pngmetadata.ipynb")
        )
        # Capture the attribute string of the first <img> tag in the output
        # and check that the size metadata was carried through.
        check_for_png = re.compile(r'<img src="[^"]*?"([^>]*?)>')
        result = check_for_png.search(output)
        attr_string = result.group(1)
        assert "width" in attr_string
        assert "height" in attr_string

    def test_javascript_output(self):
        # Javascript display_data outputs should be embedded in the HTML.
        nb = v4.new_notebook(
            cells=[
                v4.new_code_cell(
                    outputs=[
                        v4.new_output(
                            output_type="display_data",
                            data={"application/javascript": "javascript_output();"},
                        )
                    ]
                )
            ]
        )
        (output, resources) = HTMLExporter(template_name="classic").from_notebook_node(nb)
        self.assertIn("javascript_output", output)

    def test_attachments(self):
        # Cell attachments should be inlined as base64 data URIs in the HTML.
        (output, resources) = HTMLExporter(template_name="classic").from_file(
            self._get_notebook(nb_name="attachment.ipynb")
        )
        check_for_png = re.compile(r'<img src="[^"]*?"([^>]*?)>')
        result = check_for_png.search(output)
        self.assertTrue(result.group(0).strip().startswith('<img src="data:image/png;base64,iVBOR'))
        self.assertTrue(result.group(1).strip().startswith('alt="image.png"'))
        check_for_data = re.compile(r'<img src="(?P<url>[^"]*?)"')
        results = check_for_data.findall(output)
        assert results[0] != results[1], "attachments only need to be unique within a cell"
        assert "image/svg" in results[1], "second image should use svg"

    def test_custom_filter_highlight_code(self):
        # Overwriting filters takes place at: Exporter.from_notebook_node
        nb = v4.new_notebook()
        nb.cells.append(v4.new_code_cell("some_text"))

        def custom_highlight_code(source, language="python", metadata=None):
            return source + " ADDED_TEXT"

        filters = {"highlight_code": custom_highlight_code}
        (output, resources) = HTMLExporter(
            template_name="classic", filters=filters
        ).from_notebook_node(nb)
        self.assertTrue("ADDED_TEXT" in output)

    def test_basic_name(self):
        """
        Can a HTMLExporter export using the 'basic' template?
        """
        (output, resources) = HTMLExporter(template_name="basic").from_filename(
            self._get_notebook()
        )
        assert len(output) > 0

View File

@ -0,0 +1,189 @@
"""Tests for Latex exporter"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os.path
import re
import textwrap
from tempfile import TemporaryDirectory
from jinja2 import DictLoader
from nbformat import v4, write
from traitlets.config import Config
from ...tests.utils import onlyif_cmds_exist
from ..latex import LatexExporter
from .base import ExportersTestsBase
# Directory containing this test module; used to locate fixture notebooks.
current_dir = os.path.dirname(__file__)
class TestLatexExporter(ExportersTestsBase):
    """Contains test functions for latex.py"""

    exporter_class = LatexExporter
    should_include_raw = ["latex"]

    def test_constructor(self):
        """
        Can a LatexExporter be constructed?
        """
        LatexExporter()

    @onlyif_cmds_exist("pandoc")
    def test_export(self):
        """
        Can a LatexExporter export something?
        """
        (output, resources) = LatexExporter().from_filename(self._get_notebook())
        assert len(output) > 0

    @onlyif_cmds_exist("pandoc")
    def test_export_book(self):
        """
        Can a LatexExporter export using 'report' template?
        """
        (output, resources) = LatexExporter(template_file="report").from_filename(
            self._get_notebook()
        )
        assert len(output) > 0

    @onlyif_cmds_exist("pandoc")
    def test_very_long_cells(self):
        """
        Torture test that long cells do not cause issues
        """
        lorem_ipsum_text = textwrap.dedent(
            """\
            Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec
            dignissim, ipsum non facilisis tempus, dui felis tincidunt metus,
            nec pulvinar neque odio eget risus. Nulla nisi lectus, cursus
            suscipit interdum at, ultrices sit amet orci. Mauris facilisis
            imperdiet elit, vitae scelerisque ipsum dignissim non. Integer
            consequat malesuada neque sit amet pulvinar. Curabitur pretium
            ut turpis eget aliquet. Maecenas sagittis lacus sed lectus
            volutpat, eu adipiscing purus pulvinar. Maecenas consequat
            luctus urna, eget cursus quam mollis a. Aliquam vitae ornare
            erat, non hendrerit urna. Sed eu diam nec massa egestas pharetra
            at nec tellus. Fusce feugiat lacus quis urna sollicitudin volutpat.
            Quisque at sapien non nibh feugiat tempus ac ultricies purus.
            """
        )
        lorem_ipsum_text = lorem_ipsum_text.replace("\n", " ") + "\n\n"
        # Build one very large markdown cell (~3000 paragraphs).
        large_lorem_ipsum_text = "".join([lorem_ipsum_text] * 3000)
        notebook_name = "lorem_ipsum_long.ipynb"
        nb = v4.new_notebook(cells=[v4.new_markdown_cell(source=large_lorem_ipsum_text)])
        with TemporaryDirectory() as td:
            nbfile = os.path.join(td, notebook_name)
            with open(nbfile, "w") as f:
                write(nb, f, 4)
            (output, resources) = LatexExporter().from_filename(nbfile)
            assert len(output) > 0

    @onlyif_cmds_exist("pandoc")
    def test_prompt_number_color(self):
        """
        Does LatexExporter properly format input and output prompts in color?
        """
        (output, resources) = LatexExporter().from_filename(
            self._get_notebook(nb_name="prompt_numbers.ipynb")
        )
        in_regex = r"\\prompt\{In\}\{incolor\}\{(\d+|\s*)\}"
        out_regex = r"\\prompt\{Out\}\{outcolor\}\{(\d+|\s*)\}"
        # Expected prompt numbers for the fixture notebook's cells.
        ins = ["2", "10", " ", " ", "0"]
        outs = ["10"]
        assert re.findall(in_regex, output) == ins
        assert re.findall(out_regex, output) == outs

    @onlyif_cmds_exist("pandoc")
    def test_prompt_number_color_ipython(self):
        """
        Does LatexExporter properly format input and output prompts in color?
        Uses an in memory latex template to load style_ipython as the cell style.
        """
        my_loader_tplx = DictLoader(
            {
                "my_template": r"""
                ((* extends 'style_ipython.tex.j2' *))
                ((* block docclass *))
                \documentclass[11pt]{article}
                ((* endblock docclass *))
                """
            }
        )

        class MyExporter(LatexExporter):
            template_file = "my_template"

        (output, resources) = MyExporter(extra_loaders=[my_loader_tplx]).from_filename(
            self._get_notebook(nb_name="prompt_numbers.ipynb")
        )
        in_regex = r"In \[\{\\color\{incolor\}(.*)\}\]:"
        out_regex = r"Out\[\{\\color\{outcolor\}(.*)\}\]:"
        ins = ["2", "10", " ", " ", "0"]
        outs = ["10"]
        assert re.findall(in_regex, output) == ins
        assert re.findall(out_regex, output) == outs

    @onlyif_cmds_exist("pandoc")
    def test_no_prompt_yes_input(self):
        # Excluding prompts must not also exclude the cell input itself.
        no_prompt = {
            "TemplateExporter": {
                "exclude_input_prompt": True,
                "exclude_output_prompt": True,
            }
        }
        c_no_prompt = Config(no_prompt)
        exporter = LatexExporter(config=c_no_prompt)
        (output, resources) = exporter.from_filename(
            self._get_notebook(nb_name="prompt_numbers.ipynb")
        )
        assert "shape" in output
        assert "evs" in output

    @onlyif_cmds_exist("pandoc", "inkscape")
    def test_svg(self):
        """
        Can a LatexExporter export when it receives raw binary strings from svg?
        """
        filename = os.path.join(current_dir, "files", "svg.ipynb")
        (output, resources) = LatexExporter().from_filename(filename)
        assert len(output) > 0

    def test_in_memory_template_tplx(self):
        # Loads in an in memory latex template (.tplx) using jinja2.DictLoader
        # creates a class that uses this template with the template_file argument
        # converts an empty notebook using this mechanism
        my_loader_tplx = DictLoader({"my_template": "{%- extends 'index' -%}"})

        class MyExporter(LatexExporter):
            template_file = "my_template"

        exporter = MyExporter(extra_loaders=[my_loader_tplx])
        nb = v4.new_notebook()
        out, resources = exporter.from_notebook_node(nb)

    def test_custom_filter_highlight_code(self):
        # Overwriting filters takes place at: Exporter.from_notebook_node
        nb = v4.new_notebook()
        nb.cells.append(v4.new_code_cell("some_text"))

        def custom_highlight_code(source, language="python", metadata=None, strip_verbatim=False):
            return source + " ADDED_TEXT"

        filters = {"highlight_code": custom_highlight_code}
        (output, resources) = LatexExporter(filters=filters).from_notebook_node(nb)
        self.assertTrue("ADDED_TEXT" in output)

View File

@ -0,0 +1,40 @@
"""Tests for MarkdownExporter"""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from ..markdown import MarkdownExporter
from .base import ExportersTestsBase
# -----------------------------------------------------------------------------
# Class
# -----------------------------------------------------------------------------
class TestMarkdownExporter(ExportersTestsBase):
    """Test suite for the Markdown exporter."""

    exporter_class = MarkdownExporter
    should_include_raw = ["markdown", "html"]

    def test_constructor(self):
        """A MarkdownExporter can be instantiated without arguments."""
        MarkdownExporter()

    def test_export(self):
        """Exporting a notebook produces non-empty Markdown output."""
        output, _ = MarkdownExporter().from_filename(self._get_notebook())
        assert len(output) > 0

View File

@ -0,0 +1,41 @@
"""Tests for notebook.py"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import json
from nbformat import validate
from nbconvert.tests.base import assert_big_text_equal
from ..notebook import NotebookExporter
from .base import ExportersTestsBase
class TestNotebookExporter(ExportersTestsBase):
    """Test suite for the Notebook (passthrough) exporter."""

    exporter_class = NotebookExporter

    def test_export(self):
        """
        Does the NotebookExporter return the file unchanged?
        """
        source = self._get_notebook()
        with open(source) as handle:
            expected = handle.read()
        output, _ = self.exporter_class().from_filename(source)
        assert len(output) > 0
        assert_big_text_equal(output, expected)

    def test_downgrade_3(self):
        """Downgrading to nbformat 3 yields a valid notebook."""
        output, _ = self.exporter_class(nbformat_version=3).from_filename(self._get_notebook())
        validate(json.loads(output))

    def test_downgrade_2(self):
        """Downgrading to nbformat 2 records the requested format version."""
        output, _ = self.exporter_class(nbformat_version=2).from_filename(self._get_notebook())
        self.assertEqual(json.loads(output)["nbformat"], 2)

View File

@ -0,0 +1,39 @@
"""Tests for PDF export"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import shutil
from tempfile import TemporaryDirectory
from ...tests.utils import onlyif_cmds_exist
from ..pdf import PDFExporter
from .base import ExportersTestsBase
# -----------------------------------------------------------------------------
# Class
# -----------------------------------------------------------------------------
class TestPDF(ExportersTestsBase):
    """Test PDF export"""

    exporter_class = PDFExporter

    def test_constructor(self):
        """A PDFExporter can be instantiated."""
        self.exporter_class()

    @onlyif_cmds_exist("xelatex", "pandoc")
    def test_export(self):
        """Smoke-test PDF conversion and temporary-file cleanup."""
        with TemporaryDirectory() as workdir:
            source = self._get_notebook()
            name = os.path.basename(source)
            copy_path = os.path.join(workdir, name)
            shutil.copy(source, copy_path)
            output, _ = self.exporter_class(latex_count=1).from_filename(copy_path)
            self.assertIsInstance(output, bytes)
            assert len(output) > 0
            # All intermediate LaTeX build files should have been cleaned up.
            assert set(os.listdir(workdir)) == {name}

View File

@ -0,0 +1,24 @@
"""Tests for PythonExporter"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from ..python import PythonExporter
from .base import ExportersTestsBase
class TestPythonExporter(ExportersTestsBase):
    """Test suite for the Python script exporter."""

    exporter_class = PythonExporter
    should_include_raw = ["python"]

    def test_constructor(self):
        """A PythonExporter can be instantiated."""
        self.exporter_class()

    def test_export(self):
        """The exported script carries the coding cookie and the shebang."""
        output, _ = self.exporter_class().from_filename(self._get_notebook())
        for expected in ("coding: utf-8", "#!/usr/bin/env python"):
            self.assertIn(expected, output)

View File

@ -0,0 +1,67 @@
"""Tests for RSTExporter"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import re
import nbformat
from nbformat import v4
from ...tests.utils import onlyif_cmds_exist
from ..rst import RSTExporter
from .base import ExportersTestsBase
class TestRSTExporter(ExportersTestsBase):
    """Test suite for the reStructuredText exporter."""

    exporter_class = RSTExporter
    should_include_raw = ["rst"]

    def test_constructor(self):
        """An RSTExporter can be instantiated without arguments."""
        RSTExporter()

    @onlyif_cmds_exist("pandoc")
    def test_export(self):
        """Exporting a notebook produces non-empty RST output."""
        output, _ = RSTExporter().from_filename(self._get_notebook())
        assert len(output) > 0

    @onlyif_cmds_exist("pandoc")
    def test_empty_code_cell(self):
        """No empty code cells in rst"""
        path = self._get_notebook()
        with open(path, encoding="utf8") as handle:
            nb = nbformat.read(handle, 4)
        exporter = self.exporter_class()
        baseline, _ = exporter.from_notebook_node(nb)
        # Appending an empty code cell must not change the rendered output.
        nb.cells.append(v4.new_code_cell(source=""))
        with_empty, _ = exporter.from_notebook_node(nb)
        self.assertEqual(baseline.strip(), with_empty.strip())

    @onlyif_cmds_exist("pandoc")
    def test_png_metadata(self):
        """
        Does RSTExporter treat pngs with width/height metadata correctly?
        """
        output, _ = RSTExporter().from_filename(
            self._get_notebook(nb_name="pngmetadata.ipynb")
        )
        assert len(output) > 0
        # Grab the attribute lines of the first image directive.
        match = re.search(r".. image::.*?\n\s+(.*?)\n\s*\n", output, re.DOTALL)
        assert match is not None
        attrs = match.group(1)
        for expected in (":width:", ":height:", "px"):
            assert expected in attrs

View File

@ -0,0 +1,75 @@
"""Tests for ScriptExporter"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import sys
from nbformat import v4
import nbconvert
from ..script import ScriptExporter
from .base import ExportersTestsBase
class TestScriptExporter(ExportersTestsBase):
    """Tests for ScriptExporter"""

    exporter_class = ScriptExporter

    def test_constructor(self):
        """Construct ScriptExporter"""
        # Fix: drop the unused local binding of the constructed exporter.
        self.exporter_class()

    def test_export(self):
        """ScriptExporter can export something"""
        (output, resources) = self.exporter_class().from_filename(self._get_notebook())
        assert len(output) > 0

    def test_export_python(self):
        """delegate to custom exporter from language_info"""
        # Fix: the constructed exporter was never used; reuse it for both
        # conversions instead of building a fresh one each time.
        exporter = self.exporter_class()

        pynb = v4.new_notebook()
        (output, resources) = exporter.from_notebook_node(pynb)
        # Without language_info there is no Python delegation, so no cookie.
        self.assertNotIn("# coding: utf-8", output)

        pynb.metadata.language_info = {
            "name": "python",
            "mimetype": "text/x-python",
            "nbconvert_exporter": "python",
        }
        (output, resources) = exporter.from_notebook_node(pynb)
        self.assertIn("# coding: utf-8", output)

    def test_export_config_transfer(self):
        """delegate config to custom exporter from language_info"""
        nb = v4.new_notebook()
        nb.metadata.language_info = {
            "name": "python",
            "mimetype": "text/x-python",
            "nbconvert_exporter": "python",
        }
        exporter = self.exporter_class()
        exporter.from_notebook_node(nb)
        # The delegated exporter is a distinct instance sharing our config.
        assert exporter._exporters["python"] != exporter
        assert exporter._exporters["python"].config == exporter.config
def test_script_exporter_entrypoint():
    """An exporter registered via entry point is resolved from language_info."""
    nb = v4.new_notebook()
    nb.metadata.language_info = {
        "name": "dummy",
        "mimetype": "text/x-dummy",
    }

    # Make the test-only entry-point package importable for the duration
    # of the export, then restore sys.path unconditionally.
    entrypoint_dir = os.path.join(
        os.path.dirname(nbconvert.tests.__file__), "exporter_entrypoint"
    )
    sys.path.insert(0, entrypoint_dir)
    try:
        output, _ = ScriptExporter().from_notebook_node(nb)
        assert output == "dummy-script-exported"
    finally:
        sys.path.remove(entrypoint_dir)

View File

@ -0,0 +1,82 @@
"""Tests for SlidesExporter"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from nbformat import v4 as nbformat
from ..slides import SlidesExporter, _RevealMetadataPreprocessor
from .base import ExportersTestsBase
class TestSlidesExporter(ExportersTestsBase):
    """Tests for SlidesExporter"""

    exporter_class = SlidesExporter
    # Raw cells with these mimetypes are expected to appear in the output.
    should_include_raw = ["html"]

    def test_constructor(self):
        """
        Can a SlidesExporter be constructed?
        """
        SlidesExporter()

    def test_export(self):
        """
        Can a SlidesExporter export something?
        """
        (output, resources) = SlidesExporter().from_filename(self._get_notebook())
        assert len(output) > 0

    def test_export_reveal(self):
        """
        Can a SlidesExporter export using the 'reveal' template?
        """
        # NOTE(review): no template is passed explicitly here, so this relies
        # on SlidesExporter's default template being reveal-based — confirm.
        (output, resources) = SlidesExporter().from_filename(self._get_notebook())
        assert len(output) > 0

    def build_notebook(self):
        """Build a reveal slides notebook in memory for use with tests."""
        outputs = [nbformat.new_output(output_type="stream", name="stdout", text="a")]

        slide_metadata = {"slideshow": {"slide_type": "slide"}}
        subslide_metadata = {"slideshow": {"slide_type": "subslide"}}
        fragment_metadata = {"slideshow": {"slide_type": "fragment"}}

        # Mix of code and markdown cells that crosses slide, subslide, and
        # fragment boundaries so the *_end flags can be checked positionally.
        cells = [
            nbformat.new_code_cell(source="", execution_count=1, outputs=outputs),
            nbformat.new_markdown_cell(source="", metadata=slide_metadata),
            nbformat.new_code_cell(source="", execution_count=2, outputs=outputs),
            nbformat.new_markdown_cell(source="", metadata=slide_metadata),
            nbformat.new_markdown_cell(source="", metadata=subslide_metadata),
            nbformat.new_markdown_cell(source="", metadata=fragment_metadata),
            nbformat.new_code_cell(source="", execution_count=1, outputs=outputs),
        ]

        return nbformat.new_notebook(cells=cells)

    def test_metadata_preprocessor(self):
        """_RevealMetadataPreprocessor marks slide/subslide/fragment ends."""
        preprocessor = _RevealMetadataPreprocessor()
        nb = self.build_notebook()
        nb, resources = preprocessor.preprocess(nb)
        cells = nb.cells

        # Make sure correct metadata tags are available on every cell.
        for cell in cells:
            assert "slide_type" in cell.metadata

        # Make sure slide end is only applied to the cells preceding slide
        # cells.
        assert not cells[1].metadata.get("slide_end", False)

        # Verify 'slide-end'
        assert cells[0].metadata["slide_end"]
        assert cells[2].metadata["slide_end"]
        assert cells[2].metadata["subslide_end"]

        assert not cells[3].metadata.get("slide_end", False)
        assert cells[3].metadata["subslide_end"]

        # The final cell closes its fragment, its subslide, and its slide.
        assert cells[-1].metadata["fragment_end"]
        assert cells[-1].metadata["subslide_end"]
        assert cells[-1].metadata["slide_end"]

View File

@ -0,0 +1,653 @@
"""
Module with tests for templateexporter.py
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import os
from concurrent.futures import ProcessPoolExecutor
from tempfile import TemporaryDirectory
from unittest.mock import patch
import pytest
from jinja2 import TemplateNotFound
from nbformat import v4
from traitlets import default
from traitlets.config import Config
from ...utils import _contextlib_chdir
from ..html import HTMLExporter
from ..markdown import MarkdownExporter
from ..rst import RSTExporter
from ..templateexporter import TemplateExporter
from .base import ExportersTestsBase
from .cheese import CheesePreprocessor
# Minimal in-memory Jinja template used by the raw_template tests below:
# it extends the stock RST template and overrides the input-prompt block
# with a recognizable marker string ("blah") that the tests search for.
raw_template = """{%- extends 'index.rst.j2' -%}
{%- block in_prompt -%}
blah
{%- endblock in_prompt -%}
"""
class SampleExporter(TemplateExporter):
    """Minimal concrete TemplateExporter that emits Python source files.

    Serves as the lightweight exporter under test in this module.
    """

    @default("template_name")
    def _template_name_default(self):
        # Render with the stock "python" template shipped with nbconvert.
        return "python"

    @default("file_extension")
    def _file_extension_default(self):
        # Exported files are Python scripts.
        return ".py"
class TestExporter(ExportersTestsBase):
"""Contains test functions for exporter.py"""
def test_constructor(self):
"""
Can a TemplateExporter be constructed?
"""
TemplateExporter()
def test_export(self):
"""
Can a TemplateExporter export something?
"""
exporter = self._make_exporter()
(output, resources) = exporter.from_filename(self._get_notebook())
assert len(output) > 0
def test_extract_outputs(self):
"""
If the ExtractOutputPreprocessor is enabled, are outputs extracted?
"""
config = Config({"ExtractOutputPreprocessor": {"enabled": True}})
exporter = self._make_exporter(config=config)
(output, resources) = exporter.from_filename(self._get_notebook())
assert resources is not None
assert isinstance(resources["outputs"], dict)
assert len(resources["outputs"]) > 0
def test_preprocessor_class(self):
"""
Can a preprocessor be added to the preprocessors list by class type?
"""
config = Config({"Exporter": {"preprocessors": [CheesePreprocessor]}})
exporter = self._make_exporter(config=config)
(output, resources) = exporter.from_filename(self._get_notebook())
assert resources is not None
assert resources["cheese"] == "real"
def test_preprocessor_instance(self):
"""
Can a preprocessor be added to the preprocessors list by instance?
"""
config = Config({"Exporter": {"preprocessors": [CheesePreprocessor()]}})
exporter = self._make_exporter(config=config)
(output, resources) = exporter.from_filename(self._get_notebook())
assert resources is not None
assert resources["cheese"] == "real"
def test_preprocessor_dottedobjectname(self):
"""
Can a preprocessor be added to the preprocessors list by dotted object name?
"""
config = Config(
{"Exporter": {"preprocessors": ["nbconvert.exporters.tests.cheese.CheesePreprocessor"]}}
)
exporter = self._make_exporter(config=config)
(output, resources) = exporter.from_filename(self._get_notebook())
assert resources is not None
assert resources["cheese"] == "real"
def test_preprocessor_via_method(self):
"""
Can a preprocessor be added via the Exporter convenience method?
"""
exporter = self._make_exporter()
exporter.register_preprocessor(CheesePreprocessor, enabled=True)
(output, resources) = exporter.from_filename(self._get_notebook())
assert resources is not None
assert resources["cheese"] == "real"
def test_pickle(self):
"""
Can exporters be pickled & called across processes?
"""
exporter = self._make_exporter()
executor = ProcessPoolExecutor()
(output, resources) = executor.submit(exporter.from_filename, self._get_notebook()).result()
assert len(output) > 0
def test_absolute_template_file(self):
with TemporaryDirectory() as td:
template = os.path.join(td, "abstemplate.ext.j2")
test_output = "absolute!"
with open(template, "w") as f:
f.write(test_output)
config = Config()
config.TemplateExporter.template_file = template
exporter = self._make_exporter(config=config)
assert exporter.template.filename == template
assert os.path.dirname(template) in exporter.template_paths
def test_relative_template_file(self):
with TemporaryDirectory() as td, _contextlib_chdir.chdir(td):
with patch("os.getcwd", return_value=os.path.abspath(td)):
template = os.path.join("relative", "relative_template.ext.j2")
template_abs = os.path.abspath(os.path.join(td, template))
os.mkdir(os.path.dirname(template_abs))
test_output = "relative!"
with open(template_abs, "w") as f:
f.write(test_output)
config = Config()
config.TemplateExporter.template_file = template
exporter = self._make_exporter(config=config)
assert os.path.abspath(exporter.template.filename) == template_abs
assert os.path.dirname(template_abs) in [
os.path.abspath(d) for d in exporter.template_paths
]
def test_absolute_template_file_compatibility(self):
with TemporaryDirectory() as td:
template = os.path.join(td, "abstemplate.tpl")
test_output = "absolute!"
with open(template, "w") as f:
f.write(test_output)
config = Config()
config.TemplateExporter.template_file = template
with pytest.warns(DeprecationWarning):
exporter = self._make_exporter(config=config)
assert exporter.template.filename == template
assert os.path.dirname(template) in exporter.template_paths
def test_relative_template_file_compatibility(self):
with TemporaryDirectory() as td, _contextlib_chdir.chdir(td):
with patch("os.getcwd", return_value=os.path.abspath(td)):
template = os.path.join("relative", "relative_template.tpl")
template_abs = os.path.abspath(os.path.join(td, template))
os.mkdir(os.path.dirname(template_abs))
test_output = "relative!"
with open(template_abs, "w") as f:
f.write(test_output)
config = Config()
config.TemplateExporter.template_file = template
with pytest.warns(DeprecationWarning):
exporter = self._make_exporter(config=config)
assert os.path.abspath(exporter.template.filename) == template_abs
assert os.path.dirname(template_abs) in [
os.path.abspath(d) for d in exporter.template_paths
]
def test_absolute_template_name_tpl_compatibility(self):
with TemporaryDirectory() as td:
template = os.path.join(td, "abstemplate.tpl")
test_output = "absolute!"
with open(template, "w") as f:
f.write(test_output)
config = Config()
# We're setting the template_name instead of the template_file
config.TemplateExporter.template_name = template
with pytest.warns(DeprecationWarning):
exporter = self._make_exporter(config=config)
assert exporter.template.filename == template
assert os.path.dirname(template) in exporter.template_paths
# Can't use @pytest.mark.parametrize without removing all self.assert calls in all tests... repeating some here
def absolute_template_name_5x_compatibility_test(self, template, mimetype=None):
config = Config()
# We're setting the template_name instead of the template_file
config.TemplateExporter.template_name = template
with pytest.warns(DeprecationWarning):
exporter = self._make_exporter(config=config)
template_dir, template_file = os.path.split(exporter.template.filename)
_, compat_dir = os.path.split(template_dir)
assert compat_dir == "compatibility"
assert template_file == template + ".tpl"
assert template_dir in exporter.template_paths
def test_absolute_template_name_5x_compatibility_full(self):
self.absolute_template_name_5x_compatibility_test("full", "text/html")
def test_absolute_template_name_5x_compatibility_display_priority(self):
self.absolute_template_name_5x_compatibility_test("display_priority")
# Can't use @pytest.mark.parametrize without removing all self.assert calls in all tests... repeating some here
def relative_template_test(self, template):
with TemporaryDirectory() as td, _contextlib_chdir.chdir(td):
with patch("os.getcwd", return_value=os.path.abspath(td)):
template_abs = os.path.abspath(os.path.join(td, template))
dirname = os.path.dirname(template_abs)
if not os.path.exists(dirname):
os.mkdir(dirname)
test_output = "relative!"
with open(template_abs, "w") as f:
f.write(test_output)
config = Config()
# We're setting the template_name instead of the template_file
config.TemplateExporter.template_name = template
with pytest.warns(DeprecationWarning):
exporter = self._make_exporter(config=config)
assert os.path.abspath(exporter.template.filename) == template_abs
assert os.path.dirname(template_abs) in [
os.path.abspath(d) for d in exporter.template_paths
]
def test_relative_template_name_tpl_compatibility_local(self):
self.relative_template_test("relative_template.tpl")
def test_relative_template_name_tpl_compatibility_nested(self):
self.relative_template_test(os.path.join("relative", "relative_template.tpl"))
def test_relative_template_name_tpl_compatibility_dot(self):
self.relative_template_test(os.path.join(".", "relative_template.tpl"))
def test_relative_template_name_tpl_compatibility_dot_nested(self):
self.relative_template_test(os.path.join(".", "relative", "relative_template.tpl"))
def test_absolute_template_dir(self):
with TemporaryDirectory() as td:
template = "mytemplate"
template_file = os.path.join(td, template, "index.py.j2")
template_dir = os.path.dirname(template_file)
os.mkdir(template_dir)
test_output = "absolute!"
with open(template_file, "w") as f:
f.write(test_output)
config = Config()
config.TemplateExporter.template_name = template
config.TemplateExporter.extra_template_basedirs = [td]
exporter = self._make_exporter(config=config)
assert exporter.template.filename == template_file
assert exporter.template_name == template
assert os.path.join(td, template) in exporter.template_paths
def test_local_template_dir(self):
with TemporaryDirectory() as td, _contextlib_chdir.chdir(td):
with patch("os.getcwd", return_value=os.path.abspath(td)):
template = "mytemplate"
template_file = os.path.join(template, "index.py.j2")
template_abs = os.path.abspath(os.path.join(td, template_file))
template_conf = os.path.abspath(os.path.join(td, template, "conf.json"))
os.mkdir(os.path.dirname(template_abs))
test_output = "local!"
with open(template_abs, "w") as f:
f.write(test_output)
with open(template_conf, "w") as f:
# Mimic having a superset of accepted mimetypes
f.write(
json.dumps(
Config(
mimetypes={
"text/x-python": True,
"text/html": True,
}
)
)
)
config = Config()
config.TemplateExporter.template_name = template
exporter = self._make_exporter(config=config)
assert os.path.abspath(exporter.template.filename) == template_abs
assert exporter.template_name == template
assert os.path.join(td, template) in exporter.template_paths
def test_local_template_file_extending_lab(self):
template_file = os.path.join(self._get_files_path(), "lablike.html.j2")
exporter = HTMLExporter(template_file=template_file, template_name="lab")
nb = v4.new_notebook()
nb.cells.append(v4.new_code_cell("some_text"))
output, resources = exporter.from_notebook_node(nb)
assert "UNIQUE" in output
def test_raw_template_attr(self):
"""
Verify that you can assign a in memory template string by overwriting
`raw_template` as simple(non-traitlet) attribute
"""
nb = v4.new_notebook()
nb.cells.append(v4.new_code_cell("some_text"))
class AttrExporter(TemplateExporter):
raw_template = raw_template
exporter_attr = AttrExporter(template_name="rst")
output_attr, _ = exporter_attr.from_notebook_node(nb)
assert "blah" in output_attr
def test_raw_template_init(self):
"""
Test that template_file and raw_template traitlets play nicely together.
- source assigns template_file default first, then raw_template
- checks that the raw_template overrules template_file if set
- checks that once raw_template is set to '', template_file returns
"""
nb = v4.new_notebook()
nb.cells.append(v4.new_code_cell("some_text"))
class AttrExporter(RSTExporter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.raw_template = raw_template
exporter_init = AttrExporter()
output_init, _ = exporter_init.from_notebook_node(nb)
assert "blah" in output_init
exporter_init.raw_template = ""
assert exporter_init.template_file == "index.rst.j2"
output_init, _ = exporter_init.from_notebook_node(nb)
assert "blah" not in output_init
def test_raw_template_dynamic_attr(self):
"""
Test that template_file and raw_template traitlets play nicely together.
- source assigns template_file default first, then raw_template
- checks that the raw_template overrules template_file if set
- checks that once raw_template is set to '', template_file returns
"""
nb = v4.new_notebook()
nb.cells.append(v4.new_code_cell("some_text"))
class AttrDynamicExporter(TemplateExporter):
@default("default_template_file")
def _template_file_default(self):
return "index.rst.j2"
@default("raw_template")
def _raw_template_default(self):
return raw_template
exporter_attr_dynamic = AttrDynamicExporter(template_name="rst")
output_attr_dynamic, _ = exporter_attr_dynamic.from_notebook_node(nb)
assert "blah" in output_attr_dynamic
exporter_attr_dynamic.raw_template = ""
assert exporter_attr_dynamic.template_file == "index.rst.j2"
output_attr_dynamic, _ = exporter_attr_dynamic.from_notebook_node(nb)
assert "blah" not in output_attr_dynamic
def test_raw_template_dynamic_attr_reversed(self):
"""
Test that template_file and raw_template traitlets play nicely together.
- source assigns raw_template default first, then template_file
- checks that the raw_template overrules template_file if set
- checks that once raw_template is set to '', template_file returns
"""
nb = v4.new_notebook()
nb.cells.append(v4.new_code_cell("some_text"))
class AttrDynamicExporter(TemplateExporter):
@default("raw_template")
def _raw_template_default(self):
return raw_template
@default("default_template_file")
def _template_file_default(self):
return "index.rst.j2"
exporter_attr_dynamic = AttrDynamicExporter(template_name="rst")
output_attr_dynamic, _ = exporter_attr_dynamic.from_notebook_node(nb)
assert "blah" in output_attr_dynamic
exporter_attr_dynamic.raw_template = ""
assert exporter_attr_dynamic.template_file == "index.rst.j2"
output_attr_dynamic, _ = exporter_attr_dynamic.from_notebook_node(nb)
assert "blah" not in output_attr_dynamic
def test_raw_template_constructor(self):
"""
Test `raw_template` as a keyword argument in the exporter constructor.
"""
nb = v4.new_notebook()
nb.cells.append(v4.new_code_cell("some_text"))
output_constructor, _ = TemplateExporter(
template_name="rst", raw_template=raw_template
).from_notebook_node(nb)
assert "blah" in output_constructor
def test_raw_template_assignment(self):
"""
Test `raw_template` assigned after the fact on non-custom Exporter.
"""
nb = v4.new_notebook()
nb.cells.append(v4.new_code_cell("some_text"))
exporter_assign = TemplateExporter(template_name="rst")
exporter_assign.raw_template = raw_template
output_assign, _ = exporter_assign.from_notebook_node(nb)
assert "blah" in output_assign
def test_raw_template_reassignment(self):
"""
Test `raw_template` reassigned after the fact on non-custom Exporter.
"""
nb = v4.new_notebook()
nb.cells.append(v4.new_code_cell("some_text"))
exporter_reassign = TemplateExporter(template_name="rst")
exporter_reassign.raw_template = raw_template
output_reassign, _ = exporter_reassign.from_notebook_node(nb)
assert "blah" in output_reassign
exporter_reassign.raw_template = raw_template.replace("blah", "baz")
output_reassign, _ = exporter_reassign.from_notebook_node(nb)
assert "baz" in output_reassign
def test_raw_template_deassignment(self):
"""
Test `raw_template` does not overwrite template_file if deassigned after
being assigned to a non-custom Exporter.
"""
nb = v4.new_notebook()
nb.cells.append(v4.new_code_cell("some_text"))
exporter_deassign = RSTExporter()
exporter_deassign.raw_template = raw_template
output_deassign, _ = exporter_deassign.from_notebook_node(nb)
assert "blah" in output_deassign
exporter_deassign.raw_template = ""
assert exporter_deassign.template_file == "index.rst.j2"
output_deassign, _ = exporter_deassign.from_notebook_node(nb)
assert "blah" not in output_deassign
def test_raw_template_dereassignment(self):
"""
Test `raw_template` does not overwrite template_file if deassigned after
being assigned to a non-custom Exporter.
"""
nb = v4.new_notebook()
nb.cells.append(v4.new_code_cell("some_text"))
exporter_dereassign = RSTExporter()
exporter_dereassign.raw_template = raw_template
output_dereassign, _ = exporter_dereassign.from_notebook_node(nb)
assert "blah" in output_dereassign
exporter_dereassign.raw_template = raw_template.replace("blah", "baz")
output_dereassign, _ = exporter_dereassign.from_notebook_node(nb)
assert "baz" in output_dereassign
exporter_dereassign.raw_template = ""
assert exporter_dereassign.template_file == "index.rst.j2"
output_dereassign, _ = exporter_dereassign.from_notebook_node(nb)
assert "blah" not in output_dereassign
    def test_fail_to_find_template_file(self):
        # Create an exporter with an invalid template file, check that it
        # doesn't exist in the environment, then try to convert an empty
        # notebook. Failure is expected due to the nonexistent template file.
        template = "does_not_exist.tpl"
        exporter = TemplateExporter(template_file=template)
        assert template not in exporter.environment.list_templates(extensions=["tpl"])
        nb = v4.new_notebook()
        with pytest.raises(TemplateNotFound):
            out, resources = exporter.from_notebook_node(nb)
def test_exclude_code_cell(self):
no_io = {
"TemplateExporter": {
"exclude_output": True,
"exclude_input": True,
"exclude_input_prompt": False,
"exclude_output_prompt": False,
"exclude_markdown": False,
"exclude_code_cell": False,
"exclude_output_stdin": True,
}
}
c_no_io = Config(no_io)
exporter_no_io = TemplateExporter(config=c_no_io, template_name="markdown")
exporter_no_io.template_file = "index.md.j2"
nb_no_io, resources_no_io = exporter_no_io.from_filename(self._get_notebook())
assert not resources_no_io["global_content_filter"]["include_input"]
assert not resources_no_io["global_content_filter"]["include_output"]
no_code = {
"TemplateExporter": {
"exclude_output": False,
"exclude_input": False,
"exclude_input_prompt": False,
"exclude_output_prompt": False,
"exclude_markdown": False,
"exclude_code_cell": True,
"exclude_output_stdin": True,
}
}
c_no_code = Config(no_code)
exporter_no_code = TemplateExporter(config=c_no_code, template_name="markdown")
exporter_no_code.template_file = "index.md.j2"
nb_no_code, resources_no_code = exporter_no_code.from_filename(self._get_notebook())
assert not resources_no_code["global_content_filter"]["include_code"]
assert nb_no_io == nb_no_code
def test_exclude_input_prompt(self):
no_input_prompt = {
"TemplateExporter": {
"exclude_output": False,
"exclude_input": False,
"exclude_input_prompt": True,
"exclude_output_prompt": False,
"exclude_markdown": False,
"exclude_code_cell": False,
"exclude_output_stdin": True,
}
}
c_no_input_prompt = Config(no_input_prompt)
exporter_no_input_prompt = MarkdownExporter(config=c_no_input_prompt)
nb_no_input_prompt, resources_no_input_prompt = exporter_no_input_prompt.from_filename(
self._get_notebook()
)
assert not resources_no_input_prompt["global_content_filter"]["include_input_prompt"]
assert "# In[" not in nb_no_input_prompt
def test_exclude_markdown(self):
no_md = {
"TemplateExporter": {
"exclude_output": False,
"exclude_input": False,
"exclude_input_prompt": False,
"exclude_output_prompt": False,
"exclude_markdown": True,
"exclude_code_cell": False,
"exclude_output_stdin": True,
}
}
c_no_md = Config(no_md)
exporter_no_md = TemplateExporter(config=c_no_md, template_name="python")
exporter_no_md.template_file = "index.py.j2"
nb_no_md, resources_no_md = exporter_no_md.from_filename(self._get_notebook())
assert not resources_no_md["global_content_filter"]["include_markdown"]
assert "First import NumPy and Matplotlib" not in nb_no_md
def test_exclude_output_prompt(self):
no_output_prompt = {
"TemplateExporter": {
"exclude_output": False,
"exclude_input": False,
"exclude_input_prompt": False,
"exclude_output_prompt": True,
"exclude_markdown": False,
"exclude_code_cell": False,
"exclude_output_stdin": True,
}
}
c_no_output_prompt = Config(no_output_prompt)
exporter_no_output_prompt = HTMLExporter(config=c_no_output_prompt)
nb_no_output_prompt, resources_no_output_prompt = exporter_no_output_prompt.from_filename(
self._get_notebook()
)
assert not resources_no_output_prompt["global_content_filter"]["include_output_prompt"]
assert "Out[1]" not in nb_no_output_prompt
def test_exclude_output_stdin(self):
no_output_stdin = {
"TemplateExporter": {
"exclude_output": False,
"exclude_input": False,
"exclude_input_prompt": False,
"exclude_output_prompt": True,
"exclude_markdown": False,
"exclude_code_cell": False,
"exclude_output_stdin": True,
}
}
c_no_output_stdin = Config(no_output_stdin)
exporter_no_output_prompt = HTMLExporter(config=c_no_output_stdin)
nb_no_output_stdin, resources_no_output_stdin = exporter_no_output_prompt.from_filename(
self._get_notebook("notebook3.ipynb")
)
assert not resources_no_output_stdin["global_content_filter"]["include_output_stdin"]
assert "test input: input value" not in nb_no_output_stdin
def test_include_output_stdin(self):
output_stdin = {
"TemplateExporter": {
"exclude_output": False,
"exclude_input": False,
"exclude_input_prompt": False,
"exclude_output_prompt": True,
"exclude_markdown": False,
"exclude_code_cell": False,
"exclude_output_stdin": False,
}
}
c_output_stdin = Config(output_stdin)
exporter_output_stdin = HTMLExporter(config=c_output_stdin)
nb_output_stdin, resources_output_stdin = exporter_output_stdin.from_filename(
self._get_notebook("notebook3.ipynb")
)
assert resources_output_stdin["global_content_filter"]["include_output_stdin"]
assert "test input: input value" in nb_output_stdin
def test_remove_elements_with_tags(self):
conf = Config(
{
"TagRemovePreprocessor": {
"remove_cell_tags": ["remove_cell"],
"remove_all_outputs_tags": ["remove_output"],
"remove_input_tags": ["remove_input"],
},
}
)
exporter = MarkdownExporter(config=conf)
nb, resources = exporter.from_filename(self._get_notebook())
assert "hist(evs.real)" not in nb
assert "cell is just markdown testing whether" not in nb
assert "(100,)" not in nb
def _make_exporter(self, config=None):
exporter = SampleExporter(config=config)
return exporter

View File

@ -0,0 +1,47 @@
"""Tests for the latex preprocessor"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from unittest.mock import patch
import pytest
from ..webpdf import WebPDFExporter
from .base import ExportersTestsBase
class TestWebPDFExporter(ExportersTestsBase):
    """Contains test functions for webpdf.py"""

    exporter_class = WebPDFExporter

    @pytest.mark.network
    def test_export(self):
        """
        Can a TemplateExporter export something?
        """
        # Downloading Chromium requires network access, hence the marker.
        (output, resources) = WebPDFExporter(allow_chromium_download=True).from_filename(
            self._get_notebook()
        )
        assert len(output) > 0

    @patch("pyppeteer.util.check_chromium", return_value=False)
    def test_webpdf_without_chromium(self, mock_check_chromium):
        """
        Generate PDFs if chromium not present?
        """
        # With downloads disallowed and no chromium detected, the exporter
        # must refuse to run rather than fail later inside pyppeteer.
        with pytest.raises(RuntimeError):
            WebPDFExporter(allow_chromium_download=False).from_filename(self._get_notebook())

    def test_webpdf_without_pyppeteer(self):
        """
        Generate PDFs if chromium not present?
        """
        with pytest.raises(RuntimeError):
            exporter = WebPDFExporter()
            with open(self._get_notebook(), encoding="utf-8") as f:
                # NOTE(review): from_file appears to return an
                # (output, resources) tuple that is then fed straight into
                # from_notebook_node — confirm this is intentional.
                nb = exporter.from_file(f, resources={})
            # Have to do this as the very last action as traitlets do dynamic importing often
            with patch("builtins.__import__", side_effect=ModuleNotFoundError("Fake missing")):
                exporter.from_notebook_node(nb)

View File

@ -0,0 +1,159 @@
"""Export to PDF via a headless browser"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import asyncio
import concurrent.futures
import os
import tempfile
from traitlets import Bool, default
from .html import HTMLExporter
class WebPDFExporter(HTMLExporter):
    """Writer designed to write to PDF files.

    This inherits from :class:`HTMLExporter`. It creates the HTML using the
    template machinery, and then runs pyppeteer to create a pdf.
    """

    export_from_notebook = "PDF via HTML"

    allow_chromium_download = Bool(
        False,
        help="Whether to allow downloading Chromium if no suitable version is found on the system.",
    ).tag(config=True)

    paginate = Bool(
        True,
        help="""
        Split generated notebook into multiple pages.

        If False, a PDF with one long page will be generated.

        Set to True to match behavior of LaTeX based PDF generator
        """,
    ).tag(config=True)

    # The intermediate artifact is HTML; the output extension is rewritten
    # to ".pdf" in from_notebook_node below.
    output_mimetype = "text/html"

    @default("file_extension")
    def _file_extension_default(self):
        return ".html"

    @default("template_name")
    def _template_name_default(self):
        return "webpdf"

    disable_sandbox = Bool(
        False,
        help="""
        Disable chromium security sandbox when converting to PDF.

        WARNING: This could cause arbitrary code execution in specific circumstances,
        where JS in your notebook can execute serverside code! Please use with
        caution.

        ``https://github.com/puppeteer/puppeteer/blob/main@%7B2020-12-14T17:22:24Z%7D/docs/troubleshooting.md#setting-up-chrome-linux-sandbox``
        has more information.

        This is required for webpdf to work inside most container environments.
        """,
    ).tag(config=True)

    def _check_launch_reqs(self):
        """Verify pyppeteer and chromium are usable; return pyppeteer's ``launch``.

        Raises
        ------
        RuntimeError
            If pyppeteer is not installed, or no chromium executable is
            available and downloading one is not allowed.
        """
        try:
            from pyppeteer import launch
            from pyppeteer.util import check_chromium
        except ModuleNotFoundError as e:
            raise RuntimeError(
                "Pyppeteer is not installed to support Web PDF conversion. "
                "Please install `nbconvert[webpdf]` to enable."
            ) from e
        if not self.allow_chromium_download and not check_chromium():
            raise RuntimeError(
                "No suitable chromium executable found on the system. "
                "Please use '--allow-chromium-download' to allow downloading one."
            )
        return launch

    def run_pyppeteer(self, html):
        """Render ``html`` in headless chromium and return the PDF bytes."""

        async def main(temp_file):
            args = ["--no-sandbox"] if self.disable_sandbox else []
            browser = await self._check_launch_reqs()(
                handleSIGINT=False, handleSIGTERM=False, handleSIGHUP=False, args=args
            )
            page = await browser.newPage()
            await page.emulateMedia("screen")
            await page.waitFor(100)
            await page.goto(f"file://{temp_file.name}", waitUntil="networkidle0")
            await page.waitFor(100)

            pdf_params = {"printBackground": True}
            if not self.paginate:
                # Floating point precision errors cause the printed
                # PDF from spilling over a new page by a pixel fraction.
                dimensions = await page.evaluate(
                    """() => {
                const rect = document.body.getBoundingClientRect();
                return {
                    width: Math.ceil(rect.width) + 1,
                    height: Math.ceil(rect.height) + 1,
                }
            }"""
                )
                width = dimensions["width"]
                height = dimensions["height"]
                # 200 inches is the maximum size for Adobe Acrobat Reader.
                pdf_params.update(
                    {
                        "width": min(width, 200 * 72),
                        "height": min(height, 200 * 72),
                    }
                )
            pdf_data = await page.pdf(pdf_params)

            await browser.close()
            return pdf_data

        # Create a temporary file to pass the HTML code to Chromium:
        # Unfortunately, tempfile on Windows does not allow for an already open
        # file to be opened by a separate process. So we must close it first
        # before calling Chromium. We also specify delete=False to ensure the
        # file is not deleted after closing (the default behavior).
        temp_file = tempfile.NamedTemporaryFile(suffix=".html", delete=False)
        with temp_file:
            temp_file.write(html.encode("utf-8"))

        try:
            # TODO: when dropping Python 3.6, use
            # pdf_data = pool.submit(asyncio.run, main(temp_file)).result()
            def run_coroutine(coro):
                # Run the coroutine on a fresh event loop in the worker
                # thread; the calling thread's loop (if any) may be in use.
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
                return loop.run_until_complete(coro)

            # Use the executor as a context manager so its worker thread is
            # always shut down (the original leaked the pool).
            with concurrent.futures.ThreadPoolExecutor() as pool:
                pdf_data = pool.submit(run_coroutine, main(temp_file)).result()
        finally:
            # Ensure the file is deleted even if pyppeteer raises an exception
            os.unlink(temp_file.name)

        return pdf_data

    def from_notebook_node(self, nb, resources=None, **kw):
        """Convert a notebook node to PDF bytes plus updated resources."""
        # Fail fast before doing the (expensive) HTML conversion.
        self._check_launch_reqs()
        html, resources = super().from_notebook_node(nb, resources=resources, **kw)

        self.log.info("Building PDF")
        pdf_data = self.run_pyppeteer(html)
        self.log.info("PDF successfully created")

        # convert output extension to pdf
        # the writer above required it to be html
        resources["output_extension"] = ".pdf"

        return pdf_data, resources

View File

@ -0,0 +1,11 @@
from nbconvert.utils.text import indent
from .ansi import *
from .citation import *
from .datatypefilter import *
from .highlight import *
from .latex import *
from .markdown import *
from .metadata import *
from .pandoc import *
from .strings import *

View File

@ -0,0 +1,292 @@
"""Filters for processing ANSI colors within Jinja templates."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import re
import markupsafe
__all__ = ["strip_ansi", "ansi2html", "ansi2latex"]
# Matches a CSI escape sequence: ESC '[', parameter bytes, then one final
# byte in the range '@' through '~'.
_ANSI_RE = re.compile("\x1b\\[(.*?)([@-~])")

# CSS-class / color names for the 8 standard and 8 intense ANSI colors,
# indexed by ANSI color number.
_ANSI_COLORS = (
    "ansi-black",
    "ansi-red",
    "ansi-green",
    "ansi-yellow",
    "ansi-blue",
    "ansi-magenta",
    "ansi-cyan",
    "ansi-white",
    "ansi-black-intense",
    "ansi-red-intense",
    "ansi-green-intense",
    "ansi-yellow-intense",
    "ansi-blue-intense",
    "ansi-magenta-intense",
    "ansi-cyan-intense",
    "ansi-white-intense",
)


def strip_ansi(source):
    """
    Remove ANSI escape codes from text.

    Parameters
    ----------
    source : str
        Source to remove the ANSI from
    """
    return re.sub(_ANSI_RE, "", source)
def ansi2html(text):
    """
    Convert ANSI colors to HTML colors.

    Parameters
    ----------
    text : unicode
        Text containing ANSI colors to convert to HTML
    """
    # HTML-escape first so the converter's span tags are the only markup.
    escaped = markupsafe.escape(text)
    return _ansi2anything(escaped, _htmlconverter)
def ansi2latex(text):
    """
    Convert ANSI-colored text to LaTeX markup.

    Parameters
    ----------
    text : unicode
        Text containing ANSI colors to convert to LaTeX
    """
    # No escaping needed here; LaTeX escaping is handled elsewhere.
    return _ansi2anything(text, _latexconverter)
def _htmlconverter(fg, bg, bold, underline, inverse):
"""
Return start and end tags for given foreground/background/bold/underline.
"""
if (fg, bg, bold, underline, inverse) == (None, None, False, False, False):
return "", ""
classes = []
styles = []
if inverse:
fg, bg = bg, fg
if isinstance(fg, int):
classes.append(_ANSI_COLORS[fg] + "-fg")
elif fg:
styles.append("color: rgb({},{},{})".format(*fg))
elif inverse:
classes.append("ansi-default-inverse-fg")
if isinstance(bg, int):
classes.append(_ANSI_COLORS[bg] + "-bg")
elif bg:
styles.append("background-color: rgb({},{},{})".format(*bg))
elif inverse:
classes.append("ansi-default-inverse-bg")
if bold:
classes.append("ansi-bold")
if underline:
classes.append("ansi-underline")
starttag = "<span"
if classes:
starttag += ' class="' + " ".join(classes) + '"'
if styles:
starttag += ' style="' + "; ".join(styles) + '"'
starttag += ">"
return starttag, "</span>"
def _latexconverter(fg, bg, bold, underline, inverse):
"""
Return start and end markup given foreground/background/bold/underline.
"""
if (fg, bg, bold, underline, inverse) == (None, None, False, False, False):
return "", ""
starttag, endtag = "", ""
if inverse:
fg, bg = bg, fg
if isinstance(fg, int):
starttag += r"\textcolor{" + _ANSI_COLORS[fg] + "}{"
endtag = "}" + endtag
elif fg:
# See http://tex.stackexchange.com/a/291102/13684
starttag += r"\def\tcRGB{\textcolor[RGB]}\expandafter"
starttag += r"\tcRGB\expandafter{\detokenize{%s,%s,%s}}{" % fg
endtag = "}" + endtag
elif inverse:
starttag += r"\textcolor{ansi-default-inverse-fg}{"
endtag = "}" + endtag
if isinstance(bg, int):
starttag += r"\setlength{\fboxsep}{0pt}"
starttag += r"\colorbox{" + _ANSI_COLORS[bg] + "}{"
endtag = r"\strut}" + endtag
elif bg:
starttag += r"\setlength{\fboxsep}{0pt}"
# See http://tex.stackexchange.com/a/291102/13684
starttag += r"\def\cbRGB{\colorbox[RGB]}\expandafter"
starttag += r"\cbRGB\expandafter{\detokenize{%s,%s,%s}}{" % bg
endtag = r"\strut}" + endtag
elif inverse:
starttag += r"\setlength{\fboxsep}{0pt}"
starttag += r"\colorbox{ansi-default-inverse-bg}{"
endtag = r"\strut}" + endtag
if bold:
starttag += r"\textbf{"
endtag = "}" + endtag
if underline:
starttag += r"\underline{"
endtag = "}" + endtag
return starttag, endtag
def _ansi2anything(text, converter):
    r"""
    Convert ANSI colors to HTML or LaTeX.

    See https://en.wikipedia.org/wiki/ANSI_escape_code

    Accepts codes like '\x1b[32m' (red) and '\x1b[1;32m' (bold, red).

    Non-color escape sequences (not ending with 'm') are filtered out.

    Ideally, this should have the same behavior as the function
    fixConsole() in notebook/notebook/static/base/js/utils.js.
    """
    # Current SGR state, carried from one text chunk to the next.
    fg, bg = None, None
    bold = False
    underline = False
    inverse = False
    numbers = []  # SGR parameter codes parsed from the last escape sequence
    out = []
    while text:
        m = _ANSI_RE.search(text)
        if m:
            if m.group(2) == "m":
                try:
                    # Empty code is same as code 0
                    numbers = [int(n) if n else 0 for n in m.group(1).split(";")]
                except ValueError:
                    pass  # Invalid color specification
            else:
                pass  # Not a color code
            # Text before the escape is emitted with the *current* state;
            # the codes just parsed only affect the following chunk.
            chunk, text = text[: m.start()], text[m.end() :]
        else:
            chunk, text = text, ""
        if chunk:
            # Bold promotes the 8 basic palette foregrounds (0-7) to their
            # "intense" variants (8-15).
            starttag, endtag = converter(
                fg + 8 if bold and fg in range(8) else fg, bg, bold, underline, inverse
            )
            out.append(starttag)
            out.append(chunk)
            out.append(endtag)
        # Fold the pending SGR codes into the state for the next chunk.
        while numbers:
            n = numbers.pop(0)
            if n == 0:
                # Code 0 (same as empty code): reset everything
                fg = bg = None
                bold = underline = inverse = False
            elif n == 1:
                bold = True
            elif n == 4:
                underline = True
            elif n == 5:
                # Code 5: blinking
                bold = True
            elif n == 7:
                inverse = True
            elif n in (21, 22):
                bold = False
            elif n == 24:
                underline = False
            elif n == 27:
                inverse = False
            elif 30 <= n <= 37:
                fg = n - 30
            elif n == 38:
                # Extended foreground (256-color or truecolor); consumes
                # additional parameters from `numbers`.
                try:
                    fg = _get_extended_color(numbers)
                except ValueError:
                    numbers.clear()
            elif n == 39:
                fg = None
            elif 40 <= n <= 47:
                bg = n - 40
            elif n == 48:
                # Extended background, same encoding as code 38.
                try:
                    bg = _get_extended_color(numbers)
                except ValueError:
                    numbers.clear()
            elif n == 49:
                bg = None
            elif 90 <= n <= 97:
                # Bright foreground colors.
                fg = n - 90 + 8
            elif 100 <= n <= 107:
                # Bright background colors.
                bg = n - 100 + 8
            else:
                pass  # Unknown codes are ignored
    return "".join(out)
def _get_extended_color(numbers):
n = numbers.pop(0)
if n == 2 and len(numbers) >= 3:
# 24-bit RGB
r = numbers.pop(0)
g = numbers.pop(0)
b = numbers.pop(0)
if not all(0 <= c <= 255 for c in (r, g, b)):
raise ValueError()
elif n == 5 and len(numbers) >= 1:
# 256 colors
idx = numbers.pop(0)
if idx < 0:
raise ValueError()
elif idx < 16:
# 16 default terminal colors
return idx
elif idx < 232:
# 6x6x6 color cube, see http://stackoverflow.com/a/27165165/500098
r = (idx - 16) // 36
r = 55 + r * 40 if r > 0 else 0
g = ((idx - 16) % 36) // 6
g = 55 + g * 40 if g > 0 else 0
b = (idx - 16) % 6
b = 55 + b * 40 if b > 0 else 0
elif idx < 256:
# grayscale, see http://stackoverflow.com/a/27165165/500098
r = g = b = (idx - 232) * 10 + 8
else:
raise ValueError()
else:
raise ValueError()
return r, g, b

View File

@ -0,0 +1,109 @@
"""Citation handling for LaTeX output."""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from html.parser import HTMLParser
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
__all__ = ["citation2latex"]
def citation2latex(s):
    """Parse citations in Markdown cells.

    This looks for HTML tags having a data attribute names ``data-cite``
    and replaces it by the call to LaTeX cite command. The transformation
    looks like this::

        <cite data-cite="granger">(Granger, 2013)</cite>

    Becomes ::

        \\cite{granger}

    Any HTML tag can be used, which allows the citations to be formatted
    in HTML in any manner.
    """
    parser = CitationParser()
    parser.feed(s)
    parser.close()

    # Stitch the output back together: text before each citation, then the
    # \cite command, resuming after the citation's closing tag (an unclosed
    # citation has no end offset, flagged by cursor == -1).
    pieces = []
    cursor = 0
    for cite in parser.citelist:
        pieces.append(s[cursor : cite[1]])
        pieces.append("\\cite{%s}" % cite[0])
        cursor = cite[2] if len(cite) == 3 else -1
    if cursor != -1:
        pieces.append(s[cursor:])
    return "".join(pieces)
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class CitationParser(HTMLParser):
    """Citation Parser

    Scans HTML for tags with a ``data-cite`` attribute and records, for each,
    ``[citation_key, start_offset]`` plus — once the matching close tag is
    seen — the end offset, in ``citelist``.

    Inherites from HTMLParser, overrides:
     - handle_starttag
     - handle_endtag
    """

    # depth counter of currently open citation tags
    opentags = None
    # accumulated [key, start, end] entries
    citelist = None
    # tag name of the citation element currently being tracked
    citetag = None

    def __init__(self):
        self.citelist = []
        self.opentags = 0
        HTMLParser.__init__(self)

    def get_offset(self):
        """Translate the parser's (line, column) position into a string offset."""
        line, column = self.getpos()
        offset = 0
        for _ in range(line - 1):
            offset = self.data.find("\n", offset) + 1
        return offset + column

    def handle_starttag(self, tag, attrs):
        # Start tracking when a data-cite attribute appears outside any
        # active citation; otherwise count nested tags of the same name.
        if self.opentags == 0 and attrs:
            for name, value in attrs:
                if name.lower() == "data-cite":
                    self.citetag = tag
                    self.opentags = 1
                    self.citelist.append([value, self.get_offset()])
                    return

        if tag == self.citetag:
            # found an open citation tag but not the starting one
            self.opentags += 1

    def handle_endtag(self, tag):
        if tag == self.citetag:
            if self.opentags == 1:
                # Closing tag of the tracked citation: record its end offset,
                # which includes "</tag>" itself (len(tag) + 3 delimiters).
                self.citelist[-1].append(self.get_offset() + len(tag) + 3)
            self.opentags -= 1

    def feed(self, data):
        # Keep the raw input so get_offset can map positions back to it.
        self.data = data
        HTMLParser.feed(self, data)

View File

@ -0,0 +1,46 @@
"""Filter used to select the first preferred output format available.
The filter contained in the file allows the converter templates to select
the output format that is most valuable to the active export format. The
value of the different formats is set via
NbConvertBase.display_data_priority
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
from warnings import warn
from ..utils.base import NbConvertBase
__all__ = ["DataTypeFilter"]
class DataTypeFilter(NbConvertBase):
    """Returns the preferred display format"""

    def __call__(self, output):
        """Return the first available format in the priority.

        Produces a UserWarning if no compatible mimetype is found.

        `output` is dict with structure {mimetype-of-element: value-of-element}
        """
        available = [fmt for fmt in self.display_data_priority if fmt in output]
        if available:
            # Only the single highest-priority match is returned.
            return available[:1]
        warn(
            "Your element with mimetype(s) {mimetypes}"
            " is not able to be represented.".format(mimetypes=output.keys())
        )
        return []

View File

@ -0,0 +1,41 @@
#!/usr/bin/env python3
"""A pandoc filter used in converting notebooks to Latex.
Converts links between notebooks to Latex cross-references.
"""
import re
from pandocfilters import RawInline, applyJSONFilters, stringify
def resolve_references(source):
    """
    This applies the resolve_one_reference to the text passed in via the source argument.

    This expects content in the form of a string encoded JSON object as represented
    internally in ``pandoc``.
    """
    actions = [resolve_one_reference]
    return applyJSONFilters(actions, source)
def resolve_one_reference(key, val, fmt, meta):
    """
    This takes a tuple of arguments that are compatible with ``pandocfilters.walk()`` that
    allows identifying hyperlinks in the document and transforms them into valid LaTeX
    \\hyperref{} calls so that linking to headers between cells is possible.

    See the documentation in ``pandocfilters.walk()`` for further information on the meaning
    and specification of ``key``, ``val``, ``fmt``, and ``meta``.
    """
    if key != "Link":
        # Other elements are returned unchanged (None = keep as-is).
        return None
    text = stringify(val[1])
    target = val[2][0]
    match = re.match(r"#(.+)$", target)
    if match is None:
        return None
    # pandoc automatically makes labels for headings.
    label = match.group(1).lower()
    label = re.sub(r"[^\w-]+", "", label)  # Keep only word chars and hyphens
    text = re.sub(r"_", r"\_", text)  # Escape underscores in display text
    return RawInline("tex", rf"\hyperref[{label}]{{{text}}}")

View File

@ -0,0 +1,181 @@
"""
Module containing filter functions that allow code to be highlighted
from within Jinja templates.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
# pygments must not be imported at the module level
# because errors should be raised at runtime if it's actually needed,
# not import time, when it may not be needed.
from warnings import warn
from traitlets import observe
from traitlets.config import Dict
from nbconvert.utils.base import NbConvertBase
MULTILINE_OUTPUTS = ["text", "html", "svg", "latex", "javascript", "json"]
__all__ = ["Highlight2HTML", "Highlight2Latex"]
class Highlight2HTML(NbConvertBase):
    """Jinja filter: syntax-highlight cell source as HTML via pygments."""

    # Forwarded verbatim to pygments' HtmlFormatter.
    extra_formatter_options = Dict(
        {},
        help="""
        Extra set of options to control how code is highlighted.
        Passed through to the pygments' HtmlFormatter class.
        See available list in https://pygments.org/docs/formatters/#HtmlFormatter
        """,
        config=True,
    )

    def __init__(self, pygments_lexer=None, **kwargs):
        # Default lexer when no language information is supplied.
        self.pygments_lexer = pygments_lexer or "ipython3"
        super().__init__(**kwargs)

    @observe("default_language")
    def _default_language_changed(self, change):
        # Deprecated config path kept for backward compatibility.
        warn(
            "Setting default_language in config is deprecated as of 5.0, "
            "please use language_info metadata instead."
        )
        self.pygments_lexer = change["new"]

    def __call__(self, source, language=None, metadata=None):
        """
        Return a syntax-highlighted version of the input source as html output.

        Parameters
        ----------
        source : str
            source of the cell to highlight
        language : str
            language to highlight the syntax of
        metadata : NotebookNode cell metadata
            metadata of the cell to highlight
        """
        from pygments.formatters import HtmlFormatter

        if not language:
            language = self.pygments_lexer
        return _pygments_highlight(
            source if len(source) > 0 else " ",
            # needed to help post processors:
            HtmlFormatter(cssclass=" highlight hl-" + language, **self.extra_formatter_options),
            language,
            metadata,
        )
class Highlight2Latex(NbConvertBase):
    """Jinja filter: syntax-highlight cell source as LaTeX via pygments."""

    # Forwarded verbatim to pygments' LatexFormatter.
    extra_formatter_options = Dict(
        {},
        help="""
        Extra set of options to control how code is highlighted.
        Passed through to the pygments' LatexFormatter class.
        See available list in https://pygments.org/docs/formatters/#LatexFormatter
        """,
        config=True,
    )

    def __init__(self, pygments_lexer=None, **kwargs):
        # Default lexer when no language information is supplied.
        self.pygments_lexer = pygments_lexer or "ipython3"
        super().__init__(**kwargs)

    @observe("default_language")
    def _default_language_changed(self, change):
        # Deprecated config path kept for backward compatibility.
        warn(
            "Setting default_language in config is deprecated as of 5.0, "
            "please use language_info metadata instead."
        )
        self.pygments_lexer = change["new"]

    def __call__(self, source, language=None, metadata=None, strip_verbatim=False):
        """
        Return a syntax-highlighted version of the input source as latex output.

        Parameters
        ----------
        source : str
            source of the cell to highlight
        language : str
            language to highlight the syntax of
        metadata : NotebookNode cell metadata
            metadata of the cell to highlight
        strip_verbatim : bool
            remove the Verbatim environment that pygments provides by default
        """
        from pygments.formatters import LatexFormatter

        if not language:
            language = self.pygments_lexer
        latex = _pygments_highlight(
            source, LatexFormatter(**self.extra_formatter_options), language, metadata
        )
        if strip_verbatim:
            # Drop the surrounding \begin{Verbatim}...\end{Verbatim} wrapper.
            latex = latex.replace(r"\begin{Verbatim}[commandchars=\\\{\}]" + "\n", "")  # noqa
            return latex.replace("\n\\end{Verbatim}\n", "")
        else:
            return latex
def _pygments_highlight(source, output_formatter, language="ipython", metadata=None):
    """
    Return a syntax-highlighted version of the input source

    Parameters
    ----------
    source : str
        source of the cell to highlight
    output_formatter : Pygments formatter
    language : str
        language to highlight the syntax of
    metadata : NotebookNode cell metadata
        metadata of the cell to highlight
    """
    from pygments import highlight
    from pygments.lexers import get_lexer_by_name
    from pygments.util import ClassNotFound

    # If the cell uses a magic extension language,
    # use the magic language instead.
    if language.startswith("ipython") and metadata and "magics_language" in metadata:
        language = metadata["magics_language"]
    lexer = None
    if language == "ipython2":
        try:
            from IPython.lib.lexers import IPythonLexer
        except ImportError:
            warn("IPython lexer unavailable, falling back on Python")
            language = "python"
        else:
            lexer = IPythonLexer()
    elif language == "ipython3":
        try:
            from IPython.lib.lexers import IPython3Lexer
        except ImportError:
            warn("IPython3 lexer unavailable, falling back on Python 3")
            language = "python3"
        else:
            lexer = IPython3Lexer()
    if lexer is None:
        try:
            # Fall back to pygments' registry for any other language name.
            lexer = get_lexer_by_name(language, stripall=True)
        except ClassNotFound:
            warn("No lexer found for language %r. Treating as plain text." % language)
            from pygments.lexers.special import TextLexer

            lexer = TextLexer()
    return highlight(source, lexer, output_formatter)

View File

@ -0,0 +1,61 @@
"""Latex filters.
Module of useful filters for processing Latex within Jinja latex templates.
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import re
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
# Regex-based substitutions applied after the character table below.
LATEX_RE_SUBS = ((re.compile(r"\.\.\.+"), r"{\\ldots}"),)

# Latex substitutions for escaping latex.
# see: http://stackoverflow.com/questions/16259923/how-can-i-escape-latex-special-characters-inside-django-templates
LATEX_SUBS = {
    "&": r"\&",
    "%": r"\%",
    "$": r"\$",
    "#": r"\#",
    "_": r"\_",
    "{": r"\{",
    "}": r"\}",
    "~": r"\textasciitilde{}",
    "^": r"\^{}",
    "\\": r"\textbackslash{}",
}

# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
__all__ = ["escape_latex"]


def escape_latex(text):
    """
    Escape characters that may conflict with latex.

    Parameters
    ----------
    text : str
        Text containing characters that may conflict with Latex
    """
    # Per-character table substitution first, then the regex passes
    # (currently just collapsing "..." runs into \ldots).
    escaped = "".join(LATEX_SUBS.get(character, character) for character in text)
    for pattern, replacement in LATEX_RE_SUBS:
        escaped = pattern.sub(replacement, escaped)
    return escaped

View File

@ -0,0 +1,101 @@
"""Markdown filters
This file contains a collection of utility filters for dealing with
markdown within Jinja templates.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import re
# mistune is optional at import time: if it is missing, install a stub that
# raises only when markdown2html_mistune is actually called.
try:
    from .markdown_mistune import markdown2html_mistune
except ImportError as e:
    # store in variable for Python 3
    _mistune_import_error = e

    def markdown2html_mistune(source):
        """mistune is unavailable, raise ImportError"""
        raise ImportError("markdown2html requires mistune: %s" % _mistune_import_error)
__all__ = [
"markdown2html",
"markdown2html_pandoc",
"markdown2html_mistune",
"markdown2latex",
"markdown2rst",
"markdown2asciidoc",
]
def markdown2latex(source, markup="markdown", extra_args=None):
    """
    Convert a markdown string to LaTeX via pandoc.

    This function will raise an error if pandoc is not installed.
    Any error messages generated by pandoc are printed to stderr.

    Parameters
    ----------
    source : string
        Input string, assumed to be valid markdown.
    markup : string
        Markup used by pandoc's reader
        default : pandoc extended markdown
        (see https://pandoc.org/README.html#pandocs-markdown)

    Returns
    -------
    out : string
        Output as returned by pandoc.
    """
    target_format = "latex"
    return convert_pandoc(source, markup, target_format, extra_args=extra_args)
def markdown2html_pandoc(source, extra_args=None):
    """
    Convert a markdown string to HTML via pandoc.
    """
    # Any falsy extra_args (None or empty) falls back to MathJax support.
    args = ["--mathjax"] if not extra_args else extra_args
    return convert_pandoc(source, "markdown", "html", extra_args=args)
def markdown2asciidoc(source, extra_args=None):
    """Convert a markdown string to asciidoc via pandoc

    Parameters
    ----------
    source : string
        Input string, assumed to be valid markdown.
    extra_args : list, optional
        Extra command-line arguments for pandoc
        (default: ``["--atx-headers"]``).

    Returns
    -------
    out : string
        Output as returned by pandoc, with pandoc's ``__bold__`` markup
        artifacts repaired.
    """
    extra_args = extra_args or ["--atx-headers"]
    asciidoc = convert_pandoc(source, "markdown", "asciidoc", extra_args=extra_args)
    # workaround for https://github.com/jgm/pandoc/issues/3068
    if "__" in asciidoc:
        asciidoc = re.sub(r"\b__([\w \n-]+)__([:,.\n\)])", r"_\1_\2", asciidoc)
        # urls / links: the hyphen must be escaped inside the character class;
        # the previous pattern's unescaped "-" formed the range '/'..':', so a
        # literal '-' in a link target was never matched.
        asciidoc = re.sub(r"\(__([\w/\-:.]+)__\)", r"(_\1_)", asciidoc)
    return asciidoc
# The mistune renderer is the default, because it's simple to depend on it
markdown2html = markdown2html_mistune


def markdown2rst(source, extra_args=None):
    """
    Convert a markdown string to ReST via pandoc.

    This function will raise an error if pandoc is not installed.
    Any error messages generated by pandoc are printed to stderr.

    Parameters
    ----------
    source : string
        Input string, assumed to be valid markdown.

    Returns
    -------
    out : string
        Output as returned by pandoc.
    """
    target_format = "rst"
    return convert_pandoc(source, "markdown", target_format, extra_args=extra_args)

View File

@ -0,0 +1,247 @@
"""Markdown filters with mistune
Used from markdown.py
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import base64
import mimetypes
import os
import re
from functools import partial
try:
    from html import escape

    # quote=False mirrors the old cgi.escape default: only &, < and > are
    # escaped, quote characters are left alone.
    html_escape = partial(escape, quote=False)
except ImportError:
    # Python 2
    from cgi import escape as html_escape
import bs4
import mistune
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound
from nbconvert.filters.strings import add_anchor
class InvalidNotebook(Exception):
    """Raised for malformed notebook content, e.g. a referenced attachment
    that is missing."""

    pass
class MathBlockGrammar(mistune.BlockGrammar):
    """This defines a single regex comprised of the different patterns that
    identify math content spanning multiple lines. These are used by the
    MathBlockLexer.
    """

    # Alternatives: $$...$$, \\[...\\], and \begin{env}...\end{env}
    # (with matching environment names via the backreference).
    multi_math_str = "|".join(
        [r"^\$\$.*?\$\$", r"^\\\\\[.*?\\\\\]", r"^\\begin\{([a-z]*\*?)\}(.*?)\\end\{\1\}"]
    )
    multiline_math = re.compile(multi_math_str, re.DOTALL)
class MathBlockLexer(mistune.BlockLexer):
    """This acts as a pass-through to the MathInlineLexer. It is needed in
    order to avoid other block level rules splitting math sections apart.
    """

    # Try the math rule before all of mistune's standard block rules.
    default_rules = ["multiline_math"] + mistune.BlockLexer.default_rules

    def __init__(self, rules=None, **kwargs):
        # Default to the grammar that recognizes multi-line math spans.
        if rules is None:
            rules = MathBlockGrammar()
        super().__init__(rules, **kwargs)

    def parse_multiline_math(self, m):
        """Add token to pass through multiline math."""
        self.tokens.append({"type": "multiline_math", "text": m.group(0)})
class MathInlineGrammar(mistune.InlineGrammar):
    """This defines different ways of declaring math objects that should be
    passed through to mathjax unaffected. These are used by the MathInlineLexer.
    """

    # $...$ or \\(...\\)
    inline_math = re.compile(r"^\$(.+?)\$|^\\\\\((.+?)\\\\\)", re.DOTALL)
    # $$...$$ or \\[...\\]
    block_math = re.compile(r"^\$\$(.*?)\$\$|^\\\\\[(.*?)\\\\\]", re.DOTALL)
    # \begin{env}...\end{env} with matching environment names
    latex_environment = re.compile(r"^\\begin\{([a-z]*\*?)\}(.*?)\\end\{\1\}", re.DOTALL)
    # Plain text run, stopping before any character that can start markup
    # (including "$", so the math rules above get a chance to match).
    text = re.compile(r"^[\s\S]+?(?=[\\<!\[_*`~$]|https?://| {2,}\n|$)")
class MathInlineLexer(mistune.InlineLexer):
    r"""This interprets the content of LaTeX style math objects using the rules
    defined by the MathInlineGrammar.

    In particular this grabs ``$$...$$``, ``\\[...\\]``, ``\\(...\\)``, ``$...$``,
    and ``\begin{foo}...\end{foo}`` styles for declaring mathematics. It strips
    delimiters from all these varieties, and extracts the type of environment
    in the last case (``foo`` in this example).
    """

    # Math rules run before mistune's standard inline rules.
    default_rules = [
        "block_math",
        "inline_math",
        "latex_environment",
    ] + mistune.InlineLexer.default_rules

    def __init__(self, renderer, rules=None, **kwargs):
        if rules is None:
            rules = MathInlineGrammar()
        super().__init__(renderer, rules, **kwargs)

    def output_inline_math(self, m):
        # Whichever alternative matched ($...$ or \\(...\\)) supplies the body.
        return self.renderer.inline_math(m.group(1) or m.group(2))

    def output_block_math(self, m):
        # Either alternative may match; fall back to "" for empty "$$$$".
        return self.renderer.block_math(m.group(1) or m.group(2) or "")

    def output_latex_environment(self, m):
        # group(1) is the environment name, group(2) its body.
        return self.renderer.latex_environment(m.group(1), m.group(2))
class MarkdownWithMath(mistune.Markdown):
    """Markdown parser that passes LaTeX math through via the math lexers."""

    def __init__(self, renderer, **kwargs):
        # Install the math-aware lexers unless the caller supplied their own.
        if "inline" not in kwargs:
            kwargs["inline"] = MathInlineLexer
        if "block" not in kwargs:
            kwargs["block"] = MathBlockLexer
        super().__init__(renderer, **kwargs)

    def output_multiline_math(self):
        # Hand the raw multi-line math text to the inline lexer untouched.
        return self.inline(self.token["text"])
class IPythonRenderer(mistune.Renderer):
    """HTML renderer with notebook-specific handling of code highlighting,
    math passthrough, header anchors, attachments and image embedding."""

    def block_code(self, code, lang):
        # Highlight fenced code via pygments; an unknown language falls back
        # to a plain <pre><code> block with the language line re-attached.
        if lang:
            try:
                lexer = get_lexer_by_name(lang, stripall=True)
            except ClassNotFound:
                code = lang + "\n" + code
                lang = None
        if not lang:
            return "\n<pre><code>%s</code></pre>\n" % mistune.escape(code)
        formatter = HtmlFormatter()
        return highlight(code, lexer, formatter)

    def block_html(self, html):
        # Optionally inline <img> sources as data: URLs.
        embed_images = self.options.get("embed_images", False)
        if embed_images:
            html = self._html_embed_images(html)
        return super().block_html(html)

    def inline_html(self, html):
        # Optionally inline <img> sources as data: URLs.
        embed_images = self.options.get("embed_images", False)
        if embed_images:
            html = self._html_embed_images(html)
        return super().inline_html(html)

    def header(self, text, level, raw=None):
        # Add an id + anchor link to headings unless explicitly excluded.
        html = super().header(text, level, raw=raw)
        if self.options.get("exclude_anchor_links"):
            return html
        anchor_link_text = self.options.get("anchor_link_text", "")
        return add_anchor(html, anchor_link_text=anchor_link_text)

    def escape_html(self, text):
        return html_escape(text)

    def block_math(self, text):
        # Re-emit display math with its $$ delimiters intact.
        return "$$%s$$" % self.escape_html(text)

    def latex_environment(self, name, text):
        # Re-emit a LaTeX environment verbatim (HTML-escaped).
        name = self.escape_html(name)
        text = self.escape_html(text)
        return rf"\begin{{{name}}}{text}\end{{{name}}}"

    def inline_math(self, text):
        # Re-emit inline math with its $ delimiters intact.
        return "$%s$" % self.escape_html(text)

    def image(self, src, title, text):
        """Rendering a image with title and text.

        :param src: source link of the image.
        :param title: title text of the image.
        :param text: alt text of the image.
        """
        attachments = self.options.get("attachments", {})
        attachment_prefix = "attachment:"
        embed_images = self.options.get("embed_images", False)
        if src.startswith(attachment_prefix):
            # Notebook cell attachment: replace the src with a data URL.
            name = src[len(attachment_prefix) :]
            if name not in attachments:
                raise InvalidNotebook(f"missing attachment: {name}")
            attachment = attachments[name]
            # we choose vector over raster, and lossless over lossy
            preferred_mime_types = ["image/svg+xml", "image/png", "image/jpeg"]
            for preferred_mime_type in preferred_mime_types:
                if preferred_mime_type in attachment:
                    break
            else:  # otherwise we choose the first mimetype we can find
                preferred_mime_type = list(attachment.keys())[0]
            mime_type = preferred_mime_type
            data = attachment[mime_type]
            src = "data:" + mime_type + ";base64," + data
        elif embed_images:
            base64_url = self._src_to_base64(src)
            if base64_url is not None:
                src = base64_url
        return super().image(src, title, text)

    def _src_to_base64(self, src):
        """Turn the source file into a base64 url.

        :param src: source link of the file.
        :return: the base64 url or None if the file was not found.
        """
        # Resolve relative to the notebook's directory when one is given.
        path = self.options.get("path", "")
        src_path = os.path.join(path, src)
        if not os.path.exists(src_path):
            return None
        with open(src_path, "rb") as fobj:
            mime_type = mimetypes.guess_type(src_path)[0]
            base64_data = base64.b64encode(fobj.read())
            base64_data = base64_data.replace(b"\n", b"").decode("ascii")
            return f"data:{mime_type};base64,{base64_data}"

    def _html_embed_images(self, html):
        parsed_html = bs4.BeautifulSoup(html, features="html.parser")
        imgs = parsed_html.find_all("img")
        # Replace img tags's sources by base64 dataurls
        for img in imgs:
            if "src" not in img.attrs:
                continue
            base64_url = self._src_to_base64(img.attrs["src"])
            if base64_url is not None:
                img.attrs["src"] = base64_url
        return str(parsed_html)
def markdown2html_mistune(source):
    """Convert a markdown string to HTML using mistune"""
    renderer = IPythonRenderer(escape=False)
    return MarkdownWithMath(renderer=renderer).render(source)

View File

@ -0,0 +1,17 @@
"""filters for metadata"""
def get_metadata(output, key, mimetype=None):
    """Resolve an output metadata key

    If mimetype given, resolve at mimetype level first,
    then fallback to top-level.
    Otherwise, just resolve at top-level.

    Returns None if no data found.
    """
    metadata = output.get("metadata") or {}
    # Mimetype-scoped values win over top-level ones.
    if mimetype and mimetype in metadata:
        scoped_value = metadata[mimetype].get(key)
        if scoped_value is not None:
            return scoped_value
    return metadata.get(key)

View File

@ -0,0 +1,24 @@
from nbconvert.utils.pandoc import pandoc
def convert_pandoc(source, from_format, to_format, extra_args=None):
    """Convert between any two formats using pandoc.

    This function will raise an error if pandoc is not installed.
    Any error messages generated by pandoc are printed to stderr.

    Parameters
    ----------
    source : string
        Input string, assumed to be valid in from_format.
    from_format : string
        Pandoc format of source.
    to_format : string
        Pandoc format for output.

    Returns
    -------
    out : string
        Output as returned by pandoc.
    """
    # Thin wrapper: all the work happens in nbconvert.utils.pandoc.pandoc.
    return pandoc(source, from_format, to_format, extra_args=extra_args)

View File

@ -0,0 +1,264 @@
"""String filters.
Contains a collection of useful string manipulation filters for use in Jinja
templates.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import base64
import os
import re
import textwrap
import warnings
from urllib.parse import quote
from xml.etree.ElementTree import Element
# defusedxml does safe(r) parsing of untrusted XML data
from defusedxml import ElementTree
__all__ = [
"wrap_text",
"html2text",
"add_anchor",
"strip_dollars",
"strip_files_prefix",
"comment_lines",
"get_lines",
"ipython2python",
"posix_path",
"path2url",
"add_prompts",
"ascii_only",
"prevent_list_blocks",
"strip_trailing_newline",
"text_base64",
]
def wrap_text(text, width=100):
    """
    Intelligently wrap text.

    Wrap text without breaking words if possible.

    Parameters
    ----------
    text : str
        Text to wrap.
    width : int, optional
        Number of characters to wrap to, default 100.
    """
    # Wrap each original line independently, preserving existing breaks.
    wrapped_lines = ["\n".join(textwrap.wrap(line, width)) for line in text.split("\n")]
    return "\n".join(wrapped_lines)
def html2text(element):
    """extract inner text from html

    Analog of jQuery's $(element).text()
    """
    if isinstance(element, str):
        try:
            element = ElementTree.fromstring(element)
        except Exception:
            # failed to parse, just return it unmodified
            return element

    # Element text, then each child's full text (which includes its tail).
    parts = [element.text or ""]
    parts.extend(html2text(child) for child in element)
    parts.append(element.tail or "")
    return "".join(parts)
def _convert_header_id(header_contents):
"""Convert header contents to valid id value. Takes string as input, returns string.
Note: this may be subject to change in the case of changes to how we wish to generate ids.
For use on markdown headings.
"""
# Valid IDs need to be non-empty and contain no space characters, but are otherwise arbitrary.
# However, these IDs are also used in URL fragments, which are more restrictive, so we URL
# encode any characters that are not valid in URL fragments.
return quote(header_contents.replace(" ", "-"), safe="?/:@!$&'()*+,;=")
def add_anchor(html, anchor_link_text=""):
    """Add an id and an anchor-link to an html header

    For use on markdown headings
    """
    try:
        heading = ElementTree.fromstring(html)
    except Exception:
        # failed to parse, just return it unmodified
        return html
    fragment_id = _convert_header_id(html2text(heading))
    heading.set("id", fragment_id)
    anchor = Element("a", {"class": "anchor-link", "href": "#" + fragment_id})
    try:
        # The anchor link text may itself be HTML (e.g. an image)
        anchor.append(ElementTree.fromstring(anchor_link_text))
    except Exception:
        # If we fail to parse, assume we've just got regular text
        anchor.text = anchor_link_text
    heading.append(anchor)
    return ElementTree.tostring(heading).decode(encoding="utf-8")
def add_prompts(code, first=">>> ", cont="... "):
    """Add prompts to code snippets"""
    lines = code.split("\n")
    # First line gets the primary prompt, the rest the continuation prompt.
    prompts = [first] + [cont] * (len(lines) - 1)
    return "\n".join(prompt + line for prompt, line in zip(prompts, lines))
def strip_dollars(text):
    """
    Remove leading and trailing dollar symbols from text

    Parameters
    ----------
    text : str
        Text to remove dollars from
    """
    return text.strip("$")
# src="files/..." or href='files/...' style references in HTML.
files_url_pattern = re.compile(r'(src|href)\=([\'"]?)/?files/')
# [caption](files/location) style references in markdown.
markdown_url_pattern = re.compile(r"(!?)\[(?P<caption>.*?)\]\(/?files/(?P<location>.*?)\)")


def strip_files_prefix(text):
    """
    Fix all fake URLs that start with ``files/``, stripping out the ``files/`` prefix.
    Applies to both urls (for html) and relative paths (for markdown paths).

    Parameters
    ----------
    text : str
        Text in which to replace 'src="files/real...' with 'src="real...'
    """
    without_html_refs = files_url_pattern.sub(r"\1=\2", text)
    return markdown_url_pattern.sub(r"\1[\2](\3)", without_html_refs)
def comment_lines(text, prefix="# "):
    """
    Build a Python comment line from input text.

    Parameters
    ----------
    text : str
        Text to comment out.
    prefix : str
        Character to append to the start of each line.
    """
    # Prefix every line, including the first.
    commented = [prefix + line for line in text.split("\n")]
    return "\n".join(commented)
def get_lines(text, start=None, end=None):
    """Return the slice ``[start:end]`` of the lines of *text*, re-joined.

    Parameters
    ----------
    text : str
        Text to parse lines from.
    start : int, optional
        Index of the first line to keep (negative indices supported).
    end : int, optional
        Index one past the last line to keep.
    """
    selected = text.split("\n")[start:end]
    return "\n".join(selected)
def ipython2python(code):
    """Transform IPython syntax to pure Python syntax.

    Falls back to returning *code* unchanged (with a warning) when
    IPython is not importable.

    Parameters
    ----------
    code : str
        IPython code, to be transformed to pure Python
    """
    try:
        from IPython.core.inputtransformer2 import TransformerManager
    except ImportError:
        warnings.warn(
            "IPython is needed to transform IPython syntax to pure Python."
            " Install ipython if you need this functionality."
        )
        return code
    return TransformerManager().transform_cell(code)
def posix_path(path):
    """Turn a native path into a posix-style path/to/etc path.

    Mainly for use in latex on Windows,
    where native Windows paths are not allowed.
    """
    # On posix systems the separator is already "/" — nothing to do.
    if os.path.sep == "/":
        return path
    return path.replace(os.path.sep, "/")
def path2url(path):
    """Turn a file path into a URL, percent-quoting each component."""
    quoted_parts = [quote(part) for part in path.split(os.path.sep)]
    return "/".join(quoted_parts)
def ascii_only(s):
    """Return *s* with every non-ascii character replaced by ``?``."""
    replaced = s.encode("ascii", errors="replace")
    return replaced.decode("ascii")
def prevent_list_blocks(s):
    """Escape leading list markers so latex heading cells do not start
    enumerate or itemize blocks.

    Handles an arabic point (``1.``) and the bullet markers ``-``,
    ``+`` and ``*`` at the start of the string.
    """
    out = re.sub(r"(^\s*\d*)\.", r"\1\.", s)
    # The three bullet markers share the same escape shape.
    for marker in ("\\-", "\\+", "\\*"):
        out = re.sub(r"(^\s*)" + marker, r"\1" + marker, out)
    return out
def strip_trailing_newline(text):
    """Strip a single newline from the end of *text*, if present."""
    return text[:-1] if text.endswith("\n") else text
def text_base64(text):
    """Encode *text* (utf-8) as a base64 string."""
    encoded_bytes = base64.b64encode(text.encode())
    return encoded_bytes.decode()

View File

@ -0,0 +1,71 @@
"""
Module with tests for ansi filters
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from ...tests.base import TestsBase
from ..ansi import ansi2html, ansi2latex, strip_ansi
class TestAnsi(TestsBase):
    """Contains test functions for ansi.py"""

    def test_strip_ansi(self):
        """strip_ansi test"""
        # Maps raw text containing ANSI escape sequences to the expected
        # plain text with every escape sequence removed.
        correct_outputs = {
            "\x1b[32m\x1b[1m\x1b[0;44m\x1b[38;2;255;0;255m\x1b[;m\x1b[m": "",
            "hello\x1b[000;34m": "hello",
            "he\x1b[1;33m\x1b[;36mllo": "hello",
            "\x1b[;34mhello": "hello",
            "\x1b[31mh\x1b[31me\x1b[31ml\x1b[31ml\x1b[31mo\x1b[31m": "hello",
            "hel\x1b[;00;;032;;;32mlo": "hello",
            "hello": "hello",
        }
        for inval, outval in correct_outputs.items():
            self.assertEqual(outval, strip_ansi(inval))

    def test_ansi2html(self):
        """ansi2html test"""
        # Maps ANSI-colored input to the expected HTML span markup.
        correct_outputs = {
            "\x1b[31m": "",
            "hello\x1b[34m": "hello",
            "he\x1b[32m\x1b[36mllo": 'he<span class="ansi-cyan-fg">llo</span>',
            "\x1b[1;33mhello": '<span class="ansi-yellow-intense-fg ansi-bold">hello</span>',
            "\x1b[37mh\x1b[0;037me\x1b[;0037ml\x1b[00;37ml\x1b[;;37mo": '<span class="ansi-white-fg">h</span><span class="ansi-white-fg">e</span><span class="ansi-white-fg">l</span><span class="ansi-white-fg">l</span><span class="ansi-white-fg">o</span>',
            "hel\x1b[0;32mlo": 'hel<span class="ansi-green-fg">lo</span>',
            "hellø": "hellø",
            "\x1b[1mhello\x1b[33mworld\x1b[0m": '<span class="ansi-bold">hello</span><span class="ansi-yellow-intense-fg ansi-bold">world</span>',
            "he\x1b[4mll\x1b[24mo": 'he<span class="ansi-underline">ll</span>o',
            "\x1b[35mhe\x1b[7mll\x1b[27mo": '<span class="ansi-magenta-fg">he</span><span class="ansi-default-inverse-fg ansi-magenta-bg">ll</span><span class="ansi-magenta-fg">o</span>',
            "\x1b[44mhe\x1b[7mll\x1b[27mo": '<span class="ansi-blue-bg">he</span><span class="ansi-blue-fg ansi-default-inverse-bg">ll</span><span class="ansi-blue-bg">o</span>',
        }
        for inval, outval in correct_outputs.items():
            self.assertEqual(outval, ansi2html(inval))

    def test_ansi2latex(self):
        """ansi2latex test"""
        # Maps ANSI-colored input to the expected latex markup
        # (\textcolor / \textbf / \colorbox for inverse video).
        correct_outputs = {
            "\x1b[31m": "",
            "hello\x1b[34m": "hello",
            "he\x1b[32m\x1b[36mllo": r"he\textcolor{ansi-cyan}{llo}",
            "\x1b[1;33mhello": r"\textcolor{ansi-yellow-intense}{\textbf{hello}}",
            "\x1b[37mh\x1b[0;037me\x1b[;0037ml\x1b[00;37ml\x1b[;;37mo": r"\textcolor{ansi-white}{h}\textcolor{ansi-white}{e}\textcolor{ansi-white}{l}\textcolor{ansi-white}{l}\textcolor{ansi-white}{o}",
            "hel\x1b[0;32mlo": r"hel\textcolor{ansi-green}{lo}",
            "hello": "hello",
            "hello\x1b[34mthere\x1b[mworld": r"hello\textcolor{ansi-blue}{there}world",
            "hello\x1b[mthere": "hellothere",
            "hello\x1b[01;34mthere": r"hello\textcolor{ansi-blue-intense}{\textbf{there}}",
            "hello\x1b[001;34mthere": r"hello\textcolor{ansi-blue-intense}{\textbf{there}}",
            "\x1b[1mhello\x1b[33mworld\x1b[0m": r"\textbf{hello}\textcolor{ansi-yellow-intense}{\textbf{world}}",
            "he\x1b[4mll\x1b[24mo": "he\\underline{ll}o",
            "\x1b[35mhe\x1b[7mll\x1b[27mo": r"\textcolor{ansi-magenta}{he}\textcolor{ansi-default-inverse-fg}{\setlength{\fboxsep}{0pt}\colorbox{ansi-magenta}{ll\strut}}\textcolor{ansi-magenta}{o}",
            "\x1b[44mhe\x1b[7mll\x1b[27mo": r"\setlength{\fboxsep}{0pt}\colorbox{ansi-blue}{he\strut}\textcolor{ansi-blue}{\setlength{\fboxsep}{0pt}\colorbox{ansi-default-inverse-bg}{ll\strut}}\setlength{\fboxsep}{0pt}\colorbox{ansi-blue}{o\strut}",
        }
        for inval, outval in correct_outputs.items():
            self.assertEqual(outval, ansi2latex(inval))

View File

@ -0,0 +1,120 @@
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
import pytest
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from ..citation import citation2latex
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
# Mapping of HTML/markdown input -> expected output of the citation2latex
# filter.  The cases cover inline citations, nested and malformed tags,
# extra attributes, capitalization variants, and content that trips up
# the lxml parser.
test_md = {
    """
# My Heading
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus ac magna non augue
porttitor scelerisque ac id diam <cite data-cite="granger">Granger</cite>. Mauris elit
velit, lobortis sed interdum at, vestibulum vitae libero <strong data-cite="fperez">Perez</strong>.
Lorem ipsum dolor sit amet, consectetur adipiscing elit
<em data-cite="takluyver">Thomas</em>. Quisque iaculis ligula ut ipsum mattis viverra.
<p>Here is a plain paragraph that should be unaffected. It contains simple
relations like 1<2 & 4>5.</p>
* One <cite data-cite="jdfreder">Jonathan</cite>.
* Two <cite data-cite="carreau">Matthias</cite>.
* Three <cite data-cite="ivanov">Paul</cite>.
""": r"""
# My Heading
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus ac magna non augue
porttitor scelerisque ac id diam \cite{granger}. Mauris elit
velit, lobortis sed interdum at, vestibulum vitae libero \cite{fperez}.
Lorem ipsum dolor sit amet, consectetur adipiscing elit
\cite{takluyver}. Quisque iaculis ligula ut ipsum mattis viverra.
<p>Here is a plain paragraph that should be unaffected. It contains simple
relations like 1<2 & 4>5.</p>
* One \cite{jdfreder}.
* Two \cite{carreau}.
* Three \cite{ivanov}.
""",
    # No citations
    r"""The quick brown fox jumps over the lazy dog.""": r"""The quick brown fox jumps over the lazy dog.""",
    # Simple inline
    r"""Foo <cite data-cite=asdf>Text</cite> bar""": r"""Foo \cite{asdf} bar""",
    # Multiline
    r"""<cite data-cite=ewqr>Text
</cite>Foo""": r"""\cite{ewqr}Foo""",
    # Nested tags
    r"""<div><div data-cite=Foo><div>Text</div></div></div> Bar""": r"""<div>\cite{Foo}</div> Bar""",
    # Including Maths
    r"""Foo $3*2*1$ <div data-cite=Foo>Text</div> Bar""": r"""Foo $3*2*1$ \cite{Foo} Bar""",
    # Missing end tag
    r"""<cite data-cite=asdf>Test Foo""": r"""\cite{asdf}""",
    r"""<cite data-cite=asdf><cite>Test Foo""": r"""\cite{asdf}""",
    r"""<cite data-cite=asdf><cite>Test</cite> Foo""": r"""\cite{asdf}""",
    # Multiple arguments
    r"""<cite width=qwer data-cite=asdf>Test</cite> Foo""": r"""\cite{asdf} Foo""",
    # Wrong capitalization
    r"""<CITE data-cite=asdf>Test</cite> Foo""": r"""\cite{asdf} Foo""",
    r"""<cite DATA-CITE=asdf>Test</cite> Foo""": r"""\cite{asdf} Foo""",
    # Wrong end tag
    r"""<asd data-cite=wer> ksjfs </asdf> sdf ds """: r"""\cite{wer}""",
    r"""<asd data-cite=wer>""": r"""\cite{wer}""",
    # Invalid tag names
    r"""<frog> <foo data-cite=wer></foo>""": r"""<frog> \cite{wer}""",
    # Non-nested tags
    r"""<strong> <h1> <cite data-cite=asdf></cite>Test</strong> Foo </h1>""": r"""<strong> <h1> \cite{asdf}Test</strong> Foo </h1>""",
    # LXML errors
    r"""Foo
\begin{eqnarray}
1 & <cite data-cite=bar>bar1</cite> \\
3 & 4 \\
\end{eqnarray}""": r"""Foo
\begin{eqnarray}
1 & \cite{bar} \\
3 & 4 \\
\end{eqnarray}""",
    r"""
1<2 is true, but 3>4 is false.
$1<2$ is true, but $3>4$ is false.
1<2 it is even worse if it is alone in a line.""": r"""
1<2 is true, but 3>4 is false.
$1<2$ is true, but $3>4$ is false.
1<2 it is even worse if it is alone in a line.""",
    r"""
1 < 2 is true, but 3 > 4 is false
$1 < 2$ is true, but $3 > 4$ is false
1 < 2 it is even worse if it is alone in a line.
""": r"""
1 < 2 is true, but 3 > 4 is false
$1 < 2$ is true, but $3 > 4$ is false
1 < 2 it is even worse if it is alone in a line.
""",
}
@pytest.mark.parametrize(
    # dict.items() already yields (input, expected) 2-tuples, so the
    # identity comprehension that was here was redundant.
    ["in_arg", "out_arg"],
    list(test_md.items()),
)
def test_citation2latex(in_arg, out_arg):
    """Are citations parsed properly?"""
    assert citation2latex(in_arg) == out_arg

View File

@ -0,0 +1,44 @@
"""Module with tests for DataTypeFilter"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import pytest
from ...tests.base import TestsBase
from ..datatypefilter import DataTypeFilter
class TestDataTypeFilter(TestsBase):
    """Tests for the DataTypeFilter in datatypefilter.py."""

    def test_constructor(self):
        """A DataTypeFilter instance can be created without arguments."""
        DataTypeFilter()

    def test_junk_types(self):
        """The filter picks a useful mimetype out of a dict with junk keys."""
        dtype_filter = DataTypeFilter()
        selected = dtype_filter({"hair": "1", "water": 2, "image/png": 3, "rock": 4.0})
        assert "image/png" in selected
        pdf_bundle = {
            "application/pdf": "file_path",
            "hair": 2,
            "water": "yay",
            "png": "not a png",
            "rock": "is a rock",
        }
        assert "application/pdf" in dtype_filter(pdf_bundle)
        # A bundle with only junk keys yields [] and emits a warning.
        junk_only = {"hair": "this is not", "water": "going to return anything", "rock": "or is it"}
        with pytest.warns(UserWarning):
            self.assertEqual(dtype_filter(junk_only), [])

    def test_null(self):
        """An empty bundle yields [] (with a warning) rather than failing."""
        dtype_filter = DataTypeFilter()
        with pytest.warns(UserWarning):
            self.assertEqual(dtype_filter({}), [])

View File

@ -0,0 +1,82 @@
"""
Module with tests for Highlight
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import xml
from ...tests.base import TestsBase
from ..highlight import Highlight2HTML, Highlight2Latex
# -----------------------------------------------------------------------------
# Class
# -----------------------------------------------------------------------------
# Shared filter instances used by the tests below; the first two use the
# default lexer, the third overrides it with ruby.
highlight2html = Highlight2HTML()
highlight2latex = Highlight2Latex()
highlight2html_ruby = Highlight2HTML(pygments_lexer="ruby")
class TestHighlight(TestsBase):
    """Contains test functions for highlight.py"""

    # Hello world test, magics test, blank string test
    tests = [
        """
#Hello World Example
import foo
def say(text):
    foo.bar(text)
end
say('Hello World!')
""",
        """
%%pylab
plot(x,y, 'r')
""",
    ]

    # For each source snippet above, key tokens that must survive
    # highlighting and appear in the rendered output.
    tokens = [["Hello World Example", "say", "text", "import", "def"], ["pylab", "plot"]]

    def test_highlight2html(self):
        """highlight2html test"""
        for index, test in enumerate(self.tests):
            self._try_highlight(highlight2html, test, self.tokens[index])

    def test_highlight2latex(self):
        """highlight2latex test"""
        for index, test in enumerate(self.tests):
            self._try_highlight(highlight2latex, test, self.tokens[index])

    def test_parse_html_many_lang(self):
        # The same snippet highlighted as python vs ruby should yield
        # different keyword-token sets ("end" is a keyword only in ruby).
        ht = highlight2html(self.tests[0])
        rb = highlight2html_ruby(self.tests[0])
        for lang, tkns in [(ht, ("def",)), (rb, ("def", "end"))]:
            root = xml.etree.ElementTree.fromstring(lang)
            self.assertEqual(self._extract_tokens(root, "k"), set(tkns))

    def _extract_tokens(self, root, cls):
        # Collect the text of every element carrying the given pygments
        # css class (e.g. "k" for keywords).
        return set(map(lambda x: x.text, root.findall(".//*[@class='" + cls + "']")))

    def _try_highlight(self, method, test, tokens):
        """Try highlighting source, look for key tokens"""
        results = method(test)
        for token in tokens:
            assert token in results

View File

@ -0,0 +1,47 @@
"""
Module with tests for Latex
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from ...tests.base import TestsBase
from ..latex import escape_latex
# -----------------------------------------------------------------------------
# Class
# -----------------------------------------------------------------------------
class TestLatex(TestsBase):
    """Tests for the escape_latex filter."""

    def test_escape_latex(self):
        """escape_latex test"""
        cases = [
            (r"How are \you doing today?", r"How are \textbackslash{}you doing today?"),
            (
                r"\escapechar=`\A\catcode`\|=0 |string|foo",
                r"\textbackslash{}escapechar=`\textbackslash{}A\textbackslash{}catcode`\textbackslash{}|=0 |string|foo",
            ),
            (
                r"# $ % & ~ _ ^ \ { }",
                r"\# \$ \% \& \textasciitilde{} \_ \^{} \textbackslash{} \{ \}",
            ),
            ("...", r"{\ldots}"),
            ("", ""),
        ]
        for raw, escaped in cases:
            self._try_escape_latex(raw, escaped)

    def _try_escape_latex(self, test, result):
        """Assert that escaping *test* produces exactly *result*."""
        self.assertEqual(escape_latex(test), result)

View File

@ -0,0 +1,254 @@
"""Tests for conversions from markdown to other formats"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import re
from copy import copy
from functools import partial
from html import unescape
from jinja2 import Environment
from ...tests.base import TestsBase
from ...tests.utils import onlyif_cmds_exist
from ..markdown import markdown2html
from ..pandoc import convert_pandoc
class TestMarkdown(TestsBase):
    """Round-trip tests for the markdown conversion filters
    (mistune-based markdown2html and pandoc-based converters)."""

    # Markdown snippets fed to every converter under test.
    tests = [
        "*test",
        "**test",
        "*test*",
        "_test_",
        "__test__",
        "__*test*__",
        "**test**",
        "#test",
        "##test",
        "test\n----",
        "test [link](https://google.com/)",
    ]

    # For each snippet above, the token(s) expected in converted output.
    tokens = [
        "*test",
        "**test",
        "test",
        "test",
        "test",
        "test",
        "test",
        "test",
        "test",
        "test",
        ("test", "https://google.com/"),
    ]

    @onlyif_cmds_exist("pandoc")
    def test_markdown2latex(self):
        """markdown2latex test"""
        for index, test in enumerate(self.tests):
            self._try_markdown(
                partial(convert_pandoc, from_format="markdown", to_format="latex"),
                test,
                self.tokens[index],
            )

    @onlyif_cmds_exist("pandoc")
    def test_markdown2latex_markup(self):
        """markdown2latex with markup kwarg test"""
        # This string should be passed through unaltered with pandoc's
        # markdown_strict reader
        s = "1) arabic number with parenthesis"
        self.assertEqual(convert_pandoc(s, "markdown_strict", "latex"), s)
        # This string should be passed through unaltered with pandoc's
        # markdown_strict+tex_math_dollars reader
        s = r"$\alpha$ latex math"
        # sometimes pandoc uses $math$, sometimes it uses \(math\)
        expected = re.compile(r"(\$|\\\()\\alpha(\$|\\\)) latex math")
        assertRegex = self.assertRegex
        assertRegex(convert_pandoc(s, "markdown_strict+tex_math_dollars", "latex"), expected)

    @onlyif_cmds_exist("pandoc")
    def test_pandoc_extra_args(self):
        # pass --no-wrap
        s = "\n".join(
            [
                "#latex {{long_line | md2l(['--wrap=none'])}}",
                "#rst {{long_line | md2r(['--columns', '5'])}}",
            ]
        )
        long_line = " ".join(["long"] * 30)
        env = Environment()
        env.filters.update(
            {
                "md2l": lambda code, extra_args: convert_pandoc(
                    code, from_format="markdown", to_format="latex", extra_args=extra_args
                ),
                "md2r": lambda code, extra_args: convert_pandoc(
                    code, from_format="markdown", to_format="rst", extra_args=extra_args
                ),
            }
        )
        tpl = env.from_string(s)
        rendered = tpl.render(long_line=long_line)
        _, latex, rst = rendered.split("#")
        self.assertEqual(latex.strip(), "latex %s" % long_line)
        self.assertEqual(rst.strip(), "rst %s" % long_line.replace(" ", "\n"))

    def test_markdown2html(self):
        """markdown2html test"""
        for index, test in enumerate(self.tests):
            self._try_markdown(markdown2html, test, self.tokens[index])

    def test_markdown2html_heading_anchors(self):
        # Headings gain an id plus a pilcrow (&#182;) anchor link.
        for md, tokens in [
            ("# test", ("<h1", ">test", 'id="test"', "&#182;</a>", "anchor-link")),
            (
                "###test head space",
                ("<h3", ">test head space", 'id="test-head-space"', "&#182;</a>", "anchor-link"),
            ),
        ]:
            self._try_markdown(markdown2html, md, tokens)

    def test_markdown2html_math(self):
        # Mathematical expressions not containing <, >, &
        # should be passed through unaltered
        # all the "<", ">", "&" must be escaped correctly
        cases = [
            (
                "\\begin{equation*}\n"  # noqa
                + (
                    "\\left( \\sum_{k=1}^n a_k b_k \\right)^2 "
                    "\\leq \\left( \\sum_{k=1}^n a_k^2 \\right) "
                    "\\left( \\sum_{k=1}^n b_k^2 \\right)\n"
                )
                + "\\end{equation*}"
            ),
            ("$$\na = 1 *3* 5\n$$"),
            "$ a = 1 *3* 5 $",
            "$s_i = s_{i}\n$",
            "$a<b&b<lt$",
            "$a<b&lt;b>a;a-b<0$",
            "$<k'>$",
            "$$a<b&b<lt$$",
            "$$a<b&lt;b>a;a-b<0$$",
            "$$<k'>$$",
            ("$$x\n=\n2$$"),
            (
                "$$\n"
                "b = \\left[\n"
                "P\\left(\\right)\n"
                "- (l_1\\leftrightarrow l_2\n)"
                "\\right]\n"
                "$$"
            ),
            ("\\begin{equation*}\nx = 2 *55* 7\n\\end{equation*}"),
            """$
\\begin{tabular}{ l c r }
1 & 2 & 3 \\
4 & 5 & 6 \\
7 & 8 & 9 \\
\\end{tabular}$""",
        ]
        for case in cases:
            result = markdown2html(case)
            # find the equation in the generated texts
            search_result = re.search(r"\$.*\$", result, re.DOTALL)
            if search_result is None:
                search_result = re.search(
                    "\\\\begin\\{equation.*\\}.*\\\\end\\{equation.*\\}", result, re.DOTALL
                )
            math = search_result.group(0)
            # the resulting math part can not contain "<", ">" or
            # "&" not followed by "lt;", "gt;", or "amp;".
            self.assertNotIn("<", math)
            self.assertNotIn(">", math)
            self.assertNotRegex(math, "&(?![gt;|lt;|amp;])")
            # the result should be able to be unescaped correctly
            self.assertEqual(case, unescape(math))

    def test_markdown2html_math_mixed(self):
        """ensure markdown between inline and inline-block math works and
        test multiple LaTeX markup syntaxes.
        """
        case = """The entries of \\\\(C\\\\) are given by the exact formula:
$$
C_{ik} = \\sum_{j=1}^n A_{ij} B_{jk},
$$
but you can _implement_ this computation in many ways.
$\approx 2mnp$ flops are needed for \\\\[ C_{ik} = \\sum_{j=1}^n A_{ij} B_{jk} \\\\].
Also check empty math blocks work correctly:
$$$$
\\\\[\\\\]"""
        output_check = (
            case.replace("_implement_", "<em>implement</em>")
            .replace("\\\\(", "$")
            .replace("\\\\)", "$")
            .replace("\\\\[", "$$")
            .replace("\\\\]", "$$")
        )
        # these replacements are needed because we use $ and $$ in our html output
        self._try_markdown(markdown2html, case, output_check)

    def test_markdown2html_math_paragraph(self):
        """these should all parse without modification"""
        cases = [
            # https://github.com/ipython/ipython/issues/6724
            """Water that is stored in $t$, $s_t$, must equal the storage content of the previous stage,
$s_{t-1}$, plus a stochastic inflow, $I_t$, minus what is being released in $t$, $r_t$.
With $s_0$ defined as the initial storage content in $t=1$, we have""",
            # https://github.com/jupyter/nbviewer/issues/420
            """$C_{ik}$
$$
C_{ik} = \\sum_{j=1}
$$
$C_{ik}$""",
            """$m$
$$
C = \begin{pmatrix}
0 & 0 & 0 & \\cdots & 0 & 0 & -c_0 \\
0 & 0 & 0 & \\cdots & 0 & 1 & -c_{m-1}
\\end{pmatrix}
$$
$x^m$""",
            """$r=\\overline{1,n}$
$$ {\bf
b}_{i}^{r}(t)=(1-t)\\,{\bf b}_{i}^{r-1}(t)+t\\,{\bf b}_{i+1}^{r-1}(t),\\:
i=\\overline{0,n-r}, $$
i.e. the $i^{th}$""",
        ]
        for case in cases:
            s = markdown2html(case)
            self.assertIn(case, unescape(s))

    @onlyif_cmds_exist("pandoc")
    def test_markdown2rst(self):
        """markdown2rst test"""
        # Modify token array for rst, escape asterisk
        tokens = copy(self.tokens)
        tokens[0] = r"\*test"
        tokens[1] = r"\**test"
        for index, test in enumerate(self.tests):
            self._try_markdown(
                partial(convert_pandoc, from_format="markdown", to_format="rst"),
                test,
                tokens[index],
            )

    def _try_markdown(self, method, test, tokens):
        # tokens may be a single expected substring or an iterable of them.
        results = method(test)
        if isinstance(tokens, (str,)):
            self.assertIn(tokens, results)
        else:
            for token in tokens:
                self.assertIn(token, results)

View File

@ -0,0 +1,20 @@
from nbconvert.filters import get_metadata
def test_get_metadata():
    """get_metadata resolves keys both generically and per-mimetype."""
    png_metadata = {
        "unconfined": True,
        "height": 3,
    }
    output = {"metadata": {"width": 1, "height": 2, "image/png": png_metadata}}
    # unknown keys resolve to None
    assert get_metadata(output, "nowhere") is None
    # top-level lookup
    assert get_metadata(output, "height") == 2
    # mimetype-scoped keys are invisible without the mimetype argument
    assert get_metadata(output, "unconfined") is None
    # mimetype-scoped lookup, falling back to top-level where absent
    assert get_metadata(output, "unconfined", "image/png") is True
    assert get_metadata(output, "width", "image/png") == 1
    assert get_metadata(output, "height", "image/png") == 3

View File

@ -0,0 +1,206 @@
"""
Module with tests for Strings
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import re
from ...tests.base import TestsBase
from ..strings import (
add_anchor,
add_prompts,
ascii_only,
comment_lines,
get_lines,
html2text,
ipython2python,
posix_path,
prevent_list_blocks,
strip_dollars,
strip_files_prefix,
wrap_text,
)
# -----------------------------------------------------------------------------
# Class
# -----------------------------------------------------------------------------
class TestStrings(TestsBase):
    """Tests for the string filters in nbconvert.filters.strings."""

    def test_wrap_text(self):
        """wrap_text test"""
        test_text = """
Tush! never tell me; I take it much unkindly
That thou, Iago, who hast had my purse
As if the strings were thine, shouldst know of this.
"""
        for length in [30, 5, 1]:
            self._confirm_wrap_text(test_text, length)

    def _confirm_wrap_text(self, text, length):
        # every wrapped line must fit within the requested width
        for line in wrap_text(text, length).split("\n"):
            assert len(line) <= length

    def test_html2text(self):
        """html2text test"""
        # TODO: More tests
        self.assertEqual(html2text("<name>joe</name>"), "joe")

    def test_add_anchor(self):
        """add_anchor test"""
        # TODO: More tests
        results = add_anchor("<b>Hello World!</b>")
        assert "Hello World!" in results
        assert 'id="' in results
        assert 'class="anchor-link"' in results
        assert "<b" in results
        assert "</b>" in results

    def test_add_anchor_fail(self):
        """add_anchor does nothing when it fails"""
        html = "<h1>Hello <br>World!</h1>"
        results = add_anchor(html)
        self.assertEqual(html, results)

    def test_add_anchor_valid_url_fragment(self):
        """add_anchor creates a valid URL fragment"""
        results = add_anchor(r"<h1>$\pi$ with #s and unicode 中</h1>")
        match = re.search(r'href="#(.*?)"', results)
        assert match
        assert len(match.groups()) == 1
        href = match.groups()[0]
        assert len(href) > 0
        # No invalid characters should be present: the backslash from
        # $\pi$, the '#' and the non-ascii 中 must all be gone.
        # (Fixed: this was ``assert "" not in href`` — an always-failing
        # check left behind when the non-ascii character was mangled.)
        assert "\\" not in href
        assert "#" not in href
        assert "中" not in href

    def test_strip_dollars(self):
        """strip_dollars test"""
        tests = [
            ("", ""),
            (" ", " "),
            ("$$", ""),
            ("$H$", "H"),
            ("$He", "He"),
            ("H$el", "H$el"),
            ("Hell$", "Hell"),
            ("Hello", "Hello"),
            ("W$o$rld", "W$o$rld"),
        ]
        for test in tests:
            self._try_strip_dollars(test[0], test[1])

    def _try_strip_dollars(self, test, result):
        self.assertEqual(strip_dollars(test), result)

    def test_strip_files_prefix(self):
        """strip_files_prefix test"""
        tests = [
            ("", ""),
            ("/files", "/files"),
            ('test="/files"', 'test="/files"'),
            ("My files are in `files/`", "My files are in `files/`"),
            (
                '<a href="files/test.html">files/test.html</a>',
                '<a href="test.html">files/test.html</a>',
            ),
            (
                '<a href="/files/test.html">files/test.html</a>',
                '<a href="test.html">files/test.html</a>',
            ),
            (
                "<a href='files/test.html'>files/test.html</a>",
                "<a href='test.html'>files/test.html</a>",
            ),
            ('<img src="files/url/location.gif">', '<img src="url/location.gif">'),
            ('<img src="/files/url/location.gif">', '<img src="url/location.gif">'),
            ("hello![caption]", "hello![caption]"),
            ("hello![caption](/url/location.gif)", "hello![caption](/url/location.gif)"),
            ("hello![caption](url/location.gif)", "hello![caption](url/location.gif)"),
            ("hello![caption](url/location.gif)", "hello![caption](url/location.gif)"),
            ("hello![caption](files/url/location.gif)", "hello![caption](url/location.gif)"),
            ("hello![caption](/files/url/location.gif)", "hello![caption](url/location.gif)"),
            ("hello [text](/files/url/location.gif)", "hello [text](url/location.gif)"),
            ("hello [text space](files/url/location.gif)", "hello [text space](url/location.gif)"),
        ]
        for test in tests:
            self._try_files_prefix(test[0], test[1])

    def _try_files_prefix(self, test, result):
        self.assertEqual(strip_files_prefix(test), result)

    def test_comment_lines(self):
        """comment_lines test"""
        for line in comment_lines("hello\nworld\n!").split("\n"):
            assert line.startswith("# ")
        for line in comment_lines("hello\nworld\n!", "beep").split("\n"):
            assert line.startswith("beep")

    def test_get_lines(self):
        """get_lines test"""
        text = "hello\nworld\n!"
        self.assertEqual(get_lines(text, start=1), "world\n!")
        self.assertEqual(get_lines(text, end=2), "hello\nworld")
        self.assertEqual(get_lines(text, start=2, end=5), "!")
        self.assertEqual(get_lines(text, start=-2), "world\n!")

    def test_ipython2python(self):
        """ipython2python test"""
        # TODO: More tests
        results = ipython2python('%%pylab\nprint("Hello-World")').replace("u'", "'")
        self.fuzzy_compare(
            results.replace(r"\n", ""),
            "get_ipython().run_cell_magic('pylab', '', 'print(\"Hello-World\")')",
            ignore_spaces=True,
            ignore_newlines=True,
        )

    def test_posix_path(self):
        """posix_path test"""
        path_list = ["foo", "bar"]
        expected = "/".join(path_list)
        native = os.path.join(*path_list)
        filtered = posix_path(native)
        self.assertEqual(filtered, expected)

    def test_add_prompts(self):
        """add_prompts test"""
        # NOTE(review): inner whitespace of these literals restored to
        # four spaces so the continuation-prompt lines of text2 are
        # exactly "... " + the corresponding line of text1.
        text1 = """for i in range(10):\n    i += 1\n    print i"""
        text2 = """>>> for i in range(10):\n...     i += 1\n...     print i"""
        self.assertEqual(text2, add_prompts(text1))

    def test_prevent_list_blocks(self):
        """prevent_list_blocks test"""
        tests = [
            ("1. arabic point", "1\\. arabic point"),
            ("* bullet asterisk", "\\* bullet asterisk"),
            ("+ bullet Plus Sign", "\\+ bullet Plus Sign"),
            ("- bullet Hyphen-Minus", "\\- bullet Hyphen-Minus"),
            (" 1. spaces + arabic point", " 1\\. spaces + arabic point"),
        ]
        for test in tests:
            self.assertEqual(prevent_list_blocks(test[0]), test[1])

    def test_ascii_only(self):
        """ascii only test"""
        tests = [
            ("", ""),
            (" ", " "),
            ("Hello", "Hello"),
            ("Hello 中文", "Hello ??"),
        ]
        for test in tests:
            self.assertEqual(test[1], ascii_only(test[0]))

View File

@ -0,0 +1,76 @@
"""Filter used to select the first preferred output format available,
excluding interactive widget format if the widget state is not available.
The filter contained in the file allows the converter templates to select
the output format that is most valuable to the active export format. The
value of the different formats is set via
NbConvertBase.display_data_priority
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
import os
from warnings import warn
from ..utils.base import NbConvertBase
__all__ = ["WidgetsDataTypeFilter"]

# Mimetype of a rendered widget view and of the notebook-level saved
# widget state, as used in notebook output/metadata.
WIDGET_VIEW_MIMETYPE = "application/vnd.jupyter.widget-view+json"
WIDGET_STATE_MIMETYPE = "application/vnd.jupyter.widget-state+json"
class WidgetsDataTypeFilter(NbConvertBase):
    """Returns the preferred display format, excluding the widget output if
    there is no widget state available"""

    def __init__(self, notebook_metadata=None, resources=None, **kwargs):
        """
        Parameters
        ----------
        notebook_metadata : dict, optional
            Mapping of notebook path -> notebook metadata; consulted for
            saved widget state.
        resources : dict, optional
            nbconvert resources dict; its metadata name/path locate the
            notebook in ``notebook_metadata``.
        """
        # Guard against None so __call__ can always do a dict lookup
        # (previously a missing notebook_metadata made __call__ raise
        # AttributeError on self.metadata.get).
        self.metadata = notebook_metadata if notebook_metadata is not None else {}
        self.notebook_path = ""
        if resources is not None:
            name = resources.get("metadata", {}).get("name", "")
            path = resources.get("metadata", {}).get("path", "")
            self.notebook_path = os.path.join(path, name)
        super().__init__(**kwargs)

    def __call__(self, output):
        """Return the first available format in the priority.

        Produces a UserWarning if no compatible mimetype is found.

        `output` is dict with structure {mimetype-of-element: value-of-element}
        """
        metadata = self.metadata.get(self.notebook_path, {})
        widgets_state = (
            metadata["widgets"][WIDGET_STATE_MIMETYPE]["state"]
            if metadata.get("widgets") is not None
            else {}
        )
        for fmt in self.display_data_priority:
            if fmt in output:
                # If there is no widget state available, we skip this mimetype
                if (
                    fmt == WIDGET_VIEW_MIMETYPE
                    and output[WIDGET_VIEW_MIMETYPE]["model_id"] not in widgets_state
                ):
                    continue
                return [fmt]
        warn(
            "Your element with mimetype(s) {mimetypes}"
            " is not able to be represented.".format(mimetypes=output.keys())
        )
        return []

View File

@ -0,0 +1,663 @@
#!/usr/bin/env python
"""NbConvert is a utility for conversion of .ipynb files.
Command-line interface for the NbConvert conversion utility.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import asyncio
import glob
import logging
import os
import sys
from textwrap import dedent, fill
from jupyter_core.application import JupyterApp, base_aliases, base_flags
from traitlets import (
Bool,
DottedObjectName,
Instance,
List,
Type,
Unicode,
default,
observe,
)
from traitlets.config import Configurable, catch_config_error
from traitlets.utils.importstring import import_item
from nbconvert import __version__, exporters, postprocessors, preprocessors, writers
from nbconvert.utils.text import indent
from .exporters.base import get_export_names, get_exporter
from .filters.markdown_mistune import InvalidNotebook # noqa For backward compatibility
from .utils.base import NbConvertBase
from .utils.exceptions import ConversionException
from .utils.io import unicode_stdin_stream
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
class DottedOrNone(DottedObjectName):
    """A string holding a valid dotted object name in Python, such as A.b3._c

    Also allows for None type.
    """

    default_value = ""

    def validate(self, obj, value):
        # None and empty values bypass the dotted-name validation.
        if value is None or len(value) == 0:
            return value
        return super().validate(obj, value)
# Command-line aliases: ``--<alias>=<value>`` maps onto the traitlet it configures.
nbconvert_aliases = {
    **base_aliases,
    "to": "NbConvertApp.export_format",
    "template": "TemplateExporter.template_name",
    "template-file": "TemplateExporter.template_file",
    "theme": "HTMLExporter.theme",
    "writer": "NbConvertApp.writer_class",
    "post": "NbConvertApp.postprocessor_class",
    "output": "NbConvertApp.output_base",
    "output-dir": "FilesWriter.build_directory",
    "reveal-prefix": "SlidesExporter.reveal_url_prefix",
    "nbformat": "NotebookExporter.nbformat_version",
}
nbconvert_flags = {}
nbconvert_flags.update(base_flags)
nbconvert_flags.update(
{
"execute": (
{"ExecutePreprocessor": {"enabled": True}},
"Execute the notebook prior to export.",
),
"allow-errors": (
{"ExecutePreprocessor": {"allow_errors": True}},
(
"Continue notebook execution even if one of the cells throws "
"an error and include the error message in the cell output "
"(the default behaviour is to abort conversion). This flag "
"is only relevant if '--execute' was specified, too."
),
),
"stdin": (
{
"NbConvertApp": {
"from_stdin": True,
}
},
"read a single notebook file from stdin. Write the resulting notebook with default basename 'notebook.*'",
),
"stdout": (
{"NbConvertApp": {"writer_class": "StdoutWriter"}},
"Write notebook output to stdout instead of files.",
),
"inplace": (
{
"NbConvertApp": {
"use_output_suffix": False,
"export_format": "notebook",
},
"FilesWriter": {"build_directory": ""},
},
"""Run nbconvert in place, overwriting the existing notebook (only
relevant when converting to notebook format)""",
),
"clear-output": (
{
"NbConvertApp": {
"use_output_suffix": False,
"export_format": "notebook",
},
"FilesWriter": {"build_directory": ""},
"ClearOutputPreprocessor": {"enabled": True},
},
"""Clear output of current file and save in place,
overwriting the existing notebook. """,
),
"no-prompt": (
{
"TemplateExporter": {
"exclude_input_prompt": True,
"exclude_output_prompt": True,
}
},
"Exclude input and output prompts from converted document.",
),
"no-input": (
{
"TemplateExporter": {
"exclude_output_prompt": True,
"exclude_input": True,
"exclude_input_prompt": True,
}
},
"""Exclude input cells and output prompts from converted document.
This mode is ideal for generating code-free reports.""",
),
"allow-chromium-download": (
{
"WebPDFExporter": {
"allow_chromium_download": True,
}
},
"""Whether to allow downloading chromium if no suitable version is found on the system.""",
),
"disable-chromium-sandbox": (
{
"WebPDFExporter": {
"disable_sandbox": True,
}
},
"""Disable chromium security sandbox when converting to PDF..""",
),
"show-input": (
{
"TemplateExporter": {
"exclude_input": False,
}
},
"""Shows code input. This flag is only useful for dejavu users.""",
),
"embed-images": (
{
"HTMLExporter": {
"embed_images": True,
}
},
"""Embed the images as base64 dataurls in the output. This flag is only useful for the HTML/WebPDF/Slides exports.""",
),
}
)
class NbConvertApp(JupyterApp):
    """Application used to convert from notebook file type (``*.ipynb``)"""

    version = __version__
    name = "jupyter-nbconvert"
    aliases = nbconvert_aliases
    flags = nbconvert_flags

    @default("log_level")
    def _log_level_default(self):
        """Default to INFO so conversion progress is visible."""
        return logging.INFO

    classes = List()

    @default("classes")
    def _classes_default(self):
        """Collect every Configurable exposed by the exporter, preprocessor,
        writer and postprocessor packages so their traits show up in
        ``--help-all`` and are configurable from config files."""
        classes = [NbConvertBase]
        for pkg in (exporters, preprocessors, writers, postprocessors):
            for name in dir(pkg):
                cls = getattr(pkg, name)
                if isinstance(cls, type) and issubclass(cls, Configurable):
                    classes.append(cls)
        return classes

    description = Unicode(
        """This application is used to convert notebook files (*.ipynb)
        to various other formats.
        WARNING: THE COMMANDLINE INTERFACE MAY CHANGE IN FUTURE RELEASES."""
    )

    output_base = Unicode(
        "",
        help="""overwrite base name use for output files.
            can only be used when converting one notebook at a time.
            """,
    ).tag(config=True)

    use_output_suffix = Bool(
        True,
        help="""Whether to apply a suffix prior to the extension (only relevant
            when converting to notebook format). The suffix is determined by
            the exporter, and is usually '.nbconvert'.""",
    ).tag(config=True)

    output_files_dir = Unicode(
        "{notebook_name}_files",
        help="""Directory to copy extra files (figures) to.
               '{notebook_name}' in the string will be converted to notebook
               basename.""",
    ).tag(config=True)

    examples = Unicode(
        """
        The simplest way to use nbconvert is
        > jupyter nbconvert mynotebook.ipynb --to html
        Options include {formats}.
        > jupyter nbconvert --to latex mynotebook.ipynb
        Both HTML and LaTeX support multiple output templates. LaTeX includes
        'base', 'article' and 'report'. HTML includes 'basic', 'lab' and
        'classic'. You can specify the flavor of the format used.
        > jupyter nbconvert --to html --template lab mynotebook.ipynb
        You can also pipe the output to stdout, rather than a file
        > jupyter nbconvert mynotebook.ipynb --stdout
        PDF is generated via latex
        > jupyter nbconvert mynotebook.ipynb --to pdf
        You can get (and serve) a Reveal.js-powered slideshow
        > jupyter nbconvert myslides.ipynb --to slides --post serve
        Multiple notebooks can be given at the command line in a couple of
        different ways:
        > jupyter nbconvert notebook*.ipynb
        > jupyter nbconvert notebook1.ipynb notebook2.ipynb
        or you can specify the notebooks list in a config file, containing::
            c.NbConvertApp.notebooks = ["my_notebook.ipynb"]
        > jupyter nbconvert --config mycfg.py
        """.format(
            formats=get_export_names()
        )
    )

    # Writer specific variables
    writer = Instance(
        "nbconvert.writers.base.WriterBase",
        help="""Instance of the writer class used to write the
        results of the conversion.""",
        allow_none=True,
    )
    writer_class = DottedObjectName(
        "FilesWriter",
        help="""Writer class used to write the
        results of the conversion""",
    ).tag(config=True)
    # Short lowercase names accepted for --writer on the command line.
    writer_aliases = {
        "fileswriter": "nbconvert.writers.files.FilesWriter",
        "debugwriter": "nbconvert.writers.debug.DebugWriter",
        "stdoutwriter": "nbconvert.writers.stdout.StdoutWriter",
    }
    writer_factory = Type(allow_none=True)

    @observe("writer_class")
    def _writer_class_changed(self, change):
        """Resolve a writer alias (if any) and import the writer class."""
        new = change["new"]
        if new.lower() in self.writer_aliases:
            new = self.writer_aliases[new.lower()]
        self.writer_factory = import_item(new)

    # Post-processor specific variables
    postprocessor = Instance(
        "nbconvert.postprocessors.base.PostProcessorBase",
        help="""Instance of the PostProcessor class used to write the
        results of the conversion.""",
        allow_none=True,
    )
    postprocessor_class = DottedOrNone(
        help="""PostProcessor class used to write the
        results of the conversion"""
    ).tag(config=True)
    postprocessor_aliases = {"serve": "nbconvert.postprocessors.serve.ServePostProcessor"}
    postprocessor_factory = Type(None, allow_none=True)

    @observe("postprocessor_class")
    def _postprocessor_class_changed(self, change):
        """Resolve a postprocessor alias and import the class; an empty
        value leaves the factory unset (postprocessing is optional)."""
        new = change["new"]
        if new.lower() in self.postprocessor_aliases:
            new = self.postprocessor_aliases[new.lower()]
        if new:
            self.postprocessor_factory = import_item(new)

    export_format = Unicode(
        allow_none=False,
        help="""The export format to be used, either one of the built-in formats
        {formats}
        or a dotted object name that represents the import path for an
        ``Exporter`` class""".format(
            formats=get_export_names()
        ),
    ).tag(config=True)

    notebooks = List(
        [],
        help="""List of notebooks to convert.
        Wildcards are supported.
        Filenames passed positionally will be added to the list.
        """,
    ).tag(config=True)
    from_stdin = Bool(False, help="read a single notebook from stdin.").tag(config=True)

    @catch_config_error
    def initialize(self, argv=None):
        """Initialize application, notebooks, writer, and postprocessor"""
        # See https://bugs.python.org/issue37373 :(
        if sys.version_info[0] == 3 and sys.version_info[1] >= 8 and sys.platform.startswith("win"):
            asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
        self.init_syspath()
        super().initialize(argv)
        self.init_notebooks()
        self.init_writer()
        self.init_postprocessor()

    def init_syspath(self):
        """Add the cwd to the sys.path ($PYTHONPATH)"""
        sys.path.insert(0, os.getcwd())

    def init_notebooks(self):
        """Construct the list of notebooks.

        If notebooks are passed on the command-line,
        they override (rather than add) notebooks specified in config files.
        Glob each notebook to replace notebook patterns with filenames.
        """
        # Specifying notebooks on the command-line overrides (rather than
        # adds) the notebook list
        if self.extra_args:
            patterns = self.extra_args
        else:
            patterns = self.notebooks

        # Use glob to replace all the notebook patterns with filenames.
        filenames = []
        for pattern in patterns:
            # Use glob to find matching filenames. Allow the user to convert
            # notebooks without having to type the extension.
            globbed_files = glob.glob(pattern)
            globbed_files.extend(glob.glob(pattern + ".ipynb"))
            if not globbed_files:
                self.log.warning("pattern %r matched no files", pattern)

            for filename in globbed_files:
                if filename not in filenames:
                    filenames.append(filename)
        self.notebooks = filenames

    def init_writer(self):
        """Initialize the writer (which is stateless)"""
        self._writer_class_changed({"new": self.writer_class})
        self.writer = self.writer_factory(parent=self)
        # A custom build directory implies full paths in the output, so the
        # '.nbconvert' suffix would be redundant.
        if hasattr(self.writer, "build_directory") and self.writer.build_directory != "":
            self.use_output_suffix = False

    def init_postprocessor(self):
        """Initialize the postprocessor (which is stateless)"""
        self._postprocessor_class_changed({"new": self.postprocessor_class})
        if self.postprocessor_factory:
            self.postprocessor = self.postprocessor_factory(parent=self)

    def start(self):
        """Run start after initialization process has completed"""
        super().start()
        self.convert_notebooks()

    def init_single_notebook_resources(self, notebook_filename):
        """Step 1: Initialize resources

        This initializes the resources dictionary for a single notebook.

        Returns
        -------
        dict
            resources dictionary for a single notebook that MUST include the following keys:
                - config_dir: the location of the Jupyter config directory
                - unique_key: the notebook name
                - output_files_dir: a directory where output files (not
                  including the notebook itself) should be saved
        """
        basename = os.path.basename(notebook_filename)
        notebook_name = basename[: basename.rfind(".")]
        if self.output_base:
            # strip duplicate extension from output_base, to avoid Basename.ext.ext
            if getattr(self.exporter, "file_extension", False):
                base, ext = os.path.splitext(self.output_base)
                if ext == self.exporter.file_extension:
                    self.output_base = base
            notebook_name = self.output_base

        self.log.debug("Notebook name is '%s'", notebook_name)

        # first initialize the resources we want to use
        resources = {}
        resources["config_dir"] = self.config_dir
        resources["unique_key"] = notebook_name

        output_files_dir = self.output_files_dir.format(notebook_name=notebook_name)

        resources["output_files_dir"] = output_files_dir

        return resources

    def export_single_notebook(self, notebook_filename, resources, input_buffer=None):
        """Step 2: Export the notebook

        Exports the notebook to a particular format according to the specified
        exporter. This function returns the output and (possibly modified)
        resources from the exporter.

        Parameters
        ----------
        notebook_filename : str
            name of notebook file.
        resources : dict
        input_buffer :
            readable file-like object returning unicode.
            if not None, notebook_filename is ignored

        Returns
        -------
        output
        dict
            resources (possibly modified)
        """
        try:
            if input_buffer is not None:
                output, resources = self.exporter.from_file(input_buffer, resources=resources)
            else:
                output, resources = self.exporter.from_filename(
                    notebook_filename, resources=resources
                )
        except ConversionException:
            self.log.error("Error while converting '%s'", notebook_filename, exc_info=True)
            self.exit(1)

        return output, resources

    def write_single_notebook(self, output, resources):
        """Step 3: Write the notebook to file

        This writes output from the exporter to file using the specified writer.
        It returns the results from the writer.

        Parameters
        ----------
        output :
        resources : dict
            resources for a single notebook including name, config directory
            and directory to save output

        Returns
        -------
        file
            results from the specified writer output of exporter
        """
        if "unique_key" not in resources:
            raise KeyError("unique_key MUST be specified in the resources, but it is not")

        notebook_name = resources["unique_key"]
        if self.use_output_suffix and not self.output_base:
            notebook_name += resources.get("output_suffix", "")

        write_results = self.writer.write(output, resources, notebook_name=notebook_name)
        return write_results

    def postprocess_single_notebook(self, write_results):
        """Step 4: Post-process the written file

        Only used if a postprocessor has been specified. After the
        converted notebook is written to a file in Step 3, this post-processes
        the notebook.
        """
        # Post-process if post processor has been defined.
        if hasattr(self, "postprocessor") and self.postprocessor:
            self.postprocessor(write_results)

    def convert_single_notebook(self, notebook_filename, input_buffer=None):
        """Convert a single notebook.

        Performs the following steps:
            1. Initialize notebook resources
            2. Export the notebook to a particular format
            3. Write the exported notebook to file
            4. (Maybe) postprocess the written file

        Parameters
        ----------
        notebook_filename : str
        input_buffer :
            If input_buffer is not None, conversion is done and the buffer is
            used as source into a file basenamed by the notebook_filename
            argument.
        """
        if input_buffer is None:
            self.log.info("Converting notebook %s to %s", notebook_filename, self.export_format)
        else:
            self.log.info("Converting notebook into %s", self.export_format)

        resources = self.init_single_notebook_resources(notebook_filename)
        output, resources = self.export_single_notebook(
            notebook_filename, resources, input_buffer=input_buffer
        )
        write_results = self.write_single_notebook(output, resources)
        self.postprocess_single_notebook(write_results)

    def convert_notebooks(self):
        """Convert the notebooks in the self.notebook traitlet"""
        # check that the output base isn't specified if there is more than
        # one notebook to convert
        if self.output_base != "" and len(self.notebooks) > 1:
            self.log.error(
                """
                UsageError: --output flag or `NbConvertApp.output_base` config option
                cannot be used when converting multiple notebooks.
                """
            )
            self.exit(1)

        # no notebooks to convert!
        if len(self.notebooks) == 0 and not self.from_stdin:
            self.print_help()
            sys.exit(-1)

        if not self.export_format:
            raise ValueError(
                "Please specify an output format with '--to <format>'."
                f"\nThe following formats are available: {get_export_names()}"
            )

        # initialize the exporter
        cls = get_exporter(self.export_format)
        self.exporter = cls(config=self.config)

        # convert each notebook
        if not self.from_stdin:
            for notebook_filename in self.notebooks:
                self.convert_single_notebook(notebook_filename)
        else:
            input_buffer = unicode_stdin_stream()
            # default name when conversion from stdin
            self.convert_single_notebook("notebook.ipynb", input_buffer=input_buffer)

    def document_flag_help(self):
        """
        Return a string containing descriptions of all the flags.
        """
        flags = "The following flags are defined:\n\n"
        for flag, (cfg, fhelp) in self.flags.items():
            flags += f"{flag}\n"
            flags += indent(fill(fhelp, 80)) + "\n\n"
            flags += indent(fill("Long Form: " + str(cfg), 80)) + "\n\n"
        return flags

    def document_alias_help(self):
        """Return a string containing all of the aliases"""
        # BUG FIX: "folowing" -> "following" in the user-visible help text.
        aliases = "The following aliases are defined:\n\n"
        for alias, longname in self.aliases.items():
            aliases += f"\t**{alias}** ({longname})\n\n"
        return aliases

    def document_config_options(self):
        """
        Provides a much improved version of the configuration documentation by
        breaking the configuration options into app, exporter, writer,
        preprocessor, postprocessor, and other sections.
        """
        categories = {
            category: [c for c in self._classes_inc_parents() if category in c.__name__.lower()]
            for category in ["app", "exporter", "writer", "preprocessor", "postprocessor"]
        }
        accounted_for = {c for category in categories.values() for c in category}
        categories["other"] = [c for c in self._classes_inc_parents() if c not in accounted_for]

        header = dedent(
            """
            {section} Options
            -----------------------
            """
        )
        sections = ""
        for category in categories:
            sections += header.format(section=category.title())
            if category in ["exporter", "preprocessor", "writer"]:
                sections += f".. image:: _static/{category}_inheritance.png\n\n"
            sections += "\n".join(c.class_config_rst_doc() for c in categories[category])

        # Escape ' : ' so it is not parsed as an rST field-list separator.
        return sections.replace(" : ", r" \: ")
class DejavuApp(NbConvertApp):
    """Convert a notebook to an executed, input-free document (HTML by default)."""

    def initialize(self, argv=None):
        """Seed the config for a "dejavu" export before normal initialization."""
        # Always execute the notebook, and render without pagination for WebPDF.
        self.config.ExecutePreprocessor.enabled = True
        self.config.WebPDFExporter.paginate = False
        # Hide code cells and all prompts; only rendered output remains.
        self.config.TemplateExporter.exclude_input = True
        self.config.TemplateExporter.exclude_output_prompt = True
        self.config.TemplateExporter.exclude_input_prompt = True
        super().initialize(argv)

    @default("export_format")
    def default_export_format(self):
        """Export to HTML unless the user requests another format."""
        return "html"
# -----------------------------------------------------------------------------
# Main entry point
# -----------------------------------------------------------------------------
# Console-script entry points: launch the conversion applications.
main = launch_new_instance = NbConvertApp.launch_instance
dejavu_main = DejavuApp.launch_instance

View File

@ -0,0 +1,7 @@
from .base import PostProcessorBase

# protect against unavailable tornado
# ServePostProcessor imports tornado at module load; if tornado is missing,
# the package still imports and only the "serve" postprocessor is absent.
try:
    from .serve import ServePostProcessor
except ImportError:
    pass

View File

@ -0,0 +1,33 @@
"""
Basic post processor
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from ..utils.base import NbConvertBase
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class PostProcessorBase(NbConvertBase):
    """Abstract base class for postprocessors.

    Subclasses implement ``postprocess``; instances are simply called with
    the writer's output.
    """

    def __call__(self, input):
        """Delegate to :meth:`postprocess`."""
        self.postprocess(input)

    def postprocess(self, input):
        """Post-process output from a writer. Must be overridden."""
        raise NotImplementedError("postprocess")

View File

@ -0,0 +1,119 @@
"""PostProcessor for serving reveal.js HTML slideshows."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import threading
import webbrowser
from tornado import gen, httpserver, ioloop, log, web
from tornado.httpclient import AsyncHTTPClient
from traitlets import Bool, Int, Unicode
from .base import PostProcessorBase
class ProxyHandler(web.RequestHandler):
    """handler the proxies requests from a local prefix to a CDN"""

    @gen.coroutine
    def get(self, prefix, url):
        """proxy a request to a CDN"""
        # ``cdn`` and ``client`` are injected via the Application settings
        # (see ServePostProcessor.postprocess).
        proxy_url = "/".join([self.settings["cdn"], url])
        client = self.settings["client"]
        response = yield client.fetch(proxy_url)

        # Forward the cache-relevant headers from the upstream response.
        for header in ["Content-Type", "Cache-Control", "Date", "Last-Modified", "Expires"]:
            if header in response.headers:
                self.set_header(header, response.headers[header])
        self.finish(response.body)
class ServePostProcessor(PostProcessorBase):
    """Post processor designed to serve files

    Proxies reveal.js requests to a CDN if no local reveal.js is present
    """

    open_in_browser = Bool(True, help="""Should the browser be opened automatically?""").tag(
        config=True
    )

    browser = Unicode(
        "",
        help="""Specify what browser should be used to open slides. See
        https://docs.python.org/3/library/webbrowser.html#webbrowser.register
        to see how keys are mapped to browser executables. If
        not specified, the default browser will be determined
        by the `webbrowser`
        standard library module, which allows setting of the BROWSER
        environment variable to override it.
        """,
    ).tag(config=True)

    reveal_cdn = Unicode(
        "https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.5.0", help="""URL for reveal.js CDN."""
    ).tag(config=True)
    reveal_prefix = Unicode("reveal.js", help="URL prefix for reveal.js").tag(config=True)
    ip = Unicode("127.0.0.1", help="The IP address to listen on.").tag(config=True)
    port = Int(8000, help="port for the server to listen on.").tag(config=True)

    def postprocess(self, input):
        """Serve the build directory with a webserver."""
        dirname, filename = os.path.split(input)
        # Serve all files in the build directory; redirect "/" to the slides.
        handlers = [
            (r"/(.+)", web.StaticFileHandler, {"path": dirname}),
            (r"/", web.RedirectHandler, {"url": "/%s" % filename}),
        ]

        if "://" in self.reveal_prefix or self.reveal_prefix.startswith("//"):
            # reveal specifically from CDN, nothing to do
            pass
        elif os.path.isdir(os.path.join(dirname, self.reveal_prefix)):
            # reveal prefix exists
            self.log.info("Serving local %s", self.reveal_prefix)
        else:
            # No local reveal.js: proxy those requests to the configured CDN.
            self.log.info("Redirecting %s requests to %s", self.reveal_prefix, self.reveal_cdn)
            handlers.insert(0, (r"/(%s)/(.*)" % self.reveal_prefix, ProxyHandler))

        app = web.Application(
            handlers,
            cdn=self.reveal_cdn,
            client=AsyncHTTPClient(),
        )

        # hook up tornado logging to our logger
        log.app_log = self.log

        http_server = httpserver.HTTPServer(app)
        http_server.listen(self.port, address=self.ip)
        url = "http://%s:%i/%s" % (self.ip, self.port, filename)
        print("Serving your slides at %s" % url)
        print("Use Control-C to stop this server")
        if self.open_in_browser:
            try:
                # Open the browser in a thread so the IOLoop can start below.
                browser = webbrowser.get(self.browser or None)
                b = lambda: browser.open(url, new=2)  # noqa
                threading.Thread(target=b).start()
            except webbrowser.Error as e:
                self.log.warning("No web browser found: %s." % e)
                browser = None

        try:
            # Blocks until interrupted with Ctrl-C.
            ioloop.IOLoop.instance().start()
        except KeyboardInterrupt:
            print("\nInterrupted")
def main(path):
    """Serve ``path`` as a slideshow (module run as a script)."""
    ServePostProcessor()(path)
if __name__ == "__main__":
    import sys

    # Serve the file given as the first command-line argument.
    main(sys.argv[1])

View File

@ -0,0 +1,24 @@
"""
Module with tests for the serve post-processor
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import pytest
from ...tests.base import TestsBase
class TestServe(TestsBase):
    """Contains test functions for serve.py"""

    def test_constructor(self):
        """Can a ServePostProcessor be constructed?"""
        # Skip the whole test when tornado is not installed at all.
        pytest.importorskip("tornado")
        try:
            from ..serve import ServePostProcessor
        except ImportError:
            print("Something weird is happening.\nTornado is sometimes present, sometimes not.")
            raise
        else:
            ServePostProcessor()

View File

@ -0,0 +1,19 @@
# Class base Preprocessors
# Backwards compatibility for imported name
from nbclient.exceptions import CellExecutionError
from .base import Preprocessor
from .clearmetadata import ClearMetadataPreprocessor
from .clearoutput import ClearOutputPreprocessor
# decorated function Preprocessors
from .coalescestreams import coalesce_streams
from .convertfigures import ConvertFiguresPreprocessor
from .csshtmlheader import CSSHTMLHeaderPreprocessor
from .execute import ExecutePreprocessor
from .extractoutput import ExtractOutputPreprocessor
from .highlightmagics import HighlightMagicsPreprocessor
from .latex import LatexPreprocessor
from .regexremove import RegexRemovePreprocessor
from .svg2pdf import SVG2PDFPreprocessor
from .tagremove import TagRemovePreprocessor

View File

@ -0,0 +1,89 @@
"""Base class for preprocessors"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import Bool
from ..utils.base import NbConvertBase
class Preprocessor(NbConvertBase):
    """A configurable preprocessor

    Inherit from this class if you wish to have configurability for your
    preprocessor.

    Any configurable traitlets this class exposed will be configurable in
    profiles using c.SubClassName.attribute = value

    You can overwrite `preprocess_cell()` to apply a transformation
    independently on each cell or `preprocess()` if you prefer your own
    logic. See corresponding docstring for information.

    Disabled by default and can be enabled via the config by
        'c.YourPreprocessorName.enabled = True'
    """

    # Preprocessors are opt-in: a disabled preprocessor is a no-op.
    enabled = Bool(False).tag(config=True)

    def __init__(self, **kw):
        """
        Public constructor

        Parameters
        ----------
        config : Config
            Configuration file structure
        `**kw`
            Additional keyword arguments passed to parent
        """
        super().__init__(**kw)

    def __call__(self, nb, resources):
        """Apply the preprocessor if enabled; otherwise pass through unchanged."""
        if self.enabled:
            self.log.debug("Applying preprocessor: %s", self.__class__.__name__)
            return self.preprocess(nb, resources)
        else:
            return nb, resources

    def preprocess(self, nb, resources):
        """
        Preprocessing to apply on each notebook.

        Must return modified nb, resources.

        If you wish to apply your preprocessing to each cell, you might want
        to override preprocess_cell method instead.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being converted
        resources : dictionary
            Additional resources used in the conversion process. Allows
            preprocessors to pass variables into the Jinja engine.
        """
        for index, cell in enumerate(nb.cells):
            nb.cells[index], resources = self.preprocess_cell(cell, resources, index)
        return nb, resources

    def preprocess_cell(self, cell, resources, index):
        """
        Override if you want to apply some preprocessing to each cell.
        Must return modified cell and resource dictionary.

        Parameters
        ----------
        cell : NotebookNode cell
            Notebook cell being processed
        resources : dictionary
            Additional resources used in the conversion process. Allows
            preprocessors to pass variables into the Jinja engine.
        index : int
            Index of the cell being processed
        """
        # FIX: removed the unreachable ``return cell, resources`` statement
        # that followed this raise (dead code).
        raise NotImplementedError("should be implemented by subclass")

View File

@ -0,0 +1,104 @@
"""Module containing a preprocessor that removes metadata from code cells"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import Bool, Set
from .base import Preprocessor
class ClearMetadataPreprocessor(Preprocessor):
    """
    Removes all the metadata from all code cells in a notebook.
    """

    clear_cell_metadata = Bool(
        True,
        help=("Flag to choose if cell metadata is to be cleared in addition to notebook metadata."),
    ).tag(config=True)
    clear_notebook_metadata = Bool(
        True,
        help=("Flag to choose if notebook metadata is to be cleared in addition to cell metadata."),
    ).tag(config=True)
    # A mask entry is either a plain key (str) or a tuple of keys that names
    # a nested path to preserve.
    preserve_nb_metadata_mask = Set(
        [("language_info", "name")],
        help=(
            "Indicates the key paths to preserve when deleting metadata "
            "across both cells and notebook metadata fields. Tuples of "
            "keys can be passed to preserved specific nested values"
        ),
    ).tag(config=True)
    preserve_cell_metadata_mask = Set(
        help=(
            "Indicates the key paths to preserve when deleting metadata "
            "across both cells and notebook metadata fields. Tuples of "
            "keys can be passed to preserved specific nested values"
        )
    ).tag(config=True)

    def current_key(self, mask_key):
        """Return the key a mask entry matches at the current nesting level."""
        if isinstance(mask_key, str):
            return mask_key
        elif len(mask_key) == 0:
            # Safeguard
            return None
        else:
            return mask_key[0]

    def current_mask(self, mask):
        """Return the set of keys the mask preserves at the current level."""
        return {self.current_key(k) for k in mask if self.current_key(k) is not None}

    def nested_masks(self, mask):
        """Return a map of current-level key -> remaining nested mask path."""
        return {
            self.current_key(k[0]): k[1:]
            for k in mask
            if k and not isinstance(k, str) and len(k) > 1
        }

    def nested_filter(self, items, mask):
        """Yield the (key, value) pairs from ``items`` that the mask preserves,
        recursing into dict values when the mask names a nested path."""
        keep_current = self.current_mask(mask)
        keep_nested_lookup = self.nested_masks(mask)
        for k, v in items:
            keep_nested = keep_nested_lookup.get(k)
            if k in keep_current:
                if keep_nested is not None:
                    # Mask has a nested path: only dict values can recurse;
                    # non-dict values are dropped.
                    if isinstance(v, dict):
                        yield k, dict(self.nested_filter(v.items(), keep_nested))
                else:
                    yield k, v

    def preprocess_cell(self, cell, resources, cell_index):
        """
        All the code cells are returned with an empty metadata field.
        """
        if self.clear_cell_metadata:
            if cell.cell_type == "code":
                # Remove metadata
                if "metadata" in cell:
                    cell.metadata = dict(
                        self.nested_filter(cell.metadata.items(), self.preserve_cell_metadata_mask)
                    )
        return cell, resources

    def preprocess(self, nb, resources):
        """
        Preprocessing to apply on each notebook.
        Must return modified nb, resources.
        Parameters
        ----------
        nb : NotebookNode
            Notebook being converted
        resources : dictionary
            Additional resources used in the conversion process. Allows
            preprocessors to pass variables into the Jinja engine.
        """
        # Clear per-cell metadata first (parent iterates preprocess_cell),
        # then notebook-level metadata.
        nb, resources = super().preprocess(nb, resources)
        if self.clear_notebook_metadata:
            if "metadata" in nb:
                nb.metadata = dict(
                    self.nested_filter(nb.metadata.items(), self.preserve_nb_metadata_mask)
                )
        return nb, resources

View File

@ -0,0 +1,29 @@
"""Module containing a preprocessor that removes the outputs from code cells"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import Set
from .base import Preprocessor
class ClearOutputPreprocessor(Preprocessor):
    """
    Removes the output from all code cells in a notebook.
    """

    # Cell-metadata keys that only make sense while outputs exist.
    remove_metadata_fields = Set({"collapsed", "scrolled"}).tag(config=True)

    def preprocess_cell(self, cell, resources, cell_index):
        """
        Apply a transformation on each cell. See base.py for details.
        """
        if cell.cell_type != "code":
            return cell, resources

        # Drop outputs and the execution counter.
        cell.outputs = []
        cell.execution_count = None

        # Remove metadata associated with output
        if "metadata" in cell:
            metadata = cell.metadata
            for field in self.remove_metadata_fields:
                metadata.pop(field, None)

        return cell, resources

View File

@ -0,0 +1,81 @@
"""Preprocessor for merging consecutive stream outputs for easier handling."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import functools
import re
from traitlets.log import get_logger
def cell_preprocessor(function):
    """
    Wrap a function to be executed on all cells of a notebook

    The wrapped function should have these parameters:

    cell : NotebookNode cell
        Notebook cell being processed
    resources : dictionary
        Additional resources used in the conversion process. Allows
        preprocessors to pass variables into the Jinja engine.
    index : int
        Index of the cell being processed
    """

    @functools.wraps(function)
    def wrapper(nb, resources):
        get_logger().debug("Applying preprocessor: %s", function.__name__)
        cells = nb.cells
        for position, cell in enumerate(cells):
            cells[position], resources = function(cell, resources, position)
        return nb, resources

    return wrapper
# Matches any text preceding a carriage return that is NOT followed by a
# newline. coalesce_streams uses it to emulate terminal behaviour: only the
# final overwrite of a CR-rewritten line is kept.
cr_pat = re.compile(r".*\r(?=[^\n])")
@cell_preprocessor
def coalesce_streams(cell, resources, index):
    """
    Merge consecutive sequences of stream output into single stream
    to prevent extra newlines inserted at flush calls

    Parameters
    ----------
    cell : NotebookNode cell
        Notebook cell being processed
    resources : dictionary
        Additional resources used in the conversion process. Allows
        transformers to pass variables into the Jinja engine.
    index : int
        Index of the cell being processed
    """
    outputs = cell.get("outputs", [])
    if not outputs:
        return cell, resources

    last = outputs[0]
    new_outputs = [last]
    for output in outputs[1:]:
        if (
            output.output_type == "stream"
            and last.output_type == "stream"
            and last.name == output.name
        ):
            # Same stream as the previous output: merge the text into the
            # existing entry (``last`` aliases the node already appended
            # to ``new_outputs``, so this mutates it in place).
            last.text += output.text
        else:
            new_outputs.append(output)
            last = output

    # process \r characters
    for output in new_outputs:
        if output.output_type == "stream" and "\r" in output.text:
            output.text = cr_pat.sub("", output.text)

    cell.outputs = new_outputs
    return cell, resources

View File

@ -0,0 +1,50 @@
"""Module containing a preprocessor that converts outputs in the notebook from
one format to another.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import Unicode
from .base import Preprocessor
class ConvertFiguresPreprocessor(Preprocessor):
    """
    Converts all of the outputs in a notebook from one format to another.

    Subclasses must implement ``convert_figure`` and configure the
    ``from_format`` / ``to_format`` traits.
    """

    from_format = Unicode(help="Format the converter accepts").tag(config=True)
    to_format = Unicode(help="Format the converter writes").tag(config=True)

    # NOTE: the redundant ``__init__`` that only forwarded to
    # ``super().__init__`` was removed; behavior is unchanged.

    def convert_figure(self, data_format, data):
        """Convert a single figure from ``data_format``; must be overridden."""
        raise NotImplementedError()

    def preprocess_cell(self, cell, resources, cell_index):
        """
        Apply a transformation on each cell,
        See base.py
        """
        # Loop through all of the datatypes of the outputs in the cell.
        for output in cell.get("outputs", []):
            # Convert only when the source format is present and the target
            # format is not already there.
            if (
                output.output_type in {"execute_result", "display_data"}
                and self.from_format in output.data
                and self.to_format not in output.data
            ):
                output.data[self.to_format] = self.convert_figure(
                    self.from_format, output.data[self.from_format]
                )
        return cell, resources

View File

@ -0,0 +1,92 @@
"""Module that pre-processes the notebook for export to HTML.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import hashlib
import os
from jupyterlab_pygments import JupyterStyle
from pygments.style import Style
from traitlets import Type, Unicode, Union
from .base import Preprocessor
try:
from notebook import DEFAULT_STATIC_FILES_PATH
except ImportError:
DEFAULT_STATIC_FILES_PATH = None
class CSSHTMLHeaderPreprocessor(Preprocessor):
    """
    Preprocessor used to pre-process notebook for HTML output.  Adds IPython notebook
    front-end CSS and Pygments CSS to HTML output.
    """

    # CSS selector that the generated pygments rules are scoped under.
    highlight_class = Unicode(".highlight", help="CSS highlight class identifier").tag(config=True)

    # Either the name of a pygments style (str) or a pygments Style class.
    style = Union(
        [Unicode("default"), Type(klass=Style)],
        help="Name of the pygments style to use",
        default_value=JupyterStyle,
    ).tag(config=True)

    def __init__(self, *pargs, **kwargs):
        Preprocessor.__init__(self, *pargs, **kwargs)
        # Cached hash of the notebook package's default custom.css; computed
        # lazily in _generate_header so the default file is read at most once.
        self._default_css_hash = None

    def preprocess(self, nb, resources):
        """Fetch and add CSS to the resource dictionary

        Fetch CSS from IPython and Pygments to add at the beginning
        of the html files. Add this css in resources in the
        "inlining.css" key

        Parameters
        ----------
        nb : NotebookNode
            Notebook being converted
        resources : dictionary
            Additional resources used in the conversion process. Allows
            preprocessors to pass variables into the Jinja engine.
        """
        resources["inlining"] = {}
        resources["inlining"]["css"] = self._generate_header(resources)
        return nb, resources

    def _generate_header(self, resources):
        """
        Fills self.header with lines of CSS extracted from IPython
        and Pygments.

        Returns a list of CSS strings: the pygments style definitions plus,
        when present and modified, the user's custom.css.
        """
        from pygments.formatters import HtmlFormatter

        header = []
        formatter = HtmlFormatter(style=self.style)
        pygments_css = formatter.get_style_defs(self.highlight_class)
        header.append(pygments_css)

        # Load the user's custom CSS and IPython's default custom CSS. If they
        # differ, assume the user has customised the CSS and inline it in the
        # nbconvert output.
        config_dir = resources["config_dir"]
        custom_css_filename = os.path.join(config_dir, "custom", "custom.css")
        if os.path.isfile(custom_css_filename):
            # Hash the default file once (only when the notebook package is
            # installed and provides DEFAULT_STATIC_FILES_PATH).
            if DEFAULT_STATIC_FILES_PATH and self._default_css_hash is None:
                self._default_css_hash = self._hash(
                    os.path.join(DEFAULT_STATIC_FILES_PATH, "custom", "custom.css")
                )
            if self._hash(custom_css_filename) != self._default_css_hash:
                with open(custom_css_filename, encoding="utf-8") as f:
                    header.append(f.read())
        return header

    def _hash(self, filename):
        """Compute the MD5 digest of a file's raw bytes.

        Used only for change detection against the default custom.css,
        not for any security purpose.
        """
        md5 = hashlib.md5()
        with open(filename, "rb") as f:
            md5.update(f.read())
        return md5.digest()

View File

@ -0,0 +1,111 @@
"""Module containing a preprocessor that executes the code cells
and updates outputs"""
from nbclient import NotebookClient
from nbclient import execute as _execute
# Backwards compatibility for imported name
from nbclient.exceptions import CellExecutionError # noqa
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from nbformat import NotebookNode
from .base import Preprocessor
def executenb(*args, **kwargs):
    """Deprecated alias for :func:`nbclient.execute`.

    Emits a ``FutureWarning`` and forwards all arguments unchanged.
    Import and call ``nbclient.execute`` directly instead.
    """
    from warnings import warn

    # stacklevel=2 attributes the warning to the caller of executenb
    # rather than to this shim, so users can find the import to fix.
    warn(
        "The 'nbconvert.preprocessors.execute.executenb' function was moved to nbclient.execute. "
        "We recommend importing that library directly.",
        FutureWarning,
        stacklevel=2,
    )
    return _execute(*args, **kwargs)
# We inherit from both classes to allow for traitlets to resolve as they did pre-6.0.
# This unfortunately makes for some ugliness around initialization as NotebookClient
# assumes it's a constructed class with a nb object that we have to hack around.
class ExecutePreprocessor(Preprocessor, NotebookClient):
    """
    Executes all the cells in a notebook
    """

    def __init__(self, **kw):
        # NOTE(review): if callers pass ``nb=...``, it reaches
        # Preprocessor.__init__ both explicitly and via **kw — confirm
        # callers never pass nb here, or that kw should be popped first.
        nb = kw.get("nb")
        Preprocessor.__init__(self, nb=nb, **kw)
        NotebookClient.__init__(self, nb, **kw)

    def _check_assign_resources(self, resources):
        # Keep the most recent non-empty resources dict; also initialise the
        # attribute on first use so self.resources always exists afterwards.
        if resources or not hasattr(self, "resources"):
            self.resources = resources

    def preprocess(self, nb: NotebookNode, resources=None, km=None):
        """
        Preprocess notebook executing each code cell.

        The input argument *nb* is modified in-place.

        Note that this function recalls NotebookClient.__init__, which may look wrong.
        However since the preprocess call acts like an init on execution state it's expected.
        Therefore, we need to capture it here again to properly reset because traitlet
        assignments are not passed. There is a risk if traitlets apply any side effects for
        dual init.
        The risk should be manageable, and this approach minimizes side-effects relative
        to other alternatives.

        One alternative but rejected implementation would be to copy the client's init internals
        which has already gotten out of sync with nbclient 0.5 release before nbconvert 6.0 released.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being executed.
        resources : dictionary (optional)
            Additional resources used in the conversion process. For example,
            passing ``{'metadata': {'path': run_path}}`` sets the
            execution path to ``run_path``.
        km: KernelManager (optional)
            Optional kernel manager. If none is provided, a kernel manager will
            be created.

        Returns
        -------
        nb : NotebookNode
            The executed notebook.
        resources : dictionary
            Additional resources used in the conversion process.
        """
        # Re-init the client state against this notebook/kernel manager
        # (see the docstring above for why the dual init is intentional).
        NotebookClient.__init__(self, nb, km)
        self.reset_execution_trackers()
        self._check_assign_resources(resources)

        with self.setup_kernel():
            # Record the kernel's language_info in the notebook metadata.
            info_msg = self.wait_for_reply(self.kc.kernel_info())
            self.nb.metadata["language_info"] = info_msg["content"]["language_info"]
            for index, cell in enumerate(self.nb.cells):
                self.preprocess_cell(cell, resources, index)
        self.set_widgets_metadata()

        return self.nb, self.resources

    def preprocess_cell(self, cell, resources, index):
        """
        Override if you want to apply some preprocessing to each cell.
        Must return modified cell and resource dictionary.

        Parameters
        ----------
        cell : NotebookNode cell
            Notebook cell being processed
        resources : dictionary
            Additional resources used in the conversion process. Allows
            preprocessors to pass variables into the Jinja engine.
        index : int
            Index of the cell being processed
        """
        self._check_assign_resources(resources)
        # Delegate actual execution to nbclient; store_history mirrors an
        # interactive session so execution_count is populated.
        cell = self.execute_cell(cell, index, store_history=True)
        return cell, self.resources

View File

@ -0,0 +1,148 @@
"""A preprocessor that extracts all of the outputs from the
notebook file. The extracted outputs are returned in the 'resources' dictionary.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import os
import sys
from binascii import a2b_base64
from mimetypes import guess_extension
from textwrap import dedent
from traitlets import Set, Unicode
from .base import Preprocessor
def guess_extension_without_jpe(mimetype):
    """
    Like ``mimetypes.guess_extension`` but never returns '.jpe', which
    LaTeX does not recognise for jpeg images; '.jpeg' is substituted.
    For every other case the behaviour is identical to
    ``mimetypes.guess_extension``.
    """
    ext = guess_extension(mimetype)
    return ".jpeg" if ext == ".jpe" else ext
def platform_utf_8_encode(data):
    """
    Encode text to UTF-8 bytes, using platform line endings:
    on Windows each "\\n" becomes "\\r\\n" before encoding.
    Non-str input (e.g. bytes) is returned unchanged.
    """
    if not isinstance(data, str):
        return data
    if sys.platform == "win32":
        data = data.replace("\n", "\r\n")
    return data.encode("utf-8")
class ExtractOutputPreprocessor(Preprocessor):
    """
    Extracts all of the outputs from the notebook file.  The extracted
    outputs are returned in the 'resources' dictionary.
    """

    # Template for generated filenames; fields available:
    # unique_key, cell_index, index (output index), extension.
    output_filename_template = Unicode("{unique_key}_{cell_index}_{index}{extension}").tag(
        config=True
    )

    # Output mimetypes whose data is written to external files.
    extract_output_types = Set({"image/png", "image/jpeg", "image/svg+xml", "application/pdf"}).tag(
        config=True
    )

    def preprocess_cell(self, cell, resources, cell_index):
        """
        Apply a transformation on each cell,

        Parameters
        ----------
        cell : NotebookNode cell
            Notebook cell being processed
        resources : dictionary
            Additional resources used in the conversion process. Allows
            preprocessors to pass variables into the Jinja engine.
        cell_index : int
            Index of the cell being processed (see base.py)
        """

        # Get the unique key from the resource dict if it exists. If it does not
        # exist, use 'output' as the default. Also, get files directory if it
        # has been specified
        unique_key = resources.get("unique_key", "output")
        output_files_dir = resources.get("output_files_dir", None)

        # Make sure outputs key exists
        if not isinstance(resources["outputs"], dict):
            resources["outputs"] = {}

        # Loop through all of the outputs in the cell
        for index, out in enumerate(cell.get("outputs", [])):
            if out.output_type not in {"display_data", "execute_result"}:
                continue

            if "text/html" in out.data:
                out["data"]["text/html"] = dedent(out["data"]["text/html"])

            # Get the output in data formats that the template needs extracted
            for mime_type in self.extract_output_types:
                if mime_type in out.data:
                    data = out.data[mime_type]

                    # Binary files are base64-encoded, SVG is already XML
                    if mime_type in {"image/png", "image/jpeg", "application/pdf"}:
                        # data is b64-encoded as text (str, unicode),
                        # we want the original bytes
                        data = a2b_base64(data)
                    elif mime_type == "application/json" or not isinstance(data, str):
                        # Data is either JSON-like and was parsed into a Python
                        # object according to the spec, or data is for sure
                        # JSON. In the latter case we want to go extra sure that
                        # we enclose a scalar string value into extra quotes by
                        # serializing it properly.
                        if isinstance(data, bytes):
                            # We need to guess the encoding in this
                            # instance. Some modules that return raw data like
                            # svg can leave the data in byte form instead of str
                            data = data.decode("utf-8")
                        data = platform_utf_8_encode(json.dumps(data))
                    else:
                        # All other text_type data will fall into this path
                        data = platform_utf_8_encode(data)

                    # Pick a file extension for the mimetype, falling back to
                    # the mimetype's subtype (e.g. "image/foo" -> ".foo").
                    ext = guess_extension_without_jpe(mime_type)
                    if ext is None:
                        ext = "." + mime_type.rsplit("/")[-1]
                    # Honour a filename supplied on the output's metadata,
                    # normalising its extension to match the mimetype.
                    if out.metadata.get("filename", ""):
                        filename = out.metadata["filename"]
                        if not filename.endswith(ext):
                            filename += ext
                    else:
                        filename = self.output_filename_template.format(
                            unique_key=unique_key, cell_index=cell_index, index=index, extension=ext
                        )

                    # On the cell, make the figure available via
                    #   cell.outputs[i].metadata.filenames['mime/type']
                    # where
                    #   cell.outputs[i].data['mime/type'] contains the data
                    if output_files_dir is not None:
                        filename = os.path.join(output_files_dir, filename)
                    out.metadata.setdefault("filenames", {})
                    out.metadata["filenames"][mime_type] = filename

                    # Duplicate filenames would silently overwrite each other
                    # on disk, so fail loudly instead.
                    if filename in resources["outputs"]:
                        raise ValueError(
                            "Your outputs have filename metadata associated "
                            "with them. Nbconvert saves these outputs to "
                            "external files using this filename metadata. "
                            "Filenames need to be unique across the notebook, "
                            "or images will be overwritten. The filename {} is "
                            "associated with more than one output. The second "
                            "output associated with this filename is in cell "
                            "{}.".format(filename, cell_index)
                        )

                    # In the resources, make the figure available via
                    #   resources['outputs']['filename'] = data
                    resources["outputs"][filename] = data

        return cell, resources

View File

@ -0,0 +1,103 @@
"""This preprocessor detect cells using a different language through
magic extensions such as `%%R` or `%%octave`. Cell's metadata is marked
so that the appropriate highlighter can be used in the `highlight`
filter.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import re
from traitlets import Dict
from .base import Preprocessor
class HighlightMagicsPreprocessor(Preprocessor):
    """
    Detects and tags code cells that use a different languages than Python.
    """

    # list of magic language extensions and their associated pygment lexers
    default_languages = Dict(
        {
            "%%R": "r",
            "%%bash": "bash",
            "%%cython": "cython",
            "%%javascript": "javascript",
            "%%julia": "julia",
            "%%latex": "latex",
            "%%octave": "octave",
            "%%perl": "perl",
            "%%ruby": "ruby",
            "%%sh": "sh",
            "%%sql": "sql",
        }
    )

    # user defined language extensions
    languages = Dict(
        help=(
            "Syntax highlighting for magic's extension languages. "
            "Each item associates a language magic extension such as %%R, "
            "with a pygments lexer such as r."
        )
    ).tag(config=True)

    def __init__(self, config=None, **kw):
        """Public constructor"""
        super().__init__(config=config, **kw)

        # Update the default languages dict with the user configured ones
        self.default_languages.update(self.languages)

        # Build a regular expression to catch language extensions and choose
        # an adequate pygments lexer.  Keys are escaped so user-configured
        # magics containing regex metacharacters (e.g. "%%c++") still match
        # literally; the built-in keys are unaffected by escaping.
        any_language = "|".join(re.escape(magic) for magic in self.default_languages)
        self.re_magic_language = re.compile(rf"^\s*({any_language})\s+")

    def which_magic_language(self, source):
        """
        When a cell uses another language through a magic extension,
        the other language is returned.
        If no language magic is detected, this function returns None.

        Parameters
        ----------
        source: str
            Source code of the cell to highlight
        """
        m = self.re_magic_language.match(source)
        if m:
            # By construction of the re, the matched language must be in the
            # languages dictionary
            return self.default_languages[m.group(1)]
        else:
            return None

    def preprocess_cell(self, cell, resources, cell_index):
        """
        Tags cells using a magic extension language

        Parameters
        ----------
        cell : NotebookNode cell
            Notebook cell being processed
        resources : dictionary
            Additional resources used in the conversion process. Allows
            preprocessors to pass variables into the Jinja engine.
        cell_index : int
            Index of the cell being processed (see base.py)
        """
        # Only tag code cells
        if cell.cell_type == "code":
            magic_language = self.which_magic_language(cell.source)
            if magic_language:
                cell["metadata"]["magics_language"] = magic_language
        return cell, resources

View File

@ -0,0 +1,54 @@
"""Module that allows latex output notebooks to be conditioned before
they are converted.
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from traitlets import Unicode
from .base import Preprocessor
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class LatexPreprocessor(Preprocessor):
    """Preprocessor for latex destined documents.

    Mainly populates the ``latex`` key in the resources dict,
    adding definitions for pygments highlight styles.
    """

    # Pygments style used when highlighting code in the LaTeX output.
    style = Unicode("default", help="Name of the pygments style to use").tag(config=True)

    def preprocess(self, nb, resources):
        """Preprocessing to apply on each notebook.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being converted
        resources : dictionary
            Additional resources used in the conversion process. Allows
            preprocessors to pass variables into the Jinja engine.
        """
        # Generate Pygments definitions for Latex
        from pygments.formatters import LatexFormatter

        latex_resources = resources.setdefault("latex", {})
        latex_resources.setdefault(
            "pygments_definitions", LatexFormatter(style=self.style).get_style_defs()
        )
        latex_resources.setdefault("pygments_style_name", self.style)
        return nb, resources

View File

@ -0,0 +1,70 @@
"""
Module containing a preprocessor that removes cells if they match
one or more regular expression.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import re
from traitlets import List, Unicode
from .base import Preprocessor
class RegexRemovePreprocessor(Preprocessor):
    """
    Removes cells from a notebook that match one or more regular expression.

    For each cell, the preprocessor checks whether its contents match
    the regular expressions in the ``patterns`` traitlet which is a list
    of unicode strings. If the contents match any of the patterns, the cell
    is removed from the notebook.

    To modify the list of matched patterns,
    modify the patterns traitlet. For example, execute the following command
    to convert a notebook to html and remove cells containing only whitespace::

      jupyter nbconvert --RegexRemovePreprocessor.patterns="['\\s*\\Z']" mynotebook.ipynb

    The command line argument
    sets the list of patterns to ``'\\s*\\Z'`` which matches an arbitrary number
    of whitespace characters followed by the end of the string.

    See https://regex101.com/ for an interactive guide to regular expressions
    (make sure to select the python flavor). See
    https://docs.python.org/library/re.html for the official regular expression
    documentation in python.
    """

    patterns = List(Unicode(), default_value=[]).tag(config=True)

    def check_conditions(self, cell):
        """
        Checks that a cell matches the pattern.

        Returns: Boolean.
        True means cell should *not* be removed.
        """
        # Wrap each pattern in a non-capturing group to preserve its
        # precedence, then join the groups into a single alternation.
        combined = "|".join("(?:%s)" % pattern for pattern in self.patterns)
        # A cell survives only when none of the patterns match its source.
        return re.match(combined, cell.source) is None

    def preprocess(self, nb, resources):
        """
        Preprocessing to apply to each notebook. See base.py for details.
        """
        # With no patterns there is nothing to remove (and an empty
        # alternation would match every cell), so bail out early.
        if not self.patterns:
            return nb, resources

        # Keep only the cells that fail to match every pattern.
        nb.cells = [cell for cell in nb.cells if self.check_conditions(cell)]
        return nb, resources

View File

@ -0,0 +1,169 @@
"""
NBConvert Preprocessor for sanitizing HTML rendering of notebooks.
"""
import warnings
from bleach import ALLOWED_ATTRIBUTES, ALLOWED_TAGS, clean
from traitlets import Any, Bool, List, Set, Unicode
_USE_BLEACH_CSS_SANITIZER = False
_USE_BLEACH_STYLES = False
try:
# bleach[css] >=5.0
from bleach.css_sanitizer import ALLOWED_CSS_PROPERTIES as ALLOWED_STYLES
from bleach.css_sanitizer import CSSSanitizer
_USE_BLEACH_CSS_SANITIZER = True
_USE_BLEACH_STYLES = False
except ImportError:
try:
# bleach <5
from bleach import ALLOWED_STYLES
_USE_BLEACH_CSS_SANITIZER = False
_USE_BLEACH_STYLES = True
warnings.warn(
"Support for bleach <5 will be removed in a future version of nbconvert",
DeprecationWarning,
)
except ImportError:
warnings.warn(
"The installed bleach/tinycss2 do not provide CSS sanitization, "
"please upgrade to bleach >=5",
UserWarning,
)
from .base import Preprocessor
__all__ = ["SanitizeHTML"]
class SanitizeHTML(Preprocessor):
    """Sanitize HTML in cell sources and outputs using Bleach.

    Raw and markdown cell sources are cleaned; code-cell outputs with
    HTML/markdown mimetypes are cleaned and unrecognised rich-output
    mimetypes are removed entirely.
    """

    # Bleach config.
    attributes = Any(
        config=True,
        default_value=ALLOWED_ATTRIBUTES,
        help="Allowed HTML tag attributes",
    )
    tags = List(
        Unicode(),
        config=True,
        default_value=ALLOWED_TAGS,
        help="List of HTML tags to allow",
    )
    styles = List(
        Unicode(),
        config=True,
        default_value=ALLOWED_STYLES,
        help="Allowed CSS styles if <style> tag is allowed",
    )
    strip = Bool(
        config=True,
        default_value=False,
        help="If True, remove unsafe markup entirely instead of escaping",
    )
    strip_comments = Bool(
        config=True,
        default_value=True,
        help="If True, strip comments from escaped HTML",
    )

    # Display data config.
    safe_output_keys = Set(
        config=True,
        default_value={
            "metadata",  # Not a mimetype per-se, but expected and safe.
            "text/plain",
            "text/latex",
            "application/json",
            "image/png",
            "image/jpeg",
        },
        help="Cell output mimetypes to render without modification",
    )
    sanitized_output_types = Set(
        config=True,
        default_value={
            "text/html",
            "text/markdown",
        },
        help="Cell output types to display after escaping with Bleach.",
    )

    def preprocess_cell(self, cell, resources, cell_index):
        """
        Sanitize potentially-dangerous contents of the cell.

        Cell Types:
          raw:
            Sanitize literal HTML
          markdown:
            Sanitize literal HTML
          code:
            Sanitize outputs that could result in code execution
        """
        # NOTE(review): any other cell_type falls through and returns None —
        # confirm only the nbformat types raw/markdown/code ever reach here.
        if cell.cell_type == "raw":
            # Sanitize all raw cells anyway.
            # Only ones with the text/html mimetype should be emitted
            # but erring on the side of safety maybe.
            cell.source = self.sanitize_html_tags(cell.source)
            return cell, resources
        elif cell.cell_type == "markdown":
            cell.source = self.sanitize_html_tags(cell.source)
            return cell, resources
        elif cell.cell_type == "code":
            cell.outputs = self.sanitize_code_outputs(cell.outputs)
            return cell, resources

    def sanitize_code_outputs(self, outputs):
        """
        Sanitize code cell outputs.

        Removes 'text/javascript' fields from display_data outputs, and
        runs `sanitize_html_tags` over 'text/html'.
        """
        for output in outputs:
            # These are always ascii, so nothing to escape.
            if output["output_type"] in ("stream", "error"):
                continue
            data = output.data
            to_remove = []
            for key in data:
                if key in self.safe_output_keys:
                    continue
                elif key in self.sanitized_output_types:
                    self.log.info("Sanitizing %s" % key)
                    data[key] = self.sanitize_html_tags(data[key])
                else:
                    # Mark key for removal. (Python doesn't allow deletion of
                    # keys from a dict during iteration)
                    to_remove.append(key)
            for key in to_remove:
                self.log.info("Removing %s" % key)
                del data[key]
        return outputs

    def sanitize_html_tags(self, html_str):
        """
        Sanitize a string containing raw HTML tags.
        """
        kwargs = dict(
            tags=self.tags,
            attributes=self.attributes,
            strip=self.strip,
            strip_comments=self.strip_comments,
        )
        # bleach >=5 takes a CSSSanitizer object; bleach <5 took the allowed
        # styles directly; with neither available, styles are not filtered.
        if _USE_BLEACH_CSS_SANITIZER:
            css_sanitizer = CSSSanitizer(allowed_css_properties=self.styles)
            kwargs.update(css_sanitizer=css_sanitizer)
        elif _USE_BLEACH_STYLES:
            kwargs.update(styles=self.styles)
        return clean(html_str, **kwargs)

View File

@ -0,0 +1,153 @@
"""Module containing a preprocessor that converts outputs in the notebook from
one format to another.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import base64
import os
import subprocess
import sys
from shutil import which
from tempfile import TemporaryDirectory
from traitlets import List, Unicode, Union, default
from ..utils.io import FormatSafeDict
from .convertfigures import ConvertFiguresPreprocessor
# inkscape path for darwin (macOS)
INKSCAPE_APP = "/Applications/Inkscape.app/Contents/Resources/bin/inkscape"
# Recent versions of Inkscape (v1.0) moved the executable from
# Resources/bin/inkscape to MacOS/inkscape
INKSCAPE_APP_v1 = "/Applications/Inkscape.app/Contents/MacOS/inkscape"
if sys.platform == "win32":
try:
import winreg
except ImportError:
import _winreg as winreg
class SVG2PDFPreprocessor(ConvertFiguresPreprocessor):
    """
    Converts all of the outputs in a notebook from SVG to PDF.
    """

    @default("from_format")
    def _from_format_default(self):
        return "image/svg+xml"

    @default("to_format")
    def _to_format_default(self):
        return "application/pdf"

    inkscape_version = Unicode(
        help="""The version of inkscape being used.

        This affects how the conversion command is run.
        """
    ).tag(config=True)

    @default("inkscape_version")
    def _inkscape_version_default(self):
        # Ask the binary for its version string, e.g. "Inkscape 1.1 (...)".
        p = subprocess.Popen(
            [self.inkscape, "--version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        output, _ = p.communicate()
        if p.returncode != 0:
            raise RuntimeError("Unable to find inkscape executable --version")
        return output.decode("utf-8").split(" ")[1]

    # FIXME: Deprecate passing a string here
    command = Union(
        [Unicode(), List()],
        help="""
        The command to use for converting SVG to PDF

        This traitlet is a template, which will be formatted with the keys
        to_filename and from_filename.

        The conversion call must read the SVG from {from_filename},
        and write a PDF to {to_filename}.

        It could be a List (recommended) or a String. If string, it will
        be passed to a shell for execution.
        """,
    ).tag(config=True)

    @default("command")
    def _command_default(self):
        major_version = self.inkscape_version.split(".")[0]
        command = [self.inkscape]

        if int(major_version) < 1:
            # --without-gui is only needed for inkscape 0.x
            command.append("--without-gui")
            # --export-pdf is old name for --export-filename
            command.append("--export-pdf={to_filename}")
        else:
            command.append("--export-filename={to_filename}")

        command.append("{from_filename}")
        return command

    inkscape = Unicode(help="The path to Inkscape, if necessary").tag(config=True)

    @default("inkscape")
    def _inkscape_default(self):
        # Prefer whatever is on PATH, then the known macOS bundle locations,
        # then the Windows registry, finally fall back to the bare name.
        inkscape_path = which("inkscape")
        if inkscape_path is not None:
            return inkscape_path
        if sys.platform == "darwin":
            if os.path.isfile(INKSCAPE_APP_v1):
                return INKSCAPE_APP_v1
            # Order is important. If INKSCAPE_APP exists, prefer it over
            # the executable in the MacOS directory.
            if os.path.isfile(INKSCAPE_APP):
                return INKSCAPE_APP
        if sys.platform == "win32":
            wr_handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
            try:
                rkey = winreg.OpenKey(wr_handle, "SOFTWARE\\Classes\\inkscape.svg\\DefaultIcon")
                inkscape = winreg.QueryValueEx(rkey, "")[0]
            except FileNotFoundError as err:
                raise FileNotFoundError("Inkscape executable not found") from err
            return inkscape
        return "inkscape"

    def convert_figure(self, data_format, data):
        """
        Convert a single SVG figure to PDF.  Returns converted data.
        """
        # Work in a temporary directory
        with TemporaryDirectory() as tmpdir:
            # Write fig to temp file
            input_filename = os.path.join(tmpdir, "figure.svg")
            # SVG data is unicode text
            with open(input_filename, "w", encoding="utf8") as f:
                f.write(data)

            # Call conversion application
            output_filename = os.path.join(tmpdir, "figure.pdf")
            template_vars = {"from_filename": input_filename, "to_filename": output_filename}
            if isinstance(self.command, list):
                full_cmd = [s.format_map(FormatSafeDict(**template_vars)) for s in self.command]
            else:
                # For backwards compatibility with specifying strings
                # Okay-ish, since the string is trusted
                # BUG FIX: the template uses *named* fields ({from_filename},
                # {to_filename}), so the mapping must be expanded as keyword
                # arguments; format(*template_vars) only unpacked the keys
                # positionally and raised KeyError on any named field.
                full_cmd = self.command.format(**template_vars)
            subprocess.call(full_cmd, shell=isinstance(full_cmd, str))

            # Read output from drive
            # return value expects a filename
            if os.path.isfile(output_filename):
                with open(output_filename, "rb") as f:
                    # PDF is a nb supported binary, data type, so base64 encode.
                    return base64.encodebytes(f.read())
            else:
                raise TypeError("Inkscape svg to pdf conversion failed")

View File

@ -0,0 +1,141 @@
"""
Module containing a preprocessor that removes cells if they match
one or more regular expression.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import Set, Unicode
from .base import Preprocessor
class TagRemovePreprocessor(Preprocessor):
    """
    Removes inputs, outputs, or cells from a notebook that
    have tags that designate they are to be removed prior to exporting
    the notebook.

    remove_cell_tags
        removes cells tagged with these values

    remove_all_outputs_tags
        removes entire output areas on cells
        tagged with these values

    remove_single_output_tags
        removes individual output objects on
        outputs tagged with these values

    remove_input_tags
        removes inputs tagged with these values
    """

    remove_cell_tags = Set(
        Unicode(),
        default_value=[],
        help=(
            "Tags indicating which cells are to be removed,"
            "matches tags in ``cell.metadata.tags``."
        ),
    ).tag(config=True)
    remove_all_outputs_tags = Set(
        Unicode(),
        default_value=[],
        help=(
            "Tags indicating cells for which the outputs are to be removed,"
            "matches tags in ``cell.metadata.tags``."
        ),
    ).tag(config=True)
    remove_single_output_tags = Set(
        Unicode(),
        default_value=[],
        help=(
            "Tags indicating which individual outputs are to be removed,"
            "matches output *i* tags in ``cell.outputs[i].metadata.tags``."
        ),
    ).tag(config=True)
    remove_input_tags = Set(
        Unicode(),
        default_value=[],
        help=(
            "Tags indicating cells for which input is to be removed,"
            "matches tags in ``cell.metadata.tags``."
        ),
    ).tag(config=True)

    # Cell metadata fields dropped alongside a removed output area.
    remove_metadata_fields = Set({"collapsed", "scrolled"}).tag(config=True)

    def check_cell_conditions(self, cell, resources, index):
        """
        Checks that a cell has a tag that is to be removed

        Returns: Boolean.
        True means cell should *not* be removed.
        """

        # Return true if any of the tags in the cell are removable.
        return not self.remove_cell_tags.intersection(cell.get("metadata", {}).get("tags", []))

    def preprocess(self, nb, resources):
        """
        Preprocessing to apply to each notebook. See base.py for details.
        """
        # Skip preprocessing if the list of patterns is empty
        if not any(
            [
                self.remove_cell_tags,
                self.remove_all_outputs_tags,
                self.remove_single_output_tags,
                self.remove_input_tags,
            ]
        ):
            return nb, resources

        # Filter out cells that meet the conditions
        nb.cells = [
            self.preprocess_cell(cell, resources, index)[0]
            for index, cell in enumerate(nb.cells)
            if self.check_cell_conditions(cell, resources, index)
        ]

        return nb, resources

    def preprocess_cell(self, cell, resources, cell_index):
        """
        Apply a transformation on each cell. See base.py for details.
        """
        # Strip the whole output area (and related view metadata) from code
        # cells tagged for output removal.
        if (
            self.remove_all_outputs_tags.intersection(cell.get("metadata", {}).get("tags", []))
            and cell.cell_type == "code"
        ):
            cell.outputs = []
            cell.execution_count = None
            # Remove metadata associated with output
            if "metadata" in cell:
                for field in self.remove_metadata_fields:
                    cell.metadata.pop(field, None)

        # Mark the input for removal; templates honour the transient flag.
        if self.remove_input_tags.intersection(cell.get("metadata", {}).get("tags", [])):
            cell.transient = {"remove_source": True}

        # Drop individually-tagged outputs.
        if cell.get("outputs", []):
            cell.outputs = [
                output
                for output_index, output in enumerate(cell.outputs)
                if self.check_output_conditions(output, resources, cell_index, output_index)
            ]
        return cell, resources

    def check_output_conditions(self, output, resources, cell_index, output_index):
        """
        Checks that an output has a tag that indicates removal.

        Returns: Boolean.
        True means output should *not* be removed.
        """
        return not self.remove_single_output_tags.intersection(
            output.get("metadata", {}).get("tags", [])
        )

View File

@ -0,0 +1,52 @@
"""utility functions for preprocessor tests"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from nbformat import v4 as nbformat
from ...exporters.exporter import ResourcesDict
from ...tests.base import TestsBase
class PreprocessorTestsBase(TestsBase):
    """Contains test functions preprocessor tests"""

    def build_notebook(self, with_json_outputs=False):
        """Build a notebook in memory for use with preprocessor tests"""
        new_output = nbformat.new_output
        outputs = [
            new_output("stream", name="stdout", text="a"),
            new_output("display_data", data={"text/plain": "b"}),
            new_output("stream", name="stdout", text="c"),
            new_output("stream", name="stdout", text="d"),
            new_output("stream", name="stderr", text="e"),
            new_output("stream", name="stderr", text="f"),
            new_output("display_data", data={"image/png": "Zw=="}),  # g
            new_output("display_data", data={"application/pdf": "aA=="}),  # h
        ]
        if with_json_outputs:
            # Outputs j, k, l, m: assorted JSON payload shapes.
            json_payloads = [[1, 2, 3], {"a": 1, "c": {"b": 2}}, "abc", 15.03]
            outputs += [
                new_output("display_data", data={"application/json": payload})
                for payload in json_payloads
            ]

        return nbformat.new_notebook(
            cells=[
                nbformat.new_code_cell(source="$ e $", execution_count=1, outputs=outputs),
                nbformat.new_markdown_cell(source="$ e $"),
            ]
        )

    def build_resources(self):
        """Build an empty resources dictionary."""
        resources = ResourcesDict()
        resources["metadata"] = ResourcesDict()
        return resources

View File

@ -0,0 +1,24 @@
from jupyter_client.manager import KernelManager
class FakeCustomKernelManager(KernelManager):
    """KernelManager stub that counts calls to selected methods.

    Lets tests verify that a custom kernel manager class is actually
    exercised (initialized, started, asked for a client).
    """

    # Class-level call counters, shared across all instances.
    expected_methods = {
        "__init__": 0,
        "client": 0,
        "start_kernel": 0,
    }

    def __init__(self, *args, **kwargs):
        self.log.info("FakeCustomKernelManager initialized")
        self.expected_methods["__init__"] += 1
        super().__init__(*args, **kwargs)

    def start_kernel(self, *args, **kwargs):
        self.log.info("FakeCustomKernelManager started a kernel")
        self.expected_methods["start_kernel"] += 1
        return super().start_kernel(*args, **kwargs)

    def client(self, *args, **kwargs):
        self.log.info("FakeCustomKernelManager created a client")
        self.expected_methods["client"] += 1
        return super().client(*args, **kwargs)

View File

@ -0,0 +1,26 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Hello World\n"
]
}
],
"source": [
"print(\"Hello World\")"
]
}
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 0
}

View File

@ -0,0 +1,49 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"this is a code cell\n"
]
}
],
"source": [
"print('this is a code cell')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# This is a markdown cell"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.2"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@ -0,0 +1,134 @@
"""
Module with tests for the clearmetadata preprocessor.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from ..clearmetadata import ClearMetadataPreprocessor
from .base import PreprocessorTestsBase
class TestClearMetadata(PreprocessorTestsBase):
    """Contains test functions for clearmetadata.py"""

    def build_notebook(self):
        """Build the base notebook and seed metadata the preprocessor acts on."""
        nb = super().build_notebook()
        nb.metadata = {
            "language_info": {"name": "python", "version": "3.6.7"},
            "kernelspec": {"language": "python", "name": "python3"},
        }
        # Seed the first cell with flat, nested and timing metadata.
        if "metadata" not in nb.cells[0]:
            nb.cells[0].metadata = {}
        nb.cells[0].metadata.update(
            {
                "test_field": "test_value",
                "test_nested": {"test_keep": "keep", "test_filtered": "filter"},
                "executeTime": {"end_time": "09:31:50", "start_time": "09:31:49"},
            }
        )
        return nb

    def build_preprocessor(self, **kwargs):
        """Make an enabled ClearMetadataPreprocessor configured via **kwargs."""
        preprocessor = ClearMetadataPreprocessor(**kwargs)
        preprocessor.enabled = True
        return preprocessor

    def _run(self, **kwargs):
        """Run a fresh notebook/resources pair through a fresh preprocessor."""
        nb = self.build_notebook()
        res = self.build_resources()
        return self.build_preprocessor(**kwargs)(nb, res)

    def test_constructor(self):
        """Can a ClearMetadataPreprocessor be constructed?"""
        self.build_preprocessor()

    def test_default_output(self):
        """By default both cell and notebook metadata are cleared."""
        nb, _ = self._run()
        assert not nb.cells[0].metadata
        # By default only the language name is preserved at notebook level.
        assert nb.metadata == {"language_info": {"name": "python"}}

    def test_cell_only(self):
        """Notebook metadata survives when clear_notebook_metadata is off."""
        nb, _ = self._run(clear_notebook_metadata=False)
        assert not nb.cells[0].metadata
        assert nb.metadata

    def test_notebook_only(self):
        """Cell metadata survives when clear_cell_metadata is off."""
        nb, _ = self._run(clear_cell_metadata=False, preserve_nb_metadata_mask=set())
        assert nb.cells[0].metadata
        assert not nb.metadata

    def test_selective_cell_metadata(self):
        """A string key in the cell mask keeps just that cell field."""
        nb, _ = self._run(
            preserve_cell_metadata_mask=["test_field"], preserve_nb_metadata_mask=set()
        )
        assert nb.cells[0].metadata == {"test_field": "test_value"}
        assert not nb.metadata

    def test_selective_cell_tuple_metadata(self):
        """A length-1 tuple behaves the same as a plain string key."""
        nb, _ = self._run(
            preserve_cell_metadata_mask=[("test_field",)], preserve_nb_metadata_mask=set()
        )
        assert nb.cells[0].metadata == {"test_field": "test_value"}
        assert not nb.metadata

    def test_nested_cell_metadata(self):
        """A (key, subkey) tuple keeps only the nested entry."""
        nb, _ = self._run(
            preserve_cell_metadata_mask=[("test_nested", "test_keep")],
            preserve_nb_metadata_mask=set(),
        )
        assert nb.cells[0].metadata == {"test_nested": {"test_keep": "keep"}}
        assert not nb.metadata

    def test_nested_cell_tuple_metadata(self):
        """A nested tuple-of-tuples key selects the same nested entry."""
        nb, _ = self._run(
            preserve_cell_metadata_mask=[("test_nested", ("test_keep",))],
            preserve_nb_metadata_mask=set(),
        )
        assert nb.cells[0].metadata == {"test_nested": {"test_keep": "keep"}}
        assert not nb.metadata

    def test_selective_notebook_metadata(self):
        """A notebook-level mask keeps the masked top-level section."""
        nb, _ = self._run(preserve_nb_metadata_mask=["kernelspec"])
        assert not nb.cells[0].metadata
        assert nb.metadata == {"kernelspec": {"language": "python", "name": "python3"}}

View File

@ -0,0 +1,49 @@
"""
Module with tests for the clearoutput preprocessor.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from ..clearoutput import ClearOutputPreprocessor
from .base import PreprocessorTestsBase
class TestClearOutput(PreprocessorTestsBase):
    """Contains test functions for clearoutput.py"""

    def build_notebook(self):
        """Build the base notebook with an extra metadata field on cell 0."""
        nb = super().build_notebook()
        # Seed the first cell so metadata-removal behaviour can be checked.
        if "metadata" not in nb.cells[0]:
            nb.cells[0].metadata = {}
        nb.cells[0].metadata["test_field"] = "test_value"
        return nb

    def build_preprocessor(self):
        """Make an enabled ClearOutputPreprocessor instance."""
        preprocessor = ClearOutputPreprocessor()
        preprocessor.enabled = True
        return preprocessor

    def test_constructor(self):
        """Can a ClearOutputPreprocessor be constructed?"""
        self.build_preprocessor()

    def test_output(self):
        """Outputs and counts are cleared; metadata removal follows the traitlet."""
        for remove_test_field in (False, True):
            nb = self.build_notebook()
            res = self.build_resources()
            preprocessor = self.build_preprocessor()
            if remove_test_field:
                # Opt the test field into removal alongside the defaults.
                preprocessor.remove_metadata_fields.add("test_field")
            nb, res = preprocessor(nb, res)
            first = nb.cells[0]
            assert first.outputs == []
            assert first.execution_count is None
            if "metadata" in first:
                for field in preprocessor.remove_metadata_fields:
                    assert field not in first.metadata
                # The test field disappears only when explicitly added above.
                assert remove_test_field or "test_field" in first.metadata

View File

@ -0,0 +1,62 @@
"""Tests for the coalescestreams preprocessor"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from nbformat import v4 as nbformat
from ..coalescestreams import coalesce_streams
from .base import PreprocessorTestsBase
class TestCoalesceStreams(PreprocessorTestsBase):
    """Contains test functions for coalescestreams.py"""

    @staticmethod
    def _one_cell_notebook(outputs):
        """Wrap *outputs* in a notebook holding a single code cell."""
        cell = nbformat.new_code_cell(source="# None", execution_count=1, outputs=outputs)
        return nbformat.new_notebook(cells=[cell])

    def test_coalesce_streams(self):
        """Adjacent same-name streams merge; display_data breaks the run."""
        nb, res = coalesce_streams(self.build_notebook(), self.build_resources())
        merged = nb.cells[0].outputs
        self.assertEqual(merged[0].text, "a")
        self.assertEqual(merged[1].output_type, "display_data")
        self.assertEqual(merged[2].text, "cd")
        self.assertEqual(merged[3].text, "ef")

    def test_coalesce_sequenced_streams(self):
        """Can the coalesce streams preprocessor merge a sequence of streams?"""
        outputs = [
            nbformat.new_output(output_type="stream", name="stdout", text=str(i))
            for i in range(8)
        ]
        nb = self._one_cell_notebook(outputs)
        nb, _ = coalesce_streams(nb, self.build_resources())
        self.assertEqual(nb.cells[0].outputs[0].text, "01234567")

    def test_coalesce_replace_streams(self):
        """Are \\r characters handled?"""
        texts = ["z", "\ra", "\nz\rb", "\nz", "\rc\n", "z\rz\rd"]
        outputs = [
            nbformat.new_output(output_type="stream", name="stdout", text=t) for t in texts
        ]
        nb = self._one_cell_notebook(outputs)
        nb, _ = coalesce_streams(nb, self.build_resources())
        # Carriage returns overwrite the current line, as a terminal would.
        self.assertEqual(nb.cells[0].outputs[0].text, "a\nb\nc\nd")

View File

@ -0,0 +1,44 @@
"""
Module with tests for the csshtmlheader preprocessor
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from ..csshtmlheader import CSSHTMLHeaderPreprocessor
from .base import PreprocessorTestsBase
# -----------------------------------------------------------------------------
# Class
# -----------------------------------------------------------------------------
class TestCSSHTMLHeader(PreprocessorTestsBase):
    """Contains test functions for csshtmlheader.py"""

    def build_preprocessor(self):
        """Make an enabled CSSHTMLHeaderPreprocessor instance."""
        preprocessor = CSSHTMLHeaderPreprocessor()
        preprocessor.enabled = True
        return preprocessor

    def test_constructor(self):
        """Can a CSSHTMLHeaderPreprocessor be constructed?"""
        self.build_preprocessor()

    def test_output(self):
        """Running the preprocessor should inline CSS into the resources."""
        nb, res = self.build_preprocessor()(self.build_notebook(), self.build_resources())
        assert "css" in res["inlining"]

View File

@ -0,0 +1,105 @@
"""
Module with tests for the execute preprocessor.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import re
from copy import deepcopy
import nbformat
import pytest
from ..execute import ExecutePreprocessor, executenb
# Matches CPython object addresses such as ``0x7f3a1b2c4`` in reprs.
addr_pat = re.compile(r"0x[0-9a-f]{7,9}")


def normalize_output(output):
    """
    Normalize (most of) a single cell output for comparison.

    Drops volatile ``metadata`` and rewrites memory addresses to a fixed
    ``<HEXADDR>`` placeholder so the reproducible parts of an output can be
    compared across executions.

    Parameters
    ----------
    output : dict
        A single notebook cell output.

    Returns
    -------
    dict
        A normalized copy; the caller's *output* is left unmodified.
    """
    output = dict(output)
    # Copy the nested data dict too: the original shallow copy meant the
    # substitutions below mutated the caller's output in place.
    if "data" in output:
        output["data"] = dict(output["data"])
    # Execution metadata (timestamps etc.) varies between runs.
    if "metadata" in output:
        del output["metadata"]
    if "text" in output:
        output["text"] = re.sub(addr_pat, "<HEXADDR>", output["text"])
    if "text/plain" in output.get("data", {}):
        output["data"]["text/plain"] = re.sub(addr_pat, "<HEXADDR>", output["data"]["text/plain"])
    # NOTE: the original ended with a loop reassigning each string data value
    # to itself (``output["data"][key] = value``) -- a no-op, removed.
    return output
def assert_notebooks_equal(expected, actual):
    """Assert two notebooks match cell-for-cell on outputs and execution counts."""
    expected_cells = expected["cells"]
    actual_cells = actual["cells"]
    assert len(expected_cells) == len(actual_cells)
    for exp_cell, act_cell in zip(expected_cells, actual_cells):
        # Compare normalized outputs so volatile parts (addresses, metadata)
        # do not cause spurious mismatches.
        exp_outputs = [normalize_output(o) for o in exp_cell.get("outputs", [])]
        act_outputs = [normalize_output(o) for o in act_cell.get("outputs", [])]
        assert exp_outputs == act_outputs
        assert exp_cell.get("execution_count", None) == act_cell.get("execution_count", None)
def test_basic_execution():
    """Executing HelloWorld.ipynb should reproduce its stored outputs."""
    fname = os.path.join(os.path.dirname(__file__), "files", "HelloWorld.ipynb")
    with open(fname) as f:
        input_nb = nbformat.read(f, 4)
    # Execute a deep copy so the reference notebook stays pristine.
    executed, _ = ExecutePreprocessor().preprocess(deepcopy(input_nb))
    assert_notebooks_equal(input_nb, executed)
def test_mixed_markdown_execution():
    """A notebook mixing markdown and code cells executes cleanly."""
    fname = os.path.join(os.path.dirname(__file__), "files", "MixedMarkdown.ipynb")
    with open(fname) as f:
        input_nb = nbformat.read(f, 4)
    # Execute a deep copy so the reference notebook stays pristine.
    executed, _ = ExecutePreprocessor().preprocess(deepcopy(input_nb))
    assert_notebooks_equal(input_nb, executed)
def test_executenb():
    """The deprecated executenb() helper still runs and emits FutureWarning."""
    fname = os.path.join(os.path.dirname(__file__), "files", "HelloWorld.ipynb")
    with open(fname) as f:
        input_nb = nbformat.read(f, 4)
    # executenb is deprecated in favour of ExecutePreprocessor; it must warn.
    with pytest.warns(FutureWarning):
        executed = executenb(deepcopy(input_nb))
    assert_notebooks_equal(input_nb, executed)
def test_populate_language_info():
    """preprocess() should fill in notebook-level language_info metadata."""
    preprocessor = ExecutePreprocessor(kernel_name="python")
    nb = nbformat.v4.new_notebook()  # a fresh notebook has no language_info
    preprocessor.preprocess(nb, resources={})
    # The input notebook is mutated in place.
    assert "language_info" in nb.metadata
def test_preprocess_cell():
    """Subclasses can override preprocess_cell to rewrite cells before running."""

    class CellReplacer(ExecutePreprocessor):
        # Replace every cell's source before delegating to normal execution.
        def preprocess_cell(self, cell, resources, index, **kwargs):
            cell.source = "print('Ignored')"
            return super().preprocess_cell(cell, resources, index, **kwargs)

    fname = os.path.join(os.path.dirname(__file__), "files", "HelloWorld.ipynb")
    with open(fname) as f:
        input_nb = nbformat.read(f, 4)
    executed, _ = CellReplacer().preprocess(deepcopy(input_nb))
    # Every cell should now carry the replacement source and output.
    expected = deepcopy(input_nb)
    for cell in expected.cells:
        cell.source = "print('Ignored')"
        for output in cell.outputs:
            output.text = "Ignored\n"
    assert_notebooks_equal(expected, executed)

View File

@ -0,0 +1,85 @@
"""Tests for the extractoutput preprocessor"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import json
from ..extractoutput import ExtractOutputPreprocessor
from .base import PreprocessorTestsBase
class TestExtractOutput(PreprocessorTestsBase):
    """Contains test functions for extractoutput.py"""

    def build_preprocessor(self):
        """Make an enabled ExtractOutputPreprocessor for text/png/pdf."""
        preprocessor = ExtractOutputPreprocessor()
        preprocessor.extract_output_types = {"text/plain", "image/png", "application/pdf"}
        preprocessor.enabled = True
        return preprocessor

    def test_constructor(self):
        """Can a ExtractOutputPreprocessor be constructed?"""
        self.build_preprocessor()

    def _extracted_filename(self, output, mime):
        """Assert *output* recorded an extracted filename for *mime*; return it."""
        self.assertIn("filenames", output.metadata)
        self.assertIn(mime, output.metadata.filenames)
        return output.metadata.filenames[mime]

    def test_output(self):
        """Each extractable mimetype ends up in resources under its filename."""
        nb, res = self.build_preprocessor()(self.build_notebook(), self.build_resources())
        outputs = nb.cells[0].outputs
        # Outputs 1/6/7 carry the text, png ("Zw==" -> b"g") and
        # pdf ("aA==" -> b"h") payloads respectively.
        expectations = [
            (outputs[1], "text/plain", b"b"),
            (outputs[6], "image/png", b"g"),
            (outputs[7], "application/pdf", b"h"),
        ]
        for output, mime, payload in expectations:
            filename = self._extracted_filename(output, mime)
            self.assertIn(filename, res["outputs"])
            self.assertEqual(res["outputs"][filename], payload)

    def test_json_extraction(self):
        """application/json payloads are serialized; cells stay untouched."""
        nb = self.build_notebook(with_json_outputs=True)
        res = self.build_resources()
        preprocessor = self.build_preprocessor()
        preprocessor.extract_output_types = {"application/json"}
        nb, res = preprocessor(nb, res)
        reference = self.build_notebook(with_json_outputs=True).cells[0].outputs
        # Verify the cell outputs themselves were not modified.
        self.assertEqual(
            [out.get("data") for out in nb.cells[0].outputs],
            [out.get("data") for out in reference],
        )
        extracted = sorted(res["outputs"].values())
        reference_files = [
            json.dumps(out["data"]["application/json"]).encode()
            for out in reference
            if "application/json" in out.get("data", {})
        ]
        # Verify equivalence of extracted outputs.
        self.assertEqual(sorted(extracted), sorted(reference_files))

View File

@ -0,0 +1,53 @@
"""Tests for the HighlightMagics preprocessor"""
from ..highlightmagics import HighlightMagicsPreprocessor
from .base import PreprocessorTestsBase
class TestHighlightMagics(PreprocessorTestsBase):
    """Contains test functions for highlightmagics.py"""

    def build_preprocessor(self):
        """Make an enabled HighlightMagicsPreprocessor instance."""
        preprocessor = HighlightMagicsPreprocessor()
        preprocessor.enabled = True
        return preprocessor

    def test_constructor(self):
        """Can a HighlightMagicsPreprocessor be constructed?"""
        self.build_preprocessor()

    def _run_with_source(self, source):
        """Run a notebook whose first cell holds *source* through the preprocessor."""
        nb = self.build_notebook()
        res = self.build_resources()
        nb.cells[0].source = source
        return self.build_preprocessor()(nb, res)

    def test_tagging(self):
        """A leading %%R cell magic is tagged with the R language."""
        nb, _ = self._run_with_source(
            """%%R -i x,y -o XYcoef
lm.fit <- lm(y~x)
par(mfrow=c(2,2))
print(summary(lm.fit))
plot(lm.fit)
XYcoef <- coef(lm.fit)"""
        )
        assert "magics_language" in nb.cells[0]["metadata"]
        self.assertEqual(nb.cells[0]["metadata"]["magics_language"], "r")

    def test_no_false_positive(self):
        """A magic embedded inside a string literal must not be tagged."""
        nb, _ = self._run_with_source(
            """# this should not be detected
print(\"""
%%R -i x, y
\""")"""
        )
        assert "magics_language" not in nb.cells[0]["metadata"]

View File

@ -0,0 +1,53 @@
"""Tests for the latex preprocessor"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from ..latex import LatexPreprocessor
from .base import PreprocessorTestsBase
class TestLatex(PreprocessorTestsBase):
    """Contains test functions for latex.py"""

    def build_preprocessor(self):
        """Make an enabled LatexPreprocessor instance."""
        preprocessor = LatexPreprocessor()
        preprocessor.enabled = True
        return preprocessor

    def test_constructor(self):
        """Can a LatexPreprocessor be constructed?"""
        self.build_preprocessor()

    def test_output(self):
        """The preprocessor must leave cell sources untouched."""
        nb, _ = self.build_preprocessor()(self.build_notebook(), self.build_resources())
        # Neither the code cell nor the markdown cell should be rewritten.
        for cell in nb.cells[:2]:
            self.assertEqual(cell.source, "$ e $")

    def test_highlight(self):
        """Check that highlighting style can be changed"""
        preprocessor = self.build_preprocessor()
        # Pick a known builtin style that is not the default.
        preprocessor.style = "colorful"
        _, res = preprocessor(self.build_notebook(), self.build_resources())
        style_defs = res["latex"]["pygments_definitions"]
        from pygments.formatters import LatexFormatter

        # Verify the generated definitions differ from the default style's.
        assert style_defs != LatexFormatter(style="default").get_style_defs()

Some files were not shown because too many files have changed in this diff Show More