Mirror of https://github.com/aykhans/AzSuicideDataVisualization.git (synced 2025-07-01 06:07:47 +00:00)
first commit
.venv/Lib/site-packages/ipykernel/__init__.py (Normal file, 5 lines)
@@ -0,0 +1,5 @@
from ._version import __version__  # noqa
from ._version import kernel_protocol_version  # noqa
from ._version import kernel_protocol_version_info  # noqa
from ._version import version_info  # noqa
from .connect import *  # noqa

.venv/Lib/site-packages/ipykernel/__main__.py (Normal file, 4 lines)
@@ -0,0 +1,4 @@
if __name__ == "__main__":
    from ipykernel import kernelapp as app

    app.launch_new_instance()

.venv/Lib/site-packages/ipykernel/_eventloop_macos.py (Normal file, 157 lines)
@@ -0,0 +1,157 @@
"""Eventloop hook for OS X

Calls NSApp / CoreFoundation APIs via ctypes.
"""

# cribbed heavily from IPython.terminal.pt_inputhooks.osx
# obj-c boilerplate from appnope, used under BSD 2-clause

import ctypes
import ctypes.util
from threading import Event

objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("objc"))  # type:ignore[arg-type]

void_p = ctypes.c_void_p

objc.objc_getClass.restype = void_p
objc.sel_registerName.restype = void_p
objc.objc_msgSend.restype = void_p
objc.objc_msgSend.argtypes = [void_p, void_p]

msg = objc.objc_msgSend


def _utf8(s):
    """ensure utf8 bytes"""
    if not isinstance(s, bytes):
        s = s.encode("utf8")
    return s


def n(name):
    """create a selector name (for ObjC methods)"""
    return objc.sel_registerName(_utf8(name))


def C(classname):
    """get an ObjC Class by name"""
    return objc.objc_getClass(_utf8(classname))


# end obj-c boilerplate from appnope

# CoreFoundation C-API calls we will use:
CoreFoundation = ctypes.cdll.LoadLibrary(
    ctypes.util.find_library("CoreFoundation")  # type:ignore[arg-type]
)

CFAbsoluteTimeGetCurrent = CoreFoundation.CFAbsoluteTimeGetCurrent
CFAbsoluteTimeGetCurrent.restype = ctypes.c_double

CFRunLoopGetCurrent = CoreFoundation.CFRunLoopGetCurrent
CFRunLoopGetCurrent.restype = void_p

CFRunLoopGetMain = CoreFoundation.CFRunLoopGetMain
CFRunLoopGetMain.restype = void_p

CFRunLoopStop = CoreFoundation.CFRunLoopStop
CFRunLoopStop.restype = None
CFRunLoopStop.argtypes = [void_p]

CFRunLoopTimerCreate = CoreFoundation.CFRunLoopTimerCreate
CFRunLoopTimerCreate.restype = void_p
CFRunLoopTimerCreate.argtypes = [
    void_p,  # allocator (NULL)
    ctypes.c_double,  # fireDate
    ctypes.c_double,  # interval
    ctypes.c_int,  # flags (0)
    ctypes.c_int,  # order (0)
    void_p,  # callout
    void_p,  # context
]

CFRunLoopAddTimer = CoreFoundation.CFRunLoopAddTimer
CFRunLoopAddTimer.restype = None
CFRunLoopAddTimer.argtypes = [void_p, void_p, void_p]

kCFRunLoopCommonModes = void_p.in_dll(CoreFoundation, "kCFRunLoopCommonModes")


def _NSApp():
    """Return the global NSApplication instance (NSApp)"""
    return msg(C("NSApplication"), n("sharedApplication"))


def _wake(NSApp):
    """Wake the Application"""
    event = msg(
        C("NSEvent"),
        n(
            "otherEventWithType:location:modifierFlags:"
            "timestamp:windowNumber:context:subtype:data1:data2:"
        ),
        15,  # Type
        0,  # location
        0,  # flags
        0,  # timestamp
        0,  # window
        None,  # context
        0,  # subtype
        0,  # data1
        0,  # data2
    )
    msg(NSApp, n("postEvent:atStart:"), void_p(event), True)


_triggered = Event()


def stop(timer=None, loop=None):
    """Callback to fire when there's input to be read"""
    _triggered.set()
    NSApp = _NSApp()
    # if NSApp is not running, stop CFRunLoop directly,
    # otherwise stop and wake NSApp
    if msg(NSApp, n("isRunning")):
        msg(NSApp, n("stop:"), NSApp)
        _wake(NSApp)
    else:
        CFRunLoopStop(CFRunLoopGetCurrent())


_c_callback_func_type = ctypes.CFUNCTYPE(None, void_p, void_p)
_c_stop_callback = _c_callback_func_type(stop)


def _stop_after(delay):
    """Register callback to stop eventloop after a delay"""
    timer = CFRunLoopTimerCreate(
        None,  # allocator
        CFAbsoluteTimeGetCurrent() + delay,  # fireDate
        0,  # interval
        0,  # flags
        0,  # order
        _c_stop_callback,
        None,
    )
    CFRunLoopAddTimer(
        CFRunLoopGetMain(),
        timer,
        kCFRunLoopCommonModes,
    )


def mainloop(duration=1):
    """run the Cocoa eventloop for the specified duration (seconds)"""

    _triggered.clear()
    NSApp = _NSApp()
    _stop_after(duration)
    msg(NSApp, n("run"))
    if not _triggered.is_set():
        # app closed without firing callback,
        # probably due to last window being closed.
        # Run the loop manually in this case,
        # since there may be events still to process (ipython/ipython#9734)
        CoreFoundation.CFRunLoopRun()

.venv/Lib/site-packages/ipykernel/_version.py (Normal file, 20 lines)
@@ -0,0 +1,20 @@
"""
store the current version info of the server.
"""
import re
from typing import List

# Version string must appear intact for tbump versioning
__version__ = "6.13.0"

# Build up version_info tuple for backwards compatibility
pattern = r"(?P<major>\d+).(?P<minor>\d+).(?P<patch>\d+)(?P<rest>.*)"
match = re.match(pattern, __version__)
assert match is not None
parts: List[object] = [int(match[part]) for part in ["major", "minor", "patch"]]
if match["rest"]:
    parts.append(match["rest"])
version_info = tuple(parts)

kernel_protocol_version_info = (5, 3)
kernel_protocol_version = "%s.%s" % kernel_protocol_version_info

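As a sanity check on the parsing above, the same logic applied standalone (illustrative only, not part of the committed file):

import re
from typing import List

pattern = r"(?P<major>\d+).(?P<minor>\d+).(?P<patch>\d+)(?P<rest>.*)"

def parse(version: str) -> tuple:
    """Same version-tuple construction as _version.py."""
    match = re.match(pattern, version)
    assert match is not None
    parts: List[object] = [int(match[part]) for part in ["major", "minor", "patch"]]
    if match["rest"]:
        # pre-release/dev suffix is kept as a trailing string
        parts.append(match["rest"])
    return tuple(parts)

print(parse("6.13.0"))   # (6, 13, 0)
print(parse("7.0.0b1"))  # (7, 0, 0, 'b1')
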
.venv/Lib/site-packages/ipykernel/comm/__init__.py (Normal file, 2 lines)
@@ -0,0 +1,2 @@
from .comm import *  # noqa
from .manager import *  # noqa

.venv/Lib/site-packages/ipykernel/comm/comm.py (Normal file, 184 lines)
@@ -0,0 +1,184 @@
"""Base class for a Comm"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import uuid

from traitlets import Any, Bool, Bytes, Dict, Instance, Unicode, default
from traitlets.config import LoggingConfigurable

from ipykernel.jsonutil import json_clean
from ipykernel.kernelbase import Kernel


class Comm(LoggingConfigurable):
    """Class for communicating between a Frontend and a Kernel"""

    kernel = Instance("ipykernel.kernelbase.Kernel", allow_none=True)

    @default("kernel")
    def _default_kernel(self):
        if Kernel.initialized():
            return Kernel.instance()

    comm_id = Unicode()

    @default("comm_id")
    def _default_comm_id(self):
        return uuid.uuid4().hex

    primary = Bool(True, help="Am I the primary or secondary Comm?")

    target_name = Unicode("comm")
    target_module = Unicode(
        None,
        allow_none=True,
        help="""requirejs module from
        which to load comm target.""",
    )

    topic = Bytes()

    @default("topic")
    def _default_topic(self):
        return ("comm-%s" % self.comm_id).encode("ascii")

    _open_data = Dict(help="data dict, if any, to be included in comm_open")
    _close_data = Dict(help="data dict, if any, to be included in comm_close")

    _msg_callback = Any()
    _close_callback = Any()

    _closed = Bool(True)

    def __init__(self, target_name="", data=None, metadata=None, buffers=None, **kwargs):
        if target_name:
            kwargs["target_name"] = target_name
        super().__init__(**kwargs)
        if self.kernel:
            if self.primary:
                # I am primary, open my peer.
                self.open(data=data, metadata=metadata, buffers=buffers)
            else:
                self._closed = False

    def _publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys):
        """Helper for sending a comm message on IOPub"""
        data = {} if data is None else data
        metadata = {} if metadata is None else metadata
        content = json_clean(dict(data=data, comm_id=self.comm_id, **keys))
        self.kernel.session.send(
            self.kernel.iopub_socket,
            msg_type,
            content,
            metadata=json_clean(metadata),
            parent=self.kernel.get_parent("shell"),
            ident=self.topic,
            buffers=buffers,
        )

    def __del__(self):
        """trigger close on gc"""
        self.close(deleting=True)

    # publishing messages

    def open(self, data=None, metadata=None, buffers=None):
        """Open the frontend-side version of this comm"""
        if data is None:
            data = self._open_data
        comm_manager = getattr(self.kernel, "comm_manager", None)
        if comm_manager is None:
            raise RuntimeError(
                "Comms cannot be opened without a kernel "
                "and a comm_manager attached to that kernel."
            )

        comm_manager.register_comm(self)
        try:
            self._publish_msg(
                "comm_open",
                data=data,
                metadata=metadata,
                buffers=buffers,
                target_name=self.target_name,
                target_module=self.target_module,
            )
            self._closed = False
        except Exception:
            comm_manager.unregister_comm(self)
            raise

    def close(self, data=None, metadata=None, buffers=None, deleting=False):
        """Close the frontend-side version of this comm"""
        if self._closed:
            # only close once
            return
        self._closed = True
        # nothing to send if we have no kernel
        # can be None during interpreter cleanup
        if not self.kernel:
            return
        if data is None:
            data = self._close_data
        self._publish_msg(
            "comm_close",
            data=data,
            metadata=metadata,
            buffers=buffers,
        )
        if not deleting:
            # If deleting, the comm can't be registered
            self.kernel.comm_manager.unregister_comm(self)

    def send(self, data=None, metadata=None, buffers=None):
        """Send a message to the frontend-side version of this comm"""
        self._publish_msg(
            "comm_msg",
            data=data,
            metadata=metadata,
            buffers=buffers,
        )

    # registering callbacks

    def on_close(self, callback):
        """Register a callback for comm_close

        Will be called with the `data` of the close message.

        Call `on_close(None)` to disable an existing callback.
        """
        self._close_callback = callback

    def on_msg(self, callback):
        """Register a callback for comm_msg

        Will be called with the `data` of any comm_msg messages.

        Call `on_msg(None)` to disable an existing callback.
        """
        self._msg_callback = callback

    # handling of incoming messages

    def handle_close(self, msg):
        """Handle a comm_close message"""
        self.log.debug("handle_close[%s](%s)", self.comm_id, msg)
        if self._close_callback:
            self._close_callback(msg)

    def handle_msg(self, msg):
        """Handle a comm_msg message"""
        self.log.debug("handle_msg[%s](%s)", self.comm_id, msg)
        if self._msg_callback:
            shell = self.kernel.shell
            if shell:
                shell.events.trigger("pre_execute")
            self._msg_callback(msg)
            if shell:
                shell.events.trigger("post_execute")


__all__ = ["Comm"]

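A hedged sketch of kernel-side usage of the class above: a handler is registered for a target name, the frontend opens a comm against it, and the kernel echoes messages back. The "echo" target name and handler are illustrative, not part of ipykernel; this assumes it runs inside a live kernel session where get_ipython() is available.

from ipykernel.comm import Comm  # noqa: F401  (Comm instances arrive via the handler)

def handle_open(comm, open_msg):
    # Called once per comm_open from the frontend; echo every comm_msg back.
    def _recv(msg):
        comm.send(data=msg["content"]["data"])
    comm.on_msg(_recv)

# Register the target on the kernel's CommManager (defined in manager.py below).
get_ipython().kernel.comm_manager.register_target("echo", handle_open)
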
.venv/Lib/site-packages/ipykernel/comm/manager.py (Normal file, 133 lines)
@@ -0,0 +1,133 @@
"""Base class to manage comms"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import logging

from traitlets import Dict, Instance
from traitlets.config import LoggingConfigurable
from traitlets.utils.importstring import import_item

from .comm import Comm


class CommManager(LoggingConfigurable):
    """Manager for Comms in the Kernel"""

    kernel = Instance("ipykernel.kernelbase.Kernel")
    comms = Dict()
    targets = Dict()

    # Public APIs

    def register_target(self, target_name, f):
        """Register a callable f for a given target name

        f will be called with two arguments when a comm_open message is received with `target`:

        - the Comm instance
        - the `comm_open` message itself.

        f can be a Python callable or an import string for one.
        """
        if isinstance(f, str):
            f = import_item(f)

        self.targets[target_name] = f

    def unregister_target(self, target_name, f):
        """Unregister a callable registered with register_target"""
        return self.targets.pop(target_name)

    def register_comm(self, comm):
        """Register a new comm"""
        comm_id = comm.comm_id
        comm.kernel = self.kernel
        self.comms[comm_id] = comm
        return comm_id

    def unregister_comm(self, comm):
        """Unregister a comm, and close its counterpart"""
        # unlike get_comm, this should raise a KeyError
        comm = self.comms.pop(comm.comm_id)

    def get_comm(self, comm_id):
        """Get a comm with a particular id

        Returns the comm if found, otherwise None.

        This will not raise an error,
        it will log messages if the comm cannot be found.
        """
        try:
            return self.comms[comm_id]
        except KeyError:
            self.log.warning("No such comm: %s", comm_id)
            if self.log.isEnabledFor(logging.DEBUG):
                # don't create the list of keys if debug messages aren't enabled
                self.log.debug("Current comms: %s", list(self.comms.keys()))

    # Message handlers
    def comm_open(self, stream, ident, msg):
        """Handler for comm_open messages"""
        content = msg["content"]
        comm_id = content["comm_id"]
        target_name = content["target_name"]
        f = self.targets.get(target_name, None)
        comm = Comm(
            comm_id=comm_id,
            primary=False,
            target_name=target_name,
        )
        self.register_comm(comm)
        if f is None:
            self.log.error("No such comm target registered: %s", target_name)
        else:
            try:
                f(comm, msg)
                return
            except Exception:
                self.log.error("Exception opening comm with target: %s", target_name, exc_info=True)

        # Failure.
        try:
            comm.close()
        except Exception:
            self.log.error(
                """Could not close comm during `comm_open` failure
                clean-up.  The comm may not have been opened yet.""",
                exc_info=True,
            )

    def comm_msg(self, stream, ident, msg):
        """Handler for comm_msg messages"""
        content = msg["content"]
        comm_id = content["comm_id"]
        comm = self.get_comm(comm_id)
        if comm is None:
            return

        try:
            comm.handle_msg(msg)
        except Exception:
            self.log.error("Exception in comm_msg for %s", comm_id, exc_info=True)

    def comm_close(self, stream, ident, msg):
        """Handler for comm_close messages"""
        content = msg["content"]
        comm_id = content["comm_id"]
        comm = self.get_comm(comm_id)
        if comm is None:
            return

        self.comms[comm_id]._closed = True
        del self.comms[comm_id]

        try:
            comm.handle_close(msg)
        except Exception:
            self.log.error("Exception in comm_close for %s", comm_id, exc_info=True)


__all__ = ["CommManager"]

.venv/Lib/site-packages/ipykernel/compiler.py (Normal file, 97 lines)
@@ -0,0 +1,97 @@
import os
import sys
import tempfile

from IPython.core.compilerop import CachingCompiler


def murmur2_x86(data, seed):
    m = 0x5BD1E995
    data = [chr(d) for d in str.encode(data, "utf8")]
    length = len(data)
    h = seed ^ length
    rounded_end = length & 0xFFFFFFFC
    for i in range(0, rounded_end, 4):
        k = (
            (ord(data[i]) & 0xFF)
            | ((ord(data[i + 1]) & 0xFF) << 8)
            | ((ord(data[i + 2]) & 0xFF) << 16)
            | (ord(data[i + 3]) << 24)
        )
        k = (k * m) & 0xFFFFFFFF
        k ^= k >> 24
        k = (k * m) & 0xFFFFFFFF

        h = (h * m) & 0xFFFFFFFF
        h ^= k

    val = length & 0x03
    k = 0
    if val == 3:
        k = (ord(data[rounded_end + 2]) & 0xFF) << 16
    if val in [2, 3]:
        k |= (ord(data[rounded_end + 1]) & 0xFF) << 8
    if val in [1, 2, 3]:
        k |= ord(data[rounded_end]) & 0xFF
        h ^= k
        h = (h * m) & 0xFFFFFFFF

    h ^= h >> 13
    h = (h * m) & 0xFFFFFFFF
    h ^= h >> 15

    return h


convert_to_long_pathname = lambda filename: filename  # noqa

if sys.platform == "win32":
    try:
        import ctypes
        from ctypes.wintypes import DWORD, LPCWSTR, LPWSTR, MAX_PATH

        _GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
        _GetLongPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
        _GetLongPathName.restype = DWORD

        def _convert_to_long_pathname(filename):
            buf = ctypes.create_unicode_buffer(MAX_PATH)
            rv = _GetLongPathName(filename, buf, MAX_PATH)
            if rv != 0 and rv <= MAX_PATH:
                filename = buf.value
            return filename

        # test that it works so if there are any issues we fail just once here
        _convert_to_long_pathname(__file__)
    except Exception:
        pass
    else:
        convert_to_long_pathname = _convert_to_long_pathname


def get_tmp_directory():
    tmp_dir = convert_to_long_pathname(tempfile.gettempdir())
    pid = os.getpid()
    return tmp_dir + os.sep + "ipykernel_" + str(pid)


def get_tmp_hash_seed():
    hash_seed = 0xC70F6907
    return hash_seed


def get_file_name(code):
    cell_name = os.environ.get("IPYKERNEL_CELL_NAME")
    if cell_name is None:
        name = murmur2_x86(code, get_tmp_hash_seed())
        cell_name = get_tmp_directory() + os.sep + str(name) + ".py"
    return cell_name


class XCachingCompiler(CachingCompiler):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.log = None

    def get_code_name(self, raw_code, code, number):
        return get_file_name(raw_code)

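For context, a short sketch of how the pieces above combine: get_file_name hashes the cell source with murmur2_x86 and places the resulting pseudo-filename under the per-process temp directory. Values are machine-specific (the hash depends on the source text, the directory on the PID); this is an illustration, not part of the committed file, and it reuses murmur2_x86 as defined above.

import os
import tempfile

def cell_filename(code: str) -> str:
    """Reproduce get_file_name()'s path construction for a given cell source."""
    seed = 0xC70F6907  # same constant as get_tmp_hash_seed()
    name = murmur2_x86(code, seed)  # defined in compiler.py above
    tmp_dir = tempfile.gettempdir() + os.sep + "ipykernel_" + str(os.getpid())
    return tmp_dir + os.sep + str(name) + ".py"

# e.g. /tmp/ipykernel_<pid>/<hash>.py ; the debugger's dumpCell request
# (see debugger.py below) writes the cell source to exactly this path.
print(cell_filename("print('hello')"))
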
.venv/Lib/site-packages/ipykernel/connect.py (Normal file, 130 lines)
@@ -0,0 +1,130 @@
"""Connection file-related utilities for the kernel
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import json
import sys
from subprocess import PIPE, Popen
from typing import Any, Dict

import jupyter_client
from jupyter_client import write_connection_file


def get_connection_file(app=None):
    """Return the path to the connection file of an app

    Parameters
    ----------
    app : IPKernelApp instance [optional]
        If unspecified, the currently running app will be used
    """
    from traitlets.utils import filefind

    if app is None:
        from ipykernel.kernelapp import IPKernelApp

        if not IPKernelApp.initialized():
            raise RuntimeError("app not specified, and not in a running Kernel")

        app = IPKernelApp.instance()
    return filefind(app.connection_file, [".", app.connection_dir])


def _find_connection_file(connection_file):
    """Return the absolute path for a connection file

    - If nothing specified, return current Kernel's connection file
    - Otherwise, call jupyter_client.find_connection_file
    """
    if connection_file is None:
        # get connection file from current kernel
        return get_connection_file()
    else:
        return jupyter_client.find_connection_file(connection_file)


def get_connection_info(connection_file=None, unpack=False):
    """Return the connection information for the current Kernel.

    Parameters
    ----------
    connection_file : str [optional]
        The connection file to be used. Can be given by absolute path, or
        IPython will search in the security directory.

        If unspecified, the connection file for the currently running
        IPython Kernel will be used, which is only allowed from inside a kernel.
    unpack : bool [default: False]
        if True, return the unpacked dict, otherwise just the string contents
        of the file.

    Returns
    -------
    The connection dictionary of the current kernel, as string or dict,
    depending on `unpack`.
    """
    cf = _find_connection_file(connection_file)

    with open(cf) as f:
        info_str = f.read()

    if unpack:
        info = json.loads(info_str)
        # ensure key is bytes:
        info["key"] = info.get("key", "").encode()
        return info

    return info_str


def connect_qtconsole(connection_file=None, argv=None):
    """Connect a qtconsole to the current kernel.

    This is useful for connecting a second qtconsole to a kernel, or to a
    local notebook.

    Parameters
    ----------
    connection_file : str [optional]
        The connection file to be used. Can be given by absolute path, or
        IPython will search in the security directory.

        If unspecified, the connection file for the currently running
        IPython Kernel will be used, which is only allowed from inside a kernel.
    argv : list [optional]
        Any extra args to be passed to the console.

    Returns
    -------
    :class:`subprocess.Popen` instance running the qtconsole frontend
    """
    argv = [] if argv is None else argv

    cf = _find_connection_file(connection_file)

    cmd = ";".join(["from qtconsole import qtconsoleapp", "qtconsoleapp.main()"])

    kwargs: Dict[str, Any] = {}
    # Launch the Qt console in a separate session & process group, so
    # interrupting the kernel doesn't kill it.
    kwargs["start_new_session"] = True

    return Popen(
        [sys.executable, "-c", cmd, "--existing", cf] + argv,
        stdout=PIPE,
        stderr=PIPE,
        close_fds=(sys.platform != "win32"),
        **kwargs
    )


__all__ = [
    "write_connection_file",
    "get_connection_file",
    "get_connection_info",
    "connect_qtconsole",
]

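For illustration, reading the connection info from inside a running kernel; this is a sketch, and the keys shown are the standard Jupyter connection-file fields:

from ipykernel.connect import get_connection_info

# unpack=True returns a dict (with "key" coerced to bytes, as above)
info = get_connection_info(unpack=True)
print(info["transport"], info["ip"], info["shell_port"])
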
.venv/Lib/site-packages/ipykernel/control.py (Normal file, 32 lines)
@@ -0,0 +1,32 @@
from threading import Thread

import zmq

if zmq.pyzmq_version_info() >= (17, 0):
    from tornado.ioloop import IOLoop
else:
    # deprecated since pyzmq 17
    from zmq.eventloop.ioloop import IOLoop


class ControlThread(Thread):
    def __init__(self, **kwargs):
        Thread.__init__(self, name="Control", **kwargs)
        self.io_loop = IOLoop(make_current=False)
        self.pydev_do_not_trace = True
        self.is_pydev_daemon_thread = True

    def run(self):
        self.name = "Control"
        self.io_loop.make_current()
        try:
            self.io_loop.start()
        finally:
            self.io_loop.close()

    def stop(self):
        """Stop the thread.

        This method is threadsafe.
        """
        self.io_loop.add_callback(self.io_loop.stop)

.venv/Lib/site-packages/ipykernel/datapub.py (Normal file, 82 lines)
@@ -0,0 +1,82 @@
"""Publishing native (typically pickled) objects.
"""

import warnings

warnings.warn(
    "ipykernel.datapub is deprecated. It has moved to ipyparallel.datapub",
    DeprecationWarning,
    stacklevel=2,
)

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

from traitlets import Any, CBytes, Dict, Instance
from traitlets.config import Configurable

from ipykernel.jsonutil import json_clean

try:
    # available since ipyparallel 5.0.0
    from ipyparallel.serialize import serialize_object
except ImportError:
    # Deprecated since ipykernel 4.3.0
    from ipykernel.serialize import serialize_object

from jupyter_client.session import Session, extract_header


class ZMQDataPublisher(Configurable):

    topic = topic = CBytes(b"datapub")
    session = Instance(Session, allow_none=True)
    pub_socket = Any(allow_none=True)
    parent_header = Dict({})

    def set_parent(self, parent):
        """Set the parent for outbound messages."""
        self.parent_header = extract_header(parent)

    def publish_data(self, data):
        """publish a data_message on the IOPub channel

        Parameters
        ----------
        data : dict
            The data to be published. Think of it as a namespace.
        """
        session = self.session
        buffers = serialize_object(
            data,
            buffer_threshold=session.buffer_threshold,
            item_threshold=session.item_threshold,
        )
        content = json_clean(dict(keys=list(data.keys())))
        session.send(
            self.pub_socket,
            "data_message",
            content=content,
            parent=self.parent_header,
            buffers=buffers,
            ident=self.topic,
        )


def publish_data(data):
    """publish a data_message on the IOPub channel

    Parameters
    ----------
    data : dict
        The data to be published. Think of it as a namespace.
    """
    warnings.warn(
        "ipykernel.datapub is deprecated. It has moved to ipyparallel.datapub",
        DeprecationWarning,
        stacklevel=2,
    )

    from ipykernel.zmqshell import ZMQInteractiveShell

    ZMQInteractiveShell.instance().data_pub.publish_data(data)

.venv/Lib/site-packages/ipykernel/debugger.py (Normal file, 663 lines)
@@ -0,0 +1,663 @@
import os
import re
import sys
import typing as t

import zmq
from IPython.core.getipython import get_ipython
from IPython.core.inputtransformer2 import leading_empty_lines
from tornado.locks import Event
from tornado.queues import Queue
from zmq.utils import jsonapi

try:
    from jupyter_client.jsonutil import json_default
except ImportError:
    from jupyter_client.jsonutil import date_default as json_default

from .compiler import get_file_name, get_tmp_directory, get_tmp_hash_seed

try:
    # This import is required to have the next ones working...
    from debugpy.server import api  # noqa

    from _pydevd_bundle import pydevd_frame_utils  # isort: skip
    from _pydevd_bundle.pydevd_suspended_frames import (  # isort: skip
        SuspendedFramesManager,
        _FramesTracker,
    )

    _is_debugpy_available = True
except ImportError:
    _is_debugpy_available = False

# Required for backwards compatibility
ROUTING_ID = getattr(zmq, "ROUTING_ID", None) or zmq.IDENTITY


class _FakeCode:
    def __init__(self, co_filename, co_name):
        self.co_filename = co_filename
        self.co_name = co_name


class _FakeFrame:
    def __init__(self, f_code, f_globals, f_locals):
        self.f_code = f_code
        self.f_globals = f_globals
        self.f_locals = f_locals
        self.f_back = None


class _DummyPyDB:
    def __init__(self):
        from _pydevd_bundle.pydevd_api import PyDevdAPI

        self.variable_presentation = PyDevdAPI.VariablePresentation()


class VariableExplorer:
    def __init__(self):
        self.suspended_frame_manager = SuspendedFramesManager()
        self.py_db = _DummyPyDB()
        self.tracker = _FramesTracker(self.suspended_frame_manager, self.py_db)
        self.frame = None

    def track(self):
        var = get_ipython().user_ns
        self.frame = _FakeFrame(_FakeCode("<module>", get_file_name("sys._getframe()")), var, var)
        self.tracker.track("thread1", pydevd_frame_utils.create_frames_list_from_frame(self.frame))

    def untrack_all(self):
        self.tracker.untrack_all()

    def get_children_variables(self, variable_ref=None):
        var_ref = variable_ref
        if not var_ref:
            var_ref = id(self.frame)
        variables = self.suspended_frame_manager.get_variable(var_ref)
        return [x.get_var_data() for x in variables.get_children_variables()]


class DebugpyMessageQueue:

    HEADER = "Content-Length: "
    HEADER_LENGTH = 16
    SEPARATOR = "\r\n\r\n"
    SEPARATOR_LENGTH = 4

    def __init__(self, event_callback, log):
        self.tcp_buffer = ""
        self._reset_tcp_pos()
        self.event_callback = event_callback
        self.message_queue: Queue[t.Any] = Queue()
        self.log = log

    def _reset_tcp_pos(self):
        self.header_pos = -1
        self.separator_pos = -1
        self.message_size = 0
        self.message_pos = -1

    def _put_message(self, raw_msg):
        self.log.debug("QUEUE - _put_message:")
        msg = t.cast(t.Dict[str, t.Any], jsonapi.loads(raw_msg))
        if msg["type"] == "event":
            self.log.debug("QUEUE - received event:")
            self.log.debug(msg)
            self.event_callback(msg)
        else:
            self.log.debug("QUEUE - put message:")
            self.log.debug(msg)
            self.message_queue.put_nowait(msg)

    def put_tcp_frame(self, frame):
        self.tcp_buffer += frame

        self.log.debug("QUEUE - received frame")
        while True:
            # Finds header
            if self.header_pos == -1:
                self.header_pos = self.tcp_buffer.find(DebugpyMessageQueue.HEADER)
            if self.header_pos == -1:
                return

            self.log.debug("QUEUE - found header at pos %i", self.header_pos)

            # Finds separator
            if self.separator_pos == -1:
                hint = self.header_pos + DebugpyMessageQueue.HEADER_LENGTH
                self.separator_pos = self.tcp_buffer.find(DebugpyMessageQueue.SEPARATOR, hint)
            if self.separator_pos == -1:
                return

            self.log.debug("QUEUE - found separator at pos %i", self.separator_pos)

            if self.message_pos == -1:
                size_pos = self.header_pos + DebugpyMessageQueue.HEADER_LENGTH
                self.message_pos = self.separator_pos + DebugpyMessageQueue.SEPARATOR_LENGTH
                self.message_size = int(self.tcp_buffer[size_pos : self.separator_pos])

            self.log.debug("QUEUE - found message at pos %i", self.message_pos)
            self.log.debug("QUEUE - message size is %i", self.message_size)

            if len(self.tcp_buffer) - self.message_pos < self.message_size:
                return

            self._put_message(
                self.tcp_buffer[self.message_pos : self.message_pos + self.message_size]
            )
            if len(self.tcp_buffer) - self.message_pos == self.message_size:
                self.log.debug("QUEUE - resetting tcp_buffer")
                self.tcp_buffer = ""
                self._reset_tcp_pos()
                return
            else:
                self.tcp_buffer = self.tcp_buffer[self.message_pos + self.message_size :]
                self.log.debug("QUEUE - slicing tcp_buffer: %s", self.tcp_buffer)
                self._reset_tcp_pos()

    async def get_message(self):
        return await self.message_queue.get()


class DebugpyClient:
    def __init__(self, log, debugpy_stream, event_callback):
        self.log = log
        self.debugpy_stream = debugpy_stream
        self.event_callback = event_callback
        self.message_queue = DebugpyMessageQueue(self._forward_event, self.log)
        self.debugpy_host = "127.0.0.1"
        self.debugpy_port = -1
        self.routing_id = None
        self.wait_for_attach = True
        self.init_event = Event()
        self.init_event_seq = -1

    def _get_endpoint(self):
        host, port = self.get_host_port()
        return "tcp://" + host + ":" + str(port)

    def _forward_event(self, msg):
        if msg["event"] == "initialized":
            self.init_event.set()
            self.init_event_seq = msg["seq"]
        self.event_callback(msg)

    def _send_request(self, msg):
        if self.routing_id is None:
            self.routing_id = self.debugpy_stream.socket.getsockopt(ROUTING_ID)
        content = jsonapi.dumps(
            msg,
            default=json_default,
            ensure_ascii=False,
            allow_nan=False,
        )
        content_length = str(len(content))
        buf = (DebugpyMessageQueue.HEADER + content_length + DebugpyMessageQueue.SEPARATOR).encode(
            "ascii"
        )
        buf += content
        self.log.debug("DEBUGPYCLIENT:")
        self.log.debug(self.routing_id)
        self.log.debug(buf)
        self.debugpy_stream.send_multipart((self.routing_id, buf))

    async def _wait_for_response(self):
        # Since events are never pushed to the message_queue
        # we can safely assume the next message in queue
        # will be an answer to the previous request
        return await self.message_queue.get_message()

    async def _handle_init_sequence(self):
        # 1] Waits for initialized event
        await self.init_event.wait()

        # 2] Sends configurationDone request
        configurationDone = {
            "type": "request",
            "seq": int(self.init_event_seq) + 1,
            "command": "configurationDone",
        }
        self._send_request(configurationDone)

        # 3] Waits for configurationDone response
        await self._wait_for_response()

        # 4] Waits for attachResponse and returns it
        attach_rep = await self._wait_for_response()
        return attach_rep

    def get_host_port(self):
        if self.debugpy_port == -1:
            socket = self.debugpy_stream.socket
            socket.bind_to_random_port("tcp://" + self.debugpy_host)
            self.endpoint = socket.getsockopt(zmq.LAST_ENDPOINT).decode("utf-8")
            socket.unbind(self.endpoint)
            index = self.endpoint.rfind(":")
            self.debugpy_port = self.endpoint[index + 1 :]
        return self.debugpy_host, self.debugpy_port

    def connect_tcp_socket(self):
        self.debugpy_stream.socket.connect(self._get_endpoint())
        self.routing_id = self.debugpy_stream.socket.getsockopt(ROUTING_ID)

    def disconnect_tcp_socket(self):
        self.debugpy_stream.socket.disconnect(self._get_endpoint())
        self.routing_id = None
        self.init_event = Event()
        self.init_event_seq = -1
        self.wait_for_attach = True

    def receive_dap_frame(self, frame):
        self.message_queue.put_tcp_frame(frame)

    async def send_dap_request(self, msg):
        self._send_request(msg)
        if self.wait_for_attach and msg["command"] == "attach":
            rep = await self._handle_init_sequence()
            self.wait_for_attach = False
            return rep
        else:
            rep = await self._wait_for_response()
            self.log.debug("DEBUGPYCLIENT - returning:")
            self.log.debug(rep)
            return rep


class Debugger:

    # Requests that requires that the debugger has started
    started_debug_msg_types = [
        "dumpCell",
        "setBreakpoints",
        "source",
        "stackTrace",
        "variables",
        "attach",
        "configurationDone",
    ]

    # Requests that can be handled even if the debugger is not running
    static_debug_msg_types = ["debugInfo", "inspectVariables", "richInspectVariables", "modules"]

    def __init__(
        self, log, debugpy_stream, event_callback, shell_socket, session, just_my_code=True
    ):
        self.log = log
        self.debugpy_client = DebugpyClient(log, debugpy_stream, self._handle_event)
        self.shell_socket = shell_socket
        self.session = session
        self.is_started = False
        self.event_callback = event_callback
        self.just_my_code = just_my_code
        self.stopped_queue: Queue[t.Any] = Queue()

        self.started_debug_handlers = {}
        for msg_type in Debugger.started_debug_msg_types:
            self.started_debug_handlers[msg_type] = getattr(self, msg_type)

        self.static_debug_handlers = {}
        for msg_type in Debugger.static_debug_msg_types:
            self.static_debug_handlers[msg_type] = getattr(self, msg_type)

        self.breakpoint_list = {}
        self.stopped_threads = set()

        self.debugpy_initialized = False
        self._removed_cleanup = {}

        self.debugpy_host = "127.0.0.1"
        self.debugpy_port = 0
        self.endpoint = None

        self.variable_explorer = VariableExplorer()

    def _handle_event(self, msg):
        if msg["event"] == "stopped":
            if msg["body"]["allThreadsStopped"]:
                self.stopped_queue.put_nowait(msg)
                # Do not forward the event now, will be done in the handle_stopped_event
                return
            else:
                self.stopped_threads.add(msg["body"]["threadId"])
                self.event_callback(msg)
        elif msg["event"] == "continued":
            if msg["body"]["allThreadsContinued"]:
                self.stopped_threads = set()
            else:
                self.stopped_threads.remove(msg["body"]["threadId"])
            self.event_callback(msg)
        else:
            self.event_callback(msg)

    async def _forward_message(self, msg):
        return await self.debugpy_client.send_dap_request(msg)

    def _build_variables_response(self, request, variables):
        var_list = [var for var in variables if self.accept_variable(var["name"])]
        reply = {
            "seq": request["seq"],
            "type": "response",
            "request_seq": request["seq"],
            "success": True,
            "command": request["command"],
            "body": {"variables": var_list},
        }
        return reply

    def _accept_stopped_thread(self, thread_name):
        # TODO: identify Thread-2, Thread-3 and Thread-4. These are NOT
        # Control, IOPub or Heartbeat threads
        forbid_list = ["IPythonHistorySavingThread", "Thread-2", "Thread-3", "Thread-4"]
        return thread_name not in forbid_list

    async def handle_stopped_event(self):
        # Wait for a stopped event message in the stopped queue
        # This message is used for triggering the 'threads' request
        event = await self.stopped_queue.get()
        req = {"seq": event["seq"] + 1, "type": "request", "command": "threads"}
        rep = await self._forward_message(req)
        for thread in rep["body"]["threads"]:
            if self._accept_stopped_thread(thread["name"]):
                self.stopped_threads.add(thread["id"])
        self.event_callback(event)

    @property
    def tcp_client(self):
        return self.debugpy_client

    def start(self):
        if not self.debugpy_initialized:
            tmp_dir = get_tmp_directory()
            if not os.path.exists(tmp_dir):
                os.makedirs(tmp_dir)
            host, port = self.debugpy_client.get_host_port()
            code = "import debugpy;"
            code += 'debugpy.listen(("' + host + '",' + port + "))"
            content = {"code": code, "silent": True}
            self.session.send(
                self.shell_socket,
                "execute_request",
                content,
                None,
                (self.shell_socket.getsockopt(ROUTING_ID)),
            )

            ident, msg = self.session.recv(self.shell_socket, mode=0)
            self.debugpy_initialized = msg["content"]["status"] == "ok"

        # Don't remove leading empty lines when debugging so the breakpoints are correctly positioned
        cleanup_transforms = get_ipython().input_transformer_manager.cleanup_transforms
        if leading_empty_lines in cleanup_transforms:
            index = cleanup_transforms.index(leading_empty_lines)
            self._removed_cleanup[index] = cleanup_transforms.pop(index)

        self.debugpy_client.connect_tcp_socket()
        return self.debugpy_initialized

    def stop(self):
        self.debugpy_client.disconnect_tcp_socket()

        # Restore removed cleanup transformers
        cleanup_transforms = get_ipython().input_transformer_manager.cleanup_transforms
        for index in sorted(self._removed_cleanup):
            func = self._removed_cleanup.pop(index)
            cleanup_transforms.insert(index, func)

    async def dumpCell(self, message):
        code = message["arguments"]["code"]
        file_name = get_file_name(code)

        with open(file_name, "w", encoding="utf-8") as f:
            f.write(code)

        reply = {
            "type": "response",
            "request_seq": message["seq"],
            "success": True,
            "command": message["command"],
            "body": {"sourcePath": file_name},
        }
        return reply

    async def setBreakpoints(self, message):
        source = message["arguments"]["source"]["path"]
        self.breakpoint_list[source] = message["arguments"]["breakpoints"]
        return await self._forward_message(message)

    async def source(self, message):
        reply = {"type": "response", "request_seq": message["seq"], "command": message["command"]}
        source_path = message["arguments"]["source"]["path"]
        if os.path.isfile(source_path):
            with open(source_path, encoding="utf-8") as f:
                reply["success"] = True
                reply["body"] = {"content": f.read()}
        else:
            reply["success"] = False
            reply["message"] = "source unavailable"
            reply["body"] = {}

        return reply

    async def stackTrace(self, message):
        reply = await self._forward_message(message)
        # The stackFrames array can have the following content:
        # { frames from the notebook}
        # ...
        # { 'id': xxx, 'name': '<module>', ... } <= this is the first frame of the code from the notebook
        # { frames from ipykernel }
        # ...
        # {'id': yyy, 'name': '<module>', ... } <= this is the first frame of ipykernel code
        # or only the frames from the notebook.
        # We want to remove all the frames from ipykernel when they are present.
        try:
            sf_list = reply["body"]["stackFrames"]
            module_idx = len(sf_list) - next(
                i for i, v in enumerate(reversed(sf_list), 1) if v["name"] == "<module>" and i != 1
            )
            reply["body"]["stackFrames"] = reply["body"]["stackFrames"][: module_idx + 1]
        except StopIteration:
            pass
        return reply

    def accept_variable(self, variable_name):
        forbid_list = [
            "__name__",
            "__doc__",
            "__package__",
            "__loader__",
            "__spec__",
            "__annotations__",
            "__builtins__",
            "__builtin__",
            "__display__",
            "get_ipython",
            "debugpy",
            "exit",
            "quit",
            "In",
            "Out",
            "_oh",
            "_dh",
            "_",
            "__",
            "___",
        ]
        cond = variable_name not in forbid_list
        cond = cond and not bool(re.search(r"^_\d", variable_name))
        cond = cond and variable_name[0:2] != "_i"
        return cond

    async def variables(self, message):
        reply = {}
        if not self.stopped_threads:
            variables = self.variable_explorer.get_children_variables(
                message["arguments"]["variablesReference"]
            )
            return self._build_variables_response(message, variables)
        else:
            reply = await self._forward_message(message)
            # TODO : check start and count arguments work as expected in debugpy
            reply["body"]["variables"] = [
                var for var in reply["body"]["variables"] if self.accept_variable(var["name"])
            ]
        return reply

    async def attach(self, message):
        host, port = self.debugpy_client.get_host_port()
        message["arguments"]["connect"] = {"host": host, "port": port}
        message["arguments"]["logToFile"] = True
        # Experimental option to break in non-user code.
        # The ipykernel source is in the call stack, so the user
        # has to manipulate the step-over and step-into in a wise way.
        # Set debugOptions for breakpoints in python standard library source.
        if not self.just_my_code:
            message["arguments"]["debugOptions"] = ["DebugStdLib"]
        return await self._forward_message(message)

    async def configurationDone(self, message):
        reply = {
            "seq": message["seq"],
            "type": "response",
            "request_seq": message["seq"],
            "success": True,
            "command": message["command"],
        }
        return reply

    async def debugInfo(self, message):
        breakpoint_list = []
        for key, value in self.breakpoint_list.items():
            breakpoint_list.append({"source": key, "breakpoints": value})
        reply = {
            "type": "response",
            "request_seq": message["seq"],
            "success": True,
            "command": message["command"],
            "body": {
                "isStarted": self.is_started,
                "hashMethod": "Murmur2",
                "hashSeed": get_tmp_hash_seed(),
                "tmpFilePrefix": get_tmp_directory() + os.sep,
                "tmpFileSuffix": ".py",
                "breakpoints": breakpoint_list,
                "stoppedThreads": list(self.stopped_threads),
                "richRendering": True,
                "exceptionPaths": ["Python Exceptions"],
            },
        }
        return reply

    async def inspectVariables(self, message):
        self.variable_explorer.untrack_all()
        # looks like the implementation of untrack_all in ptvsd
        # destroys objects we need in track. We have no choice but to
        # reinstantiate the object
        self.variable_explorer = VariableExplorer()
        self.variable_explorer.track()
        variables = self.variable_explorer.get_children_variables()
        return self._build_variables_response(message, variables)

    async def richInspectVariables(self, message):
        reply = {
            "type": "response",
            "sequence_seq": message["seq"],
            "success": False,
            "command": message["command"],
        }

        var_name = message["arguments"]["variableName"]
        valid_name = str.isidentifier(var_name)
        if not valid_name:
            reply["body"] = {"data": {}, "metadata": {}}
            if var_name == "special variables" or var_name == "function variables":
                reply["success"] = True
            return reply

        repr_data = {}
        repr_metadata = {}
        if not self.stopped_threads:
            # The code did not hit a breakpoint, we use the interpreter
            # to get the rich representation of the variable
            result = get_ipython().user_expressions({var_name: var_name})[var_name]
            if result.get("status", "error") == "ok":
                repr_data = result.get("data", {})
                repr_metadata = result.get("metadata", {})
        else:
            # The code has stopped on a breakpoint, we use the setExpression
            # request to get the rich representation of the variable
            code = f"get_ipython().display_formatter.format({var_name})"
            frame_id = message["arguments"]["frameId"]
            seq = message["seq"]
            reply = await self._forward_message(
                {
                    "type": "request",
                    "command": "evaluate",
                    "seq": seq + 1,
                    "arguments": {"expression": code, "frameId": frame_id},
                }
            )
            if reply["success"]:
                repr_data, repr_metadata = eval(reply["body"]["result"], {}, {})

        body = {
            "data": repr_data,
            "metadata": {k: v for k, v in repr_metadata.items() if k in repr_data},
        }

        reply["body"] = body
        reply["success"] = True
        return reply

    async def modules(self, message):
        modules = list(sys.modules.values())
        startModule = message.get("startModule", 0)
        moduleCount = message.get("moduleCount", len(modules))
        mods = []
        for i in range(startModule, moduleCount):
            module = modules[i]
            filename = getattr(getattr(module, "__spec__", None), "origin", None)
            if filename and filename.endswith(".py"):
                mods.append({"id": i, "name": module.__name__, "path": filename})

        reply = {"body": {"modules": mods, "totalModules": len(modules)}}
        return reply

    async def process_request(self, message):
        reply = {}

        if message["command"] == "initialize":
            if self.is_started:
                self.log.info("The debugger has already started")
            else:
                self.is_started = self.start()
                if self.is_started:
                    self.log.info("The debugger has started")
                else:
                    reply = {
                        "command": "initialize",
                        "request_seq": message["seq"],
                        "seq": 3,
                        "success": False,
                        "type": "response",
                    }

        handler = self.static_debug_handlers.get(message["command"], None)
        if handler is not None:
            reply = await handler(message)
        elif self.is_started:
            handler = self.started_debug_handlers.get(message["command"], None)
            if handler is not None:
                reply = await handler(message)
            else:
                reply = await self._forward_message(message)

        if message["command"] == "disconnect":
            self.stop()
            self.breakpoint_list = {}
            self.stopped_threads = set()
            self.is_started = False
            self.log.info("The debugger has stopped")

        return reply

.venv/Lib/site-packages/ipykernel/displayhook.py (Normal file, 90 lines)
@@ -0,0 +1,90 @@
"""Replacements for sys.displayhook that publish over ZMQ."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import builtins
import sys

from IPython.core.displayhook import DisplayHook
from jupyter_client.session import Session, extract_header
from traitlets import Any, Dict, Instance

from ipykernel.jsonutil import encode_images, json_clean


class ZMQDisplayHook:
    """A simple displayhook that publishes the object's repr over a ZeroMQ
    socket."""

    topic = b"execute_result"

    def __init__(self, session, pub_socket):
        self.session = session
        self.pub_socket = pub_socket
        self.parent_header = {}

    def get_execution_count(self):
        """This method is replaced in kernelapp"""
        return 0

    def __call__(self, obj):
        if obj is None:
            return

        builtins._ = obj  # type:ignore[attr-defined]
        sys.stdout.flush()
        sys.stderr.flush()
        contents = {
            "execution_count": self.get_execution_count(),
            "data": {"text/plain": repr(obj)},
            "metadata": {},
        }
        self.session.send(
            self.pub_socket, "execute_result", contents, parent=self.parent_header, ident=self.topic
        )

    def set_parent(self, parent):
        self.parent_header = extract_header(parent)


class ZMQShellDisplayHook(DisplayHook):
    """A displayhook subclass that publishes data using ZeroMQ. This is intended
    to work with an InteractiveShell instance. It sends a dict of different
    representations of the object."""

    topic = None

    session = Instance(Session, allow_none=True)
    pub_socket = Any(allow_none=True)
    parent_header = Dict({})

    def set_parent(self, parent):
        """Set the parent for outbound messages."""
        self.parent_header = extract_header(parent)

    def start_displayhook(self):
        self.msg = self.session.msg(
            "execute_result",
            {
                "data": {},
                "metadata": {},
            },
            parent=self.parent_header,
        )

    def write_output_prompt(self):
        """Write the output prompt."""
        self.msg["content"]["execution_count"] = self.prompt_count

    def write_format_data(self, format_dict, md_dict=None):
        self.msg["content"]["data"] = json_clean(encode_images(format_dict))
        self.msg["content"]["metadata"] = md_dict

    def finish_displayhook(self):
        """Finish up all displayhook activities."""
        sys.stdout.flush()
        sys.stderr.flush()
        if self.msg["content"]["data"]:
            self.session.send(self.pub_socket, self.msg, ident=self.topic)
        self.msg = None

.venv/Lib/site-packages/ipykernel/embed.py (Normal file, 57 lines)
@@ -0,0 +1,57 @@
"""Simple function for embedding an IPython kernel
"""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------

import sys

from IPython.utils.frame import extract_module_locals

from .kernelapp import IPKernelApp

# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------


def embed_kernel(module=None, local_ns=None, **kwargs):
    """Embed and start an IPython kernel in a given scope.

    Parameters
    ----------
    module : ModuleType, optional
        The module to load into IPython globals (default: caller)
    local_ns : dict, optional
        The namespace to load into IPython user namespace (default: caller)
    **kwargs : various, optional
        Further keyword args are relayed to the IPKernelApp constructor,
        allowing configuration of the Kernel.  Will only have an effect
        on the first embed_kernel call for a given process.

    """
    # get the app if it exists, or set it up if it doesn't
    if IPKernelApp.initialized():
        app = IPKernelApp.instance()
    else:
        app = IPKernelApp.instance(**kwargs)
        app.initialize([])
        # Undo unnecessary sys module mangling from init_sys_modules.
        # This would not be necessary if we could prevent it
        # in the first place by using a different InteractiveShell
        # subclass, as in the regular embed case.
        main = app.kernel.shell._orig_sys_modules_main_mod
        if main is not None:
            sys.modules[app.kernel.shell._orig_sys_modules_main_name] = main

    # load the calling scope if not given
    (caller_module, caller_locals) = extract_module_locals(1)
    if module is None:
        module = caller_module
    if local_ns is None:
        local_ns = caller_locals

    app.kernel.user_module = module
    app.kernel.user_ns = local_ns
    app.shell.set_completer_frame()
    app.start()

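A minimal sketch of using embed_kernel from a script (assumed, not from the committed file): once the call runs, a frontend can attach to the embedded kernel, for example with `jupyter console --existing`. Note that embed_kernel blocks until the kernel is shut down.

from ipykernel.embed import embed_kernel

def main():
    data = {"answer": 42}
    # Expose the caller's objects to the embedded kernel's user namespace.
    embed_kernel(local_ns={"data": data})

if __name__ == "__main__":
    main()
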
454
.venv/Lib/site-packages/ipykernel/eventloops.py
Normal file
@ -0,0 +1,454 @@
"""Event loop integration for the ZeroMQ-based kernels."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import os
import platform
import sys
from functools import partial

import zmq
from packaging.version import Version as V
from traitlets.config.application import Application


def _use_appnope():
    """Should we use appnope for dealing with OS X app nap?

    Checks if we are on OS X 10.9 or greater.
    """
    return sys.platform == "darwin" and V(platform.mac_ver()[0]) >= V("10.9")


def _notify_stream_qt(kernel, stream):

    from IPython.external.qt_for_kernel import QtCore

    def process_stream_events():
        """fall back to main loop when there's a socket event"""
        # call flush to ensure that the stream doesn't lose events
        # due to our consuming of the edge-triggered FD
        # flush returns the number of events consumed.
        # if there were any, wake it up
        if stream.flush(limit=1):
            notifier.setEnabled(False)
            kernel.app.quit()

    fd = stream.getsockopt(zmq.FD)
    notifier = QtCore.QSocketNotifier(fd, QtCore.QSocketNotifier.Read, kernel.app)
    notifier.activated.connect(process_stream_events)
    # there may already be unprocessed events waiting.
    # these events will not wake zmq's edge-triggered FD
    # since edge-triggered notification only occurs on new i/o activity.
    # process all the waiting events immediately
    # so we start in a clean state ensuring that any new i/o events will notify.
    # schedule first call on the eventloop as soon as it's running,
    # so we don't block here processing events
    timer = QtCore.QTimer(kernel.app)
    timer.setSingleShot(True)
    timer.timeout.connect(process_stream_events)
    timer.start(0)


# mapping of keys to loop functions
loop_map = {
    "inline": None,
    "nbagg": None,
    "notebook": None,
    "ipympl": None,
    "widget": None,
    None: None,
}


def register_integration(*toolkitnames):
    """Decorator to register an event loop to integrate with the IPython kernel

    The decorator takes names to register the event loop as for the %gui magic.
    You can provide alternative names for the same toolkit.

    The decorated function should take a single argument, the IPython kernel
    instance, arrange for the event loop to call ``kernel.do_one_iteration()``
    at least every ``kernel._poll_interval`` seconds, and start the event loop.

    :mod:`ipykernel.eventloops` provides and registers such functions
    for a few common event loops.
    """

    def decorator(func):
        for name in toolkitnames:
            loop_map[name] = func

        func.exit_hook = lambda kernel: None

        def exit_decorator(exit_func):
            """@func.exit is now a decorator

            to register a function to be called on exit
            """
            func.exit_hook = exit_func
            return exit_func

        func.exit = exit_decorator
        return func

    return decorator


def _loop_qt(app):
    """Inner-loop for running the Qt eventloop

    Pulled from guisupport.start_event_loop in IPython < 5.2,
    since IPython 5.2 only checks `get_ipython().active_eventloop` is defined,
    rather than if the eventloop is actually running.
    """
    app._in_event_loop = True
    app.exec_()
    app._in_event_loop = False


@register_integration("qt4")
def loop_qt4(kernel):
    """Start a kernel with PyQt4 event loop integration."""

    from IPython.external.qt_for_kernel import QtGui
    from IPython.lib.guisupport import get_app_qt4

    kernel.app = get_app_qt4([" "])
    if isinstance(kernel.app, QtGui.QApplication):
        kernel.app.setQuitOnLastWindowClosed(False)
    _notify_stream_qt(kernel, kernel.shell_stream)

    _loop_qt(kernel.app)


@register_integration("qt", "qt5")
def loop_qt5(kernel):
    """Start a kernel with PyQt5 event loop integration."""
    if os.environ.get("QT_API", None) is None:
        try:
            import PyQt5  # noqa

            os.environ["QT_API"] = "pyqt5"
        except ImportError:
            try:
                import PySide2  # noqa

                os.environ["QT_API"] = "pyside2"
            except ImportError:
                os.environ["QT_API"] = "pyqt5"
    return loop_qt4(kernel)


# exit and watch are the same for qt 4 and 5
@loop_qt4.exit
@loop_qt5.exit
def loop_qt_exit(kernel):
    kernel.app.exit()


def _loop_wx(app):
    """Inner-loop for running the Wx eventloop

    Pulled from guisupport.start_event_loop in IPython < 5.2,
    since IPython 5.2 only checks `get_ipython().active_eventloop` is defined,
    rather than if the eventloop is actually running.
    """
    app._in_event_loop = True
    app.MainLoop()
    app._in_event_loop = False


@register_integration("wx")
def loop_wx(kernel):
    """Start a kernel with wx event loop support."""

    import wx

    # Wx uses milliseconds
    poll_interval = int(1000 * kernel._poll_interval)

    def wake():
        """wake from wx"""
        if kernel.shell_stream.flush(limit=1):
            kernel.app.ExitMainLoop()
            return

    # We have to put the wx.Timer in a wx.Frame for it to fire properly.
    # We make the Frame hidden when we create it in the main app below.
    class TimerFrame(wx.Frame):
        def __init__(self, func):
            wx.Frame.__init__(self, None, -1)
            self.timer = wx.Timer(self)
            # Units for the timer are in milliseconds
            self.timer.Start(poll_interval)
            self.Bind(wx.EVT_TIMER, self.on_timer)
            self.func = func

        def on_timer(self, event):
            self.func()

    # We need a custom wx.App to create our Frame subclass that has the
    # wx.Timer to defer back to the tornado event loop.
    class IPWxApp(wx.App):
        def OnInit(self):
            self.frame = TimerFrame(wake)
            self.frame.Show(False)
            return True

    # The redirect=False here makes sure that wx doesn't replace
    # sys.stdout/stderr with its own classes.
    if not (getattr(kernel, "app", None) and isinstance(kernel.app, wx.App)):
        kernel.app = IPWxApp(redirect=False)

    # The import of wx on Linux sets the handler for signal.SIGINT
    # to 0.  This is a bug in wx or gtk.  We fix by just setting it
    # back to the Python default.
    import signal

    if not callable(signal.getsignal(signal.SIGINT)):
        signal.signal(signal.SIGINT, signal.default_int_handler)

    _loop_wx(kernel.app)


@loop_wx.exit
def loop_wx_exit(kernel):
    import wx

    wx.Exit()


@register_integration("tk")
def loop_tk(kernel):
    """Start a kernel with the Tk event loop."""

    from tkinter import READABLE, Tk

    app = Tk()
    # Capability detection:
    # per https://docs.python.org/3/library/tkinter.html#file-handlers
    # file handlers are not available on Windows
    if hasattr(app, "createfilehandler"):
        # A basic wrapper for structural similarity with the Windows version
        class BasicAppWrapper:
            def __init__(self, app):
                self.app = app
                self.app.withdraw()

        def process_stream_events(stream, *a, **kw):
            """fall back to main loop when there's a socket event"""
            if stream.flush(limit=1):
                app.tk.deletefilehandler(stream.getsockopt(zmq.FD))
                app.quit()

        # For Tkinter, we create a Tk object and call its withdraw method.
        kernel.app_wrapper = BasicAppWrapper(app)

        notifier = partial(process_stream_events, kernel.shell_stream)
        # seems to be needed for tk
        notifier.__name__ = "notifier"  # type:ignore[attr-defined]
        app.tk.createfilehandler(kernel.shell_stream.getsockopt(zmq.FD), READABLE, notifier)
        # schedule initial call after start
        app.after(0, notifier)

        app.mainloop()

    else:
        import asyncio

        import nest_asyncio

        nest_asyncio.apply()

        doi = kernel.do_one_iteration
        # Tk uses milliseconds
        poll_interval = int(1000 * kernel._poll_interval)

        class TimedAppWrapper:
            def __init__(self, app, func):
                self.app = app
                self.app.withdraw()
                self.func = func

            def on_timer(self):
                loop = asyncio.get_event_loop()
                try:
                    loop.run_until_complete(self.func())
                except Exception:
                    kernel.log.exception("Error in message handler")
                self.app.after(poll_interval, self.on_timer)

            def start(self):
                self.on_timer()  # Call it once to get things going.
                self.app.mainloop()

        kernel.app_wrapper = TimedAppWrapper(app, doi)
        kernel.app_wrapper.start()


@loop_tk.exit
def loop_tk_exit(kernel):
    try:
        kernel.app_wrapper.app.destroy()
    except RuntimeError:
        pass


@register_integration("gtk")
def loop_gtk(kernel):
    """Start the kernel, coordinating with the GTK event loop"""
    from .gui.gtkembed import GTKEmbed

    gtk_kernel = GTKEmbed(kernel)
    gtk_kernel.start()
    kernel._gtk = gtk_kernel


@loop_gtk.exit
def loop_gtk_exit(kernel):
    kernel._gtk.stop()


@register_integration("gtk3")
def loop_gtk3(kernel):
    """Start the kernel, coordinating with the GTK event loop"""
    from .gui.gtk3embed import GTKEmbed

    gtk_kernel = GTKEmbed(kernel)
    gtk_kernel.start()
    kernel._gtk = gtk_kernel


@loop_gtk3.exit
def loop_gtk3_exit(kernel):
    kernel._gtk.stop()


@register_integration("osx")
def loop_cocoa(kernel):
    """Start the kernel, coordinating with the Cocoa CFRunLoop event loop
    via the matplotlib MacOSX backend.
    """
    from ._eventloop_macos import mainloop, stop

    real_excepthook = sys.excepthook

    def handle_int(etype, value, tb):
        """don't let KeyboardInterrupts look like crashes"""
        # wake the eventloop when we get a signal
        stop()
        if etype is KeyboardInterrupt:
            print("KeyboardInterrupt caught in CFRunLoop", file=sys.__stdout__)
        else:
            real_excepthook(etype, value, tb)

    while not kernel.shell.exit_now:
        try:
            # double nested try/except, to properly catch KeyboardInterrupt
            # due to pyzmq Issue #130
            try:
                # don't let interrupts during mainloop invoke crash_handler:
                sys.excepthook = handle_int
                mainloop(kernel._poll_interval)
                if kernel.shell_stream.flush(limit=1):
                    # events to process, return control to kernel
                    return
            except BaseException:
                raise
        except KeyboardInterrupt:
            # Ctrl-C shouldn't crash the kernel
            print("KeyboardInterrupt caught in kernel", file=sys.__stdout__)
        finally:
            # ensure excepthook is restored
            sys.excepthook = real_excepthook


@loop_cocoa.exit
def loop_cocoa_exit(kernel):
    from ._eventloop_macos import stop

    stop()


@register_integration("asyncio")
def loop_asyncio(kernel):
    """Start a kernel with asyncio event loop support."""
    import asyncio

    loop = asyncio.get_event_loop()
    # loop is already running (e.g. tornado 5), nothing left to do
    if loop.is_running():
        return

    if loop.is_closed():
        # main loop is closed, create a new one
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    loop._should_close = False  # type:ignore[attr-defined]

    # pause eventloop when there's an event on a zmq socket
    def process_stream_events(stream):
        """fall back to main loop when there's a socket event"""
        if stream.flush(limit=1):
            loop.stop()

    notifier = partial(process_stream_events, kernel.shell_stream)
    loop.add_reader(kernel.shell_stream.getsockopt(zmq.FD), notifier)
    loop.call_soon(notifier)

    while True:
        error = None
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            continue
        except Exception as e:
            error = e
        if loop._should_close:  # type:ignore[attr-defined]
            loop.close()
        if error is not None:
            raise error
        break


@loop_asyncio.exit
def loop_asyncio_exit(kernel):
    """Exit hook for asyncio"""
    import asyncio

    loop = asyncio.get_event_loop()

    @asyncio.coroutine
    def close_loop():
        if hasattr(loop, "shutdown_asyncgens"):
            yield from loop.shutdown_asyncgens()
        loop._should_close = True  # type:ignore[attr-defined]
        loop.stop()

    if loop.is_running():
        close_loop()

    elif not loop.is_closed():
        loop.run_until_complete(close_loop)  # type:ignore[call-overload]
        loop.close()


def enable_gui(gui, kernel=None):
    """Enable integration with a given GUI"""
    if gui not in loop_map:
        e = "Invalid GUI request %r, valid ones are:%s" % (gui, loop_map.keys())
        raise ValueError(e)
    if kernel is None:
        if Application.initialized():
            kernel = getattr(Application.instance(), "kernel", None)
        if kernel is None:
            raise RuntimeError(
                "You didn't specify a kernel,"
                " and no IPython Application with a kernel appears to be running."
            )
    loop = loop_map[gui]
    if loop and kernel.eventloop is not None and kernel.eventloop is not loop:
        raise RuntimeError("Cannot activate multiple GUI eventloops")
    kernel.eventloop = loop
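A hedged sketch (not part of the upstream file) of the extension point documented in register_integration: a hypothetical toolkit name "mytk" whose loop pumps the kernel on the contract described in the docstring. In this version of ipykernel, do_one_iteration() appears to be a coroutine, so the sketch drives it the same way the Tk fallback above does.

import asyncio
import time

from ipykernel.eventloops import register_integration


@register_integration("mytk")
def loop_mytk(kernel):
    """Call kernel.do_one_iteration() at least every _poll_interval seconds."""
    kernel._mytk_running = True          # hypothetical flag, not an ipykernel attribute
    loop = asyncio.get_event_loop()
    while kernel._mytk_running:
        # do_one_iteration() is a coroutine here; drive it to completion
        loop.run_until_complete(kernel.do_one_iteration())
        time.sleep(kernel._poll_interval)


@loop_mytk.exit
def loop_mytk_exit(kernel):
    # invoked when the kernel switches to a different eventloop
    kernel._mytk_running = False

After registration, %gui mytk (or enable_gui("mytk", kernel)) selects the loop through loop_map.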
15
.venv/Lib/site-packages/ipykernel/gui/__init__.py
Normal file
@ -0,0 +1,15 @@
"""GUI support for the IPython ZeroMQ kernel.

This package contains the various toolkit-dependent utilities we use to enable
coordination between the IPython kernel and the event loops of the various GUI
toolkits.
"""

# -----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed as part of this
# software.
# -----------------------------------------------------------------------------
91
.venv/Lib/site-packages/ipykernel/gui/gtk3embed.py
Normal file
@ -0,0 +1,91 @@
"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support."""
# -----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License.  The full license is in
# the file COPYING.txt, distributed as part of this software.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
# stdlib
import sys

# Third-party
import gi

gi.require_version("Gdk", "3.0")
gi.require_version("Gtk", "3.0")
from gi.repository import GObject, Gtk

# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------


class GTKEmbed:
    """A class to embed a kernel into the GTK main event loop."""

    def __init__(self, kernel):
        self.kernel = kernel
        # These two will later store the real gtk functions when we hijack them
        self.gtk_main = None
        self.gtk_main_quit = None

    def start(self):
        """Starts the GTK main event loop and sets our kernel startup routine."""
        # Register our function to initiate the kernel and start gtk
        GObject.idle_add(self._wire_kernel)
        Gtk.main()

    def _wire_kernel(self):
        """Initializes the kernel inside GTK.

        This is meant to run only once at startup, so it does its job and
        returns False to ensure it doesn't get run again by GTK.
        """
        self.gtk_main, self.gtk_main_quit = self._hijack_gtk()
        GObject.timeout_add(int(1000 * self.kernel._poll_interval), self.iterate_kernel)
        return False

    def iterate_kernel(self):
        """Run one iteration of the kernel and return True.

        GTK timer functions must return True to be called again, so we make the
        call to :meth:`do_one_iteration` and then return True for GTK.
        """
        self.kernel.do_one_iteration()
        return True

    def stop(self):
        # FIXME: this one isn't getting called because we have no reliable
        # kernel shutdown.  We need to fix that: once the kernel has a
        # shutdown mechanism, it can call this.
        if self.gtk_main_quit:
            self.gtk_main_quit()
        sys.exit()

    def _hijack_gtk(self):
        """Hijack a few key functions in GTK for IPython integration.

        Modifies pyGTK's main and main_quit with a dummy so user code does not
        block IPython.  This allows us to use %run to run arbitrary pygtk
        scripts from a long-lived IPython session, and when they attempt to
        start or stop the event loop, those calls become harmless no-ops.

        Returns
        -------
        The original functions that have been hijacked:
        - Gtk.main
        - Gtk.main_quit
        """

        def dummy(*args, **kw):
            pass

        # save and trap main and main_quit from gtk
        orig_main, Gtk.main = Gtk.main, dummy
        orig_main_quit, Gtk.main_quit = Gtk.main_quit, dummy
        return orig_main, orig_main_quit
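A standalone sketch (not part of the upstream file) of the trap-and-restore trick _hijack_gtk uses, written with plain functions so it runs without GTK; the names are illustrative.

def real_main():
    # stands in for Gtk.main, which would block the kernel
    print("blocking event loop")


def dummy(*args, **kw):
    pass


# trap: anything that later calls main() gets the no-op...
orig_main, main = real_main, dummy
main()          # does nothing, so a %run'd script cannot block the kernel

# ...while the embedder keeps the real function to drive the loop itself
orig_main()     # prints "blocking event loop"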
88
.venv/Lib/site-packages/ipykernel/gui/gtkembed.py
Normal file
@ -0,0 +1,88 @@
"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support."""
# -----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License.  The full license is in
# the file COPYING.txt, distributed as part of this software.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
# stdlib
import sys

# Third-party
import gobject
import gtk

# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------


class GTKEmbed:
    """A class to embed a kernel into the GTK main event loop."""

    def __init__(self, kernel):
        self.kernel = kernel
        # These two will later store the real gtk functions when we hijack them
        self.gtk_main = None
        self.gtk_main_quit = None

    def start(self):
        """Starts the GTK main event loop and sets our kernel startup routine."""
        # Register our function to initiate the kernel and start gtk
        gobject.idle_add(self._wire_kernel)
        gtk.main()

    def _wire_kernel(self):
        """Initializes the kernel inside GTK.

        This is meant to run only once at startup, so it does its job and
        returns False to ensure it doesn't get run again by GTK.
        """
        self.gtk_main, self.gtk_main_quit = self._hijack_gtk()
        gobject.timeout_add(int(1000 * self.kernel._poll_interval), self.iterate_kernel)
        return False

    def iterate_kernel(self):
        """Run one iteration of the kernel and return True.

        GTK timer functions must return True to be called again, so we make the
        call to :meth:`do_one_iteration` and then return True for GTK.
        """
        self.kernel.do_one_iteration()
        return True

    def stop(self):
        # FIXME: this one isn't getting called because we have no reliable
        # kernel shutdown.  We need to fix that: once the kernel has a
        # shutdown mechanism, it can call this.
        if self.gtk_main_quit:
            self.gtk_main_quit()
        sys.exit()

    def _hijack_gtk(self):
        """Hijack a few key functions in GTK for IPython integration.

        Modifies pyGTK's main and main_quit with a dummy so user code does not
        block IPython.  This allows us to use %run to run arbitrary pygtk
        scripts from a long-lived IPython session, and when they attempt to
        start or stop the event loop, those calls become harmless no-ops.

        Returns
        -------
        The original functions that have been hijacked:
        - gtk.main
        - gtk.main_quit
        """

        def dummy(*args, **kw):
            pass

        # save and trap main and main_quit from gtk
        orig_main, gtk.main = gtk.main, dummy
        orig_main_quit, gtk.main_quit = gtk.main_quit, dummy
        return orig_main, orig_main_quit
123
.venv/Lib/site-packages/ipykernel/heartbeat.py
Normal file
@ -0,0 +1,123 @@
"""The client and server for a basic ping-pong style heartbeat."""

# -----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License.  The full license is in
# the file COPYING, distributed as part of this software.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------

import errno
import os
import socket
from threading import Thread

import zmq
from jupyter_client.localinterfaces import localhost

# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------


class Heartbeat(Thread):
    "A simple ping-pong style heartbeat that runs in a thread."

    def __init__(self, context, addr=None):
        if addr is None:
            addr = ("tcp", localhost(), 0)
        Thread.__init__(self, name="Heartbeat")
        self.context = context
        self.transport, self.ip, self.port = addr
        self.original_port = self.port
        if self.original_port == 0:
            self.pick_port()
        self.addr = (self.ip, self.port)
        self.daemon = True
        self.pydev_do_not_trace = True
        self.is_pydev_daemon_thread = True
        self.name = "Heartbeat"

    def pick_port(self):
        if self.transport == "tcp":
            s = socket.socket()
            # '*' means all interfaces to 0MQ, which is '' to socket.socket
            s.bind(("" if self.ip == "*" else self.ip, 0))
            self.port = s.getsockname()[1]
            s.close()
        elif self.transport == "ipc":
            self.port = 1
            while os.path.exists("%s-%s" % (self.ip, self.port)):
                self.port = self.port + 1
        else:
            raise ValueError("Unrecognized zmq transport: %s" % self.transport)
        return self.port

    def _try_bind_socket(self):
        c = ":" if self.transport == "tcp" else "-"
        return self.socket.bind("%s://%s" % (self.transport, self.ip) + c + str(self.port))

    def _bind_socket(self):
        try:
            win_in_use = errno.WSAEADDRINUSE  # type:ignore[attr-defined]
        except AttributeError:
            win_in_use = None

        # Try up to 100 times to bind a port when in conflict to avoid
        # infinite attempts in bad setups
        max_attempts = 1 if self.original_port else 100
        for attempt in range(max_attempts):
            try:
                self._try_bind_socket()
            except zmq.ZMQError as ze:
                if attempt == max_attempts - 1:
                    raise
                # Raise if we have any error not related to socket binding
                if ze.errno != errno.EADDRINUSE and ze.errno != win_in_use:
                    raise
                if self.original_port == 0:
                    self.pick_port()
                else:
                    raise
            else:
                return

    def run(self):
        self.name = "Heartbeat"
        self.socket = self.context.socket(zmq.ROUTER)
        self.socket.linger = 1000
        try:
            self._bind_socket()
        except Exception:
            self.socket.close()
            raise

        while True:
            try:
                zmq.device(zmq.QUEUE, self.socket, self.socket)
            except zmq.ZMQError as e:
                if e.errno == errno.EINTR:
                    # signal interrupt, resume heartbeat
                    continue
                elif e.errno == zmq.ETERM:
                    # context terminated, close socket and exit
                    try:
                        self.socket.close()
                    except zmq.ZMQError:
                        # suppress further errors during cleanup
                        # this shouldn't happen, though
                        pass
                    break
                elif e.errno == zmq.ENOTSOCK:
                    # socket closed elsewhere, exit
                    break
                else:
                    raise
            else:
                break
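For illustration (a sketch, not part of the upstream file, assuming pyzmq is available): the heartbeat is a plain echo device, so a client can check liveness by sending any bytes on a REQ socket and waiting for the same bytes back.

import zmq

from ipykernel.heartbeat import Heartbeat

# Server side, started in this process just for the demo.
context = zmq.Context()
hb = Heartbeat(context)          # picks a free TCP port on localhost
hb.start()

# Client side: ping-pong once; the ROUTER/QUEUE device echoes verbatim.
req = context.socket(zmq.REQ)
req.connect("tcp://%s:%s" % hb.addr)
req.send(b"ping")

if req.poll(timeout=1000):       # wait up to 1s for the echo
    assert req.recv() == b"ping"
    print("heartbeat is alive")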
4
.venv/Lib/site-packages/ipykernel/inprocess/__init__.py
Normal file
@ -0,0 +1,4 @@
from .blocking import BlockingInProcessKernelClient  # noqa
from .channels import InProcessChannel, InProcessHBChannel  # noqa
from .client import InProcessKernelClient  # noqa
from .manager import InProcessKernelManager  # noqa
101
.venv/Lib/site-packages/ipykernel/inprocess/blocking.py
Normal file
@ -0,0 +1,101 @@
"""Implements a fully blocking kernel client.

Useful for test suites and blocking terminal interfaces.
"""
import sys

# -----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License.  The full license is in
# the file COPYING.txt, distributed as part of this software.
# -----------------------------------------------------------------------------
from queue import Empty, Queue

# IPython imports
from traitlets import Type

# Local imports
from .channels import InProcessChannel
from .client import InProcessKernelClient


class BlockingInProcessChannel(InProcessChannel):
    def __init__(self, *args, **kwds):
        super().__init__(*args, **kwds)
        self._in_queue: Queue[object] = Queue()

    def call_handlers(self, msg):
        self._in_queue.put(msg)

    def get_msg(self, block=True, timeout=None):
        """Gets a message if there is one that is ready."""
        if timeout is None:
            # Queue.get(timeout=None) has uninterruptible
            # behavior, so wait for a week instead
            timeout = 604800
        return self._in_queue.get(block, timeout)

    def get_msgs(self):
        """Get all messages that are currently ready."""
        msgs = []
        while True:
            try:
                msgs.append(self.get_msg(block=False))
            except Empty:
                break
        return msgs

    def msg_ready(self):
        """Is there a message that has been received?"""
        return not self._in_queue.empty()


class BlockingInProcessStdInChannel(BlockingInProcessChannel):
    def call_handlers(self, msg):
        """Overridden for the in-process channel.

        This method simply calls raw_input directly.
        """
        msg_type = msg["header"]["msg_type"]
        if msg_type == "input_request":
            _raw_input = self.client.kernel._sys_raw_input
            prompt = msg["content"]["prompt"]
            print(prompt, end="", file=sys.__stdout__)
            sys.__stdout__.flush()
            self.client.input(_raw_input())


class BlockingInProcessKernelClient(InProcessKernelClient):

    # The classes to use for the various channels.
    shell_channel_class = Type(BlockingInProcessChannel)
    iopub_channel_class = Type(BlockingInProcessChannel)
    stdin_channel_class = Type(BlockingInProcessStdInChannel)

    def wait_for_ready(self):
        # Wait for kernel info reply on shell channel
        while True:
            self.kernel_info()
            try:
                msg = self.shell_channel.get_msg(block=True, timeout=1)
            except Empty:
                pass
            else:
                if msg["msg_type"] == "kernel_info_reply":
                    # Checking that IOPub is connected. If it is not connected, start over.
                    try:
                        self.iopub_channel.get_msg(block=True, timeout=0.2)
                    except Empty:
                        pass
                    else:
                        self._handle_kernel_info_reply(msg)
                        break

        # Flush IOPub channel
        while True:
            try:
                msg = self.iopub_channel.get_msg(block=True, timeout=0.2)
                print(msg["msg_type"])
            except Empty:
                break
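A hedged usage sketch (not part of the upstream file): with `kc` being a started BlockingInProcessKernelClient (see the InProcessKernelManager example later in this diff), get_msg gives a simple polling loop over IOPub output.

from queue import Empty


def drain_iopub(kc):
    """Collect whatever IOPub messages are ready without blocking for long."""
    msgs = []
    while True:
        try:
            msgs.append(kc.iopub_channel.get_msg(block=True, timeout=0.1))
        except Empty:
            break
    return [m["msg_type"] for m in msgs]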
97
.venv/Lib/site-packages/ipykernel/inprocess/channels.py
Normal file
@ -0,0 +1,97 @@
"""A kernel client for in-process kernels."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

from typing import List

from jupyter_client.channelsabc import HBChannelABC

# -----------------------------------------------------------------------------
# Channel classes
# -----------------------------------------------------------------------------


class InProcessChannel:
    """Base class for in-process channels."""

    proxy_methods: List[object] = []

    def __init__(self, client=None):
        super().__init__()
        self.client = client
        self._is_alive = False

    def is_alive(self):
        return self._is_alive

    def start(self):
        self._is_alive = True

    def stop(self):
        self._is_alive = False

    def call_handlers(self, msg):
        """This method is called in the main thread when a message arrives.

        Subclasses should override this method to handle incoming messages.
        """
        raise NotImplementedError("call_handlers must be defined in a subclass.")

    def flush(self, timeout=1.0):
        pass

    def call_handlers_later(self, *args, **kwds):
        """Call the message handlers later.

        The default implementation just calls the handlers immediately, but this
        method exists so that GUI toolkits can defer calling the handlers until
        after the event loop has run, as expected by GUI frontends.
        """
        self.call_handlers(*args, **kwds)

    def process_events(self):
        """Process any pending GUI events.

        This method will never be called from a frontend without an event
        loop (e.g., a terminal frontend).
        """
        raise NotImplementedError


class InProcessHBChannel:
    """A dummy heartbeat channel interface for in-process kernels.

    Normally we use the heartbeat to check that the kernel process is alive.
    When the kernel is in-process, that doesn't make sense, but clients still
    expect this interface.
    """

    time_to_dead = 3.0

    def __init__(self, client=None):
        super().__init__()
        self.client = client
        self._is_alive = False
        self._pause = True

    def is_alive(self):
        return self._is_alive

    def start(self):
        self._is_alive = True

    def stop(self):
        self._is_alive = False

    def pause(self):
        self._pause = True

    def unpause(self):
        self._pause = False

    def is_beating(self):
        return not self._pause


HBChannelABC.register(InProcessHBChannel)
204
.venv/Lib/site-packages/ipykernel/inprocess/client.py
Normal file
@ -0,0 +1,204 @@
"""A client for in-process kernels."""

# -----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License.  The full license is in
# the file COPYING, distributed as part of this software.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------

import asyncio

from jupyter_client.client import KernelClient
from jupyter_client.clientabc import KernelClientABC

# IPython imports
from traitlets import Instance, Type, default

# Local imports
from .channels import InProcessChannel, InProcessHBChannel

# -----------------------------------------------------------------------------
# Main kernel Client class
# -----------------------------------------------------------------------------


class InProcessKernelClient(KernelClient):
    """A client for an in-process kernel.

    This class implements the interface of
    `jupyter_client.clientabc.KernelClientABC` and allows
    (asynchronous) frontends to be used seamlessly with an in-process kernel.

    See `jupyter_client.client.KernelClient` for docstrings.
    """

    # The classes to use for the various channels.
    shell_channel_class = Type(InProcessChannel)
    iopub_channel_class = Type(InProcessChannel)
    stdin_channel_class = Type(InProcessChannel)
    control_channel_class = Type(InProcessChannel)
    hb_channel_class = Type(InProcessHBChannel)

    kernel = Instance("ipykernel.inprocess.ipkernel.InProcessKernel", allow_none=True)

    # --------------------------------------------------------------------------
    # Channel management methods
    # --------------------------------------------------------------------------

    @default("blocking_class")
    def _default_blocking_class(self):
        from .blocking import BlockingInProcessKernelClient

        return BlockingInProcessKernelClient

    def get_connection_info(self):
        d = super().get_connection_info()
        d["kernel"] = self.kernel
        return d

    def start_channels(self, *args, **kwargs):
        super().start_channels()
        self.kernel.frontends.append(self)

    @property
    def shell_channel(self):
        if self._shell_channel is None:
            self._shell_channel = self.shell_channel_class(self)
        return self._shell_channel

    @property
    def iopub_channel(self):
        if self._iopub_channel is None:
            self._iopub_channel = self.iopub_channel_class(self)
        return self._iopub_channel

    @property
    def stdin_channel(self):
        if self._stdin_channel is None:
            self._stdin_channel = self.stdin_channel_class(self)
        return self._stdin_channel

    @property
    def control_channel(self):
        if self._control_channel is None:
            self._control_channel = self.control_channel_class(self)
        return self._control_channel

    @property
    def hb_channel(self):
        if self._hb_channel is None:
            self._hb_channel = self.hb_channel_class(self)
        return self._hb_channel

    # Methods for sending specific messages
    # -------------------------------------

    def execute(
        self, code, silent=False, store_history=True, user_expressions=None, allow_stdin=None
    ):
        if allow_stdin is None:
            allow_stdin = self.allow_stdin
        content = dict(
            code=code,
            silent=silent,
            store_history=store_history,
            user_expressions=user_expressions or {},
            allow_stdin=allow_stdin,
        )
        msg = self.session.msg("execute_request", content)
        self._dispatch_to_kernel(msg)
        return msg["header"]["msg_id"]

    def complete(self, code, cursor_pos=None):
        if cursor_pos is None:
            cursor_pos = len(code)
        content = dict(code=code, cursor_pos=cursor_pos)
        msg = self.session.msg("complete_request", content)
        self._dispatch_to_kernel(msg)
        return msg["header"]["msg_id"]

    def inspect(self, code, cursor_pos=None, detail_level=0):
        if cursor_pos is None:
            cursor_pos = len(code)
        content = dict(
            code=code,
            cursor_pos=cursor_pos,
            detail_level=detail_level,
        )
        msg = self.session.msg("inspect_request", content)
        self._dispatch_to_kernel(msg)
        return msg["header"]["msg_id"]

    def history(self, raw=True, output=False, hist_access_type="range", **kwds):
        content = dict(raw=raw, output=output, hist_access_type=hist_access_type, **kwds)
        msg = self.session.msg("history_request", content)
        self._dispatch_to_kernel(msg)
        return msg["header"]["msg_id"]

    def shutdown(self, restart=False):
        # FIXME: What to do here?
        raise NotImplementedError("Cannot shutdown in-process kernel")

    def kernel_info(self):
        """Request kernel info."""
        msg = self.session.msg("kernel_info_request")
        self._dispatch_to_kernel(msg)
        return msg["header"]["msg_id"]

    def comm_info(self, target_name=None):
        """Request a dictionary of valid comms and their targets."""
        if target_name is None:
            content = {}
        else:
            content = dict(target_name=target_name)
        msg = self.session.msg("comm_info_request", content)
        self._dispatch_to_kernel(msg)
        return msg["header"]["msg_id"]

    def input(self, string):
        if self.kernel is None:
            raise RuntimeError("Cannot send input reply. No kernel exists.")
        self.kernel.raw_input_str = string

    def is_complete(self, code):
        msg = self.session.msg("is_complete_request", {"code": code})
        self._dispatch_to_kernel(msg)
        return msg["header"]["msg_id"]

    def _dispatch_to_kernel(self, msg):
        """Send a message to the kernel and handle a reply."""
        kernel = self.kernel
        if kernel is None:
            raise RuntimeError("Cannot send request. No kernel exists.")

        stream = kernel.shell_stream
        self.session.send(stream, msg)
        msg_parts = stream.recv_multipart()
        loop = asyncio.get_event_loop()
        loop.run_until_complete(kernel.dispatch_shell(msg_parts))
        idents, reply_msg = self.session.recv(stream, copy=False)
        self.shell_channel.call_handlers_later(reply_msg)

    def get_shell_msg(self, block=True, timeout=None):
        return self.shell_channel.get_msg(block, timeout)

    def get_iopub_msg(self, block=True, timeout=None):
        return self.iopub_channel.get_msg(block, timeout)

    def get_stdin_msg(self, block=True, timeout=None):
        return self.stdin_channel.get_msg(block, timeout)

    def get_control_msg(self, block=True, timeout=None):
        return self.control_channel.get_msg(block, timeout)


# -----------------------------------------------------------------------------
# ABC Registration
# -----------------------------------------------------------------------------

KernelClientABC.register(InProcessKernelClient)
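A hedged round-trip sketch (not part of the upstream file) against the dispatch path above, where `kc` is a ready BlockingInProcessKernelClient as constructed in the manager example below: the msg_id returned by each request helper matches the reply's parent header, because _dispatch_to_kernel runs the kernel synchronously.

msg_id = kc.complete("pri", cursor_pos=3)        # completion request
reply = kc.get_shell_msg(timeout=1)              # delivered synchronously
assert reply["parent_header"]["msg_id"] == msg_id
print(reply["content"]["matches"])               # e.g. ["print"]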
8
.venv/Lib/site-packages/ipykernel/inprocess/constants.py
Normal file
@ -0,0 +1,8 @@
"""Shared constants."""

# Because in-process communication is not networked, we can use a common Session
# key everywhere. This is not just the empty bytestring, to avoid tripping
# certain security checks in the rest of Jupyter that assume empty keys
# are insecure.
INPROCESS_KEY = b"inprocess"
191
.venv/Lib/site-packages/ipykernel/inprocess/ipkernel.py
Normal file
@ -0,0 +1,191 @@
"""An in-process kernel"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import logging
import sys
from contextlib import contextmanager

from IPython.core.interactiveshell import InteractiveShellABC
from traitlets import Any, Enum, Instance, List, Type, default

from ipykernel.ipkernel import IPythonKernel
from ipykernel.jsonutil import json_clean
from ipykernel.zmqshell import ZMQInteractiveShell

from ..iostream import BackgroundSocket, IOPubThread, OutStream
from .constants import INPROCESS_KEY
from .socket import DummySocket

# -----------------------------------------------------------------------------
# Main kernel class
# -----------------------------------------------------------------------------


class InProcessKernel(IPythonKernel):

    # -------------------------------------------------------------------------
    # InProcessKernel interface
    # -------------------------------------------------------------------------

    # The frontends connected to this kernel.
    frontends = List(Instance("ipykernel.inprocess.client.InProcessKernelClient", allow_none=True))

    # The GUI environment that the kernel is running under. This need not be
    # specified for the normal operation of the kernel, but is required for
    # IPython's GUI support (including pylab). The default is 'inline' because
    # it is safe under all GUI toolkits.
    gui = Enum(("tk", "gtk", "wx", "qt", "qt4", "inline"), default_value="inline")

    raw_input_str = Any()
    stdout = Any()
    stderr = Any()

    # -------------------------------------------------------------------------
    # Kernel interface
    # -------------------------------------------------------------------------

    shell_class = Type(allow_none=True)
    _underlying_iopub_socket = Instance(DummySocket, ())
    iopub_thread = Instance(IOPubThread)

    shell_stream = Instance(DummySocket, ())

    @default("iopub_thread")
    def _default_iopub_thread(self):
        thread = IOPubThread(self._underlying_iopub_socket)
        thread.start()
        return thread

    iopub_socket = Instance(BackgroundSocket)

    @default("iopub_socket")
    def _default_iopub_socket(self):
        return self.iopub_thread.background_socket

    stdin_socket = Instance(DummySocket, ())

    def __init__(self, **traits):
        super().__init__(**traits)

        self._underlying_iopub_socket.observe(self._io_dispatch, names=["message_sent"])
        self.shell.kernel = self

    async def execute_request(self, stream, ident, parent):
        """Override for temporary IO redirection."""
        with self._redirected_io():
            await super().execute_request(stream, ident, parent)

    def start(self):
        """Override registration of dispatchers for streams."""
        self.shell.exit_now = False

    async def _abort_queues(self):
        """The in-process kernel doesn't abort requests."""
        pass

    async def _flush_control_queue(self):
        """No need to flush control queues for in-process"""
        pass

    def _input_request(self, prompt, ident, parent, password=False):
        # Flush output before making the request.
        self.raw_input_str = None
        sys.stderr.flush()
        sys.stdout.flush()

        # Send the input request.
        content = json_clean(dict(prompt=prompt, password=password))
        msg = self.session.msg("input_request", content, parent)
        for frontend in self.frontends:
            if frontend.session.session == parent["header"]["session"]:
                frontend.stdin_channel.call_handlers(msg)
                break
        else:
            logging.error("No frontend found for raw_input request")
            return ""

        # Await a response.
        while self.raw_input_str is None:
            frontend.stdin_channel.process_events()
        return self.raw_input_str

    # -------------------------------------------------------------------------
    # Protected interface
    # -------------------------------------------------------------------------

    @contextmanager
    def _redirected_io(self):
        """Temporarily redirect IO to the kernel."""
        sys_stdout, sys_stderr = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = self.stdout, self.stderr
        yield
        sys.stdout, sys.stderr = sys_stdout, sys_stderr

    # ------ Trait change handlers --------------------------------------------

    def _io_dispatch(self, change):
        """Called when a message is sent to the IO socket."""
        ident, msg = self.session.recv(self.iopub_socket.io_thread.socket, copy=False)
        for frontend in self.frontends:
            frontend.iopub_channel.call_handlers(msg)

    # ------ Trait initializers -----------------------------------------------

    @default("log")
    def _default_log(self):
        return logging.getLogger(__name__)

    @default("session")
    def _default_session(self):
        from jupyter_client.session import Session

        return Session(parent=self, key=INPROCESS_KEY)

    @default("shell_class")
    def _default_shell_class(self):
        return InProcessInteractiveShell

    @default("stdout")
    def _default_stdout(self):
        return OutStream(self.session, self.iopub_thread, "stdout", watchfd=False)

    @default("stderr")
    def _default_stderr(self):
        return OutStream(self.session, self.iopub_thread, "stderr", watchfd=False)


# -----------------------------------------------------------------------------
# Interactive shell subclass
# -----------------------------------------------------------------------------


class InProcessInteractiveShell(ZMQInteractiveShell):

    kernel = Instance("ipykernel.inprocess.ipkernel.InProcessKernel", allow_none=True)

    # -------------------------------------------------------------------------
    # InteractiveShell interface
    # -------------------------------------------------------------------------

    def enable_gui(self, gui=None):
        """Enable GUI integration for the kernel."""
        if not gui:
            gui = self.kernel.gui
        self.active_eventloop = gui

    def enable_matplotlib(self, gui=None):
        """Enable matplotlib integration for the kernel."""
        if not gui:
            gui = self.kernel.gui
        return super().enable_matplotlib(gui)

    def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
        """Activate pylab support at runtime."""
        if not gui:
            gui = self.kernel.gui
        return super().enable_pylab(gui, import_all, welcome_message)


InteractiveShellABC.register(InProcessInteractiveShell)
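A standalone sketch (not part of the upstream file) of the _redirected_io pattern above: swap the process-wide streams for the duration of one request, then put them back.

import sys
from contextlib import contextmanager
from io import StringIO


@contextmanager
def redirected_io(stdout, stderr):
    sys_stdout, sys_stderr = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = stdout, stderr
    try:
        yield
    finally:
        # unlike the kernel version above, restore even if the body raises
        sys.stdout, sys.stderr = sys_stdout, sys_stderr


buf = StringIO()
with redirected_io(buf, buf):
    print("captured")
assert buf.getvalue() == "captured\n"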
82
.venv/Lib/site-packages/ipykernel/inprocess/manager.py
Normal file
@ -0,0 +1,82 @@
"""A kernel manager for in-process kernels."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

from jupyter_client.manager import KernelManager
from jupyter_client.managerabc import KernelManagerABC
from jupyter_client.session import Session
from traitlets import DottedObjectName, Instance, default

from .constants import INPROCESS_KEY


class InProcessKernelManager(KernelManager):
    """A manager for an in-process kernel.

    This class implements the interface of
    `jupyter_client.kernelmanagerabc.KernelManagerABC` and allows
    (asynchronous) frontends to be used seamlessly with an in-process kernel.

    See `jupyter_client.kernelmanager.KernelManager` for docstrings.
    """

    # The kernel process with which the KernelManager is communicating.
    kernel = Instance("ipykernel.inprocess.ipkernel.InProcessKernel", allow_none=True)
    # the client class for KM.client() shortcut
    client_class = DottedObjectName("ipykernel.inprocess.BlockingInProcessKernelClient")

    @default("blocking_class")
    def _default_blocking_class(self):
        from .blocking import BlockingInProcessKernelClient

        return BlockingInProcessKernelClient

    @default("session")
    def _default_session(self):
        # don't sign in-process messages
        return Session(key=INPROCESS_KEY, parent=self)

    # --------------------------------------------------------------------------
    # Kernel management methods
    # --------------------------------------------------------------------------

    def start_kernel(self, **kwds):
        from ipykernel.inprocess.ipkernel import InProcessKernel

        self.kernel = InProcessKernel(parent=self, session=self.session)

    def shutdown_kernel(self):
        self.kernel.iopub_thread.stop()
        self._kill_kernel()

    def restart_kernel(self, now=False, **kwds):
        self.shutdown_kernel()
        self.start_kernel(**kwds)

    @property
    def has_kernel(self):
        return self.kernel is not None

    def _kill_kernel(self):
        self.kernel = None

    def interrupt_kernel(self):
        raise NotImplementedError("Cannot interrupt in-process kernel.")

    def signal_kernel(self, signum):
        raise NotImplementedError("Cannot signal in-process kernel.")

    def is_alive(self):
        return self.kernel is not None

    def client(self, **kwargs):
        kwargs["kernel"] = self.kernel
        return super().client(**kwargs)


# -----------------------------------------------------------------------------
# ABC Registration
# -----------------------------------------------------------------------------

KernelManagerABC.register(InProcessKernelManager)
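A usage sketch tying the in-process pieces together (not part of the upstream file, assuming ipykernel is installed); it mirrors the setup used by the test cases further below.

from ipykernel.inprocess.manager import InProcessKernelManager

km = InProcessKernelManager()
km.start_kernel()                # builds an InProcessKernel; no subprocess

kc = km.client()                 # a BlockingInProcessKernelClient
kc.start_channels()
kc.wait_for_ready()

kc.execute("x = 6 * 7")
# The kernel lives in this very process, so its namespace is directly visible:
assert km.kernel.shell.user_ns["x"] == 42

km.shutdown_kernel()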
40
.venv/Lib/site-packages/ipykernel/inprocess/socket.py
Normal file
@ -0,0 +1,40 @@
"""Defines a dummy socket implementing (part of) the zmq.Socket interface."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

from queue import Queue

import zmq
from traitlets import HasTraits, Instance, Int

# -----------------------------------------------------------------------------
# Dummy socket class
# -----------------------------------------------------------------------------


class DummySocket(HasTraits):
    """A dummy socket implementing (part of) the zmq.Socket interface."""

    queue = Instance(Queue, ())
    message_sent = Int(0)  # Should be an Event
    context = Instance(zmq.Context)

    def _context_default(self):
        return zmq.Context()

    # -------------------------------------------------------------------------
    # Socket interface
    # -------------------------------------------------------------------------

    def recv_multipart(self, flags=0, copy=True, track=False):
        return self.queue.get_nowait()

    def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
        msg_parts = list(map(zmq.Message, msg_parts))  # type:ignore[arg-type]
        self.queue.put_nowait(msg_parts)
        self.message_sent += 1

    def flush(self, timeout=1.0):
        """no-op to comply with stream API"""
        pass
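A quick round-trip sketch (not part of the upstream file): the dummy socket is just a queue behind the zmq.Socket send/recv API, and bumping message_sent is what lets the kernel's trait observer fire.

from ipykernel.inprocess.socket import DummySocket

s = DummySocket()
s.send_multipart([b"header", b"body"])
parts = s.recv_multipart()          # returns the queued frames
assert [p.bytes for p in parts] == [b"header", b"body"]
assert s.message_sent == 1          # observers hook this trait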
156
.venv/Lib/site-packages/ipykernel/inprocess/tests/test_kernel.py
Normal file
156
.venv/Lib/site-packages/ipykernel/inprocess/tests/test_kernel.py
Normal file
@ -0,0 +1,156 @@
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import asyncio
import sys
import unittest
from contextlib import contextmanager
from io import StringIO

import pytest
import tornado
from IPython.utils.io import capture_output
from jupyter_client.session import Session

from ipykernel.inprocess.blocking import BlockingInProcessKernelClient
from ipykernel.inprocess.ipkernel import InProcessKernel
from ipykernel.inprocess.manager import InProcessKernelManager
from ipykernel.tests.utils import assemble_output

orig_msg = Session.msg


def _init_asyncio_patch():
    """set default asyncio policy to be compatible with tornado

    Tornado 6 (at least) is not compatible with the default
    asyncio implementation on Windows

    Pick the older SelectorEventLoopPolicy on Windows
    if the known-incompatible default policy is in use.

    do this as early as possible to make it a low priority and overridable

    ref: https://github.com/tornadoweb/tornado/issues/2608

    FIXME: if/when tornado supports the defaults in asyncio,
    remove and bump tornado requirement for py38
    """
    if (
        sys.platform.startswith("win")
        and sys.version_info >= (3, 8)
        and tornado.version_info < (6, 1)
    ):
        import asyncio

        try:
            from asyncio import (
                WindowsProactorEventLoopPolicy,
                WindowsSelectorEventLoopPolicy,
            )
        except ImportError:
            pass
            # not affected
        else:
            if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
                # WindowsProactorEventLoopPolicy is not compatible with tornado 6
                # fallback to the pre-3.8 default of Selector
                asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())


def _inject_cell_id(_self, *args, **kwargs):
    """
    This patches jupyter_client.session:Session.msg to add a cell_id to the returned message metadata
    """
    assert isinstance(_self, Session)
    res = orig_msg(_self, *args, **kwargs)
    assert "cellId" not in res["metadata"]
    res["metadata"]["cellId"] = "test_cell_id"
    return res


@contextmanager
def patch_cell_id():
    try:
        Session.msg = _inject_cell_id
        yield
    finally:
        Session.msg = orig_msg


class InProcessKernelTestCase(unittest.TestCase):
    def setUp(self):
        _init_asyncio_patch()
        self.km = InProcessKernelManager()
        self.km.start_kernel()
        self.kc = self.km.client()
        self.kc.start_channels()
        self.kc.wait_for_ready()

    def test_with_cell_id(self):
        with patch_cell_id():
            self.kc.execute("1+1")

    def test_pylab(self):
        """Does %pylab work in the in-process kernel?"""
        _ = pytest.importorskip("matplotlib", reason="This test requires matplotlib")
        kc = self.kc
        kc.execute("%pylab")
        out, err = assemble_output(kc.get_iopub_msg)
        self.assertIn("matplotlib", out)

    def test_raw_input(self):
        """Does the in-process kernel handle raw_input correctly?"""
        io = StringIO("foobar\n")
        sys_stdin = sys.stdin
        sys.stdin = io
        try:
            self.kc.execute("x = input()")
        finally:
            sys.stdin = sys_stdin
        assert self.km.kernel.shell.user_ns.get("x") == "foobar"

    @pytest.mark.skipif("__pypy__" in sys.builtin_module_names, reason="fails on pypy")
    def test_stdout(self):
        """Does the in-process kernel correctly capture IO?"""
        kernel = InProcessKernel()

        with capture_output() as io:
            kernel.shell.run_cell('print("foo")')
        assert io.stdout == "foo\n"

        kc = BlockingInProcessKernelClient(kernel=kernel, session=kernel.session)
        kernel.frontends.append(kc)
        kc.execute('print("bar")')
        out, err = assemble_output(kc.get_iopub_msg)
        assert out == "bar\n"

    @pytest.mark.skip(reason="Currently don't capture during test as pytest does its own capturing")
    def test_capfd(self):
        """Does the in-process kernel correctly capture file descriptor output?"""
        kernel = InProcessKernel()

        with capture_output() as io:
            kernel.shell.run_cell('print("foo")')
        assert io.stdout == "foo\n"

        kc = BlockingInProcessKernelClient(kernel=kernel, session=kernel.session)
        kernel.frontends.append(kc)
        kc.execute("import os")
        kc.execute('os.system("echo capfd")')
        out, err = assemble_output(kc.iopub_channel)
        assert out == "capfd\n"

    def test_getpass_stream(self):
        "Tests that kernel getpass accepts the stream parameter"
        kernel = InProcessKernel()
        kernel._allow_stdin = True
        kernel._input_request = lambda *args, **kwargs: None

        kernel.getpass(stream="non empty")

    def test_do_execute(self):
        kernel = InProcessKernel()
        asyncio.run(kernel.do_execute("a=1", True))
        assert kernel.shell.user_ns["a"] == 1
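A standalone sketch of the Session.msg monkeypatch exercised above: any message built while patch_cell_id() is active carries the injected metadata (the construction below is illustrative, not part of the committed tests):

from jupyter_client.session import Session

with patch_cell_id():
    msg = Session().msg("execute_request", content={"code": "1+1"})
print(msg["metadata"]["cellId"])  # 'test_cell_id'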
106
.venv/Lib/site-packages/ipykernel/inprocess/tests/test_kernelmanager.py
Normal file
@ -0,0 +1,106 @@
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import unittest

from ipykernel.inprocess.manager import InProcessKernelManager

# -----------------------------------------------------------------------------
# Test case
# -----------------------------------------------------------------------------


class InProcessKernelManagerTestCase(unittest.TestCase):
    def setUp(self):
        self.km = InProcessKernelManager()

    def tearDown(self):
        if self.km.has_kernel:
            self.km.shutdown_kernel()

    def test_interface(self):
        """Does the in-process kernel manager implement the basic KM interface?"""
        km = self.km
        assert not km.has_kernel

        km.start_kernel()
        assert km.has_kernel
        assert km.kernel is not None

        kc = km.client()
        assert not kc.channels_running

        kc.start_channels()
        assert kc.channels_running

        old_kernel = km.kernel
        km.restart_kernel()
        self.assertIsNotNone(km.kernel)
        assert km.kernel != old_kernel

        km.shutdown_kernel()
        assert not km.has_kernel

        self.assertRaises(NotImplementedError, km.interrupt_kernel)
        self.assertRaises(NotImplementedError, km.signal_kernel, 9)

        kc.stop_channels()
        assert not kc.channels_running

    def test_execute(self):
        """Does executing code in an in-process kernel work?"""
        km = self.km
        km.start_kernel()
        kc = km.client()
        kc.start_channels()
        kc.wait_for_ready()
        kc.execute("foo = 1")
        assert km.kernel.shell.user_ns["foo"] == 1

    def test_complete(self):
        """Does requesting completion from an in-process kernel work?"""
        km = self.km
        km.start_kernel()
        kc = km.client()
        kc.start_channels()
        kc.wait_for_ready()
        km.kernel.shell.push({"my_bar": 0, "my_baz": 1})
        kc.complete("my_ba", 5)
        msg = kc.get_shell_msg()
        assert msg["header"]["msg_type"] == "complete_reply"
        self.assertEqual(sorted(msg["content"]["matches"]), ["my_bar", "my_baz"])

    def test_inspect(self):
        """Does requesting object information from an in-process kernel work?"""
        km = self.km
        km.start_kernel()
        kc = km.client()
        kc.start_channels()
        kc.wait_for_ready()
        km.kernel.shell.user_ns["foo"] = 1
        kc.inspect("foo")
        msg = kc.get_shell_msg()
        assert msg["header"]["msg_type"] == "inspect_reply"
        content = msg["content"]
        assert content["found"]
        text = content["data"]["text/plain"]
        self.assertIn("int", text)

    def test_history(self):
        """Does requesting history from an in-process kernel work?"""
        km = self.km
        km.start_kernel()
        kc = km.client()
        kc.start_channels()
        kc.wait_for_ready()
        kc.execute("1")
        kc.history(hist_access_type="tail", n=1)
        msg = kc.shell_channel.get_msgs()[-1]
        assert msg["header"]["msg_type"] == "history_reply"
        history = msg["content"]["history"]
        assert len(history) == 1
        assert history[0][2] == "1"


if __name__ == "__main__":
    unittest.main()
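Taken together, the manager/client lifecycle these tests exercise condenses to a short usage sketch (variable names here are illustrative):

from ipykernel.inprocess.manager import InProcessKernelManager

km = InProcessKernelManager()
km.start_kernel()                # the kernel runs inside this very process
kc = km.client()
kc.start_channels()
kc.wait_for_ready()
kc.execute("answer = 6 * 7")     # executes directly in our interpreter
print(km.kernel.shell.user_ns["answer"])  # 42 -- the namespace is shared
km.shutdown_kernel()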
588
.venv/Lib/site-packages/ipykernel/iostream.py
Normal file
@ -0,0 +1,588 @@
"""Wrappers for forwarding stdout/stderr over zmq"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import atexit
import io
import os
import sys
import threading
import traceback
import warnings
from binascii import b2a_hex
from collections import deque
from io import StringIO, TextIOBase
from typing import Any, Callable, Optional
from weakref import WeakSet

import zmq

if zmq.pyzmq_version_info() >= (17, 0):
    from tornado.ioloop import IOLoop
else:
    # deprecated since pyzmq 17
    from zmq.eventloop.ioloop import IOLoop

from jupyter_client.session import extract_header
from zmq.eventloop.zmqstream import ZMQStream

# -----------------------------------------------------------------------------
# Globals
# -----------------------------------------------------------------------------

MASTER = 0
CHILD = 1

# -----------------------------------------------------------------------------
# IO classes
# -----------------------------------------------------------------------------


class IOPubThread:
    """An object for sending IOPub messages in a background thread

    Prevents a blocking main thread from delaying output from threads.

    IOPubThread(pub_socket).background_socket is a Socket-API-providing object
    whose IO is always run in a thread.
    """

    def __init__(self, socket, pipe=False):
        """Create IOPub thread

        Parameters
        ----------
        socket : zmq.PUB Socket
            the socket on which messages will be sent.
        pipe : bool
            Whether this process should listen for IOPub messages
            piped from subprocesses.
        """
        self.socket = socket
        self.background_socket = BackgroundSocket(self)
        self._master_pid = os.getpid()
        self._pipe_flag = pipe
        self.io_loop = IOLoop(make_current=False)
        if pipe:
            self._setup_pipe_in()
        self._local = threading.local()
        self._events: deque[Callable[..., Any]] = deque()
        self._event_pipes: WeakSet[Any] = WeakSet()
        self._setup_event_pipe()
        self.thread = threading.Thread(target=self._thread_main, name="IOPub")
        self.thread.daemon = True
        self.thread.pydev_do_not_trace = True  # type:ignore[attr-defined]
        self.thread.is_pydev_daemon_thread = True  # type:ignore[attr-defined]
        self.thread.name = "IOPub"

    def _thread_main(self):
        """The inner loop that's actually run in a thread"""
        self.io_loop.make_current()
        self.io_loop.start()
        self.io_loop.close(all_fds=True)

    def _setup_event_pipe(self):
        """Create the PULL socket listening for events that should fire in this thread."""
        ctx = self.socket.context
        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0

        _uuid = b2a_hex(os.urandom(16)).decode("ascii")
        iface = self._event_interface = "inproc://%s" % _uuid
        pipe_in.bind(iface)
        self._event_puller = ZMQStream(pipe_in, self.io_loop)
        self._event_puller.on_recv(self._handle_event)

    @property
    def _event_pipe(self):
        """thread-local event pipe for signaling events that should be processed in the thread"""
        try:
            event_pipe = self._local.event_pipe
        except AttributeError:
            # new thread, new event pipe
            ctx = self.socket.context
            event_pipe = ctx.socket(zmq.PUSH)
            event_pipe.linger = 0
            event_pipe.connect(self._event_interface)
            self._local.event_pipe = event_pipe
            # WeakSet so that event pipes will be closed by garbage collection
            # when their threads are terminated
            self._event_pipes.add(event_pipe)
        return event_pipe

    def _handle_event(self, msg):
        """Handle an event on the event pipe

        Content of the message is ignored.

        Whenever *an* event arrives on the event stream,
        *all* waiting events are processed in order.
        """
        # freeze event count so new writes don't extend the queue
        # while we are processing
        n_events = len(self._events)
        for _ in range(n_events):
            event_f = self._events.popleft()
            event_f()

    def _setup_pipe_in(self):
        """setup listening pipe for IOPub from forked subprocesses"""
        ctx = self.socket.context

        # use UUID to authenticate pipe messages
        self._pipe_uuid = os.urandom(16)

        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0

        try:
            self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
        except zmq.ZMQError as e:
            warnings.warn(
                "Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e
                + "\nsubprocess output will be unavailable."
            )
            self._pipe_flag = False
            pipe_in.close()
            return
        self._pipe_in = ZMQStream(pipe_in, self.io_loop)
        self._pipe_in.on_recv(self._handle_pipe_msg)

    def _handle_pipe_msg(self, msg):
        """handle a pipe message from a subprocess"""
        if not self._pipe_flag or not self._is_master_process():
            return
        if msg[0] != self._pipe_uuid:
            print("Bad pipe message: %s", msg, file=sys.__stderr__)
            return
        self.send_multipart(msg[1:])

    def _setup_pipe_out(self):
        # must be new context after fork
        ctx = zmq.Context()
        pipe_out = ctx.socket(zmq.PUSH)
        pipe_out.linger = 3000  # 3s timeout for pipe_out sends before discarding the message
        pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
        return ctx, pipe_out

    def _is_master_process(self):
        return os.getpid() == self._master_pid

    def _check_mp_mode(self):
        """check for forks, and switch to zmq pipeline if necessary"""
        if not self._pipe_flag or self._is_master_process():
            return MASTER
        else:
            return CHILD

    def start(self):
        """Start the IOPub thread"""
        self.thread.name = "IOPub"
        self.thread.start()
        # make sure we don't prevent process exit
        # I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
        atexit.register(self.stop)

    def stop(self):
        """Stop the IOPub thread"""
        if not self.thread.is_alive():
            return
        self.io_loop.add_callback(self.io_loop.stop)
        self.thread.join()
        # close *all* event pipes, created in any thread
        # event pipes can only be used from other threads while self.thread.is_alive()
        # so after thread.join, this should be safe
        for event_pipe in self._event_pipes:
            event_pipe.close()

    def close(self):
        if self.closed:
            return
        self.socket.close()
        self.socket = None

    @property
    def closed(self):
        return self.socket is None

    def schedule(self, f):
        """Schedule a function to be called in our IO thread.

        If the thread is not running, call immediately.
        """
        if self.thread.is_alive():
            self._events.append(f)
            # wake event thread (message content is ignored)
            self._event_pipe.send(b"")
        else:
            f()

    def send_multipart(self, *args, **kwargs):
        """send_multipart schedules actual zmq send in my thread.

        If my thread isn't running (e.g. forked process), send immediately.
        """
        self.schedule(lambda: self._really_send(*args, **kwargs))

    def _really_send(self, msg, *args, **kwargs):
        """The callback that actually sends messages"""
        if self.closed:
            return

        mp_mode = self._check_mp_mode()

        if mp_mode != CHILD:
            # we are master, do a regular send
            self.socket.send_multipart(msg, *args, **kwargs)
        else:
            # we are a child, pipe to master
            # new context/socket for every pipe-out
            # since forks don't teardown politely, use ctx.term to ensure send has completed
            ctx, pipe_out = self._setup_pipe_out()
            pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
            pipe_out.close()
            ctx.term()


class BackgroundSocket:
    """Wrapper around IOPub thread that provides zmq send[_multipart]"""

    io_thread = None

    def __init__(self, io_thread):
        self.io_thread = io_thread

    def __getattr__(self, attr):
        """Wrap socket attr access for backward-compatibility"""
        if attr.startswith("__") and attr.endswith("__"):
            # don't wrap magic methods
            super().__getattr__(attr)  # type:ignore[misc]
        assert self.io_thread is not None
        if hasattr(self.io_thread.socket, attr):
            warnings.warn(
                f"Accessing zmq Socket attribute {attr} on BackgroundSocket"
                f" is deprecated since ipykernel 4.3.0"
                f" use .io_thread.socket.{attr}",
                DeprecationWarning,
                stacklevel=2,
            )
            return getattr(self.io_thread.socket, attr)
        super().__getattr__(attr)  # type:ignore[misc]

    def __setattr__(self, attr, value):
        if attr == "io_thread" or (attr.startswith("__") and attr.endswith("__")):
            super().__setattr__(attr, value)
        else:
            warnings.warn(
                f"Setting zmq Socket attribute {attr} on BackgroundSocket"
                f" is deprecated since ipykernel 4.3.0"
                f" use .io_thread.socket.{attr}",
                DeprecationWarning,
                stacklevel=2,
            )
            assert self.io_thread is not None
            setattr(self.io_thread.socket, attr, value)

    def send(self, msg, *args, **kwargs):
        return self.send_multipart([msg], *args, **kwargs)

    def send_multipart(self, *args, **kwargs):
        """Schedule send in IO thread"""
        assert self.io_thread is not None
        return self.io_thread.send_multipart(*args, **kwargs)


class OutStream(TextIOBase):
    """A file like object that publishes the stream to a 0MQ PUB socket.

    Output is handed off to an IO Thread
    """

    # timeout for flush to avoid infinite hang
    # in case of misbehavior
    flush_timeout = 10
    # The time interval between automatic flushes, in seconds.
    flush_interval = 0.2
    topic = None
    encoding = "UTF-8"
    _exc: Optional[Any] = None

    def fileno(self):
        """
        Things like subprocess will peek and write to the fileno() of stderr/stdout.
        """
        if getattr(self, "_original_stdstream_copy", None) is not None:
            return self._original_stdstream_copy
        else:
            raise io.UnsupportedOperation("fileno")

    def _watch_pipe_fd(self):
        """
        We've redirected the standard streams (stdout/stderr) into a pipe.

        We need to watch in a thread and redirect them to the right places.

        1) the ZMQ channels to show in notebook interfaces,
        2) the original stdout/err, to capture errors in terminals.

        We cannot schedule this on the ioloop thread, as this might be blocking.
        """

        try:
            bts = os.read(self._fid, 1000)
            while bts and self._should_watch:
                self.write(bts.decode())
                os.write(self._original_stdstream_copy, bts)
                bts = os.read(self._fid, 1000)
        except Exception:
            self._exc = sys.exc_info()

    def __init__(
        self,
        session,
        pub_thread,
        name,
        pipe=None,
        echo=None,
        *,
        watchfd=True,
        isatty=False,
    ):
        """
        Parameters
        ----------
        name : str {'stderr', 'stdout'}
            the name of the standard stream to replace
        watchfd : bool (default, True)
            Watch the file descriptor corresponding to the replaced stream.
            This is useful if you know some underlying code will write directly
            to the file descriptor by its number. It will spawn a watching thread,
            that will swap the given file descriptor for a pipe, read from the
            pipe, and insert this into the current Stream.
        isatty : bool (default, False)
            Indication of whether this stream has terminal capabilities (e.g. can handle colors)

        """
        if pipe is not None:
            warnings.warn(
                "pipe argument to OutStream is deprecated and ignored since ipykernel 4.2.3.",
                DeprecationWarning,
                stacklevel=2,
            )
        # This is necessary for compatibility with Python built-in streams
        self.session = session
        if not isinstance(pub_thread, IOPubThread):
            # Backward-compat: given socket, not thread. Wrap in a thread.
            warnings.warn(
                "Since IPykernel 4.3, OutStream should be created with "
                "IOPubThread, not %r" % pub_thread,
                DeprecationWarning,
                stacklevel=2,
            )
            pub_thread = IOPubThread(pub_thread)
            pub_thread.start()
        self.pub_thread = pub_thread
        self.name = name
        self.topic = b"stream." + name.encode()
        self.parent_header = {}
        self._master_pid = os.getpid()
        self._flush_pending = False
        self._subprocess_flush_pending = False
        self._io_loop = pub_thread.io_loop
        self._buffer_lock = threading.RLock()
        self._buffer = StringIO()
        self.echo = None
        self._isatty = bool(isatty)

        if (
            watchfd
            and (sys.platform.startswith("linux") or sys.platform.startswith("darwin"))
            and ("PYTEST_CURRENT_TEST" not in os.environ)
        ):
            # Pytest sets its own capture. Don't redirect from within pytest.

            self._should_watch = True
            self._setup_stream_redirects(name)

        if echo:
            if hasattr(echo, "read") and hasattr(echo, "write"):
                self.echo = echo
            else:
                raise ValueError("echo argument must be a file like object")

    def isatty(self):
        """Return a bool indicating whether this is an 'interactive' stream.

        Returns:
            Boolean
        """
        return self._isatty

    def _setup_stream_redirects(self, name):
        pr, pw = os.pipe()
        fno = getattr(sys, name).fileno()
        self._original_stdstream_copy = os.dup(fno)
        os.dup2(pw, fno)

        self._fid = pr

        self._exc = None
        self.watch_fd_thread = threading.Thread(target=self._watch_pipe_fd)
        self.watch_fd_thread.daemon = True
        self.watch_fd_thread.start()

    def _is_master_process(self):
        return os.getpid() == self._master_pid

    def set_parent(self, parent):
        self.parent_header = extract_header(parent)

    def close(self):
        if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
            self._should_watch = False
            self.watch_fd_thread.join()
        if self._exc:
            etype, value, tb = self._exc
            traceback.print_exception(etype, value, tb)
        self.pub_thread = None

    @property
    def closed(self):
        return self.pub_thread is None

    def _schedule_flush(self):
        """schedule a flush in the IO thread

        call this on write, to indicate that flush should be called soon.
        """
        if self._flush_pending:
            return
        self._flush_pending = True

        # add_timeout has to be handed to the io thread via event pipe
        def _schedule_in_thread():
            self._io_loop.call_later(self.flush_interval, self._flush)

        self.pub_thread.schedule(_schedule_in_thread)

    def flush(self):
        """trigger actual zmq send

        send will happen in the background thread
        """
        if (
            self.pub_thread
            and self.pub_thread.thread is not None
            and self.pub_thread.thread.is_alive()
            and self.pub_thread.thread.ident != threading.current_thread().ident
        ):
            # request flush on the background thread
            self.pub_thread.schedule(self._flush)
            # wait for flush to actually get through, if we can.
            evt = threading.Event()
            self.pub_thread.schedule(evt.set)
            # and give a timeout to avoid an infinite hang
            if not evt.wait(self.flush_timeout):
                # write directly to __stderr__ instead of warning because
                # if this is happening sys.stderr may be the problem.
                print("IOStream.flush timed out", file=sys.__stderr__)
        else:
            self._flush()

    def _flush(self):
        """This is where the actual send happens.

        _flush should generally be called in the IO thread,
        unless the thread has been destroyed (e.g. forked subprocess).
        """
        self._flush_pending = False
        self._subprocess_flush_pending = False

        if self.echo is not None:
            try:
                self.echo.flush()
            except OSError as e:
                if self.echo is not sys.__stderr__:
                    print(f"Flush failed: {e}", file=sys.__stderr__)

        data = self._flush_buffer()
        if data:
            # FIXME: this disables Session's fork-safe check,
            # since pub_thread is itself fork-safe.
            # There should be a better way to do this.
            self.session.pid = os.getpid()
            content = {"name": self.name, "text": data}
            self.session.send(
                self.pub_thread,
                "stream",
                content=content,
                parent=self.parent_header,
                ident=self.topic,
            )

    def write(self, string: str) -> Optional[int]:  # type:ignore[override]
        """Write to current stream after encoding if necessary

        Returns
        -------
        len : int
            number of items from input parameter written to stream.

        """

        if not isinstance(string, str):
            raise TypeError(f"write() argument must be str, not {type(string)}")

        if self.echo is not None:
            try:
                self.echo.write(string)
            except OSError as e:
                if self.echo is not sys.__stderr__:
                    print(f"Write failed: {e}", file=sys.__stderr__)

        if self.pub_thread is None:
            raise ValueError("I/O operation on closed file")
        else:
            is_child = not self._is_master_process()
            # only touch the buffer in the IO thread to avoid races
            with self._buffer_lock:
                self._buffer.write(string)
            if is_child:
                # mp.Pool cannot be trusted to flush promptly (or ever),
                # and this helps.
                if self._subprocess_flush_pending:
                    return None
                self._subprocess_flush_pending = True
                # We can not rely on self._io_loop.call_later from a subprocess
                self.pub_thread.schedule(self._flush)
            else:
                self._schedule_flush()

        return len(string)

    def writelines(self, sequence):
        if self.pub_thread is None:
            raise ValueError("I/O operation on closed file")
        else:
            for string in sequence:
                self.write(string)

    def writable(self):
        return True

    def _flush_buffer(self):
        """clear the current buffer and return the current buffer data."""
        buf = self._rotate_buffer()
        data = buf.getvalue()
        buf.close()
        return data

    def _rotate_buffer(self):
        """Returns the current buffer and replaces it with an empty buffer."""
        with self._buffer_lock:
            old_buffer = self._buffer
            self._buffer = StringIO()
            return old_buffer
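For orientation, the pieces above wire together roughly as follows; a minimal sketch assuming a locally bound PUB socket (a real kernel binds it from its connection file) and watchfd=False to skip the fd redirection:

import sys

import zmq
from jupyter_client.session import Session

from ipykernel.iostream import IOPubThread, OutStream

ctx = zmq.Context()
pub = ctx.socket(zmq.PUB)
pub.bind_to_random_port("tcp://127.0.0.1")  # subscribers would attach here

thread = IOPubThread(pub)
thread.start()  # zmq sends now happen off the main thread

sys.stdout = OutStream(Session(), thread, "stdout", watchfd=False)
print("hello")      # buffered, then published as a 'stream' message
sys.stdout.flush()  # force the send instead of waiting flush_interval

sys.stdout = sys.__stdout__  # restore before tearing down
thread.stop()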
660
.venv/Lib/site-packages/ipykernel/ipkernel.py
Normal file
@ -0,0 +1,660 @@
"""The IPython kernel implementation"""

import asyncio
import builtins
import getpass
import signal
import sys
import typing as t
from contextlib import contextmanager
from functools import partial

from IPython.core import release
from IPython.utils.tokenutil import line_at_cursor, token_at_cursor
from traitlets import Any, Bool, Instance, List, Type, observe, observe_compat
from zmq.eventloop.zmqstream import ZMQStream

from .comm import CommManager
from .compiler import XCachingCompiler
from .debugger import Debugger, _is_debugpy_available
from .eventloops import _use_appnope
from .kernelbase import Kernel as KernelBase
from .kernelbase import _accepts_cell_id
from .zmqshell import ZMQInteractiveShell

try:
    from IPython.core.interactiveshell import _asyncio_runner
except ImportError:
    _asyncio_runner = None

try:
    from IPython.core.completer import provisionalcompleter as _provisionalcompleter
    from IPython.core.completer import rectify_completions as _rectify_completions

    _use_experimental_60_completion = True
except ImportError:
    _use_experimental_60_completion = False


_EXPERIMENTAL_KEY_NAME = "_jupyter_types_experimental"


class IPythonKernel(KernelBase):
    shell = Instance("IPython.core.interactiveshell.InteractiveShellABC", allow_none=True)
    shell_class = Type(ZMQInteractiveShell)

    use_experimental_completions = Bool(
        True,
        help="Set this flag to False to deactivate the use of experimental IPython completion APIs.",
    ).tag(config=True)

    debugpy_stream = Instance(ZMQStream, allow_none=True) if _is_debugpy_available else None

    user_module = Any()

    @observe("user_module")
    @observe_compat
    def _user_module_changed(self, change):
        if self.shell is not None:
            self.shell.user_module = change["new"]

    user_ns = Instance(dict, args=None, allow_none=True)

    @observe("user_ns")
    @observe_compat
    def _user_ns_changed(self, change):
        if self.shell is not None:
            self.shell.user_ns = change["new"]
            self.shell.init_user_ns()

    # A reference to the Python builtin 'raw_input' function.
    # (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
    _sys_raw_input = Any()
    _sys_eval_input = Any()

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Initialize the Debugger
        if _is_debugpy_available:
            self.debugger = Debugger(
                self.log,
                self.debugpy_stream,
                self._publish_debug_event,
                self.debug_shell_socket,
                self.session,
                self.debug_just_my_code,
            )

        # Initialize the InteractiveShell subclass
        self.shell = self.shell_class.instance(
            parent=self,
            profile_dir=self.profile_dir,
            user_module=self.user_module,
            user_ns=self.user_ns,
            kernel=self,
            compiler_class=XCachingCompiler,
        )
        self.shell.displayhook.session = self.session
        self.shell.displayhook.pub_socket = self.iopub_socket
        self.shell.displayhook.topic = self._topic("execute_result")
        self.shell.display_pub.session = self.session
        self.shell.display_pub.pub_socket = self.iopub_socket

        self.comm_manager = CommManager(parent=self, kernel=self)

        self.shell.configurables.append(self.comm_manager)
        comm_msg_types = ["comm_open", "comm_msg", "comm_close"]
        for msg_type in comm_msg_types:
            self.shell_handlers[msg_type] = getattr(self.comm_manager, msg_type)

        if _use_appnope() and self._darwin_app_nap:
            # Disable app-nap as the kernel is not a gui but can have guis
            import appnope

            appnope.nope()

    help_links = List(
        [
            {
                "text": "Python Reference",
                "url": "https://docs.python.org/%i.%i" % sys.version_info[:2],
            },
            {
                "text": "IPython Reference",
                "url": "https://ipython.org/documentation.html",
            },
            {
                "text": "NumPy Reference",
                "url": "https://docs.scipy.org/doc/numpy/reference/",
            },
            {
                "text": "SciPy Reference",
                "url": "https://docs.scipy.org/doc/scipy/reference/",
            },
            {
                "text": "Matplotlib Reference",
                "url": "https://matplotlib.org/contents.html",
            },
            {
                "text": "SymPy Reference",
                "url": "http://docs.sympy.org/latest/index.html",
            },
            {
                "text": "pandas Reference",
                "url": "https://pandas.pydata.org/pandas-docs/stable/",
            },
        ]
    ).tag(config=True)

    # Kernel info fields
    implementation = "ipython"
    implementation_version = release.version
    language_info = {
        "name": "python",
        "version": sys.version.split()[0],
        "mimetype": "text/x-python",
        "codemirror_mode": {"name": "ipython", "version": sys.version_info[0]},
        "pygments_lexer": "ipython%d" % 3,
        "nbconvert_exporter": "python",
        "file_extension": ".py",
    }

    def dispatch_debugpy(self, msg):
        if _is_debugpy_available:
            # The first frame is the socket id, we can drop it
            frame = msg[1].bytes.decode("utf-8")
            self.log.debug("Debugpy received: %s", frame)
            self.debugger.tcp_client.receive_dap_frame(frame)

    @property
    def banner(self):
        return self.shell.banner

    async def poll_stopped_queue(self):
        while True:
            await self.debugger.handle_stopped_event()

    def start(self):
        self.shell.exit_now = False
        if self.debugpy_stream is None:
            self.log.warning("debugpy_stream undefined, debugging will not be enabled")
        else:
            self.debugpy_stream.on_recv(self.dispatch_debugpy, copy=False)
        super().start()
        if self.debugpy_stream:
            asyncio.run_coroutine_threadsafe(
                self.poll_stopped_queue(), self.control_thread.io_loop.asyncio_loop
            )

    def set_parent(self, ident, parent, channel="shell"):
        """Overridden from parent to tell the display hook and output streams
        about the parent message.
        """
        super().set_parent(ident, parent, channel)
        if channel == "shell":
            self.shell.set_parent(parent)

    def init_metadata(self, parent):
        """Initialize metadata.

        Run at the beginning of each execution request.
        """
        md = super().init_metadata(parent)
        # FIXME: remove deprecated ipyparallel-specific code
        # This is required for ipyparallel < 5.0
        md.update(
            {
                "dependencies_met": True,
                "engine": self.ident,
            }
        )
        return md

    def finish_metadata(self, parent, metadata, reply_content):
        """Finish populating metadata.

        Run after completing an execution request.
        """
        # FIXME: remove deprecated ipyparallel-specific code
        # This is required by ipyparallel < 5.0
        metadata["status"] = reply_content["status"]
        if reply_content["status"] == "error" and reply_content["ename"] == "UnmetDependency":
            metadata["dependencies_met"] = False

        return metadata

    def _forward_input(self, allow_stdin=False):
        """Forward raw_input and getpass to the current frontend.

        via input_request
        """
        self._allow_stdin = allow_stdin

        self._sys_raw_input = builtins.input
        builtins.input = self.raw_input

        self._save_getpass = getpass.getpass
        getpass.getpass = self.getpass

    def _restore_input(self):
        """Restore raw_input, getpass"""
        builtins.input = self._sys_raw_input

        getpass.getpass = self._save_getpass

    @property  # type:ignore[override]
    def execution_count(self):
        return self.shell.execution_count

    @execution_count.setter
    def execution_count(self, value):
        # Ignore the incrementing done by KernelBase, in favour of our shell's
        # execution counter.
        pass

    @contextmanager
    def _cancel_on_sigint(self, future):
        """ContextManager for capturing SIGINT and cancelling a future

        SIGINT raises in the event loop when running async code,
        but we want it to halt a coroutine.

        Ideally, it would raise KeyboardInterrupt,
        but this turns it into a CancelledError.
        At least it gets a decent traceback to the user.
        """
        sigint_future: asyncio.Future[int] = asyncio.Future()

        # whichever future finishes first,
        # cancel the other one
        def cancel_unless_done(f, _ignored):
            if f.cancelled() or f.done():
                return
            f.cancel()

        # when sigint finishes,
        # abort the coroutine with CancelledError
        sigint_future.add_done_callback(partial(cancel_unless_done, future))
        # when the main future finishes,
        # stop watching for SIGINT events
        future.add_done_callback(partial(cancel_unless_done, sigint_future))

        def handle_sigint(*args):
            def set_sigint_result():
                if sigint_future.cancelled() or sigint_future.done():
                    return
                sigint_future.set_result(1)

            # use add_callback for thread safety
            self.io_loop.add_callback(set_sigint_result)

        # set the custom sigint handler during this context
        save_sigint = signal.signal(signal.SIGINT, handle_sigint)
        try:
            yield
        finally:
            # restore the previous sigint handler
            signal.signal(signal.SIGINT, save_sigint)

    async def do_execute(
        self,
        code,
        silent,
        store_history=True,
        user_expressions=None,
        allow_stdin=False,
        *,
        cell_id=None,
    ):
        shell = self.shell  # we'll need this a lot here

        self._forward_input(allow_stdin)

        reply_content: t.Dict[str, t.Any] = {}
        if hasattr(shell, "run_cell_async") and hasattr(shell, "should_run_async"):
            run_cell = shell.run_cell_async
            should_run_async = shell.should_run_async
            with_cell_id = _accepts_cell_id(run_cell)
        else:
            should_run_async = lambda cell: False  # noqa
            # older IPython,
            # use blocking run_cell and wrap it in coroutine

            async def run_cell(*args, **kwargs):
                return shell.run_cell(*args, **kwargs)

            with_cell_id = _accepts_cell_id(shell.run_cell)
        try:

            # default case: runner is asyncio and asyncio is already running
            # TODO: this should check every case for "are we inside the runner",
            # not just asyncio
            preprocessing_exc_tuple = None
            try:
                transformed_cell = self.shell.transform_cell(code)
            except Exception:
                transformed_cell = code
                preprocessing_exc_tuple = sys.exc_info()

            if (
                _asyncio_runner
                and shell.loop_runner is _asyncio_runner
                and asyncio.get_event_loop().is_running()
                and should_run_async(
                    code,
                    transformed_cell=transformed_cell,
                    preprocessing_exc_tuple=preprocessing_exc_tuple,
                )
            ):
                if with_cell_id:
                    coro = run_cell(
                        code,
                        store_history=store_history,
                        silent=silent,
                        transformed_cell=transformed_cell,
                        preprocessing_exc_tuple=preprocessing_exc_tuple,
                        cell_id=cell_id,
                    )
                else:
                    coro = run_cell(
                        code,
                        store_history=store_history,
                        silent=silent,
                        transformed_cell=transformed_cell,
                        preprocessing_exc_tuple=preprocessing_exc_tuple,
                    )

                coro_future = asyncio.ensure_future(coro)

                with self._cancel_on_sigint(coro_future):
                    res = None
                    try:
                        res = await coro_future
                    finally:
                        shell.events.trigger("post_execute")
                        if not silent:
                            shell.events.trigger("post_run_cell", res)
            else:
                # runner isn't already running,
                # make synchronous call,
                # letting shell dispatch to loop runners
                if with_cell_id:
                    res = shell.run_cell(
                        code,
                        store_history=store_history,
                        silent=silent,
                        cell_id=cell_id,
                    )
                else:
                    res = shell.run_cell(code, store_history=store_history, silent=silent)
        finally:
            self._restore_input()

        if res.error_before_exec is not None:
            err = res.error_before_exec
        else:
            err = res.error_in_exec

        if res.success:
            reply_content["status"] = "ok"
        else:
            reply_content["status"] = "error"

            reply_content.update(
                {
                    "traceback": shell._last_traceback or [],
                    "ename": str(type(err).__name__),
                    "evalue": str(err),
                }
            )

            # FIXME: deprecated piece for ipyparallel (remove in 5.0):
            e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method="execute")
            reply_content["engine_info"] = e_info

        # Return the execution counter so clients can display prompts
        reply_content["execution_count"] = shell.execution_count - 1

        if "traceback" in reply_content:
            self.log.info(
                "Exception in execute request:\n%s",
                "\n".join(reply_content["traceback"]),
            )

        # At this point, we can tell whether the main code execution succeeded
        # or not. If it did, we proceed to evaluate user_expressions
        if reply_content["status"] == "ok":
            reply_content["user_expressions"] = shell.user_expressions(user_expressions or {})
        else:
            # If there was an error, don't even try to compute expressions
            reply_content["user_expressions"] = {}

        # Payloads should be retrieved regardless of outcome, so we can both
        # recover partial output (that could have been generated early in a
        # block, before an error) and always clear the payload system.
        reply_content["payload"] = shell.payload_manager.read_payload()
        # Be aggressive about clearing the payload because we don't want
        # it to sit in memory until the next execute_request comes in.
        shell.payload_manager.clear_payload()

        return reply_content

    def do_complete(self, code, cursor_pos):
        if _use_experimental_60_completion and self.use_experimental_completions:
            return self._experimental_do_complete(code, cursor_pos)

        # FIXME: IPython completers currently assume single line,
        # but completion messages give multi-line context
        # For now, extract line from cell, based on cursor_pos:
        if cursor_pos is None:
            cursor_pos = len(code)
        line, offset = line_at_cursor(code, cursor_pos)
        line_cursor = cursor_pos - offset

        txt, matches = self.shell.complete("", line, line_cursor)
        return {
            "matches": matches,
            "cursor_end": cursor_pos,
            "cursor_start": cursor_pos - len(txt),
            "metadata": {},
            "status": "ok",
        }

    async def do_debug_request(self, msg):
        if _is_debugpy_available:
            return await self.debugger.process_request(msg)

    def _experimental_do_complete(self, code, cursor_pos):
        """
        Experimental completions from IPython, using Jedi.
        """
        if cursor_pos is None:
            cursor_pos = len(code)
        with _provisionalcompleter():
            raw_completions = self.shell.Completer.completions(code, cursor_pos)
            completions = list(_rectify_completions(code, raw_completions))

            comps = []
            for comp in completions:
                comps.append(
                    dict(
                        start=comp.start,
                        end=comp.end,
                        text=comp.text,
                        type=comp.type,
                        signature=comp.signature,
                    )
                )

        if completions:
            s = completions[0].start
            e = completions[0].end
            matches = [c.text for c in completions]
        else:
            s = cursor_pos
            e = cursor_pos
            matches = []

        return {
            "matches": matches,
            "cursor_end": e,
            "cursor_start": s,
            "metadata": {_EXPERIMENTAL_KEY_NAME: comps},
            "status": "ok",
        }

    def do_inspect(self, code, cursor_pos, detail_level=0, omit_sections=()):
        name = token_at_cursor(code, cursor_pos)

        reply_content: t.Dict[str, t.Any] = {"status": "ok"}
        reply_content["data"] = {}
        reply_content["metadata"] = {}
        try:
            if release.version_info >= (8,):
                # `omit_sections` keyword will be available in IPython 8, see
                # https://github.com/ipython/ipython/pull/13343
                bundle = self.shell.object_inspect_mime(
                    name,
                    detail_level=detail_level,
                    omit_sections=omit_sections,
                )
            else:
                bundle = self.shell.object_inspect_mime(name, detail_level=detail_level)
            reply_content["data"].update(bundle)
            if not self.shell.enable_html_pager:
                reply_content["data"].pop("text/html")
            reply_content["found"] = True
        except KeyError:
            reply_content["found"] = False

        return reply_content

    def do_history(
        self,
        hist_access_type,
        output,
        raw,
        session=0,
        start=0,
        stop=None,
        n=None,
        pattern=None,
        unique=False,
    ):
        if hist_access_type == "tail":
            hist = self.shell.history_manager.get_tail(
                n, raw=raw, output=output, include_latest=True
            )

        elif hist_access_type == "range":
            hist = self.shell.history_manager.get_range(
                session, start, stop, raw=raw, output=output
            )

        elif hist_access_type == "search":
            hist = self.shell.history_manager.search(
                pattern, raw=raw, output=output, n=n, unique=unique
            )
        else:
            hist = []

        return {
            "status": "ok",
            "history": list(hist),
        }

    def do_shutdown(self, restart):
        self.shell.exit_now = True
        return dict(status="ok", restart=restart)

    def do_is_complete(self, code):
        transformer_manager = getattr(self.shell, "input_transformer_manager", None)
        if transformer_manager is None:
            # input_splitter attribute is deprecated
            transformer_manager = self.shell.input_splitter
        status, indent_spaces = transformer_manager.check_complete(code)
        r = {"status": status}
        if status == "incomplete":
            r["indent"] = " " * indent_spaces
        return r

    def do_apply(self, content, bufs, msg_id, reply_metadata):
        from .serialize import serialize_object, unpack_apply_message

        shell = self.shell
        try:
            working = shell.user_ns

            prefix = "_" + str(msg_id).replace("-", "") + "_"

            f, args, kwargs = unpack_apply_message(bufs, working, copy=False)

            fname = getattr(f, "__name__", "f")

            fname = prefix + "f"
            argname = prefix + "args"
            kwargname = prefix + "kwargs"
            resultname = prefix + "result"

            ns = {fname: f, argname: args, kwargname: kwargs, resultname: None}
            # print ns
            working.update(ns)
            code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname)
            try:
                exec(code, shell.user_global_ns, shell.user_ns)
                result = working.get(resultname)
            finally:
                for key in ns:
                    working.pop(key)

            result_buf = serialize_object(
                result,
                buffer_threshold=self.session.buffer_threshold,
                item_threshold=self.session.item_threshold,
            )

        except BaseException as e:
            # invoke IPython traceback formatting
            shell.showtraceback()
            reply_content = {
                "traceback": shell._last_traceback or [],
                "ename": str(type(e).__name__),
                "evalue": str(e),
            }
            # FIXME: deprecated piece for ipyparallel (remove in 5.0):
            e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method="apply")
            reply_content["engine_info"] = e_info

            self.send_response(
                self.iopub_socket,
                "error",
                reply_content,
                ident=self._topic("error"),
                channel="shell",
            )
            self.log.info("Exception in apply request:\n%s", "\n".join(reply_content["traceback"]))
            result_buf = []
            reply_content["status"] = "error"
        else:
            reply_content = {"status": "ok"}

        return reply_content, result_buf

    def do_clear(self):
        self.shell.reset(False)
        return dict(status="ok")


# This exists only for backwards compatibility - use IPythonKernel instead


class Kernel(IPythonKernel):
    def __init__(self, *args, **kwargs):
        import warnings

        warnings.warn(
            "Kernel is a deprecated alias of ipykernel.ipkernel.IPythonKernel",
            DeprecationWarning,
        )
        super().__init__(*args, **kwargs)
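Because IPKernelApp (the next file) exposes the kernel class as a configurable Type trait, IPythonKernel can be subclassed and launched directly; a hypothetical sketch (the EchoBannerKernel name and metadata below are ours, for illustration):

from ipykernel.ipkernel import IPythonKernel


class EchoBannerKernel(IPythonKernel):
    # override only the kernel-info metadata; execution machinery is inherited
    implementation = "echo-banner"
    implementation_version = "0.1"

    @property
    def banner(self):
        return "EchoBannerKernel on " + super().banner


if __name__ == "__main__":
    from ipykernel.kernelapp import IPKernelApp

    # kernel_class is the Type trait defined on IPKernelApp below
    IPKernelApp.launch_instance(kernel_class=EchoBannerKernel)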
162
.venv/Lib/site-packages/ipykernel/jsonutil.py
Normal file
@ -0,0 +1,162 @@
"""Utilities to manipulate JSON objects."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import math
import numbers
import re
import types
from binascii import b2a_base64
from datetime import datetime

from jupyter_client._version import version_info as jupyter_client_version

next_attr_name = "__next__"

# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------

# timestamp formats
ISO8601 = "%Y-%m-%dT%H:%M:%S.%f"
ISO8601_PAT = re.compile(
    r"^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})(\.\d{1,6})?Z?([\+\-]\d{2}:?\d{2})?$"
)

# holy crap, strptime is not threadsafe.
# Calling it once at import seems to help.
datetime.strptime("1", "%d")

# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------


# constants for identifying png/jpeg data
PNG = b"\x89PNG\r\n\x1a\n"
# front of PNG base64-encoded
PNG64 = b"iVBORw0KG"
JPEG = b"\xff\xd8"
# front of JPEG base64-encoded
JPEG64 = b"/9"
# constants for identifying gif data
GIF_64 = b"R0lGODdh"
GIF89_64 = b"R0lGODlh"
# front of PDF base64-encoded
PDF64 = b"JVBER"

JUPYTER_CLIENT_MAJOR_VERSION = jupyter_client_version[0]


def encode_images(format_dict):
    """b64-encodes images in a displaypub format dict

    Perhaps this should be handled in json_clean itself?

    Parameters
    ----------
    format_dict : dict
        A dictionary of display data keyed by mime-type

    Returns
    -------
    format_dict : dict
        A copy of the same dictionary,
        but binary image data ('image/png', 'image/jpeg' or 'application/pdf')
        is base64-encoded.

    """

    # no need for handling of ambiguous bytestrings on Python 3,
    # where bytes objects always represent binary data and are thus
    # base64-encoded.
    return format_dict


def json_clean(obj):
    """Deprecated, this is a no-op for jupyter-client>=7.

    Clean an object to ensure it's safe to encode in JSON.

    Atomic, immutable objects are returned unmodified. Sets and tuples are
    converted to lists, lists are copied and dicts are also copied.

    Note: dicts whose keys could cause collisions upon encoding (such as a dict
    with both the number 1 and the string '1' as keys) will cause a ValueError
    to be raised.

    Parameters
    ----------
    obj : any python object

    Returns
    -------
    out : object
        A version of the input which will not cause an encoding error when
        encoded as JSON. Note that this function does not *encode* its inputs,
        it simply sanitizes them so that there will be no encoding errors later.

    """
    if int(JUPYTER_CLIENT_MAJOR_VERSION) >= 7:
        return obj

    # types that are 'atomic' and ok in json as-is.
    atomic_ok = (str, type(None))

    # containers that we need to convert into lists
    container_to_list = (tuple, set, types.GeneratorType)

    # Since bools are a subtype of Integrals, which are a subtype of Reals,
    # we have to check them in that order.

    if isinstance(obj, bool):
        return obj

    if isinstance(obj, numbers.Integral):
        # cast int to int, in case subclasses override __str__ (e.g. boost enum, #4598)
        return int(obj)

    if isinstance(obj, numbers.Real):
        # cast out-of-range floats to their reprs
        if math.isnan(obj) or math.isinf(obj):
            return repr(obj)
        return float(obj)

    if isinstance(obj, atomic_ok):
        return obj

    if isinstance(obj, bytes):
        # unambiguous binary data is base64-encoded
        # (this probably should have happened upstream)
        return b2a_base64(obj).decode("ascii")

    if isinstance(obj, container_to_list) or (
        hasattr(obj, "__iter__") and hasattr(obj, next_attr_name)
    ):
        obj = list(obj)

    if isinstance(obj, list):
        return [json_clean(x) for x in obj]

    if isinstance(obj, dict):
        # First, validate that the dict won't lose data in conversion due to
        # key collisions after stringification. This can happen with keys like
        # True and 'true' or 1 and '1', which collide in JSON.
        nkeys = len(obj)
        nkeys_collapsed = len(set(map(str, obj)))
        if nkeys != nkeys_collapsed:
            raise ValueError(
                "dict cannot be safely converted to JSON: "
                "key collision would lead to dropped values"
            )
        # If all OK, proceed by making the new dict that will be json-safe
        out = {}
        for k, v in obj.items():
            out[str(k)] = json_clean(v)
        return out
    if isinstance(obj, datetime):
        return obj.strftime(ISO8601)

    # we don't understand it, it's probably an unserializable object
    raise ValueError("Can't clean for JSON: %r" % obj)
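A behavior sketch for the pre-jupyter-client-7 path above (on >= 7 the function returns its input unchanged); assumes json_clean is imported from ipykernel.jsonutil:

from ipykernel.jsonutil import json_clean

print(json_clean((1, 2, float("nan"))))  # [1, 2, 'nan'] -- tuple -> list, nan -> repr
print(json_clean({True: "x"}))           # {'True': 'x'} -- keys are stringified
try:
    json_clean({1: "a", "1": "b"})       # keys collide once stringified
except ValueError as e:
    print(e)                             # explains the dropped-values risk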
728
.venv/Lib/site-packages/ipykernel/kernelapp.py
Normal file
@ -0,0 +1,728 @@
"""An Application for launching a kernel"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import atexit
import errno
import logging
import os
import signal
import sys
import traceback
from functools import partial
from io import FileIO, TextIOWrapper
from logging import StreamHandler

import zmq
from IPython.core.application import (
    BaseIPythonApplication,
    base_aliases,
    base_flags,
    catch_config_error,
)
from IPython.core.profiledir import ProfileDir
from IPython.core.shellapp import InteractiveShellApp, shell_aliases, shell_flags
from jupyter_client import write_connection_file
from jupyter_client.connect import ConnectionFileMixin
from jupyter_client.session import Session, session_aliases, session_flags
from jupyter_core.paths import jupyter_runtime_dir
from tornado import ioloop
from traitlets import (
    Any,
    Bool,
    Dict,
    DottedObjectName,
    Instance,
    Integer,
    Type,
    Unicode,
    default,
)
from traitlets.utils import filefind
from traitlets.utils.importstring import import_item
from zmq.eventloop.zmqstream import ZMQStream

from .control import ControlThread
from .heartbeat import Heartbeat

# local imports
from .iostream import IOPubThread
from .ipkernel import IPythonKernel
from .parentpoller import ParentPollerUnix, ParentPollerWindows
from .zmqshell import ZMQInteractiveShell

# -----------------------------------------------------------------------------
# Flags and Aliases
# -----------------------------------------------------------------------------

kernel_aliases = dict(base_aliases)
kernel_aliases.update(
    {
        "ip": "IPKernelApp.ip",
        "hb": "IPKernelApp.hb_port",
        "shell": "IPKernelApp.shell_port",
        "iopub": "IPKernelApp.iopub_port",
        "stdin": "IPKernelApp.stdin_port",
        "control": "IPKernelApp.control_port",
        "f": "IPKernelApp.connection_file",
        "transport": "IPKernelApp.transport",
    }
)

kernel_flags = dict(base_flags)
kernel_flags.update(
    {
        "no-stdout": ({"IPKernelApp": {"no_stdout": True}}, "redirect stdout to the null device"),
        "no-stderr": ({"IPKernelApp": {"no_stderr": True}}, "redirect stderr to the null device"),
        "pylab": (
            {"IPKernelApp": {"pylab": "auto"}},
            """Pre-load matplotlib and numpy for interactive use with
        the default matplotlib backend.""",
        ),
        "trio-loop": (
            {"InteractiveShell": {"trio_loop": False}},
            "Enable Trio as main event loop.",
        ),
    }
)

# inherit flags&aliases for any IPython shell apps
kernel_aliases.update(shell_aliases)
kernel_flags.update(shell_flags)

# inherit flags&aliases for Sessions
kernel_aliases.update(session_aliases)
kernel_flags.update(session_flags)

_ctrl_c_message = """\
NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work.

To exit, you will have to explicitly quit this process, by either sending
"quit" from a client, or using Ctrl-\\ in UNIX-like environments.

To read more about this, see https://github.com/ipython/ipython/issues/2049

"""

# -----------------------------------------------------------------------------
# Application class for starting an IPython Kernel
# -----------------------------------------------------------------------------


class IPKernelApp(BaseIPythonApplication, InteractiveShellApp, ConnectionFileMixin):
    name = "ipython-kernel"
    aliases = Dict(kernel_aliases)
    flags = Dict(kernel_flags)
    classes = [IPythonKernel, ZMQInteractiveShell, ProfileDir, Session]
    # the kernel class, as an importstring
    kernel_class = Type(
        "ipykernel.ipkernel.IPythonKernel",
        klass="ipykernel.kernelbase.Kernel",
        help="""The Kernel subclass to be used.

        This should allow easy re-use of the IPKernelApp entry point
        to configure and launch kernels other than IPython's own.
        """,
    ).tag(config=True)
    kernel = Any()
    poller = Any()  # don't restrict this even though current pollers are all Threads
    heartbeat = Instance(Heartbeat, allow_none=True)

    context = Any()
    shell_socket = Any()
    control_socket = Any()
    debugpy_socket = Any()
    debug_shell_socket = Any()
    stdin_socket = Any()
    iopub_socket = Any()
    iopub_thread = Any()
    control_thread = Any()

    _ports = Dict()

    subcommands = {
        "install": (
            "ipykernel.kernelspec.InstallIPythonKernelSpecApp",
            "Install the IPython kernel",
        ),
    }

    # connection info:
    connection_dir = Unicode()

    @default("connection_dir")
    def _default_connection_dir(self):
        return jupyter_runtime_dir()

    @property
    def abs_connection_file(self):
        if os.path.basename(self.connection_file) == self.connection_file:
            return os.path.join(self.connection_dir, self.connection_file)
        else:
            return self.connection_file

    # streams, etc.
    no_stdout = Bool(False, help="redirect stdout to the null device").tag(config=True)
    no_stderr = Bool(False, help="redirect stderr to the null device").tag(config=True)
    trio_loop = Bool(False, help="Set main event loop.").tag(config=True)
    quiet = Bool(True, help="Only send stdout/stderr to output stream").tag(config=True)
    outstream_class = DottedObjectName(
        "ipykernel.iostream.OutStream", help="The importstring for the OutStream factory"
    ).tag(config=True)
    displayhook_class = DottedObjectName(
        "ipykernel.displayhook.ZMQDisplayHook", help="The importstring for the DisplayHook factory"
    ).tag(config=True)

    capture_fd_output = Bool(
        True,
        help="""Attempt to capture and forward low-level output, e.g. produced by Extension libraries.
        """,
    ).tag(config=True)

    # polling
    parent_handle = Integer(
        int(os.environ.get("JPY_PARENT_PID") or 0),
        help="""kill this process if its parent dies. On Windows, the argument
        specifies the HANDLE of the parent process, otherwise it is simply boolean.
        """,
    ).tag(config=True)
    interrupt = Integer(
        int(os.environ.get("JPY_INTERRUPT_EVENT") or 0),
        help="""ONLY USED ON WINDOWS
        Interrupt this process when the parent is signaled.
        """,
    ).tag(config=True)

    def init_crash_handler(self):
        sys.excepthook = self.excepthook

    def excepthook(self, etype, evalue, tb):
        # write uncaught traceback to 'real' stderr, not zmq-forwarder
        traceback.print_exception(etype, evalue, tb, file=sys.__stderr__)

    def init_poller(self):
        if sys.platform == "win32":
            if self.interrupt or self.parent_handle:
                self.poller = ParentPollerWindows(self.interrupt, self.parent_handle)
        elif self.parent_handle and self.parent_handle != 1:
            # PID 1 (init) is special and will never go away,
            # only be reassigned.
            # Parent polling doesn't work if ppid == 1 to start with.
            self.poller = ParentPollerUnix()

    def _try_bind_socket(self, s, port):
        iface = "%s://%s" % (self.transport, self.ip)
        if self.transport == "tcp":
            if port <= 0:
                port = s.bind_to_random_port(iface)
            else:
                s.bind("tcp://%s:%i" % (self.ip, port))
        elif self.transport == "ipc":
            if port <= 0:
                port = 1
                path = "%s-%i" % (self.ip, port)
                while os.path.exists(path):
                    port = port + 1
                    path = "%s-%i" % (self.ip, port)
            else:
                path = "%s-%i" % (self.ip, port)
            s.bind("ipc://%s" % path)
        return port

    def _bind_socket(self, s, port):
        try:
            win_in_use = errno.WSAEADDRINUSE  # type:ignore[attr-defined]
        except AttributeError:
            win_in_use = None

        # Try up to 100 times to bind a port when in conflict to avoid
        # infinite attempts in bad setups
        max_attempts = 1 if port else 100
        for attempt in range(max_attempts):
            try:
                return self._try_bind_socket(s, port)
            except zmq.ZMQError as ze:
                # Raise if we have any error not related to socket binding
                if ze.errno != errno.EADDRINUSE and ze.errno != win_in_use:
                    raise
                if attempt == max_attempts - 1:
                    raise

    def write_connection_file(self):
        """write connection info to JSON file"""
        cf = self.abs_connection_file
        self.log.debug("Writing connection file: %s", cf)
        write_connection_file(
            cf,
            ip=self.ip,
            key=self.session.key,
            transport=self.transport,
            shell_port=self.shell_port,
            stdin_port=self.stdin_port,
            hb_port=self.hb_port,
            iopub_port=self.iopub_port,
            control_port=self.control_port,
        )

    def cleanup_connection_file(self):
        cf = self.abs_connection_file
        self.log.debug("Cleaning up connection file: %s", cf)
        try:
            os.remove(cf)
        except OSError:
            pass

        self.cleanup_ipc_files()

    def init_connection_file(self):
        if not self.connection_file:
            self.connection_file = "kernel-%s.json" % os.getpid()
        try:
            self.connection_file = filefind(self.connection_file, [".", self.connection_dir])
        except OSError:
            self.log.debug("Connection file not found: %s", self.connection_file)
            # This means I own it, and I'll create it in this directory:
            os.makedirs(os.path.dirname(self.abs_connection_file), mode=0o700, exist_ok=True)
            # Also, I will clean it up:
            atexit.register(self.cleanup_connection_file)
            return
        try:
            self.load_connection_file()
        except Exception:
            self.log.error(
                "Failed to load connection file: %r", self.connection_file, exc_info=True
            )
            self.exit(1)

    def init_sockets(self):
        # Create a context, a session, and the kernel sockets.
        self.log.info("Starting the kernel at pid: %i", os.getpid())
        assert self.context is None, "init_sockets cannot be called twice!"
        self.context = context = zmq.Context()
        atexit.register(self.close)

        self.shell_socket = context.socket(zmq.ROUTER)
        self.shell_socket.linger = 1000
        self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
        self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port)

        self.stdin_socket = context.socket(zmq.ROUTER)
        self.stdin_socket.linger = 1000
        self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
        self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port)

        if hasattr(zmq, "ROUTER_HANDOVER"):
            # set router-handover to workaround zeromq reconnect problems
            # in certain rare circumstances
            # see ipython/ipykernel#270 and zeromq/libzmq#2892
            self.shell_socket.router_handover = self.stdin_socket.router_handover = 1

        self.init_control(context)
        self.init_iopub(context)

    def init_control(self, context):
        self.control_socket = context.socket(zmq.ROUTER)
        self.control_socket.linger = 1000
        self.control_port = self._bind_socket(self.control_socket, self.control_port)
        self.log.debug("control ROUTER Channel on port: %i" % self.control_port)

        self.debugpy_socket = context.socket(zmq.STREAM)
        self.debugpy_socket.linger = 1000

        self.debug_shell_socket = context.socket(zmq.DEALER)
        self.debug_shell_socket.linger = 1000
        if self.shell_socket.getsockopt(zmq.LAST_ENDPOINT):
            self.debug_shell_socket.connect(self.shell_socket.getsockopt(zmq.LAST_ENDPOINT))

        if hasattr(zmq, "ROUTER_HANDOVER"):
            # set router-handover to workaround zeromq reconnect problems
            # in certain rare circumstances
            # see ipython/ipykernel#270 and zeromq/libzmq#2892
            self.control_socket.router_handover = 1

        self.control_thread = ControlThread(daemon=True)

    def init_iopub(self, context):
        self.iopub_socket = context.socket(zmq.PUB)
        self.iopub_socket.linger = 1000
        self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
        self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
        self.configure_tornado_logger()
        self.iopub_thread = IOPubThread(self.iopub_socket, pipe=True)
        self.iopub_thread.start()
        # backward-compat: wrap iopub socket API in background thread
        self.iopub_socket = self.iopub_thread.background_socket

    def init_heartbeat(self):
        """start the heart beating"""
        # heartbeat doesn't share context, because it mustn't be blocked
        # by the GIL, which is accessed by libzmq when freeing zero-copy messages
        hb_ctx = zmq.Context()
        self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port))
        self.hb_port = self.heartbeat.port
        self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port)
        self.heartbeat.start()

    def close(self):
        """Close zmq sockets in an orderly fashion"""
        # un-capture IO before we start closing channels
        self.reset_io()
        self.log.info("Cleaning up sockets")
        if self.heartbeat:
            self.log.debug("Closing heartbeat channel")
            self.heartbeat.context.term()
        if self.iopub_thread:
            self.log.debug("Closing iopub channel")
            self.iopub_thread.stop()
            self.iopub_thread.close()
        if self.control_thread and self.control_thread.is_alive():
            self.log.debug("Closing control thread")
            self.control_thread.stop()
            self.control_thread.join()

        if self.debugpy_socket and not self.debugpy_socket.closed:
            self.debugpy_socket.close()
        if self.debug_shell_socket and not self.debug_shell_socket.closed:
            self.debug_shell_socket.close()

        for channel in ("shell", "control", "stdin"):
            self.log.debug("Closing %s channel", channel)
            socket = getattr(self, channel + "_socket", None)
            if socket and not socket.closed:
                socket.close()
        self.log.debug("Terminating zmq context")
        self.context.term()
        self.log.debug("Terminated zmq context")

    def log_connection_info(self):
        """display connection info, and store ports"""
        basename = os.path.basename(self.connection_file)
        if (
            basename == self.connection_file
            or os.path.dirname(self.connection_file) == self.connection_dir
        ):
            # use shortname
            tail = basename
        else:
            tail = self.connection_file
        lines = [
            "To connect another client to this kernel, use:",
            "    --existing %s" % tail,
        ]
        # log connection info
        # info-level, so often not shown.
        # frontends should use the %connect_info magic
        # to see the connection info
        for line in lines:
            self.log.info(line)
        # also raw print to the terminal if no parent_handle (`ipython kernel`)
        # unless log-level is CRITICAL (--quiet)
        if not self.parent_handle and self.log_level < logging.CRITICAL:
            print(_ctrl_c_message, file=sys.__stdout__)
            for line in lines:
                print(line, file=sys.__stdout__)

        self._ports = dict(
            shell=self.shell_port,
            iopub=self.iopub_port,
            stdin=self.stdin_port,
            hb=self.hb_port,
            control=self.control_port,
        )

    def init_blackhole(self):
        """redirects stdout/stderr to devnull if necessary"""
        if self.no_stdout or self.no_stderr:
            blackhole = open(os.devnull, "w")
            if self.no_stdout:
                sys.stdout = sys.__stdout__ = blackhole
            if self.no_stderr:
                sys.stderr = sys.__stderr__ = blackhole

    def init_io(self):
        """Redirect input streams and set a display hook."""
        if self.outstream_class:
            outstream_factory = import_item(str(self.outstream_class))
            if sys.stdout is not None:
                sys.stdout.flush()

            e_stdout = None if self.quiet else sys.__stdout__
            e_stderr = None if self.quiet else sys.__stderr__

            if not self.capture_fd_output:
                outstream_factory = partial(outstream_factory, watchfd=False)

            sys.stdout = outstream_factory(self.session, self.iopub_thread, "stdout", echo=e_stdout)
            if sys.stderr is not None:
                sys.stderr.flush()
            sys.stderr = outstream_factory(self.session, self.iopub_thread, "stderr", echo=e_stderr)
            if hasattr(sys.stderr, "_original_stdstream_copy"):

                for handler in self.log.handlers:
                    if isinstance(handler, StreamHandler) and (handler.stream.buffer.fileno() == 2):
                        self.log.debug("Seeing logger to stderr, rerouting to raw filedescriptor.")

                        handler.stream = TextIOWrapper(
                            FileIO(
                                sys.stderr._original_stdstream_copy,  # type:ignore[attr-defined]
                                "w",
                            )
                        )
        if self.displayhook_class:
            displayhook_factory = import_item(str(self.displayhook_class))
            self.displayhook = displayhook_factory(self.session, self.iopub_socket)
            sys.displayhook = self.displayhook

        self.patch_io()

    def reset_io(self):
        """restore original io

        restores state after init_io
        """
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
        sys.displayhook = sys.__displayhook__

    def patch_io(self):
        """Patch important libraries that can't handle sys.stdout forwarding"""
        try:
            import faulthandler
        except ImportError:
            pass
        else:
            # Warning: this is a monkeypatch of `faulthandler.enable`, watch for possible
            # updates to the upstream API and update accordingly (up-to-date as of Python 3.5):
            # https://docs.python.org/3/library/faulthandler.html#faulthandler.enable

            # change default file to __stderr__ from forwarded stderr
            faulthandler_enable = faulthandler.enable

            def enable(file=sys.__stderr__, all_threads=True, **kwargs):
                return faulthandler_enable(file=file, all_threads=all_threads, **kwargs)

            faulthandler.enable = enable

            if hasattr(faulthandler, "register"):
                faulthandler_register = faulthandler.register

                def register(signum, file=sys.__stderr__, all_threads=True, chain=False, **kwargs):
                    return faulthandler_register(
                        signum, file=file, all_threads=all_threads, chain=chain, **kwargs
                    )

                faulthandler.register = register

    def init_signal(self):
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    def init_kernel(self):
        """Create the Kernel object itself"""
        shell_stream = ZMQStream(self.shell_socket)
        control_stream = ZMQStream(self.control_socket, self.control_thread.io_loop)
        debugpy_stream = ZMQStream(self.debugpy_socket, self.control_thread.io_loop)
        self.control_thread.start()
        kernel_factory = self.kernel_class.instance

        kernel = kernel_factory(
            parent=self,
            session=self.session,
            control_stream=control_stream,
            debugpy_stream=debugpy_stream,
            debug_shell_socket=self.debug_shell_socket,
            shell_stream=shell_stream,
            control_thread=self.control_thread,
            iopub_thread=self.iopub_thread,
            iopub_socket=self.iopub_socket,
            stdin_socket=self.stdin_socket,
            log=self.log,
            profile_dir=self.profile_dir,
            user_ns=self.user_ns,
        )
        kernel.record_ports({name + "_port": port for name, port in self._ports.items()})
        self.kernel = kernel

        # Allow the displayhook to get the execution count
        self.displayhook.get_execution_count = lambda: kernel.execution_count

    def init_gui_pylab(self):
        """Enable GUI event loop integration, taking pylab into account."""

        # Register inline backend as default
        # this is higher priority than matplotlibrc,
        # but lower priority than anything else (mpl.use() for instance).
        # This only affects matplotlib >= 1.5
        if not os.environ.get("MPLBACKEND"):
            os.environ["MPLBACKEND"] = "module://matplotlib_inline.backend_inline"

        # Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab`
        # to ensure that any exception is printed straight to stderr.
        # Normally _showtraceback associates the reply with an execution,
        # which means frontends will never draw it, as this exception
        # is not associated with any execute request.

        shell = self.shell
        assert shell is not None
        _showtraceback = shell._showtraceback
        try:
            # replace error-sending traceback with stderr
            def print_tb(etype, evalue, stb):
                print("GUI event loop or pylab initialization failed", file=sys.stderr)
                assert shell is not None
                print(shell.InteractiveTB.stb2text(stb), file=sys.stderr)

            shell._showtraceback = print_tb
            InteractiveShellApp.init_gui_pylab(self)
        finally:
            shell._showtraceback = _showtraceback

    def init_shell(self):
        self.shell = getattr(self.kernel, "shell", None)
        if self.shell:
            self.shell.configurables.append(self)

    def configure_tornado_logger(self):
        """Configure the tornado logging.Logger.

        Must set up the tornado logger or else tornado will call
        basicConfig for the root logger which makes the root logger
        go to the real sys.stderr instead of the capture streams.
        This function mimics the setup of logging.basicConfig.
        """
        logger = logging.getLogger("tornado")
        handler = logging.StreamHandler()
        formatter = logging.Formatter(logging.BASIC_FORMAT)
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    def _init_asyncio_patch(self):
        """set default asyncio policy to be compatible with tornado

        Tornado 6 (at least) is not compatible with the default
        asyncio implementation on Windows

        Pick the older SelectorEventLoopPolicy on Windows
        if the known-incompatible default policy is in use.

        Support for Proactor via a background thread is available in tornado 6.1,
        but it is still preferable to run the Selector in the main thread
        instead of the background.

        do this as early as possible to make it a low priority and overrideable

        ref: https://github.com/tornadoweb/tornado/issues/2608

        FIXME: if/when tornado supports the defaults in asyncio without threads,
        remove and bump tornado requirement for py38.
        Most likely, this will mean a new Python version
        where asyncio.ProactorEventLoop supports add_reader and friends.

        """
        if sys.platform.startswith("win") and sys.version_info >= (3, 8):
            import asyncio

            try:
                from asyncio import (
                    WindowsProactorEventLoopPolicy,
                    WindowsSelectorEventLoopPolicy,
                )
            except ImportError:
                # not affected
                pass
            else:
                if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
                    # WindowsProactorEventLoopPolicy is not compatible with tornado 6
                    # fallback to the pre-3.8 default of Selector
                    asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())

    def init_pdb(self):
        """Replace pdb with IPython's version that is interruptible.

        With the non-interruptible version, stopping pdb() locks up the kernel in a
        non-recoverable state.
        """
        import pdb

        from IPython.core import debugger

        if hasattr(debugger, "InterruptiblePdb"):
            # Only available in newer IPython releases:
            debugger.Pdb = debugger.InterruptiblePdb
            pdb.Pdb = debugger.Pdb  # type:ignore[misc]
            pdb.set_trace = debugger.set_trace

    @catch_config_error
    def initialize(self, argv=None):
        self._init_asyncio_patch()
        super().initialize(argv)
        if self.subapp is not None:
            return

        self.init_pdb()
        self.init_blackhole()
        self.init_connection_file()
        self.init_poller()
        self.init_sockets()
        self.init_heartbeat()
        # writing/displaying connection info must be *after* init_sockets/heartbeat
        self.write_connection_file()
        # Log connection info after writing connection file, so that the connection
        # file is definitely available at the time someone reads the log.
        self.log_connection_info()
        self.init_io()
        try:
            self.init_signal()
        except Exception:
            # Catch exception when initializing signal fails, eg when running the
            # kernel on a separate thread
            if self.log_level < logging.CRITICAL:
                self.log.error("Unable to initialize signal:", exc_info=True)
        self.init_kernel()
        # shell init steps
        self.init_path()
        self.init_shell()
        if self.shell:
            self.init_gui_pylab()
            self.init_extensions()
            self.init_code()
        # flush stdout/stderr, so that anything written to these streams during
        # initialization does not get associated with the first execution request
        sys.stdout.flush()
        sys.stderr.flush()

    def start(self):
        if self.subapp is not None:
            return self.subapp.start()
        if self.poller is not None:
            self.poller.start()
        self.kernel.start()
        self.io_loop = ioloop.IOLoop.current()
        if self.trio_loop:
            from ipykernel.trio_runner import TrioRunner

            tr = TrioRunner()
            tr.initialize(self.kernel, self.io_loop)
            try:
                tr.run()
            except KeyboardInterrupt:
                pass
        else:
            try:
                self.io_loop.start()
            except KeyboardInterrupt:
                pass


launch_new_instance = IPKernelApp.launch_instance


def main():
    """Run an IPKernel as an application"""
    app = IPKernelApp.instance()
    app.initialize()
    app.start()


if __name__ == "__main__":
    main()
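A minimal way to exercise the application above is to launch it in-process via traitlets' launch_instance, which is what the ipykernel_launcher entry point does. This is an editorial sketch, not part of the vendored tree; the connection-file path is a hypothetical example.

# Sketch: start an IPython kernel in this process (blocks in the IO loop).
# Roughly equivalent to `python -m ipykernel_launcher -f /tmp/kernel-demo.json`.
from ipykernel.kernelapp import IPKernelApp

IPKernelApp.launch_instance(argv=["-f", "/tmp/kernel-demo.json"])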
1295
.venv/Lib/site-packages/ipykernel/kernelbase.py
Normal file
File diff suppressed because it is too large
245
.venv/Lib/site-packages/ipykernel/kernelspec.py
Normal file
@ -0,0 +1,245 @@
"""The IPython kernel spec for Jupyter"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import errno
import json
import os
import shutil
import stat
import sys
import tempfile

from jupyter_client.kernelspec import KernelSpecManager

from .debugger import _is_debugpy_available

pjoin = os.path.join

KERNEL_NAME = "python%i" % sys.version_info[0]

# path to kernelspec resources
RESOURCES = pjoin(os.path.dirname(__file__), "resources")


def make_ipkernel_cmd(mod="ipykernel_launcher", executable=None, extra_arguments=None):
    """Build Popen command list for launching an IPython kernel.

    Parameters
    ----------
    mod : str, optional (default 'ipykernel_launcher')
        A string of an IPython module whose __main__ starts an IPython kernel
    executable : str, optional (default sys.executable)
        The Python executable to use for the kernel process.
    extra_arguments : list, optional
        A list of extra arguments to pass when executing the launch code.

    Returns
    -------
    A Popen command list
    """
    if executable is None:
        executable = sys.executable
    extra_arguments = extra_arguments or []
    arguments = [executable, "-m", mod, "-f", "{connection_file}"]
    arguments.extend(extra_arguments)

    return arguments


def get_kernel_dict(extra_arguments=None):
    """Construct dict for kernel.json"""
    return {
        "argv": make_ipkernel_cmd(extra_arguments=extra_arguments),
        "display_name": "Python %i (ipykernel)" % sys.version_info[0],
        "language": "python",
        "metadata": {"debugger": _is_debugpy_available},
    }


def write_kernel_spec(path=None, overrides=None, extra_arguments=None):
    """Write a kernel spec directory to `path`

    If `path` is not specified, a temporary directory is created.
    If `overrides` is given, the kernelspec JSON is updated before writing.

    The path to the kernelspec is always returned.
    """
    if path is None:
        path = os.path.join(tempfile.mkdtemp(suffix="_kernels"), KERNEL_NAME)

    # stage resources
    shutil.copytree(RESOURCES, path)

    # ensure path is writable
    mask = os.stat(path).st_mode
    if not mask & stat.S_IWUSR:
        os.chmod(path, mask | stat.S_IWUSR)

    # write kernel.json
    kernel_dict = get_kernel_dict(extra_arguments)

    if overrides:
        kernel_dict.update(overrides)
    with open(pjoin(path, "kernel.json"), "w") as f:
        json.dump(kernel_dict, f, indent=1)

    return path


def install(
    kernel_spec_manager=None,
    user=False,
    kernel_name=KERNEL_NAME,
    display_name=None,
    prefix=None,
    profile=None,
    env=None,
):
    """Install the IPython kernelspec for Jupyter

    Parameters
    ----------
    kernel_spec_manager : KernelSpecManager [optional]
        A KernelSpecManager to use for installation.
        If none provided, a default instance will be created.
    user : bool [default: False]
        Whether to do a user-only install, or system-wide.
    kernel_name : str, optional
        Specify a name for the kernelspec.
        This is needed for having multiple IPython kernels for different environments.
    display_name : str, optional
        Specify the display name for the kernelspec
    profile : str, optional
        Specify a custom profile to be loaded by the kernel.
    prefix : str, optional
        Specify an install prefix for the kernelspec.
        This is needed to install into a non-default location, such as a conda/virtual-env.
    env : dict, optional
        A dictionary of extra environment variables for the kernel.
        These will be added to the current environment variables before the
        kernel is started

    Returns
    -------
    The path where the kernelspec was installed.
    """
    if kernel_spec_manager is None:
        kernel_spec_manager = KernelSpecManager()

    if (kernel_name != KERNEL_NAME) and (display_name is None):
        # kernel_name is specified and display_name is not
        # default display_name to kernel_name
        display_name = kernel_name
    overrides = {}
    if display_name:
        overrides["display_name"] = display_name
    if profile:
        extra_arguments = ["--profile", profile]
        if not display_name:
            # add the profile to the default display name
            overrides["display_name"] = "Python %i [profile=%s]" % (sys.version_info[0], profile)
    else:
        extra_arguments = None
    if env:
        overrides["env"] = env
    path = write_kernel_spec(overrides=overrides, extra_arguments=extra_arguments)
    dest = kernel_spec_manager.install_kernel_spec(
        path, kernel_name=kernel_name, user=user, prefix=prefix
    )
    # cleanup afterward
    shutil.rmtree(path)
    return dest


# Entrypoint

from traitlets.config import Application


class InstallIPythonKernelSpecApp(Application):
    """Dummy app wrapping argparse"""

    name = "ipython-kernel-install"

    def initialize(self, argv=None):
        if argv is None:
            argv = sys.argv[1:]
        self.argv = argv

    def start(self):
        import argparse

        parser = argparse.ArgumentParser(
            prog=self.name, description="Install the IPython kernel spec."
        )
        parser.add_argument(
            "--user",
            action="store_true",
            help="Install for the current user instead of system-wide",
        )
        parser.add_argument(
            "--name",
            type=str,
            default=KERNEL_NAME,
            help="Specify a name for the kernelspec."
            " This is needed to have multiple IPython kernels at the same time.",
        )
        parser.add_argument(
            "--display-name",
            type=str,
            help="Specify the display name for the kernelspec."
            " This is helpful when you have multiple IPython kernels.",
        )
        parser.add_argument(
            "--profile",
            type=str,
            help="Specify an IPython profile to load. "
            "This can be used to create custom versions of the kernel.",
        )
        parser.add_argument(
            "--prefix",
            type=str,
            help="Specify an install prefix for the kernelspec."
            " This is needed to install into a non-default location, such as a conda/virtual-env.",
        )
        parser.add_argument(
            "--sys-prefix",
            action="store_const",
            const=sys.prefix,
            dest="prefix",
            help="Install to Python's sys.prefix."
            " Shorthand for --prefix='%s'. For use in conda/virtual-envs." % sys.prefix,
        )
        parser.add_argument(
            "--env",
            action="append",
            nargs=2,
            metavar=("ENV", "VALUE"),
            help="Set environment variables for the kernel.",
        )
        opts = parser.parse_args(self.argv)
        if opts.env:
            opts.env = {k: v for (k, v) in opts.env}
        try:
            dest = install(
                user=opts.user,
                kernel_name=opts.name,
                profile=opts.profile,
                prefix=opts.prefix,
                display_name=opts.display_name,
                env=opts.env,
            )
        except OSError as e:
            if e.errno == errno.EACCES:
                print(e, file=sys.stderr)
                if opts.user:
                    print("Perhaps you want `sudo` or `--user`?", file=sys.stderr)
                self.exit(1)
            raise
        print("Installed kernelspec %s in %s" % (opts.name, dest))


if __name__ == "__main__":
    InstallIPythonKernelSpecApp.launch_instance()
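The install() function above stages the bundled resources, writes kernel.json, and hands the directory to jupyter_client's KernelSpecManager. A short usage sketch (the kernel name and display name below are hypothetical examples):

# Sketch: install a user-level kernelspec for the current interpreter.
from ipykernel.kernelspec import install

dest = install(user=True, kernel_name="py3-demo", display_name="Python 3 (demo)")
print("kernelspec written to", dest)  # e.g. under the user's jupyter data dir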
28
.venv/Lib/site-packages/ipykernel/log.py
Normal file
@ -0,0 +1,28 @@
import warnings

from zmq.log.handlers import PUBHandler

warnings.warn(
    "ipykernel.log is deprecated. It has moved to ipyparallel.engine.log",
    DeprecationWarning,
    stacklevel=2,
)


class EnginePUBHandler(PUBHandler):
    """A simple PUBHandler subclass that sets root_topic"""

    engine = None

    def __init__(self, engine, *args, **kwargs):
        PUBHandler.__init__(self, *args, **kwargs)
        self.engine = engine

    @property  # type:ignore[misc]
    def root_topic(self):
        """this is a property, in case the handler is created
        before the engine gets registered with an id"""
        if isinstance(getattr(self.engine, "id", None), int):
            return "engine.%i" % self.engine.id  # type:ignore[union-attr]
        else:
            return "engine"
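A sketch of attaching this (deprecated) handler to a zmq PUB socket; the endpoint and logger name are arbitrary examples, and extra positional arguments are forwarded to PUBHandler, whose first argument is the socket or interface:

# Sketch: route log records over a zmq PUB socket; with engine=None the
# topic falls back to "engine" (see root_topic above).
import logging

import zmq

from ipykernel.log import EnginePUBHandler  # deprecated; see ipyparallel.engine.log

ctx = zmq.Context.instance()
pub = ctx.socket(zmq.PUB)
pub.bind("tcp://127.0.0.1:5557")

handler = EnginePUBHandler(None, pub)
logging.getLogger("demo").addHandler(handler)
logging.getLogger("demo").error("hello over zmq")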
116
.venv/Lib/site-packages/ipykernel/parentpoller.py
Normal file
@ -0,0 +1,116 @@
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

try:
    import ctypes
except ImportError:
    ctypes = None  # type:ignore[assignment]
import os
import platform
import signal
import time
import warnings
from _thread import interrupt_main  # Py 3
from threading import Thread

from traitlets.log import get_logger


class ParentPollerUnix(Thread):
    """A Unix-specific daemon thread that terminates the program immediately
    when the parent process no longer exists.
    """

    def __init__(self):
        super().__init__()
        self.daemon = True

    def run(self):
        # We cannot use os.waitpid because it works only for child processes.
        from errno import EINTR

        while True:
            try:
                if os.getppid() == 1:
                    get_logger().warning("Parent appears to have exited, shutting down.")
                    os._exit(1)
                time.sleep(1.0)
            except OSError as e:
                if e.errno == EINTR:
                    continue
                raise


class ParentPollerWindows(Thread):
    """A Windows-specific daemon thread that listens for a special event that
    signals an interrupt and, optionally, terminates the program immediately
    when the parent process no longer exists.
    """

    def __init__(self, interrupt_handle=None, parent_handle=None):
        """Create the poller. At least one of the optional parameters must be
        provided.

        Parameters
        ----------
        interrupt_handle : HANDLE (int), optional
            If provided, the program will generate a Ctrl+C event when this
            handle is signaled.
        parent_handle : HANDLE (int), optional
            If provided, the program will terminate immediately when this
            handle is signaled.
        """
        assert interrupt_handle or parent_handle
        super().__init__()
        if ctypes is None:
            raise ImportError("ParentPollerWindows requires ctypes")
        self.daemon = True
        self.interrupt_handle = interrupt_handle
        self.parent_handle = parent_handle

    def run(self):
        """Run the poll loop. This method never returns."""
        try:
            from _winapi import INFINITE, WAIT_OBJECT_0  # type:ignore[attr-defined]
        except ImportError:
            from _subprocess import INFINITE, WAIT_OBJECT_0

        # Build the list of handles to listen on.
        handles = []
        if self.interrupt_handle:
            handles.append(self.interrupt_handle)
        if self.parent_handle:
            handles.append(self.parent_handle)
        arch = platform.architecture()[0]
        c_int = ctypes.c_int64 if arch.startswith("64") else ctypes.c_int

        # Listen forever.
        while True:
            result = ctypes.windll.kernel32.WaitForMultipleObjects(  # type:ignore[attr-defined]
                len(handles),  # nCount
                (c_int * len(handles))(*handles),  # lpHandles
                False,  # bWaitAll
                INFINITE,
            )  # dwMilliseconds

            if WAIT_OBJECT_0 <= result < len(handles):
                handle = handles[result - WAIT_OBJECT_0]

                if handle == self.interrupt_handle:
                    # check if signal handler is callable
                    # to avoid 'int not callable' error (Python issue #23395)
                    if callable(signal.getsignal(signal.SIGINT)):
                        interrupt_main()

                elif handle == self.parent_handle:
                    get_logger().warning("Parent appears to have exited, shutting down.")
                    os._exit(1)
            elif result < 0:
                # wait failed, just give up and stop polling.
                warnings.warn(
                    """Parent poll failed. If the frontend dies,
                the kernel may be left running. Please let us know
                about your system (bitness, Python, etc.) at
                ipython-dev@scipy.org"""
                )
                return
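On Unix the poller is trivial to use: start it from a child process and the process exits on its own once the parent dies. A sketch:

# Sketch: in a subprocess, exit automatically when the parent goes away.
from ipykernel.parentpoller import ParentPollerUnix

poller = ParentPollerUnix()  # daemon thread; checks os.getppid() once per second
poller.start()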
471
.venv/Lib/site-packages/ipykernel/pickleutil.py
Normal file
@ -0,0 +1,471 @@
"""Pickle related utilities. Perhaps this should be called 'can'."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import typing
import warnings

warnings.warn(
    "ipykernel.pickleutil is deprecated. It has moved to ipyparallel.",
    DeprecationWarning,
    stacklevel=2,
)

import copy
import pickle
import sys
from types import FunctionType

# This registers a hook when it's imported
from ipyparallel.serialize import codeutil  # noqa F401
from traitlets.log import get_logger
from traitlets.utils.importstring import import_item

buffer = memoryview
class_type = type

PICKLE_PROTOCOL = pickle.DEFAULT_PROTOCOL


def _get_cell_type(a=None):
    """the type of a closure cell doesn't seem to be importable,
    so just create one
    """

    def inner():
        return a

    return type(inner.__closure__[0])  # type:ignore[index]


cell_type = _get_cell_type()

# -------------------------------------------------------------------------------
# Functions
# -------------------------------------------------------------------------------


def interactive(f):
    """decorator for making functions appear as interactively defined.
    This results in the function being linked to the user_ns as globals()
    instead of the module globals().
    """

    # build new FunctionType, so it can have the right globals
    # interactive functions never have closures, that's kind of the point
    if isinstance(f, FunctionType):
        mainmod = __import__("__main__")
        f = FunctionType(
            f.__code__,
            mainmod.__dict__,
            f.__name__,
            f.__defaults__,
        )
    # associate with __main__ for uncanning
    f.__module__ = "__main__"
    return f


def use_dill():
    """use dill to expand serialization support

    adds support for object methods and closures to serialization.
    """
    # import dill causes most of the magic
    import dill

    # dill doesn't work with cPickle,
    # tell the two relevant modules to use plain pickle

    global pickle
    pickle = dill

    try:
        from ipykernel import serialize
    except ImportError:
        pass
    else:
        serialize.pickle = dill  # type:ignore[attr-defined]

    # disable special function handling, let dill take care of it
    can_map.pop(FunctionType, None)


def use_cloudpickle():
    """use cloudpickle to expand serialization support

    adds support for object methods and closures to serialization.
    """
    import cloudpickle

    global pickle
    pickle = cloudpickle

    try:
        from ipykernel import serialize
    except ImportError:
        pass
    else:
        serialize.pickle = cloudpickle  # type:ignore[attr-defined]

    # disable special function handling, let cloudpickle take care of it
    can_map.pop(FunctionType, None)


# -------------------------------------------------------------------------------
# Classes
# -------------------------------------------------------------------------------


class CannedObject:
    def __init__(self, obj, keys=None, hook=None):
        """can an object for safe pickling

        Parameters
        ----------
        obj
            The object to be canned
        keys : list (optional)
            list of attribute names that will be explicitly canned / uncanned
        hook : callable (optional)
            An optional extra callable,
            which can do additional processing of the uncanned object.

        Notes
        -----
        large data may be offloaded into the buffers list,
        used for zero-copy transfers.
        """
        self.keys = keys or []
        self.obj = copy.copy(obj)
        self.hook = can(hook)
        for key in keys:
            setattr(self.obj, key, can(getattr(obj, key)))

        self.buffers = []

    def get_object(self, g=None):
        if g is None:
            g = {}
        obj = self.obj
        for key in self.keys:
            setattr(obj, key, uncan(getattr(obj, key), g))

        if self.hook:
            self.hook = uncan(self.hook, g)
            self.hook(obj, g)
        return self.obj


class Reference(CannedObject):
    """object for wrapping a remote reference by name."""

    def __init__(self, name):
        if not isinstance(name, str):
            raise TypeError("illegal name: %r" % name)
        self.name = name
        self.buffers = []

    def __repr__(self):
        return "<Reference: %r>" % self.name

    def get_object(self, g=None):
        if g is None:
            g = {}

        return eval(self.name, g)


class CannedCell(CannedObject):
    """Can a closure cell"""

    def __init__(self, cell):
        self.cell_contents = can(cell.cell_contents)

    def get_object(self, g=None):
        cell_contents = uncan(self.cell_contents, g)

        def inner():
            return cell_contents

        return inner.__closure__[0]  # type:ignore[index]


class CannedFunction(CannedObject):
    def __init__(self, f):
        self._check_type(f)
        self.code = f.__code__
        self.defaults: typing.Optional[typing.List[typing.Any]]
        if f.__defaults__:
            self.defaults = [can(fd) for fd in f.__defaults__]
        else:
            self.defaults = None

        self.closure: typing.Any
        closure = f.__closure__
        if closure:
            self.closure = tuple(can(cell) for cell in closure)
        else:
            self.closure = None

        self.module = f.__module__ or "__main__"
        self.__name__ = f.__name__
        self.buffers = []

    def _check_type(self, obj):
        assert isinstance(obj, FunctionType), "Not a function type"

    def get_object(self, g=None):
        # try to load function back into its module:
        if not self.module.startswith("__"):
            __import__(self.module)
            g = sys.modules[self.module].__dict__

        if g is None:
            g = {}
        if self.defaults:
            defaults = tuple(uncan(cfd, g) for cfd in self.defaults)
        else:
            defaults = None
        if self.closure:
            closure = tuple(uncan(cell, g) for cell in self.closure)
        else:
            closure = None
        newFunc = FunctionType(self.code, g, self.__name__, defaults, closure)
        return newFunc


class CannedClass(CannedObject):
    def __init__(self, cls):
        self._check_type(cls)
        self.name = cls.__name__
        self.old_style = not isinstance(cls, type)
        self._canned_dict = {}
        for k, v in cls.__dict__.items():
            if k not in ("__weakref__", "__dict__"):
                self._canned_dict[k] = can(v)
        if self.old_style:
            mro = []
        else:
            mro = cls.mro()

        self.parents = [can(c) for c in mro[1:]]
        self.buffers = []

    def _check_type(self, obj):
        assert isinstance(obj, class_type), "Not a class type"

    def get_object(self, g=None):
        parents = tuple(uncan(p, g) for p in self.parents)
        return type(self.name, parents, uncan_dict(self._canned_dict, g=g))


class CannedArray(CannedObject):
    def __init__(self, obj):
        from numpy import ascontiguousarray

        self.shape = obj.shape
        self.dtype = obj.dtype.descr if obj.dtype.fields else obj.dtype.str
        self.pickled = False
        if sum(obj.shape) == 0:
            self.pickled = True
        elif obj.dtype == "O":
            # can't handle object dtype with buffer approach
            self.pickled = True
        elif obj.dtype.fields and any(dt == "O" for dt, sz in obj.dtype.fields.values()):
            self.pickled = True
        if self.pickled:
            # just pickle it
            self.buffers = [pickle.dumps(obj, PICKLE_PROTOCOL)]
        else:
            # ensure contiguous
            obj = ascontiguousarray(obj, dtype=None)
            self.buffers = [buffer(obj)]

    def get_object(self, g=None):
        from numpy import frombuffer

        data = self.buffers[0]
        if self.pickled:
            # we just pickled it
            return pickle.loads(data)
        else:
            return frombuffer(data, dtype=self.dtype).reshape(self.shape)


class CannedBytes(CannedObject):
    @staticmethod
    def wrap(buf: typing.Union[memoryview, bytes, typing.SupportsBytes]) -> bytes:
        """Cast a buffer or memoryview object to bytes"""
        if isinstance(buf, memoryview):
            return buf.tobytes()
        if not isinstance(buf, bytes):
            return bytes(buf)
        return buf

    def __init__(self, obj):
        self.buffers = [obj]

    def get_object(self, g=None):
        data = self.buffers[0]
        return self.wrap(data)


class CannedBuffer(CannedBytes):
    wrap = buffer  # type:ignore[assignment]


class CannedMemoryView(CannedBytes):
    wrap = memoryview  # type:ignore[assignment]


# -------------------------------------------------------------------------------
# Functions
# -------------------------------------------------------------------------------


def _import_mapping(mapping, original=None):
    """import any string-keys in a type mapping"""
    log = get_logger()
    log.debug("Importing canning map")
    for key, _ in list(mapping.items()):
        if isinstance(key, str):
            try:
                cls = import_item(key)
            except Exception:
                if original and key not in original:
                    # only message on user-added classes
                    log.error("canning class not importable: %r", key, exc_info=True)
                mapping.pop(key)
            else:
                mapping[cls] = mapping.pop(key)


def istype(obj, check):
    """like isinstance(obj, check), but strict

    This won't catch subclasses.
    """
    if isinstance(check, tuple):
        for cls in check:
            if type(obj) is cls:
                return True
        return False
    else:
        return type(obj) is check


def can(obj):
    """prepare an object for pickling"""

    import_needed = False

    for cls, canner in can_map.items():
        if isinstance(cls, str):
            import_needed = True
            break
        elif istype(obj, cls):
            return canner(obj)

    if import_needed:
        # perform can_map imports, then try again
        # this will usually only happen once
        _import_mapping(can_map, _original_can_map)
        return can(obj)

    return obj


def can_class(obj):
    if isinstance(obj, class_type) and obj.__module__ == "__main__":
        return CannedClass(obj)
    else:
        return obj


def can_dict(obj):
    """can the *values* of a dict"""
    if istype(obj, dict):
        newobj = {}
        for k, v in obj.items():
            newobj[k] = can(v)
        return newobj
    else:
        return obj


sequence_types = (list, tuple, set)


def can_sequence(obj):
    """can the elements of a sequence"""
    if istype(obj, sequence_types):
        t = type(obj)
        return t([can(i) for i in obj])
    else:
        return obj


def uncan(obj, g=None):
    """invert canning"""

    import_needed = False
    for cls, uncanner in uncan_map.items():
        if isinstance(cls, str):
            import_needed = True
            break
        elif isinstance(obj, cls):
            return uncanner(obj, g)

    if import_needed:
        # perform uncan_map imports, then try again
        # this will usually only happen once
        _import_mapping(uncan_map, _original_uncan_map)
        return uncan(obj, g)

    return obj


def uncan_dict(obj, g=None):
    if istype(obj, dict):
        newobj = {}
        for k, v in obj.items():
            newobj[k] = uncan(v, g)
        return newobj
    else:
        return obj


def uncan_sequence(obj, g=None):
    if istype(obj, sequence_types):
        t = type(obj)
        return t([uncan(i, g) for i in obj])
    else:
        return obj


# -------------------------------------------------------------------------------
# API dictionaries
# -------------------------------------------------------------------------------

# These dicts can be extended for custom serialization of new objects

can_map = {
    "numpy.ndarray": CannedArray,
    FunctionType: CannedFunction,
    bytes: CannedBytes,
    memoryview: CannedMemoryView,
    cell_type: CannedCell,
    class_type: can_class,
}
if buffer is not memoryview:
    can_map[buffer] = CannedBuffer

uncan_map: typing.Dict[type, typing.Any] = {
    CannedObject: lambda obj, g: obj.get_object(g),
    dict: uncan_dict,
}

# for use in _import_mapping:
_original_can_map = can_map.copy()
_original_uncan_map = uncan_map.copy()
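The can/uncan pair wraps otherwise-unpicklable objects (functions, closure cells, classes, arrays) in Canned* containers that plain pickle can handle. A round-trip sketch, assuming ipyparallel is installed (importing this module registers a code-object reducer via ipyparallel.serialize.codeutil):

# Sketch: round-trip an interactively defined function through can/uncan.
import pickle

from ipykernel.pickleutil import can, uncan  # deprecated; moved to ipyparallel

def add(x, y=2):
    return x + y

blob = pickle.dumps(can(add))         # FunctionType is wrapped as CannedFunction
restored = uncan(pickle.loads(blob))  # rebuilt via FunctionType(code, globals, ...)
assert restored(3) == 5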
0
.venv/Lib/site-packages/ipykernel/py.typed
Normal file
0
.venv/Lib/site-packages/ipykernel/pylab/__init__.py
Normal file
14
.venv/Lib/site-packages/ipykernel/pylab/backend_inline.py
Normal file
@ -0,0 +1,14 @@
"""A matplotlib backend for publishing figures via display_data"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import warnings

from matplotlib_inline.backend_inline import *  # analysis: ignore # noqa F401

warnings.warn(
    "`ipykernel.pylab.backend_inline` is deprecated, directly "
    "use `matplotlib_inline.backend_inline`",
    DeprecationWarning,
)
13
.venv/Lib/site-packages/ipykernel/pylab/config.py
Normal file
@ -0,0 +1,13 @@
"""Configurable for configuring the IPython inline backend

This module does not import anything from matplotlib.
"""

import warnings

from matplotlib_inline.config import *  # analysis: ignore # noqa F401

warnings.warn(
    "`ipykernel.pylab.config` is deprecated, directly use `matplotlib_inline.config`",
    DeprecationWarning,
)
BIN
.venv/Lib/site-packages/ipykernel/resources/logo-32x32.png
Normal file
Binary file not shown. Size: 1.1 KiB
BIN
.venv/Lib/site-packages/ipykernel/resources/logo-64x64.png
Normal file
Binary file not shown. Size: 2.1 KiB
204
.venv/Lib/site-packages/ipykernel/serialize.py
Normal file
@ -0,0 +1,204 @@
|
||||
"""serialization utilities for apply messages"""
|
||||
|
||||
# Copyright (c) IPython Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
|
||||
import warnings
|
||||
|
||||
warnings.warn(
|
||||
"ipykernel.serialize is deprecated. It has moved to ipyparallel.serialize",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
import pickle
|
from itertools import chain

try:
    # available since ipyparallel 5.0.0
    from ipyparallel.serialize.canning import (
        CannedObject,
        can,
        can_sequence,
        istype,
        sequence_types,
        uncan,
        uncan_sequence,
    )
    from ipyparallel.serialize.serialize import PICKLE_PROTOCOL
except ImportError:
    # Deprecated since ipykernel 4.3.0
    from ipykernel.pickleutil import (
        can,
        uncan,
        can_sequence,
        uncan_sequence,
        CannedObject,
        istype,
        sequence_types,
        PICKLE_PROTOCOL,
    )

from jupyter_client.session import MAX_BYTES, MAX_ITEMS

# -----------------------------------------------------------------------------
# Serialization Functions
# -----------------------------------------------------------------------------


def _extract_buffers(obj, threshold=MAX_BYTES):
    """extract buffers larger than a certain threshold"""
    buffers = []
    if isinstance(obj, CannedObject) and obj.buffers:
        for i, buf in enumerate(obj.buffers):
            if len(buf) > threshold:
                # buffer larger than threshold, prevent pickling
                obj.buffers[i] = None
                buffers.append(buf)
            # buffer too small for separate send, coerce to bytes
            # because pickling buffer objects just results in broken pointers
            elif isinstance(buf, memoryview):
                obj.buffers[i] = buf.tobytes()
    return buffers


def _restore_buffers(obj, buffers):
    """restore buffers extracted by _extract_buffers"""
    if isinstance(obj, CannedObject) and obj.buffers:
        for i, buf in enumerate(obj.buffers):
            if buf is None:
                obj.buffers[i] = buffers.pop(0)


def serialize_object(obj, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS):
    """Serialize an object into a list of sendable buffers.

    Parameters
    ----------
    obj : object
        The object to be serialized
    buffer_threshold : int
        The threshold (in bytes) for pulling out data buffers
        to avoid pickling them.
    item_threshold : int
        The maximum number of items over which canning will iterate.
        Containers (lists, dicts) larger than this will be pickled without
        introspection.

    Returns
    -------
    [bufs] : list of buffers representing the serialized object.
    """
    buffers = []
    if istype(obj, sequence_types) and len(obj) < item_threshold:
        cobj = can_sequence(obj)
        for c in cobj:
            buffers.extend(_extract_buffers(c, buffer_threshold))
    elif istype(obj, dict) and len(obj) < item_threshold:
        cobj = {}
        for k in sorted(obj):
            c = can(obj[k])
            buffers.extend(_extract_buffers(c, buffer_threshold))
            cobj[k] = c
    else:
        cobj = can(obj)
        buffers.extend(_extract_buffers(cobj, buffer_threshold))

    buffers.insert(0, pickle.dumps(cobj, PICKLE_PROTOCOL))
    return buffers


def deserialize_object(buffers, g=None):
    """reconstruct an object serialized by serialize_object from data buffers.

    Parameters
    ----------
    buffers : list of buffers/bytes
    g : globals to be used when uncanning

    Returns
    -------
    (newobj, bufs) : unpacked object, and the list of remaining unused buffers.
    """
    bufs = list(buffers)
    pobj = bufs.pop(0)
    canned = pickle.loads(pobj)
    if istype(canned, sequence_types) and len(canned) < MAX_ITEMS:
        for c in canned:
            _restore_buffers(c, bufs)
        newobj = uncan_sequence(canned, g)
    elif istype(canned, dict) and len(canned) < MAX_ITEMS:
        newobj = {}
        for k in sorted(canned):
            c = canned[k]
            _restore_buffers(c, bufs)
            newobj[k] = uncan(c, g)
    else:
        _restore_buffers(canned, bufs)
        newobj = uncan(canned, g)

    return newobj, bufs


def pack_apply_message(f, args, kwargs, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS):
    """pack up a function, args, and kwargs to be sent over the wire

    Each element of args/kwargs will be canned for special treatment,
    but inspection will not go any deeper than that.

    Any object whose data is larger than `threshold` will not have its data copied
    (only numpy arrays and bytes/buffers support zero-copy)

    Message will be a list of bytes/buffers of the format:

    [ cf, pinfo, <arg_bufs>, <kwarg_bufs> ]

    With length at least two + len(args) + len(kwargs)
    """

    arg_bufs = list(
        chain.from_iterable(serialize_object(arg, buffer_threshold, item_threshold) for arg in args)
    )

    kw_keys = sorted(kwargs.keys())
    kwarg_bufs = list(
        chain.from_iterable(
            serialize_object(kwargs[key], buffer_threshold, item_threshold) for key in kw_keys
        )
    )

    info = dict(nargs=len(args), narg_bufs=len(arg_bufs), kw_keys=kw_keys)

    msg = [pickle.dumps(can(f), PICKLE_PROTOCOL)]
    msg.append(pickle.dumps(info, PICKLE_PROTOCOL))
    msg.extend(arg_bufs)
    msg.extend(kwarg_bufs)

    return msg


def unpack_apply_message(bufs, g=None, copy=True):
    """unpack f,args,kwargs from buffers packed by pack_apply_message()
    Returns: original f,args,kwargs"""
    bufs = list(bufs)  # allow us to pop
    assert len(bufs) >= 2, "not enough buffers!"
    pf = bufs.pop(0)
    f = uncan(pickle.loads(pf), g)
    pinfo = bufs.pop(0)
    info = pickle.loads(pinfo)
    arg_bufs, kwarg_bufs = bufs[: info["narg_bufs"]], bufs[info["narg_bufs"] :]

    args_list = []
    for _ in range(info["nargs"]):
        arg, arg_bufs = deserialize_object(arg_bufs, g)
        args_list.append(arg)
    args = tuple(args_list)
    assert not arg_bufs, "Shouldn't be any arg bufs left over"

    kwargs = {}
    for key in info["kw_keys"]:
        kwarg, kwarg_bufs = deserialize_object(kwarg_bufs, g)
        kwargs[key] = kwarg
    assert not kwarg_bufs, "Shouldn't be any kwarg bufs left over"

    return f, args, kwargs
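
A quick round-trip sketch of the helpers above (illustrative only; the sample
payload and the `_add` function are made up for this example):

    def _add(x, y):
        return x + y

    data = {"a": list(range(3)), "b": b"\x00" * 10}
    bufs = serialize_object(data)            # [pickled canned dict, big buffers...]
    obj, leftover = deserialize_object(bufs)
    assert obj == data and not leftover      # small buffers stay in the pickle

    msg = pack_apply_message(_add, (2, 3), {})
    f, args, kwargs = unpack_apply_message(msg)
    assert f(*args, **kwargs) == 5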
47
.venv/Lib/site-packages/ipykernel/tests/__init__.py
Normal file
@ -0,0 +1,47 @@
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import os
import shutil
import sys
import tempfile
from unittest.mock import patch

from ipykernel.kernelspec import install

pjoin = os.path.join

tmp = None
patchers = []


def setup():
    """setup temporary env for tests"""
    global tmp
    tmp = tempfile.mkdtemp()
    patchers[:] = [
        patch.dict(
            os.environ,
            {
                "HOME": tmp,
                # Let tests work with --user install when HOME is changed:
                "PYTHONPATH": os.pathsep.join(sys.path),
            },
        ),
    ]
    for p in patchers:
        p.start()

    # install IPython in the temp home:
    install(user=True)


def teardown():
    for p in patchers:
        p.stop()

    try:
        shutil.rmtree(tmp)
    except OSError:
        # no such file
        pass
17
.venv/Lib/site-packages/ipykernel/tests/_asyncio_utils.py
Normal file
@ -0,0 +1,17 @@
"""test utilities that use async/await syntax

a separate file to avoid syntax errors on Python 2
"""

import asyncio


def async_func():
    """Simple async function to schedule a task on the current eventloop"""
    loop = asyncio.get_event_loop()
    assert loop.is_running()

    async def task():
        await asyncio.sleep(1)

    loop.create_task(task())
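
A minimal driver for async_func, for orientation (hypothetical, not part of
the test suite; async_func must be called while a loop is already running):

    import asyncio
    from ipykernel.tests._asyncio_utils import async_func

    async def main():
        async_func()              # schedules task() on the running loop
        await asyncio.sleep(1.1)  # give task() time to finish

    asyncio.run(main())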
28
.venv/Lib/site-packages/ipykernel/tests/conftest.py
Normal file
@ -0,0 +1,28 @@
import asyncio
import os

try:
    import resource
except ImportError:
    # Windows
    resource = None


# Handle resource limit
# Ensure a minimal soft limit of DEFAULT_SOFT if the current hard limit is at least that much.
if resource is not None:
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)

    DEFAULT_SOFT = 4096
    if hard >= DEFAULT_SOFT:
        soft = DEFAULT_SOFT

    if hard < soft:
        hard = soft

    resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))


# Enforce selector event loop on Windows.
if os.name == "nt":
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
58
.venv/Lib/site-packages/ipykernel/tests/test_async.py
Normal file
@ -0,0 +1,58 @@
"""Test async/await integration"""

import pytest

from .test_message_spec import validate_message
from .utils import TIMEOUT, execute, flush_channels, start_new_kernel

KC = KM = None


def setup_function():
    """start a fresh kernel for each test"""
    global KM, KC
    KM, KC = start_new_kernel()
    flush_channels(KC)


def teardown_function():
    KC.stop_channels()
    KM.shutdown_kernel(now=True)


def test_async_await():
    flush_channels(KC)
    msg_id, content = execute("import asyncio; await asyncio.sleep(0.1)", KC)
    assert content["status"] == "ok", content


@pytest.mark.parametrize("asynclib", ["asyncio", "trio", "curio"])
def test_async_interrupt(asynclib, request):
    try:
        __import__(asynclib)
    except ImportError:
        pytest.skip("Requires %s" % asynclib)
    request.addfinalizer(lambda: execute("%autoawait asyncio", KC))

    flush_channels(KC)
    msg_id, content = execute("%autoawait " + asynclib, KC)
    assert content["status"] == "ok", content

    flush_channels(KC)
    msg_id = KC.execute(f"print('begin'); import {asynclib}; await {asynclib}.sleep(5)")
    busy = KC.get_iopub_msg(timeout=TIMEOUT)
    validate_message(busy, "status", msg_id)
    assert busy["content"]["execution_state"] == "busy"
    echo = KC.get_iopub_msg(timeout=TIMEOUT)
    validate_message(echo, "execute_input")
    stream = KC.get_iopub_msg(timeout=TIMEOUT)
    # wait for the stream output to be sure kernel is in the async block
    validate_message(stream, "stream")
    assert stream["content"]["text"] == "begin\n"

    KM.interrupt_kernel()
    reply = KC.get_shell_msg()["content"]
    assert reply["status"] == "error", reply
    assert reply["ename"] in {"CancelledError", "KeyboardInterrupt"}

    flush_channels(KC)
131
.venv/Lib/site-packages/ipykernel/tests/test_connect.py
Normal file
@ -0,0 +1,131 @@
"""Tests for kernel connection utilities"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import errno
import json
import os
from tempfile import TemporaryDirectory
from unittest.mock import patch

import pytest
import zmq
from traitlets.config import Config

from ipykernel import connect
from ipykernel.kernelapp import IPKernelApp

from .utils import TemporaryWorkingDirectory

sample_info = {
    "ip": "1.2.3.4",
    "transport": "ipc",
    "shell_port": 1,
    "hb_port": 2,
    "iopub_port": 3,
    "stdin_port": 4,
    "control_port": 5,
    "key": b"abc123",
    "signature_scheme": "hmac-md5",
}


class DummyKernelApp(IPKernelApp):
    def _default_shell_port(self):
        return 0

    def initialize(self, argv=None):
        self.init_profile_dir()
        self.init_connection_file()


def test_get_connection_file():
    cfg = Config()
    with TemporaryWorkingDirectory() as d:
        cfg.ProfileDir.location = d
        cf = "kernel.json"
        app = DummyKernelApp(config=cfg, connection_file=cf)
        app.initialize()

        profile_cf = os.path.join(app.connection_dir, cf)
        assert profile_cf == app.abs_connection_file
        with open(profile_cf, "w") as f:
            f.write("{}")
        assert os.path.exists(profile_cf)
        assert connect.get_connection_file(app) == profile_cf

        app.connection_file = cf
        assert connect.get_connection_file(app) == profile_cf


def test_get_connection_info():
    with TemporaryDirectory() as d:
        cf = os.path.join(d, "kernel.json")
        connect.write_connection_file(cf, **sample_info)
        json_info = connect.get_connection_info(cf)
        info = connect.get_connection_info(cf, unpack=True)
        assert isinstance(json_info, str)

        sub_info = {k: v for k, v in info.items() if k in sample_info}
        assert sub_info == sample_info

        info2 = json.loads(json_info)
        info2["key"] = info2["key"].encode("utf-8")
        sub_info2 = {k: v for k, v in info2.items() if k in sample_info}
        assert sub_info2 == sample_info


def test_port_bind_failure_raises(request):
    cfg = Config()
    with TemporaryWorkingDirectory() as d:
        cfg.ProfileDir.location = d
        cf = "kernel.json"
        app = DummyKernelApp(config=cfg, connection_file=cf)
        request.addfinalizer(app.close)
        app.initialize()
        with patch.object(app, "_try_bind_socket") as mock_try_bind:
            mock_try_bind.side_effect = zmq.ZMQError(-100, "fails for unknown error types")
            with pytest.raises(zmq.ZMQError):
                app.init_sockets()
            assert mock_try_bind.call_count == 1


def test_port_bind_failure_recovery(request):
    try:
        errno.WSAEADDRINUSE
    except AttributeError:
        # Fake windows address in-use code
        p = patch.object(errno, "WSAEADDRINUSE", 12345, create=True)
        p.start()
        request.addfinalizer(p.stop)

    cfg = Config()
    with TemporaryWorkingDirectory() as d:
        cfg.ProfileDir.location = d
        cf = "kernel.json"
        app = DummyKernelApp(config=cfg, connection_file=cf)
        request.addfinalizer(app.close)
        app.initialize()
        with patch.object(app, "_try_bind_socket") as mock_try_bind:
            mock_try_bind.side_effect = [
                zmq.ZMQError(errno.EADDRINUSE, "fails for non-bind unix"),
                zmq.ZMQError(errno.WSAEADDRINUSE, "fails for non-bind windows"),
            ] + [0] * 100
            # Shouldn't raise anything as retries will kick in
            app.init_sockets()


def test_port_bind_failure_gives_up_retries(request):
    cfg = Config()
    with TemporaryWorkingDirectory() as d:
        cfg.ProfileDir.location = d
        cf = "kernel.json"
        app = DummyKernelApp(config=cfg, connection_file=cf)
        request.addfinalizer(app.close)
        app.initialize()
        with patch.object(app, "_try_bind_socket") as mock_try_bind:
            mock_try_bind.side_effect = zmq.ZMQError(errno.EADDRINUSE, "fails for non-bind")
            with pytest.raises(zmq.ZMQError):
                app.init_sockets()
            assert mock_try_bind.call_count == 100
284
.venv/Lib/site-packages/ipykernel/tests/test_debugger.py
Normal file
@ -0,0 +1,284 @@
import sys

import pytest

from .utils import TIMEOUT, get_reply, new_kernel

seq = 0

# Skip if debugpy is not available
pytest.importorskip("debugpy")


def wait_for_debug_request(kernel, command, arguments=None, full_reply=False):
    """Carry out a debug request and return the reply content.

    It does not check if the request was successful.
    """
    global seq
    seq += 1

    msg = kernel.session.msg(
        "debug_request",
        {
            "type": "request",
            "seq": seq,
            "command": command,
            "arguments": arguments or {},
        },
    )
    kernel.control_channel.send(msg)
    reply = get_reply(kernel, msg["header"]["msg_id"], channel="control")
    return reply if full_reply else reply["content"]


@pytest.fixture
def kernel():
    with new_kernel() as kc:
        yield kc


@pytest.fixture
def kernel_with_debug(kernel):
    # Initialize
    wait_for_debug_request(
        kernel,
        "initialize",
        {
            "clientID": "test-client",
            "clientName": "testClient",
            "adapterID": "",
            "pathFormat": "path",
            "linesStartAt1": True,
            "columnsStartAt1": True,
            "supportsVariableType": True,
            "supportsVariablePaging": True,
            "supportsRunInTerminalRequest": True,
            "locale": "en",
        },
    )

    # Attach
    wait_for_debug_request(kernel, "attach")

    try:
        yield kernel
    finally:
        # Detach
        wait_for_debug_request(kernel, "disconnect", {"restart": False, "terminateDebuggee": True})


def test_debug_initialize(kernel):
    reply = wait_for_debug_request(
        kernel,
        "initialize",
        {
            "clientID": "test-client",
            "clientName": "testClient",
            "adapterID": "",
            "pathFormat": "path",
            "linesStartAt1": True,
            "columnsStartAt1": True,
            "supportsVariableType": True,
            "supportsVariablePaging": True,
            "supportsRunInTerminalRequest": True,
            "locale": "en",
        },
    )
    assert reply["success"]


def test_attach_debug(kernel_with_debug):
    reply = wait_for_debug_request(
        kernel_with_debug, "evaluate", {"expression": "'a' + 'b'", "context": "repl"}
    )
    assert reply["success"]
    assert reply["body"]["result"] == ""


def test_set_breakpoints(kernel_with_debug):
    code = """def f(a, b):
    c = a + b
    return c

f(2, 3)"""

    r = wait_for_debug_request(kernel_with_debug, "dumpCell", {"code": code})
    source = r["body"]["sourcePath"]

    reply = wait_for_debug_request(
        kernel_with_debug,
        "setBreakpoints",
        {
            "breakpoints": [{"line": 2}],
            "source": {"path": source},
            "sourceModified": False,
        },
    )
    assert reply["success"]
    assert len(reply["body"]["breakpoints"]) == 1
    assert reply["body"]["breakpoints"][0]["verified"]
    assert reply["body"]["breakpoints"][0]["source"]["path"] == source

    r = wait_for_debug_request(kernel_with_debug, "debugInfo")
    assert source in map(lambda b: b["source"], r["body"]["breakpoints"])

    r = wait_for_debug_request(kernel_with_debug, "configurationDone")
    assert r["success"]


def test_stop_on_breakpoint(kernel_with_debug):
    code = """def f(a, b):
    c = a + b
    return c

f(2, 3)"""

    r = wait_for_debug_request(kernel_with_debug, "dumpCell", {"code": code})
    source = r["body"]["sourcePath"]

    wait_for_debug_request(kernel_with_debug, "debugInfo")

    wait_for_debug_request(
        kernel_with_debug,
        "setBreakpoints",
        {
            "breakpoints": [{"line": 2}],
            "source": {"path": source},
            "sourceModified": False,
        },
    )

    wait_for_debug_request(kernel_with_debug, "configurationDone", full_reply=True)

    kernel_with_debug.execute(code)

    # Wait for stop on breakpoint
    msg = {"msg_type": "", "content": {}}
    while msg.get("msg_type") != "debug_event" or msg["content"].get("event") != "stopped":
        msg = kernel_with_debug.get_iopub_msg(timeout=TIMEOUT)

    assert msg["content"]["body"]["reason"] == "breakpoint"


@pytest.mark.skipif(sys.version_info >= (3, 10), reason="TODO Does not work on Python 3.10")
def test_breakpoint_in_cell_with_leading_empty_lines(kernel_with_debug):
    code = """
def f(a, b):
    c = a + b
    return c

f(2, 3)"""

    r = wait_for_debug_request(kernel_with_debug, "dumpCell", {"code": code})
    source = r["body"]["sourcePath"]

    wait_for_debug_request(kernel_with_debug, "debugInfo")

    wait_for_debug_request(
        kernel_with_debug,
        "setBreakpoints",
        {
            "breakpoints": [{"line": 6}],
            "source": {"path": source},
            "sourceModified": False,
        },
    )

    wait_for_debug_request(kernel_with_debug, "configurationDone", full_reply=True)

    kernel_with_debug.execute(code)

    # Wait for stop on breakpoint
    msg = {"msg_type": "", "content": {}}
    while msg.get("msg_type") != "debug_event" or msg["content"].get("event") != "stopped":
        msg = kernel_with_debug.get_iopub_msg(timeout=TIMEOUT)

    assert msg["content"]["body"]["reason"] == "breakpoint"


def test_rich_inspect_not_at_breakpoint(kernel_with_debug):
    var_name = "text"
    value = "Hello the world"
    code = f"""{var_name}='{value}'
print({var_name})
"""

    msg_id = kernel_with_debug.execute(code)
    get_reply(kernel_with_debug, msg_id)

    r = wait_for_debug_request(kernel_with_debug, "inspectVariables")
    assert var_name in list(map(lambda v: v["name"], r["body"]["variables"]))

    reply = wait_for_debug_request(
        kernel_with_debug,
        "richInspectVariables",
        {"variableName": var_name},
    )

    assert reply["body"]["data"] == {"text/plain": f"'{value}'"}


def test_rich_inspect_at_breakpoint(kernel_with_debug):
    code = """def f(a, b):
    c = a + b
    return c

f(2, 3)"""

    r = wait_for_debug_request(kernel_with_debug, "dumpCell", {"code": code})
    source = r["body"]["sourcePath"]

    wait_for_debug_request(
        kernel_with_debug,
        "setBreakpoints",
        {
            "breakpoints": [{"line": 2}],
            "source": {"path": source},
            "sourceModified": False,
        },
    )

    r = wait_for_debug_request(kernel_with_debug, "debugInfo")

    r = wait_for_debug_request(kernel_with_debug, "configurationDone")

    kernel_with_debug.execute(code)

    # Wait for stop on breakpoint
    msg = {"msg_type": "", "content": {}}
    while msg.get("msg_type") != "debug_event" or msg["content"].get("event") != "stopped":
        msg = kernel_with_debug.get_iopub_msg(timeout=TIMEOUT)

    stacks = wait_for_debug_request(kernel_with_debug, "stackTrace", {"threadId": 1})["body"][
        "stackFrames"
    ]

    scopes = wait_for_debug_request(kernel_with_debug, "scopes", {"frameId": stacks[0]["id"]})[
        "body"
    ]["scopes"]

    locals_ = wait_for_debug_request(
        kernel_with_debug,
        "variables",
        {
            "variablesReference": next(filter(lambda s: s["name"] == "Locals", scopes))[
                "variablesReference"
            ]
        },
    )["body"]["variables"]

    reply = wait_for_debug_request(
        kernel_with_debug,
        "richInspectVariables",
        {"variableName": locals_[0]["name"], "frameId": stacks[0]["id"]},
    )

    assert reply["body"]["data"] == {"text/plain": locals_[0]["value"]}


def test_convert_to_long_pathname():
    if sys.platform == "win32":
        from ipykernel.compiler import _convert_to_long_pathname

        _convert_to_long_pathname(__file__)
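
For orientation, the debug_request content that wait_for_debug_request builds
is a plain DAP request; a representative payload looks like this (the
sourcePath value is hypothetical, normally obtained from a prior dumpCell
reply):

    {
        "type": "request",
        "seq": 3,                      # monotonically increasing per request
        "command": "setBreakpoints",
        "arguments": {
            "breakpoints": [{"line": 2}],
            "source": {"path": "/tmp/ipykernel_1234/abc123.py"},
            "sourceModified": False,
        },
    }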
190
.venv/Lib/site-packages/ipykernel/tests/test_embed_kernel.py
Normal file
@ -0,0 +1,190 @@
"""test IPython.embed_kernel()"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import json
import os
import sys
import time
from contextlib import contextmanager
from subprocess import PIPE, Popen

from flaky import flaky
from jupyter_client import BlockingKernelClient
from jupyter_core import paths

SETUP_TIMEOUT = 60
TIMEOUT = 15


@contextmanager
def setup_kernel(cmd):
    """start an embedded kernel in a subprocess, and wait for it to be ready

    Yields
    ------
    client: connected BlockingKernelClient instance
    """

    def connection_file_ready(connection_file):
        """Check if connection_file is a readable json file."""
        if not os.path.exists(connection_file):
            return False
        try:
            with open(connection_file) as f:
                json.load(f)
            return True
        except ValueError:
            return False

    kernel = Popen([sys.executable, "-c", cmd], stdout=PIPE, stderr=PIPE, encoding="utf-8")
    try:
        connection_file = os.path.join(
            paths.jupyter_runtime_dir(),
            "kernel-%i.json" % kernel.pid,
        )
        # wait for connection file to exist, timeout after SETUP_TIMEOUT seconds
        tic = time.time()
        while (
            not connection_file_ready(connection_file)
            and kernel.poll() is None
            and time.time() < tic + SETUP_TIMEOUT
        ):
            time.sleep(0.1)

        # Wait 100ms for the writing to finish
        time.sleep(0.1)

        if kernel.poll() is not None:
            o, e = kernel.communicate()
            raise OSError("Kernel failed to start:\n%s" % e)

        if not os.path.exists(connection_file):
            if kernel.poll() is None:
                kernel.terminate()
            raise OSError("Connection file %r never arrived" % connection_file)

        client = BlockingKernelClient(connection_file=connection_file)
        client.load_connection_file()
        client.start_channels()
        client.wait_for_ready()
        try:
            yield client
        finally:
            client.stop_channels()
    finally:
        kernel.terminate()
        kernel.wait()
        # Make sure all the fds get closed.
        for attr in ["stdout", "stderr", "stdin"]:
            fid = getattr(kernel, attr)
            if fid:
                fid.close()


@flaky(max_runs=3)
def test_embed_kernel_basic():
    """IPython.embed_kernel() is basically functional"""
    cmd = "\n".join(
        [
            "from IPython import embed_kernel",
            "def go():",
            "    a=5",
            '    b="hi there"',
            "    embed_kernel()",
            "go()",
            "",
        ]
    )

    with setup_kernel(cmd) as client:
        # oinfo a (int)
        client.inspect("a")
        msg = client.get_shell_msg(timeout=TIMEOUT)
        content = msg["content"]
        assert content["found"]

        client.execute("c=a*2")
        msg = client.get_shell_msg(timeout=TIMEOUT)
        content = msg["content"]
        assert content["status"] == "ok"

        # oinfo c (should be 10)
        client.inspect("c")
        msg = client.get_shell_msg(timeout=TIMEOUT)
        content = msg["content"]
        assert content["found"]
        text = content["data"]["text/plain"]
        assert "10" in text


@flaky(max_runs=3)
def test_embed_kernel_namespace():
    """IPython.embed_kernel() inherits calling namespace"""
    cmd = "\n".join(
        [
            "from IPython import embed_kernel",
            "def go():",
            "    a=5",
            '    b="hi there"',
            "    embed_kernel()",
            "go()",
            "",
        ]
    )

    with setup_kernel(cmd) as client:
        # oinfo a (int)
        client.inspect("a")
        msg = client.get_shell_msg(timeout=TIMEOUT)
        content = msg["content"]
        assert content["found"]
        text = content["data"]["text/plain"]
        assert "5" in text

        # oinfo b (str)
        client.inspect("b")
        msg = client.get_shell_msg(timeout=TIMEOUT)
        content = msg["content"]
        assert content["found"]
        text = content["data"]["text/plain"]
        assert "hi there" in text

        # oinfo c (undefined)
        client.inspect("c")
        msg = client.get_shell_msg(timeout=TIMEOUT)
        content = msg["content"]
        assert not content["found"]


@flaky(max_runs=3)
def test_embed_kernel_reentrant():
    """IPython.embed_kernel() can be called multiple times"""
    cmd = "\n".join(
        [
            "from IPython import embed_kernel",
            "count = 0",
            "def go():",
            "    global count",
            "    embed_kernel()",
            "    count = count + 1",
            "",
            "while True: go()",
            "",
        ]
    )

    with setup_kernel(cmd) as client:
        for i in range(5):
            client.inspect("count")
            msg = client.get_shell_msg(timeout=TIMEOUT)
            content = msg["content"]
            assert content["found"]
            text = content["data"]["text/plain"]
            assert str(i) in text

            # exit from embed_kernel
            client.execute("get_ipython().exit_now = True")
            msg = client.get_shell_msg(timeout=TIMEOUT)
            time.sleep(0.2)
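
The scenario these tests drive, reduced to a standalone sketch (illustrative;
running it blocks in go() while serving a kernel that any Jupyter frontend can
connect to via the announced connection file):

    from IPython import embed_kernel

    def go():
        a = 5
        b = "hi there"
        embed_kernel()   # blocks here, exposing go()'s local namespace

    go()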
43
.venv/Lib/site-packages/ipykernel/tests/test_eventloop.py
Normal file
@ -0,0 +1,43 @@
"""Test eventloop integration"""

import pytest
import tornado

from .utils import execute, flush_channels, start_new_kernel

KC = KM = None


def setup():
    """start the global kernel (if it isn't running)"""
    global KM, KC
    KM, KC = start_new_kernel()
    flush_channels(KC)


def teardown():
    KC.stop_channels()
    KM.shutdown_kernel(now=True)


async_code = """
from ipykernel.tests._asyncio_utils import async_func
async_func()
"""


@pytest.mark.skipif(tornado.version_info < (5,), reason="only relevant on tornado 5")
def test_asyncio_interrupt():
    flush_channels(KC)
    msg_id, content = execute("%gui asyncio", KC)
    assert content["status"] == "ok", content

    flush_channels(KC)
    msg_id, content = execute(async_code, KC)
    assert content["status"] == "ok", content

    KM.interrupt_kernel()

    flush_channels(KC)
    msg_id, content = execute(async_code, KC)
    assert content["status"] == "ok"
59
.venv/Lib/site-packages/ipykernel/tests/test_heartbeat.py
Normal file
@ -0,0 +1,59 @@
"""Tests for heartbeat thread"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import errno
from unittest.mock import patch

import pytest
import zmq

from ipykernel.heartbeat import Heartbeat


def test_port_bind_failure_raises():
    heart = Heartbeat(None)
    with patch.object(heart, "_try_bind_socket") as mock_try_bind:
        mock_try_bind.side_effect = zmq.ZMQError(-100, "fails for unknown error types")
        with pytest.raises(zmq.ZMQError):
            heart._bind_socket()
        assert mock_try_bind.call_count == 1


def test_port_bind_success():
    heart = Heartbeat(None)
    with patch.object(heart, "_try_bind_socket") as mock_try_bind:
        heart._bind_socket()
        assert mock_try_bind.call_count == 1


def test_port_bind_failure_recovery():
    try:
        errno.WSAEADDRINUSE
    except AttributeError:
        # Fake windows address in-use code
        errno.WSAEADDRINUSE = 12345

    try:
        heart = Heartbeat(None)
        with patch.object(heart, "_try_bind_socket") as mock_try_bind:
            mock_try_bind.side_effect = [
                zmq.ZMQError(errno.EADDRINUSE, "fails for non-bind unix"),
                zmq.ZMQError(errno.WSAEADDRINUSE, "fails for non-bind windows"),
            ] + [0] * 100
            # Shouldn't raise anything as retries will kick in
            heart._bind_socket()
    finally:
        # Cleanup fake assignment
        if errno.WSAEADDRINUSE == 12345:
            del errno.WSAEADDRINUSE


def test_port_bind_failure_gives_up_retries():
    heart = Heartbeat(None)
    with patch.object(heart, "_try_bind_socket") as mock_try_bind:
        mock_try_bind.side_effect = zmq.ZMQError(errno.EADDRINUSE, "fails for non-bind")
        with pytest.raises(zmq.ZMQError):
            heart._bind_socket()
        assert mock_try_bind.call_count == 100
53
.venv/Lib/site-packages/ipykernel/tests/test_io.py
Normal file
@ -0,0 +1,53 @@
"""Test IO capturing functionality"""

import io

import pytest
import zmq
from jupyter_client.session import Session

from ipykernel.iostream import IOPubThread, OutStream


def test_io_api():
    """Test that wrapped stdout has the same API as a normal TextIO object"""
    session = Session()
    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    thread = IOPubThread(pub)
    thread.start()

    stream = OutStream(session, thread, "stdout")

    # cleanup unused zmq objects before we start testing
    thread.stop()
    thread.close()
    ctx.term()

    assert stream.errors is None
    assert not stream.isatty()
    with pytest.raises(io.UnsupportedOperation):
        stream.detach()
    with pytest.raises(io.UnsupportedOperation):
        next(stream)
    with pytest.raises(io.UnsupportedOperation):
        stream.read()
    with pytest.raises(io.UnsupportedOperation):
        stream.readline()
    with pytest.raises(io.UnsupportedOperation):
        stream.seek(0)
    with pytest.raises(io.UnsupportedOperation):
        stream.tell()
    with pytest.raises(TypeError):
        stream.write(b"")


def test_io_isatty():
    session = Session()
    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    thread = IOPubThread(pub)
    thread.start()

    stream = OutStream(session, thread, "stdout", isatty=True)
    assert stream.isatty()
119
.venv/Lib/site-packages/ipykernel/tests/test_jsonutil.py
Normal file
@ -0,0 +1,119 @@
"""Test suite for our JSON utilities."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import json
import numbers
from binascii import a2b_base64
from datetime import datetime

import pytest
from jupyter_client._version import version_info as jupyter_client_version

from .. import jsonutil
from ..jsonutil import encode_images, json_clean

JUPYTER_CLIENT_MAJOR_VERSION = jupyter_client_version[0]


class MyInt:
    def __int__(self):
        return 389


numbers.Integral.register(MyInt)


class MyFloat:
    def __float__(self):
        return 3.14


numbers.Real.register(MyFloat)


@pytest.mark.skipif(JUPYTER_CLIENT_MAJOR_VERSION >= 7, reason="json_clean is a no-op")
def test():
    # list of input/expected output. Use None for the expected output if it
    # can be the same as the input.
    pairs = [
        (1, None),  # start with scalars
        (1.0, None),
        ("a", None),
        (True, None),
        (False, None),
        (None, None),
        # Containers
        ([1, 2], None),
        ((1, 2), [1, 2]),
        ({1, 2}, [1, 2]),
        (dict(x=1), None),
        ({"x": 1, "y": [1, 2, 3], "1": "int"}, None),
        # More exotic objects
        ((x for x in range(3)), [0, 1, 2]),
        (iter([1, 2]), [1, 2]),
        (datetime(1991, 7, 3, 12, 00), "1991-07-03T12:00:00.000000"),
        (MyFloat(), 3.14),
        (MyInt(), 389),
    ]

    for val, jval in pairs:
        if jval is None:
            jval = val
        out = json_clean(val)
        # validate our cleanup
        assert out == jval
        # and ensure that what we return, indeed encodes cleanly
        json.loads(json.dumps(out))


@pytest.mark.skipif(JUPYTER_CLIENT_MAJOR_VERSION >= 7, reason="json_clean is a no-op")
def test_encode_images():
    # invalid data, but the header and footer are from real files
    pngdata = b"\x89PNG\r\n\x1a\nblahblahnotactuallyvalidIEND\xaeB`\x82"
    jpegdata = b"\xff\xd8\xff\xe0\x00\x10JFIFblahblahjpeg(\xa0\x0f\xff\xd9"
    pdfdata = b"%PDF-1.\ntrailer<</Root<</Pages<</Kids[<</MediaBox[0 0 3 3]>>]>>>>>>"
    bindata = b"\xff\xff\xff\xff"

    fmt = {
        "image/png": pngdata,
        "image/jpeg": jpegdata,
        "application/pdf": pdfdata,
        "application/unrecognized": bindata,
    }
    encoded = json_clean(encode_images(fmt))
    for key, value in fmt.items():
        # encoded has unicode, want bytes
        decoded = a2b_base64(encoded[key])
        assert decoded == value
    encoded2 = json_clean(encode_images(encoded))
    assert encoded == encoded2

    for key, value in fmt.items():
        decoded = a2b_base64(encoded[key])
        assert decoded == value


@pytest.mark.skipif(JUPYTER_CLIENT_MAJOR_VERSION >= 7, reason="json_clean is a no-op")
def test_lambda():
    with pytest.raises(ValueError):
        json_clean(lambda: 1)


@pytest.mark.skipif(JUPYTER_CLIENT_MAJOR_VERSION >= 7, reason="json_clean is a no-op")
def test_exception():
    bad_dicts = [
        {1: "number", "1": "string"},
        {True: "bool", "True": "string"},
    ]
    for d in bad_dicts:
        with pytest.raises(ValueError):
            json_clean(d)


@pytest.mark.skipif(JUPYTER_CLIENT_MAJOR_VERSION >= 7, reason="json_clean is a no-op")
def test_unicode_dict():
    data = {"üniço∂e": "üniço∂e"}
    clean = jsonutil.json_clean(data)
    assert data == clean
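
In short, json_clean maps arbitrary Python values onto JSON-safe ones; a few
illustrative calls, consistent with the pairs table above:

    from datetime import datetime
    from ipykernel.jsonutil import json_clean

    json_clean((1, 2))                        # -> [1, 2]
    json_clean({1, 2})                        # -> [1, 2]
    json_clean(x for x in range(3))           # -> [0, 1, 2]
    json_clean(datetime(1991, 7, 3, 12, 0))   # -> "1991-07-03T12:00:00.000000"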
575
.venv/Lib/site-packages/ipykernel/tests/test_kernel.py
Normal file
@ -0,0 +1,575 @@
"""test the IPython Kernel"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import ast
import os.path
import platform
import signal
import subprocess
import sys
import time
from subprocess import Popen
from tempfile import TemporaryDirectory

import IPython
import psutil
import pytest
from flaky import flaky
from IPython.paths import locate_profile

from .utils import (
    TIMEOUT,
    assemble_output,
    execute,
    flush_channels,
    get_reply,
    kernel,
    new_kernel,
    wait_for_idle,
)


def _check_master(kc, expected=True, stream="stdout"):
    execute(kc=kc, code="import sys")
    flush_channels(kc)
    msg_id, content = execute(kc=kc, code="print(sys.%s._is_master_process())" % stream)
    stdout, stderr = assemble_output(kc.get_iopub_msg)
    assert stdout.strip() == repr(expected)


def _check_status(content):
    """If status=error, show the traceback"""
    if content["status"] == "error":
        assert False, "".join(["\n"] + content["traceback"])


# printing tests


def test_simple_print():
    """simple print statement in kernel"""
    with kernel() as kc:
        msg_id, content = execute(kc=kc, code="print('hi')")
        stdout, stderr = assemble_output(kc.get_iopub_msg)
        assert stdout == "hi\n"
        assert stderr == ""
        _check_master(kc, expected=True)


@pytest.mark.skip(reason="Currently don't capture during test as pytest does its own capturing")
def test_capture_fd():
    """simple print statement in kernel"""
    with kernel() as kc:
        iopub = kc.iopub_channel
        msg_id, content = execute(kc=kc, code="import os; os.system('echo capsys')")
        stdout, stderr = assemble_output(iopub)
        assert stdout == "capsys\n"
        assert stderr == ""
        _check_master(kc, expected=True)


@pytest.mark.skip(reason="Currently don't capture during test as pytest does its own capturing")
def test_subprocess_peek_at_stream_fileno():
    """output a subprocess writes directly to the stream fd is captured"""
    with kernel() as kc:
        iopub = kc.iopub_channel
        msg_id, content = execute(
            kc=kc,
            code="import subprocess, sys; subprocess.run(['python', '-c', 'import os; os.system(\"echo CAP1\"); print(\"CAP2\")'], stderr=sys.stderr)",
        )
        stdout, stderr = assemble_output(iopub)
        assert stdout == "CAP1\nCAP2\n"
        assert stderr == ""
        _check_master(kc, expected=True)


def test_sys_path():
    """test that sys.path doesn't get messed up by default"""
    with kernel() as kc:
        msg_id, content = execute(kc=kc, code="import sys; print(repr(sys.path))")
        stdout, stderr = assemble_output(kc.get_iopub_msg)
        # for error-output on failure
        sys.stderr.write(stderr)

        sys_path = ast.literal_eval(stdout.strip())
        assert "" in sys_path


def test_sys_path_profile_dir():
    """test that sys.path doesn't get messed up when `--profile-dir` is specified"""

    with new_kernel(["--profile-dir", locate_profile("default")]) as kc:
        msg_id, content = execute(kc=kc, code="import sys; print(repr(sys.path))")
        stdout, stderr = assemble_output(kc.get_iopub_msg)
        # for error-output on failure
        sys.stderr.write(stderr)

        sys_path = ast.literal_eval(stdout.strip())
        assert "" in sys_path


@flaky(max_runs=3)
@pytest.mark.skipif(
    sys.platform == "win32" or (sys.platform == "darwin" and sys.version_info >= (3, 8)),
    reason="subprocess prints fail on Windows and MacOS Python 3.8+",
)
def test_subprocess_print():
    """printing from forked mp.Process"""
    with new_kernel() as kc:

        _check_master(kc, expected=True)
        flush_channels(kc)
        np = 5
        code = "\n".join(
            [
                "import time",
                "import multiprocessing as mp",
                "pool = [mp.Process(target=print, args=('hello', i,)) for i in range(%i)]" % np,
                "for p in pool: p.start()",
                "for p in pool: p.join()",
                "time.sleep(0.5)",
            ]
        )

        msg_id, content = execute(kc=kc, code=code)
        stdout, stderr = assemble_output(kc.get_iopub_msg)
        assert stdout.count("hello") == np, stdout
        for n in range(np):
            assert stdout.count(str(n)) == 1, stdout
        assert stderr == ""
        _check_master(kc, expected=True)
        _check_master(kc, expected=True, stream="stderr")


@flaky(max_runs=3)
def test_subprocess_noprint():
    """mp.Process without print doesn't trigger iostream mp_mode"""
    with kernel() as kc:

        np = 5
        code = "\n".join(
            [
                "import multiprocessing as mp",
                "pool = [mp.Process(target=range, args=(i,)) for i in range(%i)]" % np,
                "for p in pool: p.start()",
                "for p in pool: p.join()",
            ]
        )

        msg_id, content = execute(kc=kc, code=code)
        stdout, stderr = assemble_output(kc.get_iopub_msg)
        assert stdout == ""
        assert stderr == ""

        _check_master(kc, expected=True)
        _check_master(kc, expected=True, stream="stderr")


@flaky(max_runs=3)
@pytest.mark.skipif(
    sys.platform == "win32" or (sys.platform == "darwin" and sys.version_info >= (3, 8)),
    reason="subprocess prints fail on Windows and MacOS Python 3.8+",
)
def test_subprocess_error():
    """error in mp.Process doesn't crash"""
    with new_kernel() as kc:

        code = "\n".join(
            [
                "import multiprocessing as mp",
                "p = mp.Process(target=int, args=('hi',))",
                "p.start()",
                "p.join()",
            ]
        )

        msg_id, content = execute(kc=kc, code=code)
        stdout, stderr = assemble_output(kc.get_iopub_msg)
        assert stdout == ""
        assert "ValueError" in stderr

        _check_master(kc, expected=True)
        _check_master(kc, expected=True, stream="stderr")


# raw_input tests


def test_raw_input():
    """test input"""
    with kernel() as kc:
        iopub = kc.iopub_channel

        input_f = "input"
        theprompt = "prompt> "
        code = 'print({input_f}("{theprompt}"))'.format(**locals())
        msg_id = kc.execute(code, allow_stdin=True)
        msg = kc.get_stdin_msg(timeout=TIMEOUT)
        assert msg["header"]["msg_type"] == "input_request"
        content = msg["content"]
        assert content["prompt"] == theprompt
        text = "some text"
        kc.input(text)
        reply = kc.get_shell_msg(timeout=TIMEOUT)
        assert reply["content"]["status"] == "ok"
        stdout, stderr = assemble_output(kc.get_iopub_msg)
        assert stdout == text + "\n"


def test_save_history():
    # Saving history from the kernel with %hist -f was failing because of
    # unicode problems on Python 2.
    with kernel() as kc, TemporaryDirectory() as td:
        file = os.path.join(td, "hist.out")
        execute("a=1", kc=kc)
        wait_for_idle(kc)
        execute('b="abcþ"', kc=kc)
        wait_for_idle(kc)
        _, reply = execute("%hist -f " + file, kc=kc)
        assert reply["status"] == "ok"
        with open(file, encoding="utf-8") as f:
            content = f.read()
        assert "a=1" in content
        assert 'b="abcþ"' in content


def test_smoke_faulthandler():
    pytest.importorskip("faulthandler", reason="this test needs faulthandler")
    with kernel() as kc:
        # Note: faulthandler.register is not available on windows.
        code = "\n".join(
            [
                "import sys",
                "import faulthandler",
                "import signal",
                "faulthandler.enable()",
                'if not sys.platform.startswith("win32"):',
                "    faulthandler.register(signal.SIGTERM)",
            ]
        )
        _, reply = execute(code, kc=kc)
        assert reply["status"] == "ok", reply.get("traceback", "")


def test_help_output():
    """ipython kernel --help-all works"""
    cmd = [sys.executable, "-m", "IPython", "kernel", "--help-all"]
    proc = subprocess.run(cmd, timeout=30, capture_output=True)
    assert proc.returncode == 0, proc.stderr
    assert b"Traceback" not in proc.stderr
    assert b"Options" in proc.stdout
    assert b"Class" in proc.stdout


def test_is_complete():
    with kernel() as kc:
        # There are more test cases for this in core - here we just check
        # that the kernel exposes the interface correctly.
        kc.is_complete("2+2")
        reply = kc.get_shell_msg(timeout=TIMEOUT)
        assert reply["content"]["status"] == "complete"

        # SyntaxError
        kc.is_complete("raise = 2")
        reply = kc.get_shell_msg(timeout=TIMEOUT)
        assert reply["content"]["status"] == "invalid"

        kc.is_complete("a = [1,\n2,")
        reply = kc.get_shell_msg(timeout=TIMEOUT)
        assert reply["content"]["status"] == "incomplete"
        assert reply["content"]["indent"] == ""

        # Cell magic ends on two blank lines for console UIs
        kc.is_complete("%%timeit\na\n\n")
        reply = kc.get_shell_msg(timeout=TIMEOUT)
        assert reply["content"]["status"] == "complete"


@pytest.mark.skipif(sys.platform != "win32", reason="only run on Windows")
def test_complete():
    with kernel() as kc:
        execute("a = 1", kc=kc)
        wait_for_idle(kc)
        cell = "import IPython\nb = a."
        kc.complete(cell)
        reply = kc.get_shell_msg(timeout=TIMEOUT)

        c = reply["content"]
        assert c["status"] == "ok"
        start = cell.find("a.")
        end = start + 2
        assert c["cursor_end"] == cell.find("a.") + 2
        assert c["cursor_start"] <= end

        # there are many right answers for cursor_start,
        # so verify application of the completion
        # rather than the value of cursor_start

        matches = c["matches"]
        assert matches
        for m in matches:
            completed = cell[: c["cursor_start"]] + m
            assert completed.startswith(cell)


def test_matplotlib_inline_on_import():
    pytest.importorskip("matplotlib", reason="this test requires matplotlib")
    with kernel() as kc:
        cell = "\n".join(
            ["import matplotlib, matplotlib.pyplot as plt", "backend = matplotlib.get_backend()"]
        )
        _, reply = execute(cell, user_expressions={"backend": "backend"}, kc=kc)
        _check_status(reply)
        backend_bundle = reply["user_expressions"]["backend"]
        _check_status(backend_bundle)
        assert "backend_inline" in backend_bundle["data"]["text/plain"]


def test_message_order():
    N = 100  # number of messages to test
    with kernel() as kc:
        _, reply = execute("a = 1", kc=kc)
        _check_status(reply)
        offset = reply["execution_count"] + 1
        cell = "a += 1\na"
        msg_ids = []
        # submit N executions as fast as we can
        for _ in range(N):
            msg_ids.append(kc.execute(cell))
        # check message-handling order
        for i, msg_id in enumerate(msg_ids, offset):
            reply = kc.get_shell_msg(timeout=TIMEOUT)
            _check_status(reply["content"])
            assert reply["content"]["execution_count"] == i
            assert reply["parent_header"]["msg_id"] == msg_id


@pytest.mark.skipif(
    sys.platform.startswith("linux") or sys.platform.startswith("darwin"),
    reason="test only on windows",
)
def test_unc_paths():
    with kernel() as kc, TemporaryDirectory() as td:
        drive_file_path = os.path.join(td, "unc.txt")
        with open(drive_file_path, "w+") as f:
            f.write("# UNC test")
        unc_root = "\\\\localhost\\C$"
        file_path = os.path.splitdrive(os.path.dirname(drive_file_path))[1]
        unc_file_path = os.path.join(unc_root, file_path[1:])

        kc.execute(f"cd {unc_file_path:s}")
        reply = kc.get_shell_msg(timeout=TIMEOUT)
        assert reply["content"]["status"] == "ok"
        out, err = assemble_output(kc.get_iopub_msg)
        assert unc_file_path in out

        flush_channels(kc)
        kc.execute(code="ls")
        reply = kc.get_shell_msg(timeout=TIMEOUT)
        assert reply["content"]["status"] == "ok"
        out, err = assemble_output(kc.get_iopub_msg)
        assert "unc.txt" in out

        kc.execute(code="cd")
        reply = kc.get_shell_msg(timeout=TIMEOUT)
        assert reply["content"]["status"] == "ok"


@pytest.mark.skipif(
    platform.python_implementation() == "PyPy",
    reason="does not work on PyPy",
)
def test_shutdown():
    """Kernel exits after polite shutdown_request"""
    with new_kernel() as kc:
        km = kc.parent
        execute("a = 1", kc=kc)
        wait_for_idle(kc)
        kc.shutdown()
        for _ in range(300):  # 30s timeout
            if km.is_alive():
                time.sleep(0.1)
            else:
                break
        assert not km.is_alive()


def test_interrupt_during_input():
    """
    The kernel exits after being interrupted while waiting in input().

    input() appears to have issues other functions don't, and it needs to be
    interruptible in order for pdb to be interruptible.
    """
    with new_kernel() as kc:
        km = kc.parent
        msg_id = kc.execute("input()")
        time.sleep(1)  # Make sure it's actually waiting for input.
        km.interrupt_kernel()
        from .test_message_spec import validate_message

        # If we failed to interrupt, this will timeout:
        reply = get_reply(kc, msg_id, TIMEOUT)
        validate_message(reply, "execute_reply", msg_id)


@pytest.mark.skipif(os.name == "nt", reason="Message based interrupt not supported on Windows")
def test_interrupt_with_message():
    """The kernel can be interrupted via an interrupt_request message"""
    with new_kernel() as kc:
        km = kc.parent
        km.kernel_spec.interrupt_mode = "message"
        msg_id = kc.execute("input()")
        time.sleep(1)  # Make sure it's actually waiting for input.
        km.interrupt_kernel()
        from .test_message_spec import validate_message

        # If we failed to interrupt, this will timeout:
        reply = get_reply(kc, msg_id, TIMEOUT)
        validate_message(reply, "execute_reply", msg_id)


@pytest.mark.skipif(
    "__pypy__" in sys.builtin_module_names,
    reason="fails on pypy",
)
def test_interrupt_during_pdb_set_trace():
    """
    The kernel exits after being interrupted while waiting in pdb.set_trace().

    Merely testing input() isn't enough, pdb has its own issues that need
    to be handled in addition.

    This test will fail with versions of IPython < 7.14.0.
    """
    with new_kernel() as kc:
        km = kc.parent
        msg_id = kc.execute("import pdb; pdb.set_trace()")
        msg_id2 = kc.execute("3 + 4")
        time.sleep(1)  # Make sure it's actually waiting for input.
        km.interrupt_kernel()
        from .test_message_spec import validate_message

        # If we failed to interrupt, this will timeout:
        reply = get_reply(kc, msg_id, TIMEOUT)
        validate_message(reply, "execute_reply", msg_id)
        # If we failed to interrupt, this will timeout:
        reply = get_reply(kc, msg_id2, TIMEOUT)
        validate_message(reply, "execute_reply", msg_id2)


def test_control_thread_priority():

    N = 5
    with new_kernel() as kc:
        msg_id = kc.execute("pass")
        get_reply(kc, msg_id)

        sleep_msg_id = kc.execute("import asyncio; await asyncio.sleep(2)")

        # submit N shell messages
        shell_msg_ids = []
        for i in range(N):
            shell_msg_ids.append(kc.execute(f"i = {i}"))

        # ensure all shell messages have arrived at the kernel before any control messages
        time.sleep(0.5)
        # at this point, shell messages should be waiting in msg_queue,
        # rather than zmq while the kernel is still in the middle of processing
        # the first execution

        # now send N control messages
        control_msg_ids = []
        for _ in range(N):
            msg = kc.session.msg("kernel_info_request", {})
            kc.control_channel.send(msg)
            control_msg_ids.append(msg["header"]["msg_id"])

        # finally, collect the replies on both channels for comparison
        get_reply(kc, sleep_msg_id)
        shell_replies = []
        for msg_id in shell_msg_ids:
            shell_replies.append(get_reply(kc, msg_id))

        control_replies = []
        for msg_id in control_msg_ids:
            control_replies.append(get_reply(kc, msg_id, channel="control"))

        # verify that all control messages were handled before all shell messages
        shell_dates = [msg["header"]["date"] for msg in shell_replies]
        control_dates = [msg["header"]["date"] for msg in control_replies]
        # comparing first to last ought to be enough, since queues preserve order
        # use <= in case of very-fast handling and/or low resolution timers
        assert control_dates[-1] <= shell_dates[0]


def _child():
    print("in child", os.getpid())

    def _print_and_exit(sig, frame):
        print(f"Received signal {sig}")
        # take some time so retries are triggered
        time.sleep(0.5)
        sys.exit(-sig)

    signal.signal(signal.SIGTERM, _print_and_exit)
    time.sleep(30)


def _start_children():
    ip = IPython.get_ipython()
    ns = ip.user_ns

    cmd = [sys.executable, "-c", f"from {__name__} import _child; _child()"]
    child_pg = Popen(cmd, start_new_session=False)
    child_newpg = Popen(cmd, start_new_session=True)
    ns["pid"] = os.getpid()
    ns["child_pg"] = child_pg.pid
    ns["child_newpg"] = child_newpg.pid
    # give them time to start up and register signal handlers
    time.sleep(1)


@pytest.mark.skipif(
    platform.python_implementation() == "PyPy",
    reason="does not work on PyPy",
)
def test_shutdown_subprocesses():
    """Kernel exits after polite shutdown_request"""
    with new_kernel() as kc:
        km = kc.parent
        msg_id, reply = execute(
            f"from {__name__} import _start_children\n_start_children()",
            kc=kc,
            user_expressions={
                "pid": "pid",
                "child_pg": "child_pg",
                "child_newpg": "child_newpg",
            },
        )
        print(reply)
        expressions = reply["user_expressions"]
        kernel_process = psutil.Process(int(expressions["pid"]["data"]["text/plain"]))
        child_pg = psutil.Process(int(expressions["child_pg"]["data"]["text/plain"]))
        child_newpg = psutil.Process(int(expressions["child_newpg"]["data"]["text/plain"]))
        wait_for_idle(kc)

        kc.shutdown()
        for _ in range(300):  # 30s timeout
            if km.is_alive():
                time.sleep(0.1)
            else:
                break
        assert not km.is_alive()
        assert not kernel_process.is_running()
        # child in the process group shut down
        assert not child_pg.is_running()
        # child outside the process group was not shut down (unix only)
        if os.name != "nt":
            assert child_newpg.is_running()
        try:
            child_newpg.terminate()
        except psutil.NoSuchProcess:
            pass
147
.venv/Lib/site-packages/ipykernel/tests/test_kernelspec.py
Normal file
@ -0,0 +1,147 @@
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import json
import os
import shutil
import sys
import tempfile
from unittest import mock

import pytest
from jupyter_core.paths import jupyter_data_dir

from ipykernel.kernelspec import (
    KERNEL_NAME,
    RESOURCES,
    InstallIPythonKernelSpecApp,
    get_kernel_dict,
    install,
    make_ipkernel_cmd,
    write_kernel_spec,
)

pjoin = os.path.join


def test_make_ipkernel_cmd():
    cmd = make_ipkernel_cmd()
    assert cmd == [sys.executable, "-m", "ipykernel_launcher", "-f", "{connection_file}"]


def assert_kernel_dict(d):
    assert d["argv"] == make_ipkernel_cmd()
    assert d["display_name"] == "Python %i (ipykernel)" % sys.version_info[0]
    assert d["language"] == "python"


def test_get_kernel_dict():
    d = get_kernel_dict()
    assert_kernel_dict(d)


def assert_kernel_dict_with_profile(d):
    assert d["argv"] == make_ipkernel_cmd(extra_arguments=["--profile", "test"])
    assert d["display_name"] == "Python %i (ipykernel)" % sys.version_info[0]
    assert d["language"] == "python"


def test_get_kernel_dict_with_profile():
    d = get_kernel_dict(["--profile", "test"])
    assert_kernel_dict_with_profile(d)


def assert_is_spec(path):
    for fname in os.listdir(RESOURCES):
        dst = pjoin(path, fname)
        assert os.path.exists(dst)
    kernel_json = pjoin(path, "kernel.json")
    assert os.path.exists(kernel_json)
    with open(kernel_json, encoding="utf8") as f:
        json.load(f)


def test_write_kernel_spec():
    path = write_kernel_spec()
    assert_is_spec(path)
    shutil.rmtree(path)


def test_write_kernel_spec_path():
    path = os.path.join(tempfile.mkdtemp(), KERNEL_NAME)
    path2 = write_kernel_spec(path)
    assert path == path2
    assert_is_spec(path)
    shutil.rmtree(path)


def test_install_kernelspec():
    path = tempfile.mkdtemp()
    try:
        InstallIPythonKernelSpecApp.launch_instance(argv=["--prefix", path])
        assert_is_spec(os.path.join(path, "share", "jupyter", "kernels", KERNEL_NAME))
    finally:
        shutil.rmtree(path)


def test_install_user():
    tmp = tempfile.mkdtemp()

    with mock.patch.dict(os.environ, {"HOME": tmp}):
        install(user=True)
        data_dir = jupyter_data_dir()

    assert_is_spec(os.path.join(data_dir, "kernels", KERNEL_NAME))


def test_install():
    system_jupyter_dir = tempfile.mkdtemp()

    with mock.patch("jupyter_client.kernelspec.SYSTEM_JUPYTER_PATH", [system_jupyter_dir]):
        install()

    assert_is_spec(os.path.join(system_jupyter_dir, "kernels", KERNEL_NAME))


def test_install_profile():
    system_jupyter_dir = tempfile.mkdtemp()

    with mock.patch("jupyter_client.kernelspec.SYSTEM_JUPYTER_PATH", [system_jupyter_dir]):
        install(profile="Test")

    spec = os.path.join(system_jupyter_dir, "kernels", KERNEL_NAME, "kernel.json")
    with open(spec) as f:
        spec = json.load(f)
    assert spec["display_name"].endswith(" [profile=Test]")
    assert spec["argv"][-2:] == ["--profile", "Test"]


def test_install_display_name_overrides_profile():
    system_jupyter_dir = tempfile.mkdtemp()

    with mock.patch("jupyter_client.kernelspec.SYSTEM_JUPYTER_PATH", [system_jupyter_dir]):
        install(display_name="Display", profile="Test")

    spec = os.path.join(system_jupyter_dir, "kernels", KERNEL_NAME, "kernel.json")
    with open(spec) as f:
        spec = json.load(f)
    assert spec["display_name"] == "Display"


@pytest.mark.parametrize("env", [None, dict(spam="spam"), dict(spam="spam", foo="bar")])
def test_install_env(tmp_path, env):
    # python 3.5 // tmp_path must be converted to str
    with mock.patch("jupyter_client.kernelspec.SYSTEM_JUPYTER_PATH", [str(tmp_path)]):
        install(env=env)

    spec = tmp_path / "kernels" / KERNEL_NAME / "kernel.json"
    with spec.open() as f:
        spec = json.load(f)

    if env:
        assert len(env) == len(spec["env"])
        for k, v in env.items():
            assert spec["env"][k] == v
    else:
        assert "env" not in spec
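For orientation, the kernelspec API these tests exercise can also be driven directly. A short, hedged sketch (the `--profile demo` argument is illustrative, and the returned path varies by platform):

    import sys
    from ipykernel.kernelspec import get_kernel_dict, write_kernel_spec

    spec = get_kernel_dict(["--profile", "demo"])  # extra argv are appended
    assert spec["argv"][:3] == [sys.executable, "-m", "ipykernel_launcher"]
    assert spec["language"] == "python"

    path = write_kernel_spec()  # writes kernel.json + resources to a temp dir
    print(path)                 # caller is responsible for cleaning this up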
620
.venv/Lib/site-packages/ipykernel/tests/test_message_spec.py
Normal file
@ -0,0 +1,620 @@
"""Test suite for our zeromq-based message specification."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import re
import sys
from queue import Empty

import jupyter_client
import pytest
from packaging.version import Version as V
from traitlets import (
    Bool,
    Dict,
    Enum,
    HasTraits,
    Integer,
    List,
    TraitError,
    Unicode,
    observe,
)

from .utils import TIMEOUT, execute, flush_channels, get_reply, start_global_kernel

# -----------------------------------------------------------------------------
# Globals
# -----------------------------------------------------------------------------
KC = None


def setup():
    global KC
    KC = start_global_kernel()


# -----------------------------------------------------------------------------
# Message Spec References
# -----------------------------------------------------------------------------


class Reference(HasTraits):
    """
    Base class for message spec testing.

    This class is the core of the message specification test. The
    idea is that child classes implement trait attributes for each
    message key, so that message keys can be tested against these
    traits using the :meth:`check` method.
    """

    def check(self, d):
        """validate a dict against our traits"""
        for key in self.trait_names():
            assert key in d
            # FIXME: always allow None, probably not a good idea
            if d[key] is None:
                continue
            try:
                setattr(self, key, d[key])
            except TraitError as e:
                assert False, str(e)


class Version(Unicode):
    def __init__(self, *args, **kwargs):
        self.min = kwargs.pop("min", None)
        self.max = kwargs.pop("max", None)
        kwargs["default_value"] = self.min
        super().__init__(*args, **kwargs)

    def validate(self, obj, value):
        if self.min and V(value) < V(self.min):
            raise TraitError("bad version: %s < %s" % (value, self.min))
        if self.max and (V(value) > V(self.max)):
            raise TraitError("bad version: %s > %s" % (value, self.max))
        # traitlets validators must return the (possibly coerced) value,
        # otherwise the trait would silently be set to None
        return value


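The `Version` trait composes with `Reference.check` so that a spec violation surfaces as a plain AssertionError. A small illustrative use (the `Proto` class here is invented for the example):

    class Proto(Reference):
        version = Version(min="5.0", max="5.3")

    Proto().check({"version": "5.1"})      # within bounds: passes silently
    try:
        Proto().check({"version": "4.0"})  # below min: TraitError -> AssertionError
    except AssertionError as e:
        print("rejected:", e)              # bad version: 4.0 < 5.0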
class RMessage(Reference):
    msg_id = Unicode()
    msg_type = Unicode()
    header = Dict()
    parent_header = Dict()
    content = Dict()

    def check(self, d):
        super().check(d)
        RHeader().check(self.header)
        if self.parent_header:
            RHeader().check(self.parent_header)


class RHeader(Reference):
    msg_id = Unicode()
    msg_type = Unicode()
    session = Unicode()
    username = Unicode()
    version = Version(min="5.0")


mime_pat = re.compile(r"^[\w\-\+\.]+/[\w\-\+\.]+$")


class MimeBundle(Reference):
    metadata = Dict()
    data = Dict()

    @observe("data")
    def _on_data_changed(self, change):
        for k, v in change["new"].items():
            assert mime_pat.match(k)
            assert isinstance(v, str)


# shell replies
class Reply(Reference):
    status = Enum(("ok", "error"), default_value="ok")


class ExecuteReply(Reply):
    execution_count = Integer()

    def check(self, d):
        Reference.check(self, d)
        if d["status"] == "ok":
            ExecuteReplyOkay().check(d)
        elif d["status"] == "error":
            ExecuteReplyError().check(d)
        elif d["status"] == "aborted":
            ExecuteReplyAborted().check(d)


class ExecuteReplyOkay(Reply):
    status = Enum(("ok",))
    user_expressions = Dict()


class ExecuteReplyError(Reply):
    status = Enum(("error",))
    ename = Unicode()
    evalue = Unicode()
    traceback = List(Unicode())


class ExecuteReplyAborted(Reply):
    status = Enum(("aborted",))


class InspectReply(Reply, MimeBundle):
    found = Bool()


class ArgSpec(Reference):
    args = List(Unicode())
    varargs = Unicode()
    varkw = Unicode()
    defaults = List()


class Status(Reference):
    execution_state = Enum(("busy", "idle", "starting"), default_value="busy")


class CompleteReply(Reply):
    matches = List(Unicode())
    cursor_start = Integer()
    cursor_end = Integer()
    status = Unicode()


class LanguageInfo(Reference):
    name = Unicode("python")
    version = Unicode(sys.version.split()[0])


class KernelInfoReply(Reply):
    protocol_version = Version(min="5.0")
    implementation = Unicode("ipython")
    implementation_version = Version(min="2.1")
    language_info = Dict()
    banner = Unicode()

    def check(self, d):
        Reference.check(self, d)
        LanguageInfo().check(d["language_info"])


class ConnectReply(Reference):
    shell_port = Integer()
    control_port = Integer()
    stdin_port = Integer()
    iopub_port = Integer()
    hb_port = Integer()


class CommInfoReply(Reply):
    comms = Dict()


class IsCompleteReply(Reference):
    status = Enum(("complete", "incomplete", "invalid", "unknown"), default_value="complete")

    def check(self, d):
        Reference.check(self, d)
        if d["status"] == "incomplete":
            IsCompleteReplyIncomplete().check(d)


class IsCompleteReplyIncomplete(Reference):
    indent = Unicode()


# IOPub messages


class ExecuteInput(Reference):
    code = Unicode()
    execution_count = Integer()


class Error(ExecuteReplyError):
    """Errors are the same as ExecuteReply, but without status"""

    status = None  # no status field


class Stream(Reference):
    name = Enum(("stdout", "stderr"), default_value="stdout")
    text = Unicode()


class DisplayData(MimeBundle):
    pass


class ExecuteResult(MimeBundle):
    execution_count = Integer()


class HistoryReply(Reply):
    history = List(List())


references = {
    "execute_reply": ExecuteReply(),
    "inspect_reply": InspectReply(),
    "status": Status(),
    "complete_reply": CompleteReply(),
    "kernel_info_reply": KernelInfoReply(),
    "connect_reply": ConnectReply(),
    "comm_info_reply": CommInfoReply(),
    "is_complete_reply": IsCompleteReply(),
    "execute_input": ExecuteInput(),
    "execute_result": ExecuteResult(),
    "history_reply": HistoryReply(),
    "error": Error(),
    "stream": Stream(),
    "display_data": DisplayData(),
    "header": RHeader(),
}
"""
Specifications of `content` part of the reply messages.
"""


def validate_message(msg, msg_type=None, parent=None):
    """validate a message

    Checks the message envelope, then validates the content against the
    matching Reference spec, raising AssertionError on the first failure.

    If msg_type and/or parent are given, the msg_type and/or parent msg_id
    are compared with the given values.
    """
    RMessage().check(msg)
    if msg_type:
        assert msg["msg_type"] == msg_type
    if parent:
        assert msg["parent_header"]["msg_id"] == parent
    content = msg["content"]
    ref = references[msg["msg_type"]]
    ref.check(content)


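Because `validate_message` and the `references` table only need plain dicts, the specs can be exercised without a running kernel. A hedged sketch (the sample payloads are invented for illustration):

    references["stream"].check({"name": "stdout", "text": "hi\n"})  # passes silently

    try:
        references["stream"].check({"name": "stdmisc", "text": "hi\n"})
    except AssertionError as e:        # "stdmisc" is outside the Enum
        print("rejected:", e)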
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------

# Shell channel


def test_execute():
    flush_channels()

    msg_id = KC.execute(code="x=1")
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, "execute_reply", msg_id)


def test_execute_silent():
    flush_channels()
    msg_id, reply = execute(code="x=1", silent=True)

    # flush status=idle
    status = KC.get_iopub_msg(timeout=TIMEOUT)
    validate_message(status, "status", msg_id)
    assert status["content"]["execution_state"] == "idle"

    with pytest.raises(Empty):
        KC.get_iopub_msg(timeout=0.1)

    count = reply["execution_count"]

    msg_id, reply = execute(code="x=2", silent=True)

    # flush status=idle
    status = KC.get_iopub_msg(timeout=TIMEOUT)
    validate_message(status, "status", msg_id)
    assert status["content"]["execution_state"] == "idle"

    with pytest.raises(Empty):
        KC.get_iopub_msg(timeout=0.1)

    count_2 = reply["execution_count"]
    assert count_2 == count


def test_execute_error():
    flush_channels()

    msg_id, reply = execute(code="1/0")
    assert reply["status"] == "error"
    assert reply["ename"] == "ZeroDivisionError"

    error = KC.get_iopub_msg(timeout=TIMEOUT)
    validate_message(error, "error", msg_id)


def test_execute_inc():
    """execute request should increment execution_count"""
    flush_channels()

    _, reply = execute(code="x=1")
    count = reply["execution_count"]

    flush_channels()

    _, reply = execute(code="x=2")
    count_2 = reply["execution_count"]
    assert count_2 == count + 1


def test_execute_stop_on_error():
    """execute request should not abort execution queue with stop_on_error False"""
    flush_channels()

    fail = "\n".join(
        [
            # sleep to ensure subsequent message is waiting in the queue to be aborted
            # async sleep to ensure coroutines are processing while this happens
            "import asyncio",
            "await asyncio.sleep(1)",
            "raise ValueError()",
        ]
    )
    KC.execute(code=fail)
    KC.execute(code='print("Hello")')
    KC.execute(code='print("world")')
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    print(reply)
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    assert reply["content"]["status"] == "aborted"
    # second message, too
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    assert reply["content"]["status"] == "aborted"

    flush_channels()

    KC.execute(code=fail, stop_on_error=False)
    KC.execute(code='print("Hello")')
    KC.get_shell_msg(timeout=TIMEOUT)
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    assert reply["content"]["status"] == "ok"


def test_non_execute_stop_on_error():
    """test that non-execute_request's are not aborted after an error"""
    flush_channels()

    fail = "\n".join(
        [
            # sleep to ensure subsequent message is waiting in the queue to be aborted
            "import time",
            "time.sleep(0.5)",
            "raise ValueError",
        ]
    )
    KC.execute(code=fail)
    KC.kernel_info()
    KC.comm_info()
    KC.inspect(code="print")
    reply = KC.get_shell_msg(timeout=TIMEOUT)  # execute
    assert reply["content"]["status"] == "error"
    reply = KC.get_shell_msg(timeout=TIMEOUT)  # kernel_info
    assert reply["content"]["status"] == "ok"
    reply = KC.get_shell_msg(timeout=TIMEOUT)  # comm_info
    assert reply["content"]["status"] == "ok"
    reply = KC.get_shell_msg(timeout=TIMEOUT)  # inspect
    assert reply["content"]["status"] == "ok"


def test_user_expressions():
    flush_channels()

    msg_id, reply = execute(code="x=1", user_expressions=dict(foo="x+1"))
    user_expressions = reply["user_expressions"]
    assert user_expressions == {
        "foo": {
            "status": "ok",
            "data": {"text/plain": "2"},
            "metadata": {},
        }
    }


def test_user_expressions_fail():
    flush_channels()

    msg_id, reply = execute(code="x=0", user_expressions=dict(foo="nosuchname"))
    user_expressions = reply["user_expressions"]
    foo = user_expressions["foo"]
    assert foo["status"] == "error"
    assert foo["ename"] == "NameError"


def test_oinfo():
    flush_channels()

    msg_id = KC.inspect("a")
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, "inspect_reply", msg_id)


def test_oinfo_found():
    flush_channels()

    msg_id, reply = execute(code="a=5")

    msg_id = KC.inspect("a")
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, "inspect_reply", msg_id)
    content = reply["content"]
    assert content["found"]
    text = content["data"]["text/plain"]
    assert "Type:" in text
    assert "Docstring:" in text


def test_oinfo_detail():
    flush_channels()

    msg_id, reply = execute(code="ip=get_ipython()")

    msg_id = KC.inspect("ip.object_inspect", cursor_pos=10, detail_level=1)
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, "inspect_reply", msg_id)
    content = reply["content"]
    assert content["found"]
    text = content["data"]["text/plain"]
    assert "Signature:" in text
    assert "Source:" in text


def test_oinfo_not_found():
    flush_channels()

    msg_id = KC.inspect("dne")
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, "inspect_reply", msg_id)
    content = reply["content"]
    assert not content["found"]


def test_complete():
    flush_channels()

    msg_id, reply = execute(code="alpha = albert = 5")

    msg_id = KC.complete("al", 2)
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, "complete_reply", msg_id)
    matches = reply["content"]["matches"]
    for name in ("alpha", "albert"):
        assert name in matches


def test_kernel_info_request():
    flush_channels()

    msg_id = KC.kernel_info()
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, "kernel_info_reply", msg_id)


def test_connect_request():
    flush_channels()
    msg = KC.session.msg("connect_request")
    KC.shell_channel.send(msg)
    msg_id = msg["header"]["msg_id"]

    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, "connect_reply", msg_id)


@pytest.mark.skipif(
    jupyter_client.version_info < (5, 0),
    reason="earlier versions of jupyter_client don't have comm_info",
)
def test_comm_info_request():
    flush_channels()
    msg_id = KC.comm_info()
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, "comm_info_reply", msg_id)


def test_single_payload():
    """
    Check that set_next_input is not triggered several times per cell.

    This is (or was?) mostly due to the fact that `?` in a loop would trigger
    several set_next_input.

    I'm tempted to think that we actually want to _allow_ multiple
    set_next_input (that's the user's choice), but that `?` itself (and ?'s
    transform) should avoid setting multiple set_next_input.
    """
    flush_channels()
    msg_id, reply = execute(
        code="ip = get_ipython()\nfor i in range(3):\n   ip.set_next_input('Hello There')\n"
    )
    payload = reply["payload"]
    next_input_pls = [pl for pl in payload if pl["source"] == "set_next_input"]
    assert len(next_input_pls) == 1


def test_is_complete():
    flush_channels()

    msg_id = KC.is_complete("a = 1")
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, "is_complete_reply", msg_id)


def test_history_range():
    flush_channels()

    KC.execute(code="x=1", store_history=True)
    KC.get_shell_msg(timeout=TIMEOUT)

    msg_id = KC.history(hist_access_type="range", raw=True, output=True, start=1, stop=2, session=0)
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, "history_reply", msg_id)
    content = reply["content"]
    assert len(content["history"]) == 1


def test_history_tail():
    flush_channels()

    KC.execute(code="x=1", store_history=True)
    KC.get_shell_msg(timeout=TIMEOUT)

    msg_id = KC.history(hist_access_type="tail", raw=True, output=True, n=1, session=0)
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, "history_reply", msg_id)
    content = reply["content"]
    assert len(content["history"]) == 1


def test_history_search():
    flush_channels()

    KC.execute(code="x=1", store_history=True)
    KC.get_shell_msg(timeout=TIMEOUT)

    msg_id = KC.history(
        hist_access_type="search", raw=True, output=True, n=1, pattern="*", session=0
    )
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, "history_reply", msg_id)
    content = reply["content"]
    assert len(content["history"]) == 1


# IOPub channel


def test_stream():
    flush_channels()

    msg_id, reply = execute("print('hi')")

    stdout = KC.get_iopub_msg(timeout=TIMEOUT)
    validate_message(stdout, "stream", msg_id)
    content = stdout["content"]
    assert content["text"] == "hi\n"


def test_display_data():
    flush_channels()

    msg_id, reply = execute("from IPython.display import display; display(1)")

    display = KC.get_iopub_msg(timeout=TIMEOUT)
    validate_message(display, "display_data", parent=msg_id)
    data = display["content"]["data"]
    assert data["text/plain"] == "1"
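The IOPub tests above each pull a single message; the full status lifecycle for one request looks like this, sketched against the same module-level client (assumes `setup()` has run so `KC` is connected):

    flush_channels()
    msg_id = KC.execute("pass")
    states = []
    while True:
        msg = KC.get_iopub_msg(timeout=TIMEOUT)
        validate_message(msg, parent=msg_id)
        if msg["msg_type"] == "status":
            states.append(msg["content"]["execution_state"])
            if states[-1] == "idle":
                break
    assert states == ["busy", "idle"]  # busy on receipt, idle when done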
78
.venv/Lib/site-packages/ipykernel/tests/test_pickleutil.py
Normal file
@ -0,0 +1,78 @@
import pickle
import warnings

with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    from ipykernel.pickleutil import can, uncan


def interactive(f):
    f.__module__ = "__main__"
    return f


def dumps(obj):
    return pickle.dumps(can(obj))


def loads(obj):
    return uncan(pickle.loads(obj))


def test_no_closure():
    @interactive
    def foo():
        a = 5
        return a

    pfoo = dumps(foo)
    bar = loads(pfoo)
    assert foo() == bar()


def test_generator_closure():
    # this only creates a closure on Python 3
    @interactive
    def foo():
        i = "i"
        r = [i for j in (1, 2)]
        return r

    pfoo = dumps(foo)
    bar = loads(pfoo)
    assert foo() == bar()


def test_nested_closure():
    @interactive
    def foo():
        i = "i"

        def g():
            return i

        return g()

    pfoo = dumps(foo)
    bar = loads(pfoo)
    assert foo() == bar()


def test_closure():
    i = "i"

    @interactive
    def foo():
        return i

    pfoo = dumps(foo)
    bar = loads(pfoo)
    assert foo() == bar()


def test_uncan_bytes_buffer():
    data = b"data"
    canned = can(data)
    canned.buffers = [memoryview(buf) for buf in canned.buffers]
    out = uncan(canned)
    assert out == data
62
.venv/Lib/site-packages/ipykernel/tests/test_start_kernel.py
Normal file
@ -0,0 +1,62 @@
from textwrap import dedent

from flaky import flaky

from .test_embed_kernel import setup_kernel

TIMEOUT = 15


@flaky(max_runs=3)
def test_ipython_start_kernel_userns():
    cmd = dedent(
        """
        from ipykernel.kernelapp import launch_new_instance
        ns = {"tre": 123}
        launch_new_instance(user_ns=ns)
        """
    )

    with setup_kernel(cmd) as client:
        client.inspect("tre")
        msg = client.get_shell_msg(timeout=TIMEOUT)
        content = msg["content"]
        assert content["found"]
        text = content["data"]["text/plain"]
        assert "123" in text

        # user_module should be an instance of DummyMod
        client.execute("usermod = get_ipython().user_module")
        msg = client.get_shell_msg(timeout=TIMEOUT)
        content = msg["content"]
        assert content["status"] == "ok"
        client.inspect("usermod")
        msg = client.get_shell_msg(timeout=TIMEOUT)
        content = msg["content"]
        assert content["found"]
        text = content["data"]["text/plain"]
        assert "DummyMod" in text


@flaky(max_runs=3)
def test_ipython_start_kernel_no_userns():
    # Issue #4188 - user_ns should be passed to shell as None, not {}
    cmd = dedent(
        """
        from ipykernel.kernelapp import launch_new_instance
        launch_new_instance()
        """
    )

    with setup_kernel(cmd) as client:
        # user_module should not be an instance of DummyMod
        client.execute("usermod = get_ipython().user_module")
        msg = client.get_shell_msg(timeout=TIMEOUT)
        content = msg["content"]
        assert content["status"] == "ok"
        client.inspect("usermod")
        msg = client.get_shell_msg(timeout=TIMEOUT)
        content = msg["content"]
        assert content["found"]
        text = content["data"]["text/plain"]
        assert "DummyMod" not in text
205
.venv/Lib/site-packages/ipykernel/tests/test_zmq_shell.py
Normal file
@ -0,0 +1,205 @@
""" Tests for zmq shell / display publisher. """

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import unittest
from queue import Queue
from threading import Thread

import zmq
from jupyter_client.session import Session
from traitlets import Int

from ipykernel.zmqshell import ZMQDisplayPublisher


class NoReturnDisplayHook:
    """
    A dummy DisplayHook which allows us to monitor
    the number of times an object is called, but which
    does *not* return a message when it is called.
    """

    call_count = 0

    def __call__(self, obj):
        self.call_count += 1


class ReturnDisplayHook(NoReturnDisplayHook):
    """
    A dummy DisplayHook with the same counting ability
    as its base class, but which also returns the same
    message when it is called.
    """

    def __call__(self, obj):
        super().__call__(obj)
        return obj


class CounterSession(Session):
    """
    This is a simple subclass to allow us to count
    the calls made to the session object by the display
    publisher.
    """

    send_count = Int(0)

    def send(self, *args, **kwargs):
        """
        A trivial override to just augment the existing call
        with an increment to the send counter.
        """
        self.send_count += 1
        super().send(*args, **kwargs)


class ZMQDisplayPublisherTests(unittest.TestCase):
    """
    Tests the ZMQDisplayPublisher in zmqshell.py
    """

    def setUp(self):
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.PUB)
        self.session = CounterSession()

        self.disp_pub = ZMQDisplayPublisher(session=self.session, pub_socket=self.socket)

    def tearDown(self):
        """
        We need to close the socket in order to proceed with the
        tests.
        TODO - There is still an open file handle to '/dev/null',
        presumably created by zmq.
        """
        self.disp_pub.clear_output()
        self.socket.close()
        self.context.term()

    def test_display_publisher_creation(self):
        """
        Since there's no explicit constructor, here we confirm
        that keyword args get assigned correctly, and override
        the defaults.
        """
        assert self.disp_pub.session == self.session
        assert self.disp_pub.pub_socket == self.socket

    def test_thread_local_hooks(self):
        """
        Confirms that the thread_local attribute is correctly
        initialised with an empty list for the display hooks
        """
        assert self.disp_pub._hooks == []

        def hook(msg):
            return msg

        self.disp_pub.register_hook(hook)
        assert self.disp_pub._hooks == [hook]

        q = Queue()

        def set_thread_hooks():
            q.put(self.disp_pub._hooks)

        t = Thread(target=set_thread_hooks)
        t.start()
        thread_hooks = q.get(timeout=10)
        assert thread_hooks == []

    def test_publish(self):
        """
        Publish should prepare the message and eventually call
        `send` by default.
        """
        data = dict(a=1)
        assert self.session.send_count == 0
        self.disp_pub.publish(data)
        assert self.session.send_count == 1

    def test_display_hook_halts_send(self):
        """
        If a hook is installed, and on calling the object
        it does *not* return a message, then we assume that
        the message has been consumed, and should not be
        processed (`sent`) in the normal manner.
        """
        data = dict(a=1)
        hook = NoReturnDisplayHook()

        self.disp_pub.register_hook(hook)
        assert hook.call_count == 0
        assert self.session.send_count == 0

        self.disp_pub.publish(data)

        assert hook.call_count == 1
        assert self.session.send_count == 0

    def test_display_hook_return_calls_send(self):
        """
        If a hook is installed and on calling the object
        it returns a new message, then we assume that this
        is just a message transformation, and the message
        should be sent in the usual manner.
        """
        data = dict(a=1)
        hook = ReturnDisplayHook()

        self.disp_pub.register_hook(hook)
        assert hook.call_count == 0
        assert self.session.send_count == 0

        self.disp_pub.publish(data)

        assert hook.call_count == 1
        assert self.session.send_count == 1

    def test_unregister_hook(self):
        """
        Once a hook is unregistered, it should not be called
        during `publish`.
        """
        data = dict(a=1)
        hook = NoReturnDisplayHook()

        self.disp_pub.register_hook(hook)
        assert hook.call_count == 0
        assert self.session.send_count == 0

        self.disp_pub.publish(data)

        assert hook.call_count == 1
        assert self.session.send_count == 0

        #
        # After unregistering the `NoReturn` hook, any calls
        # to publish should *not* go through the DisplayHook,
        # but should instead hit the usual `session.send` call
        # at the end.
        #
        # As a result, the hook call count should *not* increase,
        # but the session send count *should* increase.
        #
        first = self.disp_pub.unregister_hook(hook)
        self.disp_pub.publish(data)

        self.assertTrue(first)
        assert hook.call_count == 1
        assert self.session.send_count == 1

        #
        # If a hook is not installed, `unregister_hook`
        # should return False.
        #
        second = self.disp_pub.unregister_hook(hook)
        self.assertFalse(second)


if __name__ == "__main__":
    unittest.main()
213
.venv/Lib/site-packages/ipykernel/tests/utils.py
Normal file
@ -0,0 +1,213 @@
"""utilities for testing IPython kernels"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import atexit
import os
import sys
from contextlib import contextmanager
from queue import Empty
from subprocess import STDOUT
from tempfile import TemporaryDirectory
from time import time

from jupyter_client import manager

STARTUP_TIMEOUT = 60
TIMEOUT = 100

KM = None
KC = None


def start_new_kernel(**kwargs):
    """start a new kernel, and return its Manager and Client

    Integrates with our output capturing for tests.
    """
    kwargs["stderr"] = STDOUT
    try:
        import nose

        kwargs["stdout"] = nose.iptest_stdstreams_fileno()
    except (ImportError, AttributeError):
        pass
    return manager.start_new_kernel(startup_timeout=STARTUP_TIMEOUT, **kwargs)


def flush_channels(kc=None):
    """flush any messages waiting on the queue"""
    from .test_message_spec import validate_message

    if kc is None:
        kc = KC
    for get_msg in (kc.get_shell_msg, kc.get_iopub_msg):
        while True:
            try:
                msg = get_msg(timeout=0.1)
            except Empty:
                break
            else:
                validate_message(msg)


def get_reply(kc, msg_id, timeout=TIMEOUT, channel="shell"):
    t0 = time()
    while True:
        get_msg = getattr(kc, f"get_{channel}_msg")
        reply = get_msg(timeout=timeout)
        if reply["parent_header"]["msg_id"] == msg_id:
            break
        # Allow debugging ignored replies
        print(f"Ignoring reply not to {msg_id}: {reply}")
        t1 = time()
        timeout -= t1 - t0
        t0 = t1
    return reply


def execute(code="", kc=None, **kwargs):
    """wrapper for doing common steps for validating an execution request"""
    from .test_message_spec import validate_message

    if kc is None:
        kc = KC
    msg_id = kc.execute(code=code, **kwargs)
    reply = get_reply(kc, msg_id, TIMEOUT)
    validate_message(reply, "execute_reply", msg_id)
    busy = kc.get_iopub_msg(timeout=TIMEOUT)
    validate_message(busy, "status", msg_id)
    assert busy["content"]["execution_state"] == "busy"

    if not kwargs.get("silent"):
        execute_input = kc.get_iopub_msg(timeout=TIMEOUT)
        validate_message(execute_input, "execute_input", msg_id)
        assert execute_input["content"]["code"] == code

    # show tracebacks if present for debugging
    if reply["content"].get("traceback"):
        print("\n".join(reply["content"]["traceback"]), file=sys.stderr)

    return msg_id, reply["content"]


def start_global_kernel():
    """start the global kernel (if it isn't running) and return its client"""
    global KM, KC
    if KM is None:
        KM, KC = start_new_kernel()
        atexit.register(stop_global_kernel)
    else:
        flush_channels(KC)
    return KC


@contextmanager
def kernel():
    """Context manager for the global kernel instance

    Should be used for most kernel tests

    Returns
    -------
    kernel_client: connected KernelClient instance
    """
    yield start_global_kernel()


def uses_kernel(test_f):
    """Decorator for tests that use the global kernel"""

    def wrapped_test():
        with kernel() as kc:
            test_f(kc)

    wrapped_test.__doc__ = test_f.__doc__
    wrapped_test.__name__ = test_f.__name__
    return wrapped_test


def stop_global_kernel():
    """Stop the global shared kernel instance, if it exists"""
    global KM, KC
    KC.stop_channels()
    KC = None
    if KM is None:
        return
    KM.shutdown_kernel(now=True)
    KM = None


def new_kernel(argv=None):
    """Context manager for a new kernel in a subprocess

    Should only be used for tests where the kernel must not be re-used.

    Returns
    -------
    kernel_client: connected KernelClient instance
    """
    kwargs = {"stderr": STDOUT}
    try:
        import nose

        kwargs["stdout"] = nose.iptest_stdstreams_fileno()
    except (ImportError, AttributeError):
        pass
    if argv is not None:
        kwargs["extra_arguments"] = argv
    return manager.run_kernel(**kwargs)


def assemble_output(get_msg):
    """assemble stdout/err from an execution"""
    stdout = ""
    stderr = ""
    while True:
        msg = get_msg(timeout=1)
        msg_type = msg["msg_type"]
        content = msg["content"]
        if msg_type == "status" and content["execution_state"] == "idle":
            # idle message signals end of output
            break
        elif msg["msg_type"] == "stream":
            if content["name"] == "stdout":
                stdout += content["text"]
            elif content["name"] == "stderr":
                stderr += content["text"]
            else:
                raise KeyError("bad stream: %r" % content["name"])
        else:
            # other output, ignored
            pass
    return stdout, stderr


def wait_for_idle(kc):
    while True:
        msg = kc.get_iopub_msg(timeout=1)
        msg_type = msg["msg_type"]
        content = msg["content"]
        if msg_type == "status" and content["execution_state"] == "idle":
            break


class TemporaryWorkingDirectory(TemporaryDirectory):
    """
    Creates a temporary directory and sets the cwd to that directory.
    Automatically reverts to previous cwd upon cleanup.
    Usage example:

        with TemporaryWorkingDirectory() as tmpdir:
            ...
    """

    def __enter__(self):
        self.old_wd = os.getcwd()
        os.chdir(self.name)
        return super().__enter__()

    def __exit__(self, exc, value, tb):
        os.chdir(self.old_wd)
        return super().__exit__(exc, value, tb)
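These helpers compose into very small kernel tests. A hedged sketch of a hypothetical test module built on them (`test_addition` is invented for illustration; `execute` returns the msg_id and the reply *content*, as used throughout the suite):

    from ipykernel.tests.utils import execute, flush_channels, kernel

    def test_addition():
        with kernel() as kc:            # shared global kernel
            flush_channels(kc)
            msg_id, reply = execute("x = 1 + 1", kc=kc, user_expressions={"x": "x"})
            assert reply["status"] == "ok"
            assert reply["user_expressions"]["x"]["data"]["text/plain"] == "2"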
57
.venv/Lib/site-packages/ipykernel/trio_runner.py
Normal file
@ -0,0 +1,57 @@
import builtins
import logging
import signal
import threading
import traceback
import warnings

import trio


class TrioRunner:
    def __init__(self):
        self._cell_cancel_scope = None
        self._trio_token = None

    def initialize(self, kernel, io_loop):
        kernel.shell.set_trio_runner(self)
        kernel.shell.run_line_magic("autoawait", "trio")
        kernel.shell.magics_manager.magics["line"]["autoawait"] = lambda _: warnings.warn(
            "Autoawait isn't allowed in Trio background loop mode."
        )
        bg_thread = threading.Thread(target=io_loop.start, daemon=True, name="TornadoBackground")
        bg_thread.start()

    def interrupt(self, signum, frame):
        if self._cell_cancel_scope:
            self._cell_cancel_scope.cancel()
        else:
            raise Exception("Kernel interrupted but no cell is running")

    def run(self):
        old_sig = signal.signal(signal.SIGINT, self.interrupt)

        def log_nursery_exc(exc):
            exc = "\n".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
            logging.error("An exception occurred in a global nursery task.\n%s", exc)

        async def trio_main():
            self._trio_token = trio.lowlevel.current_trio_token()
            async with trio.open_nursery() as nursery:
                # TODO This hack prevents the nursery from cancelling all child
                # tasks when an uncaught exception occurs, but it's ugly.
                nursery._add_exc = log_nursery_exc
                builtins.GLOBAL_NURSERY = nursery  # type:ignore[attr-defined]
                await trio.sleep_forever()

        trio.run(trio_main)
        signal.signal(signal.SIGINT, old_sig)

    def __call__(self, async_fn):
        async def loc(coro):
            self._cell_cancel_scope = trio.CancelScope()
            try:
                with self._cell_cancel_scope:
                    return await coro
            finally:
                # reset once the cell finishes; placing this after `return`
                # (as originally written) made it unreachable
                self._cell_cancel_scope = None

        return trio.from_thread.run(loc, async_fn, trio_token=self._trio_token)
639
.venv/Lib/site-packages/ipykernel/zmqshell.py
Normal file
@ -0,0 +1,639 @@
"""A ZMQ-based subclass of InteractiveShell.

This code is meant to ease the refactoring of the base InteractiveShell into
something with a cleaner architecture for 2-process use, without actually
breaking InteractiveShell itself. So we're doing something a bit ugly, where
we subclass and override what we want to fix. Once this is working well, we
can go back to the base class and refactor the code for a cleaner inheritance
implementation that doesn't rely on so much monkeypatching.

But this lets us maintain a fully working IPython as we develop the new
machinery. This should thus be thought of as scaffolding.
"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import os
import sys
import warnings
from threading import local

from IPython.core import page, payloadpage
from IPython.core.autocall import ZMQExitAutocall
from IPython.core.displaypub import DisplayPublisher
from IPython.core.error import UsageError
from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC
from IPython.core.magic import Magics, line_magic, magics_class
from IPython.core.magics import CodeMagics, MacroToEdit
from IPython.core.usage import default_banner
from IPython.display import Javascript, display
from IPython.utils import openpy
from IPython.utils.process import arg_split, system
from jupyter_client.session import Session, extract_header
from jupyter_core.paths import jupyter_runtime_dir
from traitlets import Any, CBool, CBytes, Dict, Instance, Type, default, observe

from ipykernel import connect_qtconsole, get_connection_file, get_connection_info
from ipykernel.displayhook import ZMQShellDisplayHook
from ipykernel.jsonutil import encode_images, json_clean

# -----------------------------------------------------------------------------
# Functions and classes
# -----------------------------------------------------------------------------


class ZMQDisplayPublisher(DisplayPublisher):
    """A display publisher that publishes data using a ZeroMQ PUB socket."""

    session = Instance(Session, allow_none=True)
    pub_socket = Any(allow_none=True)
    parent_header = Dict({})
    topic = CBytes(b"display_data")

    # thread_local:
    # An attribute used to ensure the correct output message
    # is processed. See ipykernel Issue 113 for a discussion.
    _thread_local = Any()

    def set_parent(self, parent):
        """Set the parent for outbound messages."""
        self.parent_header = extract_header(parent)

    def _flush_streams(self):
        """flush IO Streams prior to display"""
        sys.stdout.flush()
        sys.stderr.flush()

    @default("_thread_local")
    def _default_thread_local(self):
        """Initialize our thread local storage"""
        return local()

    @property
    def _hooks(self):
        if not hasattr(self._thread_local, "hooks"):
            # create new list for a new thread
            self._thread_local.hooks = []
        return self._thread_local.hooks

    def publish(
        self,
        data,
        metadata=None,
        transient=None,
        update=False,
    ):
        """Publish a display-data message

        Parameters
        ----------
        data : dict
            A mime-bundle dict, keyed by mime-type.
        metadata : dict, optional
            Metadata associated with the data.
        transient : dict, optional, keyword-only
            Transient data that may only be relevant during a live display,
            such as display_id.
            Transient data should not be persisted to documents.
        update : bool, optional, keyword-only
            If True, send an update_display_data message instead of display_data.
        """
        self._flush_streams()
        if metadata is None:
            metadata = {}
        if transient is None:
            transient = {}
        self._validate_data(data, metadata)
        content = {}
        content["data"] = encode_images(data)
        content["metadata"] = metadata
        content["transient"] = transient

        msg_type = "update_display_data" if update else "display_data"

        # Use 2-stage process to send a message,
        # in order to put it through the transform
        # hooks before potentially sending.
        msg = self.session.msg(msg_type, json_clean(content), parent=self.parent_header)

        # Each transform either returns a new
        # message or None. If None is returned,
        # the message has been 'used' and we return.
        for hook in self._hooks:
            msg = hook(msg)
            if msg is None:
                return

        self.session.send(
            self.pub_socket,
            msg,
            ident=self.topic,
        )

    def clear_output(self, wait=False):
        """Clear output associated with the current execution (cell).

        Parameters
        ----------
        wait : bool (default: False)
            If True, the output will not be cleared immediately,
            instead waiting for the next display before clearing.
            This reduces bounce during repeated clear & display loops.

        """
        content = dict(wait=wait)
        self._flush_streams()
        self.session.send(
            self.pub_socket,
            "clear_output",
            content,
            parent=self.parent_header,
            ident=self.topic,
        )

    def register_hook(self, hook):
        """
        Registers a hook with the thread-local storage.

        Parameters
        ----------
        hook : Any callable object

        Notes
        -----
        Hooks are called in registration order with each outgoing message.
        A hook must return a message if it still requires the
        `session.send` method to be called after transformation.
        Returning `None` halts that execution path, and
        `session.send` will not be called.
        """
        self._hooks.append(hook)

    def unregister_hook(self, hook):
        """
        Un-registers a hook with the thread-local storage.

        Parameters
        ----------
        hook : Any callable object which has previously been
            registered as a hook.

        Returns
        -------
        bool - `True` if the hook was removed, `False` if it wasn't
            found.
        """
        try:
            self._hooks.remove(hook)
            return True
        except ValueError:
            return False


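Taken together, `register_hook`/`unregister_hook` and the transform loop in `publish` give frontends a thread-local intercept point. A hedged sketch of a pass-through hook (assumes a running ipykernel session, where `get_ipython().display_pub` is a ZMQDisplayPublisher; `stamp_hook` is invented for the example):

    def stamp_hook(msg):
        # annotate the outgoing display message, then let sending continue
        msg["content"]["metadata"]["stamped"] = True
        return msg

    pub = get_ipython().display_pub
    pub.register_hook(stamp_hook)
    try:
        pub.publish({"text/plain": "hello"})
    finally:
        assert pub.unregister_hook(stamp_hook)  # True: the hook was removed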
@magics_class
|
||||
class KernelMagics(Magics):
|
||||
# ------------------------------------------------------------------------
|
||||
# Magic overrides
|
||||
# ------------------------------------------------------------------------
|
||||
# Once the base class stops inheriting from magic, this code needs to be
|
||||
# moved into a separate machinery as well. For now, at least isolate here
|
||||
# the magics which this class needs to implement differently from the base
|
||||
# class, or that are unique to it.
|
||||
|
||||
_find_edit_target = CodeMagics._find_edit_target
|
||||
|
||||
@line_magic
|
||||
def edit(self, parameter_s="", last_call=None):
|
||||
"""Bring up an editor and execute the resulting code.
|
||||
|
||||
Usage:
|
||||
%edit [options] [args]
|
||||
|
||||
%edit runs an external text editor. You will need to set the command for
|
||||
this editor via the ``TerminalInteractiveShell.editor`` option in your
|
||||
configuration file before it will work.
|
||||
|
||||
This command allows you to conveniently edit multi-line code right in
|
||||
your IPython session.
|
||||
|
||||
If called without arguments, %edit opens up an empty editor with a
|
||||
temporary file and will execute the contents of this file when you
|
||||
close it (don't forget to save it!).
|
||||
|
||||
Options:
|
||||
|
||||
-n <number>
|
||||
Open the editor at a specified line number. By default, the IPython
|
||||
editor hook uses the unix syntax 'editor +N filename', but you can
|
||||
configure this by providing your own modified hook if your favorite
|
||||
editor supports line-number specifications with a different syntax.
|
||||
|
||||
-p
|
||||
Call the editor with the same data as the previous time it was used,
|
||||
regardless of how long ago (in your current session) it was.
|
||||
|
||||
-r
|
||||
Use 'raw' input. This option only applies to input taken from the
|
||||
user's history. By default, the 'processed' history is used, so that
|
||||
magics are loaded in their transformed version to valid Python. If
|
||||
this option is given, the raw input as typed as the command line is
|
||||
used instead. When you exit the editor, it will be executed by
|
||||
IPython's own processor.
|
||||
|
||||
Arguments:
|
||||
|
||||
If arguments are given, the following possibilities exist:
|
||||
|
||||
- The arguments are numbers or pairs of colon-separated numbers (like
|
||||
1 4:8 9). These are interpreted as lines of previous input to be
|
||||
loaded into the editor. The syntax is the same of the %macro command.
|
||||
|
||||
- If the argument doesn't start with a number, it is evaluated as a
|
||||
variable and its contents loaded into the editor. You can thus edit
|
||||
any string which contains python code (including the result of
|
||||
previous edits).
|
||||
|
||||
- If the argument is the name of an object (other than a string),
|
||||
IPython will try to locate the file where it was defined and open the
|
||||
editor at the point where it is defined. You can use ``%edit function``
|
||||
to load an editor exactly at the point where 'function' is defined,
|
||||
edit it and have the file be executed automatically.
|
||||
|
||||
If the object is a macro (see %macro for details), this opens up your
|
||||
specified editor with a temporary file containing the macro's data.
|
||||
Upon exit, the macro is reloaded with the contents of the file.
|
||||
|
||||
Note: opening at an exact line is only supported under Unix, and some
|
||||
editors (like kedit and gedit up to Gnome 2.8) do not understand the
|
||||
'+NUMBER' parameter necessary for this feature. Good editors like
|
||||
(X)Emacs, vi, jed, pico and joe all do.
|
||||
|
||||
- If the argument is not found as a variable, IPython will look for a
|
||||
file with that name (adding .py if necessary) and load it into the
|
||||
editor. It will execute its contents with execfile() when you exit,
|
||||
loading any code in the file into your interactive namespace.
|
||||
|
||||
Unlike in the terminal, this is designed to use a GUI editor, and we do
|
||||
not know when it has closed. So the file you edit will not be
|
||||
automatically executed or printed.
|
||||
|
||||
Note that %edit is also available through the alias %ed.
|
||||
"""
|
||||
last_call = last_call or ["", ""]
|
||||
opts, args = self.parse_options(parameter_s, "prn:")
|
||||
|
||||
try:
|
||||
filename, lineno, _ = CodeMagics._find_edit_target(self.shell, args, opts, last_call)
|
||||
except MacroToEdit:
|
||||
# TODO: Implement macro editing over 2 processes.
|
||||
print("Macro editing not yet implemented in 2-process model.")
|
||||
return
|
||||
|
||||
# Make sure we send to the client an absolute path, in case the working
|
||||
# directory of client and kernel don't match
|
||||
filename = os.path.abspath(filename)
|
||||
|
||||
payload = {"source": "edit_magic", "filename": filename, "line_number": lineno}
|
||||
self.shell.payload_manager.write_payload(payload)
|
||||
|
||||
# A few magics that are adapted to the specifics of using pexpect and a
|
||||
# remote terminal
|
||||
|
||||
@line_magic
|
||||
def clear(self, arg_s):
|
||||
"""Clear the terminal."""
|
||||
if os.name == "posix":
|
||||
self.shell.system("clear")
|
||||
else:
|
||||
self.shell.system("cls")
|
||||
|
||||
if os.name == "nt":
|
||||
# This is the usual name in windows
|
||||
cls = line_magic("cls")(clear)
|
||||
|
||||
# Terminal pagers won't work over pexpect, but we do have our own pager
|
||||
|
||||
@line_magic
|
||||
def less(self, arg_s):
|
||||
"""Show a file through the pager.
|
||||
|
||||
Files ending in .py are syntax-highlighted."""
|
||||
if not arg_s:
|
||||
raise UsageError("Missing filename.")
|
||||
|
||||
if arg_s.endswith(".py"):
|
||||
cont = self.shell.pycolorize(openpy.read_py_file(arg_s, skip_encoding_cookie=False))
|
||||
else:
|
||||
cont = open(arg_s).read()
|
||||
page.page(cont)
|
||||
|
||||
more = line_magic("more")(less)
|
||||
|
||||
# Man calls a pager, so we also need to redefine it
|
||||
if os.name == "posix":
|
||||
|
||||
@line_magic
|
||||
def man(self, arg_s):
|
||||
"""Find the man page for the given command and display in pager."""
|
||||
page.page(self.shell.getoutput("man %s | col -b" % arg_s, split=False))
|
||||
|
||||
@line_magic
|
||||
def connect_info(self, arg_s):
|
||||
"""Print information for connecting other clients to this kernel
|
||||
|
||||
It will print the contents of this session's connection file, as well as
|
||||
shortcuts for local clients.
|
||||
|
||||
In the simplest case, when called from the most recently launched kernel,
|
||||
secondary clients can be connected, simply with:
|
||||
|
||||
$> jupyter <app> --existing
|
||||
|
||||
"""
|
||||
|
||||
try:
|
||||
connection_file = get_connection_file()
|
||||
info = get_connection_info(unpack=False)
|
||||
except Exception as e:
|
||||
warnings.warn("Could not get connection info: %r" % e)
|
||||
return
|
||||
|
||||
# if it's in the default dir, truncate to basename
|
||||
if jupyter_runtime_dir() == os.path.dirname(connection_file):
|
||||
connection_file = os.path.basename(connection_file)
|
||||
|
||||
print(info + "\n")
|
||||
print(
|
||||
f"Paste the above JSON into a file, and connect with:\n"
|
||||
f" $> jupyter <app> --existing <file>\n"
|
||||
f"or, if you are local, you can connect with just:\n"
|
||||
f" $> jupyter <app> --existing {connection_file}\n"
|
||||
f"or even just:\n"
|
||||
f" $> jupyter <app> --existing\n"
|
||||
f"if this is the most recent Jupyter kernel you have started."
|
||||
)
|
||||
|
||||
@line_magic
|
||||
def qtconsole(self, arg_s):
|
||||
"""Open a qtconsole connected to this kernel.
|
||||
|
||||
Useful for connecting a qtconsole to running notebooks, for better
|
||||
debugging.
|
||||
"""
|
||||
|
||||
# %qtconsole should imply bind_kernel for engines:
|
||||
# FIXME: move to ipyparallel Kernel subclass
|
||||
if "ipyparallel" in sys.modules:
|
||||
from ipyparallel import bind_kernel
|
||||
|
||||
bind_kernel()
|
||||
|
||||
try:
|
||||
connect_qtconsole(argv=arg_split(arg_s, os.name == "posix"))
|
||||
except Exception as e:
|
||||
warnings.warn("Could not start qtconsole: %r" % e)
|
||||
return
|
||||
|
    @line_magic
    def autosave(self, arg_s):
        """Set the autosave interval in the notebook (in seconds).

        The default value is 120, or two minutes.
        ``%autosave 0`` will disable autosave.

        This magic only has an effect when called from the notebook interface.
        It has no effect when called in a startup file.
        """

        try:
            interval = int(arg_s)
        except ValueError as e:
            raise UsageError("%%autosave requires an integer, got %r" % arg_s) from e

        # javascript wants milliseconds
        milliseconds = 1000 * interval
        display(
            Javascript("IPython.notebook.set_autosave_interval(%i)" % milliseconds),
            include=["application/javascript"],
        )
        if interval:
            print("Autosaving every %i seconds" % interval)
        else:
            print("Autosave disabled")
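
    # The arithmetic above is a plain seconds-to-milliseconds conversion,
    # e.g. ``%autosave 300`` asks the classic notebook frontend to save every
    # 300 * 1000 = 300000 ms (five minutes).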


class ZMQInteractiveShell(InteractiveShell):
    """A subclass of InteractiveShell for ZMQ."""

    displayhook_class = Type(ZMQShellDisplayHook)
    display_pub_class = Type(ZMQDisplayPublisher)
    data_pub_class = Any()
    kernel = Any()
    parent_header = Any()

    @default("banner1")
    def _default_banner1(self):
        return default_banner

    # Override the traitlet in the parent class, because there's no point using
    # readline for the kernel. Can be removed when the readline code is moved
    # to the terminal frontend.
    readline_use = CBool(False)
    # autoindent has no meaning in a zmqshell, and attempting to enable it
    # will print a warning in the absence of readline.
    autoindent = CBool(False)

    exiter = Instance(ZMQExitAutocall)

    @default("exiter")
    def _default_exiter(self):
        return ZMQExitAutocall(self)

    @observe("exit_now")
    def _update_exit_now(self, change):
        """stop eventloop when exit_now fires"""
        if change["new"]:
            if hasattr(self.kernel, "io_loop"):
                loop = self.kernel.io_loop
                loop.call_later(0.1, loop.stop)
            if self.kernel.eventloop:
                exit_hook = getattr(self.kernel.eventloop, "exit_hook", None)
                if exit_hook:
                    exit_hook(self.kernel)

    keepkernel_on_exit = None

    # Over ZeroMQ, GUI control isn't done with PyOS_InputHook as there is no
    # interactive input being read; we provide event loop support in ipykernel
    def enable_gui(self, gui):
        from .eventloops import enable_gui as real_enable_gui

        try:
            real_enable_gui(gui)
            self.active_eventloop = gui
        except ValueError as e:
            raise UsageError("%s" % e) from e

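    # Illustrative usage (sketch, not part of the upstream source): the %gui
    # magic funnels into this method, so running ``%gui qt`` in a connected
    # client asks the kernel to pump the Qt event loop between requests; an
    # unsupported loop name surfaces as a UsageError via the ValueError path
    # above.
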
    def init_environment(self):
        """Configure the user's environment."""
        env = os.environ
        # These two ensure 'ls' produces nice coloring on BSD-derived systems
        env["TERM"] = "xterm-color"
        env["CLICOLOR"] = "1"
        # Since normal pagers don't work at all (over pexpect we don't have
        # single-key control of the subprocess), try to disable paging in
        # subprocesses as much as possible.
        env["PAGER"] = "cat"
        env["GIT_PAGER"] = "cat"

    def init_hooks(self):
        super().init_hooks()
        self.set_hook("show_in_pager", page.as_hook(payloadpage.page), 99)

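    # With PAGER and GIT_PAGER forced to ``cat``, subprocesses such as
    # ``git log`` stream straight into the kernel's captured stdout instead
    # of blocking on an interactive pager the kernel cannot drive.
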
    def init_data_pub(self):
        """Delay datapub init until request, for deprecation warnings"""
        pass

    @property
    def data_pub(self):
        if not hasattr(self, "_data_pub"):
            warnings.warn(
                "InteractiveShell.data_pub is deprecated outside IPython parallel.",
                DeprecationWarning,
                stacklevel=2,
            )

            self._data_pub = self.data_pub_class(parent=self)
            self._data_pub.session = self.display_pub.session
            self._data_pub.pub_socket = self.display_pub.pub_socket
        return self._data_pub

    @data_pub.setter
    def data_pub(self, pub):
        self._data_pub = pub

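    # The publisher above is created lazily so the DeprecationWarning fires on
    # first *use* rather than at shell construction. A minimal sketch of the
    # same idiom (hypothetical names, not upstream API):
    #
    #     @property
    #     def thing(self):
    #         if not hasattr(self, "_thing"):
    #             warnings.warn("thing is deprecated", DeprecationWarning,
    #                           stacklevel=2)
    #             self._thing = build_thing()
    #         return self._thing
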
    def ask_exit(self):
        """Engage the exit actions."""
        self.exit_now = not self.keepkernel_on_exit
        payload = dict(
            source="ask_exit",
            keepkernel=self.keepkernel_on_exit,
        )
        self.payload_manager.write_payload(payload)

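    # Sketch: the payload written above travels to the frontend in the
    # execute_reply message, roughly as
    #
    #     {"source": "ask_exit", "keepkernel": False}
    #
    # which tells a console client to exit and whether to leave the kernel
    # running.
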
    def run_cell(self, *args, **kwargs):
        self._last_traceback = None
        return super().run_cell(*args, **kwargs)

    def _showtraceback(self, etype, evalue, stb):
        # try to preserve ordering of tracebacks and print statements
        sys.stdout.flush()
        sys.stderr.flush()

        exc_content = {
            "traceback": stb,
            "ename": str(etype.__name__),
            "evalue": str(evalue),
        }

        dh = self.displayhook
        # Send exception info over pub socket for other clients than the caller
        # to pick up
        topic = None
        if dh.topic:
            topic = dh.topic.replace(b"execute_result", b"error")

        dh.session.send(
            dh.pub_socket,
            "error",
            json_clean(exc_content),
            dh.parent_header,
            ident=topic,
        )

        # FIXME - Once we rely on Python 3, the traceback is stored on the
        # exception object, so we shouldn't need to store it here.
        self._last_traceback = stb

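    # Sketch: the IOPub "error" message published above carries content of
    # the form (values illustrative)
    #
    #     {"ename": "ZeroDivisionError",
    #      "evalue": "division by zero",
    #      "traceback": ["<ANSI-colored frames>", "..."]}
    #
    # so clients other than the caller can render the failure too.
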
    def set_next_input(self, text, replace=False):
        """Send the specified text to the frontend to be presented at the next
        input cell."""
        payload = dict(
            source="set_next_input",
            text=text,
            replace=replace,
        )
        self.payload_manager.write_payload(payload)

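    # Illustrative usage (sketch, not part of the upstream source): magics
    # such as %load build on this; the frontend receives a payload like
    #
    #     {"source": "set_next_input", "text": "print('hi')", "replace": False}
    #
    # and pre-fills (or, with replace=True, overwrites) the next input cell.
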
    def set_parent(self, parent):
        """Set the parent header for associating output with its triggering input"""
        self.parent_header = parent
        self.displayhook.set_parent(parent)
        self.display_pub.set_parent(parent)
        if hasattr(self, "_data_pub"):
            self.data_pub.set_parent(parent)
        try:
            sys.stdout.set_parent(parent)  # type:ignore[attr-defined]
        except AttributeError:
            pass
        try:
            sys.stderr.set_parent(parent)  # type:ignore[attr-defined]
        except AttributeError:
            pass

    def get_parent(self):
        return self.parent_header

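    # The parent header is the header dict of the originating execute_request;
    # stamping it onto the displayhook, the display publisher, and the
    # stdout/stderr streams lets frontends match every piece of output back
    # to the cell that produced it via the parent's msg_id.
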
    def init_magics(self):
        super().init_magics()
        self.register_magics(KernelMagics)
        self.magics_manager.register_alias("ed", "edit")

    def init_virtualenv(self):
        # Overridden not to do virtualenv detection, because it's probably
        # not appropriate in a kernel. To use a kernel in a virtualenv, install
        # it inside the virtualenv.
        # https://ipython.readthedocs.io/en/latest/install/kernel_install.html
        pass

    def system_piped(self, cmd):
        """Call the given cmd in a subprocess, piping stdout/err

        Parameters
        ----------
        cmd : str
            Command to execute (cannot end in '&', as background processes
            are not supported). Should not be a command that expects input
            other than simple text.
        """
        if cmd.rstrip().endswith("&"):
            # this is *far* from a rigorous test
            # We do not support backgrounding processes because we either use
            # pexpect or pipes to read from. Users can always just call
            # os.system() or use ip.system=ip.system_raw
            # if they really want a background process.
            raise OSError("Background processes not supported.")

        # we explicitly do NOT return the subprocess status code, because
        # a non-None value would trigger :func:`sys.displayhook` calls.
        # Instead, we store the exit_code in user_ns.
        # Also, protect system call from UNC paths on Windows here too
        # as is done in InteractiveShell.system_raw
        if sys.platform == "win32":
            cmd = self.var_expand(cmd, depth=1)
            from IPython.utils._process_win32 import AvoidUNCPath

            with AvoidUNCPath() as path:
                if path is not None:
                    cmd = "pushd %s &&%s" % (path, cmd)
                self.user_ns["_exit_code"] = system(cmd)
        else:
            self.user_ns["_exit_code"] = system(self.var_expand(cmd, depth=1))

    # Ensure new system_piped implementation is used
    system = system_piped

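    # Illustrative usage (sketch, not part of the upstream source): from a
    # client session,
    #
    #     !echo hello     # runs through system_piped; output goes to stdout
    #     _exit_code      # now holds the command's exit status (0 here)
    #
    # while a trailing '&' such as ``!sleep 10 &`` raises OSError, per the
    # guard above.
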

InteractiveShellABC.register(ZMQInteractiveShell)