first commit

This commit is contained in:
Ayxan
2022-05-23 00:16:32 +04:00
commit d660f2a4ca
24786 changed files with 4428337 additions and 0 deletions

View File

@@ -0,0 +1,62 @@
import asyncio
import os
import sys
import pytest
from jupyter_core import paths
from .utils import test_env
try:
import resource
except ImportError:
# Windows
resource = None
# Shorthand used throughout the test suite for building paths.
pjoin = os.path.join
# Handle resource limit
# Ensure a minimal soft limit of DEFAULT_SOFT if the current hard limit is at least that much.
if resource is not None:
    # `resource` is None on Windows (import failed above).
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    DEFAULT_SOFT = 4096
    if hard >= DEFAULT_SOFT:
        soft = DEFAULT_SOFT
    if hard < soft:
        # Hard limit may never be below the soft limit; raise it to match.
        hard = soft
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
if os.name == "nt" and sys.version_info >= (3, 7):
    # On Windows, Python 3.8+ defaults to the proactor event loop, which
    # the selector-based code in these tests does not work with.
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
@pytest.fixture
def event_loop():
    """Provide a selector event loop for pytest-asyncio.

    pyzmq doesn't work with the proactor loop, so on Windows we force the
    selector policy before creating the loop.
    """
    if os.name == "nt" and sys.version_info >= (3, 7):
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    selector_loop = asyncio.SelectorEventLoop()
    try:
        yield selector_loop
    finally:
        selector_loop.close()
@pytest.fixture(autouse=True)
def env():
    """Automatically patch the test environment around every test."""
    patcher = test_env()
    patcher.start()
    yield
    patcher.stop()
@pytest.fixture()
def kernel_dir():
    """Return the kernels directory under the (patched) Jupyter data dir."""
    data_dir = paths.jupyter_data_dir()
    return pjoin(data_dir, 'kernels')

View File

@@ -0,0 +1,39 @@
"""Test kernel for signalling subprocesses"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import time
from ipykernel.displayhook import ZMQDisplayHook
from ipykernel.kernelapp import IPKernelApp
from ipykernel.kernelbase import Kernel
class ProblemTestKernel(Kernel):
    """Kernel for testing kernel problems"""

    # Minimal kernel-info metadata required by the Kernel base class.
    implementation = "problemtest"
    implementation_version = "0.0"
    banner = ""
class ProblemTestApp(IPKernelApp):
    """Kernel app that can be made to fail during socket setup for tests."""

    kernel_class = ProblemTestKernel

    def init_io(self):
        # Overridden to disable stdout/stderr capture
        self.displayhook = ZMQDisplayHook(self.session, self.iopub_socket)

    def init_sockets(self):
        # Raise during startup when FAIL_ON_START=1 is set in the environment.
        if os.environ.get("FAIL_ON_START") == "1":
            # Simulates e.g. a port binding issue (Address already in use)
            raise RuntimeError("Failed for testing purposes")
        return super().init_sockets()
if __name__ == "__main__":
    # make startup artificially slow,
    # so that we exercise client logic for slow-starting kernels
    # Delay (seconds) is configurable via the STARTUP_DELAY env var.
    startup_delay = int(os.environ.get("STARTUP_DELAY", "2"))
    time.sleep(startup_delay)
    ProblemTestApp.launch_instance()

View File

@@ -0,0 +1,77 @@
"""Test kernel for signalling subprocesses"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import signal
import time
from subprocess import PIPE
from subprocess import Popen
from ipykernel.displayhook import ZMQDisplayHook
from ipykernel.kernelapp import IPKernelApp
from ipykernel.kernelbase import Kernel
class SignalTestKernel(Kernel):
    """Kernel for testing subprocess signaling"""

    implementation = "signaltest"
    implementation_version = "0.0"
    banner = ""

    def __init__(self, **kwargs):
        # user_ns is not accepted by the Kernel base class; drop it.
        kwargs.pop("user_ns", None)
        super().__init__(**kwargs)
        # Child processes spawned by the "start" command.
        self.children = []
        if os.environ.get("NO_SIGTERM_REPLY", None) == "1":
            # Ignore SIGTERM so tests can exercise the SIGKILL escalation path.
            signal.signal(signal.SIGTERM, signal.SIG_IGN)

    async def shutdown_request(self, stream, ident, parent):
        # When NO_SHUTDOWN_REPLY=1, silently drop the shutdown request so
        # tests can exercise the signal-based shutdown fallback.
        if os.environ.get("NO_SHUTDOWN_REPLY") != "1":
            await super().shutdown_request(stream, ident, parent)

    def do_execute(
        self, code, silent, store_history=True, user_expressions=None, allow_stdin=False
    ):
        """Dispatch on the literal command in *code*.

        Supported commands: ``start`` (spawn a sleeping subprocess),
        ``check`` (poll children), ``env`` (report TEST_VARS), ``sleep``
        (block 10s, reporting whether it was interrupted). Anything else
        is an error reply.
        """
        code = code.strip()
        reply = {
            "status": "ok",
            "user_expressions": {},
        }
        if code == "start":
            # NOTE(review): assumes a POSIX shell environment ("bash") —
            # not usable on Windows.
            child = Popen(["bash", "-i", "-c", "sleep 30"], stderr=PIPE)
            self.children.append(child)
            reply["user_expressions"]["pid"] = self.children[-1].pid
        elif code == "check":
            reply["user_expressions"]["poll"] = [child.poll() for child in self.children]
        elif code == "env":
            reply["user_expressions"]["env"] = os.getenv("TEST_VARS", "")
        elif code == "sleep":
            try:
                time.sleep(10)
            except KeyboardInterrupt:
                # Raised when the client interrupts the kernel (SIGINT).
                reply["user_expressions"]["interrupted"] = True
            else:
                reply["user_expressions"]["interrupted"] = False
        else:
            reply["status"] = "error"
            reply["ename"] = "Error"
            reply["evalue"] = code
            reply["traceback"] = ["no such command: %s" % code]
        return reply
class SignalTestApp(IPKernelApp):
    """Kernel app hosting SignalTestKernel for the signal/shutdown tests."""

    kernel_class = SignalTestKernel

    def init_io(self):
        # Overridden to disable stdout/stderr capture
        self.displayhook = ZMQDisplayHook(self.session, self.iopub_socket)
if __name__ == "__main__":
    # make startup artificially slow,
    # so that we exercise client logic for slow-starting kernels
    # Delay (seconds) is configurable via STARTUP_DELAY, matching
    # problemkernel.py; the default of 2 preserves the previous behavior.
    startup_delay = int(os.environ.get("STARTUP_DELAY", "2"))
    time.sleep(startup_delay)
    SignalTestApp.launch_instance()

View File

@@ -0,0 +1,457 @@
"""Tests for adapting Jupyter msg spec versions"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import copy
import json
from unittest import TestCase
from jupyter_client.adapter import adapt
from jupyter_client.adapter import code_to_line
from jupyter_client.adapter import V4toV5
from jupyter_client.session import Session
def test_default_version():
    """adapt() stamps V4toV5.version onto a message lacking a version header."""
    session = Session()
    message = session.msg("msg_type")
    message["header"].pop("version")
    adapted = adapt(copy.deepcopy(message))
    assert adapted["header"]["version"] == V4toV5.version
def test_code_to_line_no_code():
    """code_to_line() on empty code yields an empty line and position 0."""
    assert code_to_line("", 0) == ("", 0)
class AdapterTest(TestCase):
    """Base class for msg-spec adaptation tests; subclasses set ``to_version``."""

    def setUp(self):
        self.session = Session()

    def adapt(self, msg, version=None):
        """Adapt *msg* to *version* (default: ``self.to_version``).

        Returns ``(original, adapted)`` where *original* is an untouched
        deep copy taken before adaptation (adapt() may mutate its input).
        """
        original = copy.deepcopy(msg)
        adapted = adapt(msg, version or self.to_version)
        return original, adapted

    def check_header(self, msg):
        # Hook for subclasses; no header checks by default.
        pass
class V4toV5TestCase(AdapterTest):
    """Adaptation of v4 messages up to the v5 message spec."""

    from_version = 4
    to_version = 5

    def msg(self, msg_type, content):
        """Create a v4 msg (same as v5, minus version header)"""
        msg = self.session.msg(msg_type, content)
        msg["header"].pop("version")
        return msg

    def test_same_version(self):
        """Adapting to the source version is a no-op."""
        msg = self.msg("execute_result", content={"status": "ok"})
        original, adapted = self.adapt(msg, self.from_version)
        self.assertEqual(original, adapted)

    def test_no_adapt(self):
        """Messages with no v4/v5 differences only gain a version header."""
        msg = self.msg("input_reply", {"value": "some text"})
        v4, v5 = self.adapt(msg)
        self.assertEqual(v5["header"]["version"], V4toV5.version)
        v5["header"].pop("version")
        self.assertEqual(v4, v5)

    def test_rename_type(self):
        """v4 msg types pyout/pyin/pyerr are renamed to their v5 names."""
        for v5_type, v4_type in [
            ("execute_result", "pyout"),
            ("execute_input", "pyin"),
            ("error", "pyerr"),
        ]:
            msg = self.msg(v4_type, {"key": "value"})
            v4, v5 = self.adapt(msg)
            self.assertEqual(v5["header"]["version"], V4toV5.version)
            self.assertEqual(v5["header"]["msg_type"], v5_type)
            self.assertEqual(v4["content"], v5["content"])

    def test_execute_request(self):
        """v4 user_variables are folded into v5 user_expressions."""
        msg = self.msg(
            "execute_request",
            {
                "code": "a=5",
                "silent": False,
                "user_expressions": {"a": "apple"},
                "user_variables": ["b"],
            },
        )
        v4, v5 = self.adapt(msg)
        self.assertEqual(v4["header"]["msg_type"], v5["header"]["msg_type"])
        v4c = v4["content"]
        v5c = v5["content"]
        self.assertEqual(v5c["user_expressions"], {"a": "apple", "b": "b"})
        self.assertNotIn("user_variables", v5c)
        self.assertEqual(v5c["code"], v4c["code"])

    def test_execute_reply(self):
        """user_variables merge into user_expressions; payloads gain mime data."""
        msg = self.msg(
            "execute_reply",
            {
                "status": "ok",
                "execution_count": 7,
                "user_variables": {"a": 1},
                "user_expressions": {"a+a": 2},
                "payload": [{"source": "page", "text": "blah"}],
            },
        )
        v4, v5 = self.adapt(msg)
        v5c = v5["content"]
        self.assertNotIn("user_variables", v5c)
        self.assertEqual(v5c["user_expressions"], {"a": 1, "a+a": 2})
        self.assertEqual(v5c["payload"], [{"source": "page", "data": {"text/plain": "blah"}}])

    def test_complete_request(self):
        """v4 text/line/block collapse into v5 code + cursor_pos."""
        msg = self.msg(
            "complete_request",
            {
                "text": "a.is",
                "line": "foo = a.is",
                "block": None,
                "cursor_pos": 10,
            },
        )
        v4, v5 = self.adapt(msg)
        v4c = v4["content"]
        v5c = v5["content"]
        for key in ("text", "line", "block"):
            self.assertNotIn(key, v5c)
        self.assertEqual(v5c["cursor_pos"], v4c["cursor_pos"])
        self.assertEqual(v5c["code"], v4c["line"])

    def test_complete_reply(self):
        """matched_text becomes cursor_start (negative offset from cursor_end)."""
        msg = self.msg(
            "complete_reply",
            {
                "matched_text": "a.is",
                "matches": [
                    "a.isalnum",
                    "a.isalpha",
                    "a.isdigit",
                    "a.islower",
                ],
            },
        )
        v4, v5 = self.adapt(msg)
        v4c = v4["content"]
        v5c = v5["content"]
        self.assertEqual(v5c["matches"], v4c["matches"])
        self.assertEqual(v5c["metadata"], {})
        self.assertEqual(v5c["cursor_start"], -4)
        self.assertEqual(v5c["cursor_end"], None)

    def test_object_info_request(self):
        """v4 object_info_request becomes v5 inspect_request (oname -> code)."""
        msg = self.msg(
            "object_info_request",
            {
                "oname": "foo",
                "detail_level": 1,
            },
        )
        v4, v5 = self.adapt(msg)
        self.assertEqual(v5["header"]["msg_type"], "inspect_request")
        v4c = v4["content"]
        v5c = v5["content"]
        self.assertEqual(v5c["code"], v4c["oname"])
        self.assertEqual(v5c["cursor_pos"], len(v4c["oname"]))
        self.assertEqual(v5c["detail_level"], v4c["detail_level"])

    def test_object_info_reply(self):
        """Definition and docstring are joined into text/plain inspect data."""
        msg = self.msg(
            "object_info_reply",
            {
                "name": "foo",
                "found": True,
                "status": "ok",
                "definition": "foo(a=5)",
                "docstring": "the docstring",
            },
        )
        v4, v5 = self.adapt(msg)
        self.assertEqual(v5["header"]["msg_type"], "inspect_reply")
        v4c = v4["content"]
        v5c = v5["content"]
        self.assertEqual(sorted(v5c), ["data", "found", "metadata", "status"])
        text = v5c["data"]["text/plain"]
        self.assertEqual(text, "\n".join([v4c["definition"], v4c["docstring"]]))

    def test_object_info_reply_not_found(self):
        """A not-found reply adapts to an ok inspect_reply with empty data."""
        msg = self.msg(
            "object_info_reply",
            {
                "name": "foo",
                "found": False,
            },
        )
        v4, v5 = self.adapt(msg)
        self.assertEqual(v5["header"]["msg_type"], "inspect_reply")
        v4["content"]
        v5c = v5["content"]
        self.assertEqual(
            v5c,
            {
                "status": "ok",
                "found": False,
                "data": {},
                "metadata": {},
            },
        )

    def test_kernel_info_reply(self):
        """v4 flat language fields become the v5 language_info structure."""
        msg = self.msg(
            "kernel_info_reply",
            {
                "language": "python",
                "language_version": [2, 8, 0],
                "ipython_version": [1, 2, 3],
            },
        )
        v4, v5 = self.adapt(msg)
        v4["content"]
        v5c = v5["content"]
        self.assertEqual(
            v5c,
            {
                "protocol_version": "4.1",
                "implementation": "ipython",
                "implementation_version": "1.2.3",
                "language_info": {
                    "name": "python",
                    "version": "2.8.0",
                },
                "banner": "",
            },
        )

    # iopub channel
    def test_display_data(self):
        """JSON-encoded application/json data is parsed into objects in v5."""
        jsondata = dict(a=5)
        msg = self.msg(
            "display_data",
            {
                "data": {
                    "text/plain": "some text",
                    "application/json": json.dumps(jsondata),
                },
                "metadata": {"text/plain": {"key": "value"}},
            },
        )
        v4, v5 = self.adapt(msg)
        v4c = v4["content"]
        v5c = v5["content"]
        self.assertEqual(v5c["metadata"], v4c["metadata"])
        self.assertEqual(v5c["data"]["text/plain"], v4c["data"]["text/plain"])
        self.assertEqual(v5c["data"]["application/json"], jsondata)

    # stdin channel
    def test_input_request(self):
        """v5 input_request gains a password field, defaulting to False."""
        msg = self.msg("input_request", {"prompt": "$>"})
        v4, v5 = self.adapt(msg)
        self.assertEqual(v5["content"]["prompt"], v4["content"]["prompt"])
        self.assertFalse(v5["content"]["password"])
class V5toV4TestCase(AdapterTest):
    """Adaptation of v5 messages down to the v4 message spec."""

    from_version = 5
    to_version = 4

    def msg(self, msg_type, content):
        """Create a v5 message (the session default)."""
        return self.session.msg(msg_type, content)

    def test_same_version(self):
        """Adapting to the source version is a no-op."""
        msg = self.msg("execute_result", content={"status": "ok"})
        original, adapted = self.adapt(msg, self.from_version)
        self.assertEqual(original, adapted)

    def test_no_adapt(self):
        """Downgrading strips the version header but changes nothing else here."""
        msg = self.msg("input_reply", {"value": "some text"})
        v5, v4 = self.adapt(msg)
        self.assertNotIn("version", v4["header"])
        v5["header"].pop("version")
        self.assertEqual(v4, v5)

    def test_rename_type(self):
        """v5 msg types are renamed back to their v4 py* names."""
        for v5_type, v4_type in [
            ("execute_result", "pyout"),
            ("execute_input", "pyin"),
            ("error", "pyerr"),
        ]:
            msg = self.msg(v5_type, {"key": "value"})
            v5, v4 = self.adapt(msg)
            self.assertEqual(v4["header"]["msg_type"], v4_type)
            assert "version" not in v4["header"]
            self.assertEqual(v4["content"], v5["content"])

    def test_execute_request(self):
        """v4 requires user_variables; downgrade adds it back as empty."""
        msg = self.msg(
            "execute_request",
            {
                "code": "a=5",
                "silent": False,
                "user_expressions": {"a": "apple"},
            },
        )
        v5, v4 = self.adapt(msg)
        self.assertEqual(v4["header"]["msg_type"], v5["header"]["msg_type"])
        v4c = v4["content"]
        v5c = v5["content"]
        self.assertEqual(v4c["user_variables"], [])
        self.assertEqual(v5c["code"], v4c["code"])

    def test_complete_request(self):
        """v5 code + cursor_pos are split into v4 line + line-relative pos."""
        msg = self.msg(
            "complete_request",
            {
                "code": "def foo():\n    a.is\nfoo()",
                "cursor_pos": 19,
            },
        )
        v5, v4 = self.adapt(msg)
        v4c = v4["content"]
        v5c = v5["content"]
        self.assertNotIn("code", v4c)
        self.assertEqual(v4c["line"], v5c["code"].splitlines(True)[1])
        self.assertEqual(v4c["cursor_pos"], 8)
        self.assertEqual(v4c["text"], "")
        self.assertEqual(v4c["block"], None)

    def test_complete_reply(self):
        """cursor_start/cursor_end are turned back into matched_text."""
        msg = self.msg(
            "complete_reply",
            {
                "cursor_start": 10,
                "cursor_end": 14,
                "matches": [
                    "a.isalnum",
                    "a.isalpha",
                    "a.isdigit",
                    "a.islower",
                ],
                "metadata": {},
            },
        )
        v5, v4 = self.adapt(msg)
        v4c = v4["content"]
        v5c = v5["content"]
        self.assertEqual(v4c["matched_text"], "a.is")
        self.assertEqual(v4c["matches"], v5c["matches"])

    def test_inspect_request(self):
        """The object name at cursor_pos is extracted into v4 oname."""
        msg = self.msg(
            "inspect_request",
            {
                "code": "def foo():\n    apple\nbar()",
                "cursor_pos": 18,
                "detail_level": 1,
            },
        )
        v5, v4 = self.adapt(msg)
        self.assertEqual(v4["header"]["msg_type"], "object_info_request")
        v4c = v4["content"]
        v5c = v5["content"]
        self.assertEqual(v4c["oname"], "apple")
        self.assertEqual(v5c["detail_level"], v4c["detail_level"])

    def test_inspect_request_token(self):
        """Dotted names are extracted whole as the v4 oname."""
        line = "something(range(10), kwarg=smth) ; xxx.xxx.xxx( firstarg, rand(234,23), kwarg1=2,"
        msg = self.msg(
            "inspect_request",
            {
                "code": line,
                "cursor_pos": len(line) - 1,
                "detail_level": 1,
            },
        )
        v5, v4 = self.adapt(msg)
        self.assertEqual(v4["header"]["msg_type"], "object_info_request")
        v4c = v4["content"]
        v5c = v5["content"]
        self.assertEqual(v4c["oname"], "xxx.xxx.xxx")
        self.assertEqual(v5c["detail_level"], v4c["detail_level"])

    def test_inspect_reply(self):
        """Rich inspect data cannot be downgraded: v4 reply reports not-found."""
        msg = self.msg(
            "inspect_reply",
            {
                "name": "foo",
                "found": True,
                "data": {"text/plain": "some text"},
                "metadata": {},
            },
        )
        v5, v4 = self.adapt(msg)
        self.assertEqual(v4["header"]["msg_type"], "object_info_reply")
        v4c = v4["content"]
        v5["content"]
        self.assertEqual(sorted(v4c), ["found", "oname"])
        self.assertEqual(v4c["found"], False)

    def test_kernel_info_reply(self):
        """language_info is flattened and version strings become int lists."""
        msg = self.msg(
            "kernel_info_reply",
            {
                "protocol_version": "5.0",
                "implementation": "ipython",
                "implementation_version": "1.2.3",
                "language_info": {
                    "name": "python",
                    "version": "2.8.0",
                    "mimetype": "text/x-python",
                },
                "banner": "the banner",
            },
        )
        v5, v4 = self.adapt(msg)
        v4c = v4["content"]
        v5c = v5["content"]
        v5c["language_info"]
        self.assertEqual(
            v4c,
            {
                "protocol_version": [5, 0],
                "language": "python",
                "language_version": [2, 8, 0],
                "ipython_version": [1, 2, 3],
            },
        )

    # iopub channel
    def test_display_data(self):
        """application/json objects are serialized back to strings for v4."""
        jsondata = dict(a=5)
        msg = self.msg(
            "display_data",
            {
                "data": {
                    "text/plain": "some text",
                    "application/json": jsondata,
                },
                "metadata": {"text/plain": {"key": "value"}},
            },
        )
        v5, v4 = self.adapt(msg)
        v4c = v4["content"]
        v5c = v5["content"]
        self.assertEqual(v5c["metadata"], v4c["metadata"])
        self.assertEqual(v5c["data"]["text/plain"], v4c["data"]["text/plain"])
        self.assertEqual(v4c["data"]["application/json"], json.dumps(jsondata))

    # stdin channel
    def test_input_request(self):
        """The v5-only password field is dropped when downgrading."""
        msg = self.msg("input_request", {"prompt": "$>", "password": True})
        v5, v4 = self.adapt(msg)
        self.assertEqual(v5["content"]["prompt"], v4["content"]["prompt"])
        self.assertNotIn("password", v4["content"])

View File

@@ -0,0 +1,94 @@
"""Tests for the KernelClient"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from unittest import TestCase
import pytest
from IPython.utils.capture import capture_output
from ..manager import start_new_kernel
from .utils import test_env
from jupyter_client.kernelspec import KernelSpecManager
from jupyter_client.kernelspec import NATIVE_KERNEL_NAME
from jupyter_client.kernelspec import NoSuchKernel
TIMEOUT = 30
pjoin = os.path.join
class TestKernelClient(TestCase):
    """Exercise the blocking KernelClient against a real native kernel."""

    def setUp(self):
        self.env_patch = test_env()
        self.env_patch.start()
        # addCleanup guarantees the env patch is reverted, even when setUp
        # fails below before a kernel is started.
        self.addCleanup(self.env_patch.stop)
        try:
            KernelSpecManager().get_kernel_spec(NATIVE_KERNEL_NAME)
        except NoSuchKernel:
            pytest.skip()
        self.km, self.kc = start_new_kernel(kernel_name=NATIVE_KERNEL_NAME)

    def tearDown(self):
        # env_patch is stopped by the addCleanup() registered in setUp;
        # stopping it here as well would stop the patcher twice.
        self.km.shutdown_kernel()
        self.kc.stop_channels()
        return super().tearDown()

    def test_execute_interactive(self):
        """execute_interactive() streams output and returns the reply."""
        kc = self.kc
        with capture_output() as io:
            reply = kc.execute_interactive("print('hello')", timeout=TIMEOUT)
        assert "hello" in io.stdout
        assert reply["content"]["status"] == "ok"

    def _check_reply(self, reply_type, reply):
        # Sanity-check the reply routing: correct type and matching parent.
        self.assertIsInstance(reply, dict)
        self.assertEqual(reply["header"]["msg_type"], reply_type + "_reply")
        self.assertEqual(reply["parent_header"]["msg_type"], reply_type + "_request")

    def test_history(self):
        kc = self.kc
        msg_id = kc.history(session=0)
        self.assertIsInstance(msg_id, str)
        reply = kc.history(session=0, reply=True, timeout=TIMEOUT)
        self._check_reply("history", reply)

    def test_inspect(self):
        kc = self.kc
        msg_id = kc.inspect("who cares")
        self.assertIsInstance(msg_id, str)
        reply = kc.inspect("code", reply=True, timeout=TIMEOUT)
        self._check_reply("inspect", reply)

    def test_complete(self):
        kc = self.kc
        msg_id = kc.complete("who cares")
        self.assertIsInstance(msg_id, str)
        reply = kc.complete("code", reply=True, timeout=TIMEOUT)
        self._check_reply("complete", reply)

    def test_kernel_info(self):
        kc = self.kc
        msg_id = kc.kernel_info()
        self.assertIsInstance(msg_id, str)
        reply = kc.kernel_info(reply=True, timeout=TIMEOUT)
        self._check_reply("kernel_info", reply)

    def test_comm_info(self):
        kc = self.kc
        msg_id = kc.comm_info()
        self.assertIsInstance(msg_id, str)
        reply = kc.comm_info(reply=True, timeout=TIMEOUT)
        self._check_reply("comm_info", reply)

    def test_shutdown(self):
        kc = self.kc
        reply = kc.shutdown(reply=True, timeout=TIMEOUT)
        self._check_reply("shutdown", reply)

    def test_shutdown_id(self):
        kc = self.kc
        msg_id = kc.shutdown()
        self.assertIsInstance(msg_id, str)

View File

@@ -0,0 +1,237 @@
"""Tests for kernel connection utilities"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import os
from tempfile import TemporaryDirectory
from jupyter_core.application import JupyterApp
from jupyter_core.paths import jupyter_runtime_dir
from jupyter_client import connect
from jupyter_client import KernelClient
from jupyter_client.consoleapp import JupyterConsoleApp
from jupyter_client.session import Session
class TemporaryWorkingDirectory(TemporaryDirectory):
    """Temporary directory that also becomes the working directory.

    Entering the context chdirs into the freshly created directory; leaving
    it restores the previous working directory before cleanup runs.
    """

    def __enter__(self):
        self.old_wd = os.getcwd()
        os.chdir(self.name)
        dirname = super().__enter__()
        return dirname

    def __exit__(self, exc, value, tb):
        os.chdir(self.old_wd)
        result = super().__exit__(exc, value, tb)
        return result
class DummyConsoleApp(JupyterApp, JupyterConsoleApp):
    """Minimal console app: just enough initialization to load connection files."""

    def initialize(self, argv=None):
        JupyterApp.initialize(self, argv=argv or [])
        self.init_connection_file()
class DummyConfigurable(connect.ConnectionFileMixin):
    """Bare ConnectionFileMixin for testing connection-file helpers directly."""

    def initialize(self):
        pass
# Reference connection-file contents shared by the tests below.
sample_info = dict(
    ip="1.2.3.4",
    transport="ipc",
    shell_port=1,
    hb_port=2,
    iopub_port=3,
    stdin_port=4,
    control_port=5,
    key=b"abc123",
    signature_scheme="hmac-md5",
    kernel_name="python",
)
# Identical connection info under a non-native kernel name.
sample_info_kn = dict(sample_info, kernel_name="test")
def test_write_connection_file():
    """write_connection_file() round-trips sample_info through JSON on disk."""
    with TemporaryDirectory() as tmpdir:
        connection_file = os.path.join(tmpdir, "kernel.json")
        connect.write_connection_file(connection_file, **sample_info)
        assert os.path.exists(connection_file)
        with open(connection_file, "r") as f:
            loaded = json.load(f)
        # the key is serialized as str; encode back to bytes for comparison
        loaded["key"] = loaded["key"].encode()
        assert loaded == sample_info
def test_load_connection_file_session():
    """load_connection_file() updates the app session's key and scheme."""
    # (removed a dead `session = Session()` that was immediately overwritten
    # by `app.session` below)
    app = DummyConsoleApp(session=Session())
    app.initialize(argv=[])
    session = app.session
    with TemporaryDirectory() as d:
        cf = os.path.join(d, "kernel.json")
        connect.write_connection_file(cf, **sample_info)
        app.connection_file = cf
        app.load_connection_file()
        assert session.key == sample_info["key"]
        assert session.signature_scheme == sample_info["signature_scheme"]
def test_load_connection_file_session_with_kn():
    """Like test_load_connection_file_session, with a non-native kernel name."""
    # (removed a dead `session = Session()` that was immediately overwritten
    # by `app.session` below)
    app = DummyConsoleApp(session=Session())
    app.initialize(argv=[])
    session = app.session
    with TemporaryDirectory() as d:
        cf = os.path.join(d, "kernel.json")
        connect.write_connection_file(cf, **sample_info_kn)
        app.connection_file = cf
        app.load_connection_file()
        assert session.key == sample_info_kn["key"]
        assert session.signature_scheme == sample_info_kn["signature_scheme"]
def test_app_load_connection_file():
    """test `ipython console --existing` loads a connection file"""
    with TemporaryDirectory() as d:
        cf = os.path.join(d, "kernel.json")
        connect.write_connection_file(cf, **sample_info)
        app = DummyConsoleApp(connection_file=cf)
        app.initialize(argv=[])
        for attr, expected in sample_info.items():
            # key/signature_scheme live on the session, not the app
            if attr in ("key", "signature_scheme"):
                continue
            value = getattr(app, attr)
            assert value == expected, "app.%s = %s != %s" % (attr, value, expected)
def test_load_connection_info():
    """load_connection_info() applies a dict directly to a KernelClient."""
    client = KernelClient()
    info = {
        "control_port": 53702,
        "hb_port": 53705,
        "iopub_port": 53703,
        "ip": "0.0.0.0",
        "key": "secret",
        "shell_port": 53700,
        "signature_scheme": "hmac-sha256",
        "stdin_port": 53701,
        "transport": "tcp",
    }
    client.load_connection_info(info)
    assert client.control_port == info["control_port"]
    # the str key is stored as bytes on the session
    assert client.session.key.decode("ascii") == info["key"]
    assert client.ip == info["ip"]
def test_find_connection_file():
    """find_connection_file() matches exact names and glob patterns."""
    with TemporaryDirectory() as d:
        cf = "kernel.json"
        app = DummyConsoleApp(runtime_dir=d, connection_file=cf)
        app.initialize()
        security_dir = app.runtime_dir
        profile_cf = os.path.join(security_dir, cf)
        with open(profile_cf, "w") as f:
            f.write("{}")
        for query in (
            "kernel.json",
            "kern*",
            "*ernel*",
            "k*",
        ):
            assert connect.find_connection_file(query, path=security_dir) == profile_cf
def test_find_connection_file_local():
    """find_connection_file() locates files in the current working directory."""
    with TemporaryWorkingDirectory():
        cf = "test.json"
        abs_cf = os.path.abspath(cf)
        with open(cf, "w") as f:
            f.write("{}")
        for query in (
            "test.json",
            "test",
            abs_cf,
            os.path.join(".", "test.json"),
        ):
            assert connect.find_connection_file(query, path=[".", jupyter_runtime_dir()]) == abs_cf
def test_find_connection_file_relative():
    """find_connection_file() resolves relative paths into subdirectories."""
    # (removed a dead `cf = "test.json"` that was immediately overwritten)
    with TemporaryWorkingDirectory():
        os.mkdir("subdir")
        cf = os.path.join("subdir", "test.json")
        abs_cf = os.path.abspath(cf)
        with open(cf, "w") as f:
            f.write("{}")
        for query in (
            os.path.join(".", "subdir", "test.json"),
            os.path.join("subdir", "test.json"),
            abs_cf,
        ):
            assert connect.find_connection_file(query, path=[".", jupyter_runtime_dir()]) == abs_cf
def test_find_connection_file_abspath():
    """find_connection_file() returns an absolute path as-is when it exists."""
    # NOTE(review): the TemporaryDirectory is entered but unused; the file is
    # created in the current working directory and removed manually below —
    # looks intentional for the abspath case, but worth confirming.
    with TemporaryDirectory():
        cf = "absolute.json"
        abs_cf = os.path.abspath(cf)
        with open(cf, "w") as f:
            f.write("{}")
        assert connect.find_connection_file(abs_cf, path=jupyter_runtime_dir()) == abs_cf
    os.remove(abs_cf)
def test_mixin_record_random_ports():
    """write_connection_file() records which ports were randomly assigned."""
    with TemporaryDirectory() as d:
        dc = DummyConfigurable(data_dir=d, kernel_name="via-tcp", transport="tcp")
        dc.write_connection_file()
        assert dc._connection_file_written
        assert os.path.exists(dc.connection_file)
        # all ports were unset, so all of them should be recorded as random
        assert dc._random_port_names == connect.port_names
def test_mixin_cleanup_random_ports():
    """cleanup_random_ports() removes the file and zeroes the random ports."""
    with TemporaryDirectory() as d:
        dc = DummyConfigurable(data_dir=d, kernel_name="via-tcp", transport="tcp")
        dc.write_connection_file()
        filename = dc.connection_file
        dc.cleanup_random_ports()
        assert not os.path.exists(filename)
        for name in dc._random_port_names:
            assert getattr(dc, name) == 0

View File

@@ -0,0 +1,131 @@
"""Test suite for our JSON utilities."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import datetime
import json
import numbers
from datetime import timedelta
from unittest import mock
import pytest
from dateutil.tz import tzlocal
from dateutil.tz import tzoffset
from jupyter_client import jsonutil
from jupyter_client.session import utcnow
# Fixed reference instant (in the local timezone) shared by the date tests.
REFERENCE_DATETIME = datetime.datetime(2013, 7, 3, 16, 34, 52, 249482, tzlocal())
class MyInt(object):
    """Integer-like object registered as numbers.Integral without subclassing int."""

    def __int__(self):
        return 389


numbers.Integral.register(MyInt)
class MyFloat(object):
    """Float-like object registered as numbers.Real without subclassing float."""

    def __float__(self):
        return 3.14


numbers.Real.register(MyFloat)
def test_extract_date_from_naive():
    """Naive timestamps are interpreted as local time, with a deprecation warning."""
    ref = REFERENCE_DATETIME
    timestamp = "2013-07-03T16:34:52.249482"
    with pytest.deprecated_call(match="Interpreting naive datetime as local"):
        extracted = jsonutil.extract_dates(timestamp)
    assert isinstance(extracted, datetime.datetime)
    assert extracted.tzinfo is not None
    assert extracted.tzinfo.utcoffset(ref) == tzlocal().utcoffset(ref)
    assert extracted == ref
def test_extract_dates():
    """extract_dates() parses tz-aware ISO timestamps in several offset formats."""
    ref = REFERENCE_DATETIME
    timestamps = [
        "2013-07-03T16:34:52.249482Z",
        "2013-07-03T16:34:52.249482-0800",
        "2013-07-03T16:34:52.249482+0800",
        "2013-07-03T16:34:52.249482-08:00",
        "2013-07-03T16:34:52.249482+08:00",
    ]
    extracted = jsonutil.extract_dates(timestamps)
    for dt in extracted:
        assert isinstance(dt, datetime.datetime)
        assert dt.tzinfo is not None
    # offsets must match the suffix of each input timestamp above
    assert extracted[0].tzinfo.utcoffset(ref) == timedelta(0)
    assert extracted[1].tzinfo.utcoffset(ref) == timedelta(hours=-8)
    assert extracted[2].tzinfo.utcoffset(ref) == timedelta(hours=8)
    assert extracted[3].tzinfo.utcoffset(ref) == timedelta(hours=-8)
    assert extracted[4].tzinfo.utcoffset(ref) == timedelta(hours=8)
def test_parse_ms_precision():
    """parse_date() accepts 1-6 fractional-second digits; otherwise returns str."""
    base = "2013-07-03T16:34:52"
    digits = "1234567890"
    parsed = jsonutil.parse_date(base + "Z")
    assert isinstance(parsed, datetime.datetime)
    for i in range(len(digits)):
        ts = base + "." + digits[:i]
        parsed = jsonutil.parse_date(ts + "Z")
        if i >= 1 and i <= 6:
            # 1..6 fractional digits is valid and parses to a datetime
            assert isinstance(parsed, datetime.datetime)
        else:
            # 0 digits after "." or more than 6 is not a date: passed through
            assert isinstance(parsed, str)
def test_json_default_date():
    """json_default serializes datetimes; only the UTC one gets a 'Z' suffix."""
    naive = datetime.datetime.now()
    local = tzoffset("Local", -8 * 3600)
    other = tzoffset("Other", 2 * 3600)
    data = dict(naive=naive, utc=utcnow(), withtz=naive.replace(tzinfo=other))
    # Pin "local" tz so the naive datetime serializes deterministically.
    with mock.patch.object(jsonutil, "tzlocal", lambda: local):
        with pytest.deprecated_call(match="Please add timezone info"):
            jsondata = json.dumps(data, default=jsonutil.json_default)
    assert "Z" in jsondata
    # only the utc entry should be Z-suffixed
    assert jsondata.count("Z") == 1
    extracted = jsonutil.extract_dates(json.loads(jsondata))
    for dt in extracted.values():
        assert isinstance(dt, datetime.datetime)
        assert dt.tzinfo is not None
def test_json_default():
    """json_default handles scalars, containers, bytes, iterators, and ABCs."""
    # list of input/expected output. Use None for the expected output if it
    # can be the same as the input.
    pairs = [
        (1, None),  # start with scalars
        (1.123, None),
        (1.0, None),
        ('a', None),
        (True, None),
        (False, None),
        (None, None),
        # bytes are base64-encoded
        ({"key": b"\xFF"}, {"key": "/w==\n"}),
        # Containers
        ([1, 2], None),
        ((1, 2), [1, 2]),
        (set([1, 2]), [1, 2]),
        (dict(x=1), None),
        ({'x': 1, 'y': [1, 2, 3], '1': 'int'}, None),
        # More exotic objects
        ((x for x in range(3)), [0, 1, 2]),
        (iter([1, 2]), [1, 2]),
        (MyFloat(), 3.14),
        (MyInt(), 389),
    ]
    for val, jval in pairs:
        if jval is None:
            jval = val
        out = json.loads(json.dumps(val, default=jsonutil.json_default))
        # validate our cleanup
        assert out == jval

View File

@@ -0,0 +1,64 @@
import os
import shutil
import sys
import time
from subprocess import PIPE
from subprocess import Popen
from tempfile import mkdtemp
def _launch(extra_env):
    """Start ``jupyter kernel`` in a subprocess with *extra_env* merged in.

    Returns the Popen handle; stderr is captured for later inspection.
    """
    child_env = dict(os.environ)
    child_env.update(extra_env)
    cmd = [sys.executable, "-c", "from jupyter_client.kernelapp import main; main()"]
    return Popen(cmd, env=child_env, stderr=PIPE)
WAIT_TIME = 10  # seconds to wait for startup/shutdown milestones
POLL_FREQ = 10  # polls per second while waiting
def test_kernelapp_lifecycle():
    # Check that 'jupyter kernel' starts and terminates OK.
    runtime_dir = mkdtemp()
    startup_dir = mkdtemp()
    # The app records its startup by creating this file (see the
    # JUPYTER_CLIENT_TEST_RECORD_STARTUP_PRIVATE hook).
    started = os.path.join(startup_dir, "started")
    try:
        p = _launch(
            {
                "JUPYTER_RUNTIME_DIR": runtime_dir,
                "JUPYTER_CLIENT_TEST_RECORD_STARTUP_PRIVATE": started,
            }
        )
        # Wait for start
        for _ in range(WAIT_TIME * POLL_FREQ):
            if os.path.isfile(started):
                break
            time.sleep(1 / POLL_FREQ)
        else:
            raise AssertionError("No started file created in {} seconds".format(WAIT_TIME))
        # Connection file should be there by now
        for _ in range(WAIT_TIME * POLL_FREQ):
            files = os.listdir(runtime_dir)
            if files:
                break
            time.sleep(1 / POLL_FREQ)
        else:
            raise AssertionError("No connection file created in {} seconds".format(WAIT_TIME))
        assert len(files) == 1
        cf = files[0]
        assert cf.startswith("kernel")
        assert cf.endswith(".json")
        # Send SIGTERM to shut down
        time.sleep(1)
        p.terminate()
        _, stderr = p.communicate(timeout=WAIT_TIME)
        # the app logs the connection file name on shutdown
        assert cf in stderr.decode("utf-8", "replace")
    finally:
        shutil.rmtree(runtime_dir)
        shutil.rmtree(startup_dir)

View File

@@ -0,0 +1,585 @@
"""Tests for the KernelManager"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import asyncio
import concurrent.futures
import json
import os
import signal
import sys
import time
from subprocess import PIPE
import pytest
from jupyter_core import paths
from traitlets.config.loader import Config
from ..manager import _ShutdownStatus
from ..manager import start_new_async_kernel
from ..manager import start_new_kernel
from .utils import AsyncKMSubclass
from .utils import SyncKMSubclass
from jupyter_client import AsyncKernelManager
from jupyter_client import KernelManager
pjoin = os.path.join
TIMEOUT = 30
@pytest.fixture(params=["tcp", "ipc"])
def transport(request):
    """Parametrize tests over both ZMQ transports; ipc is skipped on Windows."""
    if sys.platform == "win32" and request.param == "ipc":
        pytest.skip("Transport 'ipc' not supported on Windows.")
    return request.param
@pytest.fixture
def config(transport):
    """KernelManager Config for the parametrized transport."""
    c = Config()
    c.KernelManager.transport = transport
    if transport == "ipc":
        # ipc needs a socket path prefix rather than an IP address
        c.KernelManager.ip = "test"
    return c
def _install_kernel(name="signaltest", extra_env=None):
    """Write a kernel.json spec for the signaltest kernel into the data dir.

    *extra_env* entries are merged into the spec's env, letting tests toggle
    the signal-kernel behaviors (NO_SHUTDOWN_REPLY, NO_SIGTERM_REPLY, ...).
    """
    if extra_env is None:
        extra_env = {}
    kernel_dir = pjoin(paths.jupyter_data_dir(), "kernels", name)
    os.makedirs(kernel_dir)
    with open(pjoin(kernel_dir, "kernel.json"), "w") as f:
        f.write(
            json.dumps(
                {
                    "argv": [
                        sys.executable,
                        "-m",
                        "jupyter_client.tests.signalkernel",
                        "-f",
                        "{connection_file}",
                    ],
                    "display_name": "Signal Test Kernel",
                    "env": {"TEST_VARS": "${TEST_VARS}:test_var_2", **extra_env},
                }
            )
        )
@pytest.fixture
def install_kernel():
    # Install the default signaltest kernel spec for the requesting test.
    return _install_kernel()
def install_kernel_dont_shutdown():
    # Kernel that ignores shutdown requests (exercises SIGTERM fallback).
    _install_kernel("signaltest-no-shutdown", {"NO_SHUTDOWN_REPLY": "1"})
def install_kernel_dont_terminate():
    # Kernel that ignores both shutdown requests and SIGTERM
    # (exercises the SIGKILL fallback).
    return _install_kernel(
        "signaltest-no-terminate", {"NO_SHUTDOWN_REPLY": "1", "NO_SIGTERM_REPLY": "1"}
    )
@pytest.fixture
def start_kernel():
    """Start a blocking signaltest kernel; shut it down after the test."""
    km, kc = start_new_kernel(kernel_name="signaltest")
    yield km, kc
    kc.stop_channels()
    km.shutdown_kernel()
    # shutdown must also close the manager's zmq context
    assert km.context.closed
@pytest.fixture
def km(config):
    """A plain KernelManager built from the parametrized config."""
    km = KernelManager(config=config)
    return km
@pytest.fixture
def km_subclass(config):
    """A SyncKMSubclass KernelManager built from the parametrized config."""
    km = SyncKMSubclass(config=config)
    return km
@pytest.fixture
def zmq_context():
    """A fresh zmq Context, terminated after the test."""
    import zmq

    ctx = zmq.Context()
    yield ctx
    ctx.term()
@pytest.fixture(params=[AsyncKernelManager, AsyncKMSubclass])
def async_km(request, config):
    """Parametrized async kernel manager: base class and subclass variants."""
    km = request.param(config=config)
    return km
@pytest.fixture
def async_km_subclass(config):
    """An AsyncKMSubclass kernel manager built from the parametrized config."""
    km = AsyncKMSubclass(config=config)
    return km
@pytest.fixture
async def start_async_kernel():
    """Start an async signaltest kernel; shut it down after the test."""
    km, kc = await start_new_async_kernel(kernel_name="signaltest")
    yield km, kc
    kc.stop_channels()
    await km.shutdown_kernel()
    # shutdown must also close the manager's zmq context
    assert km.context.closed
class TestKernelManagerShutDownGracefully:
    """Verify the shutdown escalation chain: request -> SIGTERM -> SIGKILL."""

    # Shared parametrization: kernel name, its installer, and the expected
    # final _ShutdownStatus reached by the manager.
    parameters = (
        "name, install, expected",
        [
            ("signaltest", _install_kernel, _ShutdownStatus.ShutdownRequest),
            (
                "signaltest-no-shutdown",
                install_kernel_dont_shutdown,
                _ShutdownStatus.SigtermRequest,
            ),
            (
                "signaltest-no-terminate",
                install_kernel_dont_terminate,
                _ShutdownStatus.SigkillRequest,
            ),
        ],
    )

    @pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals")
    @pytest.mark.parametrize(*parameters)
    def test_signal_kernel_subprocesses(self, name, install, expected):
        # ipykernel doesn't support 3.6 and this test uses async shutdown_request
        if expected == _ShutdownStatus.ShutdownRequest and sys.version_info < (3, 7):
            pytest.skip()
        install()
        km, kc = start_new_kernel(kernel_name=name)
        assert km._shutdown_status == _ShutdownStatus.Unset
        assert km.is_alive()
        # kc.execute("1")
        kc.stop_channels()
        km.shutdown_kernel()
        # a clean shutdown may still have escalated to SIGTERM; accept either
        if expected == _ShutdownStatus.ShutdownRequest:
            expected = [expected, _ShutdownStatus.SigtermRequest]
        else:
            expected = [expected]
        assert km._shutdown_status in expected

    @pytest.mark.asyncio
    @pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals")
    @pytest.mark.parametrize(*parameters)
    async def test_async_signal_kernel_subprocesses(self, name, install, expected):
        install()
        km, kc = await start_new_async_kernel(kernel_name=name)
        assert km._shutdown_status == _ShutdownStatus.Unset
        assert await km.is_alive()
        # kc.execute("1")
        kc.stop_channels()
        await km.shutdown_kernel()
        # a clean shutdown may still have escalated to SIGTERM; accept either
        if expected == _ShutdownStatus.ShutdownRequest:
            expected = [expected, _ShutdownStatus.SigtermRequest]
        else:
            expected = [expected]
        assert km._shutdown_status in expected
class TestKernelManager:
    """Blocking-API tests for ``KernelManager`` using the pytest fixtures above."""
    def test_lifecycle(self, km):
        """Start, restart, interrupt, and shut down a kernel end to end."""
        km.start_kernel(stdout=PIPE, stderr=PIPE)
        kc = km.client()
        assert km.is_alive()
        # For a blocking manager, `ready` should already be resolved after start.
        is_done = km.ready.done()
        assert is_done
        km.restart_kernel(now=True)
        assert km.is_alive()
        km.interrupt_kernel()
        assert isinstance(km, KernelManager)
        kc.stop_channels()
        km.shutdown_kernel(now=True)
        assert km.context.closed
    def test_get_connect_info(self, km):
        """Connection info must expose exactly the standard set of keys."""
        cinfo = km.get_connection_info()
        keys = sorted(cinfo.keys())
        expected = sorted(
            [
                "ip",
                "transport",
                "hb_port",
                "shell_port",
                "stdin_port",
                "iopub_port",
                "control_port",
                "key",
                "signature_scheme",
            ]
        )
        assert keys == expected
    @pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals")
    def test_signal_kernel_subprocesses(self, install_kernel, start_kernel):
        """Interrupting the kernel must forward SIGINT to its child processes."""
        km, kc = start_kernel
        def execute(cmd):
            # Send `cmd` and wait for its own reply (skipping unrelated messages).
            request_id = kc.execute(cmd)
            while True:
                reply = kc.get_shell_msg(TIMEOUT)
                if reply["parent_header"]["msg_id"] == request_id:
                    break
            content = reply["content"]
            assert content["status"] == "ok"
            return content
        N = 5
        for i in range(N):
            execute("start")
        time.sleep(1)  # make sure subprocs stay up
        reply = execute("check")
        # All N subprocesses still running (poll() returns None while alive).
        assert reply["user_expressions"]["poll"] == [None] * N
        # start a job on the kernel to be interrupted
        kc.execute("sleep")
        time.sleep(1)  # ensure sleep message has been handled before we interrupt
        km.interrupt_kernel()
        reply = kc.get_shell_msg(TIMEOUT)
        content = reply["content"]
        assert content["status"] == "ok"
        assert content["user_expressions"]["interrupted"]
        # wait up to 10s for subprocesses to handle signal
        for i in range(100):
            reply = execute("check")
            if reply["user_expressions"]["poll"] != [-signal.SIGINT] * N:
                time.sleep(0.1)
            else:
                break
        # verify that subprocesses were interrupted
        assert reply["user_expressions"]["poll"] == [-signal.SIGINT] * N
    def test_start_new_kernel(self, install_kernel, start_kernel):
        """A freshly started kernel and client both report alive."""
        km, kc = start_kernel
        assert km.is_alive()
        assert kc.is_alive()
        assert km.context.closed is False
    def _env_test_body(self, kc):
        """Shared assertion body: the kernelspec env template was expanded."""
        def execute(cmd):
            # Send `cmd` and wait for its own reply (skipping unrelated messages).
            request_id = kc.execute(cmd)
            while True:
                reply = kc.get_shell_msg(TIMEOUT)
                if reply["parent_header"]["msg_id"] == request_id:
                    break
            content = reply["content"]
            assert content["status"] == "ok"
            return content
        reply = execute("env")
        assert reply is not None
        assert reply["user_expressions"]["env"] == "test_var_1:test_var_2"
    def test_templated_kspec_env(self, install_kernel, start_kernel):
        """Templated env entries in the kernelspec reach the kernel process."""
        km, kc = start_kernel
        assert km.is_alive()
        assert kc.is_alive()
        assert km.context.closed is False
        self._env_test_body(kc)
    def test_cleanup_context(self, km):
        """cleanup_resources(restart=False) closes the manager-owned context."""
        assert km.context is not None
        km.cleanup_resources(restart=False)
        assert km.context.closed
    def test_no_cleanup_shared_context(self, zmq_context):
        """kernel manager does not terminate shared context"""
        km = KernelManager(context=zmq_context)
        assert km.context == zmq_context
        assert km.context is not None
        km.cleanup_resources(restart=False)
        assert km.context.closed is False
        assert zmq_context.closed is False
    def test_subclass_callables(self, km_subclass):
        """Each public lifecycle operation invokes the expected overridable hooks."""
        km_subclass.reset_counts()
        km_subclass.start_kernel(stdout=PIPE, stderr=PIPE)
        assert km_subclass.call_count("start_kernel") == 1
        assert km_subclass.call_count("_launch_kernel") == 1
        is_alive = km_subclass.is_alive()
        assert is_alive
        km_subclass.reset_counts()
        # restart(now=True) takes the kill path, so no request/finish_shutdown.
        km_subclass.restart_kernel(now=True)
        assert km_subclass.call_count("restart_kernel") == 1
        assert km_subclass.call_count("shutdown_kernel") == 1
        assert km_subclass.call_count("interrupt_kernel") == 1
        assert km_subclass.call_count("_kill_kernel") == 1
        assert km_subclass.call_count("cleanup_resources") == 1
        assert km_subclass.call_count("start_kernel") == 1
        assert km_subclass.call_count("_launch_kernel") == 1
        assert km_subclass.call_count("signal_kernel") == 1
        is_alive = km_subclass.is_alive()
        assert is_alive
        assert km_subclass.call_count("is_alive") >= 1
        km_subclass.reset_counts()
        km_subclass.interrupt_kernel()
        assert km_subclass.call_count("interrupt_kernel") == 1
        assert km_subclass.call_count("signal_kernel") == 1
        assert isinstance(km_subclass, KernelManager)
        km_subclass.reset_counts()
        # shutdown(now=False) takes the graceful path: request + finish.
        km_subclass.shutdown_kernel(now=False)
        assert km_subclass.call_count("shutdown_kernel") == 1
        assert km_subclass.call_count("interrupt_kernel") == 1
        assert km_subclass.call_count("request_shutdown") == 1
        assert km_subclass.call_count("finish_shutdown") == 1
        assert km_subclass.call_count("cleanup_resources") == 1
        assert km_subclass.call_count("signal_kernel") == 1
        assert km_subclass.call_count("is_alive") >= 1
        is_alive = km_subclass.is_alive()
        assert is_alive is False
        assert km_subclass.call_count("is_alive") >= 1
        assert km_subclass.context.closed
class TestParallel:
    """Exercise kernel lifecycles run sequentially and concurrently (threads/processes)."""
    @pytest.mark.timeout(TIMEOUT)
    def test_start_sequence_kernels(self, config, install_kernel):
        """Ensure that a sequence of kernel startups doesn't break anything."""
        self._run_signaltest_lifecycle(config)
        self._run_signaltest_lifecycle(config)
        self._run_signaltest_lifecycle(config)
    @pytest.mark.timeout(TIMEOUT + 10)
    def test_start_parallel_thread_kernels(self, config, install_kernel):
        """Two lifecycles in parallel threads must not interfere."""
        if config.KernelManager.transport == "ipc":  # FIXME
            pytest.skip("IPC transport is currently not working for this test!")
        self._run_signaltest_lifecycle(config)
        with concurrent.futures.ThreadPoolExecutor(max_workers=2) as thread_executor:
            future1 = thread_executor.submit(self._run_signaltest_lifecycle, config)
            future2 = thread_executor.submit(self._run_signaltest_lifecycle, config)
            future1.result()
            future2.result()
    @pytest.mark.timeout(TIMEOUT)
    @pytest.mark.skipif(
        (sys.platform == "darwin") and (sys.version_info >= (3, 6)) and (sys.version_info < (3, 8)),
        reason='"Bad file descriptor" error',
    )
    def test_start_parallel_process_kernels(self, config, install_kernel):
        """A thread-run lifecycle concurrent with a subprocess-run lifecycle."""
        if config.KernelManager.transport == "ipc":  # FIXME
            pytest.skip("IPC transport is currently not working for this test!")
        self._run_signaltest_lifecycle(config)
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_executor:
            future1 = thread_executor.submit(self._run_signaltest_lifecycle, config)
            with concurrent.futures.ProcessPoolExecutor(max_workers=1) as process_executor:
                future2 = process_executor.submit(self._run_signaltest_lifecycle, config)
                future2.result()
            future1.result()
    @pytest.mark.timeout(TIMEOUT)
    @pytest.mark.skipif(
        (sys.platform == "darwin") and (sys.version_info >= (3, 6)) and (sys.version_info < (3, 8)),
        reason='"Bad file descriptor" error',
    )
    def test_start_sequence_process_kernels(self, config, install_kernel):
        """A lifecycle in this process followed by one in a subprocess."""
        if config.KernelManager.transport == "ipc":  # FIXME
            pytest.skip("IPC transport is currently not working for this test!")
        self._run_signaltest_lifecycle(config)
        with concurrent.futures.ProcessPoolExecutor(max_workers=1) as pool_executor:
            future = pool_executor.submit(self._run_signaltest_lifecycle, config)
            future.result()
    def _prepare_kernel(self, km, startup_timeout=TIMEOUT, **kwargs):
        """Start *km*'s kernel and return a ready client; clean up on startup failure."""
        km.start_kernel(**kwargs)
        kc = km.client()
        kc.start_channels()
        try:
            kc.wait_for_ready(timeout=startup_timeout)
        except RuntimeError:
            # Startup failed: tear down before propagating so nothing leaks.
            kc.stop_channels()
            km.shutdown_kernel()
            raise
        return kc
    def _run_signaltest_lifecycle(self, config=None):
        """One full start/check/restart/shutdown cycle against a signaltest kernel."""
        km = KernelManager(config=config, kernel_name="signaltest")
        kc = self._prepare_kernel(km, stdout=PIPE, stderr=PIPE)
        def execute(cmd):
            # Send `cmd` and wait for its own reply (skipping unrelated messages).
            request_id = kc.execute(cmd)
            while True:
                reply = kc.get_shell_msg(TIMEOUT)
                if reply["parent_header"]["msg_id"] == request_id:
                    break
            content = reply["content"]
            assert content["status"] == "ok"
            return content
        execute("start")
        assert km.is_alive()
        execute("check")
        assert km.is_alive()
        km.restart_kernel(now=True)
        assert km.is_alive()
        execute("check")
        km.shutdown_kernel()
        assert km.context.closed
        kc.stop_channels()
@pytest.mark.asyncio
class TestAsyncKernelManager:
    """Async counterparts of the ``TestKernelManager`` cases, using pytest-asyncio."""
    async def test_lifecycle(self, async_km):
        """Start, restart, interrupt, and shut down an async kernel end to end."""
        await async_km.start_kernel(stdout=PIPE, stderr=PIPE)
        is_alive = await async_km.is_alive()
        assert is_alive
        is_ready = async_km.ready.done()
        assert is_ready
        await async_km.restart_kernel(now=True)
        is_alive = await async_km.is_alive()
        assert is_alive
        await async_km.interrupt_kernel()
        assert isinstance(async_km, AsyncKernelManager)
        await async_km.shutdown_kernel(now=True)
        is_alive = await async_km.is_alive()
        assert is_alive is False
        assert async_km.context.closed
    async def test_get_connect_info(self, async_km):
        """Connection info must expose exactly the standard set of keys."""
        cinfo = async_km.get_connection_info()
        keys = sorted(cinfo.keys())
        expected = sorted(
            [
                "ip",
                "transport",
                "hb_port",
                "shell_port",
                "stdin_port",
                "iopub_port",
                "control_port",
                "key",
                "signature_scheme",
            ]
        )
        assert keys == expected
    @pytest.mark.timeout(10)
    @pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals")
    async def test_signal_kernel_subprocesses(self, install_kernel, start_async_kernel):
        """Interrupting the kernel must forward SIGINT to its child processes."""
        km, kc = start_async_kernel
        async def execute(cmd):
            # Send `cmd` and wait for its own reply (skipping unrelated messages).
            request_id = kc.execute(cmd)
            while True:
                reply = await kc.get_shell_msg(TIMEOUT)
                if reply["parent_header"]["msg_id"] == request_id:
                    break
            content = reply["content"]
            assert content["status"] == "ok"
            return content
        # Ensure that shutdown_kernel and stop_channels are called at the end of the test.
        # Note: we cannot use addCleanup(<func>) for these since it doesn't properly handle
        # coroutines - which km.shutdown_kernel now is.
        N = 5
        for i in range(N):
            await execute("start")
        await asyncio.sleep(1)  # make sure subprocs stay up
        reply = await execute("check")
        # All N subprocesses still running (poll() returns None while alive).
        assert reply["user_expressions"]["poll"] == [None] * N
        # start a job on the kernel to be interrupted
        request_id = kc.execute("sleep")
        await asyncio.sleep(1)  # ensure sleep message has been handled before we interrupt
        await km.interrupt_kernel()
        while True:
            reply = await kc.get_shell_msg(TIMEOUT)
            if reply["parent_header"]["msg_id"] == request_id:
                break
        content = reply["content"]
        assert content["status"] == "ok"
        assert content["user_expressions"]["interrupted"] is True
        # wait up to 5s for subprocesses to handle signal
        for i in range(50):
            reply = await execute("check")
            if reply["user_expressions"]["poll"] != [-signal.SIGINT] * N:
                await asyncio.sleep(0.1)
            else:
                break
        # verify that subprocesses were interrupted
        assert reply["user_expressions"]["poll"] == [-signal.SIGINT] * N
    @pytest.mark.timeout(10)
    async def test_start_new_async_kernel(self, install_kernel, start_async_kernel):
        """A freshly started async kernel and client both report alive."""
        km, kc = start_async_kernel
        is_alive = await km.is_alive()
        assert is_alive
        is_alive = await kc.is_alive()
        assert is_alive
    async def test_subclass_callables(self, async_km_subclass):
        """Each public lifecycle operation invokes the expected overridable hooks."""
        async_km_subclass.reset_counts()
        await async_km_subclass.start_kernel(stdout=PIPE, stderr=PIPE)
        assert async_km_subclass.call_count("start_kernel") == 1
        assert async_km_subclass.call_count("_launch_kernel") == 1
        is_alive = await async_km_subclass.is_alive()
        assert is_alive
        assert async_km_subclass.call_count("is_alive") >= 1
        async_km_subclass.reset_counts()
        # restart(now=True) takes the kill path, so no request/finish_shutdown.
        await async_km_subclass.restart_kernel(now=True)
        assert async_km_subclass.call_count("restart_kernel") == 1
        assert async_km_subclass.call_count("shutdown_kernel") == 1
        assert async_km_subclass.call_count("interrupt_kernel") == 1
        assert async_km_subclass.call_count("_kill_kernel") == 1
        assert async_km_subclass.call_count("cleanup_resources") == 1
        assert async_km_subclass.call_count("start_kernel") == 1
        assert async_km_subclass.call_count("_launch_kernel") == 1
        assert async_km_subclass.call_count("signal_kernel") == 1
        is_alive = await async_km_subclass.is_alive()
        assert is_alive
        assert async_km_subclass.call_count("is_alive") >= 1
        async_km_subclass.reset_counts()
        await async_km_subclass.interrupt_kernel()
        assert async_km_subclass.call_count("interrupt_kernel") == 1
        assert async_km_subclass.call_count("signal_kernel") == 1
        assert isinstance(async_km_subclass, AsyncKernelManager)
        async_km_subclass.reset_counts()
        # shutdown(now=False) takes the graceful path: request + finish.
        await async_km_subclass.shutdown_kernel(now=False)
        assert async_km_subclass.call_count("shutdown_kernel") == 1
        assert async_km_subclass.call_count("interrupt_kernel") == 1
        assert async_km_subclass.call_count("request_shutdown") == 1
        assert async_km_subclass.call_count("finish_shutdown") == 1
        assert async_km_subclass.call_count("cleanup_resources") == 1
        assert async_km_subclass.call_count("signal_kernel") == 1
        assert async_km_subclass.call_count("is_alive") >= 1
        is_alive = await async_km_subclass.is_alive()
        assert is_alive is False
        assert async_km_subclass.call_count("is_alive") >= 1
        assert async_km_subclass.context.closed

View File

@@ -0,0 +1,200 @@
"""Tests for the KernelSpecManager"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import copy
import json
import os
import sys
import tempfile
import unittest
from io import StringIO
from logging import StreamHandler
from os.path import join as pjoin
from subprocess import PIPE
from subprocess import Popen
from subprocess import STDOUT
from tempfile import TemporaryDirectory
import pytest
from jupyter_core import paths
from .utils import install_kernel
from .utils import sample_kernel_json
from .utils import test_env
from jupyter_client import kernelspec
class KernelSpecTests(unittest.TestCase):
    """Unit tests for ``kernelspec.KernelSpecManager`` discovery and installation."""
    def setUp(self):
        # Patch JUPYTER_* env dirs to temporary locations, install a "sample"
        # kernelspec, and stage an installable spec in a temp directory.
        self.env_patch = test_env()
        self.env_patch.start()
        self.sample_kernel_dir = install_kernel(
            pjoin(paths.jupyter_data_dir(), "kernels"), name="sample"
        )
        self.ksm = kernelspec.KernelSpecManager()
        td2 = TemporaryDirectory()
        self.addCleanup(td2.cleanup)
        self.installable_kernel = td2.name
        with open(pjoin(self.installable_kernel, "kernel.json"), "w") as f:
            json.dump(sample_kernel_json, f)
    def tearDown(self):
        # Undo the environment patch applied in setUp.
        self.env_patch.stop()
    def test_find_kernel_specs(self):
        """The installed sample spec is discovered at its install location."""
        kernels = self.ksm.find_kernel_specs()
        self.assertEqual(kernels["sample"], self.sample_kernel_dir)
    def test_allowed_kernel_names(self):
        """Specs not in ``allowed_kernelspecs`` are filtered out."""
        ksm = kernelspec.KernelSpecManager()
        ksm.allowed_kernelspecs = ["foo"]
        kernels = ksm.find_kernel_specs()
        assert not len(kernels)
    def test_deprecated_whitelist(self):
        """The deprecated ``whitelist`` trait still filters specs."""
        ksm = kernelspec.KernelSpecManager()
        ksm.whitelist = ["bar"]
        kernels = ksm.find_kernel_specs()
        assert not len(kernels)
    def test_get_kernel_spec(self):
        """Lookup is case-insensitive and returns the spec's stored fields."""
        ks = self.ksm.get_kernel_spec("SAMPLE")  # Case insensitive
        self.assertEqual(ks.resource_dir, self.sample_kernel_dir)
        self.assertEqual(ks.argv, sample_kernel_json["argv"])
        self.assertEqual(ks.display_name, sample_kernel_json["display_name"])
        self.assertEqual(ks.env, {})
        self.assertEqual(ks.metadata, {})
    def test_find_all_specs(self):
        """get_all_specs returns both resource_dir and the parsed spec."""
        kernels = self.ksm.get_all_specs()
        self.assertEqual(kernels["sample"]["resource_dir"], self.sample_kernel_dir)
        self.assertIsNotNone(kernels["sample"]["spec"])
    def test_kernel_spec_priority(self):
        """Earlier entries in ``kernel_dirs`` shadow later ones for the same name."""
        td = TemporaryDirectory()
        self.addCleanup(td.cleanup)
        sample_kernel = install_kernel(td.name, name="sample")
        self.ksm.kernel_dirs.append(td.name)
        kernels = self.ksm.find_kernel_specs()
        self.assertEqual(kernels["sample"], self.sample_kernel_dir)
        self.ksm.kernel_dirs.insert(0, td.name)
        kernels = self.ksm.find_kernel_specs()
        self.assertEqual(kernels["sample"], sample_kernel)
    def test_install_kernel_spec(self):
        """Installing a spec makes it discoverable; reinstalling is idempotent."""
        self.ksm.install_kernel_spec(self.installable_kernel, kernel_name="tstinstalled", user=True)
        self.assertIn("tstinstalled", self.ksm.find_kernel_specs())
        # install again works
        self.ksm.install_kernel_spec(self.installable_kernel, kernel_name="tstinstalled", user=True)
    def test_install_kernel_spec_prefix(self):
        """prefix= installs warn until the prefix is added to ``kernel_dirs``."""
        td = TemporaryDirectory()
        self.addCleanup(td.cleanup)
        # Capture log output to check for the "may not be found" warning.
        capture = StringIO()
        handler = StreamHandler(capture)
        self.ksm.log.addHandler(handler)
        self.ksm.install_kernel_spec(
            self.installable_kernel, kernel_name="tstinstalled", prefix=td.name
        )
        captured = capture.getvalue()
        self.ksm.log.removeHandler(handler)
        self.assertIn("may not be found", captured)
        self.assertNotIn("tstinstalled", self.ksm.find_kernel_specs())
        # add prefix to path, so we find the spec
        self.ksm.kernel_dirs.append(pjoin(td.name, "share", "jupyter", "kernels"))
        self.assertIn("tstinstalled", self.ksm.find_kernel_specs())
        # Run it again, no warning this time because we've added it to the path
        capture = StringIO()
        handler = StreamHandler(capture)
        self.ksm.log.addHandler(handler)
        self.ksm.install_kernel_spec(
            self.installable_kernel, kernel_name="tstinstalled", prefix=td.name
        )
        captured = capture.getvalue()
        self.ksm.log.removeHandler(handler)
        self.assertNotIn("may not be found", captured)
    @pytest.mark.skipif(
        not (os.name != "nt" and not os.access("/usr/local/share", os.W_OK)),
        reason="needs Unix system without root privileges",
    )
    def test_cant_install_kernel_spec(self):
        """System-wide install without write access raises OSError."""
        with self.assertRaises(OSError):
            self.ksm.install_kernel_spec(
                self.installable_kernel, kernel_name="tstinstalled", user=False
            )
    def test_remove_kernel_spec(self):
        """Removing a spec returns the path it was removed from."""
        path = self.ksm.remove_kernel_spec("sample")
        self.assertEqual(path, self.sample_kernel_dir)
    def test_remove_kernel_spec_app(self):
        """The ``jupyter kernelspec remove`` CLI exits cleanly."""
        p = Popen(
            [
                sys.executable,
                "-m",
                "jupyter_client.kernelspecapp",
                "remove",
                "sample",
                "-f",
            ],
            stdout=PIPE,
            stderr=STDOUT,
            env=os.environ,
        )
        out, _ = p.communicate()
        self.assertEqual(p.returncode, 0, out.decode("utf8", "replace"))
    def test_validate_kernel_name(self):
        """Kernel-name validation accepts alphanumerics/dash/underscore/dot only."""
        for good in [
            "julia-0.4",
            "ipython",
            "R",
            "python_3",
            "Haskell-1-2-3",
        ]:
            assert kernelspec._is_valid_kernel_name(good)
        for bad in [
            "has space",
            "ünicode",
            "%percent",
            "question?",
        ]:
            assert not kernelspec._is_valid_kernel_name(bad)
    def test_subclass(self):
        """Test get_all_specs in subclasses that override find_kernel_specs"""
        ksm = self.ksm
        resource_dir = tempfile.gettempdir()
        native_name = kernelspec.NATIVE_KERNEL_NAME
        native_kernel = ksm.get_kernel_spec(native_name)
        class MyKSM(kernelspec.KernelSpecManager):
            def get_kernel_spec(self, name):
                # Serve a fake spec plus the native one; anything else is unknown.
                spec = copy.copy(native_kernel)
                if name == "fake":
                    spec.name = name
                    spec.resource_dir = resource_dir
                elif name == native_name:
                    pass
                else:
                    raise KeyError(name)
                return spec
            def find_kernel_specs(self):
                return {
                    "fake": resource_dir,
                    native_name: native_kernel.resource_dir,
                }
        # ensure that get_all_specs doesn't raise if only
        # find_kernel_specs and get_kernel_spec are defined
        myksm = MyKSM()
        specs = myksm.get_all_specs()
        assert sorted(specs) == ["fake", native_name]

View File

@@ -0,0 +1,15 @@
# -----------------------------------------------------------------------------
# Copyright (c) The Jupyter Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
# -----------------------------------------------------------------------------
from .. import localinterfaces
def test_load_ips():
    """Smoke test: IP enumeration reloads and raises nothing with exceptions enabled."""
    # Override the machinery that skips it if it was called before
    localinterfaces._load_ips.called = False
    # Just check this doesn't error
    localinterfaces._load_ips(suppress_exceptions=False)

View File

@@ -0,0 +1,34 @@
"""Tests for KernelManager"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import tempfile
from unittest import mock
from jupyter_client.kernelspec import KernelSpec
from jupyter_client.manager import KernelManager
def test_connection_file_real_path():
    """Verify realpath is used when formatting connection file"""
    with mock.patch("os.path.realpath") as patched_realpath:
        patched_realpath.return_value = "foobar"
        km = KernelManager(
            connection_file=os.path.join(tempfile.gettempdir(), "kernel-test.json"),
            kernel_name="test_kernel",
        )
        # KernelSpec and launch args have to be mocked as we don't have an actual kernel on disk
        km._kernel_spec = KernelSpec(
            resource_dir="test",
            argv=["python.exe", "-m", "test_kernel", "-f", "{connection_file}"],
            env={},
            display_name="test_kernel",
            language="python",
            metadata={},
        )
        km._launch_args = {}
        # The "-f" argument's value (index 4) must be the realpath result.
        cmds = km.format_kernel_cmd()
        assert cmds[4] == "foobar"

View File

@@ -0,0 +1,601 @@
"""Tests for the notebook kernel and session manager."""
import asyncio
import concurrent.futures
import os
import sys
import uuid
from asyncio import ensure_future
from subprocess import PIPE
from unittest import TestCase
import pytest
from jupyter_core import paths
from tornado.testing import AsyncTestCase
from tornado.testing import gen_test
from traitlets.config.loader import Config
from ..localinterfaces import localhost
from .utils import AsyncKMSubclass
from .utils import AsyncMKMSubclass
from .utils import install_kernel
from .utils import skip_win32
from .utils import SyncKMSubclass
from .utils import SyncMKMSubclass
from .utils import test_env
from jupyter_client import AsyncKernelManager
from jupyter_client import KernelManager
from jupyter_client.multikernelmanager import AsyncMultiKernelManager
from jupyter_client.multikernelmanager import MultiKernelManager
# Per-operation timeout in seconds used throughout these tests.
TIMEOUT = 30
async def now(awaitable):
    """Await *awaitable* via ``gather`` so it is scheduled and completed
    before awaitables created after this call, and return its result.
    """
    results = await asyncio.gather(awaitable)
    return results[0]
class TestKernelManager(TestCase):
    """Blocking-API tests for ``MultiKernelManager``."""
    def setUp(self):
        # Patch JUPYTER_* env dirs to temporary locations for isolation.
        self.env_patch = test_env()
        self.env_patch.start()
        super().setUp()
    def tearDown(self) -> None:
        # Undo the environment patch applied in setUp.
        self.env_patch.stop()
        return super().tearDown()
    # static so picklable for multiprocessing on Windows
    @staticmethod
    def _get_tcp_km():
        """Return a MultiKernelManager with default (tcp) transport."""
        c = Config()
        km = MultiKernelManager(config=c)
        return km
    @staticmethod
    def _get_tcp_km_sub():
        """Return the call-counting SyncMKMSubclass variant."""
        c = Config()
        km = SyncMKMSubclass(config=c)
        return km
    # static so picklable for multiprocessing on Windows
    @staticmethod
    def _get_ipc_km():
        """Return a MultiKernelManager configured for ipc transport."""
        c = Config()
        c.KernelManager.transport = "ipc"
        c.KernelManager.ip = "test"
        km = MultiKernelManager(config=c)
        return km
    # static so picklable for multiprocessing on Windows
    @staticmethod
    def _run_lifecycle(km, test_kid=None):
        """Drive one start/restart/interrupt/shutdown cycle through *km*."""
        if test_kid:
            kid = km.start_kernel(stdout=PIPE, stderr=PIPE, kernel_id=test_kid)
            assert kid == test_kid
        else:
            kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
        assert km.is_alive(kid)
        assert km.get_kernel(kid).ready.done()
        assert kid in km
        assert kid in km.list_kernel_ids()
        assert len(km) == 1, f"{len(km)} != {1}"
        km.restart_kernel(kid, now=True)
        assert km.is_alive(kid)
        assert kid in km.list_kernel_ids()
        km.interrupt_kernel(kid)
        k = km.get_kernel(kid)
        kc = k.client()
        assert isinstance(k, KernelManager)
        km.shutdown_kernel(kid, now=True)
        assert kid not in km, f"{kid} not in {km}"
        kc.stop_channels()
    def _run_cinfo(self, km, transport, ip):
        """Start a kernel and validate its connection info and channel sockets."""
        kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
        km.get_kernel(kid)
        cinfo = km.get_connection_info(kid)
        self.assertEqual(transport, cinfo["transport"])
        self.assertEqual(ip, cinfo["ip"])
        self.assertTrue("stdin_port" in cinfo)
        self.assertTrue("iopub_port" in cinfo)
        stream = km.connect_iopub(kid)
        stream.close()
        self.assertTrue("shell_port" in cinfo)
        stream = km.connect_shell(kid)
        stream.close()
        self.assertTrue("hb_port" in cinfo)
        stream = km.connect_hb(kid)
        stream.close()
        km.shutdown_kernel(kid, now=True)
    # static so picklable for multiprocessing on Windows
    @classmethod
    def test_tcp_lifecycle(cls):
        """Full lifecycle over tcp transport."""
        km = cls._get_tcp_km()
        cls._run_lifecycle(km)
    def test_tcp_lifecycle_with_kernel_id(self):
        """Same lifecycle, but with a caller-supplied kernel id."""
        km = self._get_tcp_km()
        self._run_lifecycle(km, test_kid=str(uuid.uuid4()))
    def test_shutdown_all(self):
        """shutdown_all stops every kernel and is safe to call twice."""
        km = self._get_tcp_km()
        kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
        self.assertIn(kid, km)
        km.shutdown_all()
        self.assertNotIn(kid, km)
        # shutdown again is okay, because we have no kernels
        km.shutdown_all()
    def test_tcp_cinfo(self):
        """Connection info checks over tcp transport."""
        km = self._get_tcp_km()
        self._run_cinfo(km, "tcp", localhost())
    @skip_win32
    def test_ipc_lifecycle(self):
        """Full lifecycle over ipc transport (POSIX only)."""
        km = self._get_ipc_km()
        self._run_lifecycle(km)
    @skip_win32
    def test_ipc_cinfo(self):
        """Connection info checks over ipc transport (POSIX only)."""
        km = self._get_ipc_km()
        self._run_cinfo(km, "ipc", "test")
    def test_start_sequence_tcp_kernels(self):
        """Ensure that a sequence of kernel startups doesn't break anything."""
        self._run_lifecycle(self._get_tcp_km())
        self._run_lifecycle(self._get_tcp_km())
        self._run_lifecycle(self._get_tcp_km())
    @skip_win32
    def test_start_sequence_ipc_kernels(self):
        """Ensure that a sequence of kernel startups doesn't break anything."""
        self._run_lifecycle(self._get_ipc_km())
        self._run_lifecycle(self._get_ipc_km())
        self._run_lifecycle(self._get_ipc_km())
    def tcp_lifecycle_with_loop(self):
        """Helper for worker threads: run the tcp lifecycle with a fresh event loop."""
        # Ensure each thread has an event loop
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self.test_tcp_lifecycle()
        loop.close()
    def test_start_parallel_thread_kernels(self):
        """Two lifecycles in parallel threads must not interfere."""
        self.test_tcp_lifecycle()
        with concurrent.futures.ThreadPoolExecutor(max_workers=2) as thread_executor:
            future1 = thread_executor.submit(self.tcp_lifecycle_with_loop)
            future2 = thread_executor.submit(self.tcp_lifecycle_with_loop)
            future1.result()
            future2.result()
    @pytest.mark.skipif(
        (sys.platform == "darwin") and (sys.version_info >= (3, 6)) and (sys.version_info < (3, 8)),
        reason='"Bad file descriptor" error',
    )
    def test_start_parallel_process_kernels(self):
        """A thread-run lifecycle concurrent with a subprocess-run lifecycle."""
        self.test_tcp_lifecycle()
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_executor:
            future1 = thread_executor.submit(self.tcp_lifecycle_with_loop)
            with concurrent.futures.ProcessPoolExecutor(max_workers=1) as process_executor:
                # Windows tests needs this target to be picklable:
                future2 = process_executor.submit(self.test_tcp_lifecycle)
                future2.result()
            future1.result()
    def test_subclass_callables(self):
        """Each multi-kernel operation invokes the expected overridable hooks."""
        km = self._get_tcp_km_sub()
        km.reset_counts()
        kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
        assert km.call_count("start_kernel") == 1
        assert isinstance(km.get_kernel(kid), SyncKMSubclass)
        assert km.get_kernel(kid).call_count("start_kernel") == 1
        assert km.get_kernel(kid).call_count("_launch_kernel") == 1
        assert km.is_alive(kid)
        assert kid in km
        assert kid in km.list_kernel_ids()
        assert len(km) == 1, f"{len(km)} != {1}"
        km.get_kernel(kid).reset_counts()
        km.reset_counts()
        # restart(now=True) takes the kill path on the per-kernel manager.
        km.restart_kernel(kid, now=True)
        assert km.call_count("restart_kernel") == 1
        assert km.call_count("get_kernel") == 1
        assert km.get_kernel(kid).call_count("restart_kernel") == 1
        assert km.get_kernel(kid).call_count("shutdown_kernel") == 1
        assert km.get_kernel(kid).call_count("interrupt_kernel") == 1
        assert km.get_kernel(kid).call_count("_kill_kernel") == 1
        assert km.get_kernel(kid).call_count("cleanup_resources") == 1
        assert km.get_kernel(kid).call_count("start_kernel") == 1
        assert km.get_kernel(kid).call_count("_launch_kernel") == 1
        assert km.is_alive(kid)
        assert kid in km.list_kernel_ids()
        km.get_kernel(kid).reset_counts()
        km.reset_counts()
        km.interrupt_kernel(kid)
        assert km.call_count("interrupt_kernel") == 1
        assert km.call_count("get_kernel") == 1
        assert km.get_kernel(kid).call_count("interrupt_kernel") == 1
        km.get_kernel(kid).reset_counts()
        km.reset_counts()
        k = km.get_kernel(kid)
        assert isinstance(k, SyncKMSubclass)
        assert km.call_count("get_kernel") == 1
        km.get_kernel(kid).reset_counts()
        km.reset_counts()
        # shutdown_all(now=True) bypasses the graceful request/finish hooks.
        km.shutdown_all(now=True)
        assert km.call_count("shutdown_kernel") == 1
        assert km.call_count("remove_kernel") == 1
        assert km.call_count("request_shutdown") == 0
        assert km.call_count("finish_shutdown") == 0
        assert km.call_count("cleanup_resources") == 0
        assert kid not in km, f"{kid} not in {km}"
class TestAsyncKernelManager(AsyncTestCase):
    def setUp(self):
        # Patch JUPYTER_* env dirs to temporary locations for isolation.
        self.env_patch = test_env()
        self.env_patch.start()
        super().setUp()
    def tearDown(self) -> None:
        # Undo the environment patch applied in setUp.
        self.env_patch.stop()
        return super().tearDown()
    # static so picklable for multiprocessing on Windows
    @staticmethod
    def _get_tcp_km():
        """Return an AsyncMultiKernelManager with default (tcp) transport."""
        c = Config()
        km = AsyncMultiKernelManager(config=c)
        return km
    @staticmethod
    def _get_tcp_km_sub():
        """Return the call-counting AsyncMKMSubclass variant."""
        c = Config()
        km = AsyncMKMSubclass(config=c)
        return km
    # static so picklable for multiprocessing on Windows
    @staticmethod
    def _get_ipc_km():
        """Return an AsyncMultiKernelManager configured for ipc transport."""
        c = Config()
        c.KernelManager.transport = "ipc"
        c.KernelManager.ip = "test"
        km = AsyncMultiKernelManager(config=c)
        return km
    @staticmethod
    def _get_pending_kernels_km():
        """Return an AsyncMultiKernelManager with use_pending_kernels enabled."""
        c = Config()
        c.AsyncMultiKernelManager.use_pending_kernels = True
        km = AsyncMultiKernelManager(config=c)
        return km
    # static so picklable for multiprocessing on Windows
    @staticmethod
    async def _run_lifecycle(km, test_kid=None):
        """Drive one start/restart/interrupt/shutdown cycle through *km*."""
        if test_kid:
            kid = await km.start_kernel(stdout=PIPE, stderr=PIPE, kernel_id=test_kid)
            assert kid == test_kid
        else:
            kid = await km.start_kernel(stdout=PIPE, stderr=PIPE)
        assert await km.is_alive(kid)
        assert kid in km
        assert kid in km.list_kernel_ids()
        assert len(km) == 1, f"{len(km)} != {1}"
        await km.restart_kernel(kid, now=True)
        assert await km.is_alive(kid)
        assert kid in km.list_kernel_ids()
        await km.interrupt_kernel(kid)
        k = km.get_kernel(kid)
        assert isinstance(k, AsyncKernelManager)
        await km.shutdown_kernel(kid, now=True)
        assert kid not in km, f"{kid} not in {km}"
    async def _run_cinfo(self, km, transport, ip):
        """Start a kernel and validate its connection info and channel sockets."""
        kid = await km.start_kernel(stdout=PIPE, stderr=PIPE)
        km.get_kernel(kid)
        cinfo = km.get_connection_info(kid)
        self.assertEqual(transport, cinfo["transport"])
        self.assertEqual(ip, cinfo["ip"])
        self.assertTrue("stdin_port" in cinfo)
        self.assertTrue("iopub_port" in cinfo)
        stream = km.connect_iopub(kid)
        stream.close()
        self.assertTrue("shell_port" in cinfo)
        stream = km.connect_shell(kid)
        stream.close()
        self.assertTrue("hb_port" in cinfo)
        stream = km.connect_hb(kid)
        stream.close()
        await km.shutdown_kernel(kid, now=True)
        self.assertNotIn(kid, km)
    @gen_test
    async def test_tcp_lifecycle(self):
        """Full lifecycle over tcp transport."""
        await self.raw_tcp_lifecycle()
    @gen_test
    async def test_tcp_lifecycle_with_kernel_id(self):
        """Same lifecycle, but with a caller-supplied kernel id."""
        await self.raw_tcp_lifecycle(test_kid=str(uuid.uuid4()))
    @gen_test
    async def test_shutdown_all(self):
        """shutdown_all stops every kernel and is safe to call twice."""
        km = self._get_tcp_km()
        kid = await km.start_kernel(stdout=PIPE, stderr=PIPE)
        self.assertIn(kid, km)
        await km.shutdown_all()
        self.assertNotIn(kid, km)
        # shutdown again is okay, because we have no kernels
        await km.shutdown_all()
    @gen_test(timeout=20)
    async def test_use_after_shutdown_all(self):
        """The manager remains usable for new kernels after shutdown_all."""
        km = self._get_tcp_km()
        kid = await km.start_kernel(stdout=PIPE, stderr=PIPE)
        self.assertIn(kid, km)
        await km.shutdown_all()
        self.assertNotIn(kid, km)
        # Start another kernel
        kid = await km.start_kernel(stdout=PIPE, stderr=PIPE)
        self.assertIn(kid, km)
        await km.shutdown_all()
        self.assertNotIn(kid, km)
        # shutdown again is okay, because we have no kernels
        await km.shutdown_all()
    @gen_test(timeout=20)
    async def test_shutdown_all_while_starting(self):
        """shutdown_all racing a concurrent start_kernel must leave a clean manager."""
        km = self._get_tcp_km()
        kid_future = asyncio.ensure_future(km.start_kernel(stdout=PIPE, stderr=PIPE))
        # This is relying on the ordering of the asyncio queue, not sure if guaranteed or not:
        kid, _ = await asyncio.gather(kid_future, km.shutdown_all())
        self.assertNotIn(kid, km)
        # Start another kernel
        kid = await ensure_future(km.start_kernel(stdout=PIPE, stderr=PIPE))
        self.assertIn(kid, km)
        self.assertEqual(len(km), 1)
        await km.shutdown_all()
        self.assertNotIn(kid, km)
        # shutdown again is okay, because we have no kernels
        await km.shutdown_all()
    @gen_test
    async def test_use_pending_kernels(self):
        """With pending kernels, start returns before ready; lifecycle still works."""
        km = self._get_pending_kernels_km()
        kid = await ensure_future(km.start_kernel(stdout=PIPE, stderr=PIPE))
        kernel = km.get_kernel(kid)
        # start_kernel returned while the kernel is still pending.
        assert not kernel.ready.done()
        assert kid in km
        assert kid in km.list_kernel_ids()
        assert len(km) == 1, f"{len(km)} != {1}"
        # Wait for the kernel to start.
        await kernel.ready
        await km.restart_kernel(kid, now=True)
        out = await km.is_alive(kid)
        assert out
        assert kid in km.list_kernel_ids()
        await km.interrupt_kernel(kid)
        k = km.get_kernel(kid)
        assert isinstance(k, AsyncKernelManager)
        await ensure_future(km.shutdown_kernel(kid, now=True))
        # Wait for the kernel to shutdown
        await kernel.ready
        assert kid not in km, f"{kid} not in {km}"
    @gen_test
    async def test_use_pending_kernels_early_restart(self):
        """Restarting a still-pending kernel raises RuntimeError."""
        km = self._get_pending_kernels_km()
        kid = await ensure_future(km.start_kernel(stdout=PIPE, stderr=PIPE))
        kernel = km.get_kernel(kid)
        assert not kernel.ready.done()
        with pytest.raises(RuntimeError):
            await km.restart_kernel(kid, now=True)
        await kernel.ready
        await ensure_future(km.shutdown_kernel(kid, now=True))
        # Wait for the kernel to shutdown
        await kernel.ready
        assert kid not in km, f"{kid} not in {km}"
    @gen_test
    async def test_use_pending_kernels_early_shutdown(self):
        """Shutting down a still-pending kernel is allowed and completes."""
        km = self._get_pending_kernels_km()
        kid = await ensure_future(km.start_kernel(stdout=PIPE, stderr=PIPE))
        kernel = km.get_kernel(kid)
        assert not kernel.ready.done()
        # Try shutting down while the kernel is pending
        await ensure_future(km.shutdown_kernel(kid, now=True))
        # Wait for the kernel to shutdown
        await kernel.ready
        assert kid not in km, f"{kid} not in {km}"
@gen_test
async def test_use_pending_kernels_early_interrupt(self):
    """Interrupting while the kernel is still pending must raise RuntimeError."""
    km = self._get_pending_kernels_km()
    kid = await ensure_future(km.start_kernel(stdout=PIPE, stderr=PIPE))
    kernel = km.get_kernel(kid)
    assert not kernel.ready.done()
    with pytest.raises(RuntimeError):
        await km.interrupt_kernel(kid)
    # Now wait for the kernel to be ready.
    await kernel.ready
    await ensure_future(km.shutdown_kernel(kid, now=True))
    # Wait for the kernel to shutdown
    await kernel.ready
    assert kid not in km, f"{kid} not in {km}"
@gen_test
async def test_tcp_cinfo(self):
    """Connection-info round-trip over the tcp transport."""
    km = self._get_tcp_km()
    await self._run_cinfo(km, "tcp", localhost())
@skip_win32
@gen_test
async def test_ipc_lifecycle(self):
    """Kernel lifecycle over the ipc transport (POSIX only)."""
    km = self._get_ipc_km()
    await self._run_lifecycle(km)
@skip_win32
@gen_test
async def test_ipc_cinfo(self):
    """Connection-info round-trip over the ipc transport (POSIX only)."""
    km = self._get_ipc_km()
    await self._run_cinfo(km, "ipc", "test")
@gen_test
async def test_start_sequence_tcp_kernels(self):
    """Ensure that a sequence of kernel startups doesn't break anything."""
    # Three back-to-back lifecycles, each with a fresh manager.
    await self._run_lifecycle(self._get_tcp_km())
    await self._run_lifecycle(self._get_tcp_km())
    await self._run_lifecycle(self._get_tcp_km())
@skip_win32
@gen_test
async def test_start_sequence_ipc_kernels(self):
    """Ensure that a sequence of kernel startups doesn't break anything."""
    # Three back-to-back lifecycles, each with a fresh manager (ipc transport).
    await self._run_lifecycle(self._get_ipc_km())
    await self._run_lifecycle(self._get_ipc_km())
    await self._run_lifecycle(self._get_ipc_km())
def tcp_lifecycle_with_loop(self):
    """Run the raw TCP lifecycle on a private event loop.

    Worker threads have no running loop, so create and install one first.
    """
    thread_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(thread_loop)
    thread_loop.run_until_complete(self.raw_tcp_lifecycle())
    thread_loop.close()
# static so picklable for multiprocessing on Windows
@classmethod
async def raw_tcp_lifecycle(cls, test_kid=None):
    """Run a tcp lifecycle assuming an event loop already exists.

    Since @gen_test creates an event loop, we need a raw form of
    test_tcp_lifecycle that assumes the loop already exists.
    """
    km = cls._get_tcp_km()
    await cls._run_lifecycle(km, test_kid=test_kid)
# static so picklable for multiprocessing on Windows
@classmethod
def raw_tcp_lifecycle_sync(cls, test_kid=None):
    """Blocking wrapper around raw_tcp_lifecycle for non-async callers."""
    private_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(private_loop)
    private_loop.run_until_complete(cls.raw_tcp_lifecycle(test_kid=test_kid))
    private_loop.close()
@gen_test
async def test_start_parallel_thread_kernels(self):
    """Kernels can start concurrently from multiple threads."""
    await self.raw_tcp_lifecycle()

    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as thread_executor:
        future1 = thread_executor.submit(self.tcp_lifecycle_with_loop)
        future2 = thread_executor.submit(self.tcp_lifecycle_with_loop)
        future1.result()
        future2.result()
@gen_test
async def test_start_parallel_process_kernels(self):
    """Kernels can start concurrently from a thread and a subprocess."""
    await self.raw_tcp_lifecycle()

    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_executor:
        future1 = thread_executor.submit(self.tcp_lifecycle_with_loop)
        with concurrent.futures.ProcessPoolExecutor(max_workers=1) as process_executor:
            # Windows tests needs this target to be picklable:
            future2 = process_executor.submit(self.raw_tcp_lifecycle_sync)
            future2.result()
        future1.result()
@gen_test
async def test_subclass_callables(self):
    """Subclass hooks fire the expected number of times through a full lifecycle."""
    mkm = self._get_tcp_km_sub()

    mkm.reset_counts()
    kid = await mkm.start_kernel(stdout=PIPE, stderr=PIPE)
    assert mkm.call_count("start_kernel") == 1
    assert isinstance(mkm.get_kernel(kid), AsyncKMSubclass)
    assert mkm.get_kernel(kid).call_count("start_kernel") == 1
    assert mkm.get_kernel(kid).call_count("_launch_kernel") == 1
    assert await mkm.is_alive(kid)
    assert kid in mkm
    assert kid in mkm.list_kernel_ids()
    assert len(mkm) == 1, f"{len(mkm)} != {1}"

    mkm.get_kernel(kid).reset_counts()
    mkm.reset_counts()
    await mkm.restart_kernel(kid, now=True)
    # restart with now=True walks shutdown/interrupt/kill/cleanup plus a fresh start.
    assert mkm.call_count("restart_kernel") == 1
    assert mkm.call_count("get_kernel") == 1
    assert mkm.get_kernel(kid).call_count("restart_kernel") == 1
    assert mkm.get_kernel(kid).call_count("shutdown_kernel") == 1
    assert mkm.get_kernel(kid).call_count("interrupt_kernel") == 1
    assert mkm.get_kernel(kid).call_count("_kill_kernel") == 1
    assert mkm.get_kernel(kid).call_count("cleanup_resources") == 1
    assert mkm.get_kernel(kid).call_count("start_kernel") == 1
    assert mkm.get_kernel(kid).call_count("_launch_kernel") == 1
    assert await mkm.is_alive(kid)
    assert kid in mkm.list_kernel_ids()

    mkm.get_kernel(kid).reset_counts()
    mkm.reset_counts()
    await mkm.interrupt_kernel(kid)
    assert mkm.call_count("interrupt_kernel") == 1
    assert mkm.call_count("get_kernel") == 1
    assert mkm.get_kernel(kid).call_count("interrupt_kernel") == 1

    mkm.get_kernel(kid).reset_counts()
    mkm.reset_counts()
    k = mkm.get_kernel(kid)
    assert isinstance(k, AsyncKMSubclass)
    assert mkm.call_count("get_kernel") == 1

    mkm.get_kernel(kid).reset_counts()
    mkm.reset_counts()
    await mkm.shutdown_all(now=True)
    # shutdown_all goes through the manager hooks, not per-kernel request/finish hooks.
    assert mkm.call_count("shutdown_kernel") == 1
    assert mkm.call_count("remove_kernel") == 1
    assert mkm.call_count("request_shutdown") == 0
    assert mkm.call_count("finish_shutdown") == 0
    assert mkm.call_count("cleanup_resources") == 0
    assert kid not in mkm, f"{kid} not in {mkm}"
@gen_test
async def test_bad_kernelspec(self):
    """Starting a kernelspec whose argv doesn't exist raises immediately."""
    km = self._get_tcp_km()
    install_kernel(
        os.path.join(paths.jupyter_data_dir(), "kernels"),
        argv=["non_existent_executable"],
        name="bad",
    )
    with pytest.raises(FileNotFoundError):
        await ensure_future(km.start_kernel(kernel_name="bad", stdout=PIPE, stderr=PIPE))
@gen_test
async def test_bad_kernelspec_pending(self):
    """With pending kernels, the launch error surfaces on `ready`, not on start."""
    km = self._get_pending_kernels_km()
    install_kernel(
        os.path.join(paths.jupyter_data_dir(), "kernels"),
        argv=["non_existent_executable"],
        name="bad",
    )
    kernel_id = await ensure_future(
        km.start_kernel(kernel_name="bad", stdout=PIPE, stderr=PIPE)
    )
    with pytest.raises(FileNotFoundError):
        await km.get_kernel(kernel_id).ready
    # The failed kernel stays registered until explicitly shut down.
    assert kernel_id in km.list_kernel_ids()
    await ensure_future(km.shutdown_kernel(kernel_id))
    assert kernel_id not in km.list_kernel_ids()

View File

@@ -0,0 +1,350 @@
"""Test Provisioning"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import asyncio
import json
import os
import signal
import sys
from subprocess import PIPE
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
import pytest
from entrypoints import EntryPoint
from entrypoints import NoSuchEntryPoint
from jupyter_core import paths
from traitlets import Int
from traitlets import Unicode
from ..connect import KernelConnectionInfo
from ..kernelspec import KernelSpecManager
from ..kernelspec import NoSuchKernel
from ..launcher import launch_kernel
from ..manager import AsyncKernelManager
from ..provisioning import KernelProvisionerBase
from ..provisioning import KernelProvisionerFactory
from ..provisioning import LocalProvisioner
pjoin = os.path.join
class SubclassedTestProvisioner(LocalProvisioner):
    """LocalProvisioner subclass with extra configurable traits for testing."""

    # Traits populated from the kernelspec's kernel_provisioner.config stanza.
    config_var_1: int = Int(config=True)
    config_var_2: str = Unicode(config=True)

    pass
class CustomTestProvisioner(KernelProvisionerBase):
    """Minimal from-scratch provisioner managing a local kernel subprocess."""

    process = None  # handle returned by launch_kernel(); None when not running
    pid = None  # pid of the launched process
    pgid = None  # process group id, when the platform supports getpgid

    # Traits populated from the kernelspec's kernel_provisioner.config stanza.
    config_var_1: int = Int(config=True)
    config_var_2: str = Unicode(config=True)

    @property
    def has_process(self) -> bool:
        """True while a kernel process is being managed."""
        return self.process is not None

    async def poll(self) -> Optional[int]:
        """Return the process's poll() result, or 0 when there is no process."""
        ret = 0
        if self.process:
            ret = self.process.poll()
        return ret

    async def wait(self) -> Optional[int]:
        """Wait for the process to exit, close its fds, and return the exit code."""
        ret = 0
        if self.process:
            # Sleep-poll rather than process.wait() so the event loop keeps running.
            while await self.poll() is None:
                await asyncio.sleep(0.1)

            # Process is no longer alive, wait and clear
            ret = self.process.wait()
            # Make sure all the fds get closed.
            for attr in ['stdout', 'stderr', 'stdin']:
                fid = getattr(self.process, attr)
                if fid:
                    fid.close()
            self.process = None
        return ret

    async def send_signal(self, signum: int) -> None:
        """Deliver *signum* to the kernel, special-casing SIGINT on Windows."""
        if self.process:
            if signum == signal.SIGINT and sys.platform == 'win32':
                from ..win_interrupt import send_interrupt

                send_interrupt(self.process.win32_interrupt_event)
                return

            # Prefer process-group over process
            if self.pgid and hasattr(os, "killpg"):
                try:
                    os.killpg(self.pgid, signum)
                    return
                except OSError:
                    pass
            return self.process.send_signal(signum)

    async def kill(self, restart=False) -> None:
        """Forcefully kill the kernel process, if any."""
        if self.process:
            self.process.kill()

    async def terminate(self, restart=False) -> None:
        """Gracefully terminate the kernel process, if any."""
        if self.process:
            self.process.terminate()

    async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]:
        """Write the connection file and assemble the kernel command line."""
        km = self.parent
        if km:
            # save kwargs for use in restart
            km._launch_args = kwargs.copy()
            # build the Popen cmd
            extra_arguments = kwargs.pop('extra_arguments', [])

            # write connection file / get default ports
            km.write_connection_file()
            self.connection_info = km.get_connection_info()

            kernel_cmd = km.format_kernel_cmd(
                extra_arguments=extra_arguments
            )  # This needs to remain here for b/c

        # NOTE(review): kernel_cmd is unbound if parent is None — assumes a
        # parent kernel manager is always set; confirm.
        return await super().pre_launch(cmd=kernel_cmd, **kwargs)

    async def launch_kernel(self, cmd: List[str], **kwargs: Any) -> KernelConnectionInfo:
        """Launch the kernel process and record its pid/pgid."""
        scrubbed_kwargs = kwargs
        self.process = launch_kernel(cmd, **scrubbed_kwargs)
        pgid = None
        if hasattr(os, "getpgid"):
            try:
                pgid = os.getpgid(self.process.pid)
            except OSError:
                pass

        self.pid = self.process.pid
        self.pgid = pgid
        return self.connection_info

    async def cleanup(self, restart=False) -> None:
        """No extra resources to release for this provisioner."""
        pass
class NewTestProvisioner(CustomTestProvisioner):
    """Provisioner registered *after* factory initialization, to test late discovery."""

    pass
def build_kernelspec(name: str, provisioner: Optional[str] = None) -> None:
    """Write a signal-test kernelspec named *name*, optionally bound to *provisioner*."""
    argv = [
        sys.executable,
        '-m',
        'jupyter_client.tests.signalkernel',
        '-f',
        '{connection_file}',
    ]
    spec = {
        'argv': argv,
        'display_name': f"Signal Test Kernel w {provisioner}",
        'env': {'TEST_VARS': '${TEST_VARS}:test_var_2'},
        'metadata': {},
    }
    if provisioner:
        spec['metadata']['kernel_provisioner'] = {'provisioner_name': provisioner}
        # Non-local provisioners also carry traitlet config to validate later.
        if provisioner != 'local-provisioner':
            spec['metadata']['kernel_provisioner']['config'] = {
                'config_var_1': 42,
                'config_var_2': name,
            }

    kernel_dir = pjoin(paths.jupyter_data_dir(), 'kernels', name)
    os.makedirs(kernel_dir)
    with open(pjoin(kernel_dir, 'kernel.json'), 'w') as f:
        json.dump(spec, f)
def new_provisioner():
    """Install a kernelspec bound to the late-registered test provisioner."""
    build_kernelspec('new_provisioner', 'new-test-provisioner')
def custom_provisioner():
    """Install a kernelspec bound to the custom test provisioner."""
    build_kernelspec('custom_provisioner', 'custom-test-provisioner')
@pytest.fixture
def all_provisioners():
    """Install kernelspecs covering every provisioner scenario under test."""
    build_kernelspec('no_provisioner')
    build_kernelspec('missing_provisioner', 'missing-provisioner')
    build_kernelspec('default_provisioner', 'local-provisioner')
    build_kernelspec('subclassed_provisioner', 'subclassed-test-provisioner')
    custom_provisioner()
@pytest.fixture(
    params=[
        'no_provisioner',
        'default_provisioner',
        'missing_provisioner',
        'custom_provisioner',
        'subclassed_provisioner',
    ]
)
def akm(request, all_provisioners):
    """An AsyncKernelManager parametrized over each installed kernelspec."""
    return AsyncKernelManager(kernel_name=request.param)
# Provisioner name -> (module, object) pairs the mocked entrypoint calls know
# about at factory-initialization time; 'new-test-provisioner' is deliberately
# absent so tests can introduce it afterwards.
initial_provisioner_map = {
    'local-provisioner': ('jupyter_client.provisioning', 'LocalProvisioner'),
    'subclassed-test-provisioner': (
        'jupyter_client.tests.test_provisioning',
        'SubclassedTestProvisioner',
    ),
    'custom-test-provisioner': ('jupyter_client.tests.test_provisioning', 'CustomTestProvisioner'),
}
def mock_get_all_provisioners() -> List[EntryPoint]:
    """Return an entry point for every provisioner in the initial map."""
    return [
        EntryPoint(name, module, obj)
        for name, (module, obj) in initial_provisioner_map.items()
    ]
def mock_get_provisioner(factory, name) -> EntryPoint:
    """Resolve a provisioner entry point by name, mimicking entrypoint lookup."""
    # The late-registered provisioner is always resolvable by direct lookup.
    if name == 'new-test-provisioner':
        return EntryPoint(
            'new-test-provisioner', 'jupyter_client.tests.test_provisioning', 'NewTestProvisioner'
        )

    try:
        module, obj = initial_provisioner_map[name]
    except KeyError:
        raise NoSuchEntryPoint(KernelProvisionerFactory.GROUP_NAME, name) from None
    return EntryPoint(name, module, obj)
@pytest.fixture
def kpf(monkeypatch):
    """Setup the Kernel Provisioner Factory, mocking the entrypoint fetch calls."""
    monkeypatch.setattr(
        KernelProvisionerFactory, '_get_all_provisioners', mock_get_all_provisioners
    )
    monkeypatch.setattr(KernelProvisionerFactory, '_get_provisioner', mock_get_provisioner)
    # NOTE(review): .instance() suggests a shared singleton across tests — confirm.
    factory = KernelProvisionerFactory.instance()
    return factory
class TestDiscovery:
    """Kernelspec discovery behavior in the presence of various provisioners."""

    def test_find_all_specs(self, kpf, all_provisioners):
        """get_all_specs includes only specs whose provisioner is resolvable."""
        ksm = KernelSpecManager()
        kernels = ksm.get_all_specs()

        # Ensure specs for initial provisioners exist,
        # and missing_provisioner & new_provisioner don't
        assert 'no_provisioner' in kernels
        assert 'default_provisioner' in kernels
        assert 'subclassed_provisioner' in kernels
        assert 'custom_provisioner' in kernels
        assert 'missing_provisioner' not in kernels
        assert 'new_provisioner' not in kernels

    def test_get_missing(self, all_provisioners):
        """Looking up a spec with an unresolvable provisioner raises NoSuchKernel."""
        ksm = KernelSpecManager()
        with pytest.raises(NoSuchKernel):
            ksm.get_kernel_spec('missing_provisioner')

    def test_get_new(self, kpf):
        """A provisioner registered after factory init is still discoverable."""
        new_provisioner()  # Introduce provisioner after initialization of KPF
        ksm = KernelSpecManager()
        kernel = ksm.get_kernel_spec('new_provisioner')
        assert 'new-test-provisioner' == kernel.metadata['kernel_provisioner']['provisioner_name']
class TestRuntime:
    """Kernel lifecycle behavior for each provisioner flavor."""

    async def akm_test(self, kernel_mgr):
        """Starts a kernel, validates the associated provisioner's config, shuts down kernel"""
        assert kernel_mgr.provisioner is None
        if kernel_mgr.kernel_name == 'missing_provisioner':
            with pytest.raises(NoSuchKernel):
                await kernel_mgr.start_kernel()
        else:
            await kernel_mgr.start_kernel()

            TestRuntime.validate_provisioner(kernel_mgr)

            await kernel_mgr.shutdown_kernel()
            assert kernel_mgr.provisioner.has_process is False

    @pytest.mark.asyncio
    async def test_existing(self, kpf, akm):
        """Lifecycle works for every pre-registered kernelspec (parametrized)."""
        await self.akm_test(akm)

    @pytest.mark.asyncio
    async def test_new(self, kpf):
        """Lifecycle works for a provisioner registered after factory init."""
        new_provisioner()  # Introduce provisioner after initialization of KPF
        new_km = AsyncKernelManager(kernel_name='new_provisioner')
        await self.akm_test(new_km)

    @pytest.mark.asyncio
    async def test_custom_lifecycle(self, kpf):
        """start/restart/interrupt/shutdown against the custom provisioner."""
        custom_provisioner()
        async_km = AsyncKernelManager(kernel_name='custom_provisioner')
        await async_km.start_kernel(stdout=PIPE, stderr=PIPE)
        is_alive = await async_km.is_alive()
        assert is_alive
        await async_km.restart_kernel(now=True)
        is_alive = await async_km.is_alive()
        assert is_alive
        await async_km.interrupt_kernel()
        assert isinstance(async_km, AsyncKernelManager)
        await async_km.shutdown_kernel(now=True)
        is_alive = await async_km.is_alive()
        assert is_alive is False
        assert async_km.context.closed

    @pytest.mark.asyncio
    async def test_default_provisioner_config(self, kpf, all_provisioners):
        """A spec without provisioner metadata falls back to the factory default."""
        kpf.default_provisioner_name = 'custom-test-provisioner'
        async_km = AsyncKernelManager(kernel_name='no_provisioner')
        await async_km.start_kernel(stdout=PIPE, stderr=PIPE)
        is_alive = await async_km.is_alive()
        assert is_alive

        assert isinstance(async_km.provisioner, CustomTestProvisioner)
        assert async_km.provisioner.config_var_1 == 0  # Not in kernelspec, so default of 0 exists

        await async_km.shutdown_kernel(now=True)
        is_alive = await async_km.is_alive()
        assert is_alive is False
        assert async_km.context.closed

    @staticmethod
    def validate_provisioner(akm: AsyncKernelManager):
        """Assert the provisioner's class and config match its kernelspec."""
        # Ensure the provisioner is managing a process at this point
        assert akm.provisioner is not None and akm.provisioner.has_process

        # Validate provisioner config
        if akm.kernel_name in ['no_provisioner', 'default_provisioner']:
            assert not hasattr(akm.provisioner, 'config_var_1')
            assert not hasattr(akm.provisioner, 'config_var_2')
        else:
            assert akm.provisioner.config_var_1 == 42
            assert akm.provisioner.config_var_2 == akm.kernel_name

        # Validate provisioner class
        if akm.kernel_name in ['no_provisioner', 'default_provisioner', 'subclassed_provisioner']:
            assert isinstance(akm.provisioner, LocalProvisioner)
            if akm.kernel_name == 'subclassed_provisioner':
                assert isinstance(akm.provisioner, SubclassedTestProvisioner)
            else:
                assert not isinstance(akm.provisioner, SubclassedTestProvisioner)
        else:
            assert isinstance(akm.provisioner, CustomTestProvisioner)
            assert not isinstance(akm.provisioner, LocalProvisioner)
            if akm.kernel_name == 'new_provisioner':
                assert isinstance(akm.provisioner, NewTestProvisioner)

View File

@@ -0,0 +1,29 @@
"""Test the jupyter_client public API
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import jupyter_client
from jupyter_client import connect
from jupyter_client import launcher
def test_kms():
    """All KernelManager flavors are re-exported at package top level."""
    exported = dir(jupyter_client)
    for prefix in ("", "Async", "Multi"):
        assert f"{prefix}KernelManager" in exported
def test_kcs():
    """All KernelClient flavors are re-exported at package top level."""
    exported = dir(jupyter_client)
    for prefix in ("", "Blocking", "Async"):
        assert f"{prefix}KernelClient" in exported
def test_launcher():
    """Everything in launcher.__all__ is re-exported by jupyter_client."""
    exported = dir(jupyter_client)
    for name in launcher.__all__:
        assert name in exported
def test_connect():
    """Everything in connect.__all__ is re-exported by jupyter_client."""
    exported = dir(jupyter_client)
    for name in connect.__all__:
        assert name in exported

View File

@@ -0,0 +1,283 @@
"""Tests for the KernelManager"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import asyncio
import json
import os
import sys
import pytest
from jupyter_core import paths
from traitlets.config.loader import Config
from traitlets.log import get_logger
from jupyter_client.ioloop import AsyncIOLoopKernelManager
from jupyter_client.ioloop import IOLoopKernelManager
pjoin = os.path.join
def _install_kernel(name="problemtest", extra_env=None):
    """Install a 'problem kernel' spec under the test data dir; return its name."""
    env = {"TEST_VARS": "${TEST_VARS}:test_var_2"}
    env.update(extra_env or {})
    spec = {
        "argv": [
            sys.executable,
            "-m",
            "jupyter_client.tests.problemkernel",
            "-f",
            "{connection_file}",
        ],
        "display_name": "Problematic Test Kernel",
        "env": env,
    }
    kernel_dir = pjoin(paths.jupyter_data_dir(), "kernels", name)
    os.makedirs(kernel_dir)
    with open(pjoin(kernel_dir, "kernel.json"), "w") as f:
        json.dump(spec, f)
    return name
@pytest.fixture
def install_kernel():
    """A problem kernel that starts normally."""
    return _install_kernel("problemtest")
@pytest.fixture
def install_fail_kernel():
    """A problem kernel configured to fail on start."""
    return _install_kernel("problemtest-fail", extra_env={"FAIL_ON_START": "1"})
@pytest.fixture
def install_slow_fail_kernel():
    """A problem kernel configured to delay startup and then fail."""
    return _install_kernel(
        "problemtest-slow", extra_env={"STARTUP_DELAY": "5", "FAIL_ON_START": "1"}
    )
@pytest.fixture(params=["tcp", "ipc"])
def transport(request):
    """Parametrize tests over transports, skipping ipc where unsupported."""
    transport_name = request.param
    if transport_name == "ipc" and sys.platform == "win32":
        pytest.skip("Transport 'ipc' not supported on Windows.")
    return transport_name
@pytest.fixture
def config(transport):
    """A KernelManager Config wired up for the parametrized transport."""
    c = Config()
    c.KernelManager.transport = transport
    if transport == "ipc":
        c.KernelManager.ip = "test"
    return c
@pytest.fixture
def debug_logging():
    """Raise the traitlets logger to DEBUG for restarter diagnostics."""
    get_logger().setLevel("DEBUG")
@pytest.mark.asyncio
async def test_restart_check(config, install_kernel, debug_logging):
    """Test that the kernel is restarted and recovers"""
    # If this test failes, run it with --log-cli-level=DEBUG to inspect
    N_restarts = 1
    config.KernelRestarter.restart_limit = N_restarts
    config.KernelRestarter.debug = True
    km = IOLoopKernelManager(kernel_name=install_kernel, config=config)

    cbs = 0
    restarts = [asyncio.Future() for i in range(N_restarts)]

    def cb():
        # One future resolved per observed restart; extra restarts are an error.
        nonlocal cbs
        if cbs >= N_restarts:
            raise RuntimeError("Kernel restarted more than %d times!" % N_restarts)
        restarts[cbs].set_result(True)
        cbs += 1

    try:
        km.start_kernel()
        km.add_restart_callback(cb, 'restart')
    except BaseException:
        if km.has_kernel:
            km.shutdown_kernel()
        raise

    try:
        for i in range(N_restarts + 1):
            kc = km.client()
            kc.start_channels()
            kc.wait_for_ready(timeout=60)
            kc.stop_channels()
            if i < N_restarts:
                # Kill without cleanup to simulate crash:
                await km.provisioner.kill()
                await restarts[i]

                # Wait for kill + restart
                max_wait = 10.0
                waited = 0.0
                while waited < max_wait and km.is_alive():
                    await asyncio.sleep(0.1)
                    waited += 0.1
                while waited < max_wait and not km.is_alive():
                    await asyncio.sleep(0.1)
                    waited += 0.1

        assert cbs == N_restarts
        assert km.is_alive()
    finally:
        km.shutdown_kernel(now=True)
        assert km.context.closed
@pytest.mark.asyncio
async def test_restarter_gives_up(config, install_fail_kernel, debug_logging):
    """Test that the restarter gives up after reaching the restart limit"""
    # If this test failes, run it with --log-cli-level=DEBUG to inspect
    N_restarts = 1
    config.KernelRestarter.restart_limit = N_restarts
    config.KernelRestarter.debug = True
    km = IOLoopKernelManager(kernel_name=install_fail_kernel, config=config)

    cbs = 0
    restarts = [asyncio.Future() for i in range(N_restarts)]

    def cb():
        nonlocal cbs
        if cbs >= N_restarts:
            raise RuntimeError("Kernel restarted more than %d times!" % N_restarts)
        restarts[cbs].set_result(True)
        cbs += 1

    died = asyncio.Future()

    def on_death():
        # Fires when the restarter declares the kernel dead.
        died.set_result(True)

    try:
        km.start_kernel()
        km.add_restart_callback(cb, 'restart')
        km.add_restart_callback(on_death, 'dead')
    except BaseException:
        if km.has_kernel:
            km.shutdown_kernel()
        raise

    try:
        for i in range(N_restarts):
            await restarts[i]

        assert await died
        assert cbs == N_restarts
    finally:
        km.shutdown_kernel(now=True)
        assert km.context.closed
@pytest.mark.asyncio
async def test_async_restart_check(config, install_kernel, debug_logging):
    """Test that the kernel is restarted and recovers"""
    # If this test failes, run it with --log-cli-level=DEBUG to inspect
    N_restarts = 1
    config.KernelRestarter.restart_limit = N_restarts
    config.KernelRestarter.debug = True
    km = AsyncIOLoopKernelManager(kernel_name=install_kernel, config=config)

    cbs = 0
    restarts = [asyncio.Future() for i in range(N_restarts)]

    def cb():
        # One future resolved per observed restart; extra restarts are an error.
        nonlocal cbs
        if cbs >= N_restarts:
            raise RuntimeError("Kernel restarted more than %d times!" % N_restarts)
        restarts[cbs].set_result(True)
        cbs += 1

    try:
        await km.start_kernel()
        km.add_restart_callback(cb, 'restart')
    except BaseException:
        if km.has_kernel:
            await km.shutdown_kernel()
        raise

    try:
        for i in range(N_restarts + 1):
            kc = km.client()
            kc.start_channels()
            await kc.wait_for_ready(timeout=60)
            kc.stop_channels()
            if i < N_restarts:
                # Kill without cleanup to simulate crash:
                await km.provisioner.kill()
                await restarts[i]

                # Wait for kill + restart
                max_wait = 10.0
                waited = 0.0
                while waited < max_wait and await km.is_alive():
                    await asyncio.sleep(0.1)
                    waited += 0.1
                while waited < max_wait and not await km.is_alive():
                    await asyncio.sleep(0.1)
                    waited += 0.1

        assert cbs == N_restarts
        assert await km.is_alive()
    finally:
        await km.shutdown_kernel(now=True)
        assert km.context.closed
@pytest.mark.asyncio
async def test_async_restarter_gives_up(config, install_slow_fail_kernel, debug_logging):
    """Test that the restarter gives up after reaching the restart limit"""
    # If this test failes, run it with --log-cli-level=DEBUG to inspect
    N_restarts = 2
    config.KernelRestarter.restart_limit = N_restarts
    config.KernelRestarter.debug = True
    # Long stable_start_time so the slow-failing kernel never counts as stable.
    config.KernelRestarter.stable_start_time = 30.0
    km = AsyncIOLoopKernelManager(kernel_name=install_slow_fail_kernel, config=config)

    cbs = 0
    restarts = [asyncio.Future() for i in range(N_restarts)]

    def cb():
        nonlocal cbs
        if cbs >= N_restarts:
            raise RuntimeError("Kernel restarted more than %d times!" % N_restarts)
        restarts[cbs].set_result(True)
        cbs += 1

    died = asyncio.Future()

    def on_death():
        # Fires when the restarter declares the kernel dead.
        died.set_result(True)

    try:
        await km.start_kernel()
        km.add_restart_callback(cb, 'restart')
        km.add_restart_callback(on_death, 'dead')
    except BaseException:
        if km.has_kernel:
            await km.shutdown_kernel()
        raise

    try:
        await asyncio.gather(*restarts)
        assert await died
        assert cbs == N_restarts
    finally:
        await km.shutdown_kernel(now=True)
        assert km.context.closed

View File

@@ -0,0 +1,354 @@
"""test building messages with Session"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import hmac
import os
import platform
import uuid
from datetime import datetime
from unittest import mock
import pytest
import zmq
from tornado import ioloop
from zmq.eventloop.zmqstream import ZMQStream
from zmq.tests import BaseZMQTestCase
from jupyter_client import jsonutil
from jupyter_client import session as ss
def _bad_packer(obj):
    """Packer that always raises, to exercise Session's packer validation."""
    raise TypeError("I don't work")
def _bad_unpacker(bytes):
    """Unpacker that always raises, to exercise Session's unpacker validation."""
    raise TypeError("I don't work either")
class SessionTestCase(BaseZMQTestCase):
    """BaseZMQTestCase with a fresh Session created per test."""

    def setUp(self):
        BaseZMQTestCase.setUp(self)
        self.session = ss.Session()
@pytest.fixture
def no_copy_threshold():
    """Disable zero-copy optimizations in pyzmq >= 17"""
    # Patching COPY_THRESHOLD down to 1 forces frames to be copied.
    with mock.patch.object(zmq, "COPY_THRESHOLD", 1, create=True):
        yield
@pytest.mark.usefixtures("no_copy_threshold")
class TestSession(SessionTestCase):
def test_msg(self):
    """message format"""
    msg = self.session.msg("execute")
    # A built message has exactly these six top-level keys.
    thekeys = set("header parent_header metadata content msg_type msg_id".split())
    s = set(msg.keys())
    self.assertEqual(s, thekeys)
    self.assertTrue(isinstance(msg["content"], dict))
    self.assertTrue(isinstance(msg["metadata"], dict))
    self.assertTrue(isinstance(msg["header"], dict))
    self.assertTrue(isinstance(msg["parent_header"], dict))
    self.assertTrue(isinstance(msg["msg_id"], str))
    self.assertTrue(isinstance(msg["msg_type"], str))
    # msg_type is mirrored at top level and in the header.
    self.assertEqual(msg["header"]["msg_type"], "execute")
    self.assertEqual(msg["msg_type"], "execute")
def test_serialize(self):
    """Round-trip a message through serialize/feed_identities/deserialize."""
    msg = self.session.msg("execute", content=dict(a=10, b=1.1))
    msg_list = self.session.serialize(msg, ident=b"foo")
    ident, msg_list = self.session.feed_identities(msg_list)
    new_msg = self.session.deserialize(msg_list)
    self.assertEqual(ident[0], b"foo")
    self.assertEqual(new_msg["msg_id"], msg["msg_id"])
    self.assertEqual(new_msg["msg_type"], msg["msg_type"])
    self.assertEqual(new_msg["header"], msg["header"])
    self.assertEqual(new_msg["content"], msg["content"])
    self.assertEqual(new_msg["parent_header"], msg["parent_header"])
    self.assertEqual(new_msg["metadata"], msg["metadata"])
    # ensure floats don't come out as Decimal:
    # (fixed: previously compared the deserialized type to itself, which always passed)
    self.assertEqual(type(new_msg["content"]["b"]), type(msg["content"]["b"]))
def test_default_secure(self):
    """A default Session has a bytes key and an HMAC digest ready for signing."""
    self.assertIsInstance(self.session.key, bytes)
    self.assertIsInstance(self.session.auth, hmac.HMAC)
def test_send(self):
    """send/recv round-trips over a PAIR socket, in all three send forms."""
    ctx = zmq.Context()
    A = ctx.socket(zmq.PAIR)
    B = ctx.socket(zmq.PAIR)
    A.bind("inproc://test")
    B.connect("inproc://test")

    # Form 1: send a prebuilt message dict.
    msg = self.session.msg("execute", content=dict(a=10))
    self.session.send(A, msg, ident=b"foo", buffers=[b"bar"])

    ident, msg_list = self.session.feed_identities(B.recv_multipart())
    new_msg = self.session.deserialize(msg_list)
    self.assertEqual(ident[0], b"foo")
    self.assertEqual(new_msg["msg_id"], msg["msg_id"])
    self.assertEqual(new_msg["msg_type"], msg["msg_type"])
    self.assertEqual(new_msg["header"], msg["header"])
    self.assertEqual(new_msg["content"], msg["content"])
    self.assertEqual(new_msg["parent_header"], msg["parent_header"])
    self.assertEqual(new_msg["metadata"], msg["metadata"])
    self.assertEqual(new_msg["buffers"], [b"bar"])

    # Form 2: send the parts (header/parent/content/metadata) explicitly.
    content = msg["content"]
    header = msg["header"]
    # Fresh msg_id so this counts as a new message.
    header["msg_id"] = self.session.msg_id
    parent = msg["parent_header"]
    metadata = msg["metadata"]
    header["msg_type"]  # NOTE(review): no-op expression — likely leftover; confirm and remove.
    self.session.send(
        A,
        None,
        content=content,
        parent=parent,
        header=header,
        metadata=metadata,
        ident=b"foo",
        buffers=[b"bar"],
    )
    ident, msg_list = self.session.feed_identities(B.recv_multipart())
    new_msg = self.session.deserialize(msg_list)
    self.assertEqual(ident[0], b"foo")
    self.assertEqual(new_msg["msg_id"], header["msg_id"])
    self.assertEqual(new_msg["msg_type"], msg["msg_type"])
    self.assertEqual(new_msg["header"], msg["header"])
    self.assertEqual(new_msg["content"], msg["content"])
    self.assertEqual(new_msg["metadata"], msg["metadata"])
    self.assertEqual(new_msg["parent_header"], msg["parent_header"])
    self.assertEqual(new_msg["buffers"], [b"bar"])

    # Form 3: send the dict again and receive via session.recv.
    header["msg_id"] = self.session.msg_id

    self.session.send(A, msg, ident=b"foo", buffers=[b"bar"])
    ident, new_msg = self.session.recv(B)
    self.assertEqual(ident[0], b"foo")
    self.assertEqual(new_msg["msg_id"], header["msg_id"])
    self.assertEqual(new_msg["msg_type"], msg["msg_type"])
    self.assertEqual(new_msg["header"], msg["header"])
    self.assertEqual(new_msg["content"], msg["content"])
    self.assertEqual(new_msg["metadata"], msg["metadata"])
    self.assertEqual(new_msg["parent_header"], msg["parent_header"])
    self.assertEqual(new_msg["buffers"], [b"bar"])

    # buffers must support the buffer protocol
    with self.assertRaises(TypeError):
        self.session.send(A, msg, ident=b"foo", buffers=[1])

    # buffers must be contiguous
    buf = memoryview(os.urandom(16))
    with self.assertRaises(ValueError):
        self.session.send(A, msg, ident=b"foo", buffers=[buf[::2]])

    A.close()
    B.close()
    ctx.term()
def test_args(self):
    """initialization arguments for Session"""
    s = self.session
    self.assertTrue(s.pack is ss.default_packer)
    self.assertTrue(s.unpack is ss.default_unpacker)
    self.assertEqual(s.username, os.environ.get("USER", "username"))

    s = ss.Session()
    self.assertEqual(s.username, os.environ.get("USER", "username"))

    # pack/unpack must be callables
    self.assertRaises(TypeError, ss.Session, pack="hi")
    self.assertRaises(TypeError, ss.Session, unpack="hi")
    u = str(uuid.uuid4())
    s = ss.Session(username="carrot", session=u)
    self.assertEqual(s.session, u)
    self.assertEqual(s.username, "carrot")
@pytest.mark.skipif(platform.python_implementation() == 'PyPy', reason='Test fails on PyPy')
def test_tracking(self):
    """test tracking messages"""
    a, b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
    s = self.session
    s.copy_threshold = 1
    loop = ioloop.IOLoop(make_current=False)
    ZMQStream(a, io_loop=loop)
    msg = s.send(a, "hello", track=False)
    # Untracked sends report DONE immediately.
    self.assertTrue(msg["tracker"] is ss.DONE)
    msg = s.send(a, "hello", track=True)
    self.assertTrue(isinstance(msg["tracker"], zmq.MessageTracker))
    M = zmq.Message(b"hi there", track=True)
    msg = s.send(a, "hello", buffers=[M], track=True)
    t = msg["tracker"]
    self.assertTrue(isinstance(t, zmq.MessageTracker))
    # Buffer not yet released, so waiting briefly raises NotDone.
    self.assertRaises(zmq.NotDone, t.wait, 0.1)
    del M
    t.wait(1)  # this will raise
def test_unique_msg_ids(self):
    """test that messages receive unique ids"""
    ids = set()
    # 4096 headers in a row must all carry distinct msg_ids.
    for i in range(2**12):
        h = self.session.msg_header("test")
        msg_id = h["msg_id"]
        self.assertTrue(msg_id not in ids)
        ids.add(msg_id)
def test_feed_identities(self):
    """scrub the front for zmq IDENTITIES"""
    # NOTE(review): this builds a message but never calls feed_identities nor
    # asserts anything — looks incomplete; confirm intent.
    content = dict(code="whoda", stuff=object())
    self.session.msg("execute", content=content)
def test_session_id(self):
    """bsession stays the ascii encoding of session, regardless of access order."""
    session = ss.Session()
    # get bs before us
    bs = session.bsession
    us = session.session
    self.assertEqual(us.encode("ascii"), bs)
    session = ss.Session()
    # get us before bs
    us = session.session
    bs = session.bsession
    self.assertEqual(us.encode("ascii"), bs)
    # change propagates:
    session.session = "something else"
    bs = session.bsession
    us = session.session
    self.assertEqual(us.encode("ascii"), bs)
    session = ss.Session(session="stuff")
    # get us before bs
    self.assertEqual(session.bsession, session.session.encode("ascii"))
    self.assertEqual(b"stuff", session.bsession)
def test_zero_digest_history(self):
    """digest_history_size=0 disables digest tracking entirely."""
    session = ss.Session(digest_history_size=0)
    for i in range(11):
        session._add_digest(uuid.uuid4().bytes)
    self.assertEqual(len(session.digest_history), 0)
def test_cull_digest_history(self):
    """Overflowing the digest history culls it back down (100 -> 91 here)."""
    session = ss.Session(digest_history_size=100)
    for i in range(100):
        session._add_digest(uuid.uuid4().bytes)
    self.assertTrue(len(session.digest_history) == 100)
    # One past the limit triggers a cull back to 91 entries.
    session._add_digest(uuid.uuid4().bytes)
    self.assertTrue(len(session.digest_history) == 91)
    for i in range(9):
        session._add_digest(uuid.uuid4().bytes)
    self.assertTrue(len(session.digest_history) == 100)
    session._add_digest(uuid.uuid4().bytes)
    self.assertTrue(len(session.digest_history) == 91)
def test_bad_pack(self):
    """A packer that raises must be rejected with an informative ValueError."""
    with self.assertRaises(ValueError) as caught:
        ss.Session(pack=_bad_packer)
    message = str(caught.exception)
    self.assertIn("could not serialize", message)
    self.assertIn("don't work", message)
def test_bad_unpack(self):
    """An unpacker that raises must be rejected with an informative ValueError."""
    with self.assertRaises(ValueError) as caught:
        ss.Session(unpack=_bad_unpacker)
    message = str(caught.exception)
    self.assertIn("could not handle output", message)
    self.assertIn("don't work either", message)
def test_bad_packer(self):
    """A dotted-name packer that raises must be rejected with ValueError."""
    with self.assertRaises(ValueError) as caught:
        ss.Session(packer=__name__ + "._bad_packer")
    message = str(caught.exception)
    self.assertIn("could not serialize", message)
    self.assertIn("don't work", message)
def test_bad_unpacker(self):
    """A dotted-name unpacker that raises must be rejected with ValueError."""
    with self.assertRaises(ValueError) as caught:
        ss.Session(unpacker=__name__ + "._bad_unpacker")
    message = str(caught.exception)
    self.assertIn("could not handle output", message)
    self.assertIn("don't work either", message)
def test_bad_roundtrip(self):
    """An unpacker whose output isn't usable is rejected at construction."""
    with self.assertRaises(ValueError):
        ss.Session(unpack=lambda b: 5)
def _datetime_test(self, session):
content = dict(t=ss.utcnow())
metadata = dict(t=ss.utcnow())
p = session.msg("msg")
msg = session.msg("msg", content=content, metadata=metadata, parent=p["header"])
smsg = session.serialize(msg)
msg2 = session.deserialize(session.feed_identities(smsg)[1])
assert isinstance(msg2["header"]["date"], datetime)
self.assertEqual(msg["header"], msg2["header"])
self.assertEqual(msg["parent_header"], msg2["parent_header"])
self.assertEqual(msg["parent_header"], msg2["parent_header"])
assert isinstance(msg["content"]["t"], datetime)
assert isinstance(msg["metadata"]["t"], datetime)
assert isinstance(msg2["content"]["t"], str)
assert isinstance(msg2["metadata"]["t"], str)
self.assertEqual(msg["content"], jsonutil.extract_dates(msg2["content"]))
self.assertEqual(msg["content"], jsonutil.extract_dates(msg2["content"]))
    def test_datetimes(self):
        """Round-trip datetimes through the default session (JSON packer)."""
        self._datetime_test(self.session)
    def test_datetimes_pickle(self):
        """Round-trip datetimes through a pickle-packing session."""
        session = ss.Session(packer="pickle")
        self._datetime_test(session)
def test_datetimes_msgpack(self):
msgpack = pytest.importorskip("msgpack")
session = ss.Session(
pack=msgpack.packb,
unpack=lambda buf: msgpack.unpackb(buf, raw=False),
)
self._datetime_test(session)
    def test_send_raw(self):
        """send_raw should deliver pre-packed frames that deserialize intact."""
        ctx = zmq.Context()
        A = ctx.socket(zmq.PAIR)
        B = ctx.socket(zmq.PAIR)
        # bind before connect on the inproc transport within one context
        A.bind("inproc://test")
        B.connect("inproc://test")
        msg = self.session.msg("execute", content=dict(a=10))
        # pre-serialize the four framing dicts in wire order for send_raw
        msg_list = [
            self.session.pack(msg[part])
            for part in ["header", "parent_header", "metadata", "content"]
        ]
        self.session.send_raw(A, msg_list, ident=b"foo")
        ident, new_msg_list = self.session.feed_identities(B.recv_multipart())
        new_msg = self.session.deserialize(new_msg_list)
        self.assertEqual(ident[0], b"foo")
        self.assertEqual(new_msg["msg_type"], msg["msg_type"])
        self.assertEqual(new_msg["header"], msg["header"])
        self.assertEqual(new_msg["parent_header"], msg["parent_header"])
        self.assertEqual(new_msg["content"], msg["content"])
        self.assertEqual(new_msg["metadata"], msg["metadata"])
        # explicit teardown so ctx.term() does not block on open sockets
        A.close()
        B.close()
        ctx.term()
def test_clone(self):
s = self.session
s._add_digest("initial")
s2 = s.clone()
assert s2.session == s.session
assert s2.digest_history == s.digest_history
assert s2.digest_history is not s.digest_history
digest = "abcdef"
s._add_digest(digest)
assert digest in s.digest_history
assert digest not in s2.digest_history

View File

@@ -0,0 +1,9 @@
from jupyter_client.ssh.tunnel import select_random_ports
def test_random_ports():
    """select_random_ports(10) must return 10 distinct ports on every draw."""
    for _ in range(4096):
        ports = select_random_ports(10)
        assert len(ports) == 10
        # a set collapses duplicates, so equal sizes means all distinct
        # (replaces the O(n^2) per-draw ports.count(p) scan)
        assert len(set(ports)) == len(ports)

View File

@@ -0,0 +1,256 @@
"""Testing utils for jupyter_client tests
"""
import json
import os
import sys
from tempfile import TemporaryDirectory
from typing import Dict
from unittest.mock import patch
import pytest
from jupyter_client import AsyncKernelManager
from jupyter_client import AsyncMultiKernelManager
from jupyter_client import KernelManager
from jupyter_client import MultiKernelManager
pjoin = os.path.join
skip_win32 = pytest.mark.skipif(sys.platform.startswith("win"), reason="Windows")
sample_kernel_json = {
"argv": ["cat", "{connection_file}"],
"display_name": "Test kernel",
}
def install_kernel(kernels_dir, argv=None, name="test", display_name=None):
    """Write a kernelspec under *kernels_dir* and return its directory.

    Fields not supplied fall back to ``sample_kernel_json``.
    """
    target = pjoin(kernels_dir, name)
    os.makedirs(target)
    spec = dict(
        argv=argv or sample_kernel_json["argv"],
        display_name=display_name or sample_kernel_json["display_name"],
    )
    with open(pjoin(target, "kernel.json"), "w") as f:
        json.dump(spec, f)
    return target
class test_env(object):
    """Point Jupyter/IPython path variables at a fresh temporary directory.

    Usable as a context manager or via explicit start()/stop().
    """

    def start(self):
        self.test_dir = tmp = TemporaryDirectory()
        overrides = {
            "JUPYTER_CONFIG_DIR": pjoin(tmp.name, "jupyter"),
            "JUPYTER_DATA_DIR": pjoin(tmp.name, "jupyter_data"),
            "JUPYTER_RUNTIME_DIR": pjoin(tmp.name, "jupyter_runtime"),
            "IPYTHONDIR": pjoin(tmp.name, "ipython"),
            "TEST_VARS": "test_var_1",
        }
        self.env_patch = patch.dict(os.environ, overrides)
        self.env_patch.start()

    def stop(self):
        self.env_patch.stop()
        try:
            self.test_dir.cleanup()
        except (PermissionError, NotADirectoryError):
            # Windows can refuse to delete files still in use; tolerate it there
            if os.name != 'nt':
                raise

    def __enter__(self):
        self.start()
        return self.test_dir.name

    def __exit__(self, *exc_info):
        self.stop()
def execute(code="", kc=None, **kwargs):
    """Send *code* for execution and validate the standard reply traffic.

    Falls back to the module-global ``KC`` client and ``TIMEOUT`` when no
    client is given.  Returns ``(msg_id, reply_content)``.
    """
    from .test_message_spec import validate_message

    client = kc if kc is not None else KC  # noqa
    msg_id = client.execute(code=code, **kwargs)
    reply = client.get_shell_msg(timeout=TIMEOUT)  # noqa
    validate_message(reply, "execute_reply", msg_id)
    busy = client.get_iopub_msg(timeout=TIMEOUT)  # noqa
    validate_message(busy, "status", msg_id)
    assert busy["content"]["execution_state"] == "busy"
    if not kwargs.get("silent"):
        echoed = client.get_iopub_msg(timeout=TIMEOUT)  # noqa
        validate_message(echoed, "execute_input", msg_id)
        assert echoed["content"]["code"] == code
    return msg_id, reply["content"]
class RecordCallMixin:
    """Mixin that counts invocations of recorded method names."""

    method_calls: Dict[str, int]

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.method_calls = {}

    def record(self, method_name: str) -> None:
        """Bump the counter for *method_name*, starting at zero if unseen."""
        self.method_calls[method_name] = self.method_calls.get(method_name, 0) + 1

    def call_count(self, method_name: str) -> int:
        """Return how many times *method_name* was recorded (0 if never)."""
        return self.method_calls.setdefault(method_name, 0)

    def reset_counts(self) -> None:
        """Zero every existing counter without dropping any keys."""
        self.method_calls.update(dict.fromkeys(self.method_calls, 0))
def subclass_recorder(f):
    """Decorate a subclass method to record the call, run the superclass
    implementation, then run any extra logic in the subclass body.

    The superclass method's return value is what callers receive.
    """
    from functools import wraps

    @wraps(f)  # preserve f.__name__/__doc__ on the wrapper
    def wrapped(self, *args, **kwargs):
        # record this call
        self.record(f.__name__)
        method = getattr(self._superclass, f.__name__)
        # call the superclass method
        r = method(self, *args, **kwargs)
        # call anything defined in the actual class method
        f(self, *args, **kwargs)
        return r

    return wrapped
class KMSubclass(RecordCallMixin):
    """KernelManager lifecycle stubs: each method records its call via
    ``subclass_recorder`` and defers to ``_superclass`` (set by subclasses)."""

    @subclass_recorder
    def start_kernel(self, **kw):
        """Record call and defer to superclass"""
    @subclass_recorder
    def shutdown_kernel(self, now=False, restart=False):
        """Record call and defer to superclass"""
    @subclass_recorder
    def restart_kernel(self, now=False, **kw):
        """Record call and defer to superclass"""
    @subclass_recorder
    def interrupt_kernel(self):
        """Record call and defer to superclass"""
    @subclass_recorder
    def request_shutdown(self, restart=False):
        """Record call and defer to superclass"""
    @subclass_recorder
    def finish_shutdown(self, waittime=None, pollinterval=0.1, restart=False):
        """Record call and defer to superclass"""
    @subclass_recorder
    def _launch_kernel(self, kernel_cmd, **kw):
        """Record call and defer to superclass"""
    @subclass_recorder
    def _kill_kernel(self):
        """Record call and defer to superclass"""
    @subclass_recorder
    def cleanup_resources(self, restart=False):
        """Record call and defer to superclass"""
    @subclass_recorder
    def signal_kernel(self, signum: int):
        """Record call and defer to superclass"""
    @subclass_recorder
    def is_alive(self):
        """Record call and defer to superclass"""
    @subclass_recorder
    def _send_kernel_sigterm(self, restart: bool = False):
        """Record call and defer to superclass"""
class SyncKMSubclass(KMSubclass, KernelManager):
    """Used to test subclass hierarchies to ensure methods are called when expected."""

    _superclass = KernelManager  # target of the recorded wrappers' dispatch
class AsyncKMSubclass(KMSubclass, AsyncKernelManager):
    """Used to test subclass hierarchies to ensure methods are called when expected."""

    _superclass = AsyncKernelManager  # target of the recorded wrappers' dispatch
class MKMSubclass(RecordCallMixin):
    """MultiKernelManager stubs: each method records its call via
    ``subclass_recorder`` and defers to ``_superclass`` (set by subclasses)."""

    # default per-kernel manager class; overridden by the sync/async subclasses
    def _kernel_manager_class_default(self):
        return "jupyter_client.tests.utils.SyncKMSubclass"

    @subclass_recorder
    def get_kernel(self, kernel_id):
        """Record call and defer to superclass"""
    @subclass_recorder
    def remove_kernel(self, kernel_id):
        """Record call and defer to superclass"""
    @subclass_recorder
    def start_kernel(self, kernel_name=None, **kwargs):
        """Record call and defer to superclass"""
    @subclass_recorder
    def shutdown_kernel(self, kernel_id, now=False, restart=False):
        """Record call and defer to superclass"""
    @subclass_recorder
    def restart_kernel(self, kernel_id, now=False):
        """Record call and defer to superclass"""
    @subclass_recorder
    def interrupt_kernel(self, kernel_id):
        """Record call and defer to superclass"""
    @subclass_recorder
    def request_shutdown(self, kernel_id, restart=False):
        """Record call and defer to superclass"""
    @subclass_recorder
    def finish_shutdown(self, kernel_id, waittime=None, pollinterval=0.1, restart=False):
        """Record call and defer to superclass"""
    @subclass_recorder
    def cleanup_resources(self, kernel_id, restart=False):
        """Record call and defer to superclass"""
    @subclass_recorder
    def shutdown_all(self, now=False):
        """Record call and defer to superclass"""
class SyncMKMSubclass(MKMSubclass, MultiKernelManager):
    """Call-recording MultiKernelManager whose kernels use SyncKMSubclass."""

    _superclass = MultiKernelManager

    def _kernel_manager_class_default(self):
        return "jupyter_client.tests.utils.SyncKMSubclass"
class AsyncMKMSubclass(MKMSubclass, AsyncMultiKernelManager):
    """Call-recording AsyncMultiKernelManager whose kernels use AsyncKMSubclass."""

    _superclass = AsyncMultiKernelManager

    def _kernel_manager_class_default(self):
        return "jupyter_client.tests.utils.AsyncKMSubclass"