Mirror of https://github.com/aykhans/AzSuicideDataVisualization.git
Synced: 2025-07-02 06:22:25 +00:00

Commit: first commit

.venv/Lib/site-packages/debugpy/adapter/__init__.py  (new file, +15 lines)
@@ -0,0 +1,15 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.

from __future__ import absolute_import, division, print_function, unicode_literals

__all__ = []

import os

# Force absolute path on Python 2.
__file__ = os.path.abspath(__file__)

access_token = None
"""Access token used to authenticate with this adapter."""
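
Note: this module-level access_token is generated by __main__.main() below (a random
hex string) and later checked by servers.Connection.authenticate(). A minimal sketch of
how that value is produced, mirroring the call in __main__.py (illustrative only):

    import codecs, os
    token = codecs.encode(os.urandom(32), "hex").decode("ascii")  # 64 hex characters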

.venv/Lib/site-packages/debugpy/adapter/__main__.py  (new file, +228 lines)
@@ -0,0 +1,228 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.

from __future__ import absolute_import, division, print_function, unicode_literals

import argparse
import atexit
import codecs
import json
import locale
import os
import sys

# WARNING: debugpy and submodules must not be imported on top level in this module,
# and should be imported locally inside main() instead.

# Force absolute path on Python 2.
__file__ = os.path.abspath(__file__)


def main(args):
    # If we're talking DAP over stdio, stderr is not guaranteed to be read from,
    # so disable it to avoid the pipe filling and locking up. This must be done
    # as early as possible, before the logging module starts writing to it.
    if args.port is None:
        sys.stderr = stderr = open(os.devnull, "w")
        atexit.register(stderr.close)

    from debugpy import adapter
    from debugpy.common import compat, log, sockets
    from debugpy.adapter import clients, servers, sessions

    if args.for_server is not None:
        if os.name == "posix":
            # On POSIX, we need to leave the process group and its session, and then
            # daemonize properly by double-forking (first fork already happened when
            # this process was spawned).
            os.setsid()
            if os.fork() != 0:
                sys.exit(0)

        for stdio in sys.stdin, sys.stdout, sys.stderr:
            if stdio is not None:
                stdio.close()

    if args.log_stderr:
        log.stderr.levels |= set(log.LEVELS)
    if args.log_dir is not None:
        log.log_dir = args.log_dir

    log.to_file(prefix="debugpy.adapter")
    log.describe_environment("debugpy.adapter startup environment:")

    servers.access_token = args.server_access_token
    if args.for_server is None:
        adapter.access_token = compat.force_str(codecs.encode(os.urandom(32), "hex"))

    endpoints = {}
    try:
        client_host, client_port = clients.serve(args.host, args.port)
    except Exception as exc:
        if args.for_server is None:
            raise
        endpoints = {"error": "Can't listen for client connections: " + str(exc)}
    else:
        endpoints["client"] = {"host": client_host, "port": client_port}

    if args.for_server is not None:
        try:
            server_host, server_port = servers.serve()
        except Exception as exc:
            endpoints = {"error": "Can't listen for server connections: " + str(exc)}
        else:
            endpoints["server"] = {"host": server_host, "port": server_port}

        log.info(
            "Sending endpoints info to debug server at localhost:{0}:\n{1!j}",
            args.for_server,
            endpoints,
        )

        try:
            sock = sockets.create_client()
            try:
                sock.settimeout(None)
                sock.connect(("127.0.0.1", args.for_server))
                sock_io = sock.makefile("wb", 0)
                try:
                    sock_io.write(json.dumps(endpoints).encode("utf-8"))
                finally:
                    sock_io.close()
            finally:
                sockets.close_socket(sock)
        except Exception:
            log.reraise_exception("Error sending endpoints info to debug server:")

        if "error" in endpoints:
            log.error("Couldn't set up endpoints; exiting.")
            sys.exit(1)

    listener_file = os.getenv("DEBUGPY_ADAPTER_ENDPOINTS")
    if listener_file is not None:
        log.info("Writing endpoints info to {0!r}:\n{1!j}", listener_file, endpoints)

        def delete_listener_file():
            log.info("Listener ports closed; deleting {0!r}", listener_file)
            try:
                os.remove(listener_file)
            except Exception:
                log.swallow_exception(
                    "Failed to delete {0!r}", listener_file, level="warning"
                )

        try:
            with open(listener_file, "w") as f:
                atexit.register(delete_listener_file)
                print(json.dumps(endpoints), file=f)
        except Exception:
            log.reraise_exception("Error writing endpoints info to file:")

    if args.port is None:
        clients.Client("stdio")

    # These must be registered after the one above, to ensure that the listener sockets
    # are closed before the endpoint info file is deleted - this way, another process
    # can wait for the file to go away as a signal that the ports are no longer in use.
    atexit.register(servers.stop_serving)
    atexit.register(clients.stop_serving)

    servers.wait_until_disconnected()
    log.info("All debug servers disconnected; waiting for remaining sessions...")

    sessions.wait_until_ended()
    log.info("All debug sessions have ended; exiting.")


def _parse_argv(argv):
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--for-server", type=int, metavar="PORT", help=argparse.SUPPRESS
    )

    parser.add_argument(
        "--port",
        type=int,
        default=None,
        metavar="PORT",
        help="start the adapter in debugServer mode on the specified port",
    )

    parser.add_argument(
        "--host",
        type=str,
        default="127.0.0.1",
        metavar="HOST",
        help="start the adapter in debugServer mode on the specified host",
    )

    parser.add_argument(
        "--access-token", type=str, help="access token expected from the server"
    )

    parser.add_argument(
        "--server-access-token", type=str, help="access token expected by the server"
    )

    parser.add_argument(
        "--log-dir",
        type=str,
        metavar="DIR",
        help="enable logging and use DIR to save adapter logs",
    )

    parser.add_argument(
        "--log-stderr", action="store_true", help="enable logging to stderr"
    )

    args = parser.parse_args(argv[1:])

    if args.port is None:
        if args.log_stderr:
            parser.error("--log-stderr requires --port")
        if args.for_server is not None:
            parser.error("--for-server requires --port")

    return args


if __name__ == "__main__":
    # debugpy can also be invoked directly rather than via -m. In this case, the first
    # entry on sys.path is the one added automatically by Python for the directory
    # containing this file. This means that import debugpy will not work, since we need
    # the parent directory of debugpy/ to be in sys.path, rather than debugpy/adapter/.
    #
    # The other issue is that many other absolute imports will break, because they
    # will be resolved relative to debugpy/adapter/ - e.g. `import state` will then try
    # to import debugpy/adapter/state.py.
    #
    # To fix both, we need to replace the automatically added entry such that it points
    # at the parent directory of debugpy/ instead of debugpy/adapter/, import debugpy
    # with that in sys.path, and then remove the first entry altogether, so that it
    # doesn't affect any further imports we might do. For example, suppose the user did:
    #
    #   python /foo/bar/debugpy/adapter ...
    #
    # At the beginning of this script, sys.path will contain "/foo/bar/debugpy/adapter"
    # as the first entry. What we want is to replace it with "/foo/bar", then import
    # debugpy with that in effect, and then remove the replaced entry before any more
    # code runs. The imported debugpy module will remain in sys.modules, and thus all
    # future imports of it or its submodules will resolve accordingly.
    if "debugpy" not in sys.modules:
        # Do not use dirname() to walk up - this can be a relative path, e.g. ".".
        sys.path[0] = sys.path[0] + "/../../"
        __import__("debugpy")
        del sys.path[0]

    # Apply OS-global and user-specific locale settings.
    try:
        locale.setlocale(locale.LC_ALL, "")
    except Exception:
        # On POSIX, locale is set via environment variables, and this can fail if
        # those variables reference a non-existing locale. Ignore and continue using
        # the default "C" locale if so.
        pass

    main(_parse_argv(sys.argv))
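
Note: the adapter above is normally started as "python -m debugpy.adapter --port PORT
[--host HOST] [--log-dir DIR]"; without --port it speaks DAP over stdio. A minimal sketch
of the argument handling (hypothetical values; assumes debugpy is importable):

    from debugpy.adapter.__main__ import _parse_argv

    args = _parse_argv(["debugpy.adapter", "--port", "5678", "--log-dir", "/tmp/debugpy"])
    print(args.port, args.host, args.for_server)  # 5678 127.0.0.1 None
    # Without --port, _parse_argv rejects --log-stderr and --for-server via parser.error().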

.venv/Lib/site-packages/debugpy/adapter/clients.py  (new file, +687 lines)
@@ -0,0 +1,687 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.

from __future__ import absolute_import, division, print_function, unicode_literals

import atexit
import os
import sys

import debugpy
from debugpy import adapter, common, launcher
from debugpy.common import compat, fmt, json, log, messaging, sockets
from debugpy.common.compat import unicode
from debugpy.adapter import components, servers, sessions


class Client(components.Component):
    """Handles the client side of a debug session."""

    message_handler = components.Component.message_handler

    class Capabilities(components.Capabilities):
        PROPERTIES = {
            "supportsVariableType": False,
            "supportsVariablePaging": False,
            "supportsRunInTerminalRequest": False,
            "supportsMemoryReferences": False,
        }

    class Expectations(components.Capabilities):
        PROPERTIES = {
            "locale": "en-US",
            "linesStartAt1": True,
            "columnsStartAt1": True,
            "pathFormat": json.enum("path", optional=True),  # we don't support "uri"
        }

    def __init__(self, sock):
        if sock == "stdio":
            log.info("Connecting to client over stdio...", self)
            stream = messaging.JsonIOStream.from_stdio()
            # Make sure that nothing else tries to interfere with the stdio streams
            # that are going to be used for DAP communication from now on.
            sys.stdin = stdin = open(os.devnull, "r")
            atexit.register(stdin.close)
            sys.stdout = stdout = open(os.devnull, "w")
            atexit.register(stdout.close)
        else:
            stream = messaging.JsonIOStream.from_socket(sock)

        with sessions.Session() as session:
            super(Client, self).__init__(session, stream)

            self.client_id = None
            """ID of the connecting client. This can be 'test' while running tests."""

            self.has_started = False
            """Whether the "launch" or "attach" request was received from the client, and
            fully handled.
            """

            self.start_request = None
            """The "launch" or "attach" request as received from the client.
            """

            self._initialize_request = None
            """The "initialize" request as received from the client, to propagate to the
            server later."""

            self._deferred_events = []
            """Deferred events from the launcher and the server that must be propagated
            only if and when the "launch" or "attach" response is sent.
            """

            self._known_subprocesses = set()
            """servers.Connection instances for subprocesses that this client has been
            made aware of.
            """

            session.client = self
            session.register()

        # For the transition period, send the telemetry events with both old and new
        # name. The old one should be removed once the new one lights up.
        self.channel.send_event(
            "output",
            {
                "category": "telemetry",
                "output": "ptvsd",
                "data": {"packageVersion": debugpy.__version__},
            },
        )
        self.channel.send_event(
            "output",
            {
                "category": "telemetry",
                "output": "debugpy",
                "data": {"packageVersion": debugpy.__version__},
            },
        )

    def propagate_after_start(self, event):
        # pydevd starts sending events as soon as we connect, but the client doesn't
        # expect to see any until it receives the response to the "launch" or "attach"
        # request. If the client is not ready yet, save the event instead of propagating
        # it immediately.
        if self._deferred_events is not None:
            self._deferred_events.append(event)
            log.debug("Propagation deferred.")
        else:
            self.client.channel.propagate(event)

    def _propagate_deferred_events(self):
        log.debug("Propagating deferred events to {0}...", self.client)
        for event in self._deferred_events:
            log.debug("Propagating deferred {0}", event.describe())
            self.client.channel.propagate(event)
        log.info("All deferred events propagated to {0}.", self.client)
        self._deferred_events = None

    # Generic event handler. There are no specific handlers for client events, because
    # there are no events from the client in DAP - but we propagate them if we can, in
    # case some events appear in future protocol versions.
    @message_handler
    def event(self, event):
        if self.server:
            self.server.channel.propagate(event)

    # Generic request handler, used if there's no specific handler below.
    @message_handler
    def request(self, request):
        return self.server.channel.delegate(request)

    @message_handler
    def initialize_request(self, request):
        if self._initialize_request is not None:
            raise request.isnt_valid("Session is already initialized")

        self.client_id = request("clientID", "")
        self.capabilities = self.Capabilities(self, request)
        self.expectations = self.Expectations(self, request)
        self._initialize_request = request

        exception_breakpoint_filters = [
            {
                "filter": "raised",
                "label": "Raised Exceptions",
                "default": False,
                "description": "Break whenever any exception is raised.",
            },
            {
                "filter": "uncaught",
                "label": "Uncaught Exceptions",
                "default": True,
                "description": "Break when the process is exiting due to unhandled exception.",
            },
            {
                "filter": "userUnhandled",
                "label": "User Uncaught Exceptions",
                "default": False,
                "description": "Break when exception escapes into library code.",
            },
        ]

        return {
            "supportsCompletionsRequest": True,
            "supportsConditionalBreakpoints": True,
            "supportsConfigurationDoneRequest": True,
            "supportsDebuggerProperties": True,
            "supportsDelayedStackTraceLoading": True,
            "supportsEvaluateForHovers": True,
            "supportsExceptionInfoRequest": True,
            "supportsExceptionOptions": True,
            "supportsFunctionBreakpoints": True,
            "supportsHitConditionalBreakpoints": True,
            "supportsLogPoints": True,
            "supportsModulesRequest": True,
            "supportsSetExpression": True,
            "supportsSetVariable": True,
            "supportsValueFormattingOptions": True,
            "supportsTerminateDebuggee": True,
            "supportsGotoTargetsRequest": True,
            "supportsClipboardContext": True,
            "exceptionBreakpointFilters": exception_breakpoint_filters,
            "supportsStepInTargetsRequest": True,
        }

    # Common code for "launch" and "attach" request handlers.
    #
    # See https://github.com/microsoft/vscode/issues/4902#issuecomment-368583522
    # for the sequence of requests and events necessary to orchestrate the start.
    def _start_message_handler(f):
        @components.Component.message_handler
        def handle(self, request):
            assert request.is_request("launch", "attach")
            if self._initialize_request is None:
                raise request.isnt_valid("Session is not initialized yet")
            if self.launcher or self.server:
                raise request.isnt_valid("Session is already started")

            self.session.no_debug = request("noDebug", json.default(False))
            if self.session.no_debug:
                servers.dont_wait_for_first_connection()

            self.session.debug_options = debug_options = set(
                request("debugOptions", json.array(unicode))
            )

            f(self, request)
            if request.response is not None:
                return

            if self.server:
                self.server.initialize(self._initialize_request)
                self._initialize_request = None

                arguments = request.arguments
                if self.launcher:
                    redirecting = arguments.get("console") == "internalConsole"
                    if "RedirectOutput" in debug_options:
                        # The launcher is doing output redirection, so we don't need
                        # the server to do it as well.
                        arguments = dict(arguments)
                        arguments["debugOptions"] = list(
                            debug_options - {"RedirectOutput"}
                        )
                        redirecting = True

                    if arguments.get("redirectOutput"):
                        arguments = dict(arguments)
                        del arguments["redirectOutput"]
                        redirecting = True

                    arguments["isOutputRedirected"] = redirecting

                # pydevd doesn't send "initialized", and responds to the start request
                # immediately, without waiting for "configurationDone". If it changes
                # to conform to the DAP spec, we'll need to defer waiting for response.
                try:
                    self.server.channel.request(request.command, arguments)
                except messaging.NoMoreMessages:
                    # Server closed connection before we could receive the response to
                    # "attach" or "launch" - this can happen when debuggee exits shortly
                    # after starting. It's not an error, but we can't do anything useful
                    # here at this point, either, so just bail out.
                    request.respond({})
                    self.session.finalize(
                        fmt(
                            "{0} disconnected before responding to {1!j}",
                            self.server,
                            request.command,
                        )
                    )
                    return
                except messaging.MessageHandlingError as exc:
                    exc.propagate(request)

            if self.session.no_debug:
                self.start_request = request
                self.has_started = True
                request.respond({})
                self._propagate_deferred_events()
                return

            if "clientOS" in request:
                client_os = request("clientOS", json.enum("windows", "unix")).upper()
            elif {"WindowsClient", "Windows"} & debug_options:
                client_os = "WINDOWS"
            elif {"UnixClient", "UNIX"} & debug_options:
                client_os = "UNIX"
            else:
                client_os = "WINDOWS" if sys.platform == "win32" else "UNIX"
            self.server.channel.request(
                "setDebuggerProperty",
                {
                    "skipSuspendOnBreakpointException": ("BaseException",),
                    "skipPrintBreakpointException": ("NameError",),
                    "multiThreadsSingleNotification": True,
                    "ideOS": client_os,
                },
            )

            # Let the client know that it can begin configuring the adapter.
            self.channel.send_event("initialized")

            self.start_request = request
            return messaging.NO_RESPONSE  # will respond on "configurationDone"

        return handle

    @_start_message_handler
    def launch_request(self, request):
        from debugpy.adapter import launchers

        if self.session.id != 1 or len(servers.connections()):
            raise request.cant_handle('"attach" expected')

        debug_options = set(request("debugOptions", json.array(unicode)))

        # Handling of properties that can also be specified as legacy "debugOptions" flags.
        # If a property is explicitly set to false, but the flag is in "debugOptions", treat
        # it as an error. Returns None if the property wasn't explicitly set either way.
        def property_or_debug_option(prop_name, flag_name):
            assert prop_name[0].islower() and flag_name[0].isupper()

            value = request(prop_name, bool, optional=True)
            if value == ():
                value = None

            if flag_name in debug_options:
                if value is False:
                    raise request.isnt_valid(
                        '{0!j}:false and "debugOptions":[{1!j}] are mutually exclusive',
                        prop_name,
                        flag_name,
                    )
                value = True

            return value

        # "pythonPath" is a deprecated legacy spelling. If "python" is missing, then try
        # the alternative. But if both are missing, the error message should say "python".
        python_key = "python"
        if python_key in request:
            if "pythonPath" in request:
                raise request.isnt_valid(
                    '"pythonPath" is not valid if "python" is specified'
                )
        elif "pythonPath" in request:
            python_key = "pythonPath"
        python = request(python_key, json.array(unicode, vectorize=True, size=(0,)))
        if not len(python):
            python = [compat.filename(sys.executable)]

        python += request("pythonArgs", json.array(unicode, size=(0,)))
        request.arguments["pythonArgs"] = python[1:]
        request.arguments["python"] = python

        launcher_python = request("debugLauncherPython", unicode, optional=True)
        if launcher_python == ():
            launcher_python = python[0]

        program = module = code = ()
        if "program" in request:
            program = request("program", unicode)
            args = [program]
            request.arguments["processName"] = program
        if "module" in request:
            module = request("module", unicode)
            args = ["-m", module]
            request.arguments["processName"] = module
        if "code" in request:
            code = request("code", json.array(unicode, vectorize=True, size=(1,)))
            args = ["-c", "\n".join(code)]
            request.arguments["processName"] = "-c"

        num_targets = len([x for x in (program, module, code) if x != ()])
        if num_targets == 0:
            raise request.isnt_valid(
                'either "program", "module", or "code" must be specified'
            )
        elif num_targets != 1:
            raise request.isnt_valid(
                '"program", "module", and "code" are mutually exclusive'
            )

        # Propagate "args" via CLI if and only if shell expansion is requested.
        args_expansion = request(
            "argsExpansion", json.enum("shell", "none", optional=True)
        )
        if args_expansion == "shell":
            args += request("args", json.array(unicode))
            request.arguments.pop("args", None)

        cwd = request("cwd", unicode, optional=True)
        if cwd == ():
            # If it's not specified, but we're launching a file rather than a module,
            # and the specified path has a directory in it, use that.
            cwd = None if program == () else (os.path.dirname(program) or None)

        console = request(
            "console",
            json.enum(
                "internalConsole",
                "integratedTerminal",
                "externalTerminal",
                optional=True,
            ),
        )
        console_title = request("consoleTitle", json.default("Python Debug Console"))

        sudo = bool(property_or_debug_option("sudo", "Sudo"))
        if sudo and sys.platform == "win32":
            raise request.cant_handle('"sudo":true is not supported on Windows.')

        launcher_path = request("debugLauncherPath", os.path.dirname(launcher.__file__))
        adapter_host = request("debugAdapterHost", "127.0.0.1")

        try:
            servers.serve(adapter_host)
        except Exception as exc:
            raise request.cant_handle(
                "{0} couldn't create listener socket for servers: {1}",
                self.session,
                exc,
            )

        launchers.spawn_debuggee(
            self.session,
            request,
            [launcher_python],
            launcher_path,
            adapter_host,
            args,
            cwd,
            console,
            console_title,
            sudo,
        )

    @_start_message_handler
    def attach_request(self, request):
        if self.session.no_debug:
            raise request.isnt_valid('"noDebug" is not supported for "attach"')

        host = request("host", unicode, optional=True)
        port = request("port", int, optional=True)
        listen = request("listen", dict, optional=True)
        connect = request("connect", dict, optional=True)
        pid = request("processId", (int, unicode), optional=True)
        sub_pid = request("subProcessId", int, optional=True)

        if host != () or port != ():
            if listen != ():
                raise request.isnt_valid(
                    '"listen" and "host"/"port" are mutually exclusive'
                )
            if connect != ():
                raise request.isnt_valid(
                    '"connect" and "host"/"port" are mutually exclusive'
                )
        if listen != ():
            if connect != ():
                raise request.isnt_valid(
                    '"listen" and "connect" are mutually exclusive'
                )
            if pid != ():
                raise request.isnt_valid(
                    '"listen" and "processId" are mutually exclusive'
                )
            if sub_pid != ():
                raise request.isnt_valid(
                    '"listen" and "subProcessId" are mutually exclusive'
                )
        if pid != () and sub_pid != ():
            raise request.isnt_valid(
                '"processId" and "subProcessId" are mutually exclusive'
            )

        if listen != ():
            host = listen("host", "127.0.0.1")
            port = listen("port", int)
            adapter.access_token = None
            host, port = servers.serve(host, port)
        else:
            host, port = servers.serve()

        # There are four distinct possibilities here.
        #
        # If "processId" is specified, this is attach-by-PID. We need to inject the
        # debug server into the designated process, and then wait until it connects
        # back to us. Since the injected server can crash, there must be a timeout.
        #
        # If "subProcessId" is specified, this is attach to a known subprocess, likely
        # in response to a "debugpyAttach" event. If so, the debug server should be
        # connected already, and thus the wait timeout is zero.
        #
        # If "listen" is specified, this is attach-by-socket with the server expected
        # to connect to the adapter via debugpy.connect(). There is no PID known in
        # advance, so just wait until the first server connection indefinitely, with
        # no timeout.
        #
        # If "connect" is specified, this is attach-by-socket in which the server has
        # spawned the adapter via debugpy.listen(). There is no PID known to the client
        # in advance, but the server connection should either be there already, or
        # the server should be connecting shortly, so there must be a timeout.
        #
        # In the last two cases, if there's more than one server connection already,
        # this is a multiprocess re-attach. The client doesn't know the PID, so we just
        # connect it to the oldest server connection that we have - in most cases, it
        # will be the one for the root debuggee process, but if it has exited already,
        # it will be some subprocess.
        if pid != ():
            if not isinstance(pid, int):
                try:
                    pid = int(pid)
                except Exception:
                    raise request.isnt_valid('"processId" must be parseable as int')
            debugpy_args = request("debugpyArgs", json.array(unicode))
            servers.inject(pid, debugpy_args)
            timeout = common.PROCESS_SPAWN_TIMEOUT
            pred = lambda conn: conn.pid == pid
        else:
            if sub_pid == ():
                pred = lambda conn: True
                timeout = common.PROCESS_SPAWN_TIMEOUT if listen == () else None
            else:
                pred = lambda conn: conn.pid == sub_pid
                timeout = 0

        self.channel.send_event("debugpyWaitingForServer", {"host": host, "port": port})
        conn = servers.wait_for_connection(self.session, pred, timeout)
        if conn is None:
            if sub_pid != ():
                # If we can't find a matching subprocess, it's not always an error -
                # it might have already exited, or didn't even get a chance to connect.
                # To prevent the client from complaining, pretend that the "attach"
                # request was successful, but that the session terminated immediately.
                request.respond({})
                self.session.finalize(
                    fmt('No known subprocess with "subProcessId":{0}', sub_pid)
                )
                return

            raise request.cant_handle(
                (
                    "Timed out waiting for debug server to connect."
                    if timeout
                    else "There is no debug server connected to this adapter."
                ),
                sub_pid,
            )

        try:
            conn.attach_to_session(self.session)
        except ValueError:
            request.cant_handle("{0} is already being debugged.", conn)

    @message_handler
    def configurationDone_request(self, request):
        if self.start_request is None or self.has_started:
            request.cant_handle(
                '"configurationDone" is only allowed during handling of a "launch" '
                'or an "attach" request'
            )

        try:
            self.has_started = True
            try:
                result = self.server.channel.delegate(request)
            except messaging.NoMoreMessages:
                # Server closed connection before we could receive the response to
                # "configurationDone" - this can happen when debuggee exits shortly
                # after starting. It's not an error, but we can't do anything useful
                # here at this point, either, so just bail out.
                request.respond({})
                self.start_request.respond({})
                self.session.finalize(
                    fmt(
                        "{0} disconnected before responding to {1!j}",
                        self.server,
                        request.command,
                    )
                )
                return
            else:
                request.respond(result)
        except messaging.MessageHandlingError as exc:
            self.start_request.cant_handle(str(exc))
        finally:
            if self.start_request.response is None:
                self.start_request.respond({})
                self._propagate_deferred_events()

        # Notify the client of any child processes of the debuggee that aren't already
        # being debugged.
        for conn in servers.connections():
            if conn.server is None and conn.ppid == self.session.pid:
                self.notify_of_subprocess(conn)

    @message_handler
    def evaluate_request(self, request):
        propagated_request = self.server.channel.propagate(request)

        def handle_response(response):
            request.respond(response.body)

        propagated_request.on_response(handle_response)

        return messaging.NO_RESPONSE

    @message_handler
    def pause_request(self, request):
        request.arguments["threadId"] = "*"
        return self.server.channel.delegate(request)

    @message_handler
    def continue_request(self, request):
        request.arguments["threadId"] = "*"

        try:
            return self.server.channel.delegate(request)
        except messaging.NoMoreMessages:
            # pydevd can sometimes allow the debuggee to exit before the queued
            # "continue" response gets sent. Thus, a failed "continue" response
            # indicating that the server disconnected should be treated as success.
            return {"allThreadsContinued": True}

    @message_handler
    def debugpySystemInfo_request(self, request):
        result = {"debugpy": {"version": debugpy.__version__}}
        if self.server:
            try:
                pydevd_info = self.server.channel.request("pydevdSystemInfo")
            except Exception:
                # If the server has already disconnected, or couldn't handle it,
                # report what we've got.
                pass
            else:
                result.update(pydevd_info)
        return result

    @message_handler
    def terminate_request(self, request):
        self.session.finalize('client requested "terminate"', terminate_debuggee=True)
        return {}

    @message_handler
    def disconnect_request(self, request):
        terminate_debuggee = request("terminateDebuggee", bool, optional=True)
        if terminate_debuggee == ():
            terminate_debuggee = None
        self.session.finalize('client requested "disconnect"', terminate_debuggee)
        return {}

    def notify_of_subprocess(self, conn):
        with self.session:
            if self.start_request is None or conn in self._known_subprocesses:
                return
            if "processId" in self.start_request.arguments:
                log.warning(
                    "Not reporting subprocess for {0}, because the parent process "
                    'was attached to using "processId" rather than "port".',
                    self.session,
                )
                return

            log.info("Notifying {0} about {1}.", self, conn)
            body = dict(self.start_request.arguments)
            self._known_subprocesses.add(conn)

        for key in "processId", "listen", "preLaunchTask", "postDebugTask":
            body.pop(key, None)

        body["name"] = fmt("Subprocess {0}", conn.pid)
        body["request"] = "attach"
        body["subProcessId"] = conn.pid

        for key in "args", "processName", "pythonArgs":
            body.pop(key, None)

        host = body.pop("host", None)
        port = body.pop("port", None)
        if "connect" not in body:
            body["connect"] = {}
        if "host" not in body["connect"]:
            body["connect"]["host"] = host if host is not None else "127.0.0.1"
        if "port" not in body["connect"]:
            if port is None:
                _, port = listener.getsockname()
            body["connect"]["port"] = port

        self.channel.send_event("debugpyAttach", body)


def serve(host, port):
    global listener
    listener = sockets.serve("Client", Client, host, port)
    return listener.getsockname()


def stop_serving():
    try:
        listener.close()
    except Exception:
        log.swallow_exception(level="warning")
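
Note: a sketch of the "attach" argument shapes that attach_request above distinguishes
(all values hypothetical; the property names are the ones read by the handler):

    attach_by_connect = {"request": "attach", "connect": {"host": "127.0.0.1", "port": 5678}}
    attach_by_listen = {"request": "attach", "listen": {"host": "127.0.0.1", "port": 5678}}
    attach_by_pid = {"request": "attach", "processId": 12345, "debugpyArgs": []}
    attach_subprocess = {"request": "attach", "subProcessId": 23456}
    # "listen" excludes "connect", "processId", and "subProcessId"; "processId" and
    # "subProcessId" are mutually exclusive, as enforced by the isnt_valid checks above.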

.venv/Lib/site-packages/debugpy/adapter/components.py  (new file, +187 lines)
@@ -0,0 +1,187 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.

from __future__ import absolute_import, division, print_function, unicode_literals

import functools

from debugpy.common import fmt, json, log, messaging, util


ACCEPT_CONNECTIONS_TIMEOUT = 10


class ComponentNotAvailable(Exception):
    def __init__(self, type):
        super(ComponentNotAvailable, self).__init__(
            fmt("{0} is not available", type.__name__)
        )


class Component(util.Observable):
    """A component managed by a debug adapter: client, launcher, or debug server.

    Every component belongs to a Session, which is used for synchronization and
    shared data.

    Every component has its own message channel, and provides message handlers for
    that channel. All handlers should be decorated with @Component.message_handler,
    which ensures that Session is locked for the duration of the handler. Thus, only
    one handler is running at any given time across all components, unless the lock
    is released explicitly or via Session.wait_for().

    Components report changes to their attributes to Session, allowing one component
    to wait_for() a change caused by another component.
    """

    def __init__(self, session, stream=None, channel=None):
        assert (stream is None) ^ (channel is None)

        try:
            lock_held = session.lock.acquire(blocking=False)
            assert lock_held, "__init__ of a Component subclass must lock its Session"
        finally:
            session.lock.release()

        super(Component, self).__init__()

        self.session = session

        if channel is None:
            stream.name = str(self)
            channel = messaging.JsonMessageChannel(stream, self)
            channel.start()
        else:
            channel.name = channel.stream.name = str(self)
            channel.handlers = self
        self.channel = channel
        self.is_connected = True

        # Do this last to avoid triggering useless notifications for assignments above.
        self.observers += [lambda *_: self.session.notify_changed()]

    def __str__(self):
        return fmt("{0}[{1}]", type(self).__name__, self.session.id)

    @property
    def client(self):
        return self.session.client

    @property
    def launcher(self):
        return self.session.launcher

    @property
    def server(self):
        return self.session.server

    def wait_for(self, *args, **kwargs):
        return self.session.wait_for(*args, **kwargs)

    @staticmethod
    def message_handler(f):
        """Applied to a message handler to automatically lock and unlock the session
        for its duration, and to validate the session state.

        If the handler raises ComponentNotAvailable or JsonIOError, converts it to
        Message.cant_handle().
        """

        @functools.wraps(f)
        def lock_and_handle(self, message):
            try:
                with self.session:
                    return f(self, message)
            except ComponentNotAvailable as exc:
                raise message.cant_handle("{0}", exc, silent=True)
            except messaging.MessageHandlingError as exc:
                if exc.cause is message:
                    raise
                else:
                    exc.propagate(message)
            except messaging.JsonIOError as exc:
                raise message.cant_handle(
                    "{0} disconnected unexpectedly", exc.stream.name, silent=True
                )

        return lock_and_handle

    def disconnect(self):
        with self.session:
            self.is_connected = False
            self.session.finalize(fmt("{0} has disconnected", self))


def missing(session, type):
    class Missing(object):
        """A dummy component that raises ComponentNotAvailable whenever some
        attribute is accessed on it.
        """

        __getattr__ = __setattr__ = lambda self, *_: report()
        __bool__ = __nonzero__ = lambda self: False

    def report():
        try:
            raise ComponentNotAvailable(type)
        except Exception as exc:
            log.reraise_exception("{0} in {1}", exc, session)

    return Missing()


class Capabilities(dict):
    """A collection of feature flags for a component. Corresponds to JSON properties
    in the DAP "initialize" request or response, other than those that identify the
    party.
    """

    PROPERTIES = {}
    """JSON property names and default values for the capabilities represented
    by instances of this class. Keys are names, and values are either default values
    or validators.

    If the value is callable, it must be a JSON validator; see debugpy.common.json for
    details. If the value is not callable, it is as if json.default(value) validator
    was used instead.
    """

    def __init__(self, component, message):
        """Parses an "initialize" request or response and extracts the feature flags.

        For every "X" in self.PROPERTIES, sets self["X"] to the corresponding value
        from message.payload if it's present there, or to the default value otherwise.
        """

        assert message.is_request("initialize") or message.is_response("initialize")

        self.component = component

        payload = message.payload
        for name, validate in self.PROPERTIES.items():
            value = payload.get(name, ())
            if not callable(validate):
                validate = json.default(validate)

            try:
                value = validate(value)
            except Exception as exc:
                raise message.isnt_valid("{0!j} {1}", name, exc)

            assert value != (), fmt(
                "{0!j} must provide a default value for missing properties.", validate
            )
            self[name] = value

        log.debug("{0}", self)

    def __repr__(self):
        return fmt("{0}: {1!j}", type(self).__name__, dict(self))

    def require(self, *keys):
        for key in keys:
            if not self[key]:
                raise messaging.MessageHandlingError(
                    fmt("{0} does not have capability {1!j}", self.component, key)
                )
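
Note: concrete components declare their feature flags by subclassing Capabilities, as
Client.Capabilities and Server.Capabilities do in clients.py and servers.py. A minimal
sketch (illustrative only):

    from debugpy.adapter import components
    from debugpy.common import json

    class ExampleCapabilities(components.Capabilities):
        PROPERTIES = {
            "supportsRunInTerminalRequest": False,  # plain value -> json.default(False)
            "pathFormat": json.enum("path", optional=True),  # callable -> used as validator
        }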

.venv/Lib/site-packages/debugpy/adapter/launchers.py  (new file, +196 lines)
@@ -0,0 +1,196 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.

from __future__ import absolute_import, division, print_function, unicode_literals

import os
import subprocess
import sys

from debugpy import adapter, common
from debugpy.common import compat, fmt, log, messaging, sockets
from debugpy.adapter import components, servers


class Launcher(components.Component):
    """Handles the launcher side of a debug session."""

    message_handler = components.Component.message_handler

    def __init__(self, session, stream):
        with session:
            assert not session.launcher
            super(Launcher, self).__init__(session, stream)

            self.pid = None
            """Process ID of the debuggee process, as reported by the launcher."""

            self.exit_code = None
            """Exit code of the debuggee process."""

            session.launcher = self

    @message_handler
    def process_event(self, event):
        self.pid = event("systemProcessId", int)
        self.client.propagate_after_start(event)

    @message_handler
    def output_event(self, event):
        self.client.propagate_after_start(event)

    @message_handler
    def exited_event(self, event):
        self.exit_code = event("exitCode", int)
        # We don't want to tell the client about this just yet, because it will then
        # want to disconnect, and the launcher might still be waiting for keypress
        # (if wait-on-exit was enabled). Instead, we'll report the event when we
        # receive "terminated" from the launcher, right before it exits.

    @message_handler
    def terminated_event(self, event):
        try:
            self.client.channel.send_event("exited", {"exitCode": self.exit_code})
        except Exception:
            pass
        self.channel.close()

    def terminate_debuggee(self):
        with self.session:
            if self.exit_code is None:
                try:
                    self.channel.request("terminate")
                except Exception:
                    pass


def spawn_debuggee(
    session,
    start_request,
    python,
    launcher_path,
    adapter_host,
    args,
    cwd,
    console,
    console_title,
    sudo,
):
    # -E tells sudo to propagate environment variables to the target process - this
    # is necessary for launcher to get DEBUGPY_LAUNCHER_PORT and DEBUGPY_LOG_DIR.
    cmdline = ["sudo", "-E"] if sudo else []
    cmdline += python
    cmdline += [launcher_path]
    env = {}

    arguments = dict(start_request.arguments)
    if not session.no_debug:
        _, arguments["port"] = servers.listener.getsockname()
        arguments["adapterAccessToken"] = adapter.access_token

    def on_launcher_connected(sock):
        listener.close()
        stream = messaging.JsonIOStream.from_socket(sock)
        Launcher(session, stream)

    try:
        listener = sockets.serve(
            "Launcher", on_launcher_connected, adapter_host, backlog=1
        )
    except Exception as exc:
        raise start_request.cant_handle(
            "{0} couldn't create listener socket for launcher: {1}", session, exc
        )

    try:
        launcher_host, launcher_port = listener.getsockname()
        launcher_addr = (
            launcher_port
            if launcher_host == "127.0.0.1"
            else fmt("{0}:{1}", launcher_host, launcher_port)
        )
        cmdline += [str(launcher_addr), "--"]
        cmdline += args

        if log.log_dir is not None:
            env[str("DEBUGPY_LOG_DIR")] = compat.filename_str(log.log_dir)
        if log.stderr.levels != {"warning", "error"}:
            env[str("DEBUGPY_LOG_STDERR")] = str(" ".join(log.stderr.levels))

        if console == "internalConsole":
            log.info("{0} spawning launcher: {1!r}", session, cmdline)
            try:
                for i, arg in enumerate(cmdline):
                    try:
                        cmdline[i] = compat.filename_str(arg)
                    except UnicodeEncodeError as exc:
                        raise start_request.cant_handle(
                            "Invalid command line argument {0!j}: {1}", arg, exc
                        )

                # If we are talking to the client over stdio, sys.stdin and sys.stdout
                # are redirected to avoid mangling the DAP message stream. Make sure
                # the launcher also respects that.
                subprocess.Popen(
                    cmdline,
                    cwd=cwd,
                    env=dict(list(os.environ.items()) + list(env.items())),
                    stdin=sys.stdin,
                    stdout=sys.stdout,
                    stderr=sys.stderr,
                )
            except Exception as exc:
                raise start_request.cant_handle("Failed to spawn launcher: {0}", exc)
        else:
            log.info('{0} spawning launcher via "runInTerminal" request.', session)
            session.client.capabilities.require("supportsRunInTerminalRequest")
            kinds = {"integratedTerminal": "integrated", "externalTerminal": "external"}
            request_args = {
                "kind": kinds[console],
                "title": console_title,
                "args": cmdline,
                "env": env,
            }
            if cwd is not None:
                request_args["cwd"] = cwd
            try:
                session.client.channel.request("runInTerminal", request_args)
            except messaging.MessageHandlingError as exc:
                exc.propagate(start_request)

        # If using sudo, it might prompt for password, and launcher won't start running
        # until the user enters it, so don't apply timeout in that case.
        if not session.wait_for(
            lambda: session.launcher,
            timeout=(None if sudo else common.PROCESS_SPAWN_TIMEOUT),
        ):
            raise start_request.cant_handle("Timed out waiting for launcher to connect")

        try:
            session.launcher.channel.request(start_request.command, arguments)
        except messaging.MessageHandlingError as exc:
            exc.propagate(start_request)

        if not session.wait_for(
            lambda: session.launcher.pid is not None,
            timeout=common.PROCESS_SPAWN_TIMEOUT,
        ):
            raise start_request.cant_handle(
                'Timed out waiting for "process" event from launcher'
            )

        if session.no_debug:
            return

        # Wait for the first incoming connection regardless of the PID - it won't
        # necessarily match due to the use of stubs like py.exe or "conda run".
        conn = servers.wait_for_connection(
            session, lambda conn: True, timeout=common.PROCESS_SPAWN_TIMEOUT
        )
        if conn is None:
            raise start_request.cant_handle("Timed out waiting for debuggee to spawn")
        conn.attach_to_session(session)

    finally:
        listener.close()
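
Note: when "console" is "integratedTerminal" or "externalTerminal", spawn_debuggee above
asks the client to start the launcher via a "runInTerminal" request shaped roughly like
this (hypothetical paths and port; the keys are the ones assembled in request_args):

    request_args = {
        "kind": "integrated",  # kinds["integratedTerminal"]
        "title": "Python Debug Console",
        "args": ["/usr/bin/python3", "<launcher_path>", "5679", "--", "app.py"],
        "env": {"DEBUGPY_LOG_DIR": "/tmp/debugpy"},
        "cwd": "/path/to/project",  # only included when cwd is not None
    }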

.venv/Lib/site-packages/debugpy/adapter/servers.py  (new file, +493 lines; listing truncated below)
@@ -0,0 +1,493 @@
|
||||
# Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
# Licensed under the MIT License. See LICENSE in the project root
|
||||
# for license information.
|
||||
|
||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
|
||||
import debugpy
|
||||
from debugpy import adapter
|
||||
from debugpy.common import compat, fmt, json, log, messaging, sockets
|
||||
from debugpy.adapter import components
|
||||
|
||||
|
||||
access_token = None
|
||||
"""Access token used to authenticate with the servers."""
|
||||
|
||||
_lock = threading.RLock()
|
||||
|
||||
_connections = []
|
||||
"""All servers that are connected to this adapter, in order in which they connected.
|
||||
"""
|
||||
|
||||
_connections_changed = threading.Event()
|
||||
|
||||
|
||||
class Connection(object):
|
||||
"""A debug server that is connected to the adapter.
|
||||
|
||||
Servers that are not participating in a debug session are managed directly by the
|
||||
corresponding Connection instance.
|
||||
|
||||
Servers that are participating in a debug session are managed by that sessions's
|
||||
Server component instance, but Connection object remains, and takes over again
|
||||
once the session ends.
|
||||
"""
|
||||
|
||||
def __init__(self, sock):
|
||||
from debugpy.adapter import sessions
|
||||
|
||||
self.disconnected = False
|
||||
|
||||
self.server = None
|
||||
"""The Server component, if this debug server belongs to Session.
|
||||
"""
|
||||
|
||||
self.pid = None
|
||||
|
||||
stream = messaging.JsonIOStream.from_socket(sock, str(self))
|
||||
self.channel = messaging.JsonMessageChannel(stream, self)
|
||||
self.channel.start()
|
||||
|
||||
try:
|
||||
self.authenticate()
|
||||
info = self.channel.request("pydevdSystemInfo")
|
||||
process_info = info("process", json.object())
|
||||
self.pid = process_info("pid", int)
|
||||
self.ppid = process_info("ppid", int, optional=True)
|
||||
if self.ppid == ():
|
||||
self.ppid = None
|
||||
self.channel.name = stream.name = str(self)
|
||||
|
||||
debugpy_dir = os.path.dirname(os.path.dirname(debugpy.__file__))
|
||||
# Note: we must check if 'debugpy' is not already in sys.modules because the
|
||||
# evaluation of an import at the wrong time could deadlock Python due to
|
||||
# its import lock.
|
||||
#
|
||||
# So, in general this evaluation shouldn't do anything. It's only
|
||||
# important when pydevd attaches automatically to a subprocess. In this
|
||||
# case, we have to make sure that debugpy is properly put back in the game
|
||||
# for users to be able to use it.v
|
||||
#
|
||||
# In this case (when the import is needed), this evaluation *must* be done
|
||||
# before the configurationDone request is sent -- if this is not respected
|
||||
# it's possible that pydevd already started secondary threads to handle
|
||||
# commands, in which case it's very likely that this command would be
|
||||
# evaluated at the wrong thread and the import could potentially deadlock
|
||||
# the program.
|
||||
#
|
||||
# Note 2: the sys module is guaranteed to be in the frame globals and
|
||||
# doesn't need to be imported.
|
||||
inject_debugpy = """
|
||||
if 'debugpy' not in sys.modules:
|
||||
sys.path.insert(0, {debugpy_dir!r})
|
||||
try:
|
||||
import debugpy
|
||||
finally:
|
||||
del sys.path[0]
|
||||
"""
|
||||
inject_debugpy = fmt(inject_debugpy, debugpy_dir=debugpy_dir)
|
||||
|
||||
try:
|
||||
self.channel.request("evaluate", {"expression": inject_debugpy})
|
||||
except messaging.MessageHandlingError:
|
||||
# Failure to inject is not a fatal error - such a subprocess can
|
||||
# still be debugged, it just won't support "import debugpy" in user
|
||||
# code - so don't terminate the session.
|
||||
log.swallow_exception(
|
||||
"Failed to inject debugpy into {0}:", self, level="warning"
|
||||
)
|
||||
|
||||
with _lock:
|
||||
# The server can disconnect concurrently before we get here, e.g. if
|
||||
# it was force-killed. If the disconnect() handler has already run,
|
||||
# don't register this server or report it, since there's nothing to
|
||||
# deregister it.
|
||||
if self.disconnected:
|
||||
return
|
||||
|
||||
if any(conn.pid == self.pid for conn in _connections):
|
||||
raise KeyError(
|
||||
fmt("{0} is already connected to this adapter", self)
|
||||
)
|
||||
|
||||
is_first_server = len(_connections) == 0
|
||||
_connections.append(self)
|
||||
_connections_changed.set()
|
||||
|
||||
except Exception:
|
||||
log.swallow_exception("Failed to accept incoming server connection:")
|
||||
self.channel.close()
|
||||
|
||||
            # If this was the first server to connect, and the main thread is inside
            # wait_until_disconnected(), we want to unblock it and allow it to exit.
            dont_wait_for_first_connection()

            # If we couldn't retrieve all the necessary info from the debug server,
            # or there's a PID clash, we don't want to track this debuggee anymore,
            # but we want to continue accepting connections.
            return

        parent_session = sessions.get(self.ppid)
        if parent_session is None:
            log.info("No active debug session for parent process of {0}.", self)
        else:
            try:
                parent_session.client.notify_of_subprocess(self)
                return
            except Exception:
                # This might fail if the client concurrently disconnects from the parent
                # session. We still want to keep the connection around, in case the
                # client reconnects later. If the parent session was "launch", it'll take
                # care of closing the remaining server connections.
                log.swallow_exception(
                    "Failed to notify parent session about {0}:", self
                )

        # If we got to this point, the subprocess notification was either not sent,
        # or not delivered successfully. For the first server, this is expected, since
        # it corresponds to the root process, and there is no other debug session to
        # notify. But subsequent server connections represent subprocesses, and those
        # will not start running user code until the client tells them to. Since there
        # isn't going to be a client without the notification, such subprocesses have
        # to be unblocked.
        if is_first_server:
            return
        log.info("No clients to wait for - unblocking {0}.", self)
        try:
            self.channel.request("initialize", {"adapterID": "debugpy"})
            self.channel.request("attach", {"subProcessId": self.pid})
            self.channel.request("configurationDone")
            self.channel.request("disconnect")
        except Exception:
            log.swallow_exception("Failed to unblock orphaned subprocess:")
            self.channel.close()

    def __str__(self):
        return "Server" + fmt("[?]" if self.pid is None else "[pid={0}]", self.pid)

    def authenticate(self):
        if access_token is None and adapter.access_token is None:
            return
        auth = self.channel.request(
            "pydevdAuthorize", {"debugServerAccessToken": access_token}
        )
        if auth["clientAccessToken"] != adapter.access_token:
            self.channel.close()
            raise RuntimeError('Mismatched "clientAccessToken"; server not authorized.')

    def request(self, request):
        raise request.isnt_valid(
            "Requests from the debug server to the client are not allowed."
        )

    def event(self, event):
        pass

    def terminated_event(self, event):
        self.channel.close()

    def disconnect(self):
        with _lock:
            self.disconnected = True
            if self.server is not None:
                # If the disconnect happened while Server was being instantiated,
                # we need to tell it, so that it can clean up via Session.finalize().
                # It will also take care of deregistering the connection in that case.
                self.server.disconnect()
            elif self in _connections:
                _connections.remove(self)
                _connections_changed.set()

    def attach_to_session(self, session):
        """Attaches this server to the specified Session as a Server component.

        Raises ValueError if the server already belongs to some session.
        """

        with _lock:
            if self.server is not None:
                raise ValueError
            log.info("Attaching {0} to {1}", self, session)
            self.server = Server(session, self)


class Server(components.Component):
    """Handles the debug server side of a debug session."""

    message_handler = components.Component.message_handler

    class Capabilities(components.Capabilities):
        PROPERTIES = {
            "supportsCompletionsRequest": False,
            "supportsConditionalBreakpoints": False,
            "supportsConfigurationDoneRequest": False,
            "supportsDataBreakpoints": False,
            "supportsDelayedStackTraceLoading": False,
            "supportsDisassembleRequest": False,
            "supportsEvaluateForHovers": False,
            "supportsExceptionInfoRequest": False,
            "supportsExceptionOptions": False,
            "supportsFunctionBreakpoints": False,
            "supportsGotoTargetsRequest": False,
            "supportsHitConditionalBreakpoints": False,
            "supportsLoadedSourcesRequest": False,
            "supportsLogPoints": False,
            "supportsModulesRequest": False,
            "supportsReadMemoryRequest": False,
            "supportsRestartFrame": False,
            "supportsRestartRequest": False,
            "supportsSetExpression": False,
            "supportsSetVariable": False,
            "supportsStepBack": False,
            "supportsStepInTargetsRequest": False,
            "supportsTerminateDebuggee": False,
            "supportsTerminateRequest": False,
            "supportsTerminateThreadsRequest": False,
            "supportsValueFormattingOptions": False,
            "exceptionBreakpointFilters": [],
            "additionalModuleColumns": [],
            "supportedChecksumAlgorithms": [],
        }
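
        # Presumably (based on how Server.initialize() below consumes the "initialize"
        # response), each entry here is the default assumed for a DAP capability that
        # the connected pydevd server does not explicitly report.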

    def __init__(self, session, connection):
        assert connection.server is None
        with session:
            assert not session.server
            super(Server, self).__init__(session, channel=connection.channel)

            self.connection = connection

            assert self.session.pid is None
            if self.session.launcher and self.session.launcher.pid != self.pid:
                log.info(
                    "Launcher reported PID={0}, but server reported PID={1}",
                    self.session.launcher.pid,
                    self.pid,
                )
            self.session.pid = self.pid

            session.server = self

    @property
    def pid(self):
        """Process ID of the debuggee process, as reported by the server."""
        return self.connection.pid

    @property
    def ppid(self):
        """Parent process ID of the debuggee process, as reported by the server."""
        return self.connection.ppid

    def initialize(self, request):
        assert request.is_request("initialize")
        self.connection.authenticate()
        request = self.channel.propagate(request)
        request.wait_for_response()
        self.capabilities = self.Capabilities(self, request.response)

    # Generic request handler, used if there's no specific handler below.
    @message_handler
    def request(self, request):
        # Do not delegate requests from the server by default. There is a security
        # boundary between the server and the adapter, and we cannot trust arbitrary
        # requests sent over that boundary, since they may contain arbitrary code
        # that the client will execute - e.g. "runInTerminal". The adapter must only
        # propagate requests that it knows are safe.
        raise request.isnt_valid(
            "Requests from the debug server to the client are not allowed."
        )

    # Generic event handler, used if there's no specific handler below.
    @message_handler
    def event(self, event):
        self.client.propagate_after_start(event)

    @message_handler
    def initialized_event(self, event):
        # pydevd doesn't send it, but the adapter will send its own in any case.
        pass

    @message_handler
    def process_event(self, event):
        # If there is a launcher, it's handling the process event.
        if not self.launcher:
            self.client.propagate_after_start(event)

    @message_handler
    def continued_event(self, event):
        # https://github.com/microsoft/ptvsd/issues/1530
        #
        # DAP specification says that a step request implies that only the thread on
        # which that step occurred is resumed for the duration of the step. However,
        # for VS compatibility, pydevd can operate in a mode that resumes all threads
        # instead. This is set according to the value of "steppingResumesAllThreads"
        # in "launch" or "attach" request, which defaults to true. If explicitly set
        # to false, pydevd will only resume the thread that was stepping.
        #
        # To ensure that the client is aware that other threads are getting resumed in
        # that mode, pydevd sends a "continued" event with "allThreadsResumed": true
        # when responding to a step request. This ensures correct behavior in VSCode
        # and other DAP-conformant clients.
        #
        # On the other hand, VS does not follow the DAP specification in this regard.
        # When it requests a step, it assumes that all threads will be resumed, and
        # does not expect to see "continued" events explicitly reflecting that fact.
        # If such events are sent regardless, VS behaves erratically. Thus, we have
        # to suppress them specifically for VS.
        if self.client.client_id not in ("visualstudio", "vsformac"):
            self.client.propagate_after_start(event)

    @message_handler
    def exited_event(self, event):
        # If there is a launcher, it's handling the exit code.
        if not self.launcher:
            self.client.propagate_after_start(event)

    @message_handler
    def terminated_event(self, event):
        # Do not propagate this, since we'll report our own.
        self.channel.close()

    def detach_from_session(self):
        with _lock:
            self.is_connected = False
            self.channel.handlers = self.connection
            self.channel.name = self.channel.stream.name = str(self.connection)
            self.connection.server = None

    def disconnect(self):
        with _lock:
            _connections.remove(self.connection)
            _connections_changed.set()
        super(Server, self).disconnect()


def serve(host="127.0.0.1", port=0):
    global listener
    listener = sockets.serve("Server", Connection, host, port)
    return listener.getsockname()


def stop_serving():
    try:
        listener.close()
    except Exception:
        log.swallow_exception(level="warning")


def connections():
    with _lock:
        return list(_connections)

def wait_for_connection(session, predicate, timeout=None):
    """Waits until there is a server connection matching the specified predicate,
    and returns the corresponding Connection.

    If there is more than one server connection already available, returns the oldest
    one.
    """

    def wait_for_timeout():
        time.sleep(timeout)
        wait_for_timeout.timed_out = True
        with _lock:
            _connections_changed.set()

    wait_for_timeout.timed_out = timeout == 0
    if timeout:
        thread = threading.Thread(
            target=wait_for_timeout, name="servers.wait_for_connection() timeout"
        )
        thread.daemon = True
        thread.start()

    if timeout != 0:
        log.info("{0} waiting for connection from debug server...", session)
    while True:
        with _lock:
            _connections_changed.clear()
            conns = (conn for conn in _connections if predicate(conn))
            conn = next(conns, None)
            if conn is not None or wait_for_timeout.timed_out:
                return conn
        _connections_changed.wait()
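

# A minimal usage sketch (illustrative only; the helper below is hypothetical, not
# part of the original module): a caller that knows the PID of an expected subprocess
# can wait for its server connection and attach it to the session.
def _example_wait_for_subprocess(session, expected_pid):
    conn = wait_for_connection(
        session, lambda conn: conn.pid == expected_pid, timeout=10
    )
    if conn is None:
        raise RuntimeError("debug server did not connect in time")
    conn.attach_to_session(session)
    return conn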


def wait_until_disconnected():
    """Blocks until all debug servers disconnect from the adapter.

    If there are no server connections, waits until at least one is established first,
    before waiting for it to disconnect.
    """
    while True:
        _connections_changed.wait()
        with _lock:
            _connections_changed.clear()
            if not len(_connections):
                return


def dont_wait_for_first_connection():
    """Unblocks any pending wait_until_disconnected() call that is waiting on the
    first server to connect.
    """
    with _lock:
        _connections_changed.set()
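

# How these two calls fit together (a sketch under assumptions about the adapter's
# main loop, not a verbatim excerpt; the helper name is hypothetical): the main
# thread blocks until every debug server has disconnected, and a client that goes
# away early can cut that wait short via dont_wait_for_first_connection().
def _example_adapter_shutdown_wait():
    from debugpy.adapter import sessions

    wait_until_disconnected()  # all servers gone, or the wait was unblocked
    sessions.wait_until_ended()  # then wait for the debug sessions themselves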


def inject(pid, debugpy_args):
    host, port = listener.getsockname()

    cmdline = [
        sys.executable,
        compat.filename(os.path.dirname(debugpy.__file__)),
        "--connect",
        host + ":" + str(port),
    ]
    if adapter.access_token is not None:
        cmdline += ["--adapter-access-token", adapter.access_token]
    cmdline += debugpy_args
    cmdline += ["--pid", str(pid)]

    log.info("Spawning attach-to-PID debugger injector: {0!r}", cmdline)
    try:
        injector = subprocess.Popen(
            cmdline,
            bufsize=0,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
    except Exception as exc:
        log.swallow_exception(
            "Failed to inject debug server into process with PID={0}", pid
        )
        raise messaging.MessageHandlingError(
            fmt(
                "Failed to inject debug server into process with PID={0}: {1}", pid, exc
            )
        )

    # We need to capture the output of the injector - otherwise it can get blocked
    # on a write() syscall when it tries to print something.

    def capture_output():
        while True:
            line = injector.stdout.readline()
            if not line:
                break
            log.info("Injector[PID={0}] output:\n{1}", pid, line.rstrip())
        log.info("Injector[PID={0}] exited.", pid)

    thread = threading.Thread(
        target=capture_output, name=fmt("Injector[PID={0}] output", pid)
    )
    thread.daemon = True
    thread.start()
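
    # For reference, with made-up example values: for pid=1234 and the adapter
    # listening on 127.0.0.1:5678, the spawned command line looks roughly like
    #
    #   <python> <site-packages>/debugpy --connect 127.0.0.1:5678 \
    #       --adapter-access-token <token> <debugpy_args...> --pid 1234
    #
    # i.e. the debugpy package is run as a script and told to attach to the target
    # process and connect back to this adapter's server listener.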
277
.venv/Lib/site-packages/debugpy/adapter/sessions.py
Normal file
277
.venv/Lib/site-packages/debugpy/adapter/sessions.py
Normal file
@ -0,0 +1,277 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.

from __future__ import absolute_import, division, print_function, unicode_literals

import itertools
import os
import signal
import threading
import time

from debugpy import common
from debugpy.common import fmt, log, util
from debugpy.adapter import components, launchers, servers


_lock = threading.RLock()
_sessions = set()
_sessions_changed = threading.Event()


class Session(util.Observable):
    """A debug session involving a client, an adapter, a launcher, and a debug server.

    The client and the adapter are always present, and at least one of launcher and debug
    server is present, depending on the scenario.
    """

    _counter = itertools.count(1)

    def __init__(self):
        from debugpy.adapter import clients

        super(Session, self).__init__()

        self.lock = threading.RLock()
        self.id = next(self._counter)
        self._changed_condition = threading.Condition(self.lock)

        self.client = components.missing(self, clients.Client)
        """The client component. Always present."""

        self.launcher = components.missing(self, launchers.Launcher)
        """The launcher component. Always present in "launch" sessions, and never
        present in "attach" sessions.
        """

        self.server = components.missing(self, servers.Server)
        """The debug server component. Always present, unless this is a "launch"
        session with "noDebug".
        """

        self.no_debug = None
        """Whether this is a "noDebug" session."""

        self.pid = None
        """Process ID of the debuggee process."""

        self.debug_options = {}
        """Debug options as specified by "launch" or "attach" request."""

        self.is_finalizing = False
        """Whether finalize() has been invoked."""

        self.observers += [lambda *_: self.notify_changed()]

    def __str__(self):
        return fmt("Session[{0}]", self.id)

    def __enter__(self):
        """Lock the session for exclusive access."""
        self.lock.acquire()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        """Unlock the session."""
        self.lock.release()
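
    # The context-manager protocol above is what enables the "with session:" locking
    # idiom used elsewhere in the adapter (e.g. in servers.Server.__init__), roughly:
    #
    #     with session:            # acquires session.lock
    #         session.pid = pid    # mutate shared state while holding the lock
    #     # lock is released here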

    def register(self):
        with _lock:
            _sessions.add(self)
            _sessions_changed.set()

    def notify_changed(self):
        with self:
            self._changed_condition.notify_all()

        # A session is considered ended once all components disconnect, and there
        # are no further incoming messages from anything to handle.
        components = self.client, self.launcher, self.server
        if all(not com or not com.is_connected for com in components):
            with _lock:
                if self in _sessions:
                    log.info("{0} has ended.", self)
                    _sessions.remove(self)
                    _sessions_changed.set()

    def wait_for(self, predicate, timeout=None):
        """Waits until predicate() becomes true.

        The predicate is invoked with the session locked. If satisfied, the method
        returns immediately. Otherwise, the lock is released (even if it was held
        at entry), and the method blocks waiting for some attribute of either self,
        self.client, self.server, or self.launcher to change. On every change, session
        is re-locked and predicate is re-evaluated, until it is satisfied.

        While the session is unlocked, message handlers for components other than
        the one that is waiting can run, but message handlers for that one are still
        blocked.

        If timeout is not None, the method will unblock and return after that many
        seconds regardless of whether the predicate was satisfied. The method returns
        False if it timed out, and True otherwise.
        """

        def wait_for_timeout():
            time.sleep(timeout)
            wait_for_timeout.timed_out = True
            self.notify_changed()

        wait_for_timeout.timed_out = False
        if timeout is not None:
            thread = threading.Thread(
                target=wait_for_timeout, name="Session.wait_for() timeout"
            )
            thread.daemon = True
            thread.start()

        with self:
            while not predicate():
                if wait_for_timeout.timed_out:
                    return False
                self._changed_condition.wait()
        return True
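
    # Typical use (a sketch; _finalize() below does essentially this with
    # common.PROCESS_EXIT_TIMEOUT instead of the arbitrary value shown here):
    #
    #     if not session.wait_for(
    #         lambda: session.launcher.exit_code is not None, timeout=10
    #     ):
    #         log.warning("debuggee did not exit within the timeout")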

    def finalize(self, why, terminate_debuggee=None):
        """Finalizes the debug session.

        If the server is present, sends "disconnect" request with "terminateDebuggee"
        set as specified to it; waits for it to disconnect, allowing any remaining
        messages from it to be handled; and closes the server channel.

        If the launcher is present, sends "terminate" request to it, regardless of the
        value of terminate_debuggee; waits for it to disconnect, allowing any remaining
        messages from it to be handled; and closes the launcher channel.

        If the client is present, sends "terminated" event to it.

        If terminate_debuggee=None, it is treated as True if the session has a Launcher
        component, and False otherwise.
        """

        if self.is_finalizing:
            return
        self.is_finalizing = True
        log.info("{0}; finalizing {1}.", why, self)

        if terminate_debuggee is None:
            terminate_debuggee = bool(self.launcher)

        try:
            self._finalize(why, terminate_debuggee)
        except Exception:
            # Finalization should never fail, and if it does, the session is in an
            # indeterminate and likely unrecoverable state, so just fail fast.
            log.swallow_exception("Fatal error while finalizing {0}", self)
            os._exit(1)

        log.info("{0} finalized.", self)

    def _finalize(self, why, terminate_debuggee):
        # If the client started a session, and then disconnected before issuing "launch"
        # or "attach", the main thread will be blocked waiting for the first server
        # connection to come in - unblock it, so that we can exit.
        servers.dont_wait_for_first_connection()

        if self.server:
            if self.server.is_connected:
                if terminate_debuggee and self.launcher and self.launcher.is_connected:
                    # If we were specifically asked to terminate the debuggee, and we
                    # can ask the launcher to kill it, do so instead of disconnecting
                    # from the server to prevent debuggee from running any more code.
                    self.launcher.terminate_debuggee()
                else:
                    # Otherwise, let the server handle it the best it can.
                    try:
                        self.server.channel.request(
                            "disconnect", {"terminateDebuggee": terminate_debuggee}
                        )
                    except Exception:
                        pass
            self.server.detach_from_session()

        if self.launcher and self.launcher.is_connected:
            # If there was a server, we just disconnected from it above, which should
            # cause the debuggee process to exit - so let's wait for that first.
            if self.server:
                log.info('{0} waiting for "exited" event...', self)
                if not self.wait_for(
                    lambda: self.launcher.exit_code is not None,
                    timeout=common.PROCESS_EXIT_TIMEOUT,
                ):
                    log.warning('{0} timed out waiting for "exited" event.', self)

            # Terminate the debuggee process if it's still alive for any reason -
            # whether it's because there was no server to handle graceful shutdown,
            # or because the server couldn't handle it for some reason.
            self.launcher.terminate_debuggee()

            # Wait until the launcher message queue fully drains. There is no timeout
            # here, because the final "terminated" event will only come after reading
            # user input in wait-on-exit scenarios.
            log.info("{0} waiting for {1} to disconnect...", self, self.launcher)
            self.wait_for(lambda: not self.launcher.is_connected)

            try:
                self.launcher.channel.close()
            except Exception:
                log.swallow_exception()

        if self.client:
            if self.client.is_connected:
                # Tell the client that debugging is over, but don't close the channel until it
                # tells us to, via the "disconnect" request.
                try:
                    self.client.channel.send_event("terminated")
                except Exception:
                    pass

            if (
                self.client.start_request is not None
                and self.client.start_request.command == "launch"
            ):
                servers.stop_serving()
                log.info(
                    '"launch" session ended - killing remaining debuggee processes.'
                )

                pids_killed = set()
                if self.launcher and self.launcher.pid is not None:
                    # Already killed above.
                    pids_killed.add(self.launcher.pid)

                while True:
                    conns = [
                        conn
                        for conn in servers.connections()
                        if conn.pid not in pids_killed
                    ]
                    if not len(conns):
                        break
                    for conn in conns:
                        log.info("Killing {0}", conn)
                        try:
                            os.kill(conn.pid, signal.SIGTERM)
                        except Exception:
                            log.swallow_exception("Failed to kill {0}", conn)
                        pids_killed.add(conn.pid)


def get(pid):
    with _lock:
        return next((session for session in _sessions if session.pid == pid), None)


def wait_until_ended():
    """Blocks until all sessions have ended.

    A session ends when all components that it manages disconnect from it.
    """
    while True:
        with _lock:
            if not len(_sessions):
                return
            _sessions_changed.clear()
        _sessions_changed.wait()