]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/python/lib/gdb/dap/server.py
Update copyright year range in header of all files managed by GDB
[thirdparty/binutils-gdb.git] / gdb / python / lib / gdb / dap / server.py
1 # Copyright 2022-2024 Free Software Foundation, Inc.
2
3 # This program is free software; you can redistribute it and/or modify
4 # it under the terms of the GNU General Public License as published by
5 # the Free Software Foundation; either version 3 of the License, or
6 # (at your option) any later version.
7 #
8 # This program is distributed in the hope that it will be useful,
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # GNU General Public License for more details.
12 #
13 # You should have received a copy of the GNU General Public License
14 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15
16 import functools
17 import gdb
18 import heapq
19 import inspect
20 import json
21 import threading
22
23 from .io import start_json_writer, read_json
24 from .startup import (
25 exec_and_log,
26 DAPException,
27 DAPQueue,
28 in_dap_thread,
29 in_gdb_thread,
30 send_gdb,
31 send_gdb_with_response,
32 start_thread,
33 log,
34 log_stack,
35 LogLevel,
36 )
37 from .typecheck import type_check
38
39
# Map capability names to values.  Populated by the 'capability'
# decorator below and returned to the client by the 'initialize'
# request.
_capabilities = {}

# Map command names to callables.  Populated by the 'request'
# decorator below and used to dispatch incoming DAP requests.
_commands = {}

# The global server.  Set by Server.__init__.
_server = None
48
49
# A subclass of Exception that is used solely for reporting that a
# request needs the inferior to be stopped, but it is not stopped.
class NotStoppedException(Exception):
    """Raised when a request requires a stopped inferior but the
    inferior is running; reported to the client as 'notStopped'."""

    pass
54
55
# This is used to handle cancellation requests.  It tracks all the
# needed state, so that we can cancel both requests that are in flight
# as well as queued requests.
class CancellationHandler:
    """Tracks DAP 'cancel' requests for queued and in-flight commands."""

    def __init__(self):
        # Methods on this class acquire this lock before proceeding.
        self.lock = threading.Lock()
        # The request currently being handled, or None.
        self.in_flight = None
        # A min-heap of sequence numbers of requests that were
        # cancelled before 'starting' saw them.
        self.reqs = []

    def starting(self, req):
        """Call at the start of the given request.

        Throws the appropriate exception if the request should be
        immediately cancelled."""
        with self.lock:
            self.in_flight = req
            # Requests are handled in sequence order, so any queued
            # cancellation <= REQ can never match a later request and
            # is discarded here; popping REQ itself means this request
            # was cancelled before it started.
            while len(self.reqs) > 0 and self.reqs[0] <= req:
                if heapq.heappop(self.reqs) == req:
                    raise KeyboardInterrupt()

    def done(self, req):
        """Indicate that the request is done."""
        with self.lock:
            self.in_flight = None

    def cancel(self, req):
        """Call to cancel a request.

        If the request has already finished, this is ignored.
        If the request is in flight, it is interrupted.
        If the request has not yet been seen, the cancellation is queued."""
        with self.lock:
            if req == self.in_flight:
                # NOTE(review): gdb.interrupt() is invoked while
                # holding self.lock -- presumably it only posts an
                # interrupt and cannot block; confirm.
                gdb.interrupt()
            else:
                # We don't actually ignore the request here, but in
                # the 'starting' method.  This way we don't have to
                # track as much state.  Also, this implementation has
                # the weird property that a request can be cancelled
                # before it is even sent.  It didn't seem worthwhile
                # to try to check for this.
                heapq.heappush(self.reqs, req)
100
101
class Server:
    """The DAP server class.

    Owns the reader, writer, and inferior-output threads, and runs the
    main request-dispatch loop on the DAP thread."""

    def __init__(self, in_stream, out_stream, child_stream):
        # Stream carrying DAP JSON from the client.
        self.in_stream = in_stream
        # Stream carrying DAP JSON to the client.
        self.out_stream = out_stream
        # Stream carrying the inferior's output, forwarded to the
        # client as 'output' events.
        self.child_stream = child_stream
        # (event, body) pairs to be emitted only after the current
        # request's response is written; see send_event_later.
        self.delayed_events = []
        # This queue accepts JSON objects that are then sent to the
        # DAP client.  Writing is done in a separate thread to avoid
        # blocking the read loop.
        self.write_queue = DAPQueue()
        # Reading is also done in a separate thread, and a queue of
        # requests is kept.
        self.read_queue = DAPQueue()
        # Set by shutdown(); checked by main_loop.
        self.done = False
        self.canceller = CancellationHandler()
        global _server
        _server = self

    # Treat PARAMS as a JSON-RPC request and perform its action.
    # PARAMS is just a dictionary from the JSON.
    @in_dap_thread
    def _handle_command(self, params):
        # NOTE(review): a request missing "seq" or "command" would
        # raise KeyError outside the try below -- presumably clients
        # always send well-formed requests; confirm.
        req = params["seq"]
        result = {
            "request_seq": req,
            "type": "response",
            "command": params["command"],
        }
        try:
            # May raise KeyboardInterrupt if this request was already
            # cancelled while queued.
            self.canceller.starting(req)
            if "arguments" in params:
                args = params["arguments"]
            else:
                args = {}
            global _commands
            body = _commands[params["command"]](**args)
            if body is not None:
                result["body"] = body
            result["success"] = True
        except NotStoppedException:
            # This is an expected exception, and the result is clearly
            # visible in the log, so do not log it.
            result["success"] = False
            result["message"] = "notStopped"
        except KeyboardInterrupt:
            # This can only happen when a request has been canceled.
            result["success"] = False
            result["message"] = "cancelled"
        except DAPException as e:
            # Don't normally want to see this, as it interferes with
            # the test suite.
            log_stack(LogLevel.FULL)
            result["success"] = False
            result["message"] = str(e)
        except BaseException as e:
            log_stack()
            result["success"] = False
            result["message"] = str(e)
        self.canceller.done(req)
        return result

    # Read inferior output and sends OutputEvents to the client.  It
    # is run in its own thread.
    def _read_inferior_output(self):
        while True:
            line = self.child_stream.readline()
            self.send_event(
                "output",
                {
                    "category": "stdout",
                    "output": line,
                },
            )

    # Send OBJ to the client, logging first if needed.
    def _send_json(self, obj):
        log("WROTE: <<<" + json.dumps(obj) + ">>>")
        self.write_queue.put(obj)

    # This is run in a separate thread and simply reads requests from
    # the client and puts them into a queue.  A separate thread is
    # used so that 'cancel' requests can be handled -- the DAP thread
    # will normally block, waiting for each request to complete.
    def _reader_thread(self):
        while True:
            cmd = read_json(self.in_stream)
            log("READ: <<<" + json.dumps(cmd) + ">>>")
            # Be extra paranoid about the form here.  If anything is
            # missing, it will be put in the queue and then an error
            # issued by ordinary request processing.
            if (
                "command" in cmd
                and cmd["command"] == "cancel"
                and "arguments" in cmd
                # gdb does not implement progress, so there's no need
                # to check for progressId.
                and "requestId" in cmd["arguments"]
            ):
                self.canceller.cancel(cmd["arguments"]["requestId"])
            # The cancel request is still queued so that an ordinary
            # response can be constructed for it.
            self.read_queue.put(cmd)

    @in_dap_thread
    def main_loop(self):
        """The main loop of the DAP server."""
        # Before looping, start the thread that writes JSON to the
        # client, and the thread that reads output from the inferior.
        start_thread("output reader", self._read_inferior_output)
        start_json_writer(self.out_stream, self.write_queue)
        start_thread("JSON reader", self._reader_thread)
        while not self.done:
            cmd = self.read_queue.get()
            result = self._handle_command(cmd)
            self._send_json(result)
            # Emit events that were deferred until after the response
            # was written.
            events = self.delayed_events
            self.delayed_events = []
            for event, body in events:
                self.send_event(event, body)
        # Got the terminate request.  This is handled by the
        # JSON-writing thread, so that we can ensure that all
        # responses are flushed to the client before exiting.
        self.write_queue.put(None)

    @in_dap_thread
    def send_event_later(self, event, body=None):
        """Send a DAP event back to the client, but only after the
        current request has completed."""
        self.delayed_events.append((event, body))

    # Note that this does not need to be run in any particular thread,
    # because it just creates an object and writes it to a thread-safe
    # queue.
    def send_event(self, event, body=None):
        """Send an event to the DAP client.
        EVENT is the name of the event, a string.
        BODY is the body of the event, an arbitrary object."""
        obj = {
            "type": "event",
            "event": event,
        }
        if body is not None:
            obj["body"] = body
        self._send_json(obj)

    def shutdown(self):
        """Request that the server shut down."""
        # Just set a flag.  This operation is complicated because we
        # want to write the result of the request before exiting.  See
        # main_loop.
        self.done = True
253
254
def send_event(event, body=None):
    """Module-level convenience wrapper around Server.send_event.

    EVENT is the name of the event, a string.
    BODY is the body of the event, an arbitrary object."""
    # Delegate to the singleton server; no 'global' statement is
    # needed for a read-only reference.
    _server.send_event(event, body)
261
262
# A helper decorator that checks whether the inferior is running.
def _check_not_running(func):
    """Wrap FUNC so it raises NotStoppedException while the inferior runs."""

    @functools.wraps(func)
    def checker(*args, **kwargs):
        # Import this as late as possible, to avoid a circular import
        # with the .events module.
        from .events import inferior_running

        if inferior_running:
            raise NotStoppedException()
        return func(*args, **kwargs)

    return checker
276
277
def request(
    name: str,
    *,
    response: bool = True,
    on_dap_thread: bool = False,
    expect_stopped: bool = True
):
    """A decorator for DAP requests.

    This registers the function as the implementation of the DAP
    request NAME.  By default, the function is invoked in the gdb
    thread, and its result is returned as the 'body' of the DAP
    response.

    Some keyword arguments are provided as well:

    If RESPONSE is False, the result of the function will not be
    waited for and no 'body' will be in the response.

    If ON_DAP_THREAD is True, the function will be invoked in the DAP
    thread.  When ON_DAP_THREAD is True, RESPONSE may not be False.

    If EXPECT_STOPPED is True (the default), then the request will
    fail with the 'notStopped' reason if it is processed while the
    inferior is running.  When EXPECT_STOPPED is False, the request
    will proceed regardless of the inferior's state.
    """

    # Validate the parameters.
    assert not on_dap_thread or response

    def wrap(func):
        code = func.__code__
        # We don't permit requests to have positional arguments.
        try:
            assert code.co_posonlyargcount == 0
        except AttributeError:
            # Attribute co_posonlyargcount is supported starting python 3.8.
            pass
        assert code.co_argcount == 0
        # A request must have a **args parameter.
        assert code.co_flags & inspect.CO_VARKEYWORDS

        # Type-check the calls.
        func = type_check(func)

        # Verify that the function is run on the correct thread.
        if on_dap_thread:
            # DAP-thread requests run directly; they must NOT be
            # wrapped in a gdb-thread dispatcher below.
            cmd = in_dap_thread(func)
        else:
            func = in_gdb_thread(func)

            # The response/no-response wrapping only applies to
            # gdb-thread requests; the request is forwarded to the
            # gdb thread, synchronously when a response body is
            # needed.
            if response:

                def sync_call(**args):
                    return send_gdb_with_response(lambda: func(**args))

                cmd = sync_call
            else:

                def non_sync_call(**args):
                    return send_gdb(lambda: func(**args))

                cmd = non_sync_call

        # If needed, check that the inferior is not running.  This
        # wrapping is done last, so the check is done first, before
        # trying to dispatch the request to another thread.
        if expect_stopped:
            cmd = _check_not_running(cmd)

        global _commands
        _commands[name] = cmd
        return cmd

    return wrap
354
355
def capability(name, value=True):
    """A decorator recording that the wrapped function implements the
    DAP capability NAME, with value VALUE (True by default)."""

    def wrap(func):
        # Item assignment on the module-level dict needs no 'global'
        # statement; the function itself is returned unchanged.
        _capabilities[name] = value
        return func

    return wrap
366
367
def client_bool_capability(name):
    """Return the value of a boolean client capability.

    If the capability was not specified, or did not have boolean type,
    False is returned."""
    # A missing key yields None, which fails the isinstance check, so
    # both "absent" and "non-boolean" collapse to False.
    value = _server.config.get(name)
    return value if isinstance(value, bool) else False
377
378
@request("initialize", on_dap_thread=True)
def initialize(**args):
    """Implement the DAP 'initialize' request.

    Records the client's configuration, schedules the 'initialized'
    event for after this response, and returns gdb's capabilities."""
    global _server, _capabilities
    _server.config = args
    _server.send_event_later("initialized")
    # Hand back a shallow copy so later mutation of the registry
    # cannot affect the response.
    return dict(_capabilities)
385
386
@request("terminate", expect_stopped=False)
@capability("supportsTerminateRequest")
def terminate(**args):
    """Implement the DAP 'terminate' request by killing the inferior."""
    exec_and_log("kill")
391
392
@request("disconnect", on_dap_thread=True, expect_stopped=False)
@capability("supportTerminateDebuggee")
def disconnect(*, terminateDebuggee: bool = False, **args):
    """Implement the DAP 'disconnect' request.

    If TERMINATEDEBUGGEE is True, the inferior is killed first; the
    server is then asked to shut down."""
    if terminateDebuggee:
        # Kill synchronously on the gdb thread before shutting down.
        send_gdb_with_response("kill")
    _server.shutdown()
399
400
@request("cancel", on_dap_thread=True, expect_stopped=False)
@capability("supportsCancelRequest")
def cancel(**args):
    """Implement the DAP 'cancel' request; always reports success."""
    # If a 'cancel' request can actually be satisfied, it will be
    # handled specially in the reader thread.  However, in order to
    # construct a proper response, the request is also added to the
    # command queue and so ends up here.  Additionally, the spec says:
    #    The cancel request may return an error if it could not cancel
    #    an operation but a client should refrain from presenting this
    #    error to end users.
    # ... which gdb takes to mean that it is fine for all cancel
    # requests to report success.
    return None