]> git.ipfire.org Git - thirdparty/Python/cpython.git/commitdiff
[3.14] gh-146308: Fix error handling issues in _remote_debugging module (GH-146309)
author: Pablo Galindo Salgado <Pablogsal@gmail.com>
Wed, 25 Mar 2026 01:05:47 +0000 (01:05 +0000)
committer: GitHub <noreply@github.com>
Wed, 25 Mar 2026 01:05:47 +0000 (01:05 +0000)
(cherry picked from commit ae6adc907907562e4ffbb5355f12e77e9085c506)

Misc/NEWS.d/next/Core_and_Builtins/2026-03-22-19-30-00.gh-issue-146308.AxnRVA.rst [new file with mode: 0644]
Modules/_remote_debugging_module.c

diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2026-03-22-19-30-00.gh-issue-146308.AxnRVA.rst b/Misc/NEWS.d/next/Core_and_Builtins/2026-03-22-19-30-00.gh-issue-146308.AxnRVA.rst
new file mode 100644 (file)
index 0000000..89641fb
--- /dev/null
@@ -0,0 +1,4 @@
+Fixed several error handling issues in the :mod:`!_remote_debugging` module,
+including safer validation of remote ``int`` objects, clearer asyncio task
+chain failures, and cache cleanup fixes that avoid leaking or double-freeing
+metadata on allocation failure. Patch by Pablo Galindo.
index a26e6820f558f613e21b199309c64dca5b47fda6..3706a287c3a1ed2b61912af7e2b641f7f815279e 100644 (file)
@@ -79,6 +79,7 @@
 #define INTERP_STATE_BUFFER_SIZE MAX(INTERP_STATE_MIN_SIZE, 256)
 #define MAX_STACK_CHUNK_SIZE (16 * 1024 * 1024)  /* 16 MB max for stack chunks */
 #define MAX_SET_TABLE_SIZE (1 << 20)  /* 1 million entries max for set iteration */
+#define MAX_LONG_DIGITS 64  /* Allows values up to ~2^1920 */
 
 
 
@@ -753,6 +754,15 @@ read_py_long(
         return 0;
     }
 
+    if (size < 0 || size > MAX_LONG_DIGITS) {
+        PyErr_Format(PyExc_RuntimeError,
+                     "Invalid PyLong digit count: %zd (expected 0-%d)",
+                     size, MAX_LONG_DIGITS);
+        set_exception_cause(unwinder, PyExc_RuntimeError,
+                            "Invalid PyLong size (corrupted remote memory)");
+        return -1;
+    }
+
     // If the long object has inline digits, use them directly
     digit *digits;
     if (size <= _PY_NSMALLNEGINTS + _PY_NSMALLPOSINTS) {
@@ -1364,6 +1374,9 @@ process_running_task_chain(
     PyObject *coro_chain = PyStructSequence_GET_ITEM(task_info, 2);
     assert(coro_chain != NULL);
     if (PyList_GET_SIZE(coro_chain) != 1) {
+        PyErr_Format(PyExc_RuntimeError,
+                     "Expected single-item coro chain, got %zd items",
+                     PyList_GET_SIZE(coro_chain));
         set_exception_cause(unwinder, PyExc_RuntimeError, "Coro chain is not a single item");
         return -1;
     }
@@ -1625,6 +1638,7 @@ cache_tlbc_array(RemoteUnwinderObject *unwinder, uintptr_t code_addr, uintptr_t
     void *key = (void *)code_addr;
     if (_Py_hashtable_set(unwinder->tlbc_cache, key, entry) < 0) {
         tlbc_cache_entry_destroy(entry);
+        PyErr_NoMemory();
         set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to store TLBC entry in cache");
         return 0; // Cache error
     }
@@ -1803,7 +1817,11 @@ parse_code_object(RemoteUnwinderObject *unwinder,
         meta->addr_code_adaptive = real_address + (uintptr_t)unwinder->debug_offsets.code_object.co_code_adaptive;
 
         if (unwinder && unwinder->code_object_cache && _Py_hashtable_set(unwinder->code_object_cache, key, meta) < 0) {
+            func = NULL;
+            file = NULL;
+            linetable = NULL;
             cached_code_metadata_destroy(meta);
+            PyErr_NoMemory();
             set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to cache code metadata");
             goto error;
         }