6.15-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 20 Jun 2025 15:50:26 +0000 (17:50 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 20 Jun 2025 15:50:26 +0000 (17:50 +0200)
added patches:
drm-nouveau-nvkm-factor-out-current-gsp-rpc-command-policies.patch
drm-nouveau-nvkm-introduce-new-gsp-reply-policy-nvkm_gsp_rpc_reply_poll.patch

queue-6.15/drm-nouveau-nvkm-factor-out-current-gsp-rpc-command-policies.patch [new file with mode: 0644]
queue-6.15/drm-nouveau-nvkm-introduce-new-gsp-reply-policy-nvkm_gsp_rpc_reply_poll.patch [new file with mode: 0644]
queue-6.15/series
queue-6.15/tools-nolibc-use-pselect6_time64-if-available.patch [deleted file]

diff --git a/queue-6.15/drm-nouveau-nvkm-factor-out-current-gsp-rpc-command-policies.patch b/queue-6.15/drm-nouveau-nvkm-factor-out-current-gsp-rpc-command-policies.patch
new file mode 100644
index 0000000..d86f64f
--- /dev/null
@@ -0,0 +1,306 @@
+From 4570355f8eaa476164cfb7ca959fdbf0cebbc9eb Mon Sep 17 00:00:00 2001
+From: Zhi Wang <zhiw@nvidia.com>
+Date: Thu, 27 Feb 2025 01:35:53 +0000
+Subject: drm/nouveau/nvkm: factor out current GSP RPC command policies
+
+From: Zhi Wang <zhiw@nvidia.com>
+
+commit 4570355f8eaa476164cfb7ca959fdbf0cebbc9eb upstream.
+
+There can be multiple ways of handling the GSP RPC messages, which are
+the replies to GSP RPC commands, depending on the requirements of the
+callers and the nature of the GSP RPC commands.
+
+The currently supported reply policies are "callers don't care" and
+"receive the entire message", according to the requirements of the
+callers. To introduce a new policy, factor out the current RPC command
+reply policies and centralize the reply handling in a single function.
+
+Factor out NVKM_GSP_RPC_REPLY_NOWAIT as "callers don't care" and
+NVKM_GSP_RPC_REPLY_RECV as "receive the entire message". Introduce a
+kernel doc to document the policies. Factor out
+r535_gsp_rpc_handle_reply().
+
+No functional change is intended for small GSP RPC commands. For large
+GSP RPC commands, the caller decides the policy for handling the
+returned GSP RPC message.
+
+Cc: Ben Skeggs <bskeggs@nvidia.com>
+Cc: Alexandre Courbot <acourbot@nvidia.com>
+Signed-off-by: Zhi Wang <zhiw@nvidia.com>
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250227013554.8269-2-zhiw@nvidia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/gpu/nouveau.rst                      |    3 
+ drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h  |   34 +++++++--
+ drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c     |    2 
+ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c     |   75 ++++++++++-----------
+ drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c |    2 
+ 5 files changed, 72 insertions(+), 44 deletions(-)
+
+--- a/Documentation/gpu/nouveau.rst
++++ b/Documentation/gpu/nouveau.rst
+@@ -27,3 +27,6 @@ GSP Support
+ .. kernel-doc:: drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+    :doc: GSP message queue element
++
++.. kernel-doc:: drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
++   :doc: GSP message handling policy
+--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
++++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+@@ -31,6 +31,25 @@ typedef int (*nvkm_gsp_msg_ntfy_func)(vo
+ struct nvkm_gsp_event;
+ typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 repc);
++/**
++ * DOC: GSP message handling policy
++ *
++ * When sending a GSP RPC command, the reply (the returned GSP RPC
++ * message) can be handled in several ways, depending on the caller's
++ * requirements and the nature of the command.
++ *
++ * NVKM_GSP_RPC_REPLY_NOWAIT - If specified, immediately return to the
++ * caller after the GSP RPC command is issued.
++ *
++ * NVKM_GSP_RPC_REPLY_RECV - If specified, wait and receive the entire GSP
++ * RPC message after the GSP RPC command is issued.
++ *
++ */
++enum nvkm_gsp_rpc_reply_policy {
++      NVKM_GSP_RPC_REPLY_NOWAIT = 0,
++      NVKM_GSP_RPC_REPLY_RECV,
++};
++
+ struct nvkm_gsp {
+       const struct nvkm_gsp_func *func;
+       struct nvkm_subdev subdev;
+@@ -188,7 +207,8 @@ struct nvkm_gsp {
+       const struct nvkm_gsp_rm {
+               void *(*rpc_get)(struct nvkm_gsp *, u32 fn, u32 argc);
+-              void *(*rpc_push)(struct nvkm_gsp *, void *argv, bool wait, u32 repc);
++              void *(*rpc_push)(struct nvkm_gsp *gsp, void *argv,
++                                enum nvkm_gsp_rpc_reply_policy policy, u32 repc);
+               void (*rpc_done)(struct nvkm_gsp *gsp, void *repv);
+               void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc);
+@@ -255,9 +275,10 @@ nvkm_gsp_rpc_get(struct nvkm_gsp *gsp, u
+ }
+ static inline void *
+-nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
++nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv,
++                enum nvkm_gsp_rpc_reply_policy policy, u32 repc)
+ {
+-      return gsp->rm->rpc_push(gsp, argv, wait, repc);
++      return gsp->rm->rpc_push(gsp, argv, policy, repc);
+ }
+ static inline void *
+@@ -268,13 +289,14 @@ nvkm_gsp_rpc_rd(struct nvkm_gsp *gsp, u3
+       if (IS_ERR_OR_NULL(argv))
+               return argv;
+-      return nvkm_gsp_rpc_push(gsp, argv, true, argc);
++      return nvkm_gsp_rpc_push(gsp, argv, NVKM_GSP_RPC_REPLY_RECV, argc);
+ }
+ static inline int
+-nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
++nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv,
++              enum nvkm_gsp_rpc_reply_policy policy)
+ {
+-      void *repv = nvkm_gsp_rpc_push(gsp, argv, wait, 0);
++      void *repv = nvkm_gsp_rpc_push(gsp, argv, policy, 0);
+       if (IS_ERR(repv))
+               return PTR_ERR(repv);
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
+@@ -56,7 +56,7 @@ r535_bar_bar2_update_pde(struct nvkm_gsp
+       rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */
+       rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu!
+-      return nvkm_gsp_rpc_wr(gsp, rpc, true);
++      return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
+ }
+ static void
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+@@ -585,13 +585,34 @@ r535_gsp_rpc_poll(struct nvkm_gsp *gsp,
+ }
+ static void *
+-r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, bool wait,
+-                u32 gsp_rpc_len)
++r535_gsp_rpc_handle_reply(struct nvkm_gsp *gsp, u32 fn,
++                        enum nvkm_gsp_rpc_reply_policy policy,
++                        u32 gsp_rpc_len)
++{
++      struct nvfw_gsp_rpc *reply;
++      void *repv = NULL;
++
++      switch (policy) {
++      case NVKM_GSP_RPC_REPLY_NOWAIT:
++              break;
++      case NVKM_GSP_RPC_REPLY_RECV:
++              reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
++              if (!IS_ERR_OR_NULL(reply))
++                      repv = reply->data;
++              else
++                      repv = reply;
++              break;
++      }
++
++      return repv;
++}
++
++static void *
++r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload,
++                enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
+ {
+       struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
+-      struct nvfw_gsp_rpc *msg;
+       u32 fn = rpc->function;
+-      void *repv = NULL;
+       int ret;
+       if (gsp->subdev.debug >= NV_DBG_TRACE) {
+@@ -605,15 +626,7 @@ r535_gsp_rpc_send(struct nvkm_gsp *gsp,
+       if (ret)
+               return ERR_PTR(ret);
+-      if (wait) {
+-              msg = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
+-              if (!IS_ERR_OR_NULL(msg))
+-                      repv = msg->data;
+-              else
+-                      repv = msg;
+-      }
+-
+-      return repv;
++      return r535_gsp_rpc_handle_reply(gsp, fn, policy, gsp_rpc_len);
+ }
+ static void
+@@ -797,7 +810,7 @@ r535_gsp_rpc_rm_free(struct nvkm_gsp_obj
+       rpc->params.hRoot = client->object.handle;
+       rpc->params.hObjectParent = 0;
+       rpc->params.hObjectOld = object->handle;
+-      return nvkm_gsp_rpc_wr(gsp, rpc, true);
++      return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
+ }
+ static void
+@@ -815,7 +828,7 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_g
+       struct nvkm_gsp *gsp = object->client->gsp;
+       void *ret = NULL;
+-      rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc));
++      rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, sizeof(*rpc));
+       if (IS_ERR_OR_NULL(rpc))
+               return rpc;
+@@ -876,7 +889,7 @@ r535_gsp_rpc_rm_ctrl_push(struct nvkm_gs
+       struct nvkm_gsp *gsp = object->client->gsp;
+       int ret = 0;
+-      rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
++      rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, repc);
+       if (IS_ERR_OR_NULL(rpc)) {
+               *params = NULL;
+               return PTR_ERR(rpc);
+@@ -948,8 +961,8 @@ r535_gsp_rpc_get(struct nvkm_gsp *gsp, u
+ }
+ static void *
+-r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait,
+-                u32 gsp_rpc_len)
++r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload,
++                enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
+ {
+       struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
+       struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
+@@ -967,7 +980,7 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp,
+               rpc->length = sizeof(*rpc) + max_payload_size;
+               msg->checksum = rpc->length;
+-              repv = r535_gsp_rpc_send(gsp, payload, false, 0);
++              repv = r535_gsp_rpc_send(gsp, payload, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
+               if (IS_ERR(repv))
+                       goto done;
+@@ -988,7 +1001,7 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp,
+                       memcpy(next, payload, size);
+-                      repv = r535_gsp_rpc_send(gsp, next, false, 0);
++                      repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
+                       if (IS_ERR(repv))
+                               goto done;
+@@ -997,20 +1010,10 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp,
+               }
+               /* Wait for reply. */
+-              rpc = r535_gsp_msg_recv(gsp, fn, payload_size +
+-                                      sizeof(*rpc));
+-              if (!IS_ERR_OR_NULL(rpc)) {
+-                      if (wait) {
+-                              repv = rpc->data;
+-                      } else {
+-                              nvkm_gsp_rpc_done(gsp, rpc);
+-                              repv = NULL;
+-                      }
+-              } else {
+-                      repv = wait ? rpc : NULL;
+-              }
++              repv = r535_gsp_rpc_handle_reply(gsp, fn, policy, payload_size +
++                                               sizeof(*rpc));
+       } else {
+-              repv = r535_gsp_rpc_send(gsp, payload, wait, gsp_rpc_len);
++              repv = r535_gsp_rpc_send(gsp, payload, policy, gsp_rpc_len);
+       }
+ done:
+@@ -1327,7 +1330,7 @@ r535_gsp_rpc_unloading_guest_driver(stru
+               rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0;
+       }
+-      return nvkm_gsp_rpc_wr(gsp, rpc, true);
++      return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
+ }
+ enum registry_type {
+@@ -1684,7 +1687,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gs
+       build_registry(gsp, rpc);
+-      return nvkm_gsp_rpc_wr(gsp, rpc, false);
++      return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT);
+ fail:
+       clean_registry(gsp);
+@@ -1893,7 +1896,7 @@ r535_gsp_rpc_set_system_info(struct nvkm
+       info->pciConfigMirrorSize = 0x001000;
+       r535_gsp_acpi_info(gsp, &info->acpiMethodData);
+-      return nvkm_gsp_rpc_wr(gsp, info, false);
++      return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
+ }
+ static int
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
+@@ -105,7 +105,7 @@ fbsr_memlist(struct nvkm_gsp_device *dev
+                       rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
+       }
+-      ret = nvkm_gsp_rpc_wr(gsp, rpc, true);
++      ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
+       if (ret)
+               return ret;
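
The refactor above has a familiar shape: a bool wait flag threaded through every sender becomes an explicit reply-policy enum that a single helper interprets. Below is a minimal standalone sketch of that pattern for readers skimming the diff — the names (reply_policy, msg_recv(), handle_reply(), rpc_send()) are simplified stand-ins for illustration, not the nvkm definitions:

    #include <stdio.h>
    #include <stddef.h>

    /* Reply policies, modeled on enum nvkm_gsp_rpc_reply_policy. */
    enum reply_policy {
        REPLY_NOWAIT = 0,   /* caller doesn't care about the reply */
        REPLY_RECV,         /* caller wants the entire reply message */
    };

    /* Stand-in for r535_gsp_msg_recv(): pretend a reply body arrived. */
    static void *msg_recv(unsigned int fn, unsigned int len)
    {
        static char body[] = "reply-body";
        (void)fn; (void)len;
        return body;
    }

    /* Single choke point shaped like r535_gsp_rpc_handle_reply():
     * every policy is interpreted here and nowhere else. */
    static void *handle_reply(unsigned int fn, enum reply_policy policy,
                              unsigned int len)
    {
        void *repv = NULL;

        switch (policy) {
        case REPLY_NOWAIT:
            break;                    /* return to the caller at once */
        case REPLY_RECV:
            repv = msg_recv(fn, len); /* block, hand the reply back */
            break;
        }
        return repv;
    }

    /* Sender: the real r535_gsp_rpc_send() enqueues the command first,
     * then defers to the policy handler (simplified away here). */
    static void *rpc_send(unsigned int fn, enum reply_policy policy,
                          unsigned int len)
    {
        return handle_reply(fn, policy, len);
    }

    int main(void)
    {
        void *a = rpc_send(1, REPLY_NOWAIT, 0);
        void *b = rpc_send(1, REPLY_RECV, 32);

        printf("NOWAIT -> %s\n", a ? (char *)a : "(no reply kept)");
        printf("RECV   -> %s\n", b ? (char *)b : "(no reply kept)");
        return 0;
    }

The payoff visible at the call sites is that nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV) now states intent where nvkm_gsp_rpc_wr(gsp, rpc, true) did not, and a future policy only has to extend the switch in one place.
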
diff --git a/queue-6.15/drm-nouveau-nvkm-introduce-new-gsp-reply-policy-nvkm_gsp_rpc_reply_poll.patch b/queue-6.15/drm-nouveau-nvkm-introduce-new-gsp-reply-policy-nvkm_gsp_rpc_reply_poll.patch
new file mode 100644
index 0000000..ea25f97
--- /dev/null
@@ -0,0 +1,81 @@
+From a738fa9105ac2897701ba4067c33e85faa27d1e2 Mon Sep 17 00:00:00 2001
+From: Zhi Wang <zhiw@nvidia.com>
+Date: Thu, 27 Feb 2025 01:35:54 +0000
+Subject: drm/nouveau/nvkm: introduce new GSP reply policy NVKM_GSP_RPC_REPLY_POLL
+
+From: Zhi Wang <zhiw@nvidia.com>
+
+commit a738fa9105ac2897701ba4067c33e85faa27d1e2 upstream.
+
+Some GSP RPC commands need a new reply policy: "the caller doesn't care
+about the message content but wants to make sure a reply is received".
+To support this case, a new reply policy is introduced.
+
+NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY is a large GSP RPC command, and the
+policy it actually requires is NVKM_GSP_RPC_REPLY_POLL. This can be seen
+in a dump of the GSP message queue: after the large GSP RPC command is
+issued, GSP writes only an empty RPC header in the queue as the reply.
+
+Without this change, the "receive the entire message" policy is used for
+NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY, which causes a timeout while waiting
+for the returned GSP message in the suspend/resume path.
+
+Introduce the new reply policy NVKM_GSP_RPC_REPLY_POLL, which waits for
+the returned GSP message but discards it on behalf of the caller, and
+use this new policy for the GSP RPC command
+NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY.
+
+Fixes: 50f290053d79 ("drm/nouveau: support handling the return of large GSP message")
+Cc: Danilo Krummrich <dakr@kernel.org>
+Cc: Alexandre Courbot <acourbot@nvidia.com>
+Tested-by: Ben Skeggs <bskeggs@nvidia.com>
+Signed-off-by: Zhi Wang <zhiw@nvidia.com>
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250227013554.8269-3-zhiw@nvidia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h  |    4 ++++
+ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c     |    3 +++
+ drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c |    2 +-
+ 3 files changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
++++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+@@ -44,10 +44,14 @@ typedef void (*nvkm_gsp_event_func)(stru
+  * NVKM_GSP_RPC_REPLY_RECV - If specified, wait and receive the entire GSP
+  * RPC message after the GSP RPC command is issued.
+  *
++ * NVKM_GSP_RPC_REPLY_POLL - If specified, wait for the reply and discard
++ * it before returning to the caller.
++ *
+  */
+ enum nvkm_gsp_rpc_reply_policy {
+       NVKM_GSP_RPC_REPLY_NOWAIT = 0,
+       NVKM_GSP_RPC_REPLY_RECV,
++      NVKM_GSP_RPC_REPLY_POLL,
+ };
+ struct nvkm_gsp {
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+@@ -602,6 +602,9 @@ r535_gsp_rpc_handle_reply(struct nvkm_gs
+               else
+                       repv = reply;
+               break;
++      case NVKM_GSP_RPC_REPLY_POLL:
++              repv = r535_gsp_msg_recv(gsp, fn, 0);
++              break;
+       }
+       return repv;
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
+@@ -105,7 +105,7 @@ fbsr_memlist(struct nvkm_gsp_device *dev
+                       rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
+       }
+-      ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
++      ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL);
+       if (ret)
+               return ret;
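
To make the new policy's contract concrete: NVKM_GSP_RPC_REPLY_POLL blocks like NVKM_GSP_RPC_REPLY_RECV, but receives with a zero-length payload and returns nothing to the caller — which fits a command such as NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY, whose reply is only an empty RPC header. A standalone sketch with stand-in names (not the nvkm API):

    #include <stdio.h>
    #include <stddef.h>

    enum reply_policy {
        REPLY_NOWAIT = 0,   /* don't wait for the reply at all */
        REPLY_RECV,         /* wait for and return the whole reply */
        REPLY_POLL,         /* wait for the reply, then discard it */
    };

    /* Stand-in receive: len is the payload size the caller expects.
     * A header-only reply (the ALLOC_MEMORY case) carries no payload,
     * so asking for a body that never arrives is what stalled RECV. */
    static void *msg_recv(unsigned int fn, unsigned int len)
    {
        static char body[] = "reply-body";
        (void)fn;
        return len ? body : NULL;
    }

    static void *handle_reply(unsigned int fn, enum reply_policy policy,
                              unsigned int len)
    {
        void *repv = NULL;

        switch (policy) {
        case REPLY_NOWAIT:
            break;
        case REPLY_RECV:
            repv = msg_recv(fn, len);
            break;
        case REPLY_POLL:
            /* Receive with a zero-length payload: completion is
             * confirmed once the header arrives, and nothing is
             * handed back to the caller. */
            repv = msg_recv(fn, 0);
            break;
        }
        return repv;
    }

    int main(void)
    {
        /* A suspend/resume-style caller: it only needs to know the
         * command completed, never the reply content. */
        void *repv = handle_reply(2, REPLY_POLL, 0);

        printf("POLL -> %s\n", repv ? "reply kept (unexpected)"
                                    : "reply consumed and discarded");
        return 0;
    }
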
diff --git a/queue-6.15/series b/queue-6.15/series
index c928afa9311134635b27b5b65e25aeb1bf99e86d..e50921a1eb0ca189d1478ac24cfe6bd7e492007d 100644
@@ -230,7 +230,6 @@ asoc-tegra210_ahub-add-check-to-of_device_get_match_.patch
 make-cc-option-work-correctly-for-the-wno-xyzzy-patt.patch
 gpiolib-of-add-polarity-quirk-for-s5m8767.patch
 pm-runtime-fix-denying-of-auto-suspend-in-pm_suspend.patch
-tools-nolibc-use-pselect6_time64-if-available.patch
 power-supply-max17040-adjust-thermal-channel-scaling.patch
 acpi-battery-negate-current-when-discharging.patch
 drm-amd-display-disable-dpp-rcg-before-dpp-clk-enabl.patch
@@ -487,3 +486,5 @@ selinux-fix-selinux_xfrm_alloc_user-to-set-correct-ctx_len.patch
 platform-x86-intel-uncore-freq-fail-module-load-when-plat_info-is-null.patch
 sched_ext-sched-core-don-t-call-scx_group_set_weight-prematurely-from-sched_create_group.patch
 atm-revert-atm_account_tx-if-copy_from_iter_full-fails.patch
+drm-nouveau-nvkm-factor-out-current-gsp-rpc-command-policies.patch
+drm-nouveau-nvkm-introduce-new-gsp-reply-policy-nvkm_gsp_rpc_reply_poll.patch
diff --git a/queue-6.15/tools-nolibc-use-pselect6_time64-if-available.patch b/queue-6.15/tools-nolibc-use-pselect6_time64-if-available.patch
deleted file mode 100644
index ae9562f..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-From 363033c7641dee857a543cbd3670782fd2c3bf0f Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 11 Apr 2025 11:00:40 +0200
-Subject: tools/nolibc: use pselect6_time64 if available
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-From: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
-
-[ Upstream commit 248ddc80b145515286bfb75d08034ad4c0fdb08e ]
-
-riscv32 does not have any of the older select system calls.
-Use pselect6_time64 instead.
-select() is also used to implement sleep().
-
-Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
-Acked-by: Willy Tarreau <w@1wt.eu>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/include/nolibc/sys.h | 8 ++++++++
- 1 file changed, 8 insertions(+)
-
-diff --git a/tools/include/nolibc/sys.h b/tools/include/nolibc/sys.h
-index 08c1c074bec89..a5decdba40223 100644
---- a/tools/include/nolibc/sys.h
-+++ b/tools/include/nolibc/sys.h
-@@ -1023,6 +1023,14 @@ int sys_select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeva
-               t.tv_nsec = timeout->tv_usec * 1000;
-       }
-       return my_syscall6(__NR_pselect6, nfds, rfds, wfds, efds, timeout ? &t : NULL, NULL);
-+#elif defined(__NR_pselect6_time64)
-+      struct __kernel_timespec t;
-+
-+      if (timeout) {
-+              t.tv_sec  = timeout->tv_sec;
-+              t.tv_nsec = timeout->tv_usec * 1000;
-+      }
-+      return my_syscall6(__NR_pselect6_time64, nfds, rfds, wfds, efds, timeout ? &t : NULL, NULL);
- #else
-       return __nolibc_enosys(__func__, nfds, rfds, wfds, efds, timeout);
- #endif
--- 
-2.39.5
-
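
For reference, the mechanism in the patch dropped from the queue above is straightforward to demonstrate outside nolibc: on targets whose kernels provide only pselect6_time64 (riscv32 among them), a select()-style wrapper converts the struct timeval into a 64-bit struct __kernel_timespec and issues the raw syscall, passing NULL for pselect6's sigmask argument. The sketch below is illustrative only — it uses glibc's syscall(2) where nolibc uses its my_syscall6() helper, and falls back to plain select() on hosts that lack the time64 syscall number:

    #include <stdio.h>
    #include <sys/select.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #if defined(__NR_pselect6_time64)
    #include <linux/time_types.h>   /* struct __kernel_timespec */
    #endif

    /* select()-alike that falls back to the raw pselect6_time64 syscall,
     * mirroring the conversion the nolibc patch performs in sys_select(). */
    static int select_time64(int nfds, fd_set *rfds, fd_set *wfds,
                             fd_set *efds, struct timeval *timeout)
    {
    #if defined(__NR_pselect6_time64)
        struct __kernel_timespec t;

        if (timeout) {
            t.tv_sec  = timeout->tv_sec;
            t.tv_nsec = timeout->tv_usec * 1000;    /* usec -> nsec */
        }
        /* The last argument is pselect6's sigmask descriptor; NULL
         * means "no signal mask", matching the patch. */
        return syscall(__NR_pselect6_time64, nfds, rfds, wfds, efds,
                       timeout ? &t : NULL, NULL);
    #else
        /* 64-bit hosts still have the classic syscalls. */
        return select(nfds, rfds, wfds, efds, timeout);
    #endif
    }

    int main(void)
    {
        struct timeval tv = { .tv_sec = 0, .tv_usec = 100 * 1000 };
        fd_set rfds;

        FD_ZERO(&rfds);
        FD_SET(STDIN_FILENO, &rfds);    /* watch stdin for up to 100 ms */
        printf("select_time64 -> %d\n",
               select_time64(STDIN_FILENO + 1, &rfds, NULL, NULL, &tv));
        return 0;
    }
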