s390/bpf: Write back tail call counter for BPF_PSEUDO_CALL
author Ilya Leoshkevich <iii@linux.ibm.com>
Fri, 17 Oct 2025 09:19:06 +0000 (11:19 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 19 Oct 2025 14:34:05 +0000 (16:34 +0200)
commit c861a6b147137d10b5ff88a2c492ba376cd1b8b0 upstream.

The tailcall_bpf2bpf_hierarchy_1 test hangs on s390. Its call graph is
as follows:

  entry()
    subprog_tail()
      bpf_tail_call_static(0) -> entry + tail_call_start
    subprog_tail()
      bpf_tail_call_static(0) -> entry + tail_call_start

entry() copies its tail call counter to subprog_tail()'s frame, which
then increments it. However, the incremented result is discarded,
leading to an astronomically large number of tail calls.
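
For reference, here is a minimal C sketch of the program shape that
triggers this. It is illustrative only: the map layout and section
name are assumptions, not the literal source of the selftest.

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  struct {
          __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
          __uint(max_entries, 1);
          __uint(key_size, sizeof(__u32));
          __uint(value_size, sizeof(__u32));
  } jmp_table SEC(".maps");

  static __noinline int subprog_tail(struct __sk_buff *skb)
  {
          /* Slot 0 points back at entry(), so this re-enters the
           * main program at entry + tail_call_start.
           */
          bpf_tail_call_static(skb, &jmp_table, 0);
          return 0;
  }

  SEC("tc")
  int entry(struct __sk_buff *skb)
  {
          subprog_tail(skb);
          subprog_tail(skb);
          return 1;
  }

  char __license[] SEC("license") = "GPL";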

Fix this by writing the incremented counter back to entry()'s frame.
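
In JIT terms, the sequence emitted for a BPF_PSEUDO_CALL now copies
the counter down into the callee's frame before the call and back up
into the caller's frame after it. A plain C analogue of that data
flow, with hypothetical frame and field names standing in for the
s390 mvc instructions against struct prog_frame:

  #include <stdio.h>

  struct frame { int tail_call_cnt; };

  static void subprog(struct frame *callee)
  {
          /* Stands in for a tail call bumping the callee-frame copy. */
          callee->tail_call_cnt++;
  }

  static void call_subprog(struct frame *caller)
  {
          struct frame callee;

          /* mvc before the call: copy the counter down. */
          callee.tail_call_cnt = caller->tail_call_cnt;
          subprog(&callee);
          /* mvc after the call (the fix): copy the result back. */
          caller->tail_call_cnt = callee.tail_call_cnt;
  }

  int main(void)
  {
          struct frame entry = { .tail_call_cnt = 0 };

          call_subprog(&entry);
          call_subprog(&entry);
          /* Prints 2. Without the write-back, each call would copy a
           * stale 0 down again and the counter would never reach the
           * tail call limit.
           */
          printf("%d\n", entry.tail_call_cnt);
          return 0;
  }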

Fixes: dd691e847d28 ("s390/bpf: Implement bpf_jit_supports_subprog_tailcalls()")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20250813121016.163375-3-iii@linux.ibm.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/s390/net/bpf_jit_comp.c

index 7907c3f9b59ab0a592ff81de93434b89fed9078f..2526a3d53fadbfb8b000601416ba1021e92ea6ab 100644
@@ -1789,13 +1789,6 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                jit->seen |= SEEN_FUNC;
                /*
                 * Copy the tail call counter to where the callee expects it.
-                *
-                * Note 1: The callee can increment the tail call counter, but
-                * we do not load it back, since the x86 JIT does not do this
-                * either.
-                *
-                * Note 2: We assume that the verifier does not let us call the
-                * main program, which clears the tail call counter on entry.
                 */
                /* mvc tail_call_cnt(4,%r15),frame_off+tail_call_cnt(%r15) */
                _EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt),
@@ -1822,6 +1815,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                call_r1(jit);
                /* lgr %b0,%r2: load return value into %b0 */
                EMIT4(0xb9040000, BPF_REG_0, REG_2);
+
+               /*
+                * Copy the potentially updated tail call counter back.
+                */
+
+               if (insn->src_reg == BPF_PSEUDO_CALL)
+                       /*
+                        * mvc frame_off+tail_call_cnt(%r15),
+                        *     tail_call_cnt(4,%r15)
+                        */
+                       _EMIT6(0xd203f000 | (jit->frame_off +
+                                            offsetof(struct prog_frame,
+                                                     tail_call_cnt)),
+                              0xf000 | offsetof(struct prog_frame,
+                                                tail_call_cnt));
+
                break;
        }
        case BPF_JMP | BPF_TAIL_CALL: {