enosys: optimize bytecode when execve is not blocked
author     Thomas Weißschuh <thomas@t-8ch.de>
           Sun, 4 Feb 2024 09:06:39 +0000 (10:06 +0100)
committer  Thomas Weißschuh <thomas@t-8ch.de>
           Sun, 4 Feb 2024 09:29:57 +0000 (10:29 +0100)
Signed-off-by: Thomas Weißschuh <thomas@t-8ch.de>
misc-utils/enosys.c

index 7c9afa768fadfb815438d5d79f27780a38c89e7c..22096dfe0b2b5e06a6d6ee951302ab9e831dfcc7 100644 (file)
@@ -121,6 +121,7 @@ int main(int argc, char **argv)
        struct blocked_number *blocked;
        struct list_head *loop_ctr;
        struct list_head blocked_syscalls;
+       bool blocking_execve = false;
        INIT_LIST_HEAD(&blocked_syscalls);
        struct list_head blocked_ioctls;
        INIT_LIST_HEAD(&blocked_ioctls);
@@ -147,6 +148,8 @@ int main(int argc, char **argv)
                        blocked = xmalloc(sizeof(*blocked));
                        blocked->number = blocked_number;
                        list_add(&blocked->head, &blocked_syscalls);
+                       if (blocked_number == __NR_execve)
+                               blocking_execve = true;
 
                        break;
                case 'i':
@@ -205,13 +208,15 @@ int main(int argc, char **argv)
         *
         * See https://lore.kernel.org/all/CAAnLoWnS74dK9Wq4EQ-uzQ0qCRfSK-dLqh+HCais-5qwDjrVzg@mail.gmail.com/
         */
-       INSTR(BPF_STMT(BPF_LD | BPF_W | BPF_ABS, syscall_nr));
-       INSTR(BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_execve, 0, 5));
-       INSTR(BPF_STMT(BPF_LD | BPF_W | BPF_ABS, syscall_arg_lower32(2)));
-       INSTR(BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, (uint64_t)(uintptr_t) environ, 0, 3));
-       INSTR(BPF_STMT(BPF_LD | BPF_W | BPF_ABS, syscall_arg_upper32(2)));
-       INSTR(BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, (uint64_t)(uintptr_t) environ >> 32, 0, 1));
-       INSTR(BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW));
+       if (blocking_execve) {
+               INSTR(BPF_STMT(BPF_LD | BPF_W | BPF_ABS, syscall_nr));
+               INSTR(BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_execve, 0, 5));
+               INSTR(BPF_STMT(BPF_LD | BPF_W | BPF_ABS, syscall_arg_lower32(2)));
+               INSTR(BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, (uint64_t)(uintptr_t) environ, 0, 3));
+               INSTR(BPF_STMT(BPF_LD | BPF_W | BPF_ABS, syscall_arg_upper32(2)));
+               INSTR(BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, (uint64_t)(uintptr_t) environ >> 32, 0, 1));
+               INSTR(BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW));
+       }
 
        INSTR(BPF_STMT(BPF_LD | BPF_W | BPF_ABS, syscall_nr));
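For context, a minimal standalone sketch of the technique (an illustration, not the util-linux sources): the seven-instruction "allow our own execve" prologue discussed in the lore.kernel.org thread above is emitted only when execve is actually on the block list. The EMIT() helper, the fixed-size filter[] array and the hard-coded block list stand in for enosys' own INSTR() machinery and option parsing; the argument-offset arithmetic assumes a little-endian 64-bit target, which enosys.c hides behind syscall_arg_lower32()/syscall_arg_upper32().

/*
 * Sketch only: emit the "allow our own execve()" prologue solely when
 * execve is among the blocked syscalls, mirroring this commit.
 * A production filter should also validate seccomp_data.arch.
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

extern char **environ;

/* Append one BPF instruction; stands in for enosys' INSTR() helper. */
#define EMIT(instr) do { struct sock_filter f_ = instr; filter[n++] = f_; } while (0)

int main(int argc, char **argv)
{
	struct sock_filter filter[16];
	unsigned short n = 0;

	/* Pretend "-s execve" was given; with any other syscall here the
	 * seven prologue instructions below are simply never emitted. */
	long blocked_nr = __NR_execve;
	bool blocking_execve = (blocked_nr == __NR_execve);

	if (blocking_execve) {
		/* Allow the single execve() issued at the bottom of main();
		 * it is recognized by its envp argument being &environ[0]. */
		EMIT(BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)));
		EMIT(BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_execve, 0, 5));
		EMIT(BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[2])));
		EMIT(BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, (uint32_t)(uintptr_t) environ, 0, 3));
		EMIT(BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[2]) + 4));
		EMIT(BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, (uint32_t)((uint64_t)(uintptr_t) environ >> 32), 0, 1));
		EMIT(BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW));
	}

	/* Main filter: fail the blocked syscall with ENOSYS, allow the rest. */
	EMIT(BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)));
	EMIT(BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, (uint32_t) blocked_nr, 0, 1));
	EMIT(BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | (ENOSYS & SECCOMP_RET_DATA)));
	EMIT(BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW));

	struct sock_fprog prog = { .len = n, .filter = filter };

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
	    prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
		perror("seccomp");
		return 1;
	}

	fprintf(stderr, "filter has %u instructions\n", (unsigned) n);

	/* This execve() passes environ as envp, so the prologue (when
	 * present) lets it through although execve is otherwise blocked. */
	if (argc > 1)
		execve(argv[1], argv + 1, environ);
	perror("execve");
	return 1;
}

In this sketch, leaving execve unblocked shrinks the installed program from eleven instructions to four; the same seven-instruction saving is what the commit title refers to, since the environ comparison exists only to let enosys' own execve of the target command through.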