bpf: Fix integer overflow in argument calculation for bpf_map_area_alloc
Author:     Bui Quang Minh <minhquangbui99@gmail.com>
AuthorDate: Sun, 13 Jun 2021 14:34:39 +0000 (21:34 +0700)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Wed, 14 Jul 2021 14:59:59 +0000 (16:59 +0200)
[ Upstream commit 7dd5d437c258bbf4cc15b35229e5208b87b8b4e0 ]

On 32-bit architectures, the result of sizeof() is a 32-bit integer, so
the expression becomes a multiplication of two 32-bit integers, which
can overflow. As a result, bpf_map_area_alloc() allocates less memory
than needed.

Fix this by casting one operand to u64.
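
To illustrate the wrap-around, here is a minimal user-space sketch (not
kernel code; the map size and element size below are hypothetical). It
shows how the 32-bit product truncates and how widening one operand to
64 bits, as the (u64) cast in this patch does, preserves the full size:

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          /* Hypothetical values: 1G entries, 4-byte pointers on a 32-bit kernel. */
          uint32_t max_entries = 0x40000000u;
          uint32_t elem_size = 4;

          /* Both operands are 32-bit, so the product is computed modulo 2^32. */
          uint64_t wrapped = (uint64_t)(max_entries * elem_size);

          /* Widening one operand first, as the (u64) cast does, keeps all 64 bits. */
          uint64_t correct = (uint64_t)max_entries * elem_size;

          printf("wrapped size: %" PRIu64 " bytes\n", wrapped); /* prints 0 */
          printf("correct size: %" PRIu64 " bytes\n", correct); /* prints 4294967296 */
          return 0;
  }

With the truncated size, bpf_map_area_alloc() succeeds but returns a
buffer far smaller than the map needs, so later accesses run past the
end of the allocation.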

Fixes: 0d2c4f964050 ("bpf: Eliminate rlimit-based memory accounting for sockmap and sockhash maps")
Fixes: 99c51064fb06 ("devmap: Use bpf_map_area_alloc() for allocating hash buckets")
Fixes: 546ac1ffb70d ("bpf: add devmap, a map for storing net device references")
Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20210613143440.71975-1-minhquangbui99@gmail.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
kernel/bpf/devmap.c
net/core/sock_map.c

diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 85d9d1b72a33a9c3750cbd8fcffac567a37d1cdb..b0ab5b915e6d1eb34ea4b22864b444d3a8126fb2 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -92,7 +92,7 @@ static struct hlist_head *dev_map_create_hash(unsigned int entries,
        int i;
        struct hlist_head *hash;
 
-       hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
+       hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
        if (hash != NULL)
                for (i = 0; i < entries; i++)
                        INIT_HLIST_HEAD(&hash[i]);
@@ -143,7 +143,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 
                spin_lock_init(&dtab->index_lock);
        } else {
-               dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
+               dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
                                                      sizeof(struct bpf_dtab_netdev *),
                                                      dtab->map.numa_node);
                if (!dtab->netdev_map)
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index d758fb83c8841d35737c25c6a42f338468b95a6f..ae62e6f96a95c97a82fa00c2443d69fd5ce3cf59 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -44,7 +44,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
        bpf_map_init_from_attr(&stab->map, attr);
        raw_spin_lock_init(&stab->lock);
 
-       stab->sks = bpf_map_area_alloc(stab->map.max_entries *
+       stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
                                       sizeof(struct sock *),
                                       stab->map.numa_node);
        if (!stab->sks) {