flowcache: Avoid OOM condition under pressure
author Steffen Klassert <steffen.klassert@secunet.com>
Mon, 22 Feb 2016 09:40:07 +0000 (10:40 +0100)
committer Steffen Klassert <steffen.klassert@secunet.com>
Thu, 17 Mar 2016 09:28:42 +0000 (10:28 +0100)
We can hit an OOM condition if we are under pressure because
we cannot free the entries on the gc_list fast enough. So add
a counter for the not-yet-freed entries on the gc_list and
refuse new allocations if the value gets too high.

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
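
In short: every entry queued on the per-netns gc_list bumps
flow_cache_gc_count, the gc worker decrements it as entries are actually
freed, and flow_cache_lookup() refuses to allocate a new entry (returning
ERR_PTR(-ENOBUFS)) while that backlog or the per-cpu hash count is far
above the high watermark. Below is a minimal userspace sketch of this
backpressure pattern, with hypothetical names and C11 atomics standing in
for the kernel's atomic_t and kmem_cache allocator; it is an illustration
of the idea, not the kernel code itself.

/* Sketch: count entries queued for deferred freeing and refuse new
 * allocations while that count exceeds a watermark. All names here
 * are hypothetical. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define GC_HIGH_WATERMARK 1024

static atomic_int gc_pending;   /* entries queued for gc, not yet freed */

struct flow_entry { int dummy; };

static struct flow_entry *entry_alloc(void)
{
        /* Refuse the allocation if the gc backlog is too large,
         * mirroring the -ENOBUFS path added to flow_cache_lookup(). */
        if (atomic_load(&gc_pending) > GC_HIGH_WATERMARK)
                return NULL;
        return malloc(sizeof(struct flow_entry));
}

static void entry_queue_for_gc(struct flow_entry *e)
{
        /* Corresponds to flow_cache_queue_garbage(): account for the
         * entry before handing it to the deferred-free worker. */
        atomic_fetch_add(&gc_pending, 1);
        (void)e; /* ... would be placed on a gc list here ... */
}

static void gc_worker_free(struct flow_entry *e)
{
        /* Corresponds to flow_cache_gc_task(): free, then drop the count. */
        free(e);
        atomic_fetch_sub(&gc_pending, 1);
}

int main(void)
{
        struct flow_entry *e = entry_alloc();
        if (!e)
                return 1;
        entry_queue_for_gc(e);
        gc_worker_free(e);
        printf("pending after gc: %d\n", atomic_load(&gc_pending));
        return 0;
}

The kernel patch additionally increments flow_cache_genid before failing
the lookup, so existing cache entries are treated as stale and get
reclaimed on their next use.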
include/net/netns/xfrm.h
net/core/flow.c

diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 730d82ad6ee521beddc3af344d8e99a339f74dd5..24cd3949a9a4f04f78ff31ce681d71833f1bbe54 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -80,6 +80,7 @@ struct netns_xfrm {
        struct flow_cache       flow_cache_global;
        atomic_t                flow_cache_genid;
        struct list_head        flow_cache_gc_list;
+       atomic_t                flow_cache_gc_count;
        spinlock_t              flow_cache_gc_lock;
        struct work_struct      flow_cache_gc_work;
        struct work_struct      flow_cache_flush_work;
diff --git a/net/core/flow.c b/net/core/flow.c
index 1033725be40bd8f254ce27680e3b8abd09ad1546..3937b1b68d5bc7ad50691716ac1612332a5dc997 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -92,8 +92,11 @@ static void flow_cache_gc_task(struct work_struct *work)
        list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
        spin_unlock_bh(&xfrm->flow_cache_gc_lock);
 
-       list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
+       list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
                flow_entry_kill(fce, xfrm);
+               atomic_dec(&xfrm->flow_cache_gc_count);
+               WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0);
+       }
 }
 
 static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
@@ -101,6 +104,7 @@ static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
                                     struct netns_xfrm *xfrm)
 {
        if (deleted) {
+               atomic_add(deleted, &xfrm->flow_cache_gc_count);
                fcp->hash_count -= deleted;
                spin_lock_bh(&xfrm->flow_cache_gc_lock);
                list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
@@ -232,6 +236,13 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                if (fcp->hash_count > fc->high_watermark)
                        flow_cache_shrink(fc, fcp);
 
+               if (fcp->hash_count > 2 * fc->high_watermark ||
+                   atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) {
+                       atomic_inc(&net->xfrm.flow_cache_genid);
+                       flo = ERR_PTR(-ENOBUFS);
+                       goto ret_object;
+               }
+
                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->net = net;
@@ -446,6 +457,7 @@ int flow_cache_init(struct net *net)
        INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
        INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
        mutex_init(&net->xfrm.flow_flush_sem);
+       atomic_set(&net->xfrm.flow_cache_gc_count, 0);
 
        fc->hash_shift = 10;
        fc->low_watermark = 2 * flow_cache_hash_size(fc);