From 182e86d8d1a8b4fb30e81c154c06a569a4d04bb8 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Thu, 27 Feb 2020 13:25:49 +0100
Subject: [PATCH] 4.19-stable patches

added patches:
	alsa-rawmidi-avoid-bit-fields-for-state-flags.patch
	alsa-seq-avoid-concurrent-access-to-queue-flags.patch
	alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch
	netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch
	netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch
	rxrpc-fix-call-rcu-cleanup-using-non-bh-safe-locks.patch
---
 ...idi-avoid-bit-fields-for-state-flags.patch |  43 ++++++
 ...oid-concurrent-access-to-queue-flags.patch |  96 ++++++++++++
 ...nt-access-to-queue-current-tick-time.patch | 137 ++++++++++++++++++
 ...imit-limit-the-max-size-of-hashtable.patch |  52 +++++++
 ...hashlimit_mutex-scope-for-htable_put.patch |  78 ++++++++++
 ...-rcu-cleanup-using-non-bh-safe-locks.patch |  85 +++++++++++
 queue-4.19/series                             |   6 +
 7 files changed, 497 insertions(+)
 create mode 100644 queue-4.19/alsa-rawmidi-avoid-bit-fields-for-state-flags.patch
 create mode 100644 queue-4.19/alsa-seq-avoid-concurrent-access-to-queue-flags.patch
 create mode 100644 queue-4.19/alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch
 create mode 100644 queue-4.19/netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch
 create mode 100644 queue-4.19/netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch
 create mode 100644 queue-4.19/rxrpc-fix-call-rcu-cleanup-using-non-bh-safe-locks.patch

diff --git a/queue-4.19/alsa-rawmidi-avoid-bit-fields-for-state-flags.patch b/queue-4.19/alsa-rawmidi-avoid-bit-fields-for-state-flags.patch
new file mode 100644
index 00000000000..32d0e00973e
--- /dev/null
+++ b/queue-4.19/alsa-rawmidi-avoid-bit-fields-for-state-flags.patch
@@ -0,0 +1,43 @@
+From dfa9a5efe8b932a84b3b319250aa3ac60c20f876 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai
+Date: Fri, 14 Feb 2020 12:13:16 +0100
+Subject: ALSA: rawmidi: Avoid bit fields for state flags
+
+From: Takashi Iwai
+
+commit dfa9a5efe8b932a84b3b319250aa3ac60c20f876 upstream.
+
+The rawmidi state flags (opened, append, active_sensing) are stored in
+bit fields that can be potentially racy when accessed concurrently
+without any locks. Although the current code should be fine, there is
+also no real benefit to keeping the bit fields for such a small number
+of members.
+
+This patch changes those bit-field flags to simple bool fields. There
+should be no size increase of snd_rawmidi_substream from this change.
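+
+As an illustration of the difference (a simplified sketch, not the
+actual kernel structures):
+
+	/* Adjacent bit fields share one storage unit: a store to one
+	 * flag may be compiled as a read-modify-write of the whole
+	 * unit, racing with a concurrent store to a sibling flag. */
+	struct flags_bitfield {
+		unsigned int opened:1,
+			     append:1,
+			     active_sensing:1;
+	};
+
+	/* Plain bools give each flag its own storage location, so
+	 * concurrent stores to different flags cannot clobber each
+	 * other. */
+	struct flags_bool {
+		bool opened;
+		bool append;
+		bool active_sensing;
+	};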
+
+Reported-by: syzbot+576cc007eb9f2c968200@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/r/20200214111316.26939-4-tiwai@suse.de
+Signed-off-by: Takashi Iwai
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ include/sound/rawmidi.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/include/sound/rawmidi.h
++++ b/include/sound/rawmidi.h
+@@ -92,9 +92,9 @@ struct snd_rawmidi_substream {
+ 	struct list_head list;	/* list of all substream for given stream */
+ 	int stream;		/* direction */
+ 	int number;		/* substream number */
+-	unsigned int opened: 1,	/* open flag */
+-		append: 1,	/* append flag (merge more streams) */
+-		active_sensing: 1; /* send active sensing when close */
++	bool opened;		/* open flag */
++	bool append;		/* append flag (merge more streams) */
++	bool active_sensing;	/* send active sensing when close */
+ 	int use_count;		/* use counter (for output) */
+ 	size_t bytes;
+ 	struct snd_rawmidi *rmidi;
diff --git a/queue-4.19/alsa-seq-avoid-concurrent-access-to-queue-flags.patch b/queue-4.19/alsa-seq-avoid-concurrent-access-to-queue-flags.patch
new file mode 100644
index 00000000000..f0bbc0d9a9e
--- /dev/null
+++ b/queue-4.19/alsa-seq-avoid-concurrent-access-to-queue-flags.patch
@@ -0,0 +1,96 @@
+From bb51e669fa49feb5904f452b2991b240ef31bc97 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai
+Date: Fri, 14 Feb 2020 12:13:14 +0100
+Subject: ALSA: seq: Avoid concurrent access to queue flags
+
+From: Takashi Iwai
+
+commit bb51e669fa49feb5904f452b2991b240ef31bc97 upstream.
+
+The queue flags are represented in bit fields, and concurrent access
+may lead to unexpected results. Although the current code should be
+mostly OK (as KCSAN reported, it only reads one field while other
+fields are written), it's safer to cover both with proper spinlock
+protection.
+
+This patch fixes the possible concurrent read by protecting it with
+q->owner_lock. The queue owner field is protected as well, since it
+is the field this lock is meant to guard.
+
+Reported-by: syzbot+65c6c92d04304d0a8efc@syzkaller.appspotmail.com
+Reported-by: syzbot+e60ddfa48717579799dd@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/r/20200214111316.26939-2-tiwai@suse.de
+Signed-off-by: Takashi Iwai
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ sound/core/seq/seq_queue.c | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -405,6 +405,7 @@ int snd_seq_queue_check_access(int queue
+ int snd_seq_queue_set_owner(int queueid, int client, int locked)
+ {
+ 	struct snd_seq_queue *q = queueptr(queueid);
++	unsigned long flags;
+ 
+ 	if (q == NULL)
+ 		return -EINVAL;
+@@ -414,8 +415,10 @@ int snd_seq_queue_set_owner(int queueid,
+ 		return -EPERM;
+ 	}
+ 
++	spin_lock_irqsave(&q->owner_lock, flags);
+ 	q->locked = locked ? 1 : 0;
+ 	q->owner = client;
++	spin_unlock_irqrestore(&q->owner_lock, flags);
+ 	queue_access_unlock(q);
+ 	queuefree(q);
+ 
+@@ -552,15 +555,17 @@ void snd_seq_queue_client_termination(in
+ 	unsigned long flags;
+ 	int i;
+ 	struct snd_seq_queue *q;
++	bool matched;
+ 
+ 	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
+ 		if ((q = queueptr(i)) == NULL)
+ 			continue;
+ 		spin_lock_irqsave(&q->owner_lock, flags);
+-		if (q->owner == client)
++		matched = (q->owner == client);
++		if (matched)
+ 			q->klocked = 1;
+ 		spin_unlock_irqrestore(&q->owner_lock, flags);
+-		if (q->owner == client) {
++		if (matched) {
+ 			if (q->timer->running)
+ 				snd_seq_timer_stop(q->timer);
+ 			snd_seq_timer_reset(q->timer);
+@@ -752,6 +757,8 @@ void snd_seq_info_queues_read(struct snd
+ 	int i, bpm;
+ 	struct snd_seq_queue *q;
+ 	struct snd_seq_timer *tmr;
++	bool locked;
++	int owner;
+ 
+ 	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
+ 		if ((q = queueptr(i)) == NULL)
+@@ -763,9 +770,14 @@ void snd_seq_info_queues_read(struct snd
+ 		else
+ 			bpm = 0;
+ 
++		spin_lock_irq(&q->owner_lock);
++		locked = q->locked;
++		owner = q->owner;
++		spin_unlock_irq(&q->owner_lock);
++
+ 		snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
+-		snd_iprintf(buffer, "owned by client	: %d\n", q->owner);
+-		snd_iprintf(buffer, "lock status	: %s\n", q->locked ? "Locked" : "Free");
++		snd_iprintf(buffer, "owned by client	: %d\n", owner);
++		snd_iprintf(buffer, "lock status	: %s\n", locked ? "Locked" : "Free");
+ 		snd_iprintf(buffer, "queued time events	: %d\n", snd_seq_prioq_avail(q->timeq));
+ 		snd_iprintf(buffer, "queued tick events	: %d\n", snd_seq_prioq_avail(q->tickq));
+ 		snd_iprintf(buffer, "timer state	: %s\n", tmr->running ? "Running" : "Stopped");
diff --git a/queue-4.19/alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch b/queue-4.19/alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch
new file mode 100644
index 00000000000..bfe334838d9
--- /dev/null
+++ b/queue-4.19/alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch
@@ -0,0 +1,137 @@
+From dc7497795e014d84699c3b8809ed6df35352dd74 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai
+Date: Fri, 14 Feb 2020 12:13:15 +0100
+Subject: ALSA: seq: Fix concurrent access to queue current tick/time
+
+From: Takashi Iwai
+
+commit dc7497795e014d84699c3b8809ed6df35352dd74 upstream.
+
+snd_seq_check_queue() passes the current tick and time of the given
+queue as a pointer to snd_seq_prioq_cell_out(), but those might be
+updated concurrently by the seq timer update.
+
+Fix it by first retrieving the current tick and time via the proper
+helper functions, and passing those values to snd_seq_prioq_cell_out()
+later in the loops.
+
+snd_seq_timer_get_cur_time() takes a new argument and adjusts against
+the current system time only when explicitly requested; this
+adjustment isn't needed for snd_seq_check_queue(), as it's called
+either from the interrupt handler or right after queuing.
+
+Also, snd_seq_timer_get_cur_tick() is changed to read the value under
+the spinlock, for concurrency safety.
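+
+The pattern applied here, taking a snapshot of the shared value under
+the lock and then working with the copy, looks roughly like this (a
+simplified sketch with hypothetical names, not the patch code):
+
+	#include <linux/spinlock.h>
+
+	struct my_timer {
+		spinlock_t lock;
+		unsigned long cur_tick;	/* updated from the timer IRQ */
+	};
+
+	/* Read a consistent copy under the lock... */
+	static unsigned long my_timer_get_cur_tick(struct my_timer *tmr)
+	{
+		unsigned long flags, tick;
+
+		spin_lock_irqsave(&tmr->lock, flags);
+		tick = tmr->cur_tick;
+		spin_unlock_irqrestore(&tmr->lock, flags);
+		return tick;
+	}
+
+	/* ...and pass the copy around, rather than a pointer to the
+	 * live field that the timer interrupt may update meanwhile. */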
+
+Reported-by: syzbot+fd5e0eaa1a32999173b2@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/r/20200214111316.26939-3-tiwai@suse.de
+Signed-off-by: Takashi Iwai
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ sound/core/seq/seq_clientmgr.c | 4 ++--
+ sound/core/seq/seq_queue.c     | 9 ++++++---
+ sound/core/seq/seq_timer.c     | 13 ++++++++++---
+ sound/core/seq/seq_timer.h     | 3 ++-
+ 4 files changed, 20 insertions(+), 9 deletions(-)
+
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -563,7 +563,7 @@ static int update_timestamp_of_queue(str
+ 	event->queue = queue;
+ 	event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
+ 	if (real_time) {
+-		event->time.time = snd_seq_timer_get_cur_time(q->timer);
++		event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
+ 		event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
+ 	} else {
+ 		event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
+@@ -1642,7 +1642,7 @@ static int snd_seq_ioctl_get_queue_statu
+ 	tmr = queue->timer;
+ 	status->events = queue->tickq->cells + queue->timeq->cells;
+ 
+-	status->time = snd_seq_timer_get_cur_time(tmr);
++	status->time = snd_seq_timer_get_cur_time(tmr, true);
+ 	status->tick = snd_seq_timer_get_cur_tick(tmr);
+ 
+ 	status->running = tmr->running;
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -251,6 +251,8 @@ void snd_seq_check_queue(struct snd_seq_
+ {
+ 	unsigned long flags;
+ 	struct snd_seq_event_cell *cell;
++	snd_seq_tick_time_t cur_tick;
++	snd_seq_real_time_t cur_time;
+ 
+ 	if (q == NULL)
+ 		return;
+@@ -267,17 +269,18 @@ void snd_seq_check_queue(struct snd_seq_
+ 
+  __again:
+ 	/* Process tick queue... */
++	cur_tick = snd_seq_timer_get_cur_tick(q->timer);
+ 	for (;;) {
+-		cell = snd_seq_prioq_cell_out(q->tickq,
+-					      &q->timer->tick.cur_tick);
++		cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
+ 		if (!cell)
+ 			break;
+ 		snd_seq_dispatch_event(cell, atomic, hop);
+ 	}
+ 
+ 	/* Process time queue... */
++	cur_time = snd_seq_timer_get_cur_time(q->timer, false);
+ 	for (;;) {
+-		cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
++		cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
+ 		if (!cell)
+ 			break;
+ 		snd_seq_dispatch_event(cell, atomic, hop);
+--- a/sound/core/seq/seq_timer.c
++++ b/sound/core/seq/seq_timer.c
+@@ -437,14 +437,15 @@ int snd_seq_timer_continue(struct snd_se
+ }
+ 
+ /* return current 'real' time. use timeofday() to get better granularity. */
+-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
++					       bool adjust_ktime)
+ {
+ 	snd_seq_real_time_t cur_time;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&tmr->lock, flags);
+ 	cur_time = tmr->cur_time;
+-	if (tmr->running) {
++	if (adjust_ktime && tmr->running) {
+ 		struct timespec64 tm;
+ 
+ 		ktime_get_ts64(&tm);
+@@ -461,7 +462,13 @@ snd_seq_real_time_t snd_seq_timer_get_cu
+    high PPQ values) */
+ snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr)
+ {
+-	return tmr->tick.cur_tick;
++	snd_seq_tick_time_t cur_tick;
++	unsigned long flags;
++
++	spin_lock_irqsave(&tmr->lock, flags);
++	cur_tick = tmr->tick.cur_tick;
++	spin_unlock_irqrestore(&tmr->lock, flags);
++	return cur_tick;
+ }
+ 
+ 
+--- a/sound/core/seq/seq_timer.h
++++ b/sound/core/seq/seq_timer.h
+@@ -135,7 +135,8 @@ int snd_seq_timer_set_tempo_ppq(struct s
+ int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
+ int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
+ int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
+-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr);
++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
++					       bool adjust_ktime);
+ snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr);
+ 
+ extern int seq_default_timer_class;
diff --git a/queue-4.19/netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch b/queue-4.19/netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch
new file mode 100644
index 00000000000..fe28578bcd0
--- /dev/null
+++ b/queue-4.19/netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch
@@ -0,0 +1,52 @@
+From 8d0015a7ab76b8b1e89a3e5f5710a6e5103f2dd5 Mon Sep 17 00:00:00 2001
+From: Cong Wang
+Date: Sun, 2 Feb 2020 20:30:53 -0800
+Subject: netfilter: xt_hashlimit: limit the max size of hashtable
+
+From: Cong Wang
+
+commit 8d0015a7ab76b8b1e89a3e5f5710a6e5103f2dd5 upstream.
+
+The user-specified hashtable size is unbounded, which could easily
+lead to an OOM or a hung task, as we hold the global mutex while
+allocating and initializing the new hashtable.
+
+Add a maximum value to cap both cfg->size and cfg->max, as suggested
+by Florian.
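+
+The clamp-and-warn idiom used here handles oversized user input
+without failing the whole setup; a generic sketch (hypothetical
+names, not the patch code):
+
+	#include <linux/printk.h>
+	#include <linux/types.h>
+
+	#define MY_MAX_SIZE 1048576	/* 2^20 entries */
+
+	/* Cap a user-supplied table size instead of rejecting it,
+	 * and log (ratelimited, since userspace controls the
+	 * trigger) so the admin can see the value was adjusted. */
+	static void clamp_table_size(u32 *size)
+	{
+		if (*size > MY_MAX_SIZE) {
+			*size = MY_MAX_SIZE;
+			pr_info_ratelimited("size too large, truncated to %u\n",
+					    *size);
+		}
+	}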
+
+Reported-and-tested-by: syzbot+adf6c6c2be1c3a718121@syzkaller.appspotmail.com
+Signed-off-by: Cong Wang
+Reviewed-by: Florian Westphal
+Signed-off-by: Pablo Neira Ayuso
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ net/netfilter/xt_hashlimit.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -845,6 +845,8 @@ hashlimit_mt(const struct sk_buff *skb,
+ 	return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
+ }
+ 
++#define HASHLIMIT_MAX_SIZE 1048576
++
+ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
+ 				     struct xt_hashlimit_htable **hinfo,
+ 				     struct hashlimit_cfg3 *cfg,
+@@ -855,6 +857,14 @@ static int hashlimit_mt_check_common(con
+ 
+ 	if (cfg->gc_interval == 0 || cfg->expire == 0)
+ 		return -EINVAL;
++	if (cfg->size > HASHLIMIT_MAX_SIZE) {
++		cfg->size = HASHLIMIT_MAX_SIZE;
++		pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
++	}
++	if (cfg->max > HASHLIMIT_MAX_SIZE) {
++		cfg->max = HASHLIMIT_MAX_SIZE;
++		pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
++	}
+ 	if (par->family == NFPROTO_IPV4) {
+ 		if (cfg->srcmask > 32 || cfg->dstmask > 32)
+ 			return -EINVAL;
diff --git a/queue-4.19/netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch b/queue-4.19/netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch
new file mode 100644
index 00000000000..d504920036c
--- /dev/null
+++ b/queue-4.19/netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch
@@ -0,0 +1,78 @@
+From c4a3922d2d20c710f827d3a115ee338e8d0467df Mon Sep 17 00:00:00 2001
+From: Cong Wang
+Date: Sun, 2 Feb 2020 20:30:52 -0800
+Subject: netfilter: xt_hashlimit: reduce hashlimit_mutex scope for htable_put()
+
+From: Cong Wang
+
+commit c4a3922d2d20c710f827d3a115ee338e8d0467df upstream.
+
+It is unnecessary to hold hashlimit_mutex for htable_destroy(): the
+table has already been removed from the global hashtable and its
+refcount is already zero.
+
+Also, switch hinfo->use to refcount_t so that we don't have to hold
+the mutex until it reaches zero in htable_put().
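+
+refcount_dec_and_mutex_lock() takes the mutex only when the count
+actually drops to zero, so the common put path never touches the
+global lock; a condensed sketch of the idiom (hypothetical names,
+not the patch code):
+
+	#include <linux/list.h>
+	#include <linux/mutex.h>
+	#include <linux/refcount.h>
+	#include <linux/slab.h>
+
+	static DEFINE_MUTEX(table_mutex);
+
+	struct table {
+		struct hlist_node node;	/* on a global list */
+		refcount_t use;
+	};
+
+	static void table_put(struct table *t)
+	{
+		/* Returns true, with table_mutex held, only if ->use
+		 * reached zero; ordinary puts skip the mutex. */
+		if (refcount_dec_and_mutex_lock(&t->use, &table_mutex)) {
+			hlist_del(&t->node);	/* unpublish */
+			mutex_unlock(&table_mutex);
+			/* Destroy outside the mutex: the table is no
+			 * longer reachable, so no lock is needed. */
+			kfree(t);
+		}
+	}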
+
+Reported-and-tested-by: syzbot+adf6c6c2be1c3a718121@syzkaller.appspotmail.com
+Acked-by: Florian Westphal
+Signed-off-by: Cong Wang
+Signed-off-by: Pablo Neira Ayuso
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ net/netfilter/xt_hashlimit.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -36,6 +36,7 @@
+ #include
+ #include
+ #include
++#include <linux/refcount.h>
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Harald Welte ");
+@@ -108,7 +109,7 @@ struct dsthash_ent {
+ 
+ struct xt_hashlimit_htable {
+ 	struct hlist_node node;	/* global list of all htables */
+-	int use;
++	refcount_t use;
+ 	u_int8_t family;
+ 	bool rnd_initialized;
+ 
+@@ -309,7 +310,7 @@ static int htable_create(struct net *net
+ 	for (i = 0; i < hinfo->cfg.size; i++)
+ 		INIT_HLIST_HEAD(&hinfo->hash[i]);
+ 
+-	hinfo->use = 1;
++	refcount_set(&hinfo->use, 1);
+ 	hinfo->count = 0;
+ 	hinfo->family = family;
+ 	hinfo->rnd_initialized = false;
+@@ -428,7 +429,7 @@ static struct xt_hashlimit_htable *htabl
+ 	hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
+ 		if (!strcmp(name, hinfo->name) &&
+ 		    hinfo->family == family) {
+-			hinfo->use++;
++			refcount_inc(&hinfo->use);
+ 			return hinfo;
+ 		}
+ 	}
+@@ -437,12 +438,11 @@ static struct xt_hashlimit_htable *htabl
+ 
+ static void htable_put(struct xt_hashlimit_htable *hinfo)
+ {
+-	mutex_lock(&hashlimit_mutex);
+-	if (--hinfo->use == 0) {
++	if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) {
+ 		hlist_del(&hinfo->node);
++		mutex_unlock(&hashlimit_mutex);
+ 		htable_destroy(hinfo);
+ 	}
+-	mutex_unlock(&hashlimit_mutex);
+ }
+ 
+ /* The algorithm used is the Simple Token Bucket Filter (TBF)
diff --git a/queue-4.19/rxrpc-fix-call-rcu-cleanup-using-non-bh-safe-locks.patch b/queue-4.19/rxrpc-fix-call-rcu-cleanup-using-non-bh-safe-locks.patch
new file mode 100644
index 00000000000..0ceea72913c
--- /dev/null
+++ b/queue-4.19/rxrpc-fix-call-rcu-cleanup-using-non-bh-safe-locks.patch
@@ -0,0 +1,85 @@
+From 963485d436ccc2810177a7b08af22336ec2af67b Mon Sep 17 00:00:00 2001
+From: David Howells
+Date: Thu, 6 Feb 2020 13:57:40 +0000
+Subject: rxrpc: Fix call RCU cleanup using non-bh-safe locks
+
+From: David Howells
+
+commit 963485d436ccc2810177a7b08af22336ec2af67b upstream.
+
+rxrpc_rcu_destroy_call(), which is called as an RCU callback to clean up a
+put call, calls rxrpc_put_connection() which, deep in its bowels, takes a
+number of spinlocks in a non-BH-safe way, including rxrpc_conn_id_lock and
+local->client_conns_lock. RCU callbacks, however, are normally called from
+softirq context, which can cause lockdep to notice the locking
+inconsistency.
+
+To get lockdep to detect this, it's necessary to have the connection
+cleaned up on the put at the end of the last of its calls, though normally
+the cleanup is deferred. This can be induced, however, by starting a call
+on an AF_RXRPC socket and then closing the socket without reading the
+reply.
+
+Fix this by having rxrpc_rcu_destroy_call() punt the destruction to a
+workqueue if in softirq mode, deferring the destruction to process
+context.
+
+Note that another way to fix this could be to add a bunch of bh-disable
+annotations to the spinlocks concerned - and there might be more than just
+those two - but that means spending more time with BHs disabled.
+
+Note also that some of these places were covered by bh-disable spinlocks
+belonging to the rxrpc_transport object, but these got removed without the
+_bh annotation being retained on the next lock in.
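+
+Punting non-BH-safe cleanup from an RCU callback (softirq context) to
+process context via a work item is a common kernel idiom; roughly
+(hypothetical names, not the rxrpc code itself):
+
+	#include <linux/interrupt.h>
+	#include <linux/rcupdate.h>
+	#include <linux/slab.h>
+	#include <linux/workqueue.h>
+
+	struct obj {
+		struct rcu_head rcu;
+		struct work_struct work;
+	};
+
+	/* Runs in process context: safe for non-BH-safe locks. */
+	static void obj_destroy(struct work_struct *work)
+	{
+		struct obj *o = container_of(work, struct obj, work);
+
+		/* ... cleanup that takes ordinary spinlocks ... */
+		kfree(o);
+	}
+
+	/* The RCU callback usually runs in softirq context, so defer
+	 * the real work to a workqueue when that is the case. */
+	static void obj_rcu_free(struct rcu_head *rcu)
+	{
+		struct obj *o = container_of(rcu, struct obj, rcu);
+
+		if (in_softirq()) {
+			INIT_WORK(&o->work, obj_destroy);
+			schedule_work(&o->work);
+		} else {
+			obj_destroy(&o->work);
+		}
+	}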
+ +Fixes: 999b69f89241 ("rxrpc: Kill the client connection bundle concept") +Reported-by: syzbot+d82f3ac8d87e7ccbb2c9@syzkaller.appspotmail.com +Reported-by: syzbot+3f1fd6b8cbf8702d134e@syzkaller.appspotmail.com +Signed-off-by: David Howells +cc: Hillf Danton +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman + +--- + net/rxrpc/call_object.c | 22 +++++++++++++++++++--- + 1 file changed, 19 insertions(+), 3 deletions(-) + +--- a/net/rxrpc/call_object.c ++++ b/net/rxrpc/call_object.c +@@ -647,11 +647,11 @@ void rxrpc_put_call(struct rxrpc_call *c + } + + /* +- * Final call destruction under RCU. ++ * Final call destruction - but must be done in process context. + */ +-static void rxrpc_rcu_destroy_call(struct rcu_head *rcu) ++static void rxrpc_destroy_call(struct work_struct *work) + { +- struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu); ++ struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor); + struct rxrpc_net *rxnet = call->rxnet; + + rxrpc_put_connection(call->conn); +@@ -664,6 +664,22 @@ static void rxrpc_rcu_destroy_call(struc + } + + /* ++ * Final call destruction under RCU. ++ */ ++static void rxrpc_rcu_destroy_call(struct rcu_head *rcu) ++{ ++ struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu); ++ ++ if (in_softirq()) { ++ INIT_WORK(&call->processor, rxrpc_destroy_call); ++ if (!rxrpc_queue_work(&call->processor)) ++ BUG(); ++ } else { ++ rxrpc_destroy_call(&call->processor); ++ } ++} ++ ++/* + * clean up a call + */ + void rxrpc_cleanup_call(struct rxrpc_call *call) diff --git a/queue-4.19/series b/queue-4.19/series index 5a78ebf76ab..a75639b87dc 100644 --- a/queue-4.19/series +++ b/queue-4.19/series @@ -87,3 +87,9 @@ ecryptfs-replace-bug_on-with-error-handling-code.patch iommu-vt-d-fix-compile-warning-from-intel-svm.h.patch genirq-proc-reject-invalid-affinity-masks-again.patch bpf-offload-replace-bitwise-and-by-logical-and-in-bpf_prog_offload_info_fill.patch +alsa-rawmidi-avoid-bit-fields-for-state-flags.patch +alsa-seq-avoid-concurrent-access-to-queue-flags.patch +alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch +netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch +netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch +rxrpc-fix-call-rcu-cleanup-using-non-bh-safe-locks.patch -- 2.47.3