--- /dev/null
+From 1a3e1f40962c445b997151a542314f3c6097f8c3 Mon Sep 17 00:00:00 2001
+From: Johannes Weiner <hannes@cmpxchg.org>
+Date: Thu, 6 Aug 2020 23:20:45 -0700
+Subject: mm: memcontrol: decouple reference counting from page accounting
+
+From: Johannes Weiner <hannes@cmpxchg.org>
+
+commit 1a3e1f40962c445b997151a542314f3c6097f8c3 upstream.
+
+The reference counting of a memcg is currently coupled directly to how
+many 4k pages are charged to it. This doesn't work well with Roman's new
+slab controller, which maintains pools of objects and doesn't want to keep
+an extra balance sheet for the pages backing those objects.
+
+This unusual refcounting design (reference counts usually track pointers
+to an object) is only for historical reasons: memcg used to not take any
+css references and simply stalled offlining until all charges had been
+reparented and the page counters had dropped to zero. When we got rid of
+the reparenting requirement, the simple mechanical translation was to take
+a reference for every charge.
+
+More historical context can be found in commit e8ea14cc6ead ("mm:
+memcontrol: take a css reference for each charged page"), commit
+64f219938941 ("mm: memcontrol: remove obsolete kmemcg pinning tricks") and
+commit b2052564e66d ("mm: memcontrol: continue cache reclaim from offlined
+groups").
+
+The new slab controller exposes the limitations in this scheme, so let's
+switch it to a more idiomatic reference counting model based on actual
+kernel pointers to the memcg:
+
+- The per-cpu stock holds a reference to the memcg it's caching
+
+- User pages hold a reference for their page->mem_cgroup. Transparent
+ huge pages will no longer acquire tail references in advance; we'll
+ get them if needed during the split.
+
+- Kernel pages hold a reference for their page->mem_cgroup
+
+- Pages allocated in the root cgroup will acquire and release css
+ references for simplicity. css_get() and css_put() optimize that.
+
+- The current memcg_charge_slab() already hacked around the per-charge
+ references; this change gets rid of that as well.
+
+- tcp accounting will handle its reference in mem_cgroup_sk_{alloc,free}
+
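+For illustration, the per-cpu stock half of this scheme pairs up roughly
+as follows (a simplified sketch assembled from the hunks below; irq
+handling and the uncharge details are elided, this is not a verbatim copy
+of the tree):
+
+  static void drain_stock(struct memcg_stock_pcp *stock)
+  {
+          struct mem_cgroup *old = stock->cached;
+
+          if (!old)
+                  return;                 /* nothing cached, nothing to put */
+
+          /* ... uncharge stock->nr_pages as before ... */
+
+          css_put(&old->css);             /* drop the stock's own reference */
+          stock->cached = NULL;
+  }
+
+  static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+  {
+          struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
+
+          if (stock->cached != memcg) {   /* reset if necessary */
+                  drain_stock(stock);     /* releases the old pointer's ref */
+                  css_get(&memcg->css);   /* pin the newly cached memcg */
+                  stock->cached = memcg;
+          }
+          stock->nr_pages += nr_pages;
+  }
+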
+Roman:
+1) Rebased on top of the current mm tree: added css_get() in
+ mem_cgroup_charge(), dropped mem_cgroup_try_charge() part
+2) I've reformatted commit references in the commit log to make
+ checkpatch.pl happy.
+
+[hughd@google.com: remove css_put_many() from __mem_cgroup_clear_mc()]
+ Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2007302011450.2347@eggly.anvils
+
+Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
+Signed-off-by: Roman Gushchin <guro@fb.com>
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Shakeel Butt <shakeelb@google.com>
+Acked-by: Roman Gushchin <guro@fb.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Link: http://lkml.kernel.org/r/20200623174037.3951353-6-guro@fb.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Fixes: cdec2e4265df ("memcg: coalesce charging via percpu storage")
+Signed-off-by: GONG, Ruiqi <gongruiqi1@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memcontrol.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2015,6 +2015,9 @@ static void drain_stock(struct memcg_sto
+ {
+ struct mem_cgroup *old = stock->cached;
+
++ if (!old)
++ return;
++
+ if (stock->nr_pages) {
+ page_counter_uncharge(&old->memory, stock->nr_pages);
+ if (do_memsw_account())
+@@ -2022,6 +2025,8 @@ static void drain_stock(struct memcg_sto
+ css_put_many(&old->css, stock->nr_pages);
+ stock->nr_pages = 0;
+ }
++
++ css_put(&old->css);
+ stock->cached = NULL;
+ }
+
+@@ -2057,6 +2062,7 @@ static void refill_stock(struct mem_cgro
+ stock = this_cpu_ptr(&memcg_stock);
+ if (stock->cached != memcg) { /* reset if necessary */
+ drain_stock(stock);
++ css_get(&memcg->css);
+ stock->cached = memcg;
+ }
+ stock->nr_pages += nr_pages;
+++ /dev/null
-From 23d05d563b7e7b0314e65c8e882bc27eac2da8e7 Mon Sep 17 00:00:00 2001
-From: Eric Dumazet <edumazet@google.com>
-Date: Tue, 12 Dec 2023 16:46:21 +0000
-Subject: net: prevent mss overflow in skb_segment()
-
-From: Eric Dumazet <edumazet@google.com>
-
-commit 23d05d563b7e7b0314e65c8e882bc27eac2da8e7 upstream.
-
-Once again syzbot is able to crash the kernel in skb_segment() [1]
-
-GSO_BY_FRAGS is a forbidden value, but unfortunately the following
-computation in skb_segment() can reach it quite easily :
-
- mss = mss * partial_segs;
-
-65535 = 3 * 5 * 17 * 257, so many initial values of mss can lead to
-a bad final result.
-
-Make sure to limit segmentation so that the new mss value is smaller
-than GSO_BY_FRAGS.
-
-[1]
-
-general protection fault, probably for non-canonical address 0xdffffc000000000e: 0000 [#1] PREEMPT SMP KASAN
-KASAN: null-ptr-deref in range [0x0000000000000070-0x0000000000000077]
-CPU: 1 PID: 5079 Comm: syz-executor993 Not tainted 6.7.0-rc4-syzkaller-00141-g1ae4cd3cbdd0 #0
-Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 11/10/2023
-RIP: 0010:skb_segment+0x181d/0x3f30 net/core/skbuff.c:4551
-Code: 83 e3 02 e9 fb ed ff ff e8 90 68 1c f9 48 8b 84 24 f8 00 00 00 48 8d 78 70 48 b8 00 00 00 00 00 fc ff df 48 89 fa 48 c1 ea 03 <0f> b6 04 02 84 c0 74 08 3c 03 0f 8e 8a 21 00 00 48 8b 84 24 f8 00
-RSP: 0018:ffffc900043473d0 EFLAGS: 00010202
-RAX: dffffc0000000000 RBX: 0000000000010046 RCX: ffffffff886b1597
-RDX: 000000000000000e RSI: ffffffff886b2520 RDI: 0000000000000070
-RBP: ffffc90004347578 R08: 0000000000000005 R09: 000000000000ffff
-R10: 000000000000ffff R11: 0000000000000002 R12: ffff888063202ac0
-R13: 0000000000010000 R14: 000000000000ffff R15: 0000000000000046
-FS: 0000555556e7e380(0000) GS:ffff8880b9900000(0000) knlGS:0000000000000000
-CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
-CR2: 0000000020010000 CR3: 0000000027ee2000 CR4: 00000000003506f0
-DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
-DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
-Call Trace:
-<TASK>
-udp6_ufo_fragment+0xa0e/0xd00 net/ipv6/udp_offload.c:109
-ipv6_gso_segment+0x534/0x17e0 net/ipv6/ip6_offload.c:120
-skb_mac_gso_segment+0x290/0x610 net/core/gso.c:53
-__skb_gso_segment+0x339/0x710 net/core/gso.c:124
-skb_gso_segment include/net/gso.h:83 [inline]
-validate_xmit_skb+0x36c/0xeb0 net/core/dev.c:3626
-__dev_queue_xmit+0x6f3/0x3d60 net/core/dev.c:4338
-dev_queue_xmit include/linux/netdevice.h:3134 [inline]
-packet_xmit+0x257/0x380 net/packet/af_packet.c:276
-packet_snd net/packet/af_packet.c:3087 [inline]
-packet_sendmsg+0x24c6/0x5220 net/packet/af_packet.c:3119
-sock_sendmsg_nosec net/socket.c:730 [inline]
-__sock_sendmsg+0xd5/0x180 net/socket.c:745
-__sys_sendto+0x255/0x340 net/socket.c:2190
-__do_sys_sendto net/socket.c:2202 [inline]
-__se_sys_sendto net/socket.c:2198 [inline]
-__x64_sys_sendto+0xe0/0x1b0 net/socket.c:2198
-do_syscall_x64 arch/x86/entry/common.c:52 [inline]
-do_syscall_64+0x40/0x110 arch/x86/entry/common.c:83
-entry_SYSCALL_64_after_hwframe+0x63/0x6b
-RIP: 0033:0x7f8692032aa9
-Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 d1 19 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48
-RSP: 002b:00007fff8d685418 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
-RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 00007f8692032aa9
-RDX: 0000000000010048 RSI: 00000000200000c0 RDI: 0000000000000003
-RBP: 00000000000f4240 R08: 0000000020000540 R09: 0000000000000014
-R10: 0000000000000000 R11: 0000000000000246 R12: 00007fff8d685480
-R13: 0000000000000001 R14: 00007fff8d685480 R15: 0000000000000003
-</TASK>
-Modules linked in:
----[ end trace 0000000000000000 ]---
-RIP: 0010:skb_segment+0x181d/0x3f30 net/core/skbuff.c:4551
-Code: 83 e3 02 e9 fb ed ff ff e8 90 68 1c f9 48 8b 84 24 f8 00 00 00 48 8d 78 70 48 b8 00 00 00 00 00 fc ff df 48 89 fa 48 c1 ea 03 <0f> b6 04 02 84 c0 74 08 3c 03 0f 8e 8a 21 00 00 48 8b 84 24 f8 00
-RSP: 0018:ffffc900043473d0 EFLAGS: 00010202
-RAX: dffffc0000000000 RBX: 0000000000010046 RCX: ffffffff886b1597
-RDX: 000000000000000e RSI: ffffffff886b2520 RDI: 0000000000000070
-RBP: ffffc90004347578 R08: 0000000000000005 R09: 000000000000ffff
-R10: 000000000000ffff R11: 0000000000000002 R12: ffff888063202ac0
-R13: 0000000000010000 R14: 000000000000ffff R15: 0000000000000046
-FS: 0000555556e7e380(0000) GS:ffff8880b9900000(0000) knlGS:0000000000000000
-CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
-CR2: 0000000020010000 CR3: 0000000027ee2000 CR4: 00000000003506f0
-DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
-DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
-
-Fixes: 3953c46c3ac7 ("sk_buff: allow segmenting based on frag sizes")
-Signed-off-by: Eric Dumazet <edumazet@google.com>
-Cc: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
-Reviewed-by: Willem de Bruijn <willemb@google.com>
-Link: https://lore.kernel.org/r/20231212164621.4131800-1-edumazet@google.com
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- net/core/skbuff.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -3625,8 +3625,9 @@ struct sk_buff *skb_segment(struct sk_bu
- /* GSO partial only requires that we trim off any excess that
- * doesn't fit into an MSS sized block, so take care of that
- * now.
-+ * Cap len to not accidentally hit GSO_BY_FRAGS.
- */
-- partial_segs = len / mss;
-+ partial_segs = min(len, GSO_BY_FRAGS - 1U) / mss;
- if (partial_segs > 1)
- mss *= partial_segs;
- else
--- /dev/null
+From 5bc09b397cbf1221f8a8aacb1152650c9195b02b Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Sun, 4 Feb 2024 01:16:45 +0900
+Subject: nilfs2: fix potential bug in end_buffer_async_write
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 5bc09b397cbf1221f8a8aacb1152650c9195b02b upstream.
+
+According to a syzbot report, end_buffer_async_write(), which handles the
+completion of block device writes, may detect an abnormal condition of the
+buffer's async_write flag and cause a BUG_ON failure when using nilfs2.
+
+Nilfs2 itself does not use end_buffer_async_write(). However, the
+async_write flag has been used as a marker since commit 7f42ec394156
+("nilfs2: fix issue with race condition of competition between segments
+for dirty blocks") to resolve the double list insertion of dirty blocks in
+nilfs_lookup_dirty_data_buffers() and nilfs_lookup_node_buffers(), and the
+resulting crash.
+
+This modification is safe as long as it is used for file data and b-tree
+node blocks where the page caches are independent. However, it was
+irrelevant and redundant to also introduce async_write for segment summary
+and super root blocks that share buffers with the backing device. This
+led to the possibility that the BUG_ON check in end_buffer_async_write
+would fail as described above, if independent writebacks of the backing
+device occurred in parallel.
+
+The use of async_write for segment summary buffers has already been
+removed in a previous change.
+
+Fix this issue by removing the manipulation of the async_write flag for
+the remaining super root block buffer.
+
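+Schematically, each payload-buffer walk now handles the super root buffer
+before touching the async_write flag, e.g. in nilfs_segctor_prepare_write()
+(a simplified sketch of the first hunk below; page bookkeeping elided):
+
+  list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
+          if (bh == segbuf->sb_super_root) {
+                  /*
+                   * The super root buffer shares its page with the
+                   * backing device's page cache, so it must never be
+                   * tagged async_write here.
+                   */
+                  break;
+          }
+          set_buffer_async_write(bh);     /* data and b-tree node blocks only */
+  }
+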
+Link: https://lkml.kernel.org/r/20240203161645.4992-1-konishi.ryusuke@gmail.com
+Fixes: 7f42ec394156 ("nilfs2: fix issue with race condition of competition between segments for dirty blocks")
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: syzbot+5c04210f7c7f897c1e7f@syzkaller.appspotmail.com
+Closes: https://lkml.kernel.org/r/00000000000019a97c05fd42f8c8@google.com
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/segment.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1702,7 +1702,6 @@ static void nilfs_segctor_prepare_write(
+
+ list_for_each_entry(bh, &segbuf->sb_payload_buffers,
+ b_assoc_buffers) {
+- set_buffer_async_write(bh);
+ if (bh == segbuf->sb_super_root) {
+ if (bh->b_page != bd_page) {
+ lock_page(bd_page);
+@@ -1713,6 +1712,7 @@ static void nilfs_segctor_prepare_write(
+ }
+ break;
+ }
++ set_buffer_async_write(bh);
+ if (bh->b_page != fs_page) {
+ nilfs_begin_page_io(fs_page);
+ fs_page = bh->b_page;
+@@ -1798,7 +1798,6 @@ static void nilfs_abort_logs(struct list
+
+ list_for_each_entry(bh, &segbuf->sb_payload_buffers,
+ b_assoc_buffers) {
+- clear_buffer_async_write(bh);
+ if (bh == segbuf->sb_super_root) {
+ clear_buffer_uptodate(bh);
+ if (bh->b_page != bd_page) {
+@@ -1807,6 +1806,7 @@ static void nilfs_abort_logs(struct list
+ }
+ break;
+ }
++ clear_buffer_async_write(bh);
+ if (bh->b_page != fs_page) {
+ nilfs_end_page_io(fs_page, err);
+ fs_page = bh->b_page;
+@@ -1894,8 +1894,9 @@ static void nilfs_segctor_complete_write
+ BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
+ BIT(BH_NILFS_Redirected));
+
+- set_mask_bits(&bh->b_state, clear_bits, set_bits);
+ if (bh == segbuf->sb_super_root) {
++ set_buffer_uptodate(bh);
++ clear_buffer_dirty(bh);
+ if (bh->b_page != bd_page) {
+ end_page_writeback(bd_page);
+ bd_page = bh->b_page;
+@@ -1903,6 +1904,7 @@ static void nilfs_segctor_complete_write
+ update_sr = true;
+ break;
+ }
++ set_mask_bits(&bh->b_state, clear_bits, set_bits);
+ if (bh->b_page != fs_page) {
+ nilfs_end_page_io(fs_page, 0);
+ fs_page = bh->b_page;
--- /dev/null
+From 5124a0a549857c4b87173280e192eea24dea72ad Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Fri, 27 Jan 2023 01:41:14 +0900
+Subject: nilfs2: replace WARN_ONs for invalid DAT metadata block requests
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 5124a0a549857c4b87173280e192eea24dea72ad upstream.
+
+If DAT metadata file block access fails due to corruption of the DAT file
+or abnormal virtual block numbers held by b-trees or inodes, a kernel
+warning is generated.
+
+This replaces the WARN_ONs with error output, so that a kernel booted with
+panic_on_warn does not panic. This patch also replaces the detected
+return code -ENOENT with the internal code -EINVAL to notify the bmap
+layer of metadata corruption. When the bmap layer sees -EINVAL, it
+handles the abnormal situation with nilfs_bmap_convert_error() and finally
+returns -EIO as it should.
+
+Link: https://lkml.kernel.org/r/0000000000005cc3d205ea23ddcf@google.com
+Link: https://lkml.kernel.org/r/20230126164114.6911-1-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: <syzbot+5d5d25f90f195a3cfcb4@syzkaller.appspotmail.com>
+Tested-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/dat.c | 27 +++++++++++++++++----------
+ 1 file changed, 17 insertions(+), 10 deletions(-)
+
+--- a/fs/nilfs2/dat.c
++++ b/fs/nilfs2/dat.c
+@@ -40,8 +40,21 @@ static inline struct nilfs_dat_info *NIL
+ static int nilfs_dat_prepare_entry(struct inode *dat,
+ struct nilfs_palloc_req *req, int create)
+ {
+- return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
+- create, &req->pr_entry_bh);
++ int ret;
++
++ ret = nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
++ create, &req->pr_entry_bh);
++ if (unlikely(ret == -ENOENT)) {
++ nilfs_err(dat->i_sb,
++ "DAT doesn't have a block to manage vblocknr = %llu",
++ (unsigned long long)req->pr_entry_nr);
++ /*
++ * Return internal code -EINVAL to notify bmap layer of
++ * metadata corruption.
++ */
++ ret = -EINVAL;
++ }
++ return ret;
+ }
+
+ static void nilfs_dat_commit_entry(struct inode *dat,
+@@ -123,11 +136,7 @@ static void nilfs_dat_commit_free(struct
+
+ int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
+ {
+- int ret;
+-
+- ret = nilfs_dat_prepare_entry(dat, req, 0);
+- WARN_ON(ret == -ENOENT);
+- return ret;
++ return nilfs_dat_prepare_entry(dat, req, 0);
+ }
+
+ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
+@@ -154,10 +163,8 @@ int nilfs_dat_prepare_end(struct inode *
+ int ret;
+
+ ret = nilfs_dat_prepare_entry(dat, req, 0);
+- if (ret < 0) {
+- WARN_ON(ret == -ENOENT);
++ if (ret < 0)
+ return ret;
+- }
+
+ kaddr = kmap_atomic(req->pr_entry_bh->b_page);
+ entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
--- /dev/null
+From 944d5fe50f3f03daacfea16300e656a1691c4a23 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linuxfoundation.org>
+Date: Sun, 4 Feb 2024 15:25:12 +0000
+Subject: sched/membarrier: reduce the ability to hammer on sys_membarrier
+
+From: Linus Torvalds <torvalds@linuxfoundation.org>
+
+commit 944d5fe50f3f03daacfea16300e656a1691c4a23 upstream.
+
+On some systems, sys_membarrier can be very expensive, causing overall
+slowdowns for everything. So put a lock on the path in order to
+serialize the accesses and prevent it from being called at too high a
+frequency and saturating the machine.
+
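+Both expedited paths end up with a critical section of roughly this shape
+(a simplified sketch; CPU selection, the fallback path, and cleanup are
+elided, see the hunks below):
+
+  mutex_lock(&membarrier_ipi_mutex);      /* one expedited request at a time */
+  cpus_read_lock();
+  for_each_online_cpu(cpu) {
+          /* ... queue ipi_mb() on each CPU that needs it ... */
+  }
+  cpus_read_unlock();
+  /* ... wait for the IPIs to complete ... */
+  smp_mb();                               /* exit from system call is not a mb */
+  mutex_unlock(&membarrier_ipi_mutex);
+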
+Reviewed-and-tested-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Acked-by: Borislav Petkov <bp@alien8.de>
+Fixes: 22e4ebb97582 ("membarrier: Provide expedited private command")
+Fixes: c5f58bd58f43 ("membarrier: Provide GLOBAL_EXPEDITED command")
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[ converted to explicit mutex_*() calls - cleanup.h is not in this stable
+ branch - gregkh ]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/membarrier.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/kernel/sched/membarrier.c
++++ b/kernel/sched/membarrier.c
+@@ -34,6 +34,8 @@
+ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \
+ | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
+
++static DEFINE_MUTEX(membarrier_ipi_mutex);
++
+ static void ipi_mb(void *info)
+ {
+ smp_mb(); /* IPIs should be serializing but paranoid. */
+@@ -64,6 +66,7 @@ static int membarrier_global_expedited(v
+ fallback = true;
+ }
+
++ mutex_lock(&membarrier_ipi_mutex);
+ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ struct task_struct *p;
+@@ -104,6 +107,7 @@ static int membarrier_global_expedited(v
+ * rq->curr modification in scheduler.
+ */
+ smp_mb(); /* exit from system call is not a mb */
++ mutex_unlock(&membarrier_ipi_mutex);
+ return 0;
+ }
+
+@@ -144,6 +148,7 @@ static int membarrier_private_expedited(
+ fallback = true;
+ }
+
++ mutex_lock(&membarrier_ipi_mutex);
+ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ struct task_struct *p;
+@@ -182,6 +187,7 @@ static int membarrier_private_expedited(
+ * rq->curr modification in scheduler.
+ */
+ smp_mb(); /* exit from system call is not a mb */
++ mutex_unlock(&membarrier_ipi_mutex);
+
+ return 0;
+ }
irqchip-irq-brcmstb-l2-add-write-memory-barrier-before-exit.patch
pmdomain-core-move-the-unused-cleanup-to-a-_sync-initcall.patch
revert-md-raid5-wait-for-md_sb_change_pending-in-rai.patch
-net-prevent-mss-overflow-in-skb_segment.patch
+sched-membarrier-reduce-the-ability-to-hammer-on-sys_membarrier.patch
+mm-memcontrol-decouple-reference-counting-from-page-accounting.patch
+nilfs2-fix-potential-bug-in-end_buffer_async_write.patch
+nilfs2-replace-warn_ons-for-invalid-dat-metadata-block-requests.patch