--- /dev/null
+From c174b53e95adf2eece2afc56cd9798374919f99a Mon Sep 17 00:00:00 2001
+From: Zicheng Qu <quzicheng@huawei.com>
+Date: Mon, 28 Oct 2024 14:20:27 +0000
+Subject: ad7780: fix division by zero in ad7780_write_raw()
+
+From: Zicheng Qu <quzicheng@huawei.com>
+
+commit c174b53e95adf2eece2afc56cd9798374919f99a upstream.
+
+In ad7780_write_raw(), val2 can be zero, which might lead to a
+division by zero error in DIV_ROUND_CLOSEST(). ad7780_write_raw()
+implements iio_info's write_raw callback. While val is explicitly
+documented as a value that may be zero, val2 is nowhere specified
+to be non-zero.
+
+Fixes: 9085daa4abcc ("staging: iio: ad7780: add gain & filter gpio support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zicheng Qu <quzicheng@huawei.com>
+Link: https://patch.msgid.link/20241028142027.1032332-1-quzicheng@huawei.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/adc/ad7780.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iio/adc/ad7780.c
++++ b/drivers/iio/adc/ad7780.c
+@@ -152,7 +152,7 @@ static int ad7780_write_raw(struct iio_d
+
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+- if (val != 0)
++ if (val != 0 || val2 == 0)
+ return -EINVAL;
+
+ vref = st->int_vref_mv * 1000000LL;
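A minimal userspace sketch (not the ad7780 driver itself) of why the combined
check matters: DIV_ROUND_CLOSEST() divides by its second argument, so a zero
val2 arriving through the IIO write_raw path has to be rejected before any
division happens. The macro below is a simplified stand-in and the constants
are illustrative, not the driver's actual values.

#include <stdio.h>
#include <errno.h>

/* Simplified stand-in for the kernel's DIV_ROUND_CLOSEST() macro. */
#define DIV_ROUND_CLOSEST(x, divisor)	(((x) + ((divisor) / 2)) / (divisor))

static int write_scale(int val, int val2, long long vref_uv)
{
	if (val != 0 || val2 == 0)	/* the check added by this patch */
		return -EINVAL;

	/* Safe now: val2 is known to be non-zero. */
	printf("scaled reference: %lld\n",
	       DIV_ROUND_CLOSEST(vref_uv, (long long)val2));
	return 0;
}

int main(void)
{
	/* Without the val2 check, the first call would divide by zero. */
	printf("val2 == 0 -> %d\n", write_scale(0, 0, 4096000000LL));
	printf("val2 != 0 -> %d\n", write_scale(0, 1250000, 4096000000LL));
	return 0;
}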
--- /dev/null
+From d6e6a74d4cea853b5321eeabb69c611148eedefe Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Wed, 23 Oct 2024 13:03:14 +0100
+Subject: ARM: 9429/1: ioremap: Sync PGDs for VMALLOC shadow
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+commit d6e6a74d4cea853b5321eeabb69c611148eedefe upstream.
+
+When syncing the VMALLOC area to other CPUs, make sure to also
+sync the KASAN shadow memory for the VMALLOC area, so that we
+don't get stale entries for the shadow memory in the top level PGD.
+
+Since we are now copying PGDs in two instances, create a helper
+function named memcpy_pgd() to do the actual copying, and
+create a helper to map the addresses of VMALLOC_START and
+VMALLOC_END into the corresponding shadow memory.
+
+Co-developed-by: Melon Liu <melon1335@163.com>
+
+Cc: stable@vger.kernel.org
+Fixes: 565cbaad83d8 ("ARM: 9202/1: kasan: support CONFIG_KASAN_VMALLOC")
+Link: https://lore.kernel.org/linux-arm-kernel/a1a1d062-f3a2-4d05-9836-3b098de9db6d@foss.st.com/
+Reported-by: Clement LE GOFFIC <clement.legoffic@foss.st.com>
+Suggested-by: Mark Rutland <mark.rutland@arm.com>
+Suggested-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/mm/ioremap.c | 33 +++++++++++++++++++++++++++++----
+ 1 file changed, 29 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/mm/ioremap.c
++++ b/arch/arm/mm/ioremap.c
+@@ -23,6 +23,7 @@
+ */
+ #include <linux/module.h>
+ #include <linux/errno.h>
++#include <linux/kasan.h>
+ #include <linux/mm.h>
+ #include <linux/vmalloc.h>
+ #include <linux/io.h>
+@@ -115,16 +116,40 @@ int ioremap_page(unsigned long virt, uns
+ }
+ EXPORT_SYMBOL(ioremap_page);
+
++#ifdef CONFIG_KASAN
++static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
++{
++ return (unsigned long)kasan_mem_to_shadow((void *)addr);
++}
++#else
++static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
++{
++ return 0;
++}
++#endif
++
++static void memcpy_pgd(struct mm_struct *mm, unsigned long start,
++ unsigned long end)
++{
++ end = ALIGN(end, PGDIR_SIZE);
++ memcpy(pgd_offset(mm, start), pgd_offset_k(start),
++ sizeof(pgd_t) * (pgd_index(end) - pgd_index(start)));
++}
++
+ void __check_vmalloc_seq(struct mm_struct *mm)
+ {
+ int seq;
+
+ do {
+ seq = atomic_read(&init_mm.context.vmalloc_seq);
+- memcpy(pgd_offset(mm, VMALLOC_START),
+- pgd_offset_k(VMALLOC_START),
+- sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
+- pgd_index(VMALLOC_START)));
++ memcpy_pgd(mm, VMALLOC_START, VMALLOC_END);
++ if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
++ unsigned long start =
++ arm_kasan_mem_to_shadow(VMALLOC_START);
++ unsigned long end =
++ arm_kasan_mem_to_shadow(VMALLOC_END);
++ memcpy_pgd(mm, start, end);
++ }
+ /*
+ * Use a store-release so that other CPUs that observe the
+ * counter's new value are guaranteed to see the results of the
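A standalone sketch (plain C, not kernel code) of the address transform behind
arm_kasan_mem_to_shadow() above: generic KASAN backs every 8 bytes of memory
with one shadow byte, so shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) +
KASAN_SHADOW_OFFSET. The offset and the VMALLOC bounds below are invented for
illustration; the point is that the shadow of the vmalloc range occupies its
own top-level PGD entries, which is why they need the same memcpy_pgd()
treatment.

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT	3		/* 8 bytes of memory per shadow byte */
#define KASAN_SHADOW_OFFSET		0x1f000000UL	/* illustrative value only */

static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	unsigned long vmalloc_start = 0xf0000000UL;	/* illustrative value only */
	unsigned long vmalloc_end   = 0xff800000UL;	/* illustrative value only */

	printf("vmalloc: %#lx - %#lx\n", vmalloc_start, vmalloc_end);
	printf("shadow : %#lx - %#lx\n",
	       mem_to_shadow(vmalloc_start), mem_to_shadow(vmalloc_end));
	return 0;
}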
--- /dev/null
+From 44e9a3bb76e5f2eecd374c8176b2c5163c8bb2e2 Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Wed, 23 Oct 2024 13:04:44 +0100
+Subject: ARM: 9430/1: entry: Do a dummy read from VMAP shadow
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+commit 44e9a3bb76e5f2eecd374c8176b2c5163c8bb2e2 upstream.
+
+When switching task, in addition to a dummy read from the new
+VMAP stack, also do a dummy read from the VMAP stack's
+corresponding KASAN shadow memory to sync things up in
+the new MM context.
+
+Cc: stable@vger.kernel.org
+Fixes: a1c510d0adc6 ("ARM: implement support for vmap'ed stacks")
+Link: https://lore.kernel.org/linux-arm-kernel/a1a1d062-f3a2-4d05-9836-3b098de9db6d@foss.st.com/
+Reported-by: Clement LE GOFFIC <clement.legoffic@foss.st.com>
+Suggested-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kernel/entry-armv.S | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -25,6 +25,7 @@
+ #include <asm/tls.h>
+ #include <asm/system_info.h>
+ #include <asm/uaccess-asm.h>
++#include <asm/kasan_def.h>
+
+ #include "entry-header.S"
+ #include <asm/probes.h>
+@@ -555,6 +556,13 @@ ENTRY(__switch_to)
+ @ entries covering the vmalloc region.
+ @
+ ldr r2, [ip]
++#ifdef CONFIG_KASAN_VMALLOC
++ @ Also dummy read from the KASAN shadow memory for the new stack if we
++ @ are using KASAN
++ mov_l r2, KASAN_SHADOW_OFFSET
++ add r2, r2, ip, lsr #KASAN_SHADOW_SCALE_SHIFT
++ ldr r2, [r2]
++#endif
+ #endif
+
+ @ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
--- /dev/null
+From 93ee385254d53849c01dd8ab9bc9d02790ee7f0e Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Wed, 23 Oct 2024 13:05:34 +0100
+Subject: ARM: 9431/1: mm: Pair atomic_set_release() with _read_acquire()
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+commit 93ee385254d53849c01dd8ab9bc9d02790ee7f0e upstream.
+
+The code for syncing vmalloc memory PGD pointers uses atomic_read()
+paired with atomic_set_release(), but the proper counterpart of
+atomic_set_release() is atomic_read_acquire().
+
+This is done to clearly instruct the compiler to not
+reorder the memcpy() or similar calls inside the section
+so that we do not observe changes to init_mm. memcpy()
+calls should be identified by the compiler as having
+unpredictable side effects, but let's try to be on the
+safe side.
+
+Cc: stable@vger.kernel.org
+Fixes: d31e23aff011 ("ARM: mm: make vmalloc_seq handling SMP safe")
+Suggested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/mm/ioremap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/mm/ioremap.c
++++ b/arch/arm/mm/ioremap.c
+@@ -141,7 +141,7 @@ void __check_vmalloc_seq(struct mm_struc
+ int seq;
+
+ do {
+- seq = atomic_read(&init_mm.context.vmalloc_seq);
++ seq = atomic_read_acquire(&init_mm.context.vmalloc_seq);
+ memcpy_pgd(mm, VMALLOC_START, VMALLOC_END);
+ if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+ unsigned long start =
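A userspace C11 sketch of the acquire/release pairing this patch restores,
mirroring the shape of __check_vmalloc_seq(): the writer publishes its table
update and then bumps the sequence counter with a release store; the reader
loads the counter with acquire semantics so the copy inside the retry loop
cannot be reordered ahead of the sequence check. Names and sizes are
illustrative, not the kernel's.

#include <stdatomic.h>
#include <string.h>

static _Atomic int vmalloc_seq;
static char master_table[64];		/* stands in for init_mm's PGD entries */
static char local_table[64];		/* stands in for the mm being synced */

static void writer_update(const char *new_entries)
{
	memcpy(master_table, new_entries, sizeof(master_table));
	/* Release: the table update above is visible before the new counter value. */
	atomic_fetch_add_explicit(&vmalloc_seq, 1, memory_order_release);
}

static void reader_sync(void)
{
	int seq;

	do {
		/* Acquire: pairs with the writer's release, so the memcpy()
		 * below cannot be reordered before this load. */
		seq = atomic_load_explicit(&vmalloc_seq, memory_order_acquire);
		memcpy(local_table, master_table, sizeof(local_table));
	} while (seq != atomic_load_explicit(&vmalloc_seq, memory_order_relaxed));
}

int main(void)
{
	char update[64] = { 1 };

	writer_update(update);
	reader_sync();
	return local_table[0] == 1 ? 0 : 1;	/* 0 if the copy saw the update */
}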
--- /dev/null
+From 955710afcb3bb63e21e186451ed5eba85fa14d0b Mon Sep 17 00:00:00 2001
+From: Patrick Donnelly <pdonnell@redhat.com>
+Date: Sat, 12 Oct 2024 20:54:11 -0400
+Subject: ceph: extract entity name from device id
+
+From: Patrick Donnelly <pdonnell@redhat.com>
+
+commit 955710afcb3bb63e21e186451ed5eba85fa14d0b upstream.
+
+Previously, the "name" in the new device syntax "<name>@<fsid>.<fsname>"
+was ignored because (presumably) tests were done using mount.ceph which
+also passed the entity name using "-o name=foo". If mounting is done
+without the mount.ceph helper, the new device id syntax fails to set
+the name properly.
+
+Cc: stable@vger.kernel.org
+Link: https://tracker.ceph.com/issues/68516
+Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
+Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ceph/super.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -281,7 +281,9 @@ static int ceph_parse_new_source(const c
+ size_t len;
+ struct ceph_fsid fsid;
+ struct ceph_parse_opts_ctx *pctx = fc->fs_private;
++ struct ceph_options *opts = pctx->copts;
+ struct ceph_mount_options *fsopt = pctx->opts;
++ const char *name_start = dev_name;
+ char *fsid_start, *fs_name_start;
+
+ if (*dev_name_end != '=') {
+@@ -292,8 +294,14 @@ static int ceph_parse_new_source(const c
+ fsid_start = strchr(dev_name, '@');
+ if (!fsid_start)
+ return invalfc(fc, "missing cluster fsid");
+- ++fsid_start; /* start of cluster fsid */
++ len = fsid_start - name_start;
++ kfree(opts->name);
++ opts->name = kstrndup(name_start, len, GFP_KERNEL);
++ if (!opts->name)
++ return -ENOMEM;
++ dout("using %s entity name", opts->name);
+
++ ++fsid_start; /* start of cluster fsid */
+ fs_name_start = strchr(fsid_start, '.');
+ if (!fs_name_start)
+ return invalfc(fc, "missing file system name");
--- /dev/null
+From dbc16915279a548a204154368da23d402c141c81 Mon Sep 17 00:00:00 2001
+From: "yuan.gao" <yuan.gao@ucloud.cn>
+Date: Fri, 18 Oct 2024 14:44:35 +0800
+Subject: mm/slub: Avoid list corruption when removing a slab from the full list
+
+From: yuan.gao <yuan.gao@ucloud.cn>
+
+commit dbc16915279a548a204154368da23d402c141c81 upstream.
+
+Boot with slub_debug=UFPZ.
+
+If an allocated object fails alloc_consistency_checks(), all objects of
+the slab will be marked as used, and then the slab will be removed from
+the partial list.
+
+When an object belonging to the slab is freed later, the remove_full()
+function is called. Because the slab is neither on the partial list nor
+on the full list, this eventually leads to a list corruption (actually a
+list poison being detected).
+
+So we need to mark and isolate a slab page with corrupted metadata and
+not put it back in circulation.
+
+Because the debug caches avoid all the fastpaths, reusing the frozen bit
+to mark a slab page with metadata corruption seems to be fine.
+
+[ 4277.385669] list_del corruption, ffffea00044b3e50->next is LIST_POISON1 (dead000000000100)
+[ 4277.387023] ------------[ cut here ]------------
+[ 4277.387880] kernel BUG at lib/list_debug.c:56!
+[ 4277.388680] invalid opcode: 0000 [#1] PREEMPT SMP PTI
+[ 4277.389562] CPU: 5 PID: 90 Comm: kworker/5:1 Kdump: loaded Tainted: G OE 6.6.1-1 #1
+[ 4277.392113] Workqueue: xfs-inodegc/vda1 xfs_inodegc_worker [xfs]
+[ 4277.393551] RIP: 0010:__list_del_entry_valid_or_report+0x7b/0xc0
+[ 4277.394518] Code: 48 91 82 e8 37 f9 9a ff 0f 0b 48 89 fe 48 c7 c7 28 49 91 82 e8 26 f9 9a ff 0f 0b 48 89 fe 48 c7 c7 58 49 91
+[ 4277.397292] RSP: 0018:ffffc90000333b38 EFLAGS: 00010082
+[ 4277.398202] RAX: 000000000000004e RBX: ffffea00044b3e50 RCX: 0000000000000000
+[ 4277.399340] RDX: 0000000000000002 RSI: ffffffff828f8715 RDI: 00000000ffffffff
+[ 4277.400545] RBP: ffffea00044b3e40 R08: 0000000000000000 R09: ffffc900003339f0
+[ 4277.401710] R10: 0000000000000003 R11: ffffffff82d44088 R12: ffff888112cf9910
+[ 4277.402887] R13: 0000000000000001 R14: 0000000000000001 R15: ffff8881000424c0
+[ 4277.404049] FS: 0000000000000000(0000) GS:ffff88842fd40000(0000) knlGS:0000000000000000
+[ 4277.405357] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 4277.406389] CR2: 00007f2ad0b24000 CR3: 0000000102a3a006 CR4: 00000000007706e0
+[ 4277.407589] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 4277.408780] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 4277.410000] PKRU: 55555554
+[ 4277.410645] Call Trace:
+[ 4277.411234] <TASK>
+[ 4277.411777] ? die+0x32/0x80
+[ 4277.412439] ? do_trap+0xd6/0x100
+[ 4277.413150] ? __list_del_entry_valid_or_report+0x7b/0xc0
+[ 4277.414158] ? do_error_trap+0x6a/0x90
+[ 4277.414948] ? __list_del_entry_valid_or_report+0x7b/0xc0
+[ 4277.415915] ? exc_invalid_op+0x4c/0x60
+[ 4277.416710] ? __list_del_entry_valid_or_report+0x7b/0xc0
+[ 4277.417675] ? asm_exc_invalid_op+0x16/0x20
+[ 4277.418482] ? __list_del_entry_valid_or_report+0x7b/0xc0
+[ 4277.419466] ? __list_del_entry_valid_or_report+0x7b/0xc0
+[ 4277.420410] free_to_partial_list+0x515/0x5e0
+[ 4277.421242] ? xfs_iext_remove+0x41a/0xa10 [xfs]
+[ 4277.422298] xfs_iext_remove+0x41a/0xa10 [xfs]
+[ 4277.423316] ? xfs_inodegc_worker+0xb4/0x1a0 [xfs]
+[ 4277.424383] xfs_bmap_del_extent_delay+0x4fe/0x7d0 [xfs]
+[ 4277.425490] __xfs_bunmapi+0x50d/0x840 [xfs]
+[ 4277.426445] xfs_itruncate_extents_flags+0x13a/0x490 [xfs]
+[ 4277.427553] xfs_inactive_truncate+0xa3/0x120 [xfs]
+[ 4277.428567] xfs_inactive+0x22d/0x290 [xfs]
+[ 4277.429500] xfs_inodegc_worker+0xb4/0x1a0 [xfs]
+[ 4277.430479] process_one_work+0x171/0x340
+[ 4277.431227] worker_thread+0x277/0x390
+[ 4277.431962] ? __pfx_worker_thread+0x10/0x10
+[ 4277.432752] kthread+0xf0/0x120
+[ 4277.433382] ? __pfx_kthread+0x10/0x10
+[ 4277.434134] ret_from_fork+0x2d/0x50
+[ 4277.434837] ? __pfx_kthread+0x10/0x10
+[ 4277.435566] ret_from_fork_asm+0x1b/0x30
+[ 4277.436280] </TASK>
+
+Fixes: 643b113849d8 ("slub: enable tracking of full slabs")
+Suggested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
+Suggested-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: yuan.gao <yuan.gao@ucloud.cn>
+Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
+Acked-by: Christoph Lameter <cl@linux.com>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slab.h | 5 +++++
+ mm/slub.c | 9 ++++++++-
+ 2 files changed, 13 insertions(+), 1 deletion(-)
+
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -78,6 +78,11 @@ struct slab {
+ struct {
+ unsigned inuse:16;
+ unsigned objects:15;
++ /*
++ * If slab debugging is enabled then the
++ * frozen bit can be reused to indicate
++ * that the slab was corrupted
++ */
+ unsigned frozen:1;
+ };
+ };
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1275,6 +1275,11 @@ static int check_slab(struct kmem_cache
+ slab->inuse, slab->objects);
+ return 0;
+ }
++ if (slab->frozen) {
++ slab_err(s, slab, "Slab disabled since SLUB metadata consistency check failed");
++ return 0;
++ }
++
+ /* Slab_pad_check fixes things up after itself */
+ slab_pad_check(s, slab);
+ return 1;
+@@ -1463,6 +1468,7 @@ bad:
+ slab_fix(s, "Marking all objects used");
+ slab->inuse = slab->objects;
+ slab->freelist = NULL;
++ slab->frozen = 1; /* mark consistency-failed slab as frozen */
+ }
+ return false;
+ }
+@@ -2162,7 +2168,8 @@ static void *alloc_single_from_partial(s
+ slab->inuse++;
+
+ if (!alloc_debug_processing(s, slab, object, orig_size)) {
+- remove_partial(n, slab);
++ if (folio_test_slab(slab_folio(slab)))
++ remove_partial(n, slab);
+ return NULL;
+ }
+
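A simplified userspace sketch (not mm/slub.c) of the idea behind reusing the
frozen bit: a one-bit flag in the per-slab metadata marks a slab whose
consistency checks failed, so later alloc/free paths refuse to touch it
instead of manipulating lists it is no longer on. The struct below loosely
mirrors the bitfield layout shown in the mm/slab.h hunk above.

#include <stdio.h>
#include <stdbool.h>

struct fake_slab {
	unsigned inuse:16;
	unsigned objects:15;
	unsigned frozen:1;	/* reused here as "disabled after failed consistency check" */
};

static bool check_slab(const struct fake_slab *slab)
{
	if (slab->frozen) {
		fprintf(stderr, "slab disabled since consistency check failed\n");
		return false;
	}
	return true;
}

static void mark_corrupted(struct fake_slab *slab)
{
	slab->inuse = slab->objects;	/* mark all objects used ... */
	slab->frozen = 1;		/* ... and never put the slab back on any list */
}

int main(void)
{
	struct fake_slab slab = { .inuse = 3, .objects = 32, .frozen = 0 };

	mark_corrupted(&slab);
	if (!check_slab(&slab))
		printf("free path skips list manipulation for this slab\n");
	return 0;
}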
--- /dev/null
+From 45c9f2b856a075a34873d00788d2e8a250c1effd Mon Sep 17 00:00:00 2001
+From: Vasily Gorbik <gor@linux.ibm.com>
+Date: Tue, 19 Nov 2024 14:54:07 +0100
+Subject: s390/entry: Mark IRQ entries to fix stack depot warnings
+
+From: Vasily Gorbik <gor@linux.ibm.com>
+
+commit 45c9f2b856a075a34873d00788d2e8a250c1effd upstream.
+
+The stack depot filters out everything outside of the top interrupt
+context as an uninteresting or irrelevant part of the stack traces. This
+helps with stack trace de-duplication, avoiding an explosion of saved
+stack traces that share the same IRQ context code path but originate
+from different randomly interrupted points, eventually exhausting the
+stack depot.
+
+Filtering uses in_irqentry_text() to identify functions within the
+.irqentry.text and .softirqentry.text sections, which then become the
+last stack trace entries being saved.
+
+While __do_softirq() is placed into the .softirqentry.text section by
+common code, populating .irqentry.text is architecture-specific.
+
+Currently, the .irqentry.text section on s390 is empty, which prevents
+stack depot filtering and de-duplication and could result in warnings
+like:
+
+Stack depot reached limit capacity
+WARNING: CPU: 0 PID: 286113 at lib/stackdepot.c:252 depot_alloc_stack+0x39a/0x3c8
+
+with PREEMPT and KASAN enabled.
+
+Fix this by moving the IO/EXT interrupt handlers from .kprobes.text into
+the .irqentry.text section and updating the kprobes blacklist to include
+the .irqentry.text section.
+
+This is done only for asynchronous interrupts and explicitly not for
+program checks, which are synchronous and where the context beyond the
+program check is important to preserve. Despite machine checks being
+somewhat in between, they are extremely rare, and preserving context
+when possible is also of value.
+
+SVCs and Restart Interrupts are not relevant, one being always at the
+boundary to user space and the other being a one-time thing.
+
+IRQ entry filtering is also optionally used by the ftrace function
+graph tracer, where the same logic applies.
+
+Cc: stable@vger.kernel.org # 5.15+
+Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kernel/entry.S | 4 ++++
+ arch/s390/kernel/kprobes.c | 6 ++++++
+ 2 files changed, 10 insertions(+)
+
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -458,9 +458,13 @@ SYM_CODE_START(\name)
+ SYM_CODE_END(\name)
+ .endm
+
++ .section .irqentry.text, "ax"
++
+ INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
+ INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
+
++ .section .kprobes.text, "ax"
++
+ /*
+ * Load idle PSW.
+ */
+--- a/arch/s390/kernel/kprobes.c
++++ b/arch/s390/kernel/kprobes.c
+@@ -518,6 +518,12 @@ int __init arch_init_kprobes(void)
+ return 0;
+ }
+
++int __init arch_populate_kprobe_blacklist(void)
++{
++ return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
++ (unsigned long)__irqentry_text_end);
++}
++
+ int arch_trampoline_kprobe(struct kprobe *p)
+ {
+ return 0;
iommu-io-pgtable-arm-fix-stage-2-map-unmap-for-concatenated-tables.patch
leds-lp55xx-remove-redundant-test-for-invalid-channel-number.patch
clk-qcom-gcc-qcs404-fix-initial-rate-of-gpll3.patch
+ad7780-fix-division-by-zero-in-ad7780_write_raw.patch
+arm-9429-1-ioremap-sync-pgds-for-vmalloc-shadow.patch
+s390-entry-mark-irq-entries-to-fix-stack-depot-warnings.patch
+arm-9430-1-entry-do-a-dummy-read-from-vmap-shadow.patch
+arm-9431-1-mm-pair-atomic_set_release-with-_read_acquire.patch
+mm-slub-avoid-list-corruption-when-removing-a-slab-from-the-full-list.patch
+ceph-extract-entity-name-from-device-id.patch