--- /dev/null
+From e169bd4fb2b36c4b2bee63c35c740c85daeb2e86 Mon Sep 17 00:00:00 2001
+From: Maksim Kiselev <bigunclemax@gmail.com>
+Date: Wed, 24 Jan 2024 10:24:36 +0300
+Subject: aoe: avoid potential deadlock at set_capacity
+
+From: Maksim Kiselev <bigunclemax@gmail.com>
+
+commit e169bd4fb2b36c4b2bee63c35c740c85daeb2e86 upstream.
+
+Move set_capacity() outside of the section protected by (&d->lock),
+to avoid a possible interrupt-unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+[1] lock(&bdev->bd_size_lock);
+ local_irq_disable();
+ [2] lock(&d->lock);
+ [3] lock(&bdev->bd_size_lock);
+ <Interrupt>
+[4] lock(&d->lock);
+
+ *** DEADLOCK ***
+
+Here [1] (&bdev->bd_size_lock) is held by zram_add()->set_capacity(),
+and [2] (&d->lock) is held by aoeblk_gdalloc(), which then tries to
+acquire [3] (&bdev->bd_size_lock) in its set_capacity() call. In this
+situation an attempt to acquire [4] (&d->lock) from aoecmd_cfg_rsp()
+leads to deadlock.
+
+So the simplest solution is to break the lock dependency
+[2] (&d->lock) -> [3] (&bdev->bd_size_lock) by moving set_capacity()
+outside the locked section.
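+
+For illustration only (not part of the patch), a minimal userspace
+sketch of the pattern the fix applies, with pthread mutexes standing
+in for the kernel spinlocks; all names here are hypothetical:
+
+    #include <pthread.h>
+
+    struct dev_model {
+            pthread_mutex_t lock;          /* stands in for d->lock */
+            unsigned long long ssize;      /* device size in sectors */
+    };
+
+    /* Stands in for set_capacity(): it takes its own internal lock
+     * (the kernel's bdev->bd_size_lock), so it must not be called
+     * with dev_model.lock held. */
+    static void publish_capacity(unsigned long long sectors)
+    {
+            static pthread_mutex_t size_lock = PTHREAD_MUTEX_INITIALIZER;
+
+            pthread_mutex_lock(&size_lock);
+            (void)sectors;      /* a real implementation records this */
+            pthread_mutex_unlock(&size_lock);
+    }
+
+    static void gdalloc_pattern(struct dev_model *d)
+    {
+            unsigned long long ssize;
+
+            pthread_mutex_lock(&d->lock);
+            ssize = d->ssize;              /* snapshot under d->lock */
+            pthread_mutex_unlock(&d->lock);
+
+            publish_capacity(ssize);       /* [2] -> [3] dependency broken */
+    }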
+
+Signed-off-by: Maksim Kiselev <bigunclemax@gmail.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20240124072436.3745720-2-bigunclemax@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+[ Larry: backport to 5.15.y. Minor conflict resolved due to missing commit d9c2bd252a457
+ aoe: add error handling support for add_disk() ]
+Signed-off-by: Larry Bassel <larry.bassel@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/aoe/aoeblk.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/block/aoe/aoeblk.c
++++ b/drivers/block/aoe/aoeblk.c
+@@ -346,6 +346,7 @@ aoeblk_gdalloc(void *vp)
+ struct gendisk *gd;
+ mempool_t *mp;
+ struct blk_mq_tag_set *set;
++ sector_t ssize;
+ ulong flags;
+ int late = 0;
+ int err;
+@@ -408,7 +409,7 @@ aoeblk_gdalloc(void *vp)
+ gd->minors = AOE_PARTITIONS;
+ gd->fops = &aoe_bdops;
+ gd->private_data = d;
+- set_capacity(gd, d->ssize);
++ ssize = d->ssize;
+ snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
+ d->aoemajor, d->aoeminor);
+
+@@ -417,6 +418,8 @@ aoeblk_gdalloc(void *vp)
+
+ spin_unlock_irqrestore(&d->lock, flags);
+
++ set_capacity(gd, ssize);
++
+ device_add_disk(NULL, gd, aoe_attr_groups);
+ aoedisk_add_debugfs(d);
+
--- /dev/null
+From 973c7a0d8a38e675570c3336c664f5610bd4eb19 Mon Sep 17 00:00:00 2001
+From: Andrii Nakryiko <andrii@kernel.org>
+Date: Thu, 9 Nov 2023 16:26:37 -0800
+Subject: bpf: fix precision backtracking instruction iteration
+
+From: Andrii Nakryiko <andrii@kernel.org>
+
+commit 4bb7ea946a370707315ab774432963ce47291946 upstream.
+
+Fix an edge case in __mark_chain_precision() which prematurely stops
+backtracking instructions in a state if it happens that the state's
+first and last instruction indexes are the same. This situation
+doesn't necessarily mean that no instructions were simulated in the
+state; it may be that we started from that instruction, jumped around
+a bit, and then ended up at the same instruction before checkpointing
+or marking precision.
+
+To distinguish between these two possible situations, we need to
+consult the jump history. If it's empty or contains a single record
+"bridging" the parent state and the first instruction of the processed
+state, then we indeed backtracked all instructions in this state. But
+if the history is not empty, we are definitely not done yet.
+
+Move this logic inside get_prev_insn_idx() to contain it more nicely.
+Use the -ENOENT return code to denote the "we are out of instructions"
+situation.
+
+This bug was exposed by verifier_loop1.c's bounded_recursion subtest, once
+the next fix in this patch set is applied.
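+
+For reference, a self-contained C model of the fixed helper, to read
+alongside the hunk below; the struct names are simplified stand-ins
+for the verifier's types:
+
+    #include <errno.h>
+
+    struct jmp_entry { int prev_idx, idx; };
+
+    struct state_model {
+            int first_insn_idx;
+            struct jmp_entry *jmp_history;
+    };
+
+    /* i is the current insn index, *history the number of jump-history
+     * entries not yet consumed while backtracking. */
+    static int prev_insn_idx(struct state_model *st, int i,
+                             unsigned int *history)
+    {
+            unsigned int cnt = *history;
+
+            if (i == st->first_insn_idx) {
+                    if (cnt == 0)
+                            return -ENOENT; /* state fully backtracked */
+                    if (cnt == 1 && st->jmp_history[0].idx == i)
+                            return -ENOENT; /* only the parent bridge left */
+            }
+
+            if (cnt && st->jmp_history[cnt - 1].idx == i) {
+                    i = st->jmp_history[cnt - 1].prev_idx; /* came via jump */
+                    (*history)--;
+            } else {
+                    i--;    /* straight-line execution */
+            }
+            return i;
+    }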
+
+Acked-by: Eduard Zingerman <eddyz87@gmail.com>
+Fixes: b5dc0163d8fd ("bpf: precise scalar_value tracking")
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/r/20231110002638.4168352-3-andrii@kernel.org
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Aaron Lu <ziqianlu@bytedance.com>
+Reported-by: Wei Wei <weiwei.danny@bytedance.com>
+Closes: https://lore.kernel.org/all/20250605070921.GA3795@bytedance/
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 21 +++++++++++++++++++--
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2134,12 +2134,29 @@ static int push_jmp_history(struct bpf_v
+
+ /* Backtrack one insn at a time. If idx is not at the top of recorded
+ * history then previous instruction came from straight line execution.
++ * Return -ENOENT if we exhausted all instructions within given state.
++ *
++ * It's legal to have a bit of a looping with the same starting and ending
++ * insn index within the same state, e.g.: 3->4->5->3, so just because current
++ * instruction index is the same as state's first_idx doesn't mean we are
++ * done. If there is still some jump history left, we should keep going. We
++ * need to take into account that we might have a jump history between given
++ * state's parent and itself, due to checkpointing. In this case, we'll have
++ * history entry recording a jump from last instruction of parent state and
++ * first instruction of given state.
+ */
+ static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
+ u32 *history)
+ {
+ u32 cnt = *history;
+
++ if (i == st->first_insn_idx) {
++ if (cnt == 0)
++ return -ENOENT;
++ if (cnt == 1 && st->jmp_history[0].idx == i)
++ return -ENOENT;
++ }
++
+ if (cnt && st->jmp_history[cnt - 1].idx == i) {
+ i = st->jmp_history[cnt - 1].prev_idx;
+ (*history)--;
+@@ -2630,9 +2647,9 @@ static int __mark_chain_precision(struct
+ * Nothing to be tracked further in the parent state.
+ */
+ return 0;
+- if (i == first_idx)
+- break;
+ i = get_prev_insn_idx(st, i, &history);
++ if (i == -ENOENT)
++ break;
+ if (i >= env->prog->len) {
+ /* This can happen if backtracking reached insn 0
+ * and there are still reg_mask or stack_mask
--- /dev/null
+From a454d84ee20baf7bd7be90721b9821f73c7d23d9 Mon Sep 17 00:00:00 2001
+From: John Fastabend <john.fastabend@gmail.com>
+Date: Fri, 1 Sep 2023 13:21:37 -0700
+Subject: bpf, sockmap: Fix skb refcnt race after locking changes
+
+From: John Fastabend <john.fastabend@gmail.com>
+
+commit a454d84ee20baf7bd7be90721b9821f73c7d23d9 upstream.
+
+There is a race where skbs from the sk_psock_backlog can be referenced
+after the userspace side has already consume_skb()'d the sk_buff and
+its refcnt dropped to zero, causing a use-after-free.
+
+The flow is the following:
+
+ while ((skb = skb_peek(&psock->ingress_skb))
+   sk_psock_handle_skb(psock, skb, ..., ingress)
+     if (!ingress) ...
+     sk_psock_skb_ingress
+       sk_psock_skb_ingress_enqueue(skb)
+         msg->skb = skb
+         sk_psock_queue_msg(psock, msg)
+   skb_dequeue(&psock->ingress_skb)
+
+The sk_psock_queue_msg() puts the msg on the ingress_msg queue. This is
+what the application reads when recvmsg() is called. An application can
+read this anytime after the msg is placed on the queue. The recvmsg hook
+will also read msg->skb, and after user space reads the msg it will call
+consume_skb(skb) on it, effectively freeing it.
+
+But the race is above, where the backlog queue still holds a reference
+to the skb and calls skb_dequeue(). If the skb_dequeue() happens after
+the user reads and frees the skb, we have a use-after-free.
+
+The !ingress case does not suffer from this problem because it uses
+sendmsg_*(sk, msg) which does not pass the sk_buff further down the
+stack.
+
+The following splat was observed with 'test_progs -t sockmap_listen':
+
+ [ 1022.710250][ T2556] general protection fault, ...
+ [...]
+ [ 1022.712830][ T2556] Workqueue: events sk_psock_backlog
+ [ 1022.713262][ T2556] RIP: 0010:skb_dequeue+0x4c/0x80
+ [ 1022.713653][ T2556] Code: ...
+ [...]
+ [ 1022.720699][ T2556] Call Trace:
+ [ 1022.720984][ T2556] <TASK>
+ [ 1022.721254][ T2556] ? die_addr+0x32/0x80
+ [ 1022.721589][ T2556] ? exc_general_protection+0x25a/0x4b0
+ [ 1022.722026][ T2556] ? asm_exc_general_protection+0x22/0x30
+ [ 1022.722489][ T2556] ? skb_dequeue+0x4c/0x80
+ [ 1022.722854][ T2556] sk_psock_backlog+0x27a/0x300
+ [ 1022.723243][ T2556] process_one_work+0x2a7/0x5b0
+ [ 1022.723633][ T2556] worker_thread+0x4f/0x3a0
+ [ 1022.723998][ T2556] ? __pfx_worker_thread+0x10/0x10
+ [ 1022.724386][ T2556] kthread+0xfd/0x130
+ [ 1022.724709][ T2556] ? __pfx_kthread+0x10/0x10
+ [ 1022.725066][ T2556] ret_from_fork+0x2d/0x50
+ [ 1022.725409][ T2556] ? __pfx_kthread+0x10/0x10
+ [ 1022.725799][ T2556] ret_from_fork_asm+0x1b/0x30
+ [ 1022.726201][ T2556] </TASK>
+
+To fix this, add an skb_get() before passing the skb to be enqueued on
+the ingress queue. This bumps the skb->users refcnt so that consume_skb()
+and kfree_skb() will not immediately free the sk_buff. With this we can
+be sure the skb is still around when we do the dequeue. Then we just
+need to decrement the refcnt, or free the skb, in the backlog case,
+which we do by calling kfree_skb() in the ingress case as well as the
+sendmsg case.
+
+Before the locking change referenced in the Fixes tag we had the sock
+locked, so we couldn't race with the user and there was no issue here.
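+
+For illustration only, a userspace model of the refcount pattern the
+fix applies (single-threaded, ignoring the atomicity a real skb
+refcount needs; all names are hypothetical):
+
+    #include <stdlib.h>
+
+    struct buf { int users; };                 /* like skb->users */
+
+    static struct buf *buf_get(struct buf *b)  /* like skb_get() */
+    {
+            b->users++;
+            return b;
+    }
+
+    static void buf_put(struct buf *b)  /* like consume_skb()/kfree_skb() */
+    {
+            if (--b->users == 0)
+                    free(b);
+    }
+
+    /* Take an extra reference before handing the buffer to a second
+     * owner (the ingress msg queue), so the backlog and the reader
+     * each drop their own reference, in either order. */
+    static void backlog_ingress(struct buf *b)
+    {
+            buf_get(b);     /* reference owned by the msg queue */
+            /* ... enqueue b; the reader calls buf_put() when done ... */
+            buf_put(b);     /* backlog's reference, dropped at dequeue */
+    }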
+
+Fixes: 799aa7f98d53e ("skmsg: Avoid lock_sock() in sk_psock_backlog()")
+Reported-by: Jiri Olsa <jolsa@kernel.org>
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Tested-by: Xu Kuohai <xukuohai@huawei.com>
+Tested-by: Jiri Olsa <jolsa@kernel.org>
+Link: https://lore.kernel.org/bpf/20230901202137.214666-1-john.fastabend@gmail.com
+Signed-off-by: Pranav Tyagi <pranav.tyagi03@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skmsg.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -608,12 +608,18 @@ static int sk_psock_skb_ingress_self(str
+ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
+ u32 off, u32 len, bool ingress)
+ {
++ int err = 0;
++
+ if (!ingress) {
+ if (!sock_writeable(psock->sk))
+ return -EAGAIN;
+ return skb_send_sock(psock->sk, skb, off, len);
+ }
+- return sk_psock_skb_ingress(psock, skb, off, len);
++ skb_get(skb);
++ err = sk_psock_skb_ingress(psock, skb, off, len);
++ if (err < 0)
++ kfree_skb(skb);
++ return err;
+ }
+
+ static void sk_psock_skb_state(struct sk_psock *psock,
+@@ -693,9 +699,7 @@ static void sk_psock_backlog(struct work
+ /* The entire skb sent, clear state */
+ sk_psock_skb_state(psock, state, 0, 0);
+ skb = skb_dequeue(&psock->ingress_skb);
+- if (!ingress) {
+- kfree_skb(skb);
+- }
++ kfree_skb(skb);
+ }
+ end:
+ mutex_unlock(&psock->work_mutex);
--- /dev/null
+From ce6dede912f064a855acf6f04a04cbb2c25b8c8c Mon Sep 17 00:00:00 2001
+From: Edward Adam Davis <eadavis@qq.com>
+Date: Thu, 11 Apr 2024 20:05:28 +0800
+Subject: jfs: fix null ptr deref in dtInsertEntry
+
+From: Edward Adam Davis <eadavis@qq.com>
+
+commit ce6dede912f064a855acf6f04a04cbb2c25b8c8c upstream.
+
+[syzbot reported]
+general protection fault, probably for non-canonical address 0xdffffc0000000001: 0000 [#1] PREEMPT SMP KASAN PTI
+KASAN: null-ptr-deref in range [0x0000000000000008-0x000000000000000f]
+CPU: 0 PID: 5061 Comm: syz-executor404 Not tainted 6.8.0-syzkaller-08951-gfe46a7dd189e #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/27/2024
+RIP: 0010:dtInsertEntry+0xd0c/0x1780 fs/jfs/jfs_dtree.c:3713
+...
+[Analysis]
+In dtInsertEntry(), when the pointer h has the same value as p, writing
+the name via UniStrncpy_to_le() clears p->header.flag. The previously
+true test "p->header.flag & BT_LEAF" then becomes false, so evaluating
+the condition a second time takes an incorrect branch and accesses the
+uninitialized object ih.
+
+[Fix]
+After getting the page, check the freelist first; if freelist == 0,
+exit dtInsert() and return -EINVAL.
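+
+In isolation, the guard looks like the following simplified C model
+(field types are reduced stand-ins for the jfs on-disk structures):
+
+    #include <errno.h>
+
+    struct dt_header { int freelist; };  /* next free slot; 0 means none */
+    struct dt_page   { struct dt_header header; };
+
+    /* Refuse to insert into a page with no free slots instead of
+     * walking into uninitialized ones. */
+    static int dt_insert_check(const struct dt_page *p)
+    {
+            if (p->header.freelist == 0)
+                    return -EINVAL;
+            return 0;       /* safe to proceed with the insert */
+    }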
+
+Reported-by: syzbot+bba84aef3a26fb93deb9@syzkaller.appspotmail.com
+Signed-off-by: Edward Adam Davis <eadavis@qq.com>
+Signed-off-by: Dave Kleikamp <dave.kleikamp@oracle.com>
+Signed-off-by: Aditya Dutt <duttaditya18@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/jfs/jfs_dtree.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/jfs/jfs_dtree.c
++++ b/fs/jfs/jfs_dtree.c
+@@ -835,6 +835,8 @@ int dtInsert(tid_t tid, struct inode *ip
+ * the full page.
+ */
+ DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
++ if (p->header.freelist == 0)
++ return -EINVAL;
+
+ /*
+ * insert entry for new key
--- /dev/null
+From 880a88f318cf1d2a0f4c0a7ff7b07e2062b434a4 Mon Sep 17 00:00:00 2001
+From: David Howells <dhowells@redhat.com>
+Date: Tue, 8 Jul 2025 22:15:04 +0100
+Subject: rxrpc: Fix oops due to non-existence of prealloc backlog struct
+
+From: David Howells <dhowells@redhat.com>
+
+commit 880a88f318cf1d2a0f4c0a7ff7b07e2062b434a4 upstream.
+
+If an AF_RXRPC service socket is opened and bound, but no calls are
+preallocated, then rxrpc_alloc_incoming_call() will oops because the
+rxrpc_backlog struct doesn't get allocated until the first
+preallocation is made.
+
+Fix this by returning NULL from rxrpc_alloc_incoming_call() if there is no
+backlog struct. This will cause the incoming call to be aborted.
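+
+A reduced C model of the guard and the ring-emptiness check that
+follows it (the struct layout is a hypothetical stand-in for the real
+rxrpc_backlog):
+
+    struct backlog_model {
+            unsigned short call_backlog_head;  /* producer index */
+            unsigned short call_backlog_tail;  /* consumer index */
+    };
+
+    /* The backlog struct is allocated lazily on the first
+     * preallocation, so test the pointer before the ring indices. */
+    static int backlog_has_call(const struct backlog_model *b)
+    {
+            if (!b)
+                    return 0;  /* nothing ever preallocated: abort call */
+            return b->call_backlog_head != b->call_backlog_tail;
+    }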
+
+Reported-by: Junvyyang, Tencent Zhuque Lab <zhuque@tencent.com>
+Suggested-by: Junvyyang, Tencent Zhuque Lab <zhuque@tencent.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: LePremierHomme <kwqcheii@proton.me>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: Willy Tarreau <w@1wt.eu>
+cc: Simon Horman <horms@kernel.org>
+cc: linux-afs@lists.infradead.org
+Link: https://patch.msgid.link/20250708211506.2699012-3-dhowells@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rxrpc/call_accept.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/rxrpc/call_accept.c
++++ b/net/rxrpc/call_accept.c
+@@ -270,6 +270,9 @@ static struct rxrpc_call *rxrpc_alloc_in
+ unsigned short call_tail, conn_tail, peer_tail;
+ unsigned short call_count, conn_count;
+
++ if (!b)
++ return NULL;
++
+ /* #calls >= #conns >= #peers must hold true. */
+ call_head = smp_load_acquire(&b->call_backlog_head);
+ call_tail = b->call_backlog_tail;
x86-cpu-amd-properly-check-the-tsa-microcode.patch
fs-proc-do_task_stat-use-__for_each_thread.patch
ice-safer-stats-processing.patch
+rxrpc-fix-oops-due-to-non-existence-of-prealloc-backlog-struct.patch
+bpf-fix-precision-backtracking-instruction-iteration.patch
+thermal-int340x_thermal-handle-data_vault-when-the-value-is-zero_size_ptr.patch
+aoe-avoid-potential-deadlock-at-set_capacity.patch
+bpf-sockmap-fix-skb-refcnt-race-after-locking-changes.patch
+jfs-fix-null-ptr-deref-in-dtinsertentry.patch
+xen-replace-xen_remap-with-memremap.patch
--- /dev/null
+From 7931e28098a4c1a2a6802510b0cbe57546d2049d Mon Sep 17 00:00:00 2001
+From: "Lee, Chun-Yi" <joeyli.kernel@gmail.com>
+Date: Mon, 8 Aug 2022 21:21:58 +0800
+Subject: thermal/int340x_thermal: handle data_vault when the value is ZERO_SIZE_PTR
+
+From: Lee, Chun-Yi <joeyli.kernel@gmail.com>
+
+commit 7931e28098a4c1a2a6802510b0cbe57546d2049d upstream.
+
+In some cases, the GDDV returns a package with a buffer which has
+zero length. This causes kmemdup() to return ZERO_SIZE_PTR (0x10).
+
+data_vault_read() then hits a NULL pointer dereference when accessing
+the 0x10 value in data_vault.
+
+[   71.024560] BUG: kernel NULL pointer dereference, address: 0000000000000010
+
+This patch uses ZERO_OR_NULL_PTR() to check for a ZERO_SIZE_PTR or
+NULL value in data_vault.
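+
+For reference, a userspace model of why a plain NULL check misses this
+case; the two macros mirror the kernel's definitions in <linux/slab.h>:
+
+    #include <stdint.h>
+
+    /* A zero-length allocation returns a magic non-NULL cookie. */
+    #define ZERO_SIZE_PTR ((void *)16)
+    #define ZERO_OR_NULL_PTR(x) \
+            ((uintptr_t)(x) <= (uintptr_t)ZERO_SIZE_PTR)
+
+    static int data_vault_usable(const void *data_vault)
+    {
+            /* !data_vault lets the 0x10 cookie through;
+             * ZERO_OR_NULL_PTR() catches both cases. */
+            return !ZERO_OR_NULL_PTR(data_vault);
+    }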
+
+Signed-off-by: "Lee, Chun-Yi" <jlee@suse.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+[ Larry: backport to 5.15.y. Minor conflict resolved due to missing commit 9e5d3d6be664
+ thermal: int340x: Consolidate freeing of acpi_buffer pointer ]
+Signed-off-by: Larry Bassel <larry.bassel@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thermal/intel/int340x_thermal/int3400_thermal.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
++++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+@@ -469,7 +469,7 @@ static void int3400_setup_gddv(struct in
+ priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer,
+ obj->package.elements[0].buffer.length,
+ GFP_KERNEL);
+- if (!priv->data_vault) {
++ if (ZERO_OR_NULL_PTR(priv->data_vault)) {
+ kfree(buffer.pointer);
+ return;
+ }
+@@ -540,7 +540,7 @@ static int int3400_thermal_probe(struct
+ goto free_imok;
+ }
+
+- if (priv->data_vault) {
++ if (!ZERO_OR_NULL_PTR(priv->data_vault)) {
+ result = sysfs_create_group(&pdev->dev.kobj,
+ &data_attribute_group);
+ if (result)
+@@ -558,7 +558,8 @@ static int int3400_thermal_probe(struct
+ free_sysfs:
+ cleanup_odvp(priv);
+ if (priv->data_vault) {
+- sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
++ if (!ZERO_OR_NULL_PTR(priv->data_vault))
++ sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
+ kfree(priv->data_vault);
+ }
+ free_uuid:
+@@ -590,7 +591,7 @@ static int int3400_thermal_remove(struct
+ if (!priv->rel_misc_dev_res)
+ acpi_thermal_rel_misc_device_remove(priv->adev->handle);
+
+- if (priv->data_vault)
++ if (!ZERO_OR_NULL_PTR(priv->data_vault))
+ sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
+ sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
+ sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group);
--- /dev/null
+From 41925b105e345ebc84cedb64f59d20cb14a62613 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Mon, 30 May 2022 10:26:34 +0200
+Subject: xen: replace xen_remap() with memremap()
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 41925b105e345ebc84cedb64f59d20cb14a62613 upstream.
+
+xen_remap() is used to establish mappings for frames not under direct
+control of the kernel: for Xenstore and console ring pages, and for
+grant pages of non-PV guests.
+
+Today xen_remap() is defined to use ioremap() on x86 (doing uncached
+mappings), and ioremap_cache() on Arm (doing cached mappings).
+
+Uncached mappings for those use cases are bad for performance, so they
+should be avoided if possible. As none of the xen_remap() use cases
+requires an uncached mapping (the mapped area is always physical RAM),
+a mapping using the standard WB cache mode is fine.
+
+Since sparse flags some of the xen_remap() use cases as inappropriate
+for I/O memory, because the result is not annotated with the __iomem
+modifier, eliminate xen_remap() completely and replace all use cases
+with memremap(), specifying the MEMREMAP_WB caching mode.
+
+xen_unmap() can be replaced with memunmap().
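+
+A kernel-context sketch of the replacement pattern (the helper names
+are hypothetical, not part of this patch):
+
+    #include <linux/io.h>   /* memremap(), memunmap(), MEMREMAP_WB */
+
+    /* The frames being mapped are ordinary RAM, so a write-back
+     * mapping is correct and carries no __iomem annotation. */
+    static void *map_shared_page(phys_addr_t phys, size_t size)
+    {
+            return memremap(phys, size, MEMREMAP_WB);  /* was xen_remap() */
+    }
+
+    static void unmap_shared_page(void *vaddr)
+    {
+            memunmap(vaddr);                           /* was xen_unmap() */
+    }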
+
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Acked-by: Stefano Stabellini <sstabellini@kernel.org>
+Link: https://lore.kernel.org/r/20220530082634.6339-1-jgross@suse.com
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Teddy Astie <teddy.astie@vates.tech> [backport to 5.15.y]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/xen/page.h | 3 ---
+ drivers/tty/hvc/hvc_xen.c | 2 +-
+ drivers/xen/grant-table.c | 6 +++---
+ drivers/xen/xenbus/xenbus_probe.c | 3 +--
+ include/xen/arm/page.h | 3 ---
+ 5 files changed, 5 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/include/asm/xen/page.h
++++ b/arch/x86/include/asm/xen/page.h
+@@ -355,9 +355,6 @@ unsigned long arbitrary_virt_to_mfn(void
+ void make_lowmem_page_readonly(void *vaddr);
+ void make_lowmem_page_readwrite(void *vaddr);
+
+-#define xen_remap(cookie, size) ioremap((cookie), (size))
+-#define xen_unmap(cookie) iounmap((cookie))
+-
+ static inline bool xen_arch_need_swiotlb(struct device *dev,
+ phys_addr_t phys,
+ dma_addr_t dev_addr)
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -270,7 +270,7 @@ static int xen_hvm_console_init(void)
+ if (r < 0 || v == 0)
+ goto err;
+ gfn = v;
+- info->intf = xen_remap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE);
++ info->intf = memremap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB);
+ if (info->intf == NULL)
+ goto err;
+ info->vtermno = HVC_COOKIE;
+--- a/drivers/xen/grant-table.c
++++ b/drivers/xen/grant-table.c
+@@ -743,7 +743,7 @@ int gnttab_setup_auto_xlat_frames(phys_a
+ if (xen_auto_xlat_grant_frames.count)
+ return -EINVAL;
+
+- vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
++ vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
+ if (vaddr == NULL) {
+ pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
+ &addr);
+@@ -751,7 +751,7 @@ int gnttab_setup_auto_xlat_frames(phys_a
+ }
+ pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
+ if (!pfn) {
+- xen_unmap(vaddr);
++ memunmap(vaddr);
+ return -ENOMEM;
+ }
+ for (i = 0; i < max_nr_gframes; i++)
+@@ -770,7 +770,7 @@ void gnttab_free_auto_xlat_frames(void)
+ if (!xen_auto_xlat_grant_frames.count)
+ return;
+ kfree(xen_auto_xlat_grant_frames.pfn);
+- xen_unmap(xen_auto_xlat_grant_frames.vaddr);
++ memunmap(xen_auto_xlat_grant_frames.vaddr);
+
+ xen_auto_xlat_grant_frames.pfn = NULL;
+ xen_auto_xlat_grant_frames.count = 0;
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -982,8 +982,7 @@ static int __init xenbus_init(void)
+ #endif
+ xen_store_gfn = (unsigned long)v;
+ xen_store_interface =
+- xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
+- XEN_PAGE_SIZE);
++ memremap(xen_store_gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB);
+ break;
+ default:
+ pr_warn("Xenstore state unknown\n");
+--- a/include/xen/arm/page.h
++++ b/include/xen/arm/page.h
+@@ -109,9 +109,6 @@ static inline bool set_phys_to_machine(u
+ return __set_phys_to_machine(pfn, mfn);
+ }
+
+-#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
+-#define xen_unmap(cookie) iounmap((cookie))
+-
+ bool xen_arch_need_swiotlb(struct device *dev,
+ phys_addr_t phys,
+ dma_addr_t dev_addr);