git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.4
author Sasha Levin <sashal@kernel.org>
Wed, 13 Jan 2021 17:32:17 +0000 (12:32 -0500)
committer Sasha Levin <sashal@kernel.org>
Wed, 13 Jan 2021 17:32:17 +0000 (12:32 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.4/series [new file with mode: 0644]
queue-5.4/vfio-iommu-add-dma-available-capability.patch [new file with mode: 0644]
queue-5.4/x86-asm-32-add-ends-to-some-functions-and-relabel-wi.patch [new file with mode: 0644]

diff --git a/queue-5.4/series b/queue-5.4/series
new file mode 100644 (file)
index 0000000..b98dfd1
--- /dev/null
@@ -0,0 +1,2 @@
+x86-asm-32-add-ends-to-some-functions-and-relabel-wi.patch
+vfio-iommu-add-dma-available-capability.patch
diff --git a/queue-5.4/vfio-iommu-add-dma-available-capability.patch b/queue-5.4/vfio-iommu-add-dma-available-capability.patch
new file mode 100644 (file)
index 0000000..a5985bc
--- /dev/null
@@ -0,0 +1,100 @@
+From 1515165f3485ea5d4b35037a636f91b8ce6a1aa3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Jan 2021 12:31:28 -0500
+Subject: vfio iommu: Add dma available capability
+
+From: Matthew Rosato <mjrosato@linux.ibm.com>
+
+[ Upstream commit 7d6e1329652ed971d1b6e0e7bea66fba5044e271 ]
+
+The following functional changes were needed for backport:
+- vfio_iommu_type1_get_info doesn't exist, call
+  vfio_iommu_dma_avail_build_caps from vfio_iommu_type1_ioctl.
+- As further fallout from this, vfio_iommu_dma_avail_build_caps must
+  acquire and release the iommu mutex lock.  To do so, the return value is
+  stored in a local variable as in vfio_iommu_iova_build_caps.
+
+Upstream commit description:
+Commit 492855939bdb ("vfio/type1: Limit DMA mappings per container")
+added the ability to limit the number of memory backed DMA mappings.
+However on s390x, when lazy mapping is in use, we use a very large
+number of concurrent mappings.  Let's provide the current allowable
+number of DMA mappings to userspace via the IOMMU info chain so that
+userspace can take appropriate mitigation.
+
+Signed-off-by: Matthew Rosato <mjrosato@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/vfio_iommu_type1.c | 22 ++++++++++++++++++++++
+ include/uapi/linux/vfio.h       | 15 +++++++++++++++
+ 2 files changed, 37 insertions(+)
+
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 3b31e83a92155..bc6ba41686fa3 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -2303,6 +2303,24 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
+       return ret;
+ }
++static int vfio_iommu_dma_avail_build_caps(struct vfio_iommu *iommu,
++                                         struct vfio_info_cap *caps)
++{
++      struct vfio_iommu_type1_info_dma_avail cap_dma_avail;
++      int ret;
++
++      mutex_lock(&iommu->lock);
++      cap_dma_avail.header.id = VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL;
++      cap_dma_avail.header.version = 1;
++
++      cap_dma_avail.avail = iommu->dma_avail;
++
++      ret = vfio_info_add_capability(caps, &cap_dma_avail.header,
++                                     sizeof(cap_dma_avail));
++      mutex_unlock(&iommu->lock);
++      return ret;
++}
++
+ static long vfio_iommu_type1_ioctl(void *iommu_data,
+                                  unsigned int cmd, unsigned long arg)
+ {
+@@ -2349,6 +2367,10 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
+               info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
+               ret = vfio_iommu_iova_build_caps(iommu, &caps);
++
++              if (!ret)
++                      ret = vfio_iommu_dma_avail_build_caps(iommu, &caps);
++
+               if (ret)
+                       return ret;
+diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
+index 9e843a147ead0..cabc93118f9c8 100644
+--- a/include/uapi/linux/vfio.h
++++ b/include/uapi/linux/vfio.h
+@@ -748,6 +748,21 @@ struct vfio_iommu_type1_info_cap_iova_range {
+       struct  vfio_iova_range iova_ranges[];
+ };
++/*
++ * The DMA available capability allows to report the current number of
++ * simultaneously outstanding DMA mappings that are allowed.
++ *
++ * The structure below defines version 1 of this capability.
++ *
++ * avail: specifies the current number of outstanding DMA mappings allowed.
++ */
++#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3
++
++struct vfio_iommu_type1_info_dma_avail {
++      struct  vfio_info_cap_header header;
++      __u32   avail;
++};
++
+ #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
+ /**
+-- 
+2.27.0
+
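
For context, a minimal userspace sketch of how the capability added by the patch above might be consumed once this backport is applied. It is not part of the queued patch: the query_dma_avail() helper is hypothetical, it assumes `container` is an open /dev/vfio/vfio file descriptor with a type1 IOMMU already set and a group attached, and error handling is abbreviated.

#include <linux/vfio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/* Fallback for toolchains whose uapi headers predate this backport;
 * the values mirror the hunk added to include/uapi/linux/vfio.h above. */
#ifndef VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL
#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3
struct vfio_iommu_type1_info_dma_avail {
	struct vfio_info_cap_header header;
	__u32 avail;
};
#endif

/* Return 0 and fill *avail on success, -1 on error or if the running
 * kernel does not report the capability. */
static int query_dma_avail(int container, __u32 *avail)
{
	struct vfio_iommu_type1_info probe = { .argsz = sizeof(probe) };
	struct vfio_iommu_type1_info *info;
	struct vfio_info_cap_header *hdr;
	__u32 off;

	/* First call: the kernel reports how large a buffer is needed
	 * to hold the full capability chain. */
	if (ioctl(container, VFIO_IOMMU_GET_INFO, &probe))
		return -1;

	info = calloc(1, probe.argsz);
	if (!info)
		return -1;
	info->argsz = probe.argsz;

	/* Second call: fetch flags, page sizes and the capability chain. */
	if (ioctl(container, VFIO_IOMMU_GET_INFO, info))
		goto out;

	if (!(info->flags & VFIO_IOMMU_INFO_CAPS) || !info->cap_offset)
		goto out;	/* kernel without capability chain support */

	/* Walk the chain; each header's 'next' is an offset from 'info'. */
	for (off = info->cap_offset; off; off = hdr->next) {
		hdr = (struct vfio_info_cap_header *)((char *)info + off);
		if (hdr->id == VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL) {
			*avail = ((struct vfio_iommu_type1_info_dma_avail *)hdr)->avail;
			free(info);
			return 0;
		}
	}
out:
	free(info);
	return -1;
}

A management application (QEMU, libvirt, or an s390x lazy-mapping user) could poll this value and throttle or consolidate its mappings before hitting the per-container limit introduced by commit 492855939bdb.
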
diff --git a/queue-5.4/x86-asm-32-add-ends-to-some-functions-and-relabel-wi.patch b/queue-5.4/x86-asm-32-add-ends-to-some-functions-and-relabel-wi.patch
new file mode 100644 (file)
index 0000000..a7f6670
--- /dev/null
@@ -0,0 +1,241 @@
+From d0c7e32146c3ff122a064d46e4de344de18b4b14 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2019 13:51:05 +0200
+Subject: x86/asm/32: Add ENDs to some functions and relabel with SYM_CODE_*
+
+From: Jiri Slaby <jslaby@suse.cz>
+
+All these are functions which are invoked from elsewhere but they are
+not typical C functions. So annotate them using the new SYM_CODE_START.
+All these were not balanced with any END, so mark their ends by
+SYM_CODE_END, appropriately.
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> [xen bits]
+Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> [hibernate]
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Len Brown <len.brown@intel.com>
+Cc: linux-arch@vger.kernel.org
+Cc: linux-pm@vger.kernel.org
+Cc: Pavel Machek <pavel@ucw.cz>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Pingfan Liu <kernelfans@gmail.com>
+Cc: Stefano Stabellini <sstabellini@kernel.org>
+Cc: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: x86-ml <x86@kernel.org>
+Cc: xen-devel@lists.xenproject.org
+Link: https://lkml.kernel.org/r/20191011115108.12392-26-jslaby@suse.cz
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/entry/entry_32.S            | 3 ++-
+ arch/x86/kernel/acpi/wakeup_32.S     | 7 ++++---
+ arch/x86/kernel/ftrace_32.S          | 3 ++-
+ arch/x86/kernel/head_32.S            | 3 ++-
+ arch/x86/power/hibernate_asm_32.S    | 6 ++++--
+ arch/x86/realmode/rm/trampoline_32.S | 6 ++++--
+ arch/x86/xen/xen-asm_32.S            | 7 ++++---
+ 7 files changed, 22 insertions(+), 13 deletions(-)
+
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 390edb7638265..bde3e0f85425f 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -869,9 +869,10 @@ GLOBAL(__begin_SYSENTER_singlestep_region)
+  * Xen doesn't set %esp to be precisely what the normal SYSENTER
+  * entry point expects, so fix it up before using the normal path.
+  */
+-ENTRY(xen_sysenter_target)
++SYM_CODE_START(xen_sysenter_target)
+       addl    $5*4, %esp                      /* remove xen-provided frame */
+       jmp     .Lsysenter_past_esp
++SYM_CODE_END(xen_sysenter_target)
+ #endif
+ /*
+diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
+index e95e95960156b..5b076cb79f5fb 100644
+--- a/arch/x86/kernel/acpi/wakeup_32.S
++++ b/arch/x86/kernel/acpi/wakeup_32.S
+@@ -9,8 +9,7 @@
+       .code32
+       ALIGN
+-ENTRY(wakeup_pmode_return)
+-wakeup_pmode_return:
++SYM_CODE_START(wakeup_pmode_return)
+       movw    $__KERNEL_DS, %ax
+       movw    %ax, %ss
+       movw    %ax, %fs
+@@ -39,6 +38,7 @@ wakeup_pmode_return:
+       # jump to place where we left off
+       movl    saved_eip, %eax
+       jmp     *%eax
++SYM_CODE_END(wakeup_pmode_return)
+ bogus_magic:
+       jmp     bogus_magic
+@@ -72,7 +72,7 @@ restore_registers:
+       popfl
+       ret
+-ENTRY(do_suspend_lowlevel)
++SYM_CODE_START(do_suspend_lowlevel)
+       call    save_processor_state
+       call    save_registers
+       pushl   $3
+@@ -87,6 +87,7 @@ ret_point:
+       call    restore_registers
+       call    restore_processor_state
+       ret
++SYM_CODE_END(do_suspend_lowlevel)
+ .data
+ ALIGN
+diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
+index 073aab525d800..2cc0303522c99 100644
+--- a/arch/x86/kernel/ftrace_32.S
++++ b/arch/x86/kernel/ftrace_32.S
+@@ -89,7 +89,7 @@ WEAK(ftrace_stub)
+       ret
+ END(ftrace_caller)
+-ENTRY(ftrace_regs_caller)
++SYM_CODE_START(ftrace_regs_caller)
+       /*
+        * We're here from an mcount/fentry CALL, and the stack frame looks like:
+        *
+@@ -163,6 +163,7 @@ GLOBAL(ftrace_regs_call)
+       popl    %eax
+       jmp     .Lftrace_ret
++SYM_CODE_END(ftrace_regs_caller)
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ ENTRY(ftrace_graph_caller)
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 2e6a0676c1f43..11a5d5ade52ce 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+  * can.
+  */
+ __HEAD
+-ENTRY(startup_32)
++SYM_CODE_START(startup_32)
+       movl pa(initial_stack),%ecx
+       
+       /* test KEEP_SEGMENTS flag to see if the bootloader is asking
+@@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4
+ #else
+       jmp .Ldefault_entry
+ #endif /* CONFIG_PARAVIRT */
++SYM_CODE_END(startup_32)
+ #ifdef CONFIG_HOTPLUG_CPU
+ /*
+diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
+index 6fe383002125f..a19ed3d231853 100644
+--- a/arch/x86/power/hibernate_asm_32.S
++++ b/arch/x86/power/hibernate_asm_32.S
+@@ -35,7 +35,7 @@ ENTRY(swsusp_arch_suspend)
+       ret
+ ENDPROC(swsusp_arch_suspend)
+-ENTRY(restore_image)
++SYM_CODE_START(restore_image)
+       /* prepare to jump to the image kernel */
+       movl    restore_jump_address, %ebx
+       movl    restore_cr3, %ebp
+@@ -45,9 +45,10 @@ ENTRY(restore_image)
+       /* jump to relocated restore code */
+       movl    relocated_restore_code, %eax
+       jmpl    *%eax
++SYM_CODE_END(restore_image)
+ /* code below has been relocated to a safe page */
+-ENTRY(core_restore_code)
++SYM_CODE_START(core_restore_code)
+       movl    temp_pgt, %eax
+       movl    %eax, %cr3
+@@ -77,6 +78,7 @@ copy_loop:
+ done:
+       jmpl    *%ebx
++SYM_CODE_END(core_restore_code)
+       /* code below belongs to the image kernel */
+       .align PAGE_SIZE
+diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
+index 1868b158480d4..3a0ef0d577344 100644
+--- a/arch/x86/realmode/rm/trampoline_32.S
++++ b/arch/x86/realmode/rm/trampoline_32.S
+@@ -29,7 +29,7 @@
+       .code16
+       .balign PAGE_SIZE
+-ENTRY(trampoline_start)
++SYM_CODE_START(trampoline_start)
+       wbinvd                  # Needed for NUMA-Q should be harmless for others
+       LJMPW_RM(1f)
+@@ -54,11 +54,13 @@ ENTRY(trampoline_start)
+       lmsw    %dx                     # into protected mode
+       ljmpl   $__BOOT_CS, $pa_startup_32
++SYM_CODE_END(trampoline_start)
+       .section ".text32","ax"
+       .code32
+-ENTRY(startup_32)                     # note: also used from wakeup_asm.S
++SYM_CODE_START(startup_32)                    # note: also used from wakeup_asm.S
+       jmp     *%eax
++SYM_CODE_END(startup_32)
+       .bss
+       .balign 8
+diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
+index cd177772fe4d5..2712e91553063 100644
+--- a/arch/x86/xen/xen-asm_32.S
++++ b/arch/x86/xen/xen-asm_32.S
+@@ -56,7 +56,7 @@
+       _ASM_EXTABLE(1b,2b)
+ .endm
+-ENTRY(xen_iret)
++SYM_CODE_START(xen_iret)
+       /* test eflags for special cases */
+       testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
+       jnz hyper_iret
+@@ -122,6 +122,7 @@ xen_iret_end_crit:
+ hyper_iret:
+       /* put this out of line since its very rarely used */
+       jmp hypercall_page + __HYPERVISOR_iret * 32
++SYM_CODE_END(xen_iret)
+       .globl xen_iret_start_crit, xen_iret_end_crit
+@@ -152,7 +153,7 @@ hyper_iret:
+  * The only caveat is that if the outer eax hasn't been restored yet (i.e.
+  * it's still on stack), we need to restore its value here.
+  */
+-ENTRY(xen_iret_crit_fixup)
++SYM_CODE_START(xen_iret_crit_fixup)
+       /*
+        * Paranoia: Make sure we're really coming from kernel space.
+        * One could imagine a case where userspace jumps into the
+@@ -179,4 +180,4 @@ ENTRY(xen_iret_crit_fixup)
+ 2:
+       ret
+-END(xen_iret_crit_fixup)
++SYM_CODE_END(xen_iret_crit_fixup)
+-- 
+2.27.0
+
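
For context, a minimal sketch of the relabeling pattern the patch above applies, using a hypothetical my_entry_stub symbol that is not taken from the patch and assuming the SYM_CODE_* macros from <linux/linkage.h> are available in the 5.4 tree, as this backport requires. Code that is jumped to from elsewhere but does not follow the C ABI loses its ENTRY() annotation and gains a balanced SYM_CODE_START()/SYM_CODE_END() pair, giving the symbol an explicit end and hence a recorded size.

#include <linux/linkage.h>

	.text

/* Target of the stub below; purely illustrative. */
SYM_CODE_START(my_common_path)
	ret
SYM_CODE_END(my_common_path)

/* Before this series the stub would have been ENTRY(my_entry_stub)
 * with no terminating END; now both its start and end are marked,
 * matching the conversions in the hunks above. */
SYM_CODE_START(my_entry_stub)
	jmp	my_common_path
SYM_CODE_END(my_entry_stub)
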