From: Greg Kroah-Hartman
Date: Thu, 2 May 2019 14:19:56 +0000 (+0200)
Subject: 4.9-stable patches
X-Git-Tag: v4.9.173~1
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=8766998333f58c1886d88b10f2b9e9ea3de9583e;p=thirdparty%2Fkernel%2Fstable-queue.git

4.9-stable patches

added patches:
	vfio-type1-limit-dma-mappings-per-container.patch
---

diff --git a/queue-4.9/series b/queue-4.9/series
index 1150bf2da9d..2b760d1fdec 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -29,3 +29,4 @@ libata-fix-using-dma-buffers-on-stack.patch
 gpio-of-fix-of_gpiochip_add-error-path.patch
 kconfig-mn-conf-handle-backspace-h-key.patch
 leds-pca9532-fix-a-potential-null-pointer-dereferenc.patch
+vfio-type1-limit-dma-mappings-per-container.patch
diff --git a/queue-4.9/vfio-type1-limit-dma-mappings-per-container.patch b/queue-4.9/vfio-type1-limit-dma-mappings-per-container.patch
new file mode 100644
index 00000000000..3de6abeca6a
--- /dev/null
+++ b/queue-4.9/vfio-type1-limit-dma-mappings-per-container.patch
@@ -0,0 +1,93 @@
+From 492855939bdb59c6f947b0b5b44af9ad82b7e38c Mon Sep 17 00:00:00 2001
+From: Alex Williamson
+Date: Wed, 3 Apr 2019 12:36:21 -0600
+Subject: vfio/type1: Limit DMA mappings per container
+
+From: Alex Williamson
+
+commit 492855939bdb59c6f947b0b5b44af9ad82b7e38c upstream.
+
+Memory backed DMA mappings are accounted against a user's locked
+memory limit, including multiple mappings of the same memory. This
+accounting bounds the number of such mappings that a user can create.
+However, DMA mappings that are not backed by memory, such as DMA
+mappings of device MMIO via mmaps, do not make use of page pinning
+and therefore do not count against the user's locked memory limit.
+These mappings still consume memory, but the memory is not well
+associated to the process for the purpose of oom killing a task.
+
+To add bounding on this use case, we introduce a limit to the total
+number of concurrent DMA mappings that a user is allowed to create.
+This limit is exposed as a tunable module option where the default
+value of 64K is expected to be well in excess of any reasonable use
+case (a large virtual machine configuration would typically only make
+use of tens of concurrent mappings).
+
+This fixes CVE-2019-3882.
+
+Reviewed-by: Eric Auger
+Tested-by: Eric Auger
+Reviewed-by: Peter Xu
+Reviewed-by: Cornelia Huck
+Signed-off-by: Alex Williamson
+[groeck: Adjust for missing upstream commit]
+Signed-off-by: Guenter Roeck
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/vfio/vfio_iommu_type1.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -53,10 +53,16 @@ module_param_named(disable_hugepages,
+ MODULE_PARM_DESC(disable_hugepages,
+          "Disable VFIO IOMMU support for IOMMU hugepages.");
+ 
++static unsigned int dma_entry_limit __read_mostly = U16_MAX;
++module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
++MODULE_PARM_DESC(dma_entry_limit,
++         "Maximum number of user DMA mappings per container (65535).");
++
+ struct vfio_iommu {
+         struct list_head        domain_list;
+         struct mutex            lock;
+         struct rb_root          dma_list;
++        unsigned int            dma_avail;
+         bool                    v2;
+         bool                    nesting;
+ };
+@@ -384,6 +390,7 @@ static void vfio_remove_dma(struct vfio_
+         vfio_unmap_unpin(iommu, dma);
+         vfio_unlink_dma(iommu, dma);
+         kfree(dma);
++        iommu->dma_avail++;
+ }
+ 
+ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
+@@ -584,12 +591,18 @@ static int vfio_dma_do_map(struct vfio_i
+                 return -EEXIST;
+         }
+ 
++        if (!iommu->dma_avail) {
++                mutex_unlock(&iommu->lock);
++                return -ENOSPC;
++        }
++
+         dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+         if (!dma) {
+                 mutex_unlock(&iommu->lock);
+                 return -ENOMEM;
+         }
+ 
++        iommu->dma_avail--;
+         dma->iova = iova;
+         dma->vaddr = vaddr;
+         dma->prot = prot;
+@@ -905,6 +918,7 @@ static void *vfio_iommu_type1_open(unsig
+ 
+         INIT_LIST_HEAD(&iommu->domain_list);
+         iommu->dma_list = RB_ROOT;
++        iommu->dma_avail = dma_entry_limit;
+         mutex_init(&iommu->lock);
+ 
+         return iommu;
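
For context on the user-visible effect of this backport: the per-container cap defaults to U16_MAX (65535) concurrent mappings and, since the parameter is registered with mode 0644, it can typically be adjusted through the dma_entry_limit module parameter (at load time, or at runtime via /sys/module/vfio_iommu_type1/parameters/dma_entry_limit); once a container exhausts its budget, further VFIO_IOMMU_MAP_DMA calls fail with -ENOSPC until entries are unmapped. The sketch below is illustrative only and is not part of the patch; container_fd, buf, iova and size are assumed to come from an already configured VFIO type1 container.

/*
 * Illustrative user-space sketch (not part of the patch): map a buffer
 * into a VFIO type1 container and handle the -ENOSPC introduced by this
 * change when the per-container mapping budget (dma_entry_limit) runs out.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int map_one(int container_fd, void *buf, uint64_t iova, uint64_t size)
{
        struct vfio_iommu_type1_dma_map map = {
                .argsz = sizeof(map),
                .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
                .vaddr = (uintptr_t)buf,        /* process virtual address */
                .iova  = iova,                  /* IOVA seen by the device */
                .size  = size,
        };

        if (ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map) == 0)
                return 0;

        if (errno == ENOSPC)
                /* New with this patch: the container's mapping count limit
                 * was hit; unmap entries or raise dma_entry_limit. */
                fprintf(stderr, "DMA mapping limit reached for container\n");
        else
                fprintf(stderr, "VFIO_IOMMU_MAP_DMA failed: %s\n",
                        strerror(errno));
        return -1;
}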