From: Greg Kroah-Hartman
Date: Mon, 3 Jul 2017 08:51:15 +0000 (+0200)
Subject: 4.11-stable patches
X-Git-Tag: v3.18.60~38
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=7e309a2b1c764d467b736797f1fd99a21eab09a6;p=thirdparty%2Fkernel%2Fstable-queue.git

4.11-stable patches

added patches:
      mm-vmalloc.c-huge-vmap-fail-gracefully-on-unexpected-huge-vmap-mappings.patch
      pinctrl-amd-use-regular-interrupt-instead-of-chained.patch
---

diff --git a/queue-4.11/mm-vmalloc.c-huge-vmap-fail-gracefully-on-unexpected-huge-vmap-mappings.patch b/queue-4.11/mm-vmalloc.c-huge-vmap-fail-gracefully-on-unexpected-huge-vmap-mappings.patch
new file mode 100644
index 00000000000..2b2849e4de7
--- /dev/null
+++ b/queue-4.11/mm-vmalloc.c-huge-vmap-fail-gracefully-on-unexpected-huge-vmap-mappings.patch
@@ -0,0 +1,73 @@
+From 029c54b09599573015a5c18dbe59cbdf42742237 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel
+Date: Fri, 23 Jun 2017 15:08:41 -0700
+Subject: mm/vmalloc.c: huge-vmap: fail gracefully on unexpected huge vmap mappings
+
+From: Ard Biesheuvel
+
+commit 029c54b09599573015a5c18dbe59cbdf42742237 upstream.
+
+Existing code that uses vmalloc_to_page() may assume that any address
+for which is_vmalloc_addr() returns true may be passed into
+vmalloc_to_page() to retrieve the associated struct page.
+
+This is not an unreasonable assumption to make, but on architectures
+that have CONFIG_HAVE_ARCH_HUGE_VMAP=y, it no longer holds, and we need
+to ensure that vmalloc_to_page() does not go off into the weeds trying
+to dereference huge PUDs or PMDs as table entries.
+
+Given that vmalloc() and vmap() themselves never create huge mappings or
+deal with compound pages at all, there is no correct answer in this
+case, so return NULL instead, and issue a warning.
+
+When reading /proc/kcore on arm64, you will hit an oops as soon as you
+hit the huge mappings used for the various segments that make up the
+mapping of vmlinux. With this patch applied, you will no longer hit the
+oops, but the kcore contents will be incorrect (these regions will be
+zeroed out).
+
+We are fixing this for kcore specifically, so it avoids vread() for
+those regions. At least one other problematic user exists, i.e.,
+/dev/kmem, but that is currently broken on arm64 for other reasons.
+
+Link: http://lkml.kernel.org/r/20170609082226.26152-1-ard.biesheuvel@linaro.org
+Signed-off-by: Ard Biesheuvel
+Acked-by: Mark Rutland
+Reviewed-by: Laura Abbott
+Cc: Michal Hocko
+Cc: zhong jiang
+Cc: Dave Hansen
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/vmalloc.c |   15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -287,10 +287,21 @@ struct page *vmalloc_to_page(const void
+ 	if (p4d_none(*p4d))
+ 		return NULL;
+ 	pud = pud_offset(p4d, addr);
+-	if (pud_none(*pud))
++
++	/*
++	 * Don't dereference bad PUD or PMD (below) entries. This will also
++	 * identify huge mappings, which we may encounter on architectures
++	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
++	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
++	 * not [unambiguously] associated with a struct page, so there is
++	 * no correct value to return for them.
++	 */
++	WARN_ON_ONCE(pud_bad(*pud));
++	if (pud_none(*pud) || pud_bad(*pud))
+ 		return NULL;
+ 	pmd = pmd_offset(pud, addr);
+-	if (pmd_none(*pmd))
++	WARN_ON_ONCE(pmd_bad(*pmd));
++	if (pmd_none(*pmd) || pmd_bad(*pmd))
+ 		return NULL;
+ 
+ 	ptep = pte_offset_map(pmd, addr);
diff --git a/queue-4.11/pinctrl-amd-use-regular-interrupt-instead-of-chained.patch b/queue-4.11/pinctrl-amd-use-regular-interrupt-instead-of-chained.patch
new file mode 100644
index 00000000000..83670189766
--- /dev/null
+++ b/queue-4.11/pinctrl-amd-use-regular-interrupt-instead-of-chained.patch
@@ -0,0 +1,155 @@
+From ba714a9c1dea85e0bf2899d02dfeb9c70040427c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Tue, 23 May 2017 23:23:32 +0200
+Subject: pinctrl/amd: Use regular interrupt instead of chained
+
+From: Thomas Gleixner
+
+commit ba714a9c1dea85e0bf2899d02dfeb9c70040427c upstream.
+
+The AMD pinctrl driver uses a chained interrupt to demultiplex the GPIO
+interrupts. Kevin Vandeventer reported that his new AMD Ryzen locks up
+hard on boot when the AMD pinctrl driver is initialized. The reason is an
+interrupt storm. It's not clear whether that's caused by hardware or
+firmware or both.
+
+Using chained interrupts on X86 is a dangerous endeavour. If a system is
+misconfigured or the hardware is buggy, there is no safety net to catch an
+interrupt storm.
+
+Convert the driver to use a regular interrupt for the demultiplex
+handler. This allows the interrupt storm detector to catch the malfunction
+and lets the system boot up.
+
+This should be backported to stable because it's likely that more users
+will run into this problem as the AMD Ryzen machines are spreading.
+
+Reported-by: Kevin Vandeventer
+Link: https://bugzilla.suse.com/show_bug.cgi?id=1034261
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Linus Walleij
+Signed-off-by: Borislav Petkov
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/pinctrl/pinctrl-amd.c |   91 ++++++++++++++++++------------------
+ 1 file changed, 41 insertions(+), 50 deletions(-)
+
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -495,64 +495,54 @@ static struct irq_chip amd_gpio_irqchip
+ 	.flags = IRQCHIP_SKIP_SET_WAKE,
+ };
+ 
+-static void amd_gpio_irq_handler(struct irq_desc *desc)
++#define PIN_IRQ_PENDING	(BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF))
++
++static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
+ {
+-	u32 i;
+-	u32 off;
+-	u32 reg;
+-	u32 pin_reg;
+-	u64 reg64;
+-	int handled = 0;
+-	unsigned int irq;
++	struct amd_gpio *gpio_dev = dev_id;
++	struct gpio_chip *gc = &gpio_dev->gc;
++	irqreturn_t ret = IRQ_NONE;
++	unsigned int i, irqnr;
+ 	unsigned long flags;
+-	struct irq_chip *chip = irq_desc_get_chip(desc);
+-	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+-	struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
++	u32 *regs, regval;
++	u64 status, mask;
+ 
+-	chained_irq_enter(chip, desc);
+-	/*enable GPIO interrupt again*/
++	/* Read the wake status */
+ 	spin_lock_irqsave(&gpio_dev->lock, flags);
+-	reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
+-	reg64 = reg;
+-	reg64 = reg64 << 32;
+-
+-	reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
+-	reg64 |= reg;
++	status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
++	status <<= 32;
++	status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
+ 	spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ 
+-	/*
+-	 * first 46 bits indicates interrupt status.
+-	 * one bit represents four interrupt sources.
+-	 */
+-	for (off = 0; off < 46 ; off++) {
+-		if (reg64 & BIT(off)) {
+-			for (i = 0; i < 4; i++) {
+-				pin_reg = readl(gpio_dev->base +
+-						(off * 4 + i) * 4);
+-				if ((pin_reg & BIT(INTERRUPT_STS_OFF)) ||
+-					(pin_reg & BIT(WAKE_STS_OFF))) {
+-					irq = irq_find_mapping(gc->irqdomain,
+-								off * 4 + i);
+-					generic_handle_irq(irq);
+-					writel(pin_reg,
+-						gpio_dev->base
+-						+ (off * 4 + i) * 4);
+-					handled++;
+-				}
+-			}
++	/* Bit 0-45 contain the relevant status bits */
++	status &= (1ULL << 46) - 1;
++	regs = gpio_dev->base;
++	for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) {
++		if (!(status & mask))
++			continue;
++		status &= ~mask;
++
++		/* Each status bit covers four pins */
++		for (i = 0; i < 4; i++) {
++			regval = readl(regs + i);
++			if (!(regval & PIN_IRQ_PENDING))
++				continue;
++			irq = irq_find_mapping(gc->irqdomain, irqnr + i);
++			generic_handle_irq(irq);
++			/* Clear interrupt */
++			writel(regval, regs + i);
++			ret = IRQ_HANDLED;
+ 		}
+ 	}
+ 
+-	if (handled == 0)
+-		handle_bad_irq(desc);
+-
++	/* Signal EOI to the GPIO unit */
+ 	spin_lock_irqsave(&gpio_dev->lock, flags);
+-	reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
+-	reg |= EOI_MASK;
+-	writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG);
++	regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
++	regval |= EOI_MASK;
++	writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG);
+ 	spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ 
+-	chained_irq_exit(chip, desc);
++	return ret;
+ }
+ 
+ static int amd_get_groups_count(struct pinctrl_dev *pctldev)
+@@ -821,10 +811,11 @@ static int amd_gpio_probe(struct platfor
+ 		goto out2;
+ 	}
+ 
+-	gpiochip_set_chained_irqchip(&gpio_dev->gc,
+-				&amd_gpio_irqchip,
+-				irq_base,
+-				amd_gpio_irq_handler);
++	ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0,
++			       KBUILD_MODNAME, gpio_dev);
++	if (ret)
++		goto out2;
++
+ 	platform_set_drvdata(pdev, gpio_dev);
+ 
+ 	dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
diff --git a/queue-4.11/series b/queue-4.11/series
index 9e40a7c2b16..329afca6ebf 100644
--- a/queue-4.11/series
+++ b/queue-4.11/series
@@ -49,3 +49,5 @@ perf-x86-intel-uncore-fix-wrong-box-pointer-check.patch
 drm-vmwgfx-free-hash-table-allocated-by-cmdbuf-managed-res-mgr.patch
 dm-thin-do-not-queue-freed-thin-mapping-for-next-stage-processing.patch
 x86-mm-fix-boot-crash-caused-by-incorrect-loop-count-calculation-in-sync_global_pgds.patch
+pinctrl-amd-use-regular-interrupt-instead-of-chained.patch
+mm-vmalloc.c-huge-vmap-fail-gracefully-on-unexpected-huge-vmap-mappings.patch