git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.11-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 10 Oct 2013 23:12:21 +0000 (16:12 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 10 Oct 2013 23:12:21 +0000 (16:12 -0700)
added patches:
acpi-ipmi-fix-atomic-context-requirement-of-ipmi_msg_handler.patch
arm-tegra-unify-tegra-s-kconfig-a-bit-more.patch
drm-nouveau-bios-init-stub-opcode-0xaa.patch
irq-force-hardirq-exit-s-softirq-processing-on-its-own-stack.patch
xfs-fix-node-forward-in-xfs_node_toosmall.patch

queue-3.11/acpi-ipmi-fix-atomic-context-requirement-of-ipmi_msg_handler.patch [new file with mode: 0644]
queue-3.11/arm-tegra-unify-tegra-s-kconfig-a-bit-more.patch [new file with mode: 0644]
queue-3.11/drm-nouveau-bios-init-stub-opcode-0xaa.patch [new file with mode: 0644]
queue-3.11/irq-force-hardirq-exit-s-softirq-processing-on-its-own-stack.patch [new file with mode: 0644]
queue-3.11/series
queue-3.11/xfs-fix-node-forward-in-xfs_node_toosmall.patch [new file with mode: 0644]

diff --git a/queue-3.11/acpi-ipmi-fix-atomic-context-requirement-of-ipmi_msg_handler.patch b/queue-3.11/acpi-ipmi-fix-atomic-context-requirement-of-ipmi_msg_handler.patch
new file mode 100644 (file)
index 0000000..325d2f3
--- /dev/null
@@ -0,0 +1,181 @@
+From 06a8566bcf5cf7db9843a82cde7a33c7bf3947d9 Mon Sep 17 00:00:00 2001
+From: Lv Zheng <lv.zheng@intel.com>
+Date: Fri, 13 Sep 2013 13:13:23 +0800
+Subject: ACPI / IPMI: Fix atomic context requirement of ipmi_msg_handler()
+
+From: Lv Zheng <lv.zheng@intel.com>
+
+commit 06a8566bcf5cf7db9843a82cde7a33c7bf3947d9 upstream.
+
+This patch fixes the issues indicated by the test results that
+ipmi_msg_handler() is invoked in atomic context.
+
+BUG: scheduling while atomic: kipmi0/18933/0x10000100
+Modules linked in: ipmi_si acpi_ipmi ...
+CPU: 3 PID: 18933 Comm: kipmi0 Tainted: G       AW    3.10.0-rc7+ #2
+Hardware name: QCI QSSC-S4R/QSSC-S4R, BIOS QSSC-S4R.QCI.01.00.0027.070120100606 07/01/2010
+ ffff8838245eea00 ffff88103fc63c98 ffffffff814c4a1e ffff88103fc63ca8
+ ffffffff814bfbab ffff88103fc63d28 ffffffff814c73e0 ffff88103933cbd4
+ 0000000000000096 ffff88103fc63ce8 ffff88102f618000 ffff881035c01fd8
+Call Trace:
+ <IRQ>  [<ffffffff814c4a1e>] dump_stack+0x19/0x1b
+ [<ffffffff814bfbab>] __schedule_bug+0x46/0x54
+ [<ffffffff814c73e0>] __schedule+0x83/0x59c
+ [<ffffffff81058853>] __cond_resched+0x22/0x2d
+ [<ffffffff814c794b>] _cond_resched+0x14/0x1d
+ [<ffffffff814c6d82>] mutex_lock+0x11/0x32
+ [<ffffffff8101e1e9>] ? __default_send_IPI_dest_field.constprop.0+0x53/0x58
+ [<ffffffffa09e3f9c>] ipmi_msg_handler+0x23/0x166 [ipmi_si]
+ [<ffffffff812bf6e4>] deliver_response+0x55/0x5a
+ [<ffffffff812c0fd4>] handle_new_recv_msgs+0xb67/0xc65
+ [<ffffffff81007ad1>] ? read_tsc+0x9/0x19
+ [<ffffffff814c8620>] ? _raw_spin_lock_irq+0xa/0xc
+ [<ffffffffa09e1128>] ipmi_thread+0x5c/0x146 [ipmi_si]
+ ...
+
+Also Tony Camuso says:
+
+ We were getting occasional "Scheduling while atomic" call traces
+ during boot on some systems. Problem was first seen on a Cisco C210
+ but we were able to reproduce it on a Cisco c220m3. Setting
+ CONFIG_LOCKDEP and LOCKDEP_SUPPORT to 'y' exposed a lockdep around
+ tx_msg_lock in acpi_ipmi.c struct acpi_ipmi_device.
+
+ =================================
+ [ INFO: inconsistent lock state ]
+ 2.6.32-415.el6.x86_64-debug-splck #1
+ ---------------------------------
+ inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
+ ksoftirqd/3/17 [HC0[0]:SC1[1]:HE1:SE0] takes:
+  (&ipmi_device->tx_msg_lock){+.?...}, at: [<ffffffff81337a27>] ipmi_msg_handler+0x71/0x126
+ {SOFTIRQ-ON-W} state was registered at:
+   [<ffffffff810ba11c>] __lock_acquire+0x63c/0x1570
+   [<ffffffff810bb0f4>] lock_acquire+0xa4/0x120
+   [<ffffffff815581cc>] __mutex_lock_common+0x4c/0x400
+   [<ffffffff815586ea>] mutex_lock_nested+0x4a/0x60
+   [<ffffffff8133789d>] acpi_ipmi_space_handler+0x11b/0x234
+   [<ffffffff81321c62>] acpi_ev_address_space_dispatch+0x170/0x1be
+
+The fix implemented by this change has been tested by Tony:
+
+ Tested the patch in a boot loop with lockdep debug enabled and never
+ saw the problem in over 400 reboots.
+
+Reported-and-tested-by: Tony Camuso <tcamuso@redhat.com>
+Signed-off-by: Lv Zheng <lv.zheng@intel.com>
+Reviewed-by: Huang Ying <ying.huang@intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Cc: Jonghwan Choi <jhbird.choi@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/acpi_ipmi.c |   24 ++++++++++++++----------
+ 1 file changed, 14 insertions(+), 10 deletions(-)
+
+--- a/drivers/acpi/acpi_ipmi.c
++++ b/drivers/acpi/acpi_ipmi.c
+@@ -39,6 +39,7 @@
+ #include <linux/ipmi.h>
+ #include <linux/device.h>
+ #include <linux/pnp.h>
++#include <linux/spinlock.h>
+ MODULE_AUTHOR("Zhao Yakui");
+ MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
+@@ -57,7 +58,7 @@ struct acpi_ipmi_device {
+       struct list_head head;
+       /* the IPMI request message list */
+       struct list_head tx_msg_list;
+-      struct mutex    tx_msg_lock;
++      spinlock_t      tx_msg_lock;
+       acpi_handle handle;
+       struct pnp_dev *pnp_dev;
+       ipmi_user_t     user_interface;
+@@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct
+       struct kernel_ipmi_msg *msg;
+       struct acpi_ipmi_buffer *buffer;
+       struct acpi_ipmi_device *device;
++      unsigned long flags;
+       msg = &tx_msg->tx_message;
+       /*
+@@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct
+       /* Get the msgid */
+       device = tx_msg->device;
+-      mutex_lock(&device->tx_msg_lock);
++      spin_lock_irqsave(&device->tx_msg_lock, flags);
+       device->curr_msgid++;
+       tx_msg->tx_msgid = device->curr_msgid;
+-      mutex_unlock(&device->tx_msg_lock);
++      spin_unlock_irqrestore(&device->tx_msg_lock, flags);
+ }
+ static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
+@@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi
+       int msg_found = 0;
+       struct acpi_ipmi_msg *tx_msg;
+       struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
++      unsigned long flags;
+       if (msg->user != ipmi_device->user_interface) {
+               dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
+@@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi
+               ipmi_free_recv_msg(msg);
+               return;
+       }
+-      mutex_lock(&ipmi_device->tx_msg_lock);
++      spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
+       list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
+               if (msg->msgid == tx_msg->tx_msgid) {
+                       msg_found = 1;
+@@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi
+               }
+       }
+-      mutex_unlock(&ipmi_device->tx_msg_lock);
++      spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+       if (!msg_found) {
+               dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
+                       "returned.\n", msg->msgid);
+@@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, ac
+       struct acpi_ipmi_device *ipmi_device = handler_context;
+       int err, rem_time;
+       acpi_status status;
++      unsigned long flags;
+       /*
+        * IPMI opregion message.
+        * IPMI message is firstly written to the BMC and system software
+@@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, ac
+               return AE_NO_MEMORY;
+       acpi_format_ipmi_msg(tx_msg, address, value);
+-      mutex_lock(&ipmi_device->tx_msg_lock);
++      spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
+       list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
+-      mutex_unlock(&ipmi_device->tx_msg_lock);
++      spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+       err = ipmi_request_settime(ipmi_device->user_interface,
+                                       &tx_msg->addr,
+                                       tx_msg->tx_msgid,
+@@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, ac
+       status = AE_OK;
+ end_label:
+-      mutex_lock(&ipmi_device->tx_msg_lock);
++      spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
+       list_del(&tx_msg->head);
+-      mutex_unlock(&ipmi_device->tx_msg_lock);
++      spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+       kfree(tx_msg);
+       return status;
+ }
+@@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct
+       INIT_LIST_HEAD(&ipmi_device->head);
+-      mutex_init(&ipmi_device->tx_msg_lock);
++      spin_lock_init(&ipmi_device->tx_msg_lock);
+       INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
+       ipmi_install_space_handler(ipmi_device);
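[ The core of the change above is swapping a sleeping lock for a spinlock so the
  tx_msg_list can be touched from atomic context, where ipmi_msg_handler() runs.
  Below is a minimal, self-contained sketch of that pattern, not the driver's own
  code; demo_msg, demo_msg_list, demo_msg_lock and demo_find_msg are illustrative
  names. The point is that a list walked from softirq/hardirq context must be
  protected with spin_lock_irqsave()/spin_unlock_irqrestore() rather than
  mutex_lock(), which may sleep. ]

	/* Sketch only: a list protected by a spinlock so it can be walked
	 * from atomic (e.g. softirq) context.  Names are illustrative. */
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_msg {
		struct list_head head;
		long msgid;
	};

	static LIST_HEAD(demo_msg_list);
	static DEFINE_SPINLOCK(demo_msg_lock);

	/* May be called from atomic context, so no sleeping locks here. */
	static struct demo_msg *demo_find_msg(long msgid)
	{
		struct demo_msg *msg, *found = NULL;
		unsigned long flags;

		spin_lock_irqsave(&demo_msg_lock, flags);
		list_for_each_entry(msg, &demo_msg_list, head) {
			if (msg->msgid == msgid) {
				found = msg;
				break;
			}
		}
		spin_unlock_irqrestore(&demo_msg_lock, flags);
		return found;
	}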
diff --git a/queue-3.11/arm-tegra-unify-tegra-s-kconfig-a-bit-more.patch b/queue-3.11/arm-tegra-unify-tegra-s-kconfig-a-bit-more.patch
new file mode 100644 (file)
index 0000000..145390f
--- /dev/null
@@ -0,0 +1,92 @@
+From 20984c44b5a08620778ea14fa5807489170fd5ca Mon Sep 17 00:00:00 2001
+From: Stephen Warren <swarren@nvidia.com>
+Date: Tue, 6 Aug 2013 14:38:51 -0600
+Subject: ARM: tegra: unify Tegra's Kconfig a bit more
+
+From: Stephen Warren <swarren@nvidia.com>
+
+commit 20984c44b5a08620778ea14fa5807489170fd5ca upstream.
+
+Move all common select clauses from ARCH_TEGRA_*_SOC to ARCH_TEGRA to
+eliminate duplication. The USB-related selects all should have been
+common too, but were missing from Tegra114 previously. Move these to
+ARCH_TEGRA too. The latter fixes a build break when only Tegra114
+support was enabled, but not Tegra20 or Tegra30 support.
+
+Signed-off-by: Stephen Warren <swarren@nvidia.com>
+Reported-by: Paul Walmsley <pwalmsley@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-tegra/Kconfig |   21 ++++++---------------
+ 1 file changed, 6 insertions(+), 15 deletions(-)
+
+--- a/arch/arm/mach-tegra/Kconfig
++++ b/arch/arm/mach-tegra/Kconfig
+@@ -2,18 +2,24 @@ config ARCH_TEGRA
+       bool "NVIDIA Tegra" if ARCH_MULTI_V7
+       select ARCH_HAS_CPUFREQ
+       select ARCH_REQUIRE_GPIOLIB
++      select ARM_GIC
+       select CLKDEV_LOOKUP
+       select CLKSRC_MMIO
+       select CLKSRC_OF
+       select COMMON_CLK
++      select CPU_V7
+       select GENERIC_CLOCKEVENTS
+       select HAVE_ARM_SCU if SMP
+       select HAVE_ARM_TWD if LOCAL_TIMERS
+       select HAVE_CLK
+       select HAVE_SMP
+       select MIGHT_HAVE_CACHE_L2X0
++      select PINCTRL
+       select SOC_BUS
+       select SPARSE_IRQ
++      select USB_ARCH_HAS_EHCI if USB_SUPPORT
++      select USB_ULPI if USB_PHY
++      select USB_ULPI_VIEWPORT if USB_PHY
+       select USE_OF
+       help
+         This enables support for NVIDIA Tegra based systems.
+@@ -27,15 +33,9 @@ config ARCH_TEGRA_2x_SOC
+       select ARM_ERRATA_720789
+       select ARM_ERRATA_754327 if SMP
+       select ARM_ERRATA_764369 if SMP
+-      select ARM_GIC
+-      select CPU_V7
+-      select PINCTRL
+       select PINCTRL_TEGRA20
+       select PL310_ERRATA_727915 if CACHE_L2X0
+       select PL310_ERRATA_769419 if CACHE_L2X0
+-      select USB_ARCH_HAS_EHCI if USB_SUPPORT
+-      select USB_ULPI if USB_PHY
+-      select USB_ULPI_VIEWPORT if USB_PHY
+       help
+         Support for NVIDIA Tegra AP20 and T20 processors, based on the
+         ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
+@@ -44,14 +44,8 @@ config ARCH_TEGRA_3x_SOC
+       bool "Enable support for Tegra30 family"
+       select ARM_ERRATA_754322
+       select ARM_ERRATA_764369 if SMP
+-      select ARM_GIC
+-      select CPU_V7
+-      select PINCTRL
+       select PINCTRL_TEGRA30
+       select PL310_ERRATA_769419 if CACHE_L2X0
+-      select USB_ARCH_HAS_EHCI if USB_SUPPORT
+-      select USB_ULPI if USB_PHY
+-      select USB_ULPI_VIEWPORT if USB_PHY
+       help
+         Support for NVIDIA Tegra T30 processor family, based on the
+         ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
+@@ -59,10 +53,7 @@ config ARCH_TEGRA_3x_SOC
+ config ARCH_TEGRA_114_SOC
+       bool "Enable support for Tegra114 family"
+       select HAVE_ARM_ARCH_TIMER
+-      select ARM_GIC
+       select ARM_L1_CACHE_SHIFT_6
+-      select CPU_V7
+-      select PINCTRL
+       select PINCTRL_TEGRA114
+       help
+         Support for NVIDIA Tegra T114 processor family, based on the
diff --git a/queue-3.11/drm-nouveau-bios-init-stub-opcode-0xaa.patch b/queue-3.11/drm-nouveau-bios-init-stub-opcode-0xaa.patch
new file mode 100644 (file)
index 0000000..e0f87e1
--- /dev/null
@@ -0,0 +1,51 @@
+From 5495e39fb3695182b9f2a72fe4169056cada37a1 Mon Sep 17 00:00:00 2001
+From: Ben Skeggs <bskeggs@redhat.com>
+Date: Tue, 10 Sep 2013 12:11:01 +1000
+Subject: drm/nouveau/bios/init: stub opcode 0xaa
+
+From: Ben Skeggs <bskeggs@redhat.com>
+
+commit 5495e39fb3695182b9f2a72fe4169056cada37a1 upstream.
+
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/nouveau/core/subdev/bios/init.c |   19 +++++++++++++++++--
+ 1 file changed, 17 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
++++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+@@ -579,8 +579,22 @@ static void
+ init_reserved(struct nvbios_init *init)
+ {
+       u8 opcode = nv_ro08(init->bios, init->offset);
+-      trace("RESERVED\t0x%02x\n", opcode);
+-      init->offset += 1;
++      u8 length, i;
++
++      switch (opcode) {
++      case 0xaa:
++              length = 4;
++              break;
++      default:
++              length = 1;
++              break;
++      }
++
++      trace("RESERVED 0x%02x\t", opcode);
++      for (i = 1; i < length; i++)
++              cont(" 0x%02x", nv_ro08(init->bios, init->offset + i));
++      cont("\n");
++      init->offset += length;
+ }
+ /**
+@@ -2135,6 +2149,7 @@ static struct nvbios_init_opcode {
+       [0x99] = { init_zm_auxch },
+       [0x9a] = { init_i2c_long_if },
+       [0xa9] = { init_gpio_ne },
++      [0xaa] = { init_reserved },
+ };
+ #define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
diff --git a/queue-3.11/irq-force-hardirq-exit-s-softirq-processing-on-its-own-stack.patch b/queue-3.11/irq-force-hardirq-exit-s-softirq-processing-on-its-own-stack.patch
new file mode 100644 (file)
index 0000000..5b5db70
--- /dev/null
@@ -0,0 +1,154 @@
+From ded797547548a5b8e7b92383a41e4c0e6b0ecb7f Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <fweisbec@gmail.com>
+Date: Tue, 24 Sep 2013 00:50:25 +0200
+Subject: irq: Force hardirq exit's softirq processing on its own stack
+
+From: Frederic Weisbecker <fweisbec@gmail.com>
+
+commit ded797547548a5b8e7b92383a41e4c0e6b0ecb7f upstream.
+
+The commit facd8b80c67a3cf64a467c4a2ac5fb31f2e6745b
+("irq: Sanitize invoke_softirq") converted irq exit
+calls of do_softirq() to __do_softirq() on all architectures,
+assuming it was only used there for its irq disablement
+properties.
+
+But as a side effect, the softirqs processed in the end
+of the hardirq are always called on the inline current
+stack that is used by irq_exit() instead of the softirq
+stack provided by the archs that override do_softirq().
+
+The result is mostly safe if the architecture runs irq_exit()
+on a separate irq stack because then softirqs are processed
+on that same stack that is near empty at this stage (assuming
+hardirq aren't nesting).
+
+Otherwise irq_exit() runs in the task stack and so does the softirq
+too. The interrupted call stack can be randomly deep already and
+the softirq can dig through it even further. To add insult to the
+injury, this softirq can be interrupted by a new hardirq, maximizing
+the chances for a stack overrun as reported in powerpc for example:
+
+       do_IRQ: stack overflow: 1920
+       CPU: 0 PID: 1602 Comm: qemu-system-ppc Not tainted 3.10.4-300.1.fc19.ppc64p7 #1
+       Call Trace:
+       [c0000000050a8740] .show_stack+0x130/0x200 (unreliable)
+       [c0000000050a8810] .dump_stack+0x28/0x3c
+       [c0000000050a8880] .do_IRQ+0x2b8/0x2c0
+       [c0000000050a8930] hardware_interrupt_common+0x154/0x180
+       --- Exception: 501 at .cp_start_xmit+0x3a4/0x820 [8139cp]
+               LR = .cp_start_xmit+0x390/0x820 [8139cp]
+       [c0000000050a8d40] .dev_hard_start_xmit+0x394/0x640
+       [c0000000050a8e00] .sch_direct_xmit+0x110/0x260
+       [c0000000050a8ea0] .dev_queue_xmit+0x260/0x630
+       [c0000000050a8f40] .br_dev_queue_push_xmit+0xc4/0x130 [bridge]
+       [c0000000050a8fc0] .br_dev_xmit+0x198/0x270 [bridge]
+       [c0000000050a9070] .dev_hard_start_xmit+0x394/0x640
+       [c0000000050a9130] .dev_queue_xmit+0x428/0x630
+       [c0000000050a91d0] .ip_finish_output+0x2a4/0x550
+       [c0000000050a9290] .ip_local_out+0x50/0x70
+       [c0000000050a9310] .ip_queue_xmit+0x148/0x420
+       [c0000000050a93b0] .tcp_transmit_skb+0x4e4/0xaf0
+       [c0000000050a94a0] .__tcp_ack_snd_check+0x7c/0xf0
+       [c0000000050a9520] .tcp_rcv_established+0x1e8/0x930
+       [c0000000050a95f0] .tcp_v4_do_rcv+0x21c/0x570
+       [c0000000050a96c0] .tcp_v4_rcv+0x734/0x930
+       [c0000000050a97a0] .ip_local_deliver_finish+0x184/0x360
+       [c0000000050a9840] .ip_rcv_finish+0x148/0x400
+       [c0000000050a98d0] .__netif_receive_skb_core+0x4f8/0xb00
+       [c0000000050a99d0] .netif_receive_skb+0x44/0x110
+       [c0000000050a9a70] .br_handle_frame_finish+0x2bc/0x3f0 [bridge]
+       [c0000000050a9b20] .br_nf_pre_routing_finish+0x2ac/0x420 [bridge]
+       [c0000000050a9bd0] .br_nf_pre_routing+0x4dc/0x7d0 [bridge]
+       [c0000000050a9c70] .nf_iterate+0x114/0x130
+       [c0000000050a9d30] .nf_hook_slow+0xb4/0x1e0
+       [c0000000050a9e00] .br_handle_frame+0x290/0x330 [bridge]
+       [c0000000050a9ea0] .__netif_receive_skb_core+0x34c/0xb00
+       [c0000000050a9fa0] .netif_receive_skb+0x44/0x110
+       [c0000000050aa040] .napi_gro_receive+0xe8/0x120
+       [c0000000050aa0c0] .cp_rx_poll+0x31c/0x590 [8139cp]
+       [c0000000050aa1d0] .net_rx_action+0x1dc/0x310
+       [c0000000050aa2b0] .__do_softirq+0x158/0x330
+       [c0000000050aa3b0] .irq_exit+0xc8/0x110
+       [c0000000050aa430] .do_IRQ+0xdc/0x2c0
+       [c0000000050aa4e0] hardware_interrupt_common+0x154/0x180
+        --- Exception: 501 at .bad_range+0x1c/0x110
+                LR = .get_page_from_freelist+0x908/0xbb0
+       [c0000000050aa7d0] .list_del+0x18/0x50 (unreliable)
+       [c0000000050aa850] .get_page_from_freelist+0x908/0xbb0
+       [c0000000050aa9e0] .__alloc_pages_nodemask+0x21c/0xae0
+       [c0000000050aaba0] .alloc_pages_vma+0xd0/0x210
+       [c0000000050aac60] .handle_pte_fault+0x814/0xb70
+       [c0000000050aad50] .__get_user_pages+0x1a4/0x640
+       [c0000000050aae60] .get_user_pages_fast+0xec/0x160
+       [c0000000050aaf10] .__gfn_to_pfn_memslot+0x3b0/0x430 [kvm]
+       [c0000000050aafd0] .kvmppc_gfn_to_pfn+0x64/0x130 [kvm]
+       [c0000000050ab070] .kvmppc_mmu_map_page+0x94/0x530 [kvm]
+       [c0000000050ab190] .kvmppc_handle_pagefault+0x174/0x610 [kvm]
+       [c0000000050ab270] .kvmppc_handle_exit_pr+0x464/0x9b0 [kvm]
+       [c0000000050ab320]  kvm_start_lightweight+0x1ec/0x1fc [kvm]
+       [c0000000050ab4f0] .kvmppc_vcpu_run_pr+0x168/0x3b0 [kvm]
+       [c0000000050ab9c0] .kvmppc_vcpu_run+0xc8/0xf0 [kvm]
+       [c0000000050aba50] .kvm_arch_vcpu_ioctl_run+0x5c/0x1a0 [kvm]
+       [c0000000050abae0] .kvm_vcpu_ioctl+0x478/0x730 [kvm]
+       [c0000000050abc90] .do_vfs_ioctl+0x4ec/0x7c0
+       [c0000000050abd80] .SyS_ioctl+0xd4/0xf0
+       [c0000000050abe30] syscall_exit+0x0/0x98
+
+Since this is a regression, this patch proposes a minimalistic
+and low-risk solution by blindly forcing the hardirq exit processing of
+softirqs on the softirq stack. This way we should reduce significantly
+the opportunities for task stack overflow dug by softirqs.
+
+Longer term solutions may involve extending the hardirq stack coverage to
+irq_exit(), etc...
+
+Reported-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Paul Mackerras <paulus@au1.ibm.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul Mackerras <paulus@au1.ibm.com>
+Cc: James Hogan <james.hogan@imgtec.com>
+Cc: James E.J. Bottomley <jejb@parisc-linux.org>
+Cc: Helge Deller <deller@gmx.de>
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/softirq.c |   15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -328,10 +328,19 @@ void irq_enter(void)
+ static inline void invoke_softirq(void)
+ {
+-      if (!force_irqthreads)
+-              __do_softirq();
+-      else
++      if (!force_irqthreads) {
++              /*
++               * We can safely execute softirq on the current stack if
++               * it is the irq stack, because it should be near empty
++               * at this stage. But we have no way to know if the arch
++               * calls irq_exit() on the irq stack. So call softirq
++               * in its own stack to prevent from any overrun on top
++               * of a potentially deep task stack.
++               */
++              do_softirq();
++      } else {
+               wakeup_softirqd();
++      }
+ }
+ static inline void tick_irq_exit(void)
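[ The fix calls do_softirq() instead of __do_softirq() in the non-threaded branch
  because do_softirq() is the arch-overridable entry point: an architecture that
  provides a dedicated softirq stack can switch to it there before running the
  pending softirqs. The sketch below shows roughly what such an arch override
  does; it is loosely modeled on the 32-bit x86 implementation, and the
  call_on_stack()/softirq_stack_ptr() helpers should be read as illustrative,
  not as an exact kernel API. ]

	/* Rough sketch of an arch-level do_softirq() that runs the common
	 * softirq loop on a dedicated per-CPU stack.  call_on_stack() and
	 * softirq_stack_ptr() are illustrative helpers, not exact APIs. */
	void do_softirq_sketch(void)
	{
		unsigned long flags;

		if (in_interrupt())
			return;		/* handled at irq exit instead */

		local_irq_save(flags);
		if (local_softirq_pending()) {
			/* Switch away from the (possibly deep) task stack
			 * before processing, so softirq work cannot overrun it. */
			call_on_stack(__do_softirq, softirq_stack_ptr());
		}
		local_irq_restore(flags);
	}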
diff --git a/queue-3.11/series b/queue-3.11/series
index e7fa29ac9d7bce84685762264d12526e48f930f9..49c59ed02a366d26aa9c27a0958bbfe9bd7280dc 100644 (file)
--- a/queue-3.11/series
@@ -103,3 +103,8 @@ usb-serial-option-ignore-card-reader-interface-on-huawei-e1750.patch
 dmaengine-imx-dma-fix-lockdep-issue-between-irqhandler-and-tasklet.patch
 dmaengine-imx-dma-fix-callback-path-in-tasklet.patch
 dmaengine-imx-dma-fix-slow-path-issue-in-prep_dma_cyclic.patch
+acpi-ipmi-fix-atomic-context-requirement-of-ipmi_msg_handler.patch
+xfs-fix-node-forward-in-xfs_node_toosmall.patch
+drm-nouveau-bios-init-stub-opcode-0xaa.patch
+irq-force-hardirq-exit-s-softirq-processing-on-its-own-stack.patch
+arm-tegra-unify-tegra-s-kconfig-a-bit-more.patch
diff --git a/queue-3.11/xfs-fix-node-forward-in-xfs_node_toosmall.patch b/queue-3.11/xfs-fix-node-forward-in-xfs_node_toosmall.patch
new file mode 100644 (file)
index 0000000..4ee211c
--- /dev/null
@@ -0,0 +1,63 @@
+From 997def25e4b9cee3b01609e18a52f926bca8bd2b Mon Sep 17 00:00:00 2001
+From: Mark Tinguely <tinguely@sgi.com>
+Date: Mon, 23 Sep 2013 12:18:58 -0500
+Subject: xfs: fix node forward in xfs_node_toosmall
+
+From: Mark Tinguely <tinguely@sgi.com>
+
+commit 997def25e4b9cee3b01609e18a52f926bca8bd2b upstream.
+
+Commit f5ea1100 cleans up the disk to host conversions for
+node directory entries, but because a variable is reused in
+xfs_node_toosmall() the next node is not correctly found.
+If the original node is small enough (<= 3/8 of the node size),
+this change may incorrectly cause a node collapse when it should
+not. That will cause an assert in xfstest generic/319:
+
+   Assertion failed: first <= last && last < BBTOB(bp->b_length),
+   file: /root/newest/xfs/fs/xfs/xfs_trans_buf.c, line: 569
+
+Keep the original node header to get the correct forward node.
+
+(When a node is considered for a merge with a sibling, it overwrites the
+ sibling pointers of the original incore nodehdr with the sibling's
+ pointers.  This leads to loop considering the original node as a merge
+ candidate with itself in the second pass, and so it incorrectly
+ determines a merge should occur.)
+
+[v3: added Dave Chinner's (slightly modified) suggestion to the commit header,
+       cleaned up whitespace.  -bpm]
+
+Signed-off-by: Mark Tinguely <tinguely@sgi.com>
+Reviewed-by: Ben Myers <bpm@sgi.com>
+Signed-off-by: Ben Myers <bpm@sgi.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ fs/xfs/xfs_da_btree.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/xfs/xfs_da_btree.c
++++ b/fs/xfs/xfs_da_btree.c
+@@ -1223,6 +1223,7 @@ xfs_da3_node_toosmall(
+       /* start with smaller blk num */
+       forward = nodehdr.forw < nodehdr.back;
+       for (i = 0; i < 2; forward = !forward, i++) {
++              struct xfs_da3_icnode_hdr thdr;
+               if (forward)
+                       blkno = nodehdr.forw;
+               else
+@@ -1235,10 +1236,10 @@ xfs_da3_node_toosmall(
+                       return(error);
+               node = bp->b_addr;
+-              xfs_da3_node_hdr_from_disk(&nodehdr, node);
++              xfs_da3_node_hdr_from_disk(&thdr, node);
+               xfs_trans_brelse(state->args->trans, bp);
+-              if (count - nodehdr.count >= 0)
++              if (count - thdr.count >= 0)
+                       break;  /* fits with at least 25% to spare */
+       }
+       if (i >= 2) {
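[ The xfs bug above is a plain variable-reuse mistake: inside the sibling-scanning
  loop the original node's header, which holds the forw/back pointers being walked,
  was overwritten with the sibling's header, so the second iteration starts from the
  wrong node and can end up treating the node itself as a merge candidate. A
  stripped-down illustration of the pattern and of the fix follows; the struct and
  helper names are made up for the example and are not the XFS on-disk format. ]

	/* Stripped-down illustration of the variable-reuse bug; the struct
	 * and field names are illustrative, not the XFS on-disk format. */
	struct node_hdr {
		int forw;	/* forward sibling block */
		int back;	/* backward sibling block */
		int count;	/* entries in this node */
	};

	static int sibling_fits(const struct node_hdr *orig,
				int (*read_hdr)(int blkno, struct node_hdr *out),
				int spare)
	{
		struct node_hdr thdr;	/* scratch header: do NOT reuse *orig */
		int forward = orig->forw != 0;
		int i, blkno;

		for (i = 0; i < 2; forward = !forward, i++) {
			blkno = forward ? orig->forw : orig->back;
			if (!blkno)
				continue;
			/* Reading into *orig here would clobber orig->forw/back
			 * and make the second pass look at the wrong sibling. */
			if (read_hdr(blkno, &thdr))
				continue;
			if (thdr.count <= spare)
				return blkno;	/* merge candidate found */
		}
		return 0;
	}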