--- /dev/null
+From ac570e0493815e0b41681c89cb50d66421429d27 Mon Sep 17 00:00:00 2001
+From: Olof Johansson <olof@lixom.net>
+Date: Wed, 11 Sep 2013 15:27:41 -0700
+Subject: ARM: kvm: rename cpu_reset to avoid name clash
+
+From: Olof Johansson <olof@lixom.net>
+
+commit ac570e0493815e0b41681c89cb50d66421429d27 upstream.
+
+cpu_reset is already #defined in <asm/proc-fns.h> as processor.reset,
+so it expands here and causes problems.
+
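+To illustrate the clash (a simplified sketch, not the actual header or the
+KVM code), a define like the one described above turns the local variable
+declaration into something that no longer compiles, while the renamed
+variable is left alone:
+
+	/* simplified stand-in for the define in <asm/proc-fns.h> */
+	#define cpu_reset processor.reset
+
+	struct kvm_regs *cpu_reset;	/* expands to "struct kvm_regs *processor.reset;" -> build error */
+	struct kvm_regs *reset_regs;	/* no macro expansion, builds fine */
+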
+Signed-off-by: Olof Johansson <olof@lixom.net>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/reset.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/kvm/reset.c
++++ b/arch/arm/kvm/reset.c
+@@ -58,14 +58,14 @@ static const struct kvm_irq_level a15_vt
+ */
+ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ {
+- struct kvm_regs *cpu_reset;
++ struct kvm_regs *reset_regs;
+ const struct kvm_irq_level *cpu_vtimer_irq;
+
+ switch (vcpu->arch.target) {
+ case KVM_ARM_TARGET_CORTEX_A15:
+ if (vcpu->vcpu_id > a15_max_cpu_idx)
+ return -EINVAL;
+- cpu_reset = &a15_regs_reset;
++ reset_regs = &a15_regs_reset;
+ vcpu->arch.midr = read_cpuid_id();
+ cpu_vtimer_irq = &a15_vtimer_irq;
+ break;
+@@ -74,7 +74,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu
+ }
+
+ /* Reset core registers */
+- memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs));
++ memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs));
+
+ /* Reset CP15 registers */
+ kvm_reset_coprocs(vcpu);
--- /dev/null
+From 4dc3231f818baf7415c67ee06c51ace0973ae736 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joro@8bytes.org>
+Date: Wed, 25 Sep 2013 12:11:33 +0200
+Subject: ARM: mach-integrator: Add stub for pci_v3_early_init() for !CONFIG_PCI
+
+From: Joerg Roedel <joro@8bytes.org>
+
+commit 4dc3231f818baf7415c67ee06c51ace0973ae736 upstream.
+
+This fixes a compile error where CONFIG_PCI is disabled:
+
+ LD init/built-in.o
+arch/arm/mach-integrator/built-in.o: In function `ap_map_io':
+integrator_cp.c:(.init.text+0x570): undefined reference to `pci_v3_early_init'
+make[1]: *** [vmlinux] Error 1
+make: *** [sub-make] Error 2
+
+Signed-off-by: Joerg Roedel <joro@8bytes.org>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Olof Johansson <olof@lixom.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-integrator/pci_v3.h | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/arm/mach-integrator/pci_v3.h
++++ b/arch/arm/mach-integrator/pci_v3.h
+@@ -1,2 +1,9 @@
+ /* Simple oneliner include to the PCIv3 early init */
++#ifdef CONFIG_PCI
+ extern int pci_v3_early_init(void);
++#else
++static inline int pci_v3_early_init(void)
++{
++ return 0;
++}
++#endif
--- /dev/null
+From d967967e8d1116fb38bad25e58714b5dddd03cca Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Fri, 13 Sep 2013 10:52:49 +0300
+Subject: ASoC: 88pm860x: array overflow in snd_soc_put_volsw_2r_st()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit d967967e8d1116fb38bad25e58714b5dddd03cca upstream.
+
+This is called from snd_ctl_elem_write() with user-supplied data, so we
+need to add some bounds checking.
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Mark Brown <broonie@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/codecs/88pm860x-codec.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/sound/soc/codecs/88pm860x-codec.c
++++ b/sound/soc/codecs/88pm860x-codec.c
+@@ -349,6 +349,9 @@ static int snd_soc_put_volsw_2r_st(struc
+ val = ucontrol->value.integer.value[0];
+ val2 = ucontrol->value.integer.value[1];
+
++ if (val >= ARRAY_SIZE(st_table) || val2 >= ARRAY_SIZE(st_table))
++ return -EINVAL;
++
+ err = snd_soc_update_bits(codec, reg, 0x3f, st_table[val].m);
+ if (err < 0)
+ return err;
--- /dev/null
+From d63733aed90b432e5cc489ddfa28e342f91b4652 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Fri, 13 Sep 2013 10:53:36 +0300
+Subject: ASoC: ab8500-codec: info leak in anc_status_control_put()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit d63733aed90b432e5cc489ddfa28e342f91b4652 upstream.
+
+If the user passes an invalid value, it leads to an info leak when we
+print the error message, or it could oops. This is called with
+user-supplied data from snd_ctl_elem_write().
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Mark Brown <broonie@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/codecs/ab8500-codec.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/sound/soc/codecs/ab8500-codec.c
++++ b/sound/soc/codecs/ab8500-codec.c
+@@ -1225,13 +1225,18 @@ static int anc_status_control_put(struct
+ struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
+ struct device *dev = codec->dev;
+ bool apply_fir, apply_iir;
+- int req, status;
++ unsigned int req;
++ int status;
+
+ dev_dbg(dev, "%s: Enter.\n", __func__);
+
+ mutex_lock(&drvdata->anc_lock);
+
+ req = ucontrol->value.integer.value[0];
++ if (req >= ARRAY_SIZE(enum_anc_state)) {
++ status = -EINVAL;
++ goto cleanup;
++ }
+ if (req != ANC_APPLY_FIR_IIR && req != ANC_APPLY_FIR &&
+ req != ANC_APPLY_IIR) {
+ dev_err(dev, "%s: ERROR: Unsupported status to set '%s'!\n",
--- /dev/null
+From f8d7b13e14357ed19d2ca2799539600418dc3939 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Fri, 13 Sep 2013 10:52:14 +0300
+Subject: ASoC: max98095: a couple array underflows
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit f8d7b13e14357ed19d2ca2799539600418dc3939 upstream.
+
+The ->put() functions are called from snd_ctl_elem_write() with
+user-supplied data. The limit checks here could underflow, leading to a
+crash.
+
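+As a standalone sketch of the idea (illustrative only, not the driver
+code), making the index unsigned lets the existing upper-bound check also
+reject negative values coming from userspace:
+
+	#include <stdio.h>
+
+	#define CFG_COUNT 4
+
+	static int rejected_signed(int sel)
+	{
+		return sel >= CFG_COUNT;	/* -1 >= 4 is false: not rejected */
+	}
+
+	static int rejected_unsigned(unsigned int sel)
+	{
+		return sel >= CFG_COUNT;	/* (unsigned)-1 is huge: rejected */
+	}
+
+	int main(void)
+	{
+		printf("signed   -1 rejected: %d\n", rejected_signed(-1));	/* prints 0 */
+		printf("unsigned -1 rejected: %d\n", rejected_unsigned(-1));	/* prints 1 */
+		return 0;
+	}
+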
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Mark Brown <broonie@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/codecs/max98095.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/sound/soc/codecs/max98095.c
++++ b/sound/soc/codecs/max98095.c
+@@ -1863,7 +1863,7 @@ static int max98095_put_eq_enum(struct s
+ struct max98095_pdata *pdata = max98095->pdata;
+ int channel = max98095_get_eq_channel(kcontrol->id.name);
+ struct max98095_cdata *cdata;
+- int sel = ucontrol->value.integer.value[0];
++ unsigned int sel = ucontrol->value.integer.value[0];
+ struct max98095_eq_cfg *coef_set;
+ int fs, best, best_val, i;
+ int regmask, regsave;
+@@ -2016,7 +2016,7 @@ static int max98095_put_bq_enum(struct s
+ struct max98095_pdata *pdata = max98095->pdata;
+ int channel = max98095_get_bq_channel(codec, kcontrol->id.name);
+ struct max98095_cdata *cdata;
+- int sel = ucontrol->value.integer.value[0];
++ unsigned int sel = ucontrol->value.integer.value[0];
+ struct max98095_biquad_cfg *coef_set;
+ int fs, best, best_val, i;
+ int regmask, regsave;
--- /dev/null
+From 1b0135b5e20c56b2edae29e92b91c0b12c983432 Mon Sep 17 00:00:00 2001
+From: Gabor Juhos <juhosg@openwrt.org>
+Date: Wed, 25 Sep 2013 15:32:35 +0200
+Subject: avr32: fix clockevents kernel warning
+
+From: Gabor Juhos <juhosg@openwrt.org>
+
+commit 1b0135b5e20c56b2edae29e92b91c0b12c983432 upstream.
+
+Since commit 01426478df3a8791ff5c8b6b82d409e699cfaf38
+(avr32: Use generic idle loop) the kernel throws the
+following warning on avr32:
+
+ WARNING: at 900322e4 [verbose debug info unavailable]
+ Modules linked in:
+ CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.0-rc2 #117
+ task: 901c3ecc ti: 901c0000 task.ti: 901c0000
+ PC is at cpu_idle_poll_ctrl+0x1c/0x38
+ LR is at comparator_mode+0x3e/0x40
+ pc : [<900322e4>] lr : [<90014882>] Not tainted
+ sp : 901c1f74 r12: 00000000 r11: 901c74a0
+ r10: 901d2510 r9 : 00000001 r8 : 901db4de
+ r7 : 901c74a0 r6 : 00000001 r5 : 00410020 r4 : 901db574
+ r3 : 00410024 r2 : 90206fe0 r1 : 00000000 r0 : 007f0000
+ Flags: qvnzc
+ Mode bits: hjmde....G
+ CPU Mode: Supervisor
+ Call trace:
+ [<90039ede>] clockevents_set_mode+0x16/0x2e
+ [<90039f00>] clockevents_shutdown+0xa/0x1e
+ [<9003a078>] clockevents_exchange_device+0x58/0x70
+ [<9003a78c>] tick_check_new_device+0x38/0x54
+ [<9003a1a2>] clockevents_register_device+0x32/0x90
+ [<900035c4>] time_init+0xa8/0x108
+ [<90000520>] start_kernel+0x128/0x23c
+
+When the 'avr32_comparator' clockevent device is registered,
+the clockevent core sets the mode of that clockevent device
+to CLOCK_EVT_MODE_SHUTDOWN. Due to this, the 'comparator_mode'
+function calls 'cpu_idle_poll_ctrl' to disable idle polling.
+This results in the aforementioned warning because polling has
+not been enabled yet.
+
+Change the code to only disable idle polling if it was previously
+enabled by the same function, to avoid the warning.
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+Acked-by: Hans-Christian Egtvedt <egtvedt@samfundet.no>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/avr32/kernel/time.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/arch/avr32/kernel/time.c
++++ b/arch/avr32/kernel/time.c
+@@ -98,7 +98,14 @@ static void comparator_mode(enum clock_e
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ sysreg_write(COMPARE, 0);
+ pr_debug("%s: stop\n", evdev->name);
+- cpu_idle_poll_ctrl(false);
++ if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
++ evdev->mode == CLOCK_EVT_MODE_RESUME) {
++ /*
++ * Only disable idle poll if we have forced that
++ * in a previous call.
++ */
++ cpu_idle_poll_ctrl(false);
++ }
+ break;
+ default:
+ BUG();
--- /dev/null
+From 0a3658cccdf5326ea508efeb1879b0e2508bb0c3 Mon Sep 17 00:00:00 2001
+From: Peng Chen <pengchen@qti.qualcomm.com>
+Date: Fri, 30 Aug 2013 17:41:40 +0800
+Subject: Bluetooth: Add a new PID/VID 0cf3/e005 for AR3012.
+
+From: Peng Chen <pengchen@qti.qualcomm.com>
+
+commit 0a3658cccdf5326ea508efeb1879b0e2508bb0c3 upstream.
+
+usb device info:
+
+T: Bus=06 Lev=01 Prnt=01 Port=01 Cnt=01 Dev#= 15 Spd=12 MxCh= 0
+D: Ver= 1.10 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1
+P: Vendor=0cf3 ProdID=e005 Rev= 0.02
+C:* #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=100mA
+I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=1ms
+E: Ad=82(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms
+E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms
+I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms
+E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms
+
+Signed-off-by: Peng Chen <pengchen@qca.qualcomm.com>
+Signed-off-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/bluetooth/ath3k.c | 2 ++
+ drivers/bluetooth/btusb.c | 1 +
+ 2 files changed, 3 insertions(+)
+
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -85,6 +85,7 @@ static struct usb_device_id ath3k_table[
+ { USB_DEVICE(0x04CA, 0x3008) },
+ { USB_DEVICE(0x13d3, 0x3362) },
+ { USB_DEVICE(0x0CF3, 0xE004) },
++ { USB_DEVICE(0x0CF3, 0xE005) },
+ { USB_DEVICE(0x0930, 0x0219) },
+ { USB_DEVICE(0x0489, 0xe057) },
+ { USB_DEVICE(0x13d3, 0x3393) },
+@@ -126,6 +127,7 @@ static struct usb_device_id ath3k_blist_
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -148,6 +148,7 @@ static struct usb_device_id blacklist_ta
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
--- /dev/null
+From 38a172bef8c93ecbfd69715fd88396988e4073fd Mon Sep 17 00:00:00 2001
+From: Raphael Kubo da Costa <rakuco@FreeBSD.org>
+Date: Mon, 2 Sep 2013 14:57:51 +0300
+Subject: Bluetooth: Add support for BCM20702A0 [0b05, 17cb]
+
+From: Raphael Kubo da Costa <rakuco@FreeBSD.org>
+
+commit 38a172bef8c93ecbfd69715fd88396988e4073fd upstream.
+
+Yet another vendor-specific ID for this chipset; this one is for the ASUS
+USB-BT400 Bluetooth 4.0 adapter.
+
+T: Bus=03 Lev=02 Prnt=02 Port=01 Cnt=01 Dev#= 6 Spd=12 MxCh= 0
+D: Ver= 2.00 Cls=ff(vend.) Sub=01 Prot=01 MxPS=64 #Cfgs= 1
+P: Vendor=0b05 ProdID=17cb Rev=01.12
+S: Manufacturer=Broadcom Corp
+S: Product=BCM20702A0
+S: SerialNumber=000272C64400
+C: #Ifs= 4 Cfg#= 1 Atr=a0 MxPwr=100mA
+I: If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=01 Prot=01 Driver=(none)
+I: If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=(none)
+I: If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=(none)
+I: If#= 3 Alt= 0 #EPs= 0 Cls=fe(app. ) Sub=01 Prot=01 Driver=(none)
+
+Signed-off-by: Raphael Kubo da Costa <rakuco@FreeBSD.org>
+Signed-off-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/bluetooth/btusb.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -102,6 +102,7 @@ static struct usb_device_id btusb_table[
+
+ /* Broadcom BCM20702A0 */
+ { USB_DEVICE(0x0b05, 0x17b5) },
++ { USB_DEVICE(0x0b05, 0x17cb) },
+ { USB_DEVICE(0x04ca, 0x2003) },
+ { USB_DEVICE(0x0489, 0xe042) },
+ { USB_DEVICE(0x413c, 0x8197) },
--- /dev/null
+From 89cbb4da0abee2f39d75f67f9fd57f7410c8b65c Mon Sep 17 00:00:00 2001
+From: Andre Guedes <andre.guedes@openbossa.org>
+Date: Wed, 31 Jul 2013 16:25:29 -0300
+Subject: Bluetooth: Fix encryption key size for peripheral role
+
+From: Andre Guedes <andre.guedes@openbossa.org>
+
+commit 89cbb4da0abee2f39d75f67f9fd57f7410c8b65c upstream.
+
+This patch fixes the connection encryption key size information when
+the host is playing the peripheral role. We should set
+conn->enc_key_size in hci_le_ltk_request_evt, otherwise it is left
+uninitialized.
+
+Signed-off-by: Andre Guedes <andre.guedes@openbossa.org>
+Signed-off-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bluetooth/hci_event.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3556,6 +3556,8 @@ static void hci_le_ltk_request_evt(struc
+ else
+ conn->pending_sec_level = BT_SECURITY_MEDIUM;
+
++ conn->enc_key_size = ltk->enc_size;
++
+ hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
+
+ if (ltk->type & HCI_SMP_STK) {
--- /dev/null
+From bf5430360ebe4b2d0c51d91f782e649107b502eb Mon Sep 17 00:00:00 2001
+From: Johan Hedberg <johan.hedberg@intel.com>
+Date: Fri, 13 Sep 2013 08:58:18 +0300
+Subject: Bluetooth: Fix rfkill functionality during the HCI setup stage
+
+From: Johan Hedberg <johan.hedberg@intel.com>
+
+commit bf5430360ebe4b2d0c51d91f782e649107b502eb upstream.
+
+We need to let the setup stage complete cleanly even when the HCI device
+is rfkilled. Otherwise the HCI device will stay in an undefined state
+and never be announced to user space through mgmt (even when it gets
+unblocked through rfkill).
+
+This patch makes sure that hci_dev_open() can be called in the HCI_SETUP
+stage, that blocking the device doesn't abort the setup stage, and that
+the device gets properly powered down as soon as the setup stage completes
+in case it was blocked meanwhile.
+
+The bug that this patch fixes can be very easily reproduced using e.g.
+the rfkill command line tool. By running "rfkill block all" before
+inserting a Bluetooth dongle, the resulting HCI device goes into a state
+where it is never announced over mgmt, not even when "rfkill unblock all"
+is run.
+
+Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
+Acked-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bluetooth/hci_core.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1134,7 +1134,11 @@ int hci_dev_open(__u16 dev)
+ goto done;
+ }
+
+- if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
++ /* Check for rfkill but allow the HCI setup stage to proceed
++ * (which in itself doesn't cause any RF activity).
++ */
++ if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
++ !test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ ret = -ERFKILL;
+ goto done;
+ }
+@@ -1556,7 +1560,8 @@ static int hci_rfkill_set_block(void *da
+
+ if (blocked) {
+ set_bit(HCI_RFKILLED, &hdev->dev_flags);
+- hci_dev_do_close(hdev);
++ if (!test_bit(HCI_SETUP, &hdev->dev_flags))
++ hci_dev_do_close(hdev);
+ } else {
+ clear_bit(HCI_RFKILLED, &hdev->dev_flags);
+ }
+@@ -1581,9 +1586,13 @@ static void hci_power_on(struct work_str
+ return;
+ }
+
+- if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
++ if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
++ clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
++ hci_dev_do_close(hdev);
++ } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+ queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
+ HCI_AUTO_OFF_TIMEOUT);
++ }
+
+ if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
+ mgmt_index_added(hdev);
--- /dev/null
+From f8776218e8546397be64ad2bc0ebf4748522d6e3 Mon Sep 17 00:00:00 2001
+From: Andre Guedes <andre.guedes@openbossa.org>
+Date: Wed, 31 Jul 2013 16:25:28 -0300
+Subject: Bluetooth: Fix security level for peripheral role
+
+From: Andre Guedes <andre.guedes@openbossa.org>
+
+commit f8776218e8546397be64ad2bc0ebf4748522d6e3 upstream.
+
+While playing the peripheral role, the host gets a LE Long Term Key
+Request Event from the controller when a connection is established
+with a bonded device. The host then informs the LTK which should be
+used for the connection. Once the link is encrypted, the host gets
+an Encryption Change Event.
+
+Therefore we should set conn->pending_sec_level instead of
+conn->sec_level in hci_le_ltk_request_evt. This way, conn->sec_level is
+properly updated in hci_encrypt_change_evt.
+
+Moreover, since we have an LTK associated with the device, we have at
+least the BT_SECURITY_MEDIUM security level.
+
+Signed-off-by: Andre Guedes <andre.guedes@openbossa.org>
+Signed-off-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bluetooth/hci_event.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3552,7 +3552,9 @@ static void hci_le_ltk_request_evt(struc
+ cp.handle = cpu_to_le16(conn->handle);
+
+ if (ltk->authenticated)
+- conn->sec_level = BT_SECURITY_HIGH;
++ conn->pending_sec_level = BT_SECURITY_HIGH;
++ else
++ conn->pending_sec_level = BT_SECURITY_MEDIUM;
+
+ hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
+
--- /dev/null
+From 5e130367d43ff22836bbae380d197d600fe8ddbb Mon Sep 17 00:00:00 2001
+From: Johan Hedberg <johan.hedberg@intel.com>
+Date: Fri, 13 Sep 2013 08:58:17 +0300
+Subject: Bluetooth: Introduce a new HCI_RFKILLED flag
+
+From: Johan Hedberg <johan.hedberg@intel.com>
+
+commit 5e130367d43ff22836bbae380d197d600fe8ddbb upstream.
+
+This makes it more convenient to check for rfkill (no need to check for
+dev->rfkill before calling rfkill_blocked()) and also avoids potential
+races if the RFKILL state needs to be checked from within the rfkill
+callback.
+
+Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
+Acked-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/net/bluetooth/hci.h | 1 +
+ net/bluetooth/hci_core.c | 15 ++++++++++-----
+ 2 files changed, 11 insertions(+), 5 deletions(-)
+
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -104,6 +104,7 @@ enum {
+ enum {
+ HCI_SETUP,
+ HCI_AUTO_OFF,
++ HCI_RFKILLED,
+ HCI_MGMT,
+ HCI_PAIRABLE,
+ HCI_SERVICE_CACHE,
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1134,7 +1134,7 @@ int hci_dev_open(__u16 dev)
+ goto done;
+ }
+
+- if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
++ if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
+ ret = -ERFKILL;
+ goto done;
+ }
+@@ -1554,10 +1554,12 @@ static int hci_rfkill_set_block(void *da
+
+ BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
+
+- if (!blocked)
+- return 0;
+-
+- hci_dev_do_close(hdev);
++ if (blocked) {
++ set_bit(HCI_RFKILLED, &hdev->dev_flags);
++ hci_dev_do_close(hdev);
++ } else {
++ clear_bit(HCI_RFKILLED, &hdev->dev_flags);
++	}
+
+ return 0;
+ }
+@@ -2197,6 +2199,9 @@ int hci_register_dev(struct hci_dev *hde
+ }
+ }
+
++ if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
++ set_bit(HCI_RFKILLED, &hdev->dev_flags);
++
+ set_bit(HCI_SETUP, &hdev->dev_flags);
+
+ if (hdev->dev_type != HCI_AMP)
--- /dev/null
+From db4efbbeb457b6f9f4d8c4b090d1170d12f026e1 Mon Sep 17 00:00:00 2001
+From: Arend van Spriel <arend@broadcom.com>
+Date: Wed, 25 Sep 2013 12:11:01 +0200
+Subject: brcmfmac: obtain platform data upon module initialization
+
+From: Arend van Spriel <arend@broadcom.com>
+
+commit db4efbbeb457b6f9f4d8c4b090d1170d12f026e1 upstream.
+
+The driver uses platform_driver_probe() to obtain platform data,
+if any. However, that function is placed in the .init section, so
+it must be called upon driver module initialization.
+
+The problem was reported by Fengguang Wu, resulting in a kernel
+oops because the .init section was already freed.
+
+[ 48.966342] Switched to clocksource tsc
+[ 48.970002] kernel tried to execute NX-protected page - exploit attempt? (uid: 0)
+[ 48.970851] BUG: unable to handle kernel paging request at ffffffff82196446
+[ 48.970957] IP: [<ffffffff82196446>] classes_init+0x26/0x26
+[ 48.970957] PGD 1e76067 PUD 1e77063 PMD f388063 PTE 8000000002196163
+[ 48.970957] Oops: 0011 [#1]
+[ 48.970957] CPU: 0 PID: 17 Comm: kworker/0:1 Not tainted 3.11.0-rc7-00444-gc52dd7f #23
+[ 48.970957] Workqueue: events brcmf_driver_init
+[ 48.970957] task: ffff8800001d2000 ti: ffff8800001d4000 task.ti: ffff8800001d4000
+[ 48.970957] RIP: 0010:[<ffffffff82196446>] [<ffffffff82196446>] classes_init+0x26/0x26
+[ 48.970957] RSP: 0000:ffff8800001d5d40 EFLAGS: 00000286
+[ 48.970957] RAX: 0000000000000001 RBX: ffffffff820c5620 RCX: 0000000000000000
+[ 48.970957] RDX: 0000000000000001 RSI: ffffffff816f7380 RDI: ffffffff820c56c0
+[ 48.970957] RBP: ffff8800001d5d50 R08: ffff8800001d2508 R09: 0000000000000002
+[ 48.970957] R10: 0000000000000000 R11: 0001f7ce298c5620 R12: ffff8800001c76b0
+[ 48.970957] R13: ffffffff81e91d40 R14: 0000000000000000 R15: ffff88000e0ce300
+[ 48.970957] FS: 0000000000000000(0000) GS:ffffffff81e84000(0000) knlGS:0000000000000000
+[ 48.970957] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
+[ 48.970957] CR2: ffffffff82196446 CR3: 0000000001e75000 CR4: 00000000000006b0
+[ 48.970957] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 48.970957] DR3: 0000000000000000 DR6: 0000000000000000 DR7: 0000000000000000
+[ 48.970957] Stack:
+[ 48.970957] ffffffff816f7df8 ffffffff820c5620 ffff8800001d5d60 ffffffff816eeec9
+[ 48.970957] ffff8800001d5de0 ffffffff81073dc5 ffffffff81073d68 ffff8800001d5db8
+[ 48.970957] 0000000000000086 ffffffff820c5620 ffffffff824f7fd0 0000000000000000
+[ 48.970957] Call Trace:
+[ 48.970957] [<ffffffff816f7df8>] ? brcmf_sdio_init+0x18/0x70
+[ 48.970957] [<ffffffff816eeec9>] brcmf_driver_init+0x9/0x10
+[ 48.970957] [<ffffffff81073dc5>] process_one_work+0x1d5/0x480
+[ 48.970957] [<ffffffff81073d68>] ? process_one_work+0x178/0x480
+[ 48.970957] [<ffffffff81074188>] worker_thread+0x118/0x3a0
+[ 48.970957] [<ffffffff81074070>] ? process_one_work+0x480/0x480
+[ 48.970957] [<ffffffff8107aa17>] kthread+0xe7/0xf0
+[ 48.970957] [<ffffffff810829f7>] ? finish_task_switch.constprop.57+0x37/0xd0
+[ 48.970957] [<ffffffff8107a930>] ? __kthread_parkme+0x80/0x80
+[ 48.970957] [<ffffffff81a6923a>] ret_from_fork+0x7a/0xb0
+[ 48.970957] [<ffffffff8107a930>] ? __kthread_parkme+0x80/0x80
+[ 48.970957] Code: cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc
+cc cc cc cc cc cc <cc> cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc
+[ 48.970957] RIP [<ffffffff82196446>] classes_init+0x26/0x26
+[ 48.970957] RSP <ffff8800001d5d40>
+[ 48.970957] CR2: ffffffff82196446
+[ 48.970957] ---[ end trace 62980817cd525f14 ]---
+
+Reported-by: Fengguang Wu <fengguang.wu@intel.com>
+Reviewed-by: Hante Meuleman <meuleman@broadcom.com>
+Reviewed-by: Pieter-Paul Giesberts <pieterpg@broadcom.com>
+Tested-by: Fengguang Wu <fengguang.wu@intel.com>
+Signed-off-by: Arend van Spriel <arend@broadcom.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c | 28 +++++++----------
+ drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h | 3 +
+ drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 14 ++++----
+ drivers/net/wireless/brcm80211/brcmfmac/usb.c | 2 -
+ 4 files changed, 24 insertions(+), 23 deletions(-)
+
+--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+@@ -465,8 +465,6 @@ static struct sdio_driver brcmf_sdmmc_dr
+
+ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
+ {
+- int ret;
+-
+ brcmf_dbg(SDIO, "Enter\n");
+
+ brcmfmac_sdio_pdata = pdev->dev.platform_data;
+@@ -474,11 +472,7 @@ static int brcmf_sdio_pd_probe(struct pl
+ if (brcmfmac_sdio_pdata->power_on)
+ brcmfmac_sdio_pdata->power_on();
+
+- ret = sdio_register_driver(&brcmf_sdmmc_driver);
+- if (ret)
+- brcmf_err("sdio_register_driver failed: %d\n", ret);
+-
+- return ret;
++ return 0;
+ }
+
+ static int brcmf_sdio_pd_remove(struct platform_device *pdev)
+@@ -501,6 +495,15 @@ static struct platform_driver brcmf_sdio
+ }
+ };
+
++void brcmf_sdio_register(void)
++{
++ int ret;
++
++ ret = sdio_register_driver(&brcmf_sdmmc_driver);
++ if (ret)
++ brcmf_err("sdio_register_driver failed: %d\n", ret);
++}
++
+ void brcmf_sdio_exit(void)
+ {
+ brcmf_dbg(SDIO, "Enter\n");
+@@ -511,18 +514,13 @@ void brcmf_sdio_exit(void)
+ sdio_unregister_driver(&brcmf_sdmmc_driver);
+ }
+
+-void brcmf_sdio_init(void)
++void __init brcmf_sdio_init(void)
+ {
+ int ret;
+
+ brcmf_dbg(SDIO, "Enter\n");
+
+ ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
+- if (ret == -ENODEV) {
+- brcmf_dbg(SDIO, "No platform data available, registering without.\n");
+- ret = sdio_register_driver(&brcmf_sdmmc_driver);
+- }
+-
+- if (ret)
+- brcmf_err("driver registration failed: %d\n", ret);
++ if (ret == -ENODEV)
++ brcmf_dbg(SDIO, "No platform data available.\n");
+ }
+--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
++++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+@@ -154,10 +154,11 @@ extern int brcmf_bus_start(struct device
+ #ifdef CONFIG_BRCMFMAC_SDIO
+ extern void brcmf_sdio_exit(void);
+ extern void brcmf_sdio_init(void);
++extern void brcmf_sdio_register(void);
+ #endif
+ #ifdef CONFIG_BRCMFMAC_USB
+ extern void brcmf_usb_exit(void);
+-extern void brcmf_usb_init(void);
++extern void brcmf_usb_register(void);
+ #endif
+
+ #endif /* _BRCMF_BUS_H_ */
+--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+@@ -1020,21 +1020,23 @@ u32 brcmf_get_chip_info(struct brcmf_if
+ return bus->chip << 4 | bus->chiprev;
+ }
+
+-static void brcmf_driver_init(struct work_struct *work)
++static void brcmf_driver_register(struct work_struct *work)
+ {
+- brcmf_debugfs_init();
+-
+ #ifdef CONFIG_BRCMFMAC_SDIO
+- brcmf_sdio_init();
++ brcmf_sdio_register();
+ #endif
+ #ifdef CONFIG_BRCMFMAC_USB
+- brcmf_usb_init();
++ brcmf_usb_register();
+ #endif
+ }
+-static DECLARE_WORK(brcmf_driver_work, brcmf_driver_init);
++static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
+
+ static int __init brcmfmac_module_init(void)
+ {
++ brcmf_debugfs_init();
++#ifdef CONFIG_BRCMFMAC_SDIO
++ brcmf_sdio_init();
++#endif
+ if (!schedule_work(&brcmf_driver_work))
+ return -EBUSY;
+
+--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+@@ -1540,7 +1540,7 @@ void brcmf_usb_exit(void)
+ brcmf_release_fw(&fw_image_list);
+ }
+
+-void brcmf_usb_init(void)
++void brcmf_usb_register(void)
+ {
+ brcmf_dbg(USB, "Enter\n");
+ INIT_LIST_HEAD(&fw_image_list);
--- /dev/null
+From 72023656961b8c81a168a7a6762d589339d0d7ec Mon Sep 17 00:00:00 2001
+From: Dan Aloni <alonid@stratoscale.com>
+Date: Mon, 30 Sep 2013 13:45:02 -0700
+Subject: fs/binfmt_elf.c: prevent a coredump with a large vm_map_count from Oopsing
+
+From: Dan Aloni <alonid@stratoscale.com>
+
+commit 72023656961b8c81a168a7a6762d589339d0d7ec upstream.
+
+A high setting of max_map_count and a process core-dumping with a large
+enough vm_map_count could result in an NT_FILE note not being written,
+and the kernel crashing immediately afterwards because it has assumed
+otherwise.
+
+Reproduction of the oops-causing bug described here:
+
+ https://lkml.org/lkml/2013/8/30/50
+
+The issue originated in commit 2aa362c49c31 ("coredump: extend core dump
+note section to contain file names of mapped file") from Oct 4, 2012.
+
+This patch makes that section optional in that case. fill_files_note()
+should signify the error, and also let the info struct in
+elf_core_dump() be zero-initialized so that we can check for the
+optionally written note.
+
+[akpm@linux-foundation.org: avoid abusing E2BIG, remove a couple of not-really-needed local variables]
+[akpm@linux-foundation.org: fix sparse warning]
+Signed-off-by: Dan Aloni <alonid@stratoscale.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Denys Vlasenko <vda.linux@googlemail.com>
+Reported-by: Martin MOKREJS <mmokrejs@gmail.com>
+Tested-by: Martin MOKREJS <mmokrejs@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/binfmt_elf.c | 30 ++++++++++++++++++------------
+ 1 file changed, 18 insertions(+), 12 deletions(-)
+
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1413,7 +1413,7 @@ static void fill_siginfo_note(struct mem
+ * long file_ofs
+ * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
+ */
+-static void fill_files_note(struct memelfnote *note)
++static int fill_files_note(struct memelfnote *note)
+ {
+ struct vm_area_struct *vma;
+ unsigned count, size, names_ofs, remaining, n;
+@@ -1428,11 +1428,11 @@ static void fill_files_note(struct memel
+ names_ofs = (2 + 3 * count) * sizeof(data[0]);
+ alloc:
+ if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
+- goto err;
++ return -EINVAL;
+ size = round_up(size, PAGE_SIZE);
+ data = vmalloc(size);
+ if (!data)
+- goto err;
++ return -ENOMEM;
+
+ start_end_ofs = data + 2;
+ name_base = name_curpos = ((char *)data) + names_ofs;
+@@ -1485,7 +1485,7 @@ static void fill_files_note(struct memel
+
+ size = name_curpos - (char *)data;
+ fill_note(note, "CORE", NT_FILE, size, data);
+- err: ;
++ return 0;
+ }
+
+ #ifdef CORE_DUMP_USE_REGSET
+@@ -1686,8 +1686,8 @@ static int fill_note_info(struct elfhdr
+ fill_auxv_note(&info->auxv, current->mm);
+ info->size += notesize(&info->auxv);
+
+- fill_files_note(&info->files);
+- info->size += notesize(&info->files);
++ if (fill_files_note(&info->files) == 0)
++ info->size += notesize(&info->files);
+
+ return 1;
+ }
+@@ -1719,7 +1719,8 @@ static int write_note_info(struct elf_no
+ return 0;
+ if (first && !writenote(&info->auxv, file, foffset))
+ return 0;
+- if (first && !writenote(&info->files, file, foffset))
++ if (first && info->files.data &&
++ !writenote(&info->files, file, foffset))
+ return 0;
+
+ for (i = 1; i < info->thread_notes; ++i)
+@@ -1806,6 +1807,7 @@ static int elf_dump_thread_status(long s
+
+ struct elf_note_info {
+ struct memelfnote *notes;
++ struct memelfnote *notes_files;
+ struct elf_prstatus *prstatus; /* NT_PRSTATUS */
+ struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */
+ struct list_head thread_list;
+@@ -1896,9 +1898,12 @@ static int fill_note_info(struct elfhdr
+
+ fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
+ fill_auxv_note(info->notes + 3, current->mm);
+- fill_files_note(info->notes + 4);
++ info->numnote = 4;
+
+- info->numnote = 5;
++ if (fill_files_note(info->notes + info->numnote) == 0) {
++ info->notes_files = info->notes + info->numnote;
++ info->numnote++;
++ }
+
+ /* Try to dump the FPU. */
+ info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
+@@ -1960,8 +1965,9 @@ static void free_note_info(struct elf_no
+ kfree(list_entry(tmp, struct elf_thread_status, list));
+ }
+
+- /* Free data allocated by fill_files_note(): */
+- vfree(info->notes[4].data);
++ /* Free data possibly allocated by fill_files_note(): */
++ if (info->notes_files)
++ vfree(info->notes_files->data);
+
+ kfree(info->prstatus);
+ kfree(info->psinfo);
+@@ -2044,7 +2050,7 @@ static int elf_core_dump(struct coredump
+ struct vm_area_struct *vma, *gate_vma;
+ struct elfhdr *elf = NULL;
+ loff_t offset = 0, dataoff, foffset;
+- struct elf_note_info info;
++ struct elf_note_info info = { };
+ struct elf_phdr *phdr4note = NULL;
+ struct elf_shdr *shdr4extnum = NULL;
+ Elf_Half e_phnum;
--- /dev/null
+From 0ab08f576b9e6a6b689fc6b4e632079b978e619b Mon Sep 17 00:00:00 2001
+From: Maxim Patlasov <MPatlasov@parallels.com>
+Date: Fri, 13 Sep 2013 19:20:16 +0400
+Subject: fuse: fix fallocate vs. ftruncate race
+
+From: Maxim Patlasov <MPatlasov@parallels.com>
+
+commit 0ab08f576b9e6a6b689fc6b4e632079b978e619b upstream.
+
+A former patch introducing the FUSE_I_SIZE_UNSTABLE flag provided a detailed
+description of the races between ftruncate and anyone who can extend i_size:
+
+> 1. As in the previous scenario fuse_dentry_revalidate() discovered that i_size
+> changed (due to our own fuse_do_setattr()) and is going to call
+> truncate_pagecache() for some 'new_size' it believes valid right now. But by
+> the time that particular truncate_pagecache() is called ...
+> 2. fuse_do_setattr() returns (either having called truncate_pagecache() or
+> not -- it doesn't matter).
+> 3. The file is extended either by write(2) or ftruncate(2) or fallocate(2).
+> 4. mmap-ed write makes a page in the extended region dirty.
+
+This patch adds the necessary bits to fuse_file_fallocate() to protect
+against that race.
+
+Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/file.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -2467,6 +2467,7 @@ static long fuse_file_fallocate(struct f
+ {
+ struct fuse_file *ff = file->private_data;
+ struct inode *inode = file->f_inode;
++ struct fuse_inode *fi = get_fuse_inode(inode);
+ struct fuse_conn *fc = ff->fc;
+ struct fuse_req *req;
+ struct fuse_fallocate_in inarg = {
+@@ -2495,6 +2496,9 @@ static long fuse_file_fallocate(struct f
+ }
+ }
+
++ if (!(mode & FALLOC_FL_KEEP_SIZE))
++ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
++
+ req = fuse_get_req_nopages(fc);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+@@ -2527,6 +2531,9 @@ static long fuse_file_fallocate(struct f
+ fuse_invalidate_attr(inode);
+
+ out:
++ if (!(mode & FALLOC_FL_KEEP_SIZE))
++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
++
+ if (lock_inode)
+ mutex_unlock(&inode->i_mutex);
+
--- /dev/null
+From bde52788bdb755b9e4b75db6c434f30e32a0ca0b Mon Sep 17 00:00:00 2001
+From: Maxim Patlasov <MPatlasov@parallels.com>
+Date: Fri, 13 Sep 2013 19:19:54 +0400
+Subject: fuse: wait for writeback in fuse_file_fallocate()
+
+From: Maxim Patlasov <MPatlasov@parallels.com>
+
+commit bde52788bdb755b9e4b75db6c434f30e32a0ca0b upstream.
+
+The patch fixes a race between mmap-ed write and fallocate(PUNCH_HOLE):
+
+1) A user makes a page dirty via an mmap-ed write.
+2) The user performs fallocate(2) with mode == PUNCH_HOLE|KEEP_SIZE
+ and <offset, size> covering the page.
+3) Before truncate_pagecache_range call from fuse_file_fallocate,
+ the page goes to write-back. The page is fully processed by fuse_writepage
+ (including end_page_writeback on the page), but fuse_flush_writepages did
+ nothing because fi->writectr < 0.
+4) truncate_pagecache_range is called and fuse_file_fallocate is finishing
+ by calling fuse_release_nowrite. The latter triggers processing queued
+ write-back request which will write stale data to the hole soon.
+
+Changed in v2 (thanks to Brian for the suggestion):
+ - Do not truncate the page cache until FUSE_FALLOCATE has succeeded.
+   Otherwise, we can end up returning -ENOTSUPP while user data has already
+   been punched from the page cache. Use filemap_write_and_wait_range()
+   instead.
+Changed in v3 (thanks to Miklos for the suggestion):
+ - fuse_wait_on_writeback() is prone to livelocks; use fuse_set_nowrite()
+   instead. Since we only need a dirty-page barrier, fuse_sync_writes()
+   should be enough.
+ - rebased to for-linus branch of fuse.git
+
+Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/file.c | 16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -2484,8 +2484,15 @@ static long fuse_file_fallocate(struct f
+
+ if (lock_inode) {
+ mutex_lock(&inode->i_mutex);
+- if (mode & FALLOC_FL_PUNCH_HOLE)
+- fuse_set_nowrite(inode);
++ if (mode & FALLOC_FL_PUNCH_HOLE) {
++ loff_t endbyte = offset + length - 1;
++ err = filemap_write_and_wait_range(inode->i_mapping,
++ offset, endbyte);
++ if (err)
++ goto out;
++
++ fuse_sync_writes(inode);
++ }
+ }
+
+ req = fuse_get_req_nopages(fc);
+@@ -2520,11 +2527,8 @@ static long fuse_file_fallocate(struct f
+ fuse_invalidate_attr(inode);
+
+ out:
+- if (lock_inode) {
+- if (mode & FALLOC_FL_PUNCH_HOLE)
+- fuse_release_nowrite(inode);
++ if (lock_inode)
+ mutex_unlock(&inode->i_mutex);
+- }
+
+ return err;
+ }
--- /dev/null
+From fac7fa162a19100298d5d91359960037dc5bfca9 Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javier.martinez@collabora.co.uk>
+Date: Wed, 25 Sep 2013 02:36:54 +0200
+Subject: gpio/omap: auto-setup a GPIO when used as an IRQ
+
+From: Javier Martinez Canillas <javier.martinez@collabora.co.uk>
+
+commit fac7fa162a19100298d5d91359960037dc5bfca9 upstream.
+
+The OMAP GPIO controller HW requires a pin to be configured in GPIO
+input mode in order to operate as an interrupt input. Since drivers
+should not be aware of whether an interrupt pin is also a GPIO or not,
+the HW should be fully configured/enabled as an IRQ if a driver solely
+uses IRQ APIs such as request_irq(), and never calls any GPIO-related
+APIs. As such, add the missing HW setup to the OMAP GPIO controller's
+irq_chip driver.
+
+Since this bypasses the GPIO subsystem we have to ensure that another
+driver won't be able to request the same GPIO pin that is used as an
+IRQ and set its direction as output. Requesting the GPIO and setting
+its direction as input is allowed though.
+
+This fixes smsc911x ethernet support for tobi and igep OMAP3 boards
+and OMAP4 SDP SPI based ethernet that use a GPIO as an interrupt line.
+
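+As a hedged sketch of the consumer pattern this change enables (the pin
+number and names below are hypothetical, not taken from the patch), a
+driver may now use only the IRQ API and rely on the irq_chip to put the
+pin into GPIO input mode:
+
+	#include <linux/gpio.h>
+	#include <linux/interrupt.h>
+
+	#define ETH_IRQ_GPIO 176	/* hypothetical board-specific pin */
+
+	static irqreturn_t eth_isr(int irq, void *dev_id)
+	{
+		return IRQ_HANDLED;
+	}
+
+	static int eth_setup_irq(void *dev)
+	{
+		/* no gpio_request()/gpio_direction_input() on purpose */
+		return request_irq(gpio_to_irq(ETH_IRQ_GPIO), eth_isr,
+				   IRQF_TRIGGER_LOW, "smsc911x", dev);
+	}
+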
+Acked-by: Stephen Warren <swarren@nvidia.com>
+Tested-by: George Cherian <george.cherian@ti.com>
+Tested-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Tested-by: Lars Poeschel <poeschel@lemonage.de>
+Reviewed-by: Kevin Hilman <khilman@linaro.org>
+Tested-by: Kevin Hilman <khilman@linaro.org>
+Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
+Acked-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Javier Martinez Canillas <javier.martinez@collabora.co.uk>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpio-omap.c | 129 ++++++++++++++++++++++++++++++-----------------
+ 1 file changed, 83 insertions(+), 46 deletions(-)
+
+--- a/drivers/gpio/gpio-omap.c
++++ b/drivers/gpio/gpio-omap.c
+@@ -424,6 +424,52 @@ static int _set_gpio_triggering(struct g
+ return 0;
+ }
+
++static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset)
++{
++ if (bank->regs->pinctrl) {
++ void __iomem *reg = bank->base + bank->regs->pinctrl;
++
++ /* Claim the pin for MPU */
++ __raw_writel(__raw_readl(reg) | (1 << offset), reg);
++ }
++
++ if (bank->regs->ctrl && !BANK_USED(bank)) {
++ void __iomem *reg = bank->base + bank->regs->ctrl;
++ u32 ctrl;
++
++ ctrl = __raw_readl(reg);
++ /* Module is enabled, clocks are not gated */
++ ctrl &= ~GPIO_MOD_CTRL_BIT;
++ __raw_writel(ctrl, reg);
++ bank->context.ctrl = ctrl;
++ }
++}
++
++static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset)
++{
++ void __iomem *base = bank->base;
++
++ if (bank->regs->wkup_en &&
++ !LINE_USED(bank->mod_usage, offset) &&
++ !LINE_USED(bank->irq_usage, offset)) {
++ /* Disable wake-up during idle for dynamic tick */
++ _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
++ bank->context.wake_en =
++ __raw_readl(bank->base + bank->regs->wkup_en);
++ }
++
++ if (bank->regs->ctrl && !BANK_USED(bank)) {
++ void __iomem *reg = bank->base + bank->regs->ctrl;
++ u32 ctrl;
++
++ ctrl = __raw_readl(reg);
++ /* Module is disabled, clocks are gated */
++ ctrl |= GPIO_MOD_CTRL_BIT;
++ __raw_writel(ctrl, reg);
++ bank->context.ctrl = ctrl;
++ }
++}
++
+ static int gpio_is_input(struct gpio_bank *bank, int mask)
+ {
+ void __iomem *reg = bank->base + bank->regs->direction;
+@@ -437,9 +483,10 @@ static int gpio_irq_type(struct irq_data
+ unsigned gpio = 0;
+ int retval;
+ unsigned long flags;
++ unsigned offset;
+
+- if (WARN_ON(!BANK_USED(bank)))
+- return -EINVAL;
++ if (!BANK_USED(bank))
++ pm_runtime_get_sync(bank->dev);
+
+ #ifdef CONFIG_ARCH_OMAP1
+ if (d->irq > IH_MPUIO_BASE)
+@@ -457,7 +504,16 @@ static int gpio_irq_type(struct irq_data
+ return -EINVAL;
+
+ spin_lock_irqsave(&bank->lock, flags);
+- retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
++ offset = GPIO_INDEX(bank, gpio);
++ retval = _set_gpio_triggering(bank, offset, type);
++ if (!LINE_USED(bank->mod_usage, offset)) {
++ _enable_gpio_module(bank, offset);
++ _set_gpio_direction(bank, offset, 1);
++ } else if (!gpio_is_input(bank, 1 << offset)) {
++ spin_unlock_irqrestore(&bank->lock, flags);
++ return -EINVAL;
++ }
++
+ bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio);
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+@@ -620,30 +676,14 @@ static int omap_gpio_request(struct gpio
+
+ spin_lock_irqsave(&bank->lock, flags);
+ /* Set trigger to none. You need to enable the desired trigger with
+- * request_irq() or set_irq_type().
++ * request_irq() or set_irq_type(). Only do this if the IRQ line has
++ * not already been requested.
+ */
+- _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+-
+- if (bank->regs->pinctrl) {
+- void __iomem *reg = bank->base + bank->regs->pinctrl;
+-
+- /* Claim the pin for MPU */
+- __raw_writel(__raw_readl(reg) | (1 << offset), reg);
++ if (!LINE_USED(bank->irq_usage, offset)) {
++ _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
++ _enable_gpio_module(bank, offset);
+ }
+-
+- if (bank->regs->ctrl && !BANK_USED(bank)) {
+- void __iomem *reg = bank->base + bank->regs->ctrl;
+- u32 ctrl;
+-
+- ctrl = __raw_readl(reg);
+- /* Module is enabled, clocks are not gated */
+- ctrl &= ~GPIO_MOD_CTRL_BIT;
+- __raw_writel(ctrl, reg);
+- bank->context.ctrl = ctrl;
+- }
+-
+ bank->mod_usage |= 1 << offset;
+-
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+@@ -652,31 +692,11 @@ static int omap_gpio_request(struct gpio
+ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
+ {
+ struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
+- void __iomem *base = bank->base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+-
+- if (bank->regs->wkup_en) {
+- /* Disable wake-up during idle for dynamic tick */
+- _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
+- bank->context.wake_en =
+- __raw_readl(bank->base + bank->regs->wkup_en);
+- }
+-
+ bank->mod_usage &= ~(1 << offset);
+-
+- if (bank->regs->ctrl && !BANK_USED(bank)) {
+- void __iomem *reg = bank->base + bank->regs->ctrl;
+- u32 ctrl;
+-
+- ctrl = __raw_readl(reg);
+- /* Module is disabled, clocks are gated */
+- ctrl |= GPIO_MOD_CTRL_BIT;
+- __raw_writel(ctrl, reg);
+- bank->context.ctrl = ctrl;
+- }
+-
++ _disable_gpio_module(bank, offset);
+ _reset_gpio(bank, bank->chip.base + offset);
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+@@ -778,8 +798,16 @@ static void gpio_irq_shutdown(struct irq
+
+ spin_lock_irqsave(&bank->lock, flags);
+ bank->irq_usage &= ~(1 << offset);
++ _disable_gpio_module(bank, offset);
+ _reset_gpio(bank, gpio);
+ spin_unlock_irqrestore(&bank->lock, flags);
++
++ /*
++ * If this is the last IRQ to be freed in the bank,
++ * disable the bank module.
++ */
++ if (!BANK_USED(bank))
++ pm_runtime_put(bank->dev);
+ }
+
+ static void gpio_ack_irq(struct irq_data *d)
+@@ -929,13 +957,22 @@ static int gpio_output(struct gpio_chip
+ {
+ struct gpio_bank *bank;
+ unsigned long flags;
++ int retval = 0;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+ spin_lock_irqsave(&bank->lock, flags);
++
++ if (LINE_USED(bank->irq_usage, offset)) {
++ retval = -EINVAL;
++ goto exit;
++ }
++
+ bank->set_dataout(bank, offset, value);
+ _set_gpio_direction(bank, offset, 0);
++
++exit:
+ spin_unlock_irqrestore(&bank->lock, flags);
+- return 0;
++ return retval;
+ }
+
+ static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
--- /dev/null
+From fa365e4d729065b5e85165df3dc9699ed47489cc Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javier.martinez@collabora.co.uk>
+Date: Wed, 25 Sep 2013 02:36:52 +0200
+Subject: gpio/omap: maintain GPIO and IRQ usage separately
+
+From: Javier Martinez Canillas <javier.martinez@collabora.co.uk>
+
+commit fa365e4d729065b5e85165df3dc9699ed47489cc upstream.
+
+The OMAP GPIO controller pins can be used as IRQs and GPIOs
+independently, so it is necessary to track GPIO pin and IRQ line
+usage separately to make sure that the bank will always be
+enabled while being used.
+
+Also move the gpio_is_input() definition in preparation for the
+next patch, which sets up the controller's irq_chip driver when
+a caller requests an interrupt line.
+
+Acked-by: Stephen Warren <swarren@nvidia.com>
+Tested-by: George Cherian <george.cherian@ti.com>
+Tested-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Tested-by: Lars Poeschel <poeschel@lemonage.de>
+Reviewed-by: Kevin Hilman <khilman@linaro.org>
+Tested-by: Kevin Hilman <khilman@linaro.org>
+Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
+Acked-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Javier Martinez Canillas <javier.martinez@collabora.co.uk>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpio-omap.c | 35 +++++++++++++++++++++--------------
+ 1 file changed, 21 insertions(+), 14 deletions(-)
+
+--- a/drivers/gpio/gpio-omap.c
++++ b/drivers/gpio/gpio-omap.c
+@@ -63,6 +63,7 @@ struct gpio_bank {
+ struct gpio_chip chip;
+ struct clk *dbck;
+ u32 mod_usage;
++ u32 irq_usage;
+ u32 dbck_enable_mask;
+ bool dbck_enabled;
+ struct device *dev;
+@@ -86,6 +87,9 @@ struct gpio_bank {
+ #define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
+ #define GPIO_MOD_CTRL_BIT BIT(0)
+
++#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
++#define LINE_USED(line, offset) (line & (1 << offset))
++
+ static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
+ {
+ return bank->chip.base + gpio_irq;
+@@ -420,6 +424,13 @@ static int _set_gpio_triggering(struct g
+ return 0;
+ }
+
++static int gpio_is_input(struct gpio_bank *bank, int mask)
++{
++ void __iomem *reg = bank->base + bank->regs->direction;
++
++ return __raw_readl(reg) & mask;
++}
++
+ static int gpio_irq_type(struct irq_data *d, unsigned type)
+ {
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+@@ -427,7 +438,7 @@ static int gpio_irq_type(struct irq_data
+ int retval;
+ unsigned long flags;
+
+- if (WARN_ON(!bank->mod_usage))
++ if (WARN_ON(!BANK_USED(bank)))
+ return -EINVAL;
+
+ #ifdef CONFIG_ARCH_OMAP1
+@@ -447,6 +458,7 @@ static int gpio_irq_type(struct irq_data
+
+ spin_lock_irqsave(&bank->lock, flags);
+ retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
++ bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio);
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+@@ -603,7 +615,7 @@ static int omap_gpio_request(struct gpio
+ * If this is the first gpio_request for the bank,
+ * enable the bank module.
+ */
+- if (!bank->mod_usage)
++ if (!BANK_USED(bank))
+ pm_runtime_get_sync(bank->dev);
+
+ spin_lock_irqsave(&bank->lock, flags);
+@@ -619,7 +631,7 @@ static int omap_gpio_request(struct gpio
+ __raw_writel(__raw_readl(reg) | (1 << offset), reg);
+ }
+
+- if (bank->regs->ctrl && !bank->mod_usage) {
++ if (bank->regs->ctrl && !BANK_USED(bank)) {
+ void __iomem *reg = bank->base + bank->regs->ctrl;
+ u32 ctrl;
+
+@@ -654,7 +666,7 @@ static void omap_gpio_free(struct gpio_c
+
+ bank->mod_usage &= ~(1 << offset);
+
+- if (bank->regs->ctrl && !bank->mod_usage) {
++ if (bank->regs->ctrl && !BANK_USED(bank)) {
+ void __iomem *reg = bank->base + bank->regs->ctrl;
+ u32 ctrl;
+
+@@ -672,7 +684,7 @@ static void omap_gpio_free(struct gpio_c
+ * If this is the last gpio to be freed in the bank,
+ * disable the bank module.
+ */
+- if (!bank->mod_usage)
++ if (!BANK_USED(bank))
+ pm_runtime_put(bank->dev);
+ }
+
+@@ -762,8 +774,10 @@ static void gpio_irq_shutdown(struct irq
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ unsigned int gpio = irq_to_gpio(bank, d->hwirq);
+ unsigned long flags;
++ unsigned offset = GPIO_INDEX(bank, gpio);
+
+ spin_lock_irqsave(&bank->lock, flags);
++ bank->irq_usage &= ~(1 << offset);
+ _reset_gpio(bank, gpio);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ }
+@@ -897,13 +911,6 @@ static int gpio_input(struct gpio_chip *
+ return 0;
+ }
+
+-static int gpio_is_input(struct gpio_bank *bank, int mask)
+-{
+- void __iomem *reg = bank->base + bank->regs->direction;
+-
+- return __raw_readl(reg) & mask;
+-}
+-
+ static int gpio_get(struct gpio_chip *chip, unsigned offset)
+ {
+ struct gpio_bank *bank;
+@@ -1400,7 +1407,7 @@ void omap2_gpio_prepare_for_idle(int pwr
+ struct gpio_bank *bank;
+
+ list_for_each_entry(bank, &omap_gpio_list, node) {
+- if (!bank->mod_usage || !bank->loses_context)
++ if (!BANK_USED(bank) || !bank->loses_context)
+ continue;
+
+ bank->power_mode = pwr_mode;
+@@ -1414,7 +1421,7 @@ void omap2_gpio_resume_after_idle(void)
+ struct gpio_bank *bank;
+
+ list_for_each_entry(bank, &omap_gpio_list, node) {
+- if (!bank->mod_usage || !bank->loses_context)
++ if (!BANK_USED(bank) || !bank->loses_context)
+ continue;
+
+ pm_runtime_get_sync(bank->dev);
--- /dev/null
+From fd90cecbde065eac6ecc3ef38abace725ad27010 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Wed, 21 Aug 2013 13:56:34 +0100
+Subject: iommu/arm-smmu: don't enable SMMU device until probing has completed
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit fd90cecbde065eac6ecc3ef38abace725ad27010 upstream.
+
+We currently reset and enable the SMMU before the device has finished
+being probed, so if we fail later on (for example, because we couldn't
+request a global irq successfully) then we will leave the device in an
+active state.
+
+This patch delays the reset and enabling of the SMMU hardware until
+probing has completed.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/arm-smmu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -1839,8 +1839,6 @@ static int arm_smmu_device_dt_probe(stru
+ goto out_put_parent;
+ }
+
+- arm_smmu_device_reset(smmu);
+-
+ for (i = 0; i < smmu->num_global_irqs; ++i) {
+ err = request_irq(smmu->irqs[i],
+ arm_smmu_global_fault,
+@@ -1858,6 +1856,8 @@ static int arm_smmu_device_dt_probe(stru
+ spin_lock(&arm_smmu_devices_lock);
+ list_add(&smmu->list, &arm_smmu_devices);
+ spin_unlock(&arm_smmu_devices_lock);
++
++ arm_smmu_device_reset(smmu);
+ return 0;
+
+ out_free_irqs:
--- /dev/null
+From faea13b72dbdb77e4d6ad83344596486611708b0 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Wed, 21 Aug 2013 09:33:30 +0100
+Subject: iommu/arm-smmu: fix a signedness bug
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit faea13b72dbdb77e4d6ad83344596486611708b0 upstream.
+
+Unsigned char is never equal to -1.
+
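+A minimal userspace illustration of the promotion involved (not driver
+code): the unsigned char is promoted to int before the comparison, so the
+test against -1 can never succeed.
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned char irptndx = -1;	/* stored as 0xff (255) */
+
+		if (irptndx == -1)		/* promoted to int 255, never equal to -1 */
+			printf("matched -1\n");
+		else
+			printf("never matches (irptndx is %d)\n", irptndx);
+		return 0;
+	}
+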
+Tested-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/arm-smmu.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -379,6 +379,7 @@ struct arm_smmu_cfg {
+ u32 cbar;
+ pgd_t *pgd;
+ };
++#define INVALID_IRPTNDX 0xff
+
+ struct arm_smmu_domain {
+ /*
+@@ -830,7 +831,7 @@ static int arm_smmu_init_domain_context(
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
+ root_cfg->irptndx, irq);
+- root_cfg->irptndx = -1;
++ root_cfg->irptndx = INVALID_IRPTNDX;
+ goto out_free_context;
+ }
+
+@@ -855,7 +856,7 @@ static void arm_smmu_destroy_domain_cont
+ if (!smmu)
+ return;
+
+- if (root_cfg->irptndx != -1) {
++ if (root_cfg->irptndx != INVALID_IRPTNDX) {
+ irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
+ free_irq(irq, domain);
+ }
--- /dev/null
+From 6614ee77f49d37f9bb77eb3e81431ca8fcc4042e Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Wed, 21 Aug 2013 09:34:20 +0100
+Subject: iommu/arm-smmu: fix iommu_present() test in init
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 6614ee77f49d37f9bb77eb3e81431ca8fcc4042e upstream.
+
+The extra semi-colon on the end breaks the test.
+
+Tested-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/arm-smmu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -1948,10 +1948,10 @@ static int __init arm_smmu_init(void)
+ return ret;
+
+ /* Oh, for a proper bus abstraction */
+- if (!iommu_present(&platform_bus_type));
++ if (!iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+
+- if (!iommu_present(&amba_bustype));
++ if (!iommu_present(&amba_bustype))
+ bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+
+ return 0;
--- /dev/null
+From 53dad6d3a8e5ac1af8bacc6ac2134ae1a8b085f1 Mon Sep 17 00:00:00 2001
+From: Davidlohr Bueso <davidlohr@hp.com>
+Date: Mon, 23 Sep 2013 17:04:45 -0700
+Subject: ipc: fix race with LSMs
+
+From: Davidlohr Bueso <davidlohr@hp.com>
+
+commit 53dad6d3a8e5ac1af8bacc6ac2134ae1a8b085f1 upstream.
+
+Currently, IPC mechanisms do security and auditing related checks under
+RCU. However, since security modules can free the security structure,
+for example, through selinux_[sem,msg_queue,shm]_free_security(), we can
+race if the structure is freed before other tasks are done with it,
+creating a use-after-free condition. Manfred illustrates this nicely,
+for instance with shared mem and selinux:
+
+ -> do_shmat calls rcu_read_lock()
+ -> do_shmat calls shm_object_check().
+ Checks that the object is still valid - but doesn't acquire any locks.
+ Then it returns.
+ -> do_shmat calls security_shm_shmat (e.g. selinux_shm_shmat)
+ -> selinux_shm_shmat calls ipc_has_perm()
+ -> ipc_has_perm accesses ipc_perms->security
+
+shm_close()
+ -> shm_close acquires rw_mutex & shm_lock
+ -> shm_close calls shm_destroy
+ -> shm_destroy calls security_shm_free (e.g. selinux_shm_free_security)
+ -> selinux_shm_free_security calls ipc_free_security(&shp->shm_perm)
+ -> ipc_free_security calls kfree(ipc_perms->security)
+
+This patch delays the freeing of the security structures after all RCU
+readers are done. Furthermore it aligns the security life cycle with
+that of the rest of IPC - freeing them based on the reference counter.
+For situations where we need not free security, the current behavior is
+kept. Linus states:
+
+ "... the old behavior was suspect for another reason too: having the
+ security blob go away from under a user sounds like it could cause
+ various other problems anyway, so I think the old code was at least
+ _prone_ to bugs even if it didn't have catastrophic behavior."
+
+I have tested this patch with IPC testcases from LTP on both my
+quad-core laptop and on a 64 core NUMA server. In both cases selinux is
+enabled, and tests pass for both voluntary and forced preemption models.
+While the mentioned races are theoretical (at least no one has reported
+them), I wanted to make sure that this new logic doesn't break anything
+we weren't aware of.
+
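+In outline, freeing is now routed through an RCU callback that releases
+the LSM blob together with the object (condensed from the shm hunk below):
+
+    static void shm_rcu_free(struct rcu_head *head)
+    {
+        struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+        struct shmid_kernel *shp = ipc_rcu_to_struct(p);
+
+        security_shm_free(shp);   /* LSM blob freed only after the grace period */
+        ipc_rcu_free(head);
+    }
+
+    /* callers drop their reference with the matching callback: */
+    ipc_rcu_putref(shp, shm_rcu_free);
+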
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
+Acked-by: Manfred Spraul <manfred@colorfullife.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ ipc/msg.c | 19 +++++++++++++------
+ ipc/sem.c | 34 ++++++++++++++++++----------------
+ ipc/shm.c | 17 ++++++++++++-----
+ ipc/util.c | 32 ++++++++++++--------------------
+ ipc/util.h | 10 +++++++++-
+ 5 files changed, 64 insertions(+), 48 deletions(-)
+
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -167,6 +167,15 @@ static inline void msg_rmid(struct ipc_n
+ ipc_rmid(&msg_ids(ns), &s->q_perm);
+ }
+
++static void msg_rcu_free(struct rcu_head *head)
++{
++ struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
++ struct msg_queue *msq = ipc_rcu_to_struct(p);
++
++ security_msg_queue_free(msq);
++ ipc_rcu_free(head);
++}
++
+ /**
+ * newque - Create a new msg queue
+ * @ns: namespace
+@@ -191,15 +200,14 @@ static int newque(struct ipc_namespace *
+ msq->q_perm.security = NULL;
+ retval = security_msg_queue_alloc(msq);
+ if (retval) {
+- ipc_rcu_putref(msq);
++ ipc_rcu_putref(msq, ipc_rcu_free);
+ return retval;
+ }
+
+ /* ipc_addid() locks msq upon success. */
+ id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
+ if (id < 0) {
+- security_msg_queue_free(msq);
+- ipc_rcu_putref(msq);
++ ipc_rcu_putref(msq, msg_rcu_free);
+ return id;
+ }
+
+@@ -277,8 +285,7 @@ static void freeque(struct ipc_namespace
+ free_msg(msg);
+ }
+ atomic_sub(msq->q_cbytes, &ns->msg_bytes);
+- security_msg_queue_free(msq);
+- ipc_rcu_putref(msq);
++ ipc_rcu_putref(msq, msg_rcu_free);
+ }
+
+ /*
+@@ -724,7 +731,7 @@ long do_msgsnd(int msqid, long mtype, vo
+ rcu_read_lock();
+ ipc_lock_object(&msq->q_perm);
+
+- ipc_rcu_putref(msq);
++ ipc_rcu_putref(msq, ipc_rcu_free);
+ if (msq->q_perm.deleted) {
+ err = -EIDRM;
+ goto out_unlock0;
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -260,6 +260,15 @@ static void sem_wait_array(struct sem_ar
+ }
+ }
+
++static void sem_rcu_free(struct rcu_head *head)
++{
++ struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
++ struct sem_array *sma = ipc_rcu_to_struct(p);
++
++ security_sem_free(sma);
++ ipc_rcu_free(head);
++}
++
+ /*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+@@ -408,12 +417,7 @@ static inline struct sem_array *sem_obta
+ static inline void sem_lock_and_putref(struct sem_array *sma)
+ {
+ sem_lock(sma, NULL, -1);
+- ipc_rcu_putref(sma);
+-}
+-
+-static inline void sem_putref(struct sem_array *sma)
+-{
+- ipc_rcu_putref(sma);
++ ipc_rcu_putref(sma, ipc_rcu_free);
+ }
+
+ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
+@@ -492,14 +496,13 @@ static int newary(struct ipc_namespace *
+ sma->sem_perm.security = NULL;
+ retval = security_sem_alloc(sma);
+ if (retval) {
+- ipc_rcu_putref(sma);
++ ipc_rcu_putref(sma, ipc_rcu_free);
+ return retval;
+ }
+
+ id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
+ if (id < 0) {
+- security_sem_free(sma);
+- ipc_rcu_putref(sma);
++ ipc_rcu_putref(sma, sem_rcu_free);
+ return id;
+ }
+ ns->used_sems += nsems;
+@@ -1081,8 +1084,7 @@ static void freeary(struct ipc_namespace
+
+ wake_up_sem_queue_do(&tasks);
+ ns->used_sems -= sma->sem_nsems;
+- security_sem_free(sma);
+- ipc_rcu_putref(sma);
++ ipc_rcu_putref(sma, sem_rcu_free);
+ }
+
+ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
+@@ -1326,7 +1328,7 @@ static int semctl_main(struct ipc_namesp
+ rcu_read_unlock();
+ sem_io = ipc_alloc(sizeof(ushort)*nsems);
+ if(sem_io == NULL) {
+- sem_putref(sma);
++ ipc_rcu_putref(sma, ipc_rcu_free);
+ return -ENOMEM;
+ }
+
+@@ -1362,20 +1364,20 @@ static int semctl_main(struct ipc_namesp
+ if(nsems > SEMMSL_FAST) {
+ sem_io = ipc_alloc(sizeof(ushort)*nsems);
+ if(sem_io == NULL) {
+- sem_putref(sma);
++ ipc_rcu_putref(sma, ipc_rcu_free);
+ return -ENOMEM;
+ }
+ }
+
+ if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
+- sem_putref(sma);
++ ipc_rcu_putref(sma, ipc_rcu_free);
+ err = -EFAULT;
+ goto out_free;
+ }
+
+ for (i = 0; i < nsems; i++) {
+ if (sem_io[i] > SEMVMX) {
+- sem_putref(sma);
++ ipc_rcu_putref(sma, ipc_rcu_free);
+ err = -ERANGE;
+ goto out_free;
+ }
+@@ -1663,7 +1665,7 @@ static struct sem_undo *find_alloc_undo(
+ /* step 2: allocate new undo structure */
+ new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+ if (!new) {
+- sem_putref(sma);
++ ipc_rcu_putref(sma, ipc_rcu_free);
+ return ERR_PTR(-ENOMEM);
+ }
+
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -155,6 +155,15 @@ static inline struct shmid_kernel *shm_l
+ return container_of(ipcp, struct shmid_kernel, shm_perm);
+ }
+
++static void shm_rcu_free(struct rcu_head *head)
++{
++ struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
++ struct shmid_kernel *shp = ipc_rcu_to_struct(p);
++
++ security_shm_free(shp);
++ ipc_rcu_free(head);
++}
++
+ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
+ {
+ ipc_rmid(&shm_ids(ns), &s->shm_perm);
+@@ -196,8 +205,7 @@ static void shm_destroy(struct ipc_names
+ user_shm_unlock(file_inode(shp->shm_file)->i_size,
+ shp->mlock_user);
+ fput (shp->shm_file);
+- security_shm_free(shp);
+- ipc_rcu_putref(shp);
++ ipc_rcu_putref(shp, shm_rcu_free);
+ }
+
+ /*
+@@ -485,7 +493,7 @@ static int newseg(struct ipc_namespace *
+ shp->shm_perm.security = NULL;
+ error = security_shm_alloc(shp);
+ if (error) {
+- ipc_rcu_putref(shp);
++ ipc_rcu_putref(shp, ipc_rcu_free);
+ return error;
+ }
+
+@@ -554,8 +562,7 @@ no_id:
+ user_shm_unlock(size, shp->mlock_user);
+ fput(file);
+ no_file:
+- security_shm_free(shp);
+- ipc_rcu_putref(shp);
++ ipc_rcu_putref(shp, shm_rcu_free);
+ return error;
+ }
+
+--- a/ipc/util.c
++++ b/ipc/util.c
+@@ -465,11 +465,6 @@ void ipc_free(void* ptr, int size)
+ kfree(ptr);
+ }
+
+-struct ipc_rcu {
+- struct rcu_head rcu;
+- atomic_t refcount;
+-} ____cacheline_aligned_in_smp;
+-
+ /**
+ * ipc_rcu_alloc - allocate ipc and rcu space
+ * @size: size desired
+@@ -496,27 +491,24 @@ int ipc_rcu_getref(void *ptr)
+ return atomic_inc_not_zero(&p->refcount);
+ }
+
+-/**
+- * ipc_schedule_free - free ipc + rcu space
+- * @head: RCU callback structure for queued work
+- */
+-static void ipc_schedule_free(struct rcu_head *head)
+-{
+- vfree(container_of(head, struct ipc_rcu, rcu));
+-}
+-
+-void ipc_rcu_putref(void *ptr)
++void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head))
+ {
+ struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
+
+ if (!atomic_dec_and_test(&p->refcount))
+ return;
+
+- if (is_vmalloc_addr(ptr)) {
+- call_rcu(&p->rcu, ipc_schedule_free);
+- } else {
+- kfree_rcu(p, rcu);
+- }
++ call_rcu(&p->rcu, func);
++}
++
++void ipc_rcu_free(struct rcu_head *head)
++{
++ struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
++
++ if (is_vmalloc_addr(p))
++ vfree(p);
++ else
++ kfree(p);
+ }
+
+ /**
+--- a/ipc/util.h
++++ b/ipc/util.h
+@@ -47,6 +47,13 @@ static inline void msg_exit_ns(struct ip
+ static inline void shm_exit_ns(struct ipc_namespace *ns) { }
+ #endif
+
++struct ipc_rcu {
++ struct rcu_head rcu;
++ atomic_t refcount;
++} ____cacheline_aligned_in_smp;
++
++#define ipc_rcu_to_struct(p) ((void *)(p+1))
++
+ /*
+ * Structure that holds the parameters needed by the ipc operations
+ * (see after)
+@@ -120,7 +127,8 @@ void ipc_free(void* ptr, int size);
+ */
+ void* ipc_rcu_alloc(int size);
+ int ipc_rcu_getref(void *ptr);
+-void ipc_rcu_putref(void *ptr);
++void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
++void ipc_rcu_free(struct rcu_head *head);
+
+ struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
+ struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id);
--- /dev/null
+From 4271b05a227dc6175b66c3d9941aeab09048aeb2 Mon Sep 17 00:00:00 2001
+From: Davidlohr Bueso <davidlohr@hp.com>
+Date: Mon, 30 Sep 2013 13:45:26 -0700
+Subject: ipc,msg: prevent race with rmid in msgsnd,msgrcv
+
+From: Davidlohr Bueso <davidlohr@hp.com>
+
+commit 4271b05a227dc6175b66c3d9941aeab09048aeb2 upstream.
+
+This fixes a race in both msgrcv() and msgsnd() between finding the msg
+and actually dealing with the queue, as another thread can delete the
+msqid underneath us if we are preempted before acquiring the
+kern_ipc_perm.lock.
+
+Manfred illustrates this nicely:
+
+Assume a preemptible kernel that is preempted just after
+
+ msq = msq_obtain_object_check(ns, msqid)
+
+in do_msgrcv(). The only lock that is held is rcu_read_lock().
+
+Now the other thread processes IPC_RMID. When the first task is
+resumed, then it will happily wait for messages on a deleted queue.
+
+Fix this by checking whether the queue has been deleted after taking the
+lock.
+
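+In outline (matching the do_msgrcv() hunk below):
+
+    msq = msq_obtain_object_check(ns, msqid);   /* under rcu_read_lock() only */
+    ...
+    ipc_lock_object(&msq->q_perm);
+
+    /* raced with RMID? re-check now that the lock is held */
+    if (msq->q_perm.deleted) {
+        msg = ERR_PTR(-EIDRM);
+        goto out_unlock0;
+    }
+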
+Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
+Reported-by: Manfred Spraul <manfred@colorfullife.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ ipc/msg.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -689,6 +689,12 @@ long do_msgsnd(int msqid, long mtype, vo
+ if (ipcperms(ns, &msq->q_perm, S_IWUGO))
+ goto out_unlock0;
+
++ /* raced with RMID? */
++ if (msq->q_perm.deleted) {
++ err = -EIDRM;
++ goto out_unlock0;
++ }
++
+ err = security_msg_queue_msgsnd(msq, msg, msgflg);
+ if (err)
+ goto out_unlock0;
+@@ -895,6 +901,13 @@ long do_msgrcv(int msqid, void __user *b
+ goto out_unlock1;
+
+ ipc_lock_object(&msq->q_perm);
++
++ /* raced with RMID? */
++ if (msq->q_perm.deleted) {
++ msg = ERR_PTR(-EIDRM);
++ goto out_unlock0;
++ }
++
+ msg = find_msg(msq, &msgtyp, mode);
+ if (!IS_ERR(msg)) {
+ /*
--- /dev/null
+From 5e9d527591421ccdb16acb8c23662231135d8686 Mon Sep 17 00:00:00 2001
+From: Manfred Spraul <manfred@colorfullife.com>
+Date: Mon, 30 Sep 2013 13:45:04 -0700
+Subject: ipc/sem.c: fix race in sem_lock()
+
+From: Manfred Spraul <manfred@colorfullife.com>
+
+commit 5e9d527591421ccdb16acb8c23662231135d8686 upstream.
+
+The exclusion of complex operations in sem_lock() is insufficient: after
+acquiring the per-semaphore lock, a simple op must first check that
+sem_perm.lock is not locked and only after that test check
+complex_count. The current code does it the other way around - and that
+creates a race. Details are below.
+
+The patch is a complete rewrite of sem_lock(), based in part on the code
+from Mike Galbraith. It removes all gotos and all loops and thus the
+risk of livelocks.
+
+I have tested the patch (together with the next one) on my i3 laptop and
+it didn't cause any problems.
+
+The bug is probably also present in 3.10 and 3.11, but for these kernels
+it might be simpler just to move the test of sma->complex_count after
+the spin_is_locked() test.
+
+Details of the bug:
+
+Assume:
+ - sma->complex_count = 0.
+ - Thread 1: semtimedop(complex op that must sleep)
+ - Thread 2: semtimedop(simple op).
+
+Pseudo-Trace:
+
+Thread 1: sem_lock(): acquire sem_perm.lock
+Thread 1: sem_lock(): check for ongoing simple ops
+ Nothing ongoing, thread 2 is still before sem_lock().
+Thread 1: try_atomic_semop()
+ <<< preempted.
+
+Thread 2: sem_lock():
+ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+ {
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+ <<<<<<<<<
+ <<< complex_count is still 0.
+ <<<
+ <<< Here it is preempted
+ <<<<<<<<<
+
+Thread 1: try_atomic_semop() returns, notices that it must sleep.
+Thread 1: increases sma->complex_count.
+Thread 1: drops sem_perm.lock
+Thread 2:
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+ <<< sem_perm.lock already dropped, thus no "goto again;"
+
+ locknum = sops->sem_num;
+
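+The rewritten fast path performs the checks in the opposite order
+(condensed from the new sem_lock() below):
+
+    spin_lock(&sem->lock);                        /* per-semaphore lock first */
+    if (!spin_is_locked(&sma->sem_perm.lock)) {   /* no complex op in progress */
+        smp_mb();                                 /* spin_is_locked() is not a barrier */
+        if (sma->complex_count == 0)              /* and none queued either */
+            return sops->sem_num;                 /* fast path: only sem->lock held */
+    }
+    spin_unlock(&sem->lock);
+    /* otherwise fall back to ipc_lock_object(&sma->sem_perm) */
+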
+Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
+Cc: Mike Galbraith <bitbucket@online.de>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ ipc/sem.c | 122 +++++++++++++++++++++++++++++++++++++++-----------------------
+ 1 file changed, 78 insertions(+), 44 deletions(-)
+
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -244,70 +244,104 @@ static void merge_queues(struct sem_arra
+ }
+
+ /*
++ * Wait until all currently ongoing simple ops have completed.
++ * Caller must own sem_perm.lock.
++ * New simple ops cannot start, because simple ops first check
++ * that sem_perm.lock is free.
++ */
++static void sem_wait_array(struct sem_array *sma)
++{
++ int i;
++ struct sem *sem;
++
++ for (i = 0; i < sma->sem_nsems; i++) {
++ sem = sma->sem_base + i;
++ spin_unlock_wait(&sem->lock);
++ }
++}
++
++/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+- *
+- * Carefully guard against sma->complex_count changing between zero
+- * and non-zero while we are spinning for the lock. The value of
+- * sma->complex_count cannot change while we are holding the lock,
+- * so sem_unlock should be fine.
+- *
+- * The global lock path checks that all the local locks have been released,
+- * checking each local lock once. This means that the local lock paths
+- * cannot start their critical sections while the global lock is held.
+ */
+ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+ {
+- int locknum;
+- again:
+- if (nsops == 1 && !sma->complex_count) {
+- struct sem *sem = sma->sem_base + sops->sem_num;
++ struct sem *sem;
+
+- /* Lock just the semaphore we are interested in. */
+- spin_lock(&sem->lock);
++ if (nsops != 1) {
++ /* Complex operation - acquire a full lock */
++ ipc_lock_object(&sma->sem_perm);
+
+- /*
+- * If sma->complex_count was set while we were spinning,
+- * we may need to look at things we did not lock here.
++ /* And wait until all simple ops that are processed
++ * right now have dropped their locks.
+ */
+- if (unlikely(sma->complex_count)) {
+- spin_unlock(&sem->lock);
+- goto lock_array;
+- }
++ sem_wait_array(sma);
++ return -1;
++ }
++
++ /*
++ * Only one semaphore affected - try to optimize locking.
++ * The rules are:
++ * - optimized locking is possible if no complex operation
++ * is either enqueued or processed right now.
++ * - The test for enqueued complex ops is simple:
++ * sma->complex_count != 0
++ * - Testing for complex ops that are processed right now is
++ * a bit more difficult. Complex ops acquire the full lock
++ * and first wait that the running simple ops have completed.
++ * (see above)
++ * Thus: If we own a simple lock and the global lock is free
++ * and complex_count is now 0, then it will stay 0 and
++ * thus just locking sem->lock is sufficient.
++ */
++ sem = sma->sem_base + sops->sem_num;
+
++ if (sma->complex_count == 0) {
+ /*
+- * Another process is holding the global lock on the
+- * sem_array; we cannot enter our critical section,
+- * but have to wait for the global lock to be released.
++ * It appears that no complex operation is around.
++ * Acquire the per-semaphore lock.
+ */
+- if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+- spin_unlock(&sem->lock);
+- spin_unlock_wait(&sma->sem_perm.lock);
+- goto again;
++ spin_lock(&sem->lock);
++
++ /* Then check that the global lock is free */
++ if (!spin_is_locked(&sma->sem_perm.lock)) {
++ /* spin_is_locked() is not a memory barrier */
++ smp_mb();
++
++ /* Now repeat the test of complex_count:
++ * It can't change anymore until we drop sem->lock.
++ * Thus: if is now 0, then it will stay 0.
++ */
++ if (sma->complex_count == 0) {
++ /* fast path successful! */
++ return sops->sem_num;
++ }
+ }
++ spin_unlock(&sem->lock);
++ }
++
++ /* slow path: acquire the full lock */
++ ipc_lock_object(&sma->sem_perm);
+
+- locknum = sops->sem_num;
++ if (sma->complex_count == 0) {
++ /* False alarm:
++ * There is no complex operation, thus we can switch
++ * back to the fast path.
++ */
++ spin_lock(&sem->lock);
++ ipc_unlock_object(&sma->sem_perm);
++ return sops->sem_num;
+ } else {
+- int i;
+- /*
+- * Lock the semaphore array, and wait for all of the
+- * individual semaphore locks to go away. The code
+- * above ensures no new single-lock holders will enter
+- * their critical section while the array lock is held.
++ /* Not a false alarm, thus complete the sequence for a
++ * full lock.
+ */
+- lock_array:
+- ipc_lock_object(&sma->sem_perm);
+- for (i = 0; i < sma->sem_nsems; i++) {
+- struct sem *sem = sma->sem_base + i;
+- spin_unlock_wait(&sem->lock);
+- }
+- locknum = -1;
++ sem_wait_array(sma);
++ return -1;
+ }
+- return locknum;
+ }
+
+ static inline void sem_unlock(struct sem_array *sma, int locknum)
--- /dev/null
+From 7f42ec3941560f0902fe3671e36f2c20ffd3af0a Mon Sep 17 00:00:00 2001
+From: Vyacheslav Dubeyko <slava@dubeyko.com>
+Date: Mon, 30 Sep 2013 13:45:12 -0700
+Subject: nilfs2: fix issue with race condition of competition between segments for dirty blocks
+
+From: Vyacheslav Dubeyko <slava@dubeyko.com>
+
+commit 7f42ec3941560f0902fe3671e36f2c20ffd3af0a upstream.
+
+Many NILFS2 users have reported strange file system corruption, for
+example:
+
+ NILFS: bad btree node (blocknr=185027): level = 0, flags = 0x0, nchildren = 768
+ NILFS error (device sda4): nilfs_bmap_last_key: broken bmap (inode number=11540)
+
+But such error messages are a consequence of a file system issue that
+takes place earlier. Fortunately, Jerome Poulin <jeromepoulin@gmail.com>
+and Anton Eliasson <devel@antoneliasson.se> reported another issue not so
+long ago. These reports describe a crash of the segctor thread:
+
+ BUG: unable to handle kernel paging request at 0000000000004c83
+ IP: nilfs_end_page_io+0x12/0xd0 [nilfs2]
+
+ Call Trace:
+ nilfs_segctor_do_construct+0xf25/0x1b20 [nilfs2]
+ nilfs_segctor_construct+0x17b/0x290 [nilfs2]
+ nilfs_segctor_thread+0x122/0x3b0 [nilfs2]
+ kthread+0xc0/0xd0
+ ret_from_fork+0x7c/0xb0
+
+These two issues have one root cause. The same cause can raise a third
+issue too: the segctor thread hangs while eating 100% CPU.
+
+REPRODUCING PATH:
+
+One possible way of reproducing the issue was described by
+Jerome Poulin <jeromepoulin@gmail.com>:
+
+1. init S to get to single user mode.
+2. sysrq+E to make sure only my shell is running
+3. start network-manager to get my wifi connection up
+4. login as root and launch "screen"
+5. cd /boot/log/nilfs which is a ext3 mount point and can log when NILFS dies.
+6. lscp | xz -9e > lscp.txt.xz
+7. mount my snapshot using mount -o cp=3360839,ro /dev/vgUbuntu/root /mnt/nilfs
+8. start a screen to dump /proc/kmsg to text file since rsyslog is killed
+9. start a screen and launch strace -f -o find-cat.log -t find
+/mnt/nilfs -type f -exec cat {} > /dev/null \;
+10. start a screen and launch strace -f -o apt-get.log -t apt-get update
+11. launch the last command again as it did not crash the first time
+12. apt-get crashes
+13. ps aux > ps-aux-crashed.log
+14. sysrq+W
+15. sysrq+E wait for everything to terminate
+16. sysrq+SUSB
+
+A simpler way of reproducing the issue is to start a kernel compilation
+task and "apt-get update" in parallel.
+
+REPRODUCIBILITY:
+
+The issue does not reproduce reliably [60% - 80%]. It is very important
+to have a proper environment for reproducing it. The critical conditions
+for successful reproduction are:
+
+(1) There should be a big file modified via mmap().
+
+(2) From time to time during processing, the file's count of dirty blocks
+ should be greater than several segments in size (for example, two or
+ three).
+
+(3) There should be intensive background file-modification activity in
+ another thread.
+
+INVESTIGATION:
+
+First of all, it is possible to see that the reason for the crash is an
+invalid page address:
+
+ NILFS [nilfs_segctor_complete_write]:2100 bh->b_count 0, bh->b_blocknr 13895680, bh->b_size 13897727, bh->b_page 0000000000001a82
+ NILFS [nilfs_segctor_complete_write]:2101 segbuf->sb_segnum 6783
+
+Moreover, the value of b_page (0x1a82) is 6786 in decimal. This value
+looks like a segment number, and the b_blocknr and b_size values look
+like block numbers. So the buffer_head's b_page pointer does not point at
+a proper address.
+
+Detailed investigation of the issue discovered the following picture:
+
+ [-----------------------------SEGMENT 6783-------------------------------]
+ NILFS [nilfs_segctor_do_construct]:2310 nilfs_segctor_begin_construction
+ NILFS [nilfs_segctor_do_construct]:2321 nilfs_segctor_collect
+ NILFS [nilfs_segctor_do_construct]:2336 nilfs_segctor_assign
+ NILFS [nilfs_segctor_do_construct]:2367 nilfs_segctor_update_segusage
+ NILFS [nilfs_segctor_do_construct]:2371 nilfs_segctor_prepare_write
+ NILFS [nilfs_segctor_do_construct]:2376 nilfs_add_checksums_on_logs
+ NILFS [nilfs_segctor_do_construct]:2381 nilfs_segctor_write
+ NILFS [nilfs_segbuf_submit_bio]:464 bio->bi_sector 111149024, segbuf->sb_segnum 6783
+
+ [-----------------------------SEGMENT 6784-------------------------------]
+ NILFS [nilfs_segctor_do_construct]:2310 nilfs_segctor_begin_construction
+ NILFS [nilfs_segctor_do_construct]:2321 nilfs_segctor_collect
+ NILFS [nilfs_lookup_dirty_data_buffers]:782 bh->b_count 1, bh->b_page ffffea000709b000, page->index 0, i_ino 1033103, i_size 25165824
+ NILFS [nilfs_lookup_dirty_data_buffers]:783 bh->b_assoc_buffers.next ffff8802174a6798, bh->b_assoc_buffers.prev ffff880221cffee8
+ NILFS [nilfs_segctor_do_construct]:2336 nilfs_segctor_assign
+ NILFS [nilfs_segctor_do_construct]:2367 nilfs_segctor_update_segusage
+ NILFS [nilfs_segctor_do_construct]:2371 nilfs_segctor_prepare_write
+ NILFS [nilfs_segctor_do_construct]:2376 nilfs_add_checksums_on_logs
+ NILFS [nilfs_segctor_do_construct]:2381 nilfs_segctor_write
+ NILFS [nilfs_segbuf_submit_bh]:575 bh->b_count 1, bh->b_page ffffea000709b000, page->index 0, i_ino 1033103, i_size 25165824
+ NILFS [nilfs_segbuf_submit_bh]:576 segbuf->sb_segnum 6784
+ NILFS [nilfs_segbuf_submit_bh]:577 bh->b_assoc_buffers.next ffff880218a0d5f8, bh->b_assoc_buffers.prev ffff880218bcdf50
+ NILFS [nilfs_segbuf_submit_bio]:464 bio->bi_sector 111150080, segbuf->sb_segnum 6784, segbuf->sb_nbio 0
+ [----------] ditto
+ NILFS [nilfs_segbuf_submit_bio]:464 bio->bi_sector 111164416, segbuf->sb_segnum 6784, segbuf->sb_nbio 15
+
+ [-----------------------------SEGMENT 6785-------------------------------]
+ NILFS [nilfs_segctor_do_construct]:2310 nilfs_segctor_begin_construction
+ NILFS [nilfs_segctor_do_construct]:2321 nilfs_segctor_collect
+ NILFS [nilfs_lookup_dirty_data_buffers]:782 bh->b_count 2, bh->b_page ffffea000709b000, page->index 0, i_ino 1033103, i_size 25165824
+ NILFS [nilfs_lookup_dirty_data_buffers]:783 bh->b_assoc_buffers.next ffff880219277e80, bh->b_assoc_buffers.prev ffff880221cffc88
+ NILFS [nilfs_segctor_do_construct]:2367 nilfs_segctor_update_segusage
+ NILFS [nilfs_segctor_do_construct]:2371 nilfs_segctor_prepare_write
+ NILFS [nilfs_segctor_do_construct]:2376 nilfs_add_checksums_on_logs
+ NILFS [nilfs_segctor_do_construct]:2381 nilfs_segctor_write
+ NILFS [nilfs_segbuf_submit_bh]:575 bh->b_count 2, bh->b_page ffffea000709b000, page->index 0, i_ino 1033103, i_size 25165824
+ NILFS [nilfs_segbuf_submit_bh]:576 segbuf->sb_segnum 6785
+ NILFS [nilfs_segbuf_submit_bh]:577 bh->b_assoc_buffers.next ffff880218a0d5f8, bh->b_assoc_buffers.prev ffff880222cc7ee8
+ NILFS [nilfs_segbuf_submit_bio]:464 bio->bi_sector 111165440, segbuf->sb_segnum 6785, segbuf->sb_nbio 0
+ [----------] ditto
+ NILFS [nilfs_segbuf_submit_bio]:464 bio->bi_sector 111177728, segbuf->sb_segnum 6785, segbuf->sb_nbio 12
+
+ NILFS [nilfs_segctor_do_construct]:2399 nilfs_segctor_wait
+ NILFS [nilfs_segbuf_wait]:676 segbuf->sb_segnum 6783
+ NILFS [nilfs_segbuf_wait]:676 segbuf->sb_segnum 6784
+ NILFS [nilfs_segbuf_wait]:676 segbuf->sb_segnum 6785
+
+ NILFS [nilfs_segctor_complete_write]:2100 bh->b_count 0, bh->b_blocknr 13895680, bh->b_size 13897727, bh->b_page 0000000000001a82
+
+ BUG: unable to handle kernel paging request at 0000000000001a82
+ IP: [<ffffffffa024d0f2>] nilfs_end_page_io+0x12/0xd0 [nilfs2]
+
+Usually, for every segment we collect dirty files in a list. Then dirty
+blocks are gathered for every dirty file, prepared for write and
+submitted by means of a nilfs_segbuf_submit_bh() call. Finally, the
+complete-write phase takes place after nilfs_end_bio_write() is called on
+the block layer. Buffers/pages are marked as not dirty in the final phase
+and processed files are removed from the list of dirty files.
+
+It is possible to see that we had three prepare_write and submit_bio
+phases before the segbuf_wait and complete_write phase. Moreover,
+segments compete with each other for dirty blocks, because on every
+iteration of segment processing dirty buffer_heads are added to several
+payload_buffers lists:
+
+ [SEGMENT 6784]: bh->b_assoc_buffers.next ffff880218a0d5f8, bh->b_assoc_buffers.prev ffff880218bcdf50
+ [SEGMENT 6785]: bh->b_assoc_buffers.next ffff880218a0d5f8, bh->b_assoc_buffers.prev ffff880222cc7ee8
+
+The next pointer is the same but the prev pointer has changed. It means
+that the buffer_head has a next pointer from one list but a prev pointer
+from another. Such a modification can happen several times and, finally,
+can result in various issues: (1) segctor hanging, (2) segctor
+crashing, (3) file system metadata corruption.
+
+FIX:
+This patch adds:
+
+(1) setting of BH_Async_Write flag in nilfs_segctor_prepare_write()
+ for every processed dirty block;
+
+(2) checking of BH_Async_Write flag in
+ nilfs_lookup_dirty_data_buffers() and
+ nilfs_lookup_dirty_node_buffers();
+
+(3) clearing of BH_Async_Write flag in nilfs_segctor_complete_write(),
+ nilfs_abort_logs(), nilfs_forget_buffer(), nilfs_clear_dirty_page().
+
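+In outline, the collection path now skips buffers already claimed by an
+in-flight segment (condensed from the nilfs_lookup_dirty_data_buffers()
+hunk below):
+
+    bh = head = page_buffers(page);
+    do {
+        if (!buffer_dirty(bh) || buffer_async_write(bh))
+            continue;           /* already owned by a segment being written */
+        get_bh(bh);
+        list_add_tail(&bh->b_assoc_buffers, listp);
+    } while (bh = bh->b_this_page, bh != head);
+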
+Reported-by: Jerome Poulin <jeromepoulin@gmail.com>
+Reported-by: Anton Eliasson <devel@antoneliasson.se>
+Cc: Paul Fertser <fercerpav@gmail.com>
+Cc: ARAI Shun-ichi <hermes@ceres.dti.ne.jp>
+Cc: Piotr Szymaniak <szarpaj@grubelek.pl>
+Cc: Juan Barry Manuel Canham <Linux@riotingpacifist.net>
+Cc: Zahid Chowdhury <zahid.chowdhury@starsolutions.com>
+Cc: Elmer Zhang <freeboy6716@gmail.com>
+Cc: Kenneth Langga <klangga@gmail.com>
+Signed-off-by: Vyacheslav Dubeyko <slava@dubeyko.com>
+Acked-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nilfs2/page.c | 2 ++
+ fs/nilfs2/segment.c | 11 +++++++++--
+ 2 files changed, 11 insertions(+), 2 deletions(-)
+
+--- a/fs/nilfs2/page.c
++++ b/fs/nilfs2/page.c
+@@ -94,6 +94,7 @@ void nilfs_forget_buffer(struct buffer_h
+ clear_buffer_nilfs_volatile(bh);
+ clear_buffer_nilfs_checked(bh);
+ clear_buffer_nilfs_redirected(bh);
++ clear_buffer_async_write(bh);
+ clear_buffer_dirty(bh);
+ if (nilfs_page_buffers_clean(page))
+ __nilfs_clear_page_dirty(page);
+@@ -429,6 +430,7 @@ void nilfs_clear_dirty_page(struct page
+ "discard block %llu, size %zu",
+ (u64)bh->b_blocknr, bh->b_size);
+ }
++ clear_buffer_async_write(bh);
+ clear_buffer_dirty(bh);
+ clear_buffer_nilfs_volatile(bh);
+ clear_buffer_nilfs_checked(bh);
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -665,7 +665,7 @@ static size_t nilfs_lookup_dirty_data_bu
+
+ bh = head = page_buffers(page);
+ do {
+- if (!buffer_dirty(bh))
++ if (!buffer_dirty(bh) || buffer_async_write(bh))
+ continue;
+ get_bh(bh);
+ list_add_tail(&bh->b_assoc_buffers, listp);
+@@ -699,7 +699,8 @@ static void nilfs_lookup_dirty_node_buff
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ bh = head = page_buffers(pvec.pages[i]);
+ do {
+- if (buffer_dirty(bh)) {
++ if (buffer_dirty(bh) &&
++ !buffer_async_write(bh)) {
+ get_bh(bh);
+ list_add_tail(&bh->b_assoc_buffers,
+ listp);
+@@ -1579,6 +1580,7 @@ static void nilfs_segctor_prepare_write(
+
+ list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
+ b_assoc_buffers) {
++ set_buffer_async_write(bh);
+ if (bh->b_page != bd_page) {
+ if (bd_page) {
+ lock_page(bd_page);
+@@ -1592,6 +1594,7 @@ static void nilfs_segctor_prepare_write(
+
+ list_for_each_entry(bh, &segbuf->sb_payload_buffers,
+ b_assoc_buffers) {
++ set_buffer_async_write(bh);
+ if (bh == segbuf->sb_super_root) {
+ if (bh->b_page != bd_page) {
+ lock_page(bd_page);
+@@ -1677,6 +1680,7 @@ static void nilfs_abort_logs(struct list
+ list_for_each_entry(segbuf, logs, sb_list) {
+ list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
+ b_assoc_buffers) {
++ clear_buffer_async_write(bh);
+ if (bh->b_page != bd_page) {
+ if (bd_page)
+ end_page_writeback(bd_page);
+@@ -1686,6 +1690,7 @@ static void nilfs_abort_logs(struct list
+
+ list_for_each_entry(bh, &segbuf->sb_payload_buffers,
+ b_assoc_buffers) {
++ clear_buffer_async_write(bh);
+ if (bh == segbuf->sb_super_root) {
+ if (bh->b_page != bd_page) {
+ end_page_writeback(bd_page);
+@@ -1755,6 +1760,7 @@ static void nilfs_segctor_complete_write
+ b_assoc_buffers) {
+ set_buffer_uptodate(bh);
+ clear_buffer_dirty(bh);
++ clear_buffer_async_write(bh);
+ if (bh->b_page != bd_page) {
+ if (bd_page)
+ end_page_writeback(bd_page);
+@@ -1776,6 +1782,7 @@ static void nilfs_segctor_complete_write
+ b_assoc_buffers) {
+ set_buffer_uptodate(bh);
+ clear_buffer_dirty(bh);
++ clear_buffer_async_write(bh);
+ clear_buffer_delay(bh);
+ clear_buffer_nilfs_volatile(bh);
+ clear_buffer_nilfs_redirected(bh);
--- /dev/null
+From f7e3334a6bcb42e7295a9bd9cb36ca4e6e4e66b4 Mon Sep 17 00:00:00 2001
+From: Nathan Fontenot <nfont@linux.vnet.ibm.com>
+Date: Fri, 27 Sep 2013 10:18:09 -0500
+Subject: powerpc: Fix memory hotplug with sparse vmemmap
+
+From: Nathan Fontenot <nfont@linux.vnet.ibm.com>
+
+commit f7e3334a6bcb42e7295a9bd9cb36ca4e6e4e66b4 upstream.
+
+Previous commit 46723bfa540... introduced a new config option
+HAVE_BOOTMEM_INFO_NODE that ended up breaking memory hot-remove for ppc
+when sparse vmemmap is not defined.
+
+This patch defines HAVE_BOOTMEM_INFO_NODE for ppc and adds the call to
+register_page_bootmem_info_node. Without this we get a BUG_ON for memory
+hot remove in put_page_bootmem().
+
+This also adds a stub for register_page_bootmem_memmap to allow ppc to build
+with sparse vmemmap defined. Leaving this as a stub is fine since the same
+vmemmap addresses are also handled in vmemmap_populate and as such are
+properly mapped.
+
+Signed-off-by: Nathan Fontenot <nfont@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/init_64.c | 4 ++++
+ arch/powerpc/mm/mem.c | 9 +++++++++
+ mm/Kconfig | 2 +-
+ 3 files changed, 14 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/mm/init_64.c
++++ b/arch/powerpc/mm/init_64.c
+@@ -300,5 +300,9 @@ void vmemmap_free(unsigned long start, u
+ {
+ }
+
++void register_page_bootmem_memmap(unsigned long section_nr,
++ struct page *start_page, unsigned long size)
++{
++}
+ #endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -297,12 +297,21 @@ void __init paging_init(void)
+ }
+ #endif /* ! CONFIG_NEED_MULTIPLE_NODES */
+
++static void __init register_page_bootmem_info(void)
++{
++ int i;
++
++ for_each_online_node(i)
++ register_page_bootmem_info_node(NODE_DATA(i));
++}
++
+ void __init mem_init(void)
+ {
+ #ifdef CONFIG_SWIOTLB
+ swiotlb_init(0);
+ #endif
+
++ register_page_bootmem_info();
+ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ set_max_mapnr(max_pfn);
+ free_all_bootmem();
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -183,7 +183,7 @@ config MEMORY_HOTPLUG_SPARSE
+ config MEMORY_HOTREMOVE
+ bool "Allow for memory hot remove"
+ select MEMORY_ISOLATION
+- select HAVE_BOOTMEM_INFO_NODE if X86_64
++ select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
+ depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
+ depends on MIGRATION
+
--- /dev/null
+From d9813c3681a36774b254c0cdc9cce53c9e22c756 Mon Sep 17 00:00:00 2001
+From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Date: Tue, 1 Oct 2013 16:54:05 +1000
+Subject: powerpc: Fix parameter clobber in csum_partial_copy_generic()
+
+From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+
+commit d9813c3681a36774b254c0cdc9cce53c9e22c756 upstream.
+
+The csum_partial_copy_generic() uses register r7 to adjust the remaining
+bytes to process. Unfortunately, r7 also holds a parameter, namely the
+address of the flag to set in case of access exceptions while reading
+the source buffer. Lacking a quantum implementation of PowerPC, this
+commit instead uses register r9 to do the adjusting, leaving r7's
+pointer uncorrupted.
+
+Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Anton Blanchard <anton@samba.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/lib/checksum_64.S | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/lib/checksum_64.S
++++ b/arch/powerpc/lib/checksum_64.S
+@@ -269,8 +269,8 @@ _GLOBAL(csum_partial_copy_generic)
+ rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */
+ beq .Lcopy_aligned
+
+- li r7,4
+- sub r6,r7,r6
++ li r9,4
++ sub r6,r9,r6
+ mtctr r6
+
+ 1:
--- /dev/null
+From 1cf389df090194a0976dc867b7fffe99d9d490cb Mon Sep 17 00:00:00 2001
+From: Nishanth Aravamudan <nacc@linux.vnet.ibm.com>
+Date: Tue, 1 Oct 2013 14:04:53 -0700
+Subject: powerpc/iommu: Use GFP_KERNEL instead of GFP_ATOMIC in iommu_init_table()
+
+From: Nishanth Aravamudan <nacc@linux.vnet.ibm.com>
+
+commit 1cf389df090194a0976dc867b7fffe99d9d490cb upstream.
+
+Under heavy (DLPAR?) stress, we tripped this panic() in
+arch/powerpc/kernel/iommu.c::iommu_init_table():
+
+ page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
+ if (!page)
+ panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
+
+Before the panic() we got a page allocation failure for an order-2
+allocation. There appears to be memory free, but perhaps not in the
+ATOMIC context. I looked through all the call-sites of
+iommu_init_table() and didn't see any obvious reason to need an ATOMIC
+allocation. Most call-sites in fact have an explicit GFP_KERNEL
+allocation shortly before the call to iommu_init_table(), indicating we
+are not in an atomic context. There is some indirection for some paths,
+but I didn't see any locks indicating that GFP_KERNEL is inappropriate.
+
+With this change under the same conditions, we have not been able to
+reproduce the panic.
+
+Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/iommu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -661,7 +661,7 @@ struct iommu_table *iommu_init_table(str
+ /* number of bytes needed for the bitmap */
+ sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
+
+- page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
++ page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
+ if (!page)
+ panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
+ tbl->it_map = page_address(page);
--- /dev/null
+From a53b27b3abeef406de92a2bb0ceb6fb4c3fb8fc4 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <michael@ellerman.id.au>
+Date: Wed, 2 Oct 2013 18:04:06 +1000
+Subject: powerpc/perf: Fix handling of FAB events
+
+From: Michael Ellerman <michael@ellerman.id.au>
+
+commit a53b27b3abeef406de92a2bb0ceb6fb4c3fb8fc4 upstream.
+
+Commit 4df4899 "Add power8 EBB support" included a bug in the handling
+of the FAB_CRESP_MATCH and FAB_TYPE_MATCH fields.
+
+These values are pulled out of the event code using EVENT_THR_CTL_SHIFT,
+however we were then or'ing that value directly into MMCR1.
+
+This meant we were failing to set the FAB fields correctly, and also
+potentially corrupting the value for PMC4SEL, leading to no counts for
+the FAB events and incorrect counts for PMC4.
+
+The fix is simply to shift left the FAB value correctly before or'ing it
+with MMCR1.
+
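+That is, condensed from the hunk below:
+
+    /* before: field or'ed in at bit 0, on top of PMC4SEL */
+    mmcr1 |=  (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
+
+    /* after: field placed in the FAB match bits */
+    mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK)
+                  << MMCR1_FAB_SHIFT;
+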
+Reported-by: Sooraj Ravindran Nair <soonair3@in.ibm.com>
+Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/perf/power8-pmu.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/perf/power8-pmu.c
++++ b/arch/powerpc/perf/power8-pmu.c
+@@ -199,6 +199,7 @@
+ #define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
+ #define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
+ #define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
++#define MMCR1_FAB_SHIFT 36
+ #define MMCR1_DC_QUAL_SHIFT 47
+ #define MMCR1_IC_QUAL_SHIFT 46
+
+@@ -388,8 +389,8 @@ static int power8_compute_mmcr(u64 event
+ * the threshold bits are used for the match value.
+ */
+ if (event_is_fab_match(event[i])) {
+- mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) &
+- EVENT_THR_CTL_MASK;
++ mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
++ EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
+ } else {
+ val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
+ mmcra |= val << MMCRA_THR_CTL_SHIFT;
--- /dev/null
+From 8f21bd0090052e740944f9397e2be5ac7957ded7 Mon Sep 17 00:00:00 2001
+From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Date: Tue, 1 Oct 2013 17:11:35 +1000
+Subject: powerpc: Restore registers on error exit from csum_partial_copy_generic()
+
+From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+
+commit 8f21bd0090052e740944f9397e2be5ac7957ded7 upstream.
+
+The csum_partial_copy_generic() function saves the PowerPC non-volatile
+r14, r15, and r16 registers for the main checksum-and-copy loop.
+Unfortunately, it fails to restore them upon error exit from this loop,
+which results in silent corruption of these registers in the presumably
+rare event of an access exception within that loop.
+
+This commit therefore restores these registers on error exit from the loop.
+
+Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Anton Blanchard <anton@samba.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/lib/checksum_64.S | 54 ++++++++++++++++++++++++++++++-----------
+ 1 file changed, 40 insertions(+), 14 deletions(-)
+
+--- a/arch/powerpc/lib/checksum_64.S
++++ b/arch/powerpc/lib/checksum_64.S
+@@ -226,19 +226,35 @@ _GLOBAL(csum_partial)
+ blr
+
+
+- .macro source
++ .macro srcnr
+ 100:
+ .section __ex_table,"a"
+ .align 3
+- .llong 100b,.Lsrc_error
++ .llong 100b,.Lsrc_error_nr
+ .previous
+ .endm
+
+- .macro dest
++ .macro source
++150:
++ .section __ex_table,"a"
++ .align 3
++ .llong 150b,.Lsrc_error
++ .previous
++ .endm
++
++ .macro dstnr
+ 200:
+ .section __ex_table,"a"
+ .align 3
+- .llong 200b,.Ldest_error
++ .llong 200b,.Ldest_error_nr
++ .previous
++ .endm
++
++ .macro dest
++250:
++ .section __ex_table,"a"
++ .align 3
++ .llong 250b,.Ldest_error
+ .previous
+ .endm
+
+@@ -274,11 +290,11 @@ _GLOBAL(csum_partial_copy_generic)
+ mtctr r6
+
+ 1:
+-source; lhz r6,0(r3) /* align to doubleword */
++srcnr; lhz r6,0(r3) /* align to doubleword */
+ subi r5,r5,2
+ addi r3,r3,2
+ adde r0,r0,r6
+-dest; sth r6,0(r4)
++dstnr; sth r6,0(r4)
+ addi r4,r4,2
+ bdnz 1b
+
+@@ -392,10 +408,10 @@ dest; std r16,56(r4)
+
+ mtctr r6
+ 3:
+-source; ld r6,0(r3)
++srcnr; ld r6,0(r3)
+ addi r3,r3,8
+ adde r0,r0,r6
+-dest; std r6,0(r4)
++dstnr; std r6,0(r4)
+ addi r4,r4,8
+ bdnz 3b
+
+@@ -405,10 +421,10 @@ dest; std r6,0(r4)
+ srdi. r6,r5,2
+ beq .Lcopy_tail_halfword
+
+-source; lwz r6,0(r3)
++srcnr; lwz r6,0(r3)
+ addi r3,r3,4
+ adde r0,r0,r6
+-dest; stw r6,0(r4)
++dstnr; stw r6,0(r4)
+ addi r4,r4,4
+ subi r5,r5,4
+
+@@ -416,10 +432,10 @@ dest; stw r6,0(r4)
+ srdi. r6,r5,1
+ beq .Lcopy_tail_byte
+
+-source; lhz r6,0(r3)
++srcnr; lhz r6,0(r3)
+ addi r3,r3,2
+ adde r0,r0,r6
+-dest; sth r6,0(r4)
++dstnr; sth r6,0(r4)
+ addi r4,r4,2
+ subi r5,r5,2
+
+@@ -427,10 +443,10 @@ dest; sth r6,0(r4)
+ andi. r6,r5,1
+ beq .Lcopy_finish
+
+-source; lbz r6,0(r3)
++srcnr; lbz r6,0(r3)
+ sldi r9,r6,8 /* Pad the byte out to 16 bits */
+ adde r0,r0,r9
+-dest; stb r6,0(r4)
++dstnr; stb r6,0(r4)
+
+ .Lcopy_finish:
+ addze r0,r0 /* add in final carry */
+@@ -440,6 +456,11 @@ dest; stb r6,0(r4)
+ blr
+
+ .Lsrc_error:
++ ld r14,STK_REG(R14)(r1)
++ ld r15,STK_REG(R15)(r1)
++ ld r16,STK_REG(R16)(r1)
++ addi r1,r1,STACKFRAMESIZE
++.Lsrc_error_nr:
+ cmpdi 0,r7,0
+ beqlr
+ li r6,-EFAULT
+@@ -447,6 +468,11 @@ dest; stb r6,0(r4)
+ blr
+
+ .Ldest_error:
++ ld r14,STK_REG(R14)(r1)
++ ld r15,STK_REG(R15)(r1)
++ ld r16,STK_REG(R16)(r1)
++ addi r1,r1,STACKFRAMESIZE
++.Ldest_error_nr:
+ cmpdi 0,r8,0
+ beqlr
+ li r6,-EFAULT
--- /dev/null
+From d1211af3049f4c9c1d8d4eb8f8098cc4f4f0d0c7 Mon Sep 17 00:00:00 2001
+From: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
+Date: Wed, 2 Oct 2013 00:34:10 +0530
+Subject: powerpc/sysfs: Disable writing to PURR in guest mode
+
+From: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
+
+commit d1211af3049f4c9c1d8d4eb8f8098cc4f4f0d0c7 upstream.
+
+arch/powerpc/kernel/sysfs.c exports PURR with write permission.
+This may be valid for a kernel in phyp mode, but writing to
+the file in guest mode causes a crash due to a privilege violation.
+
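+In outline (matching the register_cpu_online() hunk below), the attribute
+is now created read-only and write permission is added back only when not
+running as a guest:
+
+    if (cpu_has_feature(CPU_FTR_PURR)) {
+        if (!firmware_has_feature(FW_FEATURE_LPAR))
+            add_write_permission_dev_attr(&dev_attr_purr);  /* not a guest */
+        device_create_file(s, &dev_attr_purr);
+    }
+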
+Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/sysfs.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kernel/sysfs.c
++++ b/arch/powerpc/kernel/sysfs.c
+@@ -17,6 +17,7 @@
+ #include <asm/machdep.h>
+ #include <asm/smp.h>
+ #include <asm/pmc.h>
++#include <asm/firmware.h>
+
+ #include "cacheinfo.h"
+
+@@ -179,15 +180,25 @@ SYSFS_PMCSETUP(spurr, SPRN_SPURR);
+ SYSFS_PMCSETUP(dscr, SPRN_DSCR);
+ SYSFS_PMCSETUP(pir, SPRN_PIR);
+
++/*
++ Lets only enable read for phyp resources and
++ enable write when needed with a separate function.
++ Lets be conservative and default to pseries.
++*/
+ static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
+ static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
+ static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
+-static DEVICE_ATTR(purr, 0600, show_purr, store_purr);
++static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
+ static DEVICE_ATTR(pir, 0400, show_pir, NULL);
+
+ unsigned long dscr_default = 0;
+ EXPORT_SYMBOL(dscr_default);
+
++static void add_write_permission_dev_attr(struct device_attribute *attr)
++{
++ attr->attr.mode |= 0200;
++}
++
+ static ssize_t show_dscr_default(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+@@ -394,8 +405,11 @@ static void register_cpu_online(unsigned
+ if (cpu_has_feature(CPU_FTR_MMCRA))
+ device_create_file(s, &dev_attr_mmcra);
+
+- if (cpu_has_feature(CPU_FTR_PURR))
++ if (cpu_has_feature(CPU_FTR_PURR)) {
++ if (!firmware_has_feature(FW_FEATURE_LPAR))
++ add_write_permission_dev_attr(&dev_attr_purr);
+ device_create_file(s, &dev_attr_purr);
++ }
+
+ if (cpu_has_feature(CPU_FTR_SPURR))
+ device_create_file(s, &dev_attr_spurr);
--- /dev/null
+From e9bdc3d6143d1c4b8d8ce5231fc958268331f983 Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Thu, 26 Sep 2013 13:29:09 +1000
+Subject: powerpc/tm: Switch out userspace PPR and DSCR sooner
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit e9bdc3d6143d1c4b8d8ce5231fc958268331f983 upstream.
+
+When we do a treclaim or trecheckpoint we end up running with userspace
+PPR and DSCR values. Currently we don't do anything special to avoid
+running with user values which could cause a severe performance
+degradation.
+
+This patch moves the PPR and DSCR save and restore around treclaim and
+trecheckpoint so that we run with user values for a much shorter period.
+More care is taken with the PPR as its impact is greater than the DSCR's.
+
+This is similar to user exceptions, where we run HMT_MEDIUM early to
+ensure that we don't run with userspace PPR values in the kernel.
+
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/tm.S | 94 +++++++++++++++++++++++++++++++----------------
+ 1 file changed, 63 insertions(+), 31 deletions(-)
+
+--- a/arch/powerpc/kernel/tm.S
++++ b/arch/powerpc/kernel/tm.S
+@@ -79,6 +79,11 @@ _GLOBAL(tm_abort)
+ TABORT(R3)
+ blr
+
++ .section ".toc","aw"
++DSCR_DEFAULT:
++ .tc dscr_default[TC],dscr_default
++
++ .section ".text"
+
+ /* void tm_reclaim(struct thread_struct *thread,
+ * unsigned long orig_msr,
+@@ -187,11 +192,18 @@ dont_backup_fp:
+ std r1, PACATMSCRATCH(r13)
+ ld r1, PACAR1(r13)
+
++ /* Store the PPR in r11 and reset to decent value */
++ std r11, GPR11(r1) /* Temporary stash */
++ mfspr r11, SPRN_PPR
++ HMT_MEDIUM
++
+ /* Now get some more GPRS free */
+ std r7, GPR7(r1) /* Temporary stash */
+ std r12, GPR12(r1) /* '' '' '' */
+ ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */
+
++ std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */
++
+ addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */
+
+ /* Make r7 look like an exception frame so that we
+@@ -203,15 +215,19 @@ dont_backup_fp:
+ SAVE_GPR(0, r7) /* user r0 */
+ SAVE_GPR(2, r7) /* user r2 */
+ SAVE_4GPRS(3, r7) /* user r3-r6 */
+- SAVE_4GPRS(8, r7) /* user r8-r11 */
++ SAVE_GPR(8, r7) /* user r8 */
++ SAVE_GPR(9, r7) /* user r9 */
++ SAVE_GPR(10, r7) /* user r10 */
+ ld r3, PACATMSCRATCH(r13) /* user r1 */
+ ld r4, GPR7(r1) /* user r7 */
+- ld r5, GPR12(r1) /* user r12 */
+- GET_SCRATCH0(6) /* user r13 */
++ ld r5, GPR11(r1) /* user r11 */
++ ld r6, GPR12(r1) /* user r12 */
++ GET_SCRATCH0(8) /* user r13 */
+ std r3, GPR1(r7)
+ std r4, GPR7(r7)
+- std r5, GPR12(r7)
+- std r6, GPR13(r7)
++ std r5, GPR11(r7)
++ std r6, GPR12(r7)
++ std r8, GPR13(r7)
+
+ SAVE_NVGPRS(r7) /* user r14-r31 */
+
+@@ -234,14 +250,12 @@ dont_backup_fp:
+ std r6, _XER(r7)
+
+
+- /* ******************** TAR, PPR, DSCR ********** */
++ /* ******************** TAR, DSCR ********** */
+ mfspr r3, SPRN_TAR
+- mfspr r4, SPRN_PPR
+- mfspr r5, SPRN_DSCR
++ mfspr r4, SPRN_DSCR
+
+ std r3, THREAD_TM_TAR(r12)
+- std r4, THREAD_TM_PPR(r12)
+- std r5, THREAD_TM_DSCR(r12)
++ std r4, THREAD_TM_DSCR(r12)
+
+ /* MSR and flags: We don't change CRs, and we don't need to alter
+ * MSR.
+@@ -258,7 +272,7 @@ dont_backup_fp:
+ std r3, THREAD_TM_TFHAR(r12)
+ std r4, THREAD_TM_TFIAR(r12)
+
+- /* AMR and PPR are checkpointed too, but are unsupported by Linux. */
++ /* AMR is checkpointed too, but is unsupported by Linux. */
+
+ /* Restore original MSR/IRQ state & clear TM mode */
+ ld r14, TM_FRAME_L0(r1) /* Orig MSR */
+@@ -274,6 +288,12 @@ dont_backup_fp:
+ mtcr r4
+ mtlr r0
+ ld r2, 40(r1)
++
++ /* Load system default DSCR */
++ ld r4, DSCR_DEFAULT@toc(r2)
++ ld r0, 0(r4)
++ mtspr SPRN_DSCR, r0
++
+ blr
+
+
+@@ -358,25 +378,24 @@ dont_restore_fp:
+
+ restore_gprs:
+
+- /* ******************** TAR, PPR, DSCR ********** */
+- ld r4, THREAD_TM_TAR(r3)
+- ld r5, THREAD_TM_PPR(r3)
+- ld r6, THREAD_TM_DSCR(r3)
++ /* ******************** CR,LR,CCR,MSR ********** */
++ ld r4, _CTR(r7)
++ ld r5, _LINK(r7)
++ ld r6, _CCR(r7)
++ ld r8, _XER(r7)
++
++ mtctr r4
++ mtlr r5
++ mtcr r6
++ mtxer r8
+
++ /* ******************** TAR ******************** */
++ ld r4, THREAD_TM_TAR(r3)
+ mtspr SPRN_TAR, r4
+- mtspr SPRN_PPR, r5
+- mtspr SPRN_DSCR, r6
+
+- /* ******************** CR,LR,CCR,MSR ********** */
+- ld r3, _CTR(r7)
+- ld r4, _LINK(r7)
+- ld r5, _CCR(r7)
+- ld r6, _XER(r7)
+-
+- mtctr r3
+- mtlr r4
+- mtcr r5
+- mtxer r6
++ /* Load up the PPR and DSCR in GPRs only at this stage */
++ ld r5, THREAD_TM_DSCR(r3)
++ ld r6, THREAD_TM_PPR(r3)
+
+ /* Clear the MSR RI since we are about to change R1. EE is already off
+ */
+@@ -384,19 +403,26 @@ restore_gprs:
+ mtmsrd r4, 1
+
+ REST_4GPRS(0, r7) /* GPR0-3 */
+- REST_GPR(4, r7) /* GPR4-6 */
+- REST_GPR(5, r7)
+- REST_GPR(6, r7)
++ REST_GPR(4, r7) /* GPR4 */
+ REST_4GPRS(8, r7) /* GPR8-11 */
+ REST_2GPRS(12, r7) /* GPR12-13 */
+
+ REST_NVGPRS(r7) /* GPR14-31 */
+
+- ld r7, GPR7(r7) /* GPR7 */
++ /* Load up PPR and DSCR here so we don't run with user values for long
++ */
++ mtspr SPRN_DSCR, r5
++ mtspr SPRN_PPR, r6
++
++ REST_GPR(5, r7) /* GPR5-7 */
++ REST_GPR(6, r7)
++ ld r7, GPR7(r7)
+
+ /* Commit register state as checkpointed state: */
+ TRECHKPT
+
++ HMT_MEDIUM
++
+ /* Our transactional state has now changed.
+ *
+ * Now just get out of here. Transactional (current) state will be
+@@ -419,6 +445,12 @@ restore_gprs:
+ mtcr r4
+ mtlr r0
+ ld r2, 40(r1)
++
++ /* Load system default DSCR */
++ ld r4, DSCR_DEFAULT@toc(r2)
++ ld r0, 0(r4)
++ mtspr SPRN_DSCR, r0
++
+ blr
+
+ /* ****************************************************************** */
--- /dev/null
+From c69e63b0f135fa51d6e1c38b5ac8a1def15ea3fa Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Wed, 2 Oct 2013 17:15:15 +1000
+Subject: powerpc/tm: Turn interrupts hard off in tm_reclaim()
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit c69e63b0f135fa51d6e1c38b5ac8a1def15ea3fa upstream.
+
+We can't take IRQs in tm_reclaim as we might have a bogus r13 and r1.
+
+This turns IRQs hard off in this function.
+
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/tm.S | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/powerpc/kernel/tm.S
++++ b/arch/powerpc/kernel/tm.S
+@@ -128,6 +128,7 @@ _GLOBAL(tm_reclaim)
+ mr r15, r14
+ ori r15, r15, MSR_FP
+ li r16, MSR_RI
++ ori r16, r16, MSR_EE /* IRQs hard off */
+ andc r15, r15, r16
+ oris r15, r15, MSR_VEC@h
+ #ifdef CONFIG_VSX
--- /dev/null
+From e82b89a6f19bae73fb064d1b3dd91fcefbb478f4 Mon Sep 17 00:00:00 2001
+From: Prarit Bhargava <prarit@redhat.com>
+Date: Mon, 23 Sep 2013 09:33:36 -0400
+Subject: powerpc/vio: Fix modalias_show return values
+
+From: Prarit Bhargava <prarit@redhat.com>
+
+commit e82b89a6f19bae73fb064d1b3dd91fcefbb478f4 upstream.
+
+modalias_show() should return an empty string on error, not -ENODEV.
+
+This causes the following false and annoying error:
+
+> find /sys/devices -name modalias -print0 | xargs -0 cat >/dev/null
+cat: /sys/devices/vio/4000/modalias: No such device
+cat: /sys/devices/vio/4001/modalias: No such device
+cat: /sys/devices/vio/4002/modalias: No such device
+cat: /sys/devices/vio/4004/modalias: No such device
+cat: /sys/devices/vio/modalias: No such device
+
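+In outline (matching the hunk below), a missing node or property now
+yields an empty modalias rather than an error:
+
+    dn = dev->of_node;
+    if (!dn) {
+        strcat(buf, "\n");      /* empty string, not -ENODEV */
+        return strlen(buf);
+    }
+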
+Signed-off-by: Prarit Bhargava <prarit@redhat.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/vio.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/arch/powerpc/kernel/vio.c
++++ b/arch/powerpc/kernel/vio.c
+@@ -1529,11 +1529,15 @@ static ssize_t modalias_show(struct devi
+ const char *cp;
+
+ dn = dev->of_node;
+- if (!dn)
+- return -ENODEV;
++ if (!dn) {
++ strcat(buf, "\n");
++ return strlen(buf);
++ }
+ cp = of_get_property(dn, "compatible", NULL);
+- if (!cp)
+- return -ENODEV;
++ if (!cp) {
++ strcat(buf, "\n");
++ return strlen(buf);
++ }
+
+ return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
+ }
--- /dev/null
+From bf00ca35cec8f0894dcfd90f88b03af1d5c7b86f Mon Sep 17 00:00:00 2001
+From: Nishanth Menon <nm@ti.com>
+Date: Fri, 27 Sep 2013 08:25:14 -0500
+Subject: regulator: ti-abb: Fix bias voltage glitch in transition to bypass mode
+
+From: Nishanth Menon <nm@ti.com>
+
+commit bf00ca35cec8f0894dcfd90f88b03af1d5c7b86f upstream.
+
+As documented in Application Note SWPA117 v2.1(NDA), LDO override has a
+requirement that when switching from Bias active + override active
+mode(FBB/RBB) to Bypass(nominal) mode, LDO reset must be performed
+*after* LDO transitions to Bypass(nominal) mode.
+
+The same rule in reverse applies when switching from an ABB bypass mode
+to ABB enabled - LDO override *must* be performed prior to the transition
+to the required ABB mode; if we do not do that, the same glitch takes place.
+
+Currently, while transitioning to ABB bypass, we reset the LDO override
+prior to the transition, which causes a few milliseconds where the ABB LDO
+voltage could go all the way to 800mV (based on SoC process node);
+during this period, the delta voltage between the VDD rail and the VBB
+rail could cause the system to function improperly.
+
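+The resulting ordering in ti_abb_set_opp(), condensed from the hunks below:
+
+    /* !bypass: program the LDO VBB override *before* switching bias mode */
+    if (abb->ldo_base && info->opp_sel != TI_ABB_NOMINAL_OPP)
+        ti_abb_program_ldovbb(dev, abb, info);
+
+    /* ... initiate the ABB change and wait for the transition ... */
+
+    /* bypass: reset the LDO VBB override only *after* the switch completes */
+    if (abb->ldo_base && info->opp_sel == TI_ABB_NOMINAL_OPP)
+        ti_abb_program_ldovbb(dev, abb, info);
+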
+Signed-off-by: Nishanth Menon <nm@ti.com>
+Signed-off-by: Mark Brown <broonie@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/regulator/ti-abb-regulator.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/drivers/regulator/ti-abb-regulator.c
++++ b/drivers/regulator/ti-abb-regulator.c
+@@ -279,8 +279,12 @@ static int ti_abb_set_opp(struct regulat
+ ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg,
+ abb->base);
+
+- /* program LDO VBB vset override if needed */
+- if (abb->ldo_base)
++ /*
++ * program LDO VBB vset override if needed for !bypass mode
++ * XXX: Do not switch sequence - for !bypass, LDO override reset *must*
++ * be performed *before* switch to bias mode else VBB glitches.
++ */
++ if (abb->ldo_base && info->opp_sel != TI_ABB_NOMINAL_OPP)
+ ti_abb_program_ldovbb(dev, abb, info);
+
+ /* Initiate ABB ldo change */
+@@ -295,6 +299,14 @@ static int ti_abb_set_opp(struct regulat
+ if (ret)
+ goto out;
+
++ /*
++ * Reset LDO VBB vset override bypass mode
++ * XXX: Do not switch sequence - for bypass, LDO override reset *must*
++ * be performed *after* switch to bypass else VBB glitches.
++ */
++ if (abb->ldo_base && info->opp_sel == TI_ABB_NOMINAL_OPP)
++ ti_abb_program_ldovbb(dev, abb, info);
++
+ out:
+ return ret;
+ }
ip_tunnel_core-change-__skb_push-back-to-skb_push.patch
sit-allow-to-use-rtnl-ops-on-fb-tunnel.patch
ip6tnl-allow-to-use-rtnl-ops-on-fb-tunnel.patch
+avr32-fix-clockevents-kernel-warning.patch
+regulator-ti-abb-fix-bias-voltage-glitch-in-transition-to-bypass-mode.patch
+fs-binfmt_elf.c-prevent-a-coredump-with-a-large-vm_map_count-from-oopsing.patch
+gpio-omap-maintain-gpio-and-irq-usage-separately.patch
+gpio-omap-auto-setup-a-gpio-when-used-as-an-irq.patch
+asoc-max98095-a-couple-array-underflows.patch
+asoc-88pm860x-array-overflow-in-snd_soc_put_volsw_2r_st.patch
+asoc-ab8500-codec-info-leak-in-anc_status_control_put.patch
+arm-kvm-rename-cpu_reset-to-avoid-name-clash.patch
+arm-mach-integrator-add-stub-for-pci_v3_early_init-for-config_pci.patch
+iommu-arm-smmu-fix-a-signedness-bug.patch
+iommu-arm-smmu-fix-iommu_present-test-in-init.patch
+iommu-arm-smmu-don-t-enable-smmu-device-until-probing-has-completed.patch
+powerpc-iommu-use-gfp_kernel-instead-of-gfp_atomic-in-iommu_init_table.patch
+powerpc-perf-fix-handling-of-fab-events.patch
+powerpc-tm-switch-out-userspace-ppr-and-dscr-sooner.patch
+powerpc-tm-turn-interrupts-hard-off-in-tm_reclaim.patch
+powerpc-vio-fix-modalias_show-return-values.patch
+powerpc-fix-parameter-clobber-in-csum_partial_copy_generic.patch
+powerpc-fix-memory-hotplug-with-sparse-vmemmap.patch
+powerpc-sysfs-disable-writing-to-purr-in-guest-mode.patch
+powerpc-restore-registers-on-error-exit-from-csum_partial_copy_generic.patch
+fuse-wait-for-writeback-in-fuse_file_fallocate.patch
+fuse-fix-fallocate-vs.-ftruncate-race.patch
+brcmfmac-obtain-platform-data-upon-module-initialization.patch
+bluetooth-fix-security-level-for-peripheral-role.patch
+bluetooth-fix-encryption-key-size-for-peripheral-role.patch
+bluetooth-add-a-new-pid-vid-0cf3-e005-for-ar3012.patch
+bluetooth-add-support-for-bcm20702a0.patch
+bluetooth-introduce-a-new-hci_rfkilled-flag.patch
+bluetooth-fix-rfkill-functionality-during-the-hci-setup-stage.patch
+nilfs2-fix-issue-with-race-condition-of-competition-between-segments-for-dirty-blocks.patch
+ipc-sem.c-fix-race-in-sem_lock.patch
+ipc-msg-prevent-race-with-rmid-in-msgsnd-msgrcv.patch
+ipc-fix-race-with-lsms.patch