--- /dev/null
+From f9b71fef12f0d6ac5c7051cfd87f7700f78c56b6 Mon Sep 17 00:00:00 2001
+From: Peter Maydell <peter.maydell@linaro.org>
+Date: Thu, 22 Aug 2013 17:47:48 +0100
+Subject: ARM: PCI: versatile: Fix map_irq function to match hardware
+
+From: Peter Maydell <peter.maydell@linaro.org>
+
+commit f9b71fef12f0d6ac5c7051cfd87f7700f78c56b6 upstream.
+
+The PCI controller code for the Versatile board has never had the
+correct IRQ mapping for hardware. For many years it had an odd
+mapping ("all interrupts are int 27") which aligned with the
+equivalent bug in QEMU. However as of commit 1bc39ac5dab265
+the mapping changed and no longer matched either hardware or QEMU,
+with the result that any PCI card beyond the first in QEMU would
+not have functioning interrupts; for example a boot with a SCSI
+controller would time out as follows:
+
+ ------------
+ sym0: <895a> rev 0x0 at pci 0000:00:0d.0 irq 92
+ sym0: SCSI BUS has been reset.
+ scsi0 : sym-2.2.3
+ [...]
+ scsi 0:0:0:0: ABORT operation started
+ scsi 0:0:0:0: ABORT operation timed-out.
+ scsi 0:0:0:0: DEVICE RESET operation started
+ scsi 0:0:0:0: DEVICE RESET operation timed-out.
+ scsi 0:0:0:0: BUS RESET operation started
+ scsi 0:0:0:0: BUS RESET operation timed-out.
+ scsi 0:0:0:0: HOST RESET operation started
+ sym0: SCSI BUS has been reset
+ ------------
+
+Fix the mapping so that it matches real hardware (checked against the
+schematics for PB926 and backplane, and tested against the hardware).
+This allows PCI cards using interrupts to work on hardware for the
+first time; this change will also work with QEMU 1.5 or later, where
+the equivalent bugs in the modelling of the hardware have been fixed.
+
+Although QEMU will attempt to autodetect whether the kernel is
+expecting the long-standing "everything is int 27" mapping or the one
+hardware has, for certainty we force it into "definitely behave like
+hardware mode"; this will avoid unexpected surprises later if we
+implement sparse irqs. This is harmless on hardware.
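+
+As a worked illustration of the new swizzle formula (a standalone
+sketch, not part of the patch, assuming the IRQ_SIC_PCIn inputs are
+numbered consecutively), this reproduces the comment table added
+below for slots 29-31:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		int slot, pin;
+
+		for (slot = 29; slot <= 31; slot++)
+			for (pin = 1; pin <= 4; pin++)
+				printf("slot %d INT%c -> IRQ_SIC_PCI%d\n",
+				       slot, 'A' + pin - 1,
+				       (slot + 2 + pin - 1) & 3);
+		return 0;
+	}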
+
+Thanks to Paul Gortmaker for bisecting the problem and finding an initial
+solution, to Russell King for providing the correct interrupt mapping,
+and to Guenter Roeck for providing an initial version of this patch
+and prodding me into relocating the hardware and retesting everything.
+
+Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Kevin Hilman <khilman@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-versatile/pci.c | 25 +++++++++++++++++++------
+ 1 file changed, 19 insertions(+), 6 deletions(-)
+
+--- a/arch/arm/mach-versatile/pci.c
++++ b/arch/arm/mach-versatile/pci.c
+@@ -295,6 +295,19 @@ int __init pci_versatile_setup(int nr, s
+ __raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
+
+ /*
++ * For many years the kernel and QEMU were symbiotically buggy
++ * in that they both assumed the same broken IRQ mapping.
++ * QEMU therefore attempts to auto-detect old broken kernels
++ * so that they still work on newer QEMU as they did on old
++ * QEMU. Since we now use the correct (ie matching-hardware)
++ * IRQ mapping we write a definitely different value to a
++ * PCI_INTERRUPT_LINE register to tell QEMU that we expect
++ * real hardware behaviour and it need not be backwards
++ * compatible for us. This write is harmless on real hardware.
++ */
++ __raw_writel(0, VERSATILE_PCI_VIRT_BASE+PCI_INTERRUPT_LINE);
++
++ /*
+ * Do not to map Versatile FPGA PCI device into memory space
+ */
+ pci_slot_ignore |= (1 << myslot);
+@@ -327,13 +340,13 @@ static int __init versatile_map_irq(cons
+ {
+ int irq;
+
+- /* slot, pin, irq
+- * 24 1 IRQ_SIC_PCI0
+- * 25 1 IRQ_SIC_PCI1
+- * 26 1 IRQ_SIC_PCI2
+- * 27 1 IRQ_SIC_PCI3
++ /*
++ * Slot INTA INTB INTC INTD
++ * 31 PCI1 PCI2 PCI3 PCI0
++ * 30 PCI0 PCI1 PCI2 PCI3
++ * 29 PCI3 PCI0 PCI1 PCI2
+ */
+- irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);
++ irq = IRQ_SIC_PCI0 + ((slot + 2 + pin - 1) & 3);
+
+ return irq;
+ }
--- /dev/null
+From 829f9fedee30cde2ec15e88d57ec11074db791e2 Mon Sep 17 00:00:00 2001
+From: Peter Maydell <peter.maydell@linaro.org>
+Date: Thu, 22 Aug 2013 17:47:49 +0100
+Subject: ARM: PCI: versatile: Fix PCI I/O
+
+From: Peter Maydell <peter.maydell@linaro.org>
+
+commit 829f9fedee30cde2ec15e88d57ec11074db791e2 upstream.
+
+The versatile PCI controller code was confused between the
+PCI I/O window (at 0x43000000) and the first PCI memory
+window (at 0x44000000). Pass the correct base address to
+pci_remap_io() so that PCI I/O accesses work.
+
+Since the first PCI memory window isn't used at all (it's
+an odd size), rename the associated variables and labels
+so that it's clear that it isn't related to the I/O window.
+
+This has been tested and confirmed to fix PCI I/O accesses
+both on physical PB926+PCI backplane hardware and on QEMU.
+
+Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Kevin Hilman <khilman@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-versatile/include/mach/platform.h | 2 ++
+ arch/arm/mach-versatile/pci.c | 16 ++++++++--------
+ 2 files changed, 10 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/mach-versatile/include/mach/platform.h
++++ b/arch/arm/mach-versatile/include/mach/platform.h
+@@ -231,12 +231,14 @@
+ /* PCI space */
+ #define VERSATILE_PCI_BASE 0x41000000 /* PCI Interface */
+ #define VERSATILE_PCI_CFG_BASE 0x42000000
++#define VERSATILE_PCI_IO_BASE 0x43000000
+ #define VERSATILE_PCI_MEM_BASE0 0x44000000
+ #define VERSATILE_PCI_MEM_BASE1 0x50000000
+ #define VERSATILE_PCI_MEM_BASE2 0x60000000
+ /* Sizes of above maps */
+ #define VERSATILE_PCI_BASE_SIZE 0x01000000
+ #define VERSATILE_PCI_CFG_BASE_SIZE 0x02000000
++#define VERSATILE_PCI_IO_BASE_SIZE 0x01000000
+ #define VERSATILE_PCI_MEM_BASE0_SIZE 0x0c000000 /* 32Mb */
+ #define VERSATILE_PCI_MEM_BASE1_SIZE 0x10000000 /* 256Mb */
+ #define VERSATILE_PCI_MEM_BASE2_SIZE 0x10000000 /* 256Mb */
+--- a/arch/arm/mach-versatile/pci.c
++++ b/arch/arm/mach-versatile/pci.c
+@@ -170,8 +170,8 @@ static struct pci_ops pci_versatile_ops
+ .write = versatile_write_config,
+ };
+
+-static struct resource io_mem = {
+- .name = "PCI I/O space",
++static struct resource unused_mem = {
++ .name = "PCI unused",
+ .start = VERSATILE_PCI_MEM_BASE0,
+ .end = VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1,
+ .flags = IORESOURCE_MEM,
+@@ -195,9 +195,9 @@ static int __init pci_versatile_setup_re
+ {
+ int ret = 0;
+
+- ret = request_resource(&iomem_resource, &io_mem);
++ ret = request_resource(&iomem_resource, &unused_mem);
+ if (ret) {
+- printk(KERN_ERR "PCI: unable to allocate I/O "
++ printk(KERN_ERR "PCI: unable to allocate unused "
+ "memory region (%d)\n", ret);
+ goto out;
+ }
+@@ -205,7 +205,7 @@ static int __init pci_versatile_setup_re
+ if (ret) {
+ printk(KERN_ERR "PCI: unable to allocate non-prefetchable "
+ "memory region (%d)\n", ret);
+- goto release_io_mem;
++ goto release_unused_mem;
+ }
+ ret = request_resource(&iomem_resource, &pre_mem);
+ if (ret) {
+@@ -225,8 +225,8 @@ static int __init pci_versatile_setup_re
+
+ release_non_mem:
+ release_resource(&non_mem);
+- release_io_mem:
+- release_resource(&io_mem);
++ release_unused_mem:
++ release_resource(&unused_mem);
+ out:
+ return ret;
+ }
+@@ -246,7 +246,7 @@ int __init pci_versatile_setup(int nr, s
+ goto out;
+ }
+
+- ret = pci_ioremap_io(0, VERSATILE_PCI_MEM_BASE0);
++ ret = pci_ioremap_io(0, VERSATILE_PCI_IO_BASE);
+ if (ret)
+ goto out;
+
--- /dev/null
+From 99f2b130370b904ca5300079243fdbcafa2c708b Mon Sep 17 00:00:00 2001
+From: Peter Maydell <peter.maydell@linaro.org>
+Date: Thu, 22 Aug 2013 17:47:50 +0100
+Subject: ARM: PCI: versatile: Fix SMAP register offsets
+
+From: Peter Maydell <peter.maydell@linaro.org>
+
+commit 99f2b130370b904ca5300079243fdbcafa2c708b upstream.
+
+The SMAP register offsets in the versatile PCI controller code were
+all off by four. (This didn't have any observable bad effects
+because on this board PHYS_OFFSET is zero, and (a) writing zero to
+the flags register at offset 0x10 has no effect and (b) the reset
+value of the SMAP register is zero anyway, so failing to write SMAP2
+didn't matter.)
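+
+For reference, the register layout this implies (offsets from
+VERSATILE_PCI_CORE_BASE, taken from the defines below; the name of
+the register at 0x10 is inferred from the description above, not from
+the driver):
+
+	0x00  PCI_IMAP0
+	0x04  PCI_IMAP1
+	0x08  PCI_IMAP2
+	0x0c  PCI_SELFID
+	0x10  flags register (what the old "SMAP0" define actually hit)
+	0x14  PCI_SMAP0
+	0x18  PCI_SMAP1
+	0x1c  PCI_SMAP2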
+
+Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Kevin Hilman <khilman@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-versatile/pci.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/mach-versatile/pci.c
++++ b/arch/arm/mach-versatile/pci.c
+@@ -43,9 +43,9 @@
+ #define PCI_IMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x0)
+ #define PCI_IMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x4)
+ #define PCI_IMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x8)
+-#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x10)
+-#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
+-#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
++#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
++#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
++#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x1c)
+ #define PCI_SELFID __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0xc)
+
+ #define DEVICE_ID_OFFSET 0x00
--- /dev/null
+From 178cd9ce377232518ec17ff2ecab2e80fa60784c Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 20 Aug 2013 11:47:42 +0100
+Subject: arm64: perf: fix ARMv8 EVTYPE_MASK to include NSH bit
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 178cd9ce377232518ec17ff2ecab2e80fa60784c upstream.
+
+This is a port of f2fe09b055e2 ("ARM: 7663/1: perf: fix ARMv7 EVTYPE_MASK
+to include NSH bit") to arm64, which fixes the broken evtype mask to
+include the NSH bit, allowing profiling at EL2.
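+
+A quick standalone check (an illustration, not kernel code; uses the
+GCC/Clang __builtin_ctz) that the only bit added to the writable mask
+is bit 27, the NSH filter bit that permits counting at EL2:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned int old_mask = 0xc00000ff;
+		unsigned int new_mask = 0xc80000ff;
+		unsigned int added = new_mask & ~old_mask;
+
+		/* prints: added 0x08000000 (bit 27) */
+		printf("added 0x%08x (bit %d)\n", added, __builtin_ctz(added));
+		return 0;
+	}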
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/perf_event.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -784,7 +784,7 @@ static const unsigned armv8_pmuv3_perf_c
+ /*
+ * PMXEVTYPER: Event selection reg
+ */
+-#define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
++#define ARMV8_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
+ #define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
+
+ /*
--- /dev/null
+From 8455e6ec70f33b0e8c3ffd47067e00481f09f454 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 20 Aug 2013 11:47:41 +0100
+Subject: arm64: perf: fix group validation when using enable_on_exec
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 8455e6ec70f33b0e8c3ffd47067e00481f09f454 upstream.
+
+This is a port of cb2d8b342aa0 ("ARM: 7698/1: perf: fix group validation
+when using enable_on_exec") to arm64, which fixes the event validation
+checking so that events in the OFF state are still considered when
+enable_on_exec is true.
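+
+In outline, the sibling checks in validate_event() become (a
+restatement of the hunk below, not additional code):
+
+	state <  OFF (e.g. ERROR)            -> skip, treat as schedulable
+	state == OFF && !attr.enable_on_exec -> skip, treat as schedulable
+	anything else                        -> run it through get_event_idx()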
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/perf_event.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -325,7 +325,10 @@ validate_event(struct pmu_hw_events *hw_
+ if (is_software_event(event))
+ return 1;
+
+- if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
++ if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
++ return 1;
++
++ if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
+ return 1;
+
+ return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
--- /dev/null
+From 73e216a8a42c0ef3d08071705c946c38fdbe12b0 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@redhat.com>
+Date: Thu, 5 Sep 2013 08:38:10 -0400
+Subject: cifs: ensure that srv_mutex is held when dealing with ssocket pointer
+
+From: Jeff Layton <jlayton@redhat.com>
+
+commit 73e216a8a42c0ef3d08071705c946c38fdbe12b0 upstream.
+
+Oleksii reported that he had seen an oops similar to this:
+
+BUG: unable to handle kernel NULL pointer dereference at 0000000000000088
+IP: [<ffffffff814dcc13>] sock_sendmsg+0x93/0xd0
+PGD 0
+Oops: 0000 [#1] PREEMPT SMP
+Modules linked in: ipt_MASQUERADE xt_REDIRECT xt_tcpudp iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4 nf_nat nf_conntrack ip_tables x_tables carl9170 ath usb_storage f2fs nfnetlink_log nfnetlink md4 cifs dns_resolver hid_generic usbhid hid af_packet uvcvideo videobuf2_vmalloc videobuf2_memops videobuf2_core videodev rfcomm btusb bnep bluetooth qmi_wwan qcserial cdc_wdm usb_wwan usbnet usbserial mii snd_hda_codec_hdmi snd_hda_codec_realtek iwldvm mac80211 coretemp intel_powerclamp kvm_intel kvm iwlwifi snd_hda_intel cfg80211 snd_hda_codec xhci_hcd e1000e ehci_pci snd_hwdep sdhci_pci snd_pcm ehci_hcd microcode psmouse sdhci thinkpad_acpi mmc_core i2c_i801 pcspkr usbcore hwmon snd_timer snd_page_alloc snd ptp rfkill pps_core soundcore evdev usb_common vboxnetflt(O) vboxdrv(O)Oops#2 Part8
+ loop tun binfmt_misc fuse msr acpi_call(O) ipv6 autofs4
+CPU: 0 PID: 21612 Comm: kworker/0:1 Tainted: G W O 3.10.1SIGN #28
+Hardware name: LENOVO 2306CTO/2306CTO, BIOS G2ET92WW (2.52 ) 02/22/2013
+Workqueue: cifsiod cifs_echo_request [cifs]
+task: ffff8801e1f416f0 ti: ffff880148744000 task.ti: ffff880148744000
+RIP: 0010:[<ffffffff814dcc13>] [<ffffffff814dcc13>] sock_sendmsg+0x93/0xd0
+RSP: 0000:ffff880148745b00 EFLAGS: 00010246
+RAX: 0000000000000000 RBX: ffff880148745b78 RCX: 0000000000000048
+RDX: ffff880148745c90 RSI: ffff880181864a00 RDI: ffff880148745b78
+RBP: ffff880148745c48 R08: 0000000000000048 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000000 R12: ffff880181864a00
+R13: ffff880148745c90 R14: 0000000000000048 R15: 0000000000000048
+FS: 0000000000000000(0000) GS:ffff88021e200000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000000000088 CR3: 000000020c42c000 CR4: 00000000001407b0
+Oops#2 Part7
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
+Stack:
+ ffff880148745b30 ffffffff810c4af9 0000004848745b30 ffff880181864a00
+ ffffffff81ffbc40 0000000000000000 ffff880148745c90 ffffffff810a5aab
+ ffff880148745bc0 ffffffff81ffbc40 ffff880148745b60 ffffffff815a9fb8
+Call Trace:
+ [<ffffffff810c4af9>] ? finish_task_switch+0x49/0xe0
+ [<ffffffff810a5aab>] ? lock_timer_base.isra.36+0x2b/0x50
+ [<ffffffff815a9fb8>] ? _raw_spin_unlock_irqrestore+0x18/0x40
+ [<ffffffff810a673f>] ? try_to_del_timer_sync+0x4f/0x70
+ [<ffffffff815aa38f>] ? _raw_spin_unlock_bh+0x1f/0x30
+ [<ffffffff814dcc87>] kernel_sendmsg+0x37/0x50
+ [<ffffffffa081a0e0>] smb_send_kvec+0xd0/0x1d0 [cifs]
+ [<ffffffffa081a263>] smb_send_rqst+0x83/0x1f0 [cifs]
+ [<ffffffffa081ab6c>] cifs_call_async+0xec/0x1b0 [cifs]
+ [<ffffffffa08245e0>] ? free_rsp_buf+0x40/0x40 [cifs]
+Oops#2 Part6
+ [<ffffffffa082606e>] SMB2_echo+0x8e/0xb0 [cifs]
+ [<ffffffffa0808789>] cifs_echo_request+0x79/0xa0 [cifs]
+ [<ffffffff810b45b3>] process_one_work+0x173/0x4a0
+ [<ffffffff810b52a1>] worker_thread+0x121/0x3a0
+ [<ffffffff810b5180>] ? manage_workers.isra.27+0x2b0/0x2b0
+ [<ffffffff810bae00>] kthread+0xc0/0xd0
+ [<ffffffff810bad40>] ? kthread_create_on_node+0x120/0x120
+ [<ffffffff815b199c>] ret_from_fork+0x7c/0xb0
+ [<ffffffff810bad40>] ? kthread_create_on_node+0x120/0x120
+Code: 84 24 b8 00 00 00 4c 89 f1 4c 89 ea 4c 89 e6 48 89 df 4c 89 60 18 48 c7 40 28 00 00 00 00 4c 89 68 30 44 89 70 14 49 8b 44 24 28 <ff> 90 88 00 00 00 3d ef fd ff ff 74 10 48 8d 65 e0 5b 41 5c 41
+ RIP [<ffffffff814dcc13>] sock_sendmsg+0x93/0xd0
+ RSP <ffff880148745b00>
+CR2: 0000000000000088
+
+The client was in the middle of trying to send a frame when the
+server->ssocket pointer got zeroed out. In most places where we access
+that pointer, the srv_mutex is held. There's only one spot I see where
+the server->ssocket pointer gets set without the srv_mutex held.
+This patch corrects that.
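+
+The resulting shape of the reconnect loop body (a simplified sketch
+of the hunks below, error handling elided):
+
+	mutex_lock(&server->srv_mutex);
+	rc = generic_ip_connect(server);	/* (re)creates server->ssocket */
+	if (!rc) {
+		...
+		server->tcpStatus = CifsNeedNegotiate;
+	}
+	mutex_unlock(&server->srv_mutex);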
+
+The upstream bug report was here:
+
+ https://bugzilla.kernel.org/show_bug.cgi?id=60557
+
+Reported-by: Oleksii Shevchuk <alxchk@gmail.com>
+Signed-off-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/connect.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -379,6 +379,7 @@ cifs_reconnect(struct TCP_Server_Info *s
+ try_to_freeze();
+
+ /* we should try only the port we connected to before */
++ mutex_lock(&server->srv_mutex);
+ rc = generic_ip_connect(server);
+ if (rc) {
+ cifs_dbg(FYI, "reconnect error %d\n", rc);
+@@ -390,6 +391,7 @@ cifs_reconnect(struct TCP_Server_Info *s
+ server->tcpStatus = CifsNeedNegotiate;
+ spin_unlock(&GlobalMid_Lock);
+ }
++ mutex_unlock(&server->srv_mutex);
+ } while (server->tcpStatus == CifsNeedReconnect);
+
+ return rc;
--- /dev/null
+From 1a05096de82f3cd672c76389f63964952678506f Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <pshilovsky@samba.org>
+Date: Thu, 5 Sep 2013 15:04:04 +0400
+Subject: CIFS: Fix a memory leak when a lease break comes
+
+From: Pavel Shilovsky <pshilovsky@samba.org>
+
+commit 1a05096de82f3cd672c76389f63964952678506f upstream.
+
+This happens when we receive a lease break from a server, then
+find an appropriate lease key in opened files and schedule the
+oplock_break slow work. The lw pointer isn't freed in this case.
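+
+A sketch of the leaky path being fixed (simplified from the hunk
+below): lw is allocated up front for the pending-open case, so when
+the lease key instead matches an already-open file it goes unused and
+must be freed before returning:
+
+	lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
+	...
+	/* lease key matched cfile; lw is not needed on this path */
+	queue_work(cifsiod_wq, &cfile->oplock_break);
+	kfree(lw);			/* the missing free */
+	return true;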
+
+Signed-off-by: Pavel Shilovsky <pshilovsky@samba.org>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2misc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -473,6 +473,7 @@ smb2_is_valid_lease_break(char *buffer,
+
+ queue_work(cifsiod_wq, &cfile->oplock_break);
+
++ kfree(lw);
+ spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return true;
--- /dev/null
+From 933d4b36576c951d0371bbfed05ec0135d516a6e Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <pshilovsky@samba.org>
+Date: Thu, 5 Sep 2013 15:00:07 +0400
+Subject: CIFS: Fix missing lease break
+
+From: Pavel Shilovsky <pshilovsky@samba.org>
+
+commit 933d4b36576c951d0371bbfed05ec0135d516a6e upstream.
+
+If a server sends a lease break to a connection that doesn't have
+opens with a lease key specified in the server response, we can't
+find an open file to send an ack. Fix this by walking through
+all connections we have.
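+
+The lookup is restructured roughly as follows (a simplified sketch of
+the diff below; locking and the per-tcon stats bump omitted):
+
+	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list)
+		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list)
+			list_for_each_entry(tcon, &ses->tcon_list, tcon_list)
+				if (smb2_tcon_has_lease(tcon, rsp, lw))
+					return true;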
+
+Signed-off-by: Pavel Shilovsky <pshilovsky@samba.org>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2misc.c | 149 ++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 80 insertions(+), 69 deletions(-)
+
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -417,19 +417,76 @@ cifs_ses_oplock_break(struct work_struct
+ }
+
+ static bool
+-smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
++smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
++ struct smb2_lease_break_work *lw)
++{
++ bool found;
++ __u8 lease_state;
++ struct list_head *tmp;
++ struct cifsFileInfo *cfile;
++ struct cifs_pending_open *open;
++ struct cifsInodeInfo *cinode;
++ int ack_req = le32_to_cpu(rsp->Flags &
++ SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
++
++ lease_state = smb2_map_lease_to_oplock(rsp->NewLeaseState);
++
++ list_for_each(tmp, &tcon->openFileList) {
++ cfile = list_entry(tmp, struct cifsFileInfo, tlist);
++ cinode = CIFS_I(cfile->dentry->d_inode);
++
++ if (memcmp(cinode->lease_key, rsp->LeaseKey,
++ SMB2_LEASE_KEY_SIZE))
++ continue;
++
++ cifs_dbg(FYI, "found in the open list\n");
++ cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
++ le32_to_cpu(rsp->NewLeaseState));
++
++ smb2_set_oplock_level(cinode, lease_state);
++
++ if (ack_req)
++ cfile->oplock_break_cancelled = false;
++ else
++ cfile->oplock_break_cancelled = true;
++
++ queue_work(cifsiod_wq, &cfile->oplock_break);
++ kfree(lw);
++ return true;
++ }
++
++ found = false;
++ list_for_each_entry(open, &tcon->pending_opens, olist) {
++ if (memcmp(open->lease_key, rsp->LeaseKey,
++ SMB2_LEASE_KEY_SIZE))
++ continue;
++
++ if (!found && ack_req) {
++ found = true;
++ memcpy(lw->lease_key, open->lease_key,
++ SMB2_LEASE_KEY_SIZE);
++ lw->tlink = cifs_get_tlink(open->tlink);
++ queue_work(cifsiod_wq, &lw->lease_break);
++ }
++
++ cifs_dbg(FYI, "found in the pending open list\n");
++ cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
++ le32_to_cpu(rsp->NewLeaseState));
++
++ open->oplock = lease_state;
++ }
++ return found;
++}
++
++static bool
++smb2_is_valid_lease_break(char *buffer)
+ {
+ struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
+ struct list_head *tmp, *tmp1, *tmp2;
++ struct TCP_Server_Info *server;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+- struct cifsInodeInfo *cinode;
+- struct cifsFileInfo *cfile;
+- struct cifs_pending_open *open;
+ struct smb2_lease_break_work *lw;
+- bool found;
+- int ack_req = le32_to_cpu(rsp->Flags &
+- SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
+
+ lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
+ if (!lw)
+@@ -442,72 +499,26 @@ smb2_is_valid_lease_break(char *buffer,
+
+ /* look up tcon based on tid & uid */
+ spin_lock(&cifs_tcp_ses_lock);
+- list_for_each(tmp, &server->smb_ses_list) {
+- ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
++ list_for_each(tmp, &cifs_tcp_ses_list) {
++ server = list_entry(tmp, struct TCP_Server_Info, tcp_ses_list);
+
+- spin_lock(&cifs_file_list_lock);
+- list_for_each(tmp1, &ses->tcon_list) {
+- tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
+-
+- cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
+- list_for_each(tmp2, &tcon->openFileList) {
+- cfile = list_entry(tmp2, struct cifsFileInfo,
+- tlist);
+- cinode = CIFS_I(cfile->dentry->d_inode);
+-
+- if (memcmp(cinode->lease_key, rsp->LeaseKey,
+- SMB2_LEASE_KEY_SIZE))
+- continue;
+-
+- cifs_dbg(FYI, "found in the open list\n");
+- cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
+- le32_to_cpu(rsp->NewLeaseState));
+-
+- smb2_set_oplock_level(cinode,
+- smb2_map_lease_to_oplock(rsp->NewLeaseState));
+-
+- if (ack_req)
+- cfile->oplock_break_cancelled = false;
+- else
+- cfile->oplock_break_cancelled = true;
+-
+- queue_work(cifsiod_wq, &cfile->oplock_break);
+-
+- kfree(lw);
+- spin_unlock(&cifs_file_list_lock);
+- spin_unlock(&cifs_tcp_ses_lock);
+- return true;
+- }
++ list_for_each(tmp1, &server->smb_ses_list) {
++ ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);
+
+- found = false;
+- list_for_each_entry(open, &tcon->pending_opens, olist) {
+- if (memcmp(open->lease_key, rsp->LeaseKey,
+- SMB2_LEASE_KEY_SIZE))
+- continue;
+-
+- if (!found && ack_req) {
+- found = true;
+- memcpy(lw->lease_key, open->lease_key,
+- SMB2_LEASE_KEY_SIZE);
+- lw->tlink = cifs_get_tlink(open->tlink);
+- queue_work(cifsiod_wq,
+- &lw->lease_break);
++ spin_lock(&cifs_file_list_lock);
++ list_for_each(tmp2, &ses->tcon_list) {
++ tcon = list_entry(tmp2, struct cifs_tcon,
++ tcon_list);
++ cifs_stats_inc(
++ &tcon->stats.cifs_stats.num_oplock_brks);
++ if (smb2_tcon_has_lease(tcon, rsp, lw)) {
++ spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&cifs_tcp_ses_lock);
++ return true;
+ }
+-
+- cifs_dbg(FYI, "found in the pending open list\n");
+- cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
+- le32_to_cpu(rsp->NewLeaseState));
+-
+- open->oplock =
+- smb2_map_lease_to_oplock(rsp->NewLeaseState);
+- }
+- if (found) {
+- spin_unlock(&cifs_file_list_lock);
+- spin_unlock(&cifs_tcp_ses_lock);
+- return true;
+ }
++ spin_unlock(&cifs_file_list_lock);
+ }
+- spin_unlock(&cifs_file_list_lock);
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+ kfree(lw);
+@@ -533,7 +544,7 @@ smb2_is_valid_oplock_break(char *buffer,
+ if (rsp->StructureSize !=
+ smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
+ if (le16_to_cpu(rsp->StructureSize) == 44)
+- return smb2_is_valid_lease_break(buffer, server);
++ return smb2_is_valid_lease_break(buffer);
+ else
+ return false;
+ }
--- /dev/null
+From f983827bcb9d2c34c4d8935861a1e9128aec2baf Mon Sep 17 00:00:00 2001
+From: Colin Cross <ccross@android.com>
+Date: Wed, 28 Aug 2013 18:41:47 -0700
+Subject: cpuidle: coupled: abort idle if pokes are pending
+
+From: Colin Cross <ccross@android.com>
+
+commit f983827bcb9d2c34c4d8935861a1e9128aec2baf upstream.
+
+Joseph Lo <josephl@nvidia.com> reported a lockup on Tegra20 caused
+by a race condition in coupled cpuidle. When two or more cpus
+enter idle at the same time, the first cpus to arrive may go to the
+ready loop without processing pending pokes from the last cpu to
+arrive.
+
+This patch adds a check for pending pokes once all cpus have been
+synchronized in the ready loop and resets the coupled state and
+retries if any cpus failed to handle their pending poke.
+
+Retrying on all cpus may trigger the same issue again, so this patch
+also adds a check to ensure that each cpu has received at least one
+poke between when it enters the waiting loop and when it moves on to
+the ready loop.
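+
+In outline, the entry path after this patch becomes (not literal
+code, see the diff below):
+
+	reset:	clear this cpu's bit in cpuidle_coupled_poked
+		w = cpuidle_coupled_set_waiting(...)
+		if this was the last cpu in: mark itself poked, poke the others
+	retry:	spin (dipping into the safe state) until all cpus are waiting
+		AND this cpu has seen at least one poke
+		synchronize with the other cpus on the ready count
+		if any poke is still pending: set_done(), barrier, goto reset
+		otherwise enter the chosen coupled state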
+
+Reported-and-tested-by: Joseph Lo <josephl@nvidia.com>
+Tested-by: Stephen Warren <swarren@nvidia.com>
+Signed-off-by: Colin Cross <ccross@android.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpuidle/coupled.c | 107 +++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 82 insertions(+), 25 deletions(-)
+
+--- a/drivers/cpuidle/coupled.c
++++ b/drivers/cpuidle/coupled.c
+@@ -106,6 +106,7 @@ struct cpuidle_coupled {
+ cpumask_t coupled_cpus;
+ int requested_state[NR_CPUS];
+ atomic_t ready_waiting_counts;
++ atomic_t abort_barrier;
+ int online_count;
+ int refcnt;
+ int prevent;
+@@ -122,12 +123,19 @@ static DEFINE_MUTEX(cpuidle_coupled_lock
+ static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
+
+ /*
+- * The cpuidle_coupled_poked_mask mask is used to avoid calling
++ * The cpuidle_coupled_poke_pending mask is used to avoid calling
+ * __smp_call_function_single with the per cpu call_single_data struct already
+ * in use. This prevents a deadlock where two cpus are waiting for each others
+ * call_single_data struct to be available
+ */
+-static cpumask_t cpuidle_coupled_poked_mask;
++static cpumask_t cpuidle_coupled_poke_pending;
++
++/*
++ * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
++ * once to minimize entering the ready loop with a poke pending, which would
++ * require aborting and retrying.
++ */
++static cpumask_t cpuidle_coupled_poked;
+
+ /**
+ * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
+@@ -291,10 +299,11 @@ static inline int cpuidle_coupled_get_st
+ return state;
+ }
+
+-static void cpuidle_coupled_poked(void *info)
++static void cpuidle_coupled_handle_poke(void *info)
+ {
+ int cpu = (unsigned long)info;
+- cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
++ cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
++ cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
+ }
+
+ /**
+@@ -313,7 +322,7 @@ static void cpuidle_coupled_poke(int cpu
+ {
+ struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
+
+- if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
++ if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
+ __smp_call_function_single(cpu, csd, 0);
+ }
+
+@@ -340,30 +349,19 @@ static void cpuidle_coupled_poke_others(
+ * @coupled: the struct coupled that contains the current cpu
+ * @next_state: the index in drv->states of the requested state for this cpu
+ *
+- * Updates the requested idle state for the specified cpuidle device,
+- * poking all coupled cpus out of idle if necessary to let them see the new
+- * state.
++ * Updates the requested idle state for the specified cpuidle device.
++ * Returns the number of waiting cpus.
+ */
+-static void cpuidle_coupled_set_waiting(int cpu,
++static int cpuidle_coupled_set_waiting(int cpu,
+ struct cpuidle_coupled *coupled, int next_state)
+ {
+- int w;
+-
+ coupled->requested_state[cpu] = next_state;
+
+ /*
+- * If this is the last cpu to enter the waiting state, poke
+- * all the other cpus out of their waiting state so they can
+- * enter a deeper state. This can race with one of the cpus
+- * exiting the waiting state due to an interrupt and
+- * decrementing waiting_count, see comment below.
+- *
+ * The atomic_inc_return provides a write barrier to order the write
+ * to requested_state with the later write that increments ready_count.
+ */
+- w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
+- if (w == coupled->online_count)
+- cpuidle_coupled_poke_others(cpu, coupled);
++ return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
+ }
+
+ /**
+@@ -418,13 +416,24 @@ static void cpuidle_coupled_set_done(int
+ static int cpuidle_coupled_clear_pokes(int cpu)
+ {
+ local_irq_enable();
+- while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask))
++ while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
+ cpu_relax();
+ local_irq_disable();
+
+ return need_resched() ? -EINTR : 0;
+ }
+
++static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
++{
++ cpumask_t cpus;
++ int ret;
++
++ cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
++ ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);
++
++ return ret;
++}
++
+ /**
+ * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
+ * @dev: struct cpuidle_device for the current cpu
+@@ -449,6 +458,7 @@ int cpuidle_enter_state_coupled(struct c
+ {
+ int entered_state = -1;
+ struct cpuidle_coupled *coupled = dev->coupled;
++ int w;
+
+ if (!coupled)
+ return -EINVAL;
+@@ -465,14 +475,33 @@ int cpuidle_enter_state_coupled(struct c
+ /* Read barrier ensures online_count is read after prevent is cleared */
+ smp_rmb();
+
+- cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
++reset:
++ cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);
++
++ w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
++ /*
++ * If this is the last cpu to enter the waiting state, poke
++ * all the other cpus out of their waiting state so they can
++ * enter a deeper state. This can race with one of the cpus
++ * exiting the waiting state due to an interrupt and
++ * decrementing waiting_count, see comment below.
++ */
++ if (w == coupled->online_count) {
++ cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
++ cpuidle_coupled_poke_others(dev->cpu, coupled);
++ }
+
+ retry:
+ /*
+ * Wait for all coupled cpus to be idle, using the deepest state
+- * allowed for a single cpu.
++ * allowed for a single cpu. If this was not the poking cpu, wait
++ * for at least one poke before leaving to avoid a race where
++ * two cpus could arrive at the waiting loop at the same time,
++ * but the first of the two to arrive could skip the loop without
++ * processing the pokes from the last to arrive.
+ */
+- while (!cpuidle_coupled_cpus_waiting(coupled)) {
++ while (!cpuidle_coupled_cpus_waiting(coupled) ||
++ !cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
+ if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+ goto out;
+@@ -493,6 +522,12 @@ retry:
+ }
+
+ /*
++ * Make sure final poke status for this cpu is visible before setting
++ * cpu as ready.
++ */
++ smp_wmb();
++
++ /*
+ * All coupled cpus are probably idle. There is a small chance that
+ * one of the other cpus just became active. Increment the ready count,
+ * and spin until all coupled cpus have incremented the counter. Once a
+@@ -511,6 +546,28 @@ retry:
+ cpu_relax();
+ }
+
++ /*
++ * Make sure read of all cpus ready is done before reading pending pokes
++ */
++ smp_rmb();
++
++ /*
++ * There is a small chance that a cpu left and reentered idle after this
++ * cpu saw that all cpus were waiting. The cpu that reentered idle will
++ * have sent this cpu a poke, which will still be pending after the
++ * ready loop. The pending interrupt may be lost by the interrupt
++ * controller when entering the deep idle state. It's not possible to
++ * clear a pending interrupt without turning interrupts on and handling
++ * it, and it's too late to turn on interrupts here, so reset the
++ * coupled idle state of all cpus and retry.
++ */
++ if (cpuidle_coupled_any_pokes_pending(coupled)) {
++ cpuidle_coupled_set_done(dev->cpu, coupled);
++ /* Wait for all cpus to see the pending pokes */
++ cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
++ goto reset;
++ }
++
+ /* all cpus have acked the coupled state */
+ next_state = cpuidle_coupled_get_state(dev, coupled);
+
+@@ -596,7 +653,7 @@ have_coupled:
+ coupled->refcnt++;
+
+ csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
+- csd->func = cpuidle_coupled_poked;
++ csd->func = cpuidle_coupled_handle_poke;
+ csd->info = (void *)(unsigned long)dev->cpu;
+
+ return 0;
--- /dev/null
+From 9e19b73c30a5fa42a53583a1f7817dd857126156 Mon Sep 17 00:00:00 2001
+From: Colin Cross <ccross@android.com>
+Date: Fri, 23 Aug 2013 12:45:12 -0700
+Subject: cpuidle: coupled: fix race condition between pokes and safe state
+
+From: Colin Cross <ccross@android.com>
+
+commit 9e19b73c30a5fa42a53583a1f7817dd857126156 upstream.
+
+The coupled cpuidle waiting loop clears pending pokes before
+entering the safe state. If a poke arrives just before the
+pokes are cleared, but after the while loop condition checks,
+the poke will be lost and the cpu will stay in the safe state
+until another interrupt arrives. This may cause the cpu that
+sent the poke to spin in the ready loop with interrupts off
+until another cpu receives an interrupt, and if no other cpus
+have interrupts routed to them it can spin forever.
+
+Change the return value of cpuidle_coupled_clear_pokes to
+return if a poke was cleared, and move the need_resched()
+checks into the callers. In the waiting loop, if
+a poke was cleared restart the loop to repeat the while
+condition checks.
+
+Reported-by: Neil Zhang <zhangwm@marvell.com>
+Signed-off-by: Colin Cross <ccross@android.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpuidle/coupled.c | 20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+--- a/drivers/cpuidle/coupled.c
++++ b/drivers/cpuidle/coupled.c
+@@ -408,19 +408,22 @@ static void cpuidle_coupled_set_done(int
+ * been processed and the poke bit has been cleared.
+ *
+ * Other interrupts may also be processed while interrupts are enabled, so
+- * need_resched() must be tested after turning interrupts off again to make sure
++ * need_resched() must be tested after this function returns to make sure
+ * the interrupt didn't schedule work that should take the cpu out of idle.
+ *
+- * Returns 0 if need_resched was false, -EINTR if need_resched was true.
++ * Returns 0 if no poke was pending, 1 if a poke was cleared.
+ */
+ static int cpuidle_coupled_clear_pokes(int cpu)
+ {
++ if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
++ return 0;
++
+ local_irq_enable();
+ while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
+ cpu_relax();
+ local_irq_disable();
+
+- return need_resched() ? -EINTR : 0;
++ return 1;
+ }
+
+ static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
+@@ -464,7 +467,8 @@ int cpuidle_enter_state_coupled(struct c
+ return -EINVAL;
+
+ while (coupled->prevent) {
+- if (cpuidle_coupled_clear_pokes(dev->cpu)) {
++ cpuidle_coupled_clear_pokes(dev->cpu);
++ if (need_resched()) {
+ local_irq_enable();
+ return entered_state;
+ }
+@@ -502,7 +506,10 @@ retry:
+ */
+ while (!cpuidle_coupled_cpus_waiting(coupled) ||
+ !cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
+- if (cpuidle_coupled_clear_pokes(dev->cpu)) {
++ if (cpuidle_coupled_clear_pokes(dev->cpu))
++ continue;
++
++ if (need_resched()) {
+ cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+ goto out;
+ }
+@@ -516,7 +523,8 @@ retry:
+ dev->safe_state_index);
+ }
+
+- if (cpuidle_coupled_clear_pokes(dev->cpu)) {
++ cpuidle_coupled_clear_pokes(dev->cpu);
++ if (need_resched()) {
+ cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+ goto out;
+ }
--- /dev/null
+From 7bfa9ad55d691f2b836b576769b11eca2cf50816 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@samba.org>
+Date: Tue, 6 Aug 2013 14:13:44 +1000
+Subject: KVM: PPC: Book3S: Fix compile error in XICS emulation
+
+From: Paul Mackerras <paulus@samba.org>
+
+commit 7bfa9ad55d691f2b836b576769b11eca2cf50816 upstream.
+
+Commit 8e44ddc3f3 ("powerpc/kvm/book3s: Add support for H_IPOLL and
+H_XIRR_X in XICS emulation") added a call to get_tb() but didn't
+include the header that defines it, and on some configs this means
+book3s_xics.c fails to compile:
+
+arch/powerpc/kvm/book3s_xics.c: In function ‘kvmppc_xics_hcall’:
+arch/powerpc/kvm/book3s_xics.c:812:3: error: implicit declaration of function ‘get_tb’ [-Werror=implicit-function-declaration]
+
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Alexander Graf <agraf@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_xics.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/powerpc/kvm/book3s_xics.c
++++ b/arch/powerpc/kvm/book3s_xics.c
+@@ -19,6 +19,7 @@
+ #include <asm/hvcall.h>
+ #include <asm/xics.h>
+ #include <asm/debug.h>
++#include <asm/time.h>
+
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
--- /dev/null
+From 26ee47411ae22caa07d3f3b63ca6d097cba6681b Mon Sep 17 00:00:00 2001
+From: Lars-Peter Clausen <lars@metafoo.de>
+Date: Wed, 28 Aug 2013 17:55:07 +0200
+Subject: regmap: debugfs: Fix continued read from registers file
+
+From: Lars-Peter Clausen <lars@metafoo.de>
+
+commit 26ee47411ae22caa07d3f3b63ca6d097cba6681b upstream.
+
+The regmap_debugfs_get_dump_start() function maps from a file offset to the
+register that can be found at that position in the file. This is done using a
+look-up table. Commit d6814a7d ("regmap: debugfs: Suppress cache for partial
+register files") added a check to bypass the look-up table for partial register
+files, since the offsets in that table are only correct for the full register
+file. The check incorrectly uses the file offset instead of the register base
+address and returns it. This will cause the file offset to be interpreted as a
+register address, which will result in incorrect output from the registers file
+for all reads except at position 0.
+
+The issue can easily be reproduced by doing small reads from the registers file, e.g.
+`dd if=registers bs=10 count=5`.
+
+Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
+Signed-off-by: Mark Brown <broonie@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/regmap/regmap-debugfs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -85,8 +85,8 @@ static unsigned int regmap_debugfs_get_d
+ unsigned int reg_offset;
+
+ /* Suppress the cache if we're using a subrange */
+- if (from)
+- return from;
++ if (base)
++ return base;
+
+ /*
+ * If we don't have a cache build one so we don't have to do a
powerpc-default-arch-idle-could-cede-processor-on-pseries.patch
xen-gnt-prevent-adding-duplicate-gnt-callbacks.patch
arm-xen-only-set-pm-function-ptrs-for-xen-guests.patch
+cpuidle-coupled-abort-idle-if-pokes-are-pending.patch
+cpuidle-coupled-fix-race-condition-between-pokes-and-safe-state.patch
+arm64-perf-fix-group-validation-when-using-enable_on_exec.patch
+arm64-perf-fix-armv8-evtype_mask-to-include-nsh-bit.patch
+arm-pci-versatile-fix-map_irq-function-to-match-hardware.patch
+arm-pci-versatile-fix-pci-i-o.patch
+arm-pci-versatile-fix-smap-register-offsets.patch
+kvm-ppc-book3s-fix-compile-error-in-xics-emulation.patch
+xhci-plat-don-t-enable-legacy-pci-interrupts.patch
+usb-xhci-disable-runtime-pm-suspend-for-quirky-controllers.patch
+xhci-fix-port-besl-lpm-capability-checking.patch
+usb-dwc3-gadget-don-t-request-irqs-in-atomic.patch
+tty-disassociate_ctty-sends-the-extra-sigcont.patch
+cifs-ensure-that-srv_mutex-is-held-when-dealing-with-ssocket-pointer.patch
+cifs-fix-a-memory-leak-when-a-lease-break-comes.patch
+cifs-fix-missing-lease-break.patch
+usb-ohci-allow-runtime-pm-without-system-sleep.patch
+regmap-debugfs-fix-continued-read-from-registers-file.patch
+staging-comedi-dt282x-dt282x_ai_insn_read-always-fails.patch
--- /dev/null
+From 2c4283ca7cdcc6605859c836fc536fcd83a4525f Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Tue, 20 Aug 2013 11:57:35 +0300
+Subject: staging: comedi: dt282x: dt282x_ai_insn_read() always fails
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 2c4283ca7cdcc6605859c836fc536fcd83a4525f upstream.
+
+In dt282x_ai_insn_read() we call this macro like:
+wait_for(!mux_busy(), comedi_error(dev, "timeout\n"); return -ETIME;);
+Because the if statement doesn't have curly braces it means we always
+return -ETIME and the function never succeeds.
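+
+A tiny standalone demo of the pitfall (an illustration, not the
+driver macro): with an unbraced "if (_i) b", only the first statement
+of the multi-statement argument is guarded, so the return runs
+unconditionally:
+
+	#include <stdio.h>
+
+	#define wait_broken(b)	do { int _i = 0; if (_i) b } while (0)
+	#define wait_fixed(b)	do { int _i = 0; if (_i) { b } } while (0)
+
+	static int f_broken(void)
+	{
+		wait_broken(puts("timeout"); return -1;);
+		return 0;	/* never reached */
+	}
+
+	static int f_fixed(void)
+	{
+		wait_fixed(puts("timeout"); return -1;);
+		return 0;
+	}
+
+	int main(void)
+	{
+		/* prints "broken -1, fixed 0" */
+		printf("broken %d, fixed %d\n", f_broken(), f_fixed());
+		return 0;
+	}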
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Acked-by: Ian Abbott <abbotti@mev.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/comedi/drivers/dt282x.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/staging/comedi/drivers/dt282x.c
++++ b/drivers/staging/comedi/drivers/dt282x.c
+@@ -264,8 +264,9 @@ struct dt282x_private {
+ } \
+ udelay(5); \
+ } \
+- if (_i) \
++ if (_i) { \
+ b \
++ } \
+ } while (0)
+
+ static int prep_ai_dma(struct comedi_device *dev, int chan, int size);
--- /dev/null
+From 03e1261778cca782d41a3d8e3945ca88cf93e01e Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Sun, 15 Sep 2013 17:50:26 +0200
+Subject: tty: disassociate_ctty() sends the extra SIGCONT
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+commit 03e1261778cca782d41a3d8e3945ca88cf93e01e upstream.
+
+Starting from v3.10 (probably commit f91e2590410b: "tty: Signal
+foreground group processes in hangup") disassociate_ctty() sends SIGCONT
+if tty && on_exit. This breaks LSB test-suite, in particular test8 in
+_exit.c and test40 in sigcon5.c.
+
+Put the "!on_exit" check back to restore the old behaviour.
+
+Review by Peter Hurley:
+ "Yes, this regression was introduced by me in that commit. The effect
+ of the regression is that ptys will receive a SIGCONT when, in similar
+ circumstances, ttys would not.
+
+ The fact that two test vectors accidentally tripped over this
+ regression suggests that some other apps may as well.
+
+ Thanks for catching this"
+
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Reported-by: Karel Srot <ksrot@redhat.com>
+Reviewed-by: Peter Hurley <peter@hurleysoftware.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/tty_io.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -850,7 +850,8 @@ void disassociate_ctty(int on_exit)
+ struct pid *tty_pgrp = tty_get_pgrp(tty);
+ if (tty_pgrp) {
+ kill_pgrp(tty_pgrp, SIGHUP, on_exit);
+- kill_pgrp(tty_pgrp, SIGCONT, on_exit);
++ if (!on_exit)
++ kill_pgrp(tty_pgrp, SIGCONT, on_exit);
+ put_pid(tty_pgrp);
+ }
+ }
--- /dev/null
+From b0d7ffd44ba9cd2dfbf299674418193a5f9ed21a Mon Sep 17 00:00:00 2001
+From: Felipe Balbi <balbi@ti.com>
+Date: Thu, 27 Jun 2013 10:00:18 +0300
+Subject: usb: dwc3: gadget: don't request IRQs in atomic
+
+From: Felipe Balbi <balbi@ti.com>
+
+commit b0d7ffd44ba9cd2dfbf299674418193a5f9ed21a upstream.
+
+We cannot request an IRQ with spinlocks held
+as that would trigger a sleeping inside
+spinlock warning.
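+
+The reordering below follows the usual pattern (sketch only, see the
+hunks for the real code): do the work that may sleep first, then take
+the lock:
+
+	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+	ret = request_threaded_irq(irq, ...);	/* may sleep */
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&dwc->lock, flags);	/* atomic from here on */
+	...
+	spin_unlock_irqrestore(&dwc->lock, flags);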
+
+Reported-by: Stephen Boyd <sboyd@codeaurora.org>
+Signed-off-by: Felipe Balbi <balbi@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/dwc3/gadget.c | 39 ++++++++++++++++++++++-----------------
+ 1 file changed, 22 insertions(+), 17 deletions(-)
+
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1508,6 +1508,15 @@ static int dwc3_gadget_start(struct usb_
+ int irq;
+ u32 reg;
+
++ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
++ ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
++ IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
++ if (ret) {
++ dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
++ irq, ret);
++ goto err0;
++ }
++
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ if (dwc->gadget_driver) {
+@@ -1515,7 +1524,7 @@ static int dwc3_gadget_start(struct usb_
+ dwc->gadget.name,
+ dwc->gadget_driver->driver.name);
+ ret = -EBUSY;
+- goto err0;
++ goto err1;
+ }
+
+ dwc->gadget_driver = driver;
+@@ -1551,42 +1560,38 @@ static int dwc3_gadget_start(struct usb_
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+- goto err0;
++ goto err2;
+ }
+
+ dep = dwc->eps[1];
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+- goto err1;
++ goto err3;
+ }
+
+ /* begin to receive SETUP packets */
+ dwc->ep0state = EP0_SETUP_PHASE;
+ dwc3_ep0_out_start(dwc);
+
+- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+- ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
+- IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
+- if (ret) {
+- dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+- irq, ret);
+- goto err1;
+- }
+-
+ dwc3_gadget_enable_irq(dwc);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+
+-err1:
++err3:
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+
+-err0:
++err2:
+ dwc->gadget_driver = NULL;
++
++err1:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ free_irq(irq, dwc);
++
++err0:
+ return ret;
+ }
+
+@@ -1600,9 +1605,6 @@ static int dwc3_gadget_stop(struct usb_g
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ dwc3_gadget_disable_irq(dwc);
+- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+- free_irq(irq, dwc);
+-
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+ __dwc3_gadget_ep_disable(dwc->eps[1]);
+
+@@ -1610,6 +1612,9 @@ static int dwc3_gadget_stop(struct usb_g
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
++ free_irq(irq, dwc);
++
+ return 0;
+ }
+
--- /dev/null
+From 69820e01aa756b8d228143d997f71523c1e97984 Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Mon, 26 Aug 2013 15:01:40 -0400
+Subject: USB: OHCI: Allow runtime PM without system sleep
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+commit 69820e01aa756b8d228143d997f71523c1e97984 upstream.
+
+Since ohci-hcd supports runtime PM, the .pm field in its pci_driver
+structure should be protected by CONFIG_PM rather than
+CONFIG_PM_SLEEP.
+
+Without this change, OHCI controllers won't do runtime suspend if
+system suspend or hibernation isn't enabled.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/host/ohci-pci.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/usb/host/ohci-pci.c
++++ b/drivers/usb/host/ohci-pci.c
+@@ -289,7 +289,7 @@ static struct pci_driver ohci_pci_driver
+ .remove = usb_hcd_pci_remove,
+ .shutdown = usb_hcd_pci_shutdown,
+
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_PM
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
--- /dev/null
+From c8476fb855434c733099079063990e5bfa7ecad6 Mon Sep 17 00:00:00 2001
+From: Shawn Nematbakhsh <shawnn@chromium.org>
+Date: Mon, 19 Aug 2013 10:36:13 -0700
+Subject: usb: xhci: Disable runtime PM suspend for quirky controllers
+
+From: Shawn Nematbakhsh <shawnn@chromium.org>
+
+commit c8476fb855434c733099079063990e5bfa7ecad6 upstream.
+
+If a USB controller with XHCI_RESET_ON_RESUME goes to runtime suspend,
+a reset will be performed upon runtime resume. Any previously suspended
+devices attached to the controller will be re-enumerated at this time.
+This will cause problems, for example, if an open system call on the
+device triggered the resume (the open call will fail).
+
+Note that this change is only relevant when persist_enabled is not set
+for USB devices.
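+
+The fix pairs a runtime-PM reference with each attached device on
+quirky hosts (sketch of the hunks below, assuming
+CONFIG_USB_DEFAULT_PERSIST is not set):
+
+	/* xhci_alloc_dev(): block runtime suspend while a device exists */
+	if (xhci->quirks & XHCI_RESET_ON_RESUME)
+		pm_runtime_get_noresume(hcd->self.controller);
+
+	/* xhci_free_dev(): drop the reference so the host may suspend again */
+	if (xhci->quirks & XHCI_RESET_ON_RESUME)
+		pm_runtime_put_noidle(hcd->self.controller);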
+
+This patch should be backported to kernels as old as 3.0, that
+contain the commit c877b3b2ad5cb9d4fe523c5496185cc328ff3ae9 "xhci: Add
+reset on resume quirk for asrock p67 host".
+
+Signed-off-by: Shawn Nematbakhsh <shawnn@chromium.org>
+Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/host/xhci.c | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -3586,10 +3586,21 @@ void xhci_free_dev(struct usb_hcd *hcd,
+ {
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_virt_device *virt_dev;
++ struct device *dev = hcd->self.controller;
+ unsigned long flags;
+ u32 state;
+ int i, ret;
+
++#ifndef CONFIG_USB_DEFAULT_PERSIST
++ /*
++ * We called pm_runtime_get_noresume when the device was attached.
++ * Decrement the counter here to allow controller to runtime suspend
++ * if no devices remain.
++ */
++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
++ pm_runtime_put_noidle(dev);
++#endif
++
+ ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
+ /* If the host is halted due to driver unload, we still need to free the
+ * device.
+@@ -3661,6 +3672,7 @@ static int xhci_reserve_host_control_ep_
+ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ {
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
++ struct device *dev = hcd->self.controller;
+ unsigned long flags;
+ int timeleft;
+ int ret;
+@@ -3713,6 +3725,16 @@ int xhci_alloc_dev(struct usb_hcd *hcd,
+ goto disable_slot;
+ }
+ udev->slot_id = xhci->slot_id;
++
++#ifndef CONFIG_USB_DEFAULT_PERSIST
++ /*
++ * If resetting upon resume, we can't put the controller into runtime
++ * suspend if there is a device attached.
++ */
++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
++ pm_runtime_get_noresume(dev);
++#endif
++
+ /* Is this a LS or FS device under a HS hub? */
+ /* Hub or peripherial? */
+ return 1;
--- /dev/null
+From dcf06a036848b4e8e6c8220f2e00b9adf6f84918 Mon Sep 17 00:00:00 2001
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+Date: Wed, 21 Aug 2013 18:50:09 +0300
+Subject: xhci: fix port BESL LPM capability checking
+
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+
+commit dcf06a036848b4e8e6c8220f2e00b9adf6f84918 upstream.
+
+Wrong capability bit was checked for best effort service latency.
+bit 20 indicate port is BESL LPM capable (BLC),
+bit 19 is hardware LPM capable (HLC)
+
+This patch should be backported to kernels as old as 3.11, that
+contain the commit a558ccdcc71c7770c5e80c926a31cfe8a3892a09 "usb: xhci:
+add USB2 Link power management BESL support"
+
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
+Reported-by: Steve Cotton <steve@s.cotton.clara.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/host/xhci-ext-caps.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/usb/host/xhci-ext-caps.h
++++ b/drivers/usb/host/xhci-ext-caps.h
+@@ -71,7 +71,7 @@
+
+ /* USB 2.0 xHCI 1.0 hardware LMP capability - section 7.2.2.1.3.2 */
+ #define XHCI_HLC (1 << 19)
+-#define XHCI_BLC (1 << 19)
++#define XHCI_BLC (1 << 20)
+
+ /* command register values to disable interrupts and halt the HC */
+ /* start/stop HC execution - do not write unless HC is halted*/
--- /dev/null
+From 52fb61250a7a132b0cfb9f4a1060a1f3c49e5a25 Mon Sep 17 00:00:00 2001
+From: Sarah Sharp <sarah.a.sharp@linux.intel.com>
+Date: Thu, 8 Aug 2013 10:08:34 -0700
+Subject: xhci-plat: Don't enable legacy PCI interrupts.
+
+From: Sarah Sharp <sarah.a.sharp@linux.intel.com>
+
+commit 52fb61250a7a132b0cfb9f4a1060a1f3c49e5a25 upstream.
+
+The xHCI platform driver calls into usb_add_hcd to register the irq for
+its platform device. It does not want the xHCI generic driver to
+register an interrupt for it at all. The original code did that by
+setting the XHCI_BROKEN_MSI quirk, which tells the xHCI driver to not
+enable MSI or MSI-X for a PCI host.
+
+Unfortunately, if CONFIG_PCI is enabled and CONFIG_USB_DWC3 is enabled,
+the xHCI generic driver will attempt to register a legacy PCI interrupt
+for the xHCI platform device in xhci_try_enable_msi(). This will result
+in a bogus irq being registered, since the underlying device is a
+platform_device, not a pci_device, and thus the pci_device->irq pointer
+will be bogus.
+
+Add a new quirk, XHCI_PLAT, so that the xHCI generic driver can
+distinguish between a PCI device that can't handle MSI or MSI-X, and a
+platform device that should not have its interrupts touched at all.
+This quirk may be useful in the future, in case other corner cases like
+this arise.
+
+This patch should be backported to kernels as old as 3.9, that
+contain the commit 00eed9c814cb8f281be6f0f5d8f45025dc0a97eb "USB: xhci:
+correctly enable interrupts".
+
+Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
+Reported-by: Yu Y Wang <yu.y.wang@intel.com>
+Tested-by: Yu Y Wang <yu.y.wang@intel.com>
+Reviewed-by: Felipe Balbi <balbi@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/host/xhci-plat.c | 2 +-
+ drivers/usb/host/xhci.c | 7 ++++++-
+ drivers/usb/host/xhci.h | 1 +
+ 3 files changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -24,7 +24,7 @@ static void xhci_plat_quirks(struct devi
+ * here that the generic code does not try to make a pci_dev from our
+ * dev struct in order to setup MSI
+ */
+- xhci->quirks |= XHCI_BROKEN_MSI;
++ xhci->quirks |= XHCI_PLAT;
+ }
+
+ /* called during probe() after chip reset completes */
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -343,9 +343,14 @@ static void __maybe_unused xhci_msix_syn
+ static int xhci_try_enable_msi(struct usb_hcd *hcd)
+ {
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
++ struct pci_dev *pdev;
+ int ret;
+
++ /* The xhci platform device has set up IRQs through usb_add_hcd. */
++ if (xhci->quirks & XHCI_PLAT)
++ return 0;
++
++ pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ /*
+ * Some Fresco Logic host controllers advertise MSI, but fail to
+ * generate interrupts. Don't even try to enable MSI.
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1542,6 +1542,7 @@ struct xhci_hcd {
+ #define XHCI_SPURIOUS_REBOOT (1 << 13)
+ #define XHCI_COMP_MODE_QUIRK (1 << 14)
+ #define XHCI_AVOID_BEI (1 << 15)
++#define XHCI_PLAT (1 << 16)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */