]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.6-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 4 Jun 2016 19:51:53 +0000 (12:51 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 4 Jun 2016 19:51:53 +0000 (12:51 -0700)
added patches:
powerpc-book3s64-fix-branching-to-ool-handlers-in-relocatable-kernel.patch
powerpc-eeh-don-t-report-error-in-eeh_pe_reset_and_recover.patch
powerpc-eeh-restore-initial-state-in-eeh_pe_reset_and_recover.patch
revert-powerpc-eeh-fix-crash-in-eeh_add_device_early-on-cell.patch
sunrpc-fix-stripping-of-padded-mic-tokens.patch
xen-events-don-t-move-disabled-irqs.patch
xen-use-same-main-loop-for-counting-and-remapping-pages.patch

queue-4.6/powerpc-book3s64-fix-branching-to-ool-handlers-in-relocatable-kernel.patch [new file with mode: 0644]
queue-4.6/powerpc-eeh-don-t-report-error-in-eeh_pe_reset_and_recover.patch [new file with mode: 0644]
queue-4.6/powerpc-eeh-restore-initial-state-in-eeh_pe_reset_and_recover.patch [new file with mode: 0644]
queue-4.6/revert-powerpc-eeh-fix-crash-in-eeh_add_device_early-on-cell.patch [new file with mode: 0644]
queue-4.6/series
queue-4.6/sunrpc-fix-stripping-of-padded-mic-tokens.patch [new file with mode: 0644]
queue-4.6/xen-events-don-t-move-disabled-irqs.patch [new file with mode: 0644]
queue-4.6/xen-use-same-main-loop-for-counting-and-remapping-pages.patch [new file with mode: 0644]

diff --git a/queue-4.6/powerpc-book3s64-fix-branching-to-ool-handlers-in-relocatable-kernel.patch b/queue-4.6/powerpc-book3s64-fix-branching-to-ool-handlers-in-relocatable-kernel.patch
new file mode 100644 (file)
index 0000000..78d060d
--- /dev/null
@@ -0,0 +1,101 @@
+From 8ed8ab40047a570fdd8043a40c104a57248dd3fd Mon Sep 17 00:00:00 2001
+From: Hari Bathini <hbathini@linux.vnet.ibm.com>
+Date: Fri, 15 Apr 2016 22:48:02 +1000
+Subject: powerpc/book3s64: Fix branching to OOL handlers in relocatable kernel
+
+From: Hari Bathini <hbathini@linux.vnet.ibm.com>
+
+commit 8ed8ab40047a570fdd8043a40c104a57248dd3fd upstream.
+
+Some of the interrupt vectors on 64-bit POWER server processors are only
+32 bytes long (8 instructions), which is not enough for the full
+first-level interrupt handler. For these we need to branch to an
+out-of-line (OOL) handler. But when we are running a relocatable kernel,
+interrupt vectors till __end_interrupts marker are copied down to real
+address 0x100. So, branching to labels (ie. OOL handlers) outside this
+section must be handled differently (see LOAD_HANDLER()), considering
+relocatable kernel, which would need at least 4 instructions.
+
+However, branching from interrupt vector means that we corrupt the
+CFAR (come-from address register) on POWER7 and later processors as
+mentioned in commit 1707dd16. So, EXCEPTION_PROLOG_0 (6 instructions)
+that contains the part up to the point where the CFAR is saved in the
+PACA should be part of the short interrupt vectors before we branch out
+to OOL handlers.
+
+But as mentioned already, there are interrupt vectors on 64-bit POWER
+server processors that are only 32 bytes long (like vectors 0x4f00,
+0x4f20, etc.), which cannot accommodate the above two cases at the same
+time owing to space constraint. Currently, in these interrupt vectors,
+we simply branch out to OOL handlers, without using LOAD_HANDLER(),
+which leaves us vulnerable when running a relocatable kernel (eg. kdump
+case). While this has been the case for some time now and kdump is used
+widely, we were fortunate not to see any problems so far, for three
+reasons:
+
+  1. In almost all cases, production kernel (relocatable) is used for
+     kdump as well, which would mean that crashed kernel's OOL handler
+     would be at the same place where we end up branching to, from short
+     interrupt vector of kdump kernel.
+  2. Also, OOL handler was unlikely the reason for crash in almost all
+     the kdump scenarios, which meant we had a sane OOL handler from
+     crashed kernel that we branched to.
+  3. On most 64-bit POWER server processors, page size is large enough
+     that marking interrupt vector code as executable (see commit
+     429d2e83) leads to marking OOL handler code from crashed kernel,
+     that sits right below interrupt vector code from kdump kernel, as
+     executable as well.
+
+Let us fix this by moving the __end_interrupts marker down past OOL
+handlers to make sure that we also copy OOL handlers to real address
+0x100 when running a relocatable kernel.
+
+This fix has been tested successfully in kdump scenario, on an LPAR with
+4K page size by using different default/production kernel and kdump
+kernel.
+
+Also tested by manually corrupting the OOL handlers in the first kernel
+and then kdump'ing, and then causing the OOL handlers to fire - mpe.
+
+Fixes: c1fb6816fb1b ("powerpc: Add relocation on exception vector handlers")
+Signed-off-by: Hari Bathini <hbathini@linux.vnet.ibm.com>
+Signed-off-by: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/exceptions-64s.S |   16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -953,11 +953,6 @@ hv_facility_unavailable_relon_trampoline
+ #endif
+       STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
+-      /* Other future vectors */
+-      .align  7
+-      .globl  __end_interrupts
+-__end_interrupts:
+-
+       .align  7
+ system_call_entry:
+       b       system_call_common
+@@ -1244,6 +1239,17 @@ __end_handlers:
+       STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
+       STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
++      /*
++       * The __end_interrupts marker must be past the out-of-line (OOL)
++       * handlers, so that they are copied to real address 0x100 when running
++       * a relocatable kernel. This ensures they can be reached from the short
++       * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
++       * directly, without using LOAD_HANDLER().
++       */
++      .align  7
++      .globl  __end_interrupts
++__end_interrupts:
++
+ #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
+ /*
+  * Data area reserved for FWNMI option.
diff --git a/queue-4.6/powerpc-eeh-don-t-report-error-in-eeh_pe_reset_and_recover.patch b/queue-4.6/powerpc-eeh-don-t-report-error-in-eeh_pe_reset_and_recover.patch
new file mode 100644 (file)
index 0000000..be401a2
--- /dev/null
@@ -0,0 +1,43 @@
+From affeb0f2d3a9af419ad7ef4ac782e1540b2f7b28 Mon Sep 17 00:00:00 2001
+From: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Date: Wed, 27 Apr 2016 11:14:50 +1000
+Subject: powerpc/eeh: Don't report error in eeh_pe_reset_and_recover()
+
+From: Gavin Shan <gwshan@linux.vnet.ibm.com>
+
+commit affeb0f2d3a9af419ad7ef4ac782e1540b2f7b28 upstream.
+
+The function eeh_pe_reset_and_recover() is used to recover EEH
+error when the passthrough device is transferred to guest and
+backwards, meaning the device's driver is vfio-pci or none.
+When the driver is vfio-pci that provides error_detected() error
+handler only, the handler simply stops the guest and it's not
+expected behaviour. On the other hand, no error handlers will
+be called if we don't have a bound driver.
+
+This ignores the error handler in eeh_pe_reset_and_recover()
+that reports the error to device driver to avoid the exceptional
+behaviour.
+
+Fixes: 5cfb20b9 ("powerpc/eeh: Emulate EEH recovery for VFIO devices")
+Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Reviewed-by: Russell Currey <ruscur@russell.cc>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/eeh_driver.c |    3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -564,9 +564,6 @@ int eeh_pe_reset_and_recover(struct eeh_
+       /* Save states */
+       eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);
+-      /* Report error */
+-      eeh_pe_dev_traverse(pe, eeh_report_error, &result);
+-
+       /* Issue reset */
+       ret = eeh_reset_pe(pe);
+       if (ret) {
diff --git a/queue-4.6/powerpc-eeh-restore-initial-state-in-eeh_pe_reset_and_recover.patch b/queue-4.6/powerpc-eeh-restore-initial-state-in-eeh_pe_reset_and_recover.patch
new file mode 100644 (file)
index 0000000..0d274dd
--- /dev/null
@@ -0,0 +1,73 @@
+From 5a0cdbfd17b90a89c64a71d8aec9773ecdb20d0d Mon Sep 17 00:00:00 2001
+From: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Date: Wed, 27 Apr 2016 11:14:51 +1000
+Subject: powerpc/eeh: Restore initial state in eeh_pe_reset_and_recover()
+
+From: Gavin Shan <gwshan@linux.vnet.ibm.com>
+
+commit 5a0cdbfd17b90a89c64a71d8aec9773ecdb20d0d upstream.
+
+The function eeh_pe_reset_and_recover() is used to recover EEH
+error when the passthrough device is transferred to guest and
+backwards. The content in the device's config space will be lost
+on PE reset issued in the middle of the recovery. The function
+saves/restores it before/after the reset. However, config access
+to some adapters like Broadcom BCM5719 at this point will cause a
+fenced PHB. The config space is always blocked and we save 0xFF's
+that are restored at a later point. The memory BARs are totally
+corrupted, causing another EEH error upon access to one of the
+memory BARs.
+
+This restores the config space on those adapters like BCM5719
+from the content saved to the EEH device when it's populated,
+to resolve above issue.
+
+Fixes: 5cfb20b9 ("powerpc/eeh: Emulate EEH recovery for VFIO devices")
+Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Reviewed-by: Russell Currey <ruscur@russell.cc>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/eeh_driver.c |   23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -171,6 +171,16 @@ static void *eeh_dev_save_state(void *da
+       if (!edev)
+               return NULL;
++      /*
++       * We cannot access the config space on some adapters.
++       * Otherwise, it will cause fenced PHB. We don't save
++       * the content in their config space and will restore
++       * from the initial config space saved when the EEH
++       * device is created.
++       */
++      if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
++              return NULL;
++
+       pdev = eeh_dev_to_pci_dev(edev);
+       if (!pdev)
+               return NULL;
+@@ -312,6 +322,19 @@ static void *eeh_dev_restore_state(void
+       if (!edev)
+               return NULL;
++      /*
++       * The content in the config space isn't saved because
++       * the blocked config space on some adapters. We have
++       * to restore the initial saved config space when the
++       * EEH device is created.
++       */
++      if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
++              if (list_is_last(&edev->list, &edev->pe->edevs))
++                      eeh_pe_restore_bars(edev->pe);
++
++              return NULL;
++      }
++
+       pdev = eeh_dev_to_pci_dev(edev);
+       if (!pdev)
+               return NULL;
diff --git a/queue-4.6/revert-powerpc-eeh-fix-crash-in-eeh_add_device_early-on-cell.patch b/queue-4.6/revert-powerpc-eeh-fix-crash-in-eeh_add_device_early-on-cell.patch
new file mode 100644 (file)
index 0000000..bd4e541
--- /dev/null
@@ -0,0 +1,50 @@
+From c2078d9ef600bdbe568c89e5ddc2c6f15b7982c8 Mon Sep 17 00:00:00 2001
+From: "Guilherme G. Piccoli" <gpiccoli@linux.vnet.ibm.com>
+Date: Mon, 11 Apr 2016 16:17:22 -0300
+Subject: Revert "powerpc/eeh: Fix crash in eeh_add_device_early() on Cell"
+
+From: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
+
+commit c2078d9ef600bdbe568c89e5ddc2c6f15b7982c8 upstream.
+
+This reverts commit 89a51df5ab1d38b257300b8ac940bbac3bb0eb9b.
+
+The function eeh_add_device_early() is used to perform EEH
+initialization in devices added later on the system, like in
+hotplug/DLPAR scenarios. Since the commit 89a51df5ab1d ("powerpc/eeh:
+Fix crash in eeh_add_device_early() on Cell") a new check was introduced
+in this function - Cell has no EEH capabilities which led to kernel oops
+if hotplug was performed, so checking for eeh_enabled() was introduced
+to avoid the issue.
+
+However, in architectures that EEH is present like pSeries or PowerNV,
+we might reach a case in which no PCI devices are present on boot time
+and so EEH is not initialized. Then, if a device is added via DLPAR for
+example, eeh_add_device_early() fails because eeh_enabled() is false,
+and EEH end up not being enabled at all.
+
+This reverts the aforementioned patch since a new verification was
+introduced by the commit d91dafc02f42 ("powerpc/eeh: Delay probing EEH
+device during hotplug") and so the original Cell issue does not happen
+anymore.
+
+Reviewed-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Signed-off-by: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/eeh.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -1068,7 +1068,7 @@ void eeh_add_device_early(struct pci_dn
+       struct pci_controller *phb;
+       struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
+-      if (!edev || !eeh_enabled())
++      if (!edev)
+               return;
+       if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
index ae9428deea863dafc2f24071c64f630c4a12e5f0..84ba6e329a9de37d23a7bd7063d522b4d307e7d8 100644 (file)
@@ -74,3 +74,10 @@ mm-compaction.c-fix-zoneindex-in-kcompactd.patch
 wait-ptrace-assume-__wall-if-the-child-is-traced.patch
 qe-uart-add-fsl-t1040-ucc-uart-to-of_device_id.patch
 batman-adv-fix-double-neigh_node_put-in-batadv_v_ogm_route_update.patch
+powerpc-book3s64-fix-branching-to-ool-handlers-in-relocatable-kernel.patch
+powerpc-eeh-don-t-report-error-in-eeh_pe_reset_and_recover.patch
+revert-powerpc-eeh-fix-crash-in-eeh_add_device_early-on-cell.patch
+powerpc-eeh-restore-initial-state-in-eeh_pe_reset_and_recover.patch
+xen-events-don-t-move-disabled-irqs.patch
+xen-use-same-main-loop-for-counting-and-remapping-pages.patch
+sunrpc-fix-stripping-of-padded-mic-tokens.patch
diff --git a/queue-4.6/sunrpc-fix-stripping-of-padded-mic-tokens.patch b/queue-4.6/sunrpc-fix-stripping-of-padded-mic-tokens.patch
new file mode 100644 (file)
index 0000000..5bfdc67
--- /dev/null
@@ -0,0 +1,52 @@
+From c0cb8bf3a8e4bd82e640862cdd8891400405cb89 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Trnka?= <ttrnka@mail.muni.cz>
+Date: Fri, 20 May 2016 16:41:10 +0200
+Subject: sunrpc: fix stripping of padded MIC tokens
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tomáš Trnka <ttrnka@mail.muni.cz>
+
+commit c0cb8bf3a8e4bd82e640862cdd8891400405cb89 upstream.
+
+The length of the GSS MIC token need not be a multiple of four bytes.
+It is then padded by XDR to a multiple of 4 B, but unwrap_integ_data()
+would previously only trim mic.len + 4 B. The remaining up to three
+bytes would then trigger a check in nfs4svc_decode_compoundargs(),
+leading to a "garbage args" error and mount failure:
+
+nfs4svc_decode_compoundargs: compound not properly padded!
+nfsd: failed to decode arguments!
+
+This would prevent older clients using the pre-RFC 4121 MIC format
+(37-byte MIC including a 9-byte OID) from mounting exports from v3.9+
+servers using krb5i.
+
+The trimming was introduced by commit 4c190e2f913f ("sunrpc: trim off
+trailing checksum before returning decrypted or integrity authenticated
+buffer").
+
+Fixes: 4c190e2f913f "sunrpc: trim off trailing checksum..."
+Signed-off-by: Tomáš Trnka <ttrnka@mail.muni.cz>
+Acked-by: Jeff Layton <jlayton@poochiereds.net>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/auth_gss/svcauth_gss.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -857,8 +857,8 @@ unwrap_integ_data(struct svc_rqst *rqstp
+               goto out;
+       if (svc_getnl(&buf->head[0]) != seq)
+               goto out;
+-      /* trim off the mic at the end before returning */
+-      xdr_buf_trim(buf, mic.len + 4);
++      /* trim off the mic and padding at the end before returning */
++      xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
+       stat = 0;
+ out:
+       kfree(mic.data);
diff --git a/queue-4.6/xen-events-don-t-move-disabled-irqs.patch b/queue-4.6/xen-events-don-t-move-disabled-irqs.patch
new file mode 100644 (file)
index 0000000..a1398c8
--- /dev/null
@@ -0,0 +1,86 @@
+From f0f393877c71ad227d36705d61d1e4062bc29cf5 Mon Sep 17 00:00:00 2001
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+Date: Tue, 10 May 2016 16:11:00 +0100
+Subject: xen/events: Don't move disabled irqs
+
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+
+commit f0f393877c71ad227d36705d61d1e4062bc29cf5 upstream.
+
+Commit ff1e22e7a638 ("xen/events: Mask a moving irq") open-coded
+irq_move_irq() but left out checking if the IRQ is disabled. This broke
+resuming from suspend since it tries to move a (disabled) irq without
+holding the IRQ's desc->lock. Fix it by adding in a check for disabled
+IRQs.
+
+The resulting stacktrace was:
+kernel BUG at /build/linux-UbQGH5/linux-4.4.0/kernel/irq/migration.c:31!
+invalid opcode: 0000 [#1] SMP
+Modules linked in: xenfs xen_privcmd ...
+CPU: 0 PID: 9 Comm: migration/0 Not tainted 4.4.0-22-generic #39-Ubuntu
+Hardware name: Xen HVM domU, BIOS 4.6.1-xs125180 05/04/2016
+task: ffff88003d75ee00 ti: ffff88003d7bc000 task.ti: ffff88003d7bc000
+RIP: 0010:[<ffffffff810e26e2>]  [<ffffffff810e26e2>] irq_move_masked_irq+0xd2/0xe0
+RSP: 0018:ffff88003d7bfc50  EFLAGS: 00010046
+RAX: 0000000000000000 RBX: ffff88003d40ba00 RCX: 0000000000000001
+RDX: 0000000000000001 RSI: 0000000000000100 RDI: ffff88003d40bad8
+RBP: ffff88003d7bfc68 R08: 0000000000000000 R09: ffff88003d000000
+R10: 0000000000000000 R11: 000000000000023c R12: ffff88003d40bad0
+R13: ffffffff81f3a4a0 R14: 0000000000000010 R15: 00000000ffffffff
+FS:  0000000000000000(0000) GS:ffff88003da00000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fd4264de624 CR3: 0000000037922000 CR4: 00000000003406f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Stack:
+ ffff88003d40ba38 0000000000000024 0000000000000000 ffff88003d7bfca0
+ ffffffff814c8d92 00000010813ef89d 00000000805ea732 0000000000000009
+ 0000000000000024 ffff88003cc39b80 ffff88003d7bfce0 ffffffff814c8f66
+Call Trace:
+ [<ffffffff814c8d92>] eoi_pirq+0xb2/0xf0
+ [<ffffffff814c8f66>] __startup_pirq+0xe6/0x150
+ [<ffffffff814ca659>] xen_irq_resume+0x319/0x360
+ [<ffffffff814c7e75>] xen_suspend+0xb5/0x180
+ [<ffffffff81120155>] multi_cpu_stop+0xb5/0xe0
+ [<ffffffff811200a0>] ? cpu_stop_queue_work+0x80/0x80
+ [<ffffffff811203d0>] cpu_stopper_thread+0xb0/0x140
+ [<ffffffff810a94e6>] ? finish_task_switch+0x76/0x220
+ [<ffffffff810ca731>] ? __raw_callee_save___pv_queued_spin_unlock+0x11/0x20
+ [<ffffffff810a3935>] smpboot_thread_fn+0x105/0x160
+ [<ffffffff810a3830>] ? sort_range+0x30/0x30
+ [<ffffffff810a0588>] kthread+0xd8/0xf0
+ [<ffffffff810a04b0>] ? kthread_create_on_node+0x1e0/0x1e0
+ [<ffffffff8182568f>] ret_from_fork+0x3f/0x70
+ [<ffffffff810a04b0>] ? kthread_create_on_node+0x1e0/0x1e0
+
+Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/events/events_base.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -487,7 +487,8 @@ static void eoi_pirq(struct irq_data *da
+       if (!VALID_EVTCHN(evtchn))
+               return;
+-      if (unlikely(irqd_is_setaffinity_pending(data))) {
++      if (unlikely(irqd_is_setaffinity_pending(data)) &&
++          likely(!irqd_irq_disabled(data))) {
+               int masked = test_and_set_mask(evtchn);
+               clear_evtchn(evtchn);
+@@ -1370,7 +1371,8 @@ static void ack_dynirq(struct irq_data *
+       if (!VALID_EVTCHN(evtchn))
+               return;
+-      if (unlikely(irqd_is_setaffinity_pending(data))) {
++      if (unlikely(irqd_is_setaffinity_pending(data)) &&
++          likely(!irqd_irq_disabled(data))) {
+               int masked = test_and_set_mask(evtchn);
+               clear_evtchn(evtchn);
diff --git a/queue-4.6/xen-use-same-main-loop-for-counting-and-remapping-pages.patch b/queue-4.6/xen-use-same-main-loop-for-counting-and-remapping-pages.patch
new file mode 100644 (file)
index 0000000..76cca85
--- /dev/null
@@ -0,0 +1,144 @@
+From dd14be92fbf5bc1ef7343f34968440e44e21b46a Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Wed, 18 May 2016 16:44:54 +0200
+Subject: xen: use same main loop for counting and remapping pages
+
+From: Juergen Gross <jgross@suse.com>
+
+commit dd14be92fbf5bc1ef7343f34968440e44e21b46a upstream.
+
+Instead of having two functions for cycling through the E820 map in
+order to count to be remapped pages and remap them later, just use one
+function with a caller supplied sub-function called for each region to
+be processed. This eliminates the possibility of a mismatch between
+both loops which showed up in certain configurations.
+
+Suggested-by: Ed Swierk <eswierk@skyportsystems.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/setup.c |   65 ++++++++++++++++++++-------------------------------
+ 1 file changed, 26 insertions(+), 39 deletions(-)
+
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -393,6 +393,9 @@ static unsigned long __init xen_set_iden
+       unsigned long i = 0;
+       unsigned long n = end_pfn - start_pfn;
++      if (remap_pfn == 0)
++              remap_pfn = nr_pages;
++
+       while (i < n) {
+               unsigned long cur_pfn = start_pfn + i;
+               unsigned long left = n - i;
+@@ -438,17 +441,29 @@ static unsigned long __init xen_set_iden
+       return remap_pfn;
+ }
+-static void __init xen_set_identity_and_remap(unsigned long nr_pages)
++static unsigned long __init xen_count_remap_pages(
++      unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
++      unsigned long remap_pages)
++{
++      if (start_pfn >= nr_pages)
++              return remap_pages;
++
++      return remap_pages + min(end_pfn, nr_pages) - start_pfn;
++}
++
++static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
++      unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
++                            unsigned long nr_pages, unsigned long last_val))
+ {
+       phys_addr_t start = 0;
+-      unsigned long last_pfn = nr_pages;
++      unsigned long ret_val = 0;
+       const struct e820entry *entry = xen_e820_map;
+       int i;
+       /*
+        * Combine non-RAM regions and gaps until a RAM region (or the
+-       * end of the map) is reached, then set the 1:1 map and
+-       * remap the memory in those non-RAM regions.
++       * end of the map) is reached, then call the provided function
++       * to perform its duty on the non-RAM region.
+        *
+        * The combined non-RAM regions are rounded to a whole number
+        * of pages so any partial pages are accessible via the 1:1
+@@ -466,14 +481,13 @@ static void __init xen_set_identity_and_
+                               end_pfn = PFN_UP(entry->addr);
+                       if (start_pfn < end_pfn)
+-                              last_pfn = xen_set_identity_and_remap_chunk(
+-                                              start_pfn, end_pfn, nr_pages,
+-                                              last_pfn);
++                              ret_val = func(start_pfn, end_pfn, nr_pages,
++                                             ret_val);
+                       start = end;
+               }
+       }
+-      pr_info("Released %ld page(s)\n", xen_released_pages);
++      return ret_val;
+ }
+ /*
+@@ -596,35 +610,6 @@ static void __init xen_ignore_unusable(v
+       }
+ }
+-static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
+-{
+-      unsigned long extra = 0;
+-      unsigned long start_pfn, end_pfn;
+-      const struct e820entry *entry = xen_e820_map;
+-      int i;
+-
+-      end_pfn = 0;
+-      for (i = 0; i < xen_e820_map_entries; i++, entry++) {
+-              start_pfn = PFN_DOWN(entry->addr);
+-              /* Adjacent regions on non-page boundaries handling! */
+-              end_pfn = min(end_pfn, start_pfn);
+-
+-              if (start_pfn >= max_pfn)
+-                      return extra + max_pfn - end_pfn;
+-
+-              /* Add any holes in map to result. */
+-              extra += start_pfn - end_pfn;
+-
+-              end_pfn = PFN_UP(entry->addr + entry->size);
+-              end_pfn = min(end_pfn, max_pfn);
+-
+-              if (entry->type != E820_RAM)
+-                      extra += end_pfn - start_pfn;
+-      }
+-
+-      return extra;
+-}
+-
+ bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
+ {
+       struct e820entry *entry;
+@@ -804,7 +789,7 @@ char * __init xen_memory_setup(void)
+       max_pages = xen_get_max_pages();
+       /* How many extra pages do we need due to remapping? */
+-      max_pages += xen_count_remap_pages(max_pfn);
++      max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
+       if (max_pages > max_pfn)
+               extra_pages += max_pages - max_pfn;
+@@ -922,7 +907,9 @@ char * __init xen_memory_setup(void)
+        * Set identity map on non-RAM pages and prepare remapping the
+        * underlying RAM.
+        */
+-      xen_set_identity_and_remap(max_pfn);
++      xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
++
++      pr_info("Released %ld page(s)\n", xen_released_pages);
+       return "Xen";
+ }