--- /dev/null
+From 6f3508f61c814ee852c199988a62bd954c50dfc1 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Wed, 20 Jan 2016 12:54:51 +0300
+Subject: EDAC, amd64_edac: Shift wrapping issue in f1x_get_norm_dct_addr()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 6f3508f61c814ee852c199988a62bd954c50dfc1 upstream.
+
+dct_sel_base_off is declared as a u64 but we're only using the lower 32
+bits because of a shift wrapping bug. This can possibly truncate the
+upper 16 bits of DctSelBaseOffset[47:26], causing us to misdecode the CS
+row.
+
+Fixes: c8e518d5673d ('amd64_edac: Sanitize f10_get_base_addr_offset')
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com>
+Cc: linux-edac <linux-edac@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20160120095451.GB19898@mwanda
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/edac/amd64_edac.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -1452,7 +1452,7 @@ static u64 f1x_get_norm_dct_addr(struct
+ u64 chan_off;
+ u64 dram_base = get_dram_base(pvt, range);
+ u64 hole_off = f10_dhar_offset(pvt);
+- u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
++ u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
+
+ if (hi_rng) {
+ /*
--- /dev/null
+From eb1af3b71f9d83e45f2fd2fd649356e98e1c582c Mon Sep 17 00:00:00 2001
+From: "Luck, Tony" <tony.luck@intel.com>
+Date: Wed, 9 Mar 2016 16:40:48 -0800
+Subject: EDAC/sb_edac: Fix computation of channel address
+
+From: Tony Luck <tony.luck@intel.com>
+
+commit eb1af3b71f9d83e45f2fd2fd649356e98e1c582c upstream.
+
+Large memory Haswell-EX systems with multiple DIMMs per channel were
+sometimes reporting the wrong DIMM.
+
+Found three problems:
+
+ 1) Debug printouts for socket and channel interleave were not interpreting
+ the register fields correctly. The socket interleave field is a 2^X
+ value (0=1, 1=2, 2=4, 3=8). The channel interleave is X+1 (0=1, 1=2,
+ 2=3, 3=4).
+
+ 2) Actual use of the socket interleave value didn't interpret as 2^X
+
+ 3) Conversion of address to channel address was complicated, and wrong.
+
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Acked-by: Aristeu Rozanski <arozansk@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-edac@vger.kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/edac/sb_edac.c | 26 ++++++++++----------------
+ 1 file changed, 10 insertions(+), 16 deletions(-)
+
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -1117,8 +1117,8 @@ static void get_memory_layout(const stru
+ edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
+ n_tads, gb, (mb*1000)/1024,
+ ((u64)tmp_mb) << 20L,
+- (u32)TAD_SOCK(reg),
+- (u32)TAD_CH(reg),
++ (u32)(1 << TAD_SOCK(reg)),
++ (u32)TAD_CH(reg) + 1,
+ (u32)TAD_TGT0(reg),
+ (u32)TAD_TGT1(reg),
+ (u32)TAD_TGT2(reg),
+@@ -1396,7 +1396,7 @@ static int get_memory_error_data(struct
+ }
+
+ ch_way = TAD_CH(reg) + 1;
+- sck_way = TAD_SOCK(reg) + 1;
++ sck_way = 1 << TAD_SOCK(reg);
+
+ if (ch_way == 3)
+ idx = addr >> 6;
+@@ -1453,7 +1453,7 @@ static int get_memory_error_data(struct
+ n_tads,
+ addr,
+ limit,
+- (u32)TAD_SOCK(reg),
++ sck_way,
+ ch_way,
+ offset,
+ idx,
+@@ -1468,18 +1468,12 @@ static int get_memory_error_data(struct
+ offset, addr);
+ return -EINVAL;
+ }
+- addr -= offset;
+- /* Store the low bits [0:6] of the addr */
+- ch_addr = addr & 0x7f;
+- /* Remove socket wayness and remove 6 bits */
+- addr >>= 6;
+- addr = div_u64(addr, sck_xch);
+-#if 0
+- /* Divide by channel way */
+- addr = addr / ch_way;
+-#endif
+- /* Recover the last 6 bits */
+- ch_addr |= addr << 6;
++
++ ch_addr = addr - offset;
++ ch_addr >>= (6 + shiftup);
++ ch_addr /= ch_way * sck_way;
++ ch_addr <<= (6 + shiftup);
++ ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
+
+ /*
+ * Step 3) Decode rank
--- /dev/null
+From e9ad4ec8379ad1ba6f68b8ca1c26b50b5ae0a327 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 21 Mar 2016 10:15:25 +0100
+Subject: KVM: fix spin_lock_init order on x86
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit e9ad4ec8379ad1ba6f68b8ca1c26b50b5ae0a327 upstream.
+
+Moving the initialization earlier is needed in 4.6 because
+kvm_arch_init_vm is now using mmu_lock, causing lockdep to
+complain:
+
+[ 284.440294] INFO: trying to register non-static key.
+[ 284.445259] the code is fine but needs lockdep annotation.
+[ 284.450736] turning off the locking correctness validator.
+...
+[ 284.528318] [<ffffffff810aecc3>] lock_acquire+0xd3/0x240
+[ 284.533733] [<ffffffffa0305aa0>] ? kvm_page_track_register_notifier+0x20/0x60 [kvm]
+[ 284.541467] [<ffffffff81715581>] _raw_spin_lock+0x41/0x80
+[ 284.546960] [<ffffffffa0305aa0>] ? kvm_page_track_register_notifier+0x20/0x60 [kvm]
+[ 284.554707] [<ffffffffa0305aa0>] kvm_page_track_register_notifier+0x20/0x60 [kvm]
+[ 284.562281] [<ffffffffa02ece70>] kvm_mmu_init_vm+0x20/0x30 [kvm]
+[ 284.568381] [<ffffffffa02dbf7a>] kvm_arch_init_vm+0x1ea/0x200 [kvm]
+[ 284.574740] [<ffffffffa02bff3f>] kvm_dev_ioctl+0xbf/0x4d0 [kvm]
+
+However, it also helps fixing a preexisting problem, which is why this
+patch is also good for stable kernels: kvm_create_vm was incrementing
+current->mm->mm_count but not decrementing it at the out_err label (in
+case kvm_init_mmu_notifier failed). The new initialization order makes
+it possible to add the required mmdrop without adding a new error label.
+
+Reported-by: Borislav Petkov <bp@alien8.de>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/kvm_main.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -547,6 +547,16 @@ static struct kvm *kvm_create_vm(unsigne
+ if (!kvm)
+ return ERR_PTR(-ENOMEM);
+
++ spin_lock_init(&kvm->mmu_lock);
++ atomic_inc(&current->mm->mm_count);
++ kvm->mm = current->mm;
++ kvm_eventfd_init(kvm);
++ mutex_init(&kvm->lock);
++ mutex_init(&kvm->irq_lock);
++ mutex_init(&kvm->slots_lock);
++ atomic_set(&kvm->users_count, 1);
++ INIT_LIST_HEAD(&kvm->devices);
++
+ r = kvm_arch_init_vm(kvm, type);
+ if (r)
+ goto out_err_no_disable;
+@@ -579,16 +589,6 @@ static struct kvm *kvm_create_vm(unsigne
+ goto out_err;
+ }
+
+- spin_lock_init(&kvm->mmu_lock);
+- kvm->mm = current->mm;
+- atomic_inc(&kvm->mm->mm_count);
+- kvm_eventfd_init(kvm);
+- mutex_init(&kvm->lock);
+- mutex_init(&kvm->irq_lock);
+- mutex_init(&kvm->slots_lock);
+- atomic_set(&kvm->users_count, 1);
+- INIT_LIST_HEAD(&kvm->devices);
+-
+ r = kvm_init_mmu_notifier(kvm);
+ if (r)
+ goto out_err;
+@@ -613,6 +613,7 @@ out_err_no_disable:
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+ kvm_free_memslots(kvm, kvm->memslots[i]);
+ kvm_arch_free_vm(kvm);
++ mmdrop(current->mm);
+ return ERR_PTR(r);
+ }
+
--- /dev/null
+From 7dd0fdff145c5be7146d0ac06732ae3613412ac1 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= <rkrcmar@redhat.com>
+Date: Wed, 2 Mar 2016 22:56:38 +0100
+Subject: KVM: i8254: change PIT discard tick policy
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Radim Krčmář <rkrcmar@redhat.com>
+
+commit 7dd0fdff145c5be7146d0ac06732ae3613412ac1 upstream.
+
+Discard policy uses ack_notifiers to prevent injection of PIT interrupts
+before EOI from the last one.
+
+This patch changes the policy to always try to deliver the interrupt,
+which makes a difference when its vector is in ISR.
+Old implementation would drop the interrupt, but proposed one injects to
+IRR, like real hardware would.
+
+The old policy breaks legacy NMI watchdogs, where PIT is used through
+virtual wire (LVT0): PIT never sends an interrupt before receiving EOI,
+thus a guest deadlock with disabled interrupts will stop NMIs.
+
+Note that NMI doesn't do EOI, so PIT also had to send a normal interrupt
+through IOAPIC. (KVM's PIT is deeply rotten and luckily not used much
+in modern systems.)
+
+Even though there is a chance of regressions, I think we can fix the
+LVT0 NMI bug without introducing a new tick policy.
+
+Reported-by: Yuki Shibuya <shibuya.yk@ncos.nec.co.jp>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/i8254.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -245,7 +245,7 @@ static void kvm_pit_ack_irq(struct kvm_i
+ * PIC is being reset. Handle it gracefully here
+ */
+ atomic_inc(&ps->pending);
+- else if (value > 0)
++ else if (value > 0 && ps->reinject)
+ /* in this case, we had multiple outstanding pit interrupts
+ * that we needed to inject. Reinject
+ */
+@@ -288,7 +288,9 @@ static void pit_do_work(struct kthread_w
+ * last one has been acked.
+ */
+ spin_lock(&ps->inject_lock);
+- if (ps->irq_ack) {
++ if (!ps->reinject)
++ inject = 1;
++ else if (ps->irq_ack) {
+ ps->irq_ack = 0;
+ inject = 1;
+ }
+@@ -317,10 +319,10 @@ static enum hrtimer_restart pit_timer_fn
+ struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
+ struct kvm_pit *pt = ps->kvm->arch.vpit;
+
+- if (ps->reinject || !atomic_read(&ps->pending)) {
++ if (ps->reinject)
+ atomic_inc(&ps->pending);
+- queue_kthread_work(&pt->worker, &pt->expired);
+- }
++
++ queue_kthread_work(&pt->worker, &pt->expired);
+
+ if (ps->is_periodic) {
+ hrtimer_add_expires_ns(&ps->timer, ps->period);
--- /dev/null
+From 2849eb4f99d54925c543db12917127f88b3c38ff Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 18 Mar 2016 16:53:29 +0100
+Subject: KVM: VMX: avoid guest hang on invalid invept instruction
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 2849eb4f99d54925c543db12917127f88b3c38ff upstream.
+
+A guest executing an invalid invept instruction would hang
+because the instruction pointer was not updated.
+
+Fixes: bfd0a56b90005f8c8a004baf407ad90045c2b11e
+Reviewed-by: David Matlack <dmatlack@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7340,6 +7340,7 @@ static int handle_invept(struct kvm_vcpu
+ if (!(types & (1UL << type))) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
++ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
--- /dev/null
+From f6870ee9e53430f2a318ccf0dd5e66bb46194e43 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 18 Mar 2016 16:53:42 +0100
+Subject: KVM: VMX: avoid guest hang on invalid invvpid instruction
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit f6870ee9e53430f2a318ccf0dd5e66bb46194e43 upstream.
+
+A guest executing an invalid invvpid instruction would hang
+because the instruction pointer was not updated.
+
+Reported-by: jmontleo@redhat.com
+Tested-by: jmontleo@redhat.com
+Fixes: 99b83ac893b84ed1a62ad6d1f2b6cc32026b9e85
+Reviewed-by: David Matlack <dmatlack@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7399,6 +7399,7 @@ static int handle_invvpid(struct kvm_vcp
+ if (!(types & (1UL << type))) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
++ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
--- /dev/null
+From ef697a712a6165aea7779c295604b099e8bfae2e Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 18 Mar 2016 16:58:38 +0100
+Subject: KVM: VMX: fix nested vpid for old KVM guests
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit ef697a712a6165aea7779c295604b099e8bfae2e upstream.
+
+Old KVM guests invoke single-context invvpid without actually checking
+whether it is supported. This was fixed by commit 518c8ae ("KVM: VMX:
+Make sure single type invvpid is supported before issuing invvpid
+instruction", 2010-08-01) and the patch after, but pre-2.6.36
+kernels lack it including RHEL 6.
+
+Reported-by: jmontleo@redhat.com
+Tested-by: jmontleo@redhat.com
+Fixes: 99b83ac893b84ed1a62ad6d1f2b6cc32026b9e85
+Reviewed-by: David Matlack <dmatlack@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2637,8 +2637,15 @@ static void nested_vmx_setup_ctls_msrs(s
+ } else
+ vmx->nested.nested_vmx_ept_caps = 0;
+
++ /*
++ * Old versions of KVM use the single-context version without
++ * checking for support, so declare that it is supported even
++ * though it is treated as global context. The alternative is
++ * not failing the single-context invvpid, and it is worse.
++ */
+ if (enable_vpid)
+ vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
++ VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
+ VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
+ else
+ vmx->nested.nested_vmx_vpid_caps = 0;
+@@ -7416,12 +7423,17 @@ static int handle_invvpid(struct kvm_vcp
+ }
+
+ switch (type) {
++ case VMX_VPID_EXTENT_SINGLE_CONTEXT:
++ /*
++ * Old versions of KVM use the single-context version so we
++ * have to support it; just treat it the same as all-context.
++ */
+ case VMX_VPID_EXTENT_ALL_CONTEXT:
+ __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
+ nested_vmx_succeed(vcpu);
+ break;
+ default:
+- /* Trap single context invalidation invvpid calls */
++ /* Trap individual address invalidation invvpid calls */
+ BUG_ON(1);
+ break;
+ }
--- /dev/null
+From 4e422bdd2f849d98fffccbc3295c2f0996097fb3 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 10 Feb 2016 17:50:23 +0100
+Subject: KVM: x86: fix missed hardware breakpoints
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 4e422bdd2f849d98fffccbc3295c2f0996097fb3 upstream.
+
+Sometimes when setting a breakpoint a process doesn't stop on it.
+This is because the debug registers are not loaded correctly on
+VCPU load.
+
+The following simple reproducer from Oleg Nesterov tries using debug
+registers in both the host and the guest, for example by running "./bp
+0 1" on the host and "./bp 14 15" under QEMU.
+
+ #include <unistd.h>
+ #include <signal.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+ #include <sys/wait.h>
+ #include <sys/ptrace.h>
+ #include <sys/user.h>
+ #include <asm/debugreg.h>
+ #include <assert.h>
+
+ #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+ unsigned long encode_dr7(int drnum, int enable, unsigned int type, unsigned int len)
+ {
+ unsigned long dr7;
+
+ dr7 = ((len | type) & 0xf)
+ << (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE);
+ if (enable)
+ dr7 |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE));
+
+ return dr7;
+ }
+
+ int write_dr(int pid, int dr, unsigned long val)
+ {
+ return ptrace(PTRACE_POKEUSER, pid,
+ offsetof (struct user, u_debugreg[dr]),
+ val);
+ }
+
+ void set_bp(pid_t pid, void *addr)
+ {
+ unsigned long dr7;
+ assert(write_dr(pid, 0, (long)addr) == 0);
+ dr7 = encode_dr7(0, 1, DR_RW_EXECUTE, DR_LEN_1);
+ assert(write_dr(pid, 7, dr7) == 0);
+ }
+
+ void *get_rip(int pid)
+ {
+ return (void*)ptrace(PTRACE_PEEKUSER, pid,
+ offsetof(struct user, regs.rip), 0);
+ }
+
+ void test(int nr)
+ {
+ void *bp_addr = &&label + nr, *bp_hit;
+ int pid;
+
+ printf("test bp %d\n", nr);
+ assert(nr < 16); // see 16 asm nops below
+
+ pid = fork();
+ if (!pid) {
+ assert(ptrace(PTRACE_TRACEME, 0,0,0) == 0);
+ kill(getpid(), SIGSTOP);
+ for (;;) {
+ label: asm (
+ "nop; nop; nop; nop;"
+ "nop; nop; nop; nop;"
+ "nop; nop; nop; nop;"
+ "nop; nop; nop; nop;"
+ );
+ }
+ }
+
+ assert(pid == wait(NULL));
+ set_bp(pid, bp_addr);
+
+ for (;;) {
+ assert(ptrace(PTRACE_CONT, pid, 0, 0) == 0);
+ assert(pid == wait(NULL));
+
+ bp_hit = get_rip(pid);
+ if (bp_hit != bp_addr)
+ fprintf(stderr, "ERR!! hit wrong bp %ld != %d\n",
+ bp_hit - &&label, nr);
+ }
+ }
+
+ int main(int argc, const char *argv[])
+ {
+ while (--argc) {
+ int nr = atoi(*++argv);
+ if (!fork())
+ test(nr);
+ }
+
+ while (wait(NULL) > 0)
+ ;
+ return 0;
+ }
+
+Suggested-by: Nadav Amit <namit@cs.technion.ac.il>
+Reported-by: Andrey Wagin <avagin@gmail.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2736,6 +2736,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu
+ }
+
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
++ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
+ }
+
+ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
--- /dev/null
+From 4a2e7aab4ffce1e0e79b303dc2f9a03aa9f3a332 Mon Sep 17 00:00:00 2001
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Date: Mon, 21 Mar 2016 11:12:55 +0000
+Subject: PCI: ACPI: IA64: fix IO port generic range check
+
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+
+commit 4a2e7aab4ffce1e0e79b303dc2f9a03aa9f3a332 upstream.
+
+The [0 - 64k] ACPI PCI IO port resource boundary check in:
+
+acpi_dev_ioresource_flags()
+
+is currently applied blindly in the ACPI resource parsing to all
+architectures, but only x86 suffers from that IO space limitation.
+
+On arches (ie IA64 and ARM64) where IO space is memory mapped,
+the PCI root bridges IO resource windows are firstly initialized from
+the _CRS (in acpi_decode_space()) and contain the CPU physical address
+at which a root bridge decodes IO space in the CPU physical address
+space with the offset value representing the offset required to translate
+the PCI bus address into the CPU physical address.
+
+The IO resource windows are then parsed and updated in arch code
+before creating and enumerating PCI buses (eg IA64 add_io_space())
+to map in an arch specific way the obtained CPU physical address range
+to a slice of virtual address space reserved to map PCI IO space,
+ending up with PCI bridges resource windows containing IO
+resources like the following on a working IA64 configuration:
+
+PCI host bridge to bus 0000:00
+pci_bus 0000:00: root bus resource [io 0x1000000-0x100ffff window] (bus
+address [0x0000-0xffff])
+pci_bus 0000:00: root bus resource [mem 0x000a0000-0x000fffff window]
+pci_bus 0000:00: root bus resource [mem 0x80000000-0x8fffffff window]
+pci_bus 0000:00: root bus resource [mem 0x80004000000-0x800ffffffff window]
+pci_bus 0000:00: root bus resource [bus 00]
+
+This implies that the [0 - 64K] check in acpi_dev_ioresource_flags()
+leaves platforms with memory mapped IO space (ie IA64) broken (ie kernel
+can't claim IO resources since the host bridge IO resource is disabled
+and discarded by ACPI core code, see log on IA64 with missing root bridge
+IO resource, silently filtered by current [0 - 64k] check in
+acpi_dev_ioresource_flags()):
+
+PCI host bridge to bus 0000:00
+pci_bus 0000:00: root bus resource [mem 0x000a0000-0x000fffff window]
+pci_bus 0000:00: root bus resource [mem 0x80000000-0x8fffffff window]
+pci_bus 0000:00: root bus resource [mem 0x80004000000-0x800ffffffff window]
+pci_bus 0000:00: root bus resource [bus 00]
+
+[...]
+
+pci 0000:00:03.0: [1002:515e] type 00 class 0x030000
+pci 0000:00:03.0: reg 0x10: [mem 0x80000000-0x87ffffff pref]
+pci 0000:00:03.0: reg 0x14: [io 0x1000-0x10ff]
+pci 0000:00:03.0: reg 0x18: [mem 0x88020000-0x8802ffff]
+pci 0000:00:03.0: reg 0x30: [mem 0x88000000-0x8801ffff pref]
+pci 0000:00:03.0: supports D1 D2
+pci 0000:00:03.0: can't claim BAR 1 [io 0x1000-0x10ff]: no compatible
+bridge window
+
+For this reason, the IO port resources boundaries check in generic ACPI
+parsing code should be guarded with a CONFIG_X86 guard so that more arches
+(ie ARM64) can benefit from the generic ACPI resources parsing interface
+without incurring in unexpected resource filtering, fixing at the same
+time current breakage on IA64.
+
+This patch factors out IO ports boundary [0 - 64k] check in generic ACPI
+code and makes the IO space check X86 specific to make sure that IO
+space resources are usable on other arches too.
+
+Fixes: 3772aea7d6f3 (ia64/PCI/ACPI: Use common ACPI resource parsing interface for host bridge)
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/resource.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -26,8 +26,20 @@
+
+ #ifdef CONFIG_X86
+ #define valid_IRQ(i) (((i) != 0) && ((i) != 2))
++static inline bool acpi_iospace_resource_valid(struct resource *res)
++{
++ /* On X86 IO space is limited to the [0 - 64K] IO port range */
++ return res->end < 0x10003;
++}
+ #else
+ #define valid_IRQ(i) (true)
++/*
++ * ACPI IO descriptors on arches other than X86 contain MMIO CPU physical
++ * addresses mapping IO space in CPU physical address space, IO space
++ * resources can be placed anywhere in the 64-bit physical address space.
++ */
++static inline bool
++acpi_iospace_resource_valid(struct resource *res) { return true; }
+ #endif
+
+ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
+@@ -126,7 +138,7 @@ static void acpi_dev_ioresource_flags(st
+ if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
+ res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
+
+- if (res->end >= 0x10003)
++ if (!acpi_iospace_resource_valid(res))
+ res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
+
+ if (io_decode == ACPI_DECODE_16)
--- /dev/null
+From b84106b4e2290c081cdab521fa832596cdfea246 Mon Sep 17 00:00:00 2001
+From: Bjorn Helgaas <bhelgaas@google.com>
+Date: Thu, 25 Feb 2016 14:35:57 -0600
+Subject: PCI: Disable IO/MEM decoding for devices with non-compliant BARs
+
+From: Bjorn Helgaas <bhelgaas@google.com>
+
+commit b84106b4e2290c081cdab521fa832596cdfea246 upstream.
+
+The PCI config header (first 64 bytes of each device's config space) is
+defined by the PCI spec so generic software can identify the device and
+manage its usage of I/O, memory, and IRQ resources.
+
+Some non-spec-compliant devices put registers other than BARs where the
+BARs should be. When the PCI core sizes these "BARs", the reads and writes
+it does may have unwanted side effects, and the "BAR" may appear to
+describe non-sensical address space.
+
+Add a flag bit to mark non-compliant devices so we don't touch their BARs.
+Turn off IO/MEM decoding to prevent the devices from consuming address
+space, since we can't read the BARs to find out what that address space
+would be.
+
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Tested-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/probe.c | 14 ++++++++++++++
+ include/linux/pci.h | 1 +
+ 2 files changed, 15 insertions(+)
+
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -179,6 +179,9 @@ int __pci_read_base(struct pci_dev *dev,
+ u16 orig_cmd;
+ struct pci_bus_region region, inverted_region;
+
++ if (dev->non_compliant_bars)
++ return 0;
++
+ mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+
+ /* No printks while decoding is disabled! */
+@@ -1174,6 +1177,7 @@ void pci_msi_setup_pci_dev(struct pci_de
+ int pci_setup_device(struct pci_dev *dev)
+ {
+ u32 class;
++ u16 cmd;
+ u8 hdr_type;
+ int pos = 0;
+ struct pci_bus_region region;
+@@ -1219,6 +1223,16 @@ int pci_setup_device(struct pci_dev *dev
+ /* device class may be changed after fixup */
+ class = dev->class >> 8;
+
++ if (dev->non_compliant_bars) {
++ pci_read_config_word(dev, PCI_COMMAND, &cmd);
++ if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
++ dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
++ cmd &= ~PCI_COMMAND_IO;
++ cmd &= ~PCI_COMMAND_MEMORY;
++ pci_write_config_word(dev, PCI_COMMAND, cmd);
++ }
++ }
++
+ switch (dev->hdr_type) { /* header type */
+ case PCI_HEADER_TYPE_NORMAL: /* standard header */
+ if (class == PCI_CLASS_BRIDGE_PCI)
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -359,6 +359,7 @@ struct pci_dev {
+ unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
+ unsigned int irq_managed:1;
+ unsigned int has_secondary_link:1;
++ unsigned int non_compliant_bars:1; /* broken BARs; ignore them */
+ pci_dev_flags_t dev_flags;
+ atomic_t enable_cnt; /* pci_enable_device has been called */
+
--- /dev/null
+From 927a5570855836e5d5859a80ce7e91e963545e8f Mon Sep 17 00:00:00 2001
+From: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Date: Wed, 2 Mar 2016 13:24:14 +0200
+Subject: perf/core: Fix perf_sched_count derailment
+
+From: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+
+commit 927a5570855836e5d5859a80ce7e91e963545e8f upstream.
+
+The error path in perf_event_open() is such that asking for a sampling
+event on a PMU that doesn't generate interrupts will end up in dropping
+the perf_sched_count even though it hasn't been incremented for this
+event yet.
+
+Given a sufficient amount of these calls, we'll end up disabling
+scheduler's jump label even though we'd still have active events in the
+system, thereby facilitating the arrival of the infernal regions upon us.
+
+I'm fixing this by moving account_event() inside perf_event_alloc().
+
+Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Cc: vince@deater.net
+Link: http://lkml.kernel.org/r/1456917854-29427-1-git-send-email-alexander.shishkin@linux.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/core.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7979,6 +7979,9 @@ perf_event_alloc(struct perf_event_attr
+ }
+ }
+
++ /* symmetric to unaccount_event() in _free_event() */
++ account_event(event);
++
+ return event;
+
+ err_per_task:
+@@ -8342,8 +8345,6 @@ SYSCALL_DEFINE5(perf_event_open,
+ }
+ }
+
+- account_event(event);
+-
+ /*
+ * Special case software events and allow them to be part of
+ * any hardware group.
+@@ -8626,8 +8627,6 @@ perf_event_create_kernel_counter(struct
+ /* Mark owner so we could distinguish it from user events. */
+ event->owner = EVENT_OWNER_KERNEL;
+
+- account_event(event);
+-
+ ctx = find_get_context(event->pmu, task, event);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
--- /dev/null
+From 940db6dcd3f4659303fdf6befe7416adc4d24118 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Wed, 17 Feb 2016 14:44:55 -0800
+Subject: perf tools: Dont stop PMU parsing on alias parse error
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 940db6dcd3f4659303fdf6befe7416adc4d24118 upstream.
+
+When an error happens during alias parsing currently the complete
+parsing of all attributes of the PMU is stopped. This breaks old perf
+on a newer kernel that may have not-yet-known alias attributes (such as
+.scale or .per-pkg).
+
+Continue when some attribute is unparseable.
+
+This is IMHO a stable candidate and should be backported to older
+versions to avoid problems with newer kernels.
+
+v2: Print warnings when something goes wrong.
+v3: Change warning to debug output
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Link: http://lkml.kernel.org/r/1455749095-18358-1-git-send-email-andi@firstfloor.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/pmu.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -283,13 +283,12 @@ static int pmu_aliases_parse(char *dir,
+ {
+ struct dirent *evt_ent;
+ DIR *event_dir;
+- int ret = 0;
+
+ event_dir = opendir(dir);
+ if (!event_dir)
+ return -EINVAL;
+
+- while (!ret && (evt_ent = readdir(event_dir))) {
++ while ((evt_ent = readdir(event_dir))) {
+ char path[PATH_MAX];
+ char *name = evt_ent->d_name;
+ FILE *file;
+@@ -305,17 +304,19 @@ static int pmu_aliases_parse(char *dir,
+
+ snprintf(path, PATH_MAX, "%s/%s", dir, name);
+
+- ret = -EINVAL;
+ file = fopen(path, "r");
+- if (!file)
+- break;
++ if (!file) {
++ pr_debug("Cannot open %s\n", path);
++ continue;
++ }
+
+- ret = perf_pmu__new_alias(head, dir, name, file);
++ if (perf_pmu__new_alias(head, dir, name, file) < 0)
++ pr_debug("Cannot set up %s\n", name);
+ fclose(file);
+ }
+
+ closedir(event_dir);
+- return ret;
++ return 0;
+ }
+
+ /*
--- /dev/null
+From 26dee028d365fbc0e3326606a8520260b4462381 Mon Sep 17 00:00:00 2001
+From: Wang Nan <wangnan0@huawei.com>
+Date: Fri, 19 Feb 2016 11:43:52 +0000
+Subject: perf tools: Fix checking asprintf return value
+
+From: Wang Nan <wangnan0@huawei.com>
+
+commit 26dee028d365fbc0e3326606a8520260b4462381 upstream.
+
+According to man pages, asprintf returns -1 when failure. This patch
+fixes two incorrect return value checker.
+
+Signed-off-by: Wang Nan <wangnan0@huawei.com>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
+Cc: Cody P Schafer <dev@codyps.com>
+Cc: He Kuang <hekuang@huawei.com>
+Cc: Jeremie Galarneau <jeremie.galarneau@efficios.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Kirill Smelkov <kirr@nexedi.com>
+Cc: Li Zefan <lizefan@huawei.com>
+Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Zefan Li <lizefan@huawei.com>
+Cc: pi3orama@163.com
+Fixes: ffeb883e5662 ("perf tools: Show proper error message for wrong terms of hw/sw events")
+Link: http://lkml.kernel.org/r/1455882283-79592-5-git-send-email-wangnan0@huawei.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/parse-events.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -2101,11 +2101,11 @@ char *parse_events_formats_error_string(
+
+ /* valid terms */
+ if (additional_terms) {
+- if (!asprintf(&str, "valid terms: %s,%s",
+- additional_terms, static_terms))
++ if (asprintf(&str, "valid terms: %s,%s",
++ additional_terms, static_terms) < 0)
+ goto fail;
+ } else {
+- if (!asprintf(&str, "valid terms: %s", static_terms))
++ if (asprintf(&str, "valid terms: %s", static_terms) < 0)
+ goto fail;
+ }
+ return str;
--- /dev/null
+From 67d5268908283c187e0a460048a423256c2fb288 Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@redhat.com>
+Date: Sat, 27 Feb 2016 21:21:12 +0100
+Subject: perf tools: Fix python extension build
+
+From: Jiri Olsa <jolsa@redhat.com>
+
+commit 67d5268908283c187e0a460048a423256c2fb288 upstream.
+
+The util/python-ext-sources file contains source files required to build
+the python extension relative to $(srctree)/tools/perf,
+
+Such a file path $(FILE).c is handed over to the python extension build
+system, which builds the final object in the
+$(PYTHON_EXTBUILD)/tmp/$(FILE).o path.
+
+After the build is done, all files from $(PYTHON_EXTBUILD)/lib/ are
+carried as the result binaries.
+
+Above system fails when we add source file relative to ../lib, which we
+do for:
+
+ ../lib/bitmap.c
+ ../lib/find_bit.c
+ ../lib/hweight.c
+ ../lib/rbtree.c
+
+All above objects will be built like:
+
+ $(PYTHON_EXTBUILD)/tmp/../lib/bitmap.c
+ $(PYTHON_EXTBUILD)/tmp/../lib/find_bit.c
+ $(PYTHON_EXTBUILD)/tmp/../lib/hweight.c
+ $(PYTHON_EXTBUILD)/tmp/../lib/rbtree.c
+
+which accidentally happens to be final library path:
+
+ $(PYTHON_EXTBUILD)/lib/
+
+Changing setup.py to pass full paths of source files to the Extension
+build class and thus keep all built objects under the
+$(PYTHON_EXTBUILD)/tmp directory.
+
+Reported-by: Jeff Bastian <jbastian@redhat.com>
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Tested-by: Josh Boyer <jwboyer@fedoraproject.org>
+Cc: David Ahern <dsahern@gmail.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/r/20160227201350.GB28494@krava.redhat.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/setup.py | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/tools/perf/util/setup.py
++++ b/tools/perf/util/setup.py
+@@ -22,6 +22,7 @@ cflags = getenv('CFLAGS', '').split()
+ # switch off several checks (need to be at the end of cflags list)
+ cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
+
++src_perf = getenv('srctree') + '/tools/perf'
+ build_lib = getenv('PYTHON_EXTBUILD_LIB')
+ build_tmp = getenv('PYTHON_EXTBUILD_TMP')
+ libtraceevent = getenv('LIBTRACEEVENT')
+@@ -30,6 +31,9 @@ libapikfs = getenv('LIBAPI')
+ ext_sources = [f.strip() for f in file('util/python-ext-sources')
+ if len(f.strip()) > 0 and f[0] != '#']
+
++# use full paths with source files
++ext_sources = map(lambda x: '%s/%s' % (src_perf, x) , ext_sources)
++
+ perf = Extension('perf',
+ sources = ext_sources,
+ include_dirs = ['util/include'],
--- /dev/null
+From 5690ae28e472d25e330ad0c637a5cea3fc39fb32 Mon Sep 17 00:00:00 2001
+From: Stephane Eranian <eranian@google.com>
+Date: Thu, 3 Mar 2016 20:50:40 +0100
+Subject: perf/x86/intel: Add definition for PT PMI bit
+
+From: Stephane Eranian <eranian@google.com>
+
+commit 5690ae28e472d25e330ad0c637a5cea3fc39fb32 upstream.
+
+This patch adds a definition for GLOBAL_OVFL_STATUS bit 55
+which is used with the Processor Trace (PT) feature.
+
+Signed-off-by: Stephane Eranian <eranian@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Cc: adrian.hunter@intel.com
+Cc: kan.liang@intel.com
+Cc: namhyung@kernel.org
+Link: http://lkml.kernel.org/r/1457034642-21837-2-git-send-email-eranian@google.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/perf_event.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -165,6 +165,7 @@ struct x86_pmu_capability {
+ #define GLOBAL_STATUS_ASIF BIT_ULL(60)
+ #define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
+ #define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(58)
++#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55)
+
+ /*
+ * IBS cpuid feature detection
--- /dev/null
+From 2c7e3306d23864d49f686f22e56e180ff0fffb7f Mon Sep 17 00:00:00 2001
+From: Phil Elwell <phil@raspberrypi.org>
+Date: Mon, 29 Feb 2016 17:30:08 -0800
+Subject: pinctrl-bcm2835: Fix cut-and-paste error in "pull" parsing
+
+From: Phil Elwell <phil@raspberrypi.org>
+
+commit 2c7e3306d23864d49f686f22e56e180ff0fffb7f upstream.
+
+The DT bindings for pinctrl-bcm2835 allow both the function and pull
+to contain either one entry or one per pin. However, an error in the
+DT parsing can cause failures if the number of pulls differs from the
+number of functions.
+
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Phil Elwell <phil@raspberrypi.org>
+Reviewed-by: Stephen Warren <swarren@wwwdotorg.org>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/bcm/pinctrl-bcm2835.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
++++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+@@ -779,7 +779,7 @@ static int bcm2835_pctl_dt_node_to_map(s
+ }
+ if (num_pulls) {
+ err = of_property_read_u32_index(np, "brcm,pull",
+- (num_funcs > 1) ? i : 0, &pull);
++ (num_pulls > 1) ? i : 0, &pull);
+ if (err)
+ goto out;
+ err = bcm2835_pctl_dt_node_to_map_pull(pc, np, pin,
--- /dev/null
+From 8f100bb1ff27873dd71f636da670e503b9ade3c6 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Thu, 10 Mar 2016 10:32:21 +0100
+Subject: s390/cpumf: add missing lpp magic initialization
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit 8f100bb1ff27873dd71f636da670e503b9ade3c6 upstream.
+
+Add the missing lpp magic initialization for cpu 0. Without this all
+samples on cpu 0 do not have the most significant bit set in the
+program parameter field, which we use to distinguish between guest and
+host samples if the pid is also 0.
+
+We did initialize the lpp magic in the absolute zero lowcore but
+forgot that when switching to the allocated lowcore on cpu 0 only.
+
+Reported-by: Shu Juan Zhang <zhshuj@cn.ibm.com>
+Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Fixes: e22cf8ca6f75 ("s390/cpumf: rework program parameter setting to detect guest samples")
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/setup.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -329,6 +329,7 @@ static void __init setup_lowcore(void)
+ + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
+ lc->current_task = (unsigned long) init_thread_union.thread_info.task;
+ lc->thread_info = (unsigned long) &init_thread_union;
++ lc->lpp = LPP_MAGIC;
+ lc->machine_flags = S390_lowcore.machine_flags;
+ lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
+ memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
--- /dev/null
+From e370e4769463a65dcf8806fa26d2874e0542ac41 Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Thu, 10 Mar 2016 09:52:55 +0100
+Subject: s390: fix floating pointer register corruption (again)
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+commit e370e4769463a65dcf8806fa26d2874e0542ac41 upstream.
+
+There is a tricky interaction between the machine check handler
+and the critical sections of load_fpu_regs and save_fpu_regs
+functions. If the machine check interrupts one of the two
+functions the critical section cleanup will complete the function
+before the machine check handler s390_do_machine_check is called.
+Trouble is that the machine check handler needs to validate the
+floating point registers *before* and not *after* the completion
+of load_fpu_regs/save_fpu_regs.
+
+The simplest solution is to rewind the PSW to the start of the
+load_fpu_regs/save_fpu_regs and retry the function after the
+return from the machine check handler.
+
+Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/entry.S | 106 -----------------------------------------------
+ 1 file changed, 2 insertions(+), 104 deletions(-)
+
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -1197,114 +1197,12 @@ cleanup_critical:
+ .quad .Lpsw_idle_lpsw
+
+ .Lcleanup_save_fpu_regs:
+- TSTMSK __LC_CPU_FLAGS,_CIF_FPU
+- bor %r14
+- clg %r9,BASED(.Lcleanup_save_fpu_regs_done)
+- jhe 5f
+- clg %r9,BASED(.Lcleanup_save_fpu_regs_fp)
+- jhe 4f
+- clg %r9,BASED(.Lcleanup_save_fpu_regs_vx_high)
+- jhe 3f
+- clg %r9,BASED(.Lcleanup_save_fpu_regs_vx_low)
+- jhe 2f
+- clg %r9,BASED(.Lcleanup_save_fpu_fpc_end)
+- jhe 1f
+- lg %r2,__LC_CURRENT
+- aghi %r2,__TASK_thread
+-0: # Store floating-point controls
+- stfpc __THREAD_FPU_fpc(%r2)
+-1: # Load register save area and check if VX is active
+- lg %r3,__THREAD_FPU_regs(%r2)
+- TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
+- jz 4f # no VX -> store FP regs
+-2: # Store vector registers (V0-V15)
+- VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
+-3: # Store vector registers (V16-V31)
+- VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3)
+- j 5f # -> done, set CIF_FPU flag
+-4: # Store floating-point registers
+- std 0,0(%r3)
+- std 1,8(%r3)
+- std 2,16(%r3)
+- std 3,24(%r3)
+- std 4,32(%r3)
+- std 5,40(%r3)
+- std 6,48(%r3)
+- std 7,56(%r3)
+- std 8,64(%r3)
+- std 9,72(%r3)
+- std 10,80(%r3)
+- std 11,88(%r3)
+- std 12,96(%r3)
+- std 13,104(%r3)
+- std 14,112(%r3)
+- std 15,120(%r3)
+-5: # Set CIF_FPU flag
+- oi __LC_CPU_FLAGS+7,_CIF_FPU
+- lg %r9,48(%r11) # return from save_fpu_regs
++ larl %r9,save_fpu_regs
+ br %r14
+-.Lcleanup_save_fpu_fpc_end:
+- .quad .Lsave_fpu_regs_fpc_end
+-.Lcleanup_save_fpu_regs_vx_low:
+- .quad .Lsave_fpu_regs_vx_low
+-.Lcleanup_save_fpu_regs_vx_high:
+- .quad .Lsave_fpu_regs_vx_high
+-.Lcleanup_save_fpu_regs_fp:
+- .quad .Lsave_fpu_regs_fp
+-.Lcleanup_save_fpu_regs_done:
+- .quad .Lsave_fpu_regs_done
+
+ .Lcleanup_load_fpu_regs:
+- TSTMSK __LC_CPU_FLAGS,_CIF_FPU
+- bnor %r14
+- clg %r9,BASED(.Lcleanup_load_fpu_regs_done)
+- jhe 1f
+- clg %r9,BASED(.Lcleanup_load_fpu_regs_fp)
+- jhe 2f
+- clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_high)
+- jhe 3f
+- clg %r9,BASED(.Lcleanup_load_fpu_regs_vx)
+- jhe 4f
+- lg %r4,__LC_CURRENT
+- aghi %r4,__TASK_thread
+- lfpc __THREAD_FPU_fpc(%r4)
+- TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
+- lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
+- jz 2f # -> no VX, load FP regs
+-4: # Load V0 ..V15 registers
+- VLM %v0,%v15,0,%r4
+-3: # Load V16..V31 registers
+- VLM %v16,%v31,256,%r4
+- j 1f
+-2: # Load floating-point registers
+- ld 0,0(%r4)
+- ld 1,8(%r4)
+- ld 2,16(%r4)
+- ld 3,24(%r4)
+- ld 4,32(%r4)
+- ld 5,40(%r4)
+- ld 6,48(%r4)
+- ld 7,56(%r4)
+- ld 8,64(%r4)
+- ld 9,72(%r4)
+- ld 10,80(%r4)
+- ld 11,88(%r4)
+- ld 12,96(%r4)
+- ld 13,104(%r4)
+- ld 14,112(%r4)
+- ld 15,120(%r4)
+-1: # Clear CIF_FPU bit
+- ni __LC_CPU_FLAGS+7,255-_CIF_FPU
+- lg %r9,48(%r11) # return from load_fpu_regs
++ larl %r9,load_fpu_regs
+ br %r14
+-.Lcleanup_load_fpu_regs_vx:
+- .quad .Lload_fpu_regs_vx
+-.Lcleanup_load_fpu_regs_vx_high:
+- .quad .Lload_fpu_regs_vx_high
+-.Lcleanup_load_fpu_regs_fp:
+- .quad .Lload_fpu_regs_fp
+-.Lcleanup_load_fpu_regs_done:
+- .quad .Lload_fpu_regs_done
+
+ /*
+ * Integer constants
--- /dev/null
+From 80c544ded25ac14d7cc3e555abb8ed2c2da99b84 Mon Sep 17 00:00:00 2001
+From: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Date: Mon, 14 Mar 2016 15:47:23 +0100
+Subject: s390/pci: enforce fmb page boundary rule
+
+From: Sebastian Ott <sebott@linux.vnet.ibm.com>
+
+commit 80c544ded25ac14d7cc3e555abb8ed2c2da99b84 upstream.
+
+The function measurement block must not cross a page boundary. Ensure
+that by raising the alignment requirement to the smallest power of 2
+larger than the size of the fmb.
+
+Fixes: d0b088531 ("s390/pci: performance statistics and debug infrastructure")
+Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/pci.h | 2 +-
+ arch/s390/pci/pci.c | 5 ++++-
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/include/asm/pci.h
++++ b/arch/s390/include/asm/pci.h
+@@ -45,7 +45,7 @@ struct zpci_fmb {
+ u64 rpcit_ops;
+ u64 dma_rbytes;
+ u64 dma_wbytes;
+-} __packed __aligned(16);
++} __packed __aligned(64);
+
+ enum zpci_state {
+ ZPCI_FN_STATE_RESERVED,
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -871,8 +871,11 @@ static inline int barsize(u8 size)
+
+ static int zpci_mem_init(void)
+ {
++ BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
++ __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
++
+ zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
+- 16, 0, NULL);
++ __alignof__(struct zpci_fmb), 0, NULL);
+ if (!zdev_fmb_cache)
+ goto error_zdev;
+
--- /dev/null
+From f9c904b7613b8b4c85b10cd6b33ad41b2843fa9d Mon Sep 17 00:00:00 2001
+From: Chris Friesen <cbf123@mail.usask.ca>
+Date: Sat, 5 Mar 2016 23:18:48 -0600
+Subject: sched/cputime: Fix steal_account_process_tick() to always return jiffies
+
+From: Chris Friesen <cbf123@mail.usask.ca>
+
+commit f9c904b7613b8b4c85b10cd6b33ad41b2843fa9d upstream.
+
+The callers of steal_account_process_tick() expect it to return
+whether a jiffy should be considered stolen or not.
+
+Currently the return value of steal_account_process_tick() is in
+units of cputime, which vary between either jiffies or nsecs
+depending on CONFIG_VIRT_CPU_ACCOUNTING_GEN.
+
+If cputime has nsecs granularity and there is a tiny amount of
+stolen time (a few nsecs, say) then we will consider the entire
+tick stolen and will not account the tick on user/system/idle,
+causing /proc/stats to show invalid data.
+
+The fix is to change steal_account_process_tick() to accumulate
+the stolen time and only account it once it's worth a jiffy.
+
+(Thanks to Frederic Weisbecker for suggestions to fix a bug in my
+first version of the patch.)
+
+Signed-off-by: Chris Friesen <chris.friesen@windriver.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/56DBBDB8.40305@mail.usask.ca
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/cputime.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -259,21 +259,21 @@ static __always_inline bool steal_accoun
+ #ifdef CONFIG_PARAVIRT
+ if (static_key_false(¶virt_steal_enabled)) {
+ u64 steal;
+- cputime_t steal_ct;
++ unsigned long steal_jiffies;
+
+ steal = paravirt_steal_clock(smp_processor_id());
+ steal -= this_rq()->prev_steal_time;
+
+ /*
+- * cputime_t may be less precise than nsecs (eg: if it's
+- * based on jiffies). Lets cast the result to cputime
++ * steal is in nsecs but our caller is expecting steal
++ * time in jiffies. Lets cast the result to jiffies
+ * granularity and account the rest on the next rounds.
+ */
+- steal_ct = nsecs_to_cputime(steal);
+- this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);
++ steal_jiffies = nsecs_to_jiffies(steal);
++ this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);
+
+- account_steal_time(steal_ct);
+- return steal_ct;
++ account_steal_time(jiffies_to_cputime(steal_jiffies));
++ return steal_jiffies;
+ }
+ #endif
+ return false;
--- /dev/null
+From b15d53d009558d14c4f394a6d1fa2039c7f45c43 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Date: Mon, 29 Feb 2016 09:19:24 +0100
+Subject: sched/preempt, sh: kmap_coherent relies on disabled preemption
+
+From: David Hildenbrand <dahi@linux.vnet.ibm.com>
+
+commit b15d53d009558d14c4f394a6d1fa2039c7f45c43 upstream.
+
+kmap_coherent needs disabled preemption to not schedule in the critical
+section, just like kmap_coherent on mips and kmap_atomic in general.
+
+Fixes: 8222dbe21e79 "sched/preempt, mm/fault: Decouple preemption from the page fault logic"
+Reported-by: Hans Verkuil <hverkuil@xs4all.nl>
+Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Tested-by: Hans Verkuil <hans.verkuil@cisco.com>
+Signed-off-by: Rich Felker <dalias@libc.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/sh/mm/kmap.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/sh/mm/kmap.c
++++ b/arch/sh/mm/kmap.c
+@@ -36,6 +36,7 @@ void *kmap_coherent(struct page *page, u
+
+ BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
+
++ preempt_disable();
+ pagefault_disable();
+
+ idx = FIX_CMAP_END -
+@@ -64,4 +65,5 @@ void kunmap_coherent(void *kvaddr)
+ }
+
+ pagefault_enable();
++ preempt_enable();
+ }
asoc-samsung-pass-dma-channels-as-pointers.patch
mmc-sh_mmcif-rework-dma-channel-handling.patch
mmc-sh_mmcif-correct-tx-dma-channel-allocation.patch
+x86-microcode-intel-make-early-loader-look-for-builtin-microcode-too.patch
+x86-microcode-untangle-from-blk_dev_initrd.patch
+x86-entry-compat-keep-ts_compat-set-during-signal-delivery.patch
+perf-x86-intel-add-definition-for-pt-pmi-bit.patch
+x86-pci-mark-broadwell-ep-home-agent-pcu-as-having-non-compliant-bars.patch
+kvm-x86-fix-missed-hardware-breakpoints.patch
+kvm-i8254-change-pit-discard-tick-policy.patch
+kvm-fix-spin_lock_init-order-on-x86.patch
+kvm-vmx-avoid-guest-hang-on-invalid-invept-instruction.patch
+kvm-vmx-avoid-guest-hang-on-invalid-invvpid-instruction.patch
+kvm-vmx-fix-nested-vpid-for-old-kvm-guests.patch
+perf-core-fix-perf_sched_count-derailment.patch
+perf-tools-dont-stop-pmu-parsing-on-alias-parse-error.patch
+perf-tools-fix-checking-asprintf-return-value.patch
+perf-tools-fix-python-extension-build.patch
+thermal-ignore-invalid-trip-points.patch
+sched-cputime-fix-steal_account_process_tick-to-always-return-jiffies.patch
+sched-preempt-sh-kmap_coherent-relies-on-disabled-preemption.patch
+edac-sb_edac-fix-computation-of-channel-address.patch
+edac-amd64_edac-shift-wrapping-issue-in-f1x_get_norm_dct_addr.patch
+s390-fix-floating-pointer-register-corruption-again.patch
+s390-cpumf-add-missing-lpp-magic-initialization.patch
+s390-pci-enforce-fmb-page-boundary-rule.patch
+pinctrl-bcm2835-fix-cut-and-paste-error-in-pull-parsing.patch
+pci-disable-io-mem-decoding-for-devices-with-non-compliant-bars.patch
+pci-acpi-ia64-fix-io-port-generic-range-check.patch
--- /dev/null
+From 81ad4276b505e987dd8ebbdf63605f92cd172b52 Mon Sep 17 00:00:00 2001
+From: Zhang Rui <rui.zhang@intel.com>
+Date: Fri, 18 Mar 2016 10:03:24 +0800
+Subject: Thermal: Ignore invalid trip points
+
+From: Zhang Rui <rui.zhang@intel.com>
+
+commit 81ad4276b505e987dd8ebbdf63605f92cd172b52 upstream.
+
+In some cases, platform thermal driver may report invalid trip points,
+thermal core should not take any action for these trip points.
+
+This fixed a regression that bogus trip point starts to screw up thermal
+control on some Lenovo laptops, after
+commit bb431ba26c5cd0a17c941ca6c3a195a3a6d5d461
+Author: Zhang Rui <rui.zhang@intel.com>
+Date: Fri Oct 30 16:31:47 2015 +0800
+
+ Thermal: initialize thermal zone device correctly
+
+ After thermal zone device registered, as we have not read any
+ temperature before, thus tz->temperature should not be 0,
+ which actually means 0C, and thermal trend is not available.
+ In this case, we need specially handling for the first
+ thermal_zone_device_update().
+
+ Both thermal core framework and step_wise governor is
+ enhanced to handle this. And since the step_wise governor
+ is the only one that uses trends, so it's the only thermal
+ governor that needs to be updated.
+
+ Tested-by: Manuel Krause <manuelkrause@netscape.net>
+ Tested-by: szegad <szegadlo@poczta.onet.pl>
+ Tested-by: prash <prash.n.rao@gmail.com>
+ Tested-by: amish <ammdispose-arch@yahoo.com>
+ Tested-by: Matthias <morpheusxyz123@yahoo.de>
+ Reviewed-by: Javi Merino <javi.merino@arm.com>
+ Signed-off-by: Zhang Rui <rui.zhang@intel.com>
+ Signed-off-by: Chen Yu <yu.c.chen@intel.com>
+
+Link: https://bugzilla.redhat.com/show_bug.cgi?id=1317190
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=114551
+Signed-off-by: Zhang Rui <rui.zhang@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/thermal/thermal_core.c | 13 ++++++++++++-
+ include/linux/thermal.h | 2 ++
+ 2 files changed, 14 insertions(+), 1 deletion(-)
+
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -454,6 +454,10 @@ static void handle_thermal_trip(struct t
+ {
+ enum thermal_trip_type type;
+
++ /* Ignore disabled trip points */
++ if (test_bit(trip, &tz->trips_disabled))
++ return;
++
+ tz->ops->get_trip_type(tz, trip, &type);
+
+ if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
+@@ -1796,6 +1800,7 @@ struct thermal_zone_device *thermal_zone
+ {
+ struct thermal_zone_device *tz;
+ enum thermal_trip_type trip_type;
++ int trip_temp;
+ int result;
+ int count;
+ int passive = 0;
+@@ -1867,9 +1872,15 @@ struct thermal_zone_device *thermal_zone
+ goto unregister;
+
+ for (count = 0; count < trips; count++) {
+- tz->ops->get_trip_type(tz, count, &trip_type);
++ if (tz->ops->get_trip_type(tz, count, &trip_type))
++ set_bit(count, &tz->trips_disabled);
+ if (trip_type == THERMAL_TRIP_PASSIVE)
+ passive = 1;
++ if (tz->ops->get_trip_temp(tz, count, &trip_temp))
++ set_bit(count, &tz->trips_disabled);
++ /* Check for bogus trip points */
++ if (trip_temp == 0)
++ set_bit(count, &tz->trips_disabled);
+ }
+
+ if (!passive) {
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -156,6 +156,7 @@ struct thermal_attr {
+ * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
+ * @devdata: private pointer for device private data
+ * @trips: number of trip points the thermal zone supports
++ * @trips_disabled; bitmap for disabled trips
+ * @passive_delay: number of milliseconds to wait between polls when
+ * performing passive cooling.
+ * @polling_delay: number of milliseconds to wait between polls when
+@@ -191,6 +192,7 @@ struct thermal_zone_device {
+ struct thermal_attr *trip_hyst_attrs;
+ void *devdata;
+ int trips;
++ unsigned long trips_disabled; /* bitmap for disabled trips */
+ int passive_delay;
+ int polling_delay;
+ int temperature;
--- /dev/null
+From 4e79e182b419172e35936a47f098509092d69817 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Wed, 10 Feb 2016 14:15:27 -0800
+Subject: x86/entry/compat: Keep TS_COMPAT set during signal delivery
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 4e79e182b419172e35936a47f098509092d69817 upstream.
+
+Signal delivery needs to know the sign of an interrupted syscall's
+return value in order to detect -ERESTART variants. Normally this
+works independently of bitness because syscalls internally return
+long. Under ptrace, however, this can break, and syscall_get_error
+is supposed to sign-extend regs->ax if needed.
+
+We were clearing TS_COMPAT too early, though, and this prevented
+sign extension, which subtly broke syscall restart under ptrace.
+
+Reported-by: Robert O'Callahan <robert@ocallahan.org>
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Shuah Khan <shuahkh@osg.samsung.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: c5c46f59e4e7 ("x86/entry: Add new, comprehensible entry and exit handlers written in C")
+Link: http://lkml.kernel.org/r/cbce3cf545522f64eb37f5478cb59746230db3b5.1455142412.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/entry/common.c | 23 +++++++++++++----------
+ 1 file changed, 13 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -268,6 +268,7 @@ static void exit_to_usermode_loop(struct
+ /* Called with IRQs disabled. */
+ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
+ {
++ struct thread_info *ti = pt_regs_to_thread_info(regs);
+ u32 cached_flags;
+
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
+@@ -275,12 +276,22 @@ __visible inline void prepare_exit_to_us
+
+ lockdep_sys_exit();
+
+- cached_flags =
+- READ_ONCE(pt_regs_to_thread_info(regs)->flags);
++ cached_flags = READ_ONCE(ti->flags);
+
+ if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
+ exit_to_usermode_loop(regs, cached_flags);
+
++#ifdef CONFIG_COMPAT
++ /*
++ * Compat syscalls set TS_COMPAT. Make sure we clear it before
++ * returning to user mode. We need to clear it *after* signal
++ * handling, because syscall restart has a fixup for compat
++ * syscalls. The fixup is exercised by the ptrace_syscall_32
++ * selftest.
++ */
++ ti->status &= ~TS_COMPAT;
++#endif
++
+ user_enter();
+ }
+
+@@ -332,14 +343,6 @@ __visible inline void syscall_return_slo
+ if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
+ syscall_slow_exit_work(regs, cached_flags);
+
+-#ifdef CONFIG_COMPAT
+- /*
+- * Compat syscalls set TS_COMPAT. Make sure we clear it before
+- * returning to user mode.
+- */
+- ti->status &= ~TS_COMPAT;
+-#endif
+-
+ local_irq_disable();
+ prepare_exit_to_usermode(regs);
+ }
--- /dev/null
+From 264285ac01673e70557c43ecee338ce97c4c0672 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 3 Feb 2016 12:33:30 +0100
+Subject: x86/microcode/intel: Make early loader look for builtin microcode too
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 264285ac01673e70557c43ecee338ce97c4c0672 upstream.
+
+Set the initrd @start depending on the presence of an initrd. Otherwise,
+builtin microcode loading doesn't work as the start is wrong and we're
+using it to compute offset to the microcode blobs.
+
+Tested-by: Thomas Voegtle <tv@lio96.de>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1454499225-21544-3-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/microcode/intel.c | 24 ++++++++++++++++--------
+ 1 file changed, 16 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -555,10 +555,14 @@ scan_microcode(struct mc_saved_data *mc_
+ cd.data = NULL;
+ cd.size = 0;
+
+- cd = find_cpio_data(p, (void *)start, size, &offset);
+- if (!cd.data) {
++ /* try built-in microcode if no initrd */
++ if (!size) {
+ if (!load_builtin_intel_microcode(&cd))
+ return UCODE_ERROR;
++ } else {
++ cd = find_cpio_data(p, (void *)start, size, &offset);
++ if (!cd.data)
++ return UCODE_ERROR;
+ }
+
+ return get_matching_model_microcode(0, start, cd.data, cd.size,
+@@ -732,16 +736,20 @@ void __init load_ucode_intel_bsp(void)
+ struct boot_params *p;
+
+ p = (struct boot_params *)__pa_nodebug(&boot_params);
+- start = p->hdr.ramdisk_image;
+ size = p->hdr.ramdisk_size;
+
+- _load_ucode_intel_bsp(
+- (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
+- (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
+- start, size);
++ /*
++ * Set start only if we have an initrd image. We cannot use initrd_start
++ * because it is not set that early yet.
++ */
++ start = (size ? p->hdr.ramdisk_image : 0);
++
++ _load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
++ (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
++ start, size);
+ #else
+- start = boot_params.hdr.ramdisk_image + PAGE_OFFSET;
+ size = boot_params.hdr.ramdisk_size;
++ start = (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);
+
+ _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
+ #endif
--- /dev/null
+From 5f9c01aa7c49a2d74474d6d879a797b8badf29e6 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 3 Feb 2016 12:33:29 +0100
+Subject: x86/microcode: Untangle from BLK_DEV_INITRD
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 5f9c01aa7c49a2d74474d6d879a797b8badf29e6 upstream.
+
+Thomas Voegtle reported that doing oldconfig with a .config which has
+CONFIG_MICROCODE enabled but BLK_DEV_INITRD disabled prevents the
+microcode loading mechanism from being built.
+
+So untangle it from the BLK_DEV_INITRD dependency so that oldconfig
+doesn't turn it off and add an explanatory text to its Kconfig help what
+the supported methods for supplying microcode are.
+
+Reported-by: Thomas Voegtle <tv@lio96.de>
+Tested-by: Thomas Voegtle <tv@lio96.de>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1454499225-21544-2-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/Kconfig | 23 ++++++++++++-----------
+ arch/x86/include/asm/microcode.h | 26 ++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/microcode/intel.c | 14 ++++----------
+ 3 files changed, 42 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1126,22 +1126,23 @@ config MICROCODE
+ bool "CPU microcode loading support"
+ default y
+ depends on CPU_SUP_AMD || CPU_SUP_INTEL
+- depends on BLK_DEV_INITRD
+ select FW_LOADER
+ ---help---
+-
+ If you say Y here, you will be able to update the microcode on
+- certain Intel and AMD processors. The Intel support is for the
+- IA32 family, e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4,
+- Xeon etc. The AMD support is for families 0x10 and later. You will
+- obviously need the actual microcode binary data itself which is not
+- shipped with the Linux kernel.
++ Intel and AMD processors. The Intel support is for the IA32 family,
++ e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4, Xeon etc. The
++ AMD support is for families 0x10 and later. You will obviously need
++ the actual microcode binary data itself which is not shipped with
++ the Linux kernel.
+
+- This option selects the general module only, you need to select
+- at least one vendor specific module as well.
++ The preferred method to load microcode from a detached initrd is described
++ in Documentation/x86/early-microcode.txt. For that you need to enable
++ CONFIG_BLK_DEV_INITRD in order for the loader to be able to scan the
++ initrd for microcode blobs.
+
+- To compile this driver as a module, choose M here: the module
+- will be called microcode.
++ In addition, you can build-in the microcode into the kernel. For that you
++ need to enable FIRMWARE_IN_KERNEL and add the vendor-supplied microcode
++ to the CONFIG_EXTRA_FIRMWARE config option.
+
+ config MICROCODE_INTEL
+ bool "Intel microcode loading support"
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -2,6 +2,7 @@
+ #define _ASM_X86_MICROCODE_H
+
+ #include <linux/earlycpio.h>
++#include <linux/initrd.h>
+
+ #define native_rdmsr(msr, val1, val2) \
+ do { \
+@@ -168,4 +169,29 @@ static inline void reload_early_microcod
+ static inline bool
+ get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }
+ #endif
++
++static inline unsigned long get_initrd_start(void)
++{
++#ifdef CONFIG_BLK_DEV_INITRD
++ return initrd_start;
++#else
++ return 0;
++#endif
++}
++
++static inline unsigned long get_initrd_start_addr(void)
++{
++#ifdef CONFIG_BLK_DEV_INITRD
++#ifdef CONFIG_X86_32
++ unsigned long *initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
++
++ return (unsigned long)__pa_nodebug(*initrd_start_p);
++#else
++ return get_initrd_start();
++#endif
++#else /* CONFIG_BLK_DEV_INITRD */
++ return 0;
++#endif
++}
++
+ #endif /* _ASM_X86_MICROCODE_H */
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -698,7 +698,7 @@ int __init save_microcode_in_initrd_inte
+ if (count == 0)
+ return ret;
+
+- copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
++ copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, get_initrd_start(), count);
+ ret = save_microcode(&mc_saved_data, mc_saved, count);
+ if (ret)
+ pr_err("Cannot save microcode patches from initrd.\n");
+@@ -760,20 +760,14 @@ void load_ucode_intel_ap(void)
+ struct mc_saved_data *mc_saved_data_p;
+ struct ucode_cpu_info uci;
+ unsigned long *mc_saved_in_initrd_p;
+- unsigned long initrd_start_addr;
+ enum ucode_state ret;
+ #ifdef CONFIG_X86_32
+- unsigned long *initrd_start_p;
+
+- mc_saved_in_initrd_p =
+- (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
++ mc_saved_in_initrd_p = (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
+ mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
+- initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
+- initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
+ #else
+- mc_saved_data_p = &mc_saved_data;
+ mc_saved_in_initrd_p = mc_saved_in_initrd;
+- initrd_start_addr = initrd_start;
++ mc_saved_data_p = &mc_saved_data;
+ #endif
+
+ /*
+@@ -785,7 +779,7 @@ void load_ucode_intel_ap(void)
+
+ collect_cpu_info_early(&uci);
+ ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
+- initrd_start_addr, &uci);
++ get_initrd_start_addr(), &uci);
+
+ if (ret != UCODE_OK)
+ return;
--- /dev/null
+From b894157145e4ac7598d7062bc93320898a5e059e Mon Sep 17 00:00:00 2001
+From: Bjorn Helgaas <bhelgaas@google.com>
+Date: Fri, 26 Feb 2016 09:15:11 -0600
+Subject: x86/PCI: Mark Broadwell-EP Home Agent & PCU as having non-compliant BARs
+
+From: Bjorn Helgaas <bhelgaas@google.com>
+
+commit b894157145e4ac7598d7062bc93320898a5e059e upstream.
+
+The Home Agent and PCU PCI devices in Broadwell-EP have a non-BAR register
+where a BAR should be. We don't know what the side effects of sizing the
+"BAR" would be, and we don't know what address space the "BAR" might appear
+to describe.
+
+Mark these devices as having non-compliant BARs so the PCI core doesn't
+touch them.
+
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Tested-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/pci/fixup.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -540,3 +540,10 @@ static void twinhead_reserve_killing_zon
+ }
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
++
++static void pci_bdwep_bar(struct pci_dev *dev)
++{
++ dev->non_compliant_bars = 1;
++}
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);