--- /dev/null
+From 0b938a2e2cf0b0a2c8bac9769111545aff0fee97 Mon Sep 17 00:00:00 2001
+From: John Johansen <john.johansen@canonical.com>
+Date: Wed, 18 Nov 2015 11:41:05 -0800
+Subject: apparmor: fix ref count leak when profile sha1 hash is read
+
+From: John Johansen <john.johansen@canonical.com>
+
+commit 0b938a2e2cf0b0a2c8bac9769111545aff0fee97 upstream.
+
+The hash read handler takes a reference on the profile but returns without
+dropping it; add the missing aa_put_profile() so the count stays balanced.
+
+Signed-off-by: John Johansen <john.johansen@canonical.com>
+Acked-by: Seth Arnold <seth.arnold@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/apparmor/apparmorfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -331,6 +331,7 @@ static int aa_fs_seq_hash_show(struct se
+ seq_printf(seq, "%.2x", profile->hash[i]);
+ seq_puts(seq, "\n");
+ }
++ aa_put_profile(profile);
+
+ return 0;
+ }
--- /dev/null
+From 7de249964f5578e67b99699c5f0b405738d820a2 Mon Sep 17 00:00:00 2001
+From: Dave Weinstein <olorin@google.com>
+Date: Thu, 28 Jul 2016 11:55:41 -0700
+Subject: arm: oabi compat: add missing access checks
+
+From: Dave Weinstein <olorin@google.com>
+
+commit 7de249964f5578e67b99699c5f0b405738d820a2 upstream.
+
+Add access checks to sys_oabi_epoll_wait() and sys_oabi_semtimedop().
+Both helpers copy the user buffers with __put_user_error()/__get_user_error(),
+which skip the access_ok() check, so the buffers must be validated explicitly
+before they are touched. This fixes CVE-2016-3857, a local privilege
+escalation under CONFIG_OABI_COMPAT.
+
+Reported-by: Chiachih Wu <wuchiachih@gmail.com>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Nicolas Pitre <nico@linaro.org>
+Signed-off-by: Dave Weinstein <olorin@google.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/sys_oabi-compat.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/kernel/sys_oabi-compat.c
++++ b/arch/arm/kernel/sys_oabi-compat.c
+@@ -279,8 +279,12 @@ asmlinkage long sys_oabi_epoll_wait(int
+ mm_segment_t fs;
+ long ret, err, i;
+
+- if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
++ if (maxevents <= 0 ||
++ maxevents > (INT_MAX/sizeof(*kbuf)) ||
++ maxevents > (INT_MAX/sizeof(*events)))
+ return -EINVAL;
++ if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
++ return -EFAULT;
+ kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+@@ -317,6 +321,8 @@ asmlinkage long sys_oabi_semtimedop(int
+
+ if (nsops < 1 || nsops > SEMOPM)
+ return -EINVAL;
++ if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
++ return -EFAULT;
+ sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
+ if (!sops)
+ return -ENOMEM;
--- /dev/null
+From a7ae81952cdab56a1277bd2f9ed7284c0f575120 Mon Sep 17 00:00:00 2001
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+Date: Thu, 9 Jun 2016 16:56:28 +0300
+Subject: i2c: i801: Allow ACPI SystemIO OpRegion to conflict with PCI BAR
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+commit a7ae81952cdab56a1277bd2f9ed7284c0f575120 upstream.
+
+On many Intel systems the BIOS declares a SystemIO OpRegion below the SMBus
+PCI device, as can be seen in this ACPI DSDT excerpt from a Lenovo Yoga 900:
+
+ Device (SBUS)
+ {
+ OperationRegion (SMBI, SystemIO, (SBAR << 0x05), 0x10)
+ Field (SMBI, ByteAcc, NoLock, Preserve)
+ {
+ HSTS, 8,
+ Offset (0x02),
+ HCON, 8,
+ HCOM, 8,
+ TXSA, 8,
+ DAT0, 8,
+ DAT1, 8,
+ HBDR, 8,
+ PECR, 8,
+ RXSA, 8,
+ SDAT, 16
+ }
+
+There are also a bunch of AML methods that the BIOS can use to access these
+fields. On most of the systems in question, the AML methods accessing the
+SMBI OpRegion are never actually used.
+
+Because of this SMBI OpRegion, many systems fail to load the SMBus driver
+with an error like the one below:
+
+ ACPI Warning: SystemIO range 0x0000000000003040-0x000000000000305F
+ conflicts with OpRegion 0x0000000000003040-0x000000000000304F
+ (\_SB.PCI0.SBUS.SMBI) (20160108/utaddress-255)
+ ACPI: If an ACPI driver is available for this device, you should use
+ it instead of the native driver
+
+The reason is that this SMBI OpRegion conflicts with the PCI BAR used by
+the SMBus driver.
+
+It turns out that we can install a custom SystemIO address space handler
+for the SMBus device to intercept all accesses through that OpRegion. This
+allows us to share the PCI BAR with the AML code in case it is, for some
+reason, using it. We do not expect this OpRegion handler ever to be called,
+but if it is, we print a warning and prevent all further access from the
+SMBus driver itself.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=110041
+Reported-by: Andy Lutomirski <luto@kernel.org>
+Reported-by: Pali Rohár <pali.rohar@gmail.com>
+Suggested-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Jean Delvare <jdelvare@suse.de>
+Reviewed-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Tested-by: Pali Rohár <pali.rohar@gmail.com>
+Tested-by: Jean Delvare <jdelvare@suse.de>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/busses/i2c-i801.c | 103 ++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 94 insertions(+), 9 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -244,6 +244,13 @@ struct i801_priv {
+ struct platform_device *mux_pdev;
+ #endif
+ struct platform_device *tco_pdev;
++
++ /*
++ * If set to true the host controller registers are reserved for
++ * ACPI AML use. Protected by acpi_lock.
++ */
++ bool acpi_reserved;
++ struct mutex acpi_lock;
+ };
+
+ #define FEATURE_SMBUS_PEC (1 << 0)
+@@ -714,9 +721,15 @@ static s32 i801_access(struct i2c_adapte
+ {
+ int hwpec;
+ int block = 0;
+- int ret, xact = 0;
++ int ret = 0, xact = 0;
+ struct i801_priv *priv = i2c_get_adapdata(adap);
+
++ mutex_lock(&priv->acpi_lock);
++ if (priv->acpi_reserved) {
++ mutex_unlock(&priv->acpi_lock);
++ return -EBUSY;
++ }
++
+ hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
+ && size != I2C_SMBUS_QUICK
+ && size != I2C_SMBUS_I2C_BLOCK_DATA;
+@@ -773,7 +786,8 @@ static s32 i801_access(struct i2c_adapte
+ default:
+ dev_err(&priv->pci_dev->dev, "Unsupported transaction %d\n",
+ size);
+- return -EOPNOTSUPP;
++ ret = -EOPNOTSUPP;
++ goto out;
+ }
+
+ if (hwpec) /* enable/disable hardware PEC */
+@@ -796,11 +810,11 @@ static s32 i801_access(struct i2c_adapte
+ ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
+
+ if (block)
+- return ret;
++ goto out;
+ if (ret)
+- return ret;
++ goto out;
+ if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK))
+- return 0;
++ goto out;
+
+ switch (xact & 0x7f) {
+ case I801_BYTE: /* Result put in SMBHSTDAT0 */
+@@ -812,7 +826,10 @@ static s32 i801_access(struct i2c_adapte
+ (inb_p(SMBHSTDAT1(priv)) << 8);
+ break;
+ }
+- return 0;
++
++out:
++ mutex_unlock(&priv->acpi_lock);
++ return ret;
+ }
+
+
+@@ -1249,6 +1266,72 @@ static void i801_add_tco(struct i801_pri
+ priv->tco_pdev = pdev;
+ }
+
++#ifdef CONFIG_ACPI
++static acpi_status
++i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
++ u64 *value, void *handler_context, void *region_context)
++{
++ struct i801_priv *priv = handler_context;
++ struct pci_dev *pdev = priv->pci_dev;
++ acpi_status status;
++
++ /*
++ * Once BIOS AML code touches the OpRegion we warn and inhibit any
++ * further access from the driver itself. This device is now owned
++ * by the system firmware.
++ */
++ mutex_lock(&priv->acpi_lock);
++
++ if (!priv->acpi_reserved) {
++ priv->acpi_reserved = true;
++
++ dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
++ dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n");
++ }
++
++ if ((function & ACPI_IO_MASK) == ACPI_READ)
++ status = acpi_os_read_port(address, (u32 *)value, bits);
++ else
++ status = acpi_os_write_port(address, (u32)*value, bits);
++
++ mutex_unlock(&priv->acpi_lock);
++
++ return status;
++}
++
++static int i801_acpi_probe(struct i801_priv *priv)
++{
++ struct acpi_device *adev;
++ acpi_status status;
++
++ adev = ACPI_COMPANION(&priv->pci_dev->dev);
++ if (adev) {
++ status = acpi_install_address_space_handler(adev->handle,
++ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler,
++ NULL, priv);
++ if (ACPI_SUCCESS(status))
++ return 0;
++ }
++
++ return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]);
++}
++
++static void i801_acpi_remove(struct i801_priv *priv)
++{
++ struct acpi_device *adev;
++
++ adev = ACPI_COMPANION(&priv->pci_dev->dev);
++ if (!adev)
++ return;
++
++ acpi_remove_address_space_handler(adev->handle,
++ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler);
++}
++#else
++static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
++static inline void i801_acpi_remove(struct i801_priv *priv) { }
++#endif
++
+ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ {
+ unsigned char temp;
+@@ -1266,6 +1349,7 @@ static int i801_probe(struct pci_dev *de
+ priv->adapter.dev.parent = &dev->dev;
+ ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev));
+ priv->adapter.retries = 3;
++ mutex_init(&priv->acpi_lock);
+
+ priv->pci_dev = dev;
+ switch (dev->device) {
+@@ -1328,10 +1412,8 @@ static int i801_probe(struct pci_dev *de
+ return -ENODEV;
+ }
+
+- err = acpi_check_resource_conflict(&dev->resource[SMBBAR]);
+- if (err) {
++ if (i801_acpi_probe(priv))
+ return -ENODEV;
+- }
+
+ err = pcim_iomap_regions(dev, 1 << SMBBAR,
+ dev_driver_string(&dev->dev));
+@@ -1340,6 +1422,7 @@ static int i801_probe(struct pci_dev *de
+ "Failed to request SMBus region 0x%lx-0x%Lx\n",
+ priv->smba,
+ (unsigned long long)pci_resource_end(dev, SMBBAR));
++ i801_acpi_remove(priv);
+ return err;
+ }
+
+@@ -1404,6 +1487,7 @@ static int i801_probe(struct pci_dev *de
+ err = i2c_add_adapter(&priv->adapter);
+ if (err) {
+ dev_err(&dev->dev, "Failed to add SMBus adapter\n");
++ i801_acpi_remove(priv);
+ return err;
+ }
+
+@@ -1422,6 +1506,7 @@ static void i801_remove(struct pci_dev *
+
+ i801_del_mux(priv);
+ i2c_del_adapter(&priv->adapter);
++ i801_acpi_remove(priv);
+ pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
+
+ platform_device_unregister(priv->tco_pdev);
--- /dev/null
+From 896ce45da2c2f4abc508d443fdecde7de0b3fa7e Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Fri, 1 Jul 2016 15:57:02 -0700
+Subject: IB/hfi1: Correct issues with sc5 computation
+
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+
+commit 896ce45da2c2f4abc508d443fdecde7de0b3fa7e upstream.
+
+There are several computations of the sc in the
+ud receive routine.
+
+Besides the code duplication, all of them are wrong when the
+sc is greater than 15. In that case the code incorrectly
+ORs a 1 into the computed sc instead of 1 shifted left
+by 4.
+
+Fix the precomputed sc5 by using the already implemented routine
+hdr2sc() and deleting the flawed duplicated code.
+
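+A worked example (illustrative values, not from the changelog): for
+sc5 = 0x12 the low nibble carried in lrh[0] is 0x2 and the SC4 bit taken
+from the RHF is 1, so:
+
+	old code:  0x2 | 1        = 0x03   /* wrong */
+	hdr2sc():  0x2 | (1 << 4) = 0x12   /* correct */
+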
+Cc: Stable <stable@vger.kernel.org> # 4.6+
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/rdma/hfi1/ud.c | 23 +++--------------------
+ 1 file changed, 3 insertions(+), 20 deletions(-)
+
+--- a/drivers/staging/rdma/hfi1/ud.c
++++ b/drivers/staging/rdma/hfi1/ud.c
+@@ -678,8 +678,7 @@ void hfi1_ud_rcv(struct hfi1_packet *pac
+ u32 tlen = packet->tlen;
+ struct rvt_qp *qp = packet->qp;
+ bool has_grh = rcv_flags & HFI1_HAS_GRH;
+- bool sc4_bit = has_sc4_bit(packet);
+- u8 sc;
++ u8 sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf);
+ u32 bth1;
+ int is_mcast;
+ struct ib_grh *grh = NULL;
+@@ -697,10 +696,8 @@ void hfi1_ud_rcv(struct hfi1_packet *pac
+ */
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
+- u8 sl, sc5;
++ u8 sl;
+
+- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
+- sc5 |= sc4_bit;
+ sl = ibp->sc_to_sl[sc5];
+
+ process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
+@@ -717,10 +714,6 @@ void hfi1_ud_rcv(struct hfi1_packet *pac
+
+ if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) {
+ u16 slid = be16_to_cpu(hdr->lrh[3]);
+- u8 sc5;
+-
+- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
+- sc5 |= sc4_bit;
+
+ return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
+ }
+@@ -745,10 +738,6 @@ void hfi1_ud_rcv(struct hfi1_packet *pac
+ if (qp->ibqp.qp_num > 1) {
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u16 slid;
+- u8 sc5;
+-
+- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
+- sc5 |= sc4_bit;
+
+ slid = be16_to_cpu(hdr->lrh[3]);
+ if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
+@@ -790,10 +779,6 @@ void hfi1_ud_rcv(struct hfi1_packet *pac
+ /* Received on QP0, and so by definition, this is an SMP */
+ struct opa_smp *smp = (struct opa_smp *)data;
+ u16 slid = be16_to_cpu(hdr->lrh[3]);
+- u8 sc5;
+-
+- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
+- sc5 |= sc4_bit;
+
+ if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
+ goto drop;
+@@ -890,9 +875,7 @@ void hfi1_ud_rcv(struct hfi1_packet *pac
+ }
+
+ wc.slid = be16_to_cpu(hdr->lrh[3]);
+- sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
+- sc |= sc4_bit;
+- wc.sl = ibp->sc_to_sl[sc];
++ wc.sl = ibp->sc_to_sl[sc5];
+
+ /*
+ * Save the LMC lower bits if the destination LID is a unicast LID.
--- /dev/null
+From 2aee309d3e01447c55fdf89cef05a0e2be372655 Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Fri, 17 Jun 2016 19:17:49 -0700
+Subject: IB/hfi1: Fix deadlock with txreq allocation slow path
+
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+
+commit 2aee309d3e01447c55fdf89cef05a0e2be372655 upstream.
+
+A failure in the get_txreq() inline will result in a
+slow path retry using __get_txreq().
+
+__get_txreq() attempts to acquire the qp's s_lock, which
+is already held by all callers.
+
+Fix by deleting the s_lock maintenance in __get_txreq()
+and adding sparse syntax hooks to future-proof the code.
+
+Cc: Stable <stable@vger.kernel.org> # 4.6+
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/rdma/hfi1/verbs_txreq.c | 4 +---
+ drivers/staging/rdma/hfi1/verbs_txreq.h | 1 +
+ 2 files changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/staging/rdma/hfi1/verbs_txreq.c
++++ b/drivers/staging/rdma/hfi1/verbs_txreq.c
+@@ -92,11 +92,10 @@ void hfi1_put_txreq(struct verbs_txreq *
+
+ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
+ struct rvt_qp *qp)
++ __must_hold(&qp->s_lock)
+ {
+ struct verbs_txreq *tx = ERR_PTR(-EBUSY);
+- unsigned long flags;
+
+- spin_lock_irqsave(&qp->s_lock, flags);
+ write_seqlock(&dev->iowait_lock);
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
+ struct hfi1_qp_priv *priv;
+@@ -116,7 +115,6 @@ struct verbs_txreq *__get_txreq(struct h
+ }
+ out:
+ write_sequnlock(&dev->iowait_lock);
+- spin_unlock_irqrestore(&qp->s_lock, flags);
+ return tx;
+ }
+
+--- a/drivers/staging/rdma/hfi1/verbs_txreq.h
++++ b/drivers/staging/rdma/hfi1/verbs_txreq.h
+@@ -73,6 +73,7 @@ struct verbs_txreq *__get_txreq(struct h
+
+ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
+ struct rvt_qp *qp)
++ __must_hold(&qp->slock)
+ {
+ struct verbs_txreq *tx;
+ struct hfi1_qp_priv *priv = qp->priv;
--- /dev/null
+From 20f06ed9f61a185c6dabd662c310bed6189470df Mon Sep 17 00:00:00 2001
+From: David Howells <dhowells@redhat.com>
+Date: Wed, 27 Jul 2016 11:43:37 +0100
+Subject: KEYS: 64-bit MIPS needs to use compat_sys_keyctl for 32-bit userspace
+
+From: David Howells <dhowells@redhat.com>
+
+commit 20f06ed9f61a185c6dabd662c310bed6189470df upstream.
+
+MIPS64 needs to use compat_sys_keyctl for 32-bit userspace rather than
+calling sys_keyctl. The latter will work in a lot of cases, thereby hiding
+the issue.
+
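+One case where the ABI difference is visible (an illustrative sketch, not
+part of this patch): KEYCTL_INSTANTIATE_IOV takes a user iovec array, and
+the 32-bit and 64-bit layouts of its elements differ, so the native
+sys_keyctl() would misparse a 32-bit caller's vector while
+compat_sys_keyctl() converts it:
+
+	#include <stdint.h>
+
+	/* native 64-bit layout: 16 bytes per element */
+	struct iovec64 {
+		uint64_t iov_base;	/* void __user * in the kernel */
+		uint64_t iov_len;	/* size_t */
+	};
+
+	/* what a 32-bit caller actually lays out: 8 bytes per element */
+	struct iovec32 {
+		uint32_t iov_base;	/* compat_uptr_t */
+		uint32_t iov_len;	/* compat_size_t */
+	};
+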
+Reported-by: Stephan Mueller <smueller@chronox.de>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Cc: linux-mips@linux-mips.org
+Cc: linux-kernel@vger.kernel.org
+Cc: linux-security-module@vger.kernel.org
+Cc: keyrings@vger.kernel.org
+Patchwork: https://patchwork.linux-mips.org/patch/13832/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/scall64-n32.S | 2 +-
+ arch/mips/kernel/scall64-o32.S | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/kernel/scall64-n32.S
++++ b/arch/mips/kernel/scall64-n32.S
+@@ -344,7 +344,7 @@ EXPORT(sysn32_call_table)
+ PTR sys_ni_syscall /* available, was setaltroot */
+ PTR sys_add_key
+ PTR sys_request_key
+- PTR sys_keyctl /* 6245 */
++ PTR compat_sys_keyctl /* 6245 */
+ PTR sys_set_thread_area
+ PTR sys_inotify_init
+ PTR sys_inotify_add_watch
+--- a/arch/mips/kernel/scall64-o32.S
++++ b/arch/mips/kernel/scall64-o32.S
+@@ -500,7 +500,7 @@ EXPORT(sys32_call_table)
+ PTR sys_ni_syscall /* available, was setaltroot */
+ PTR sys_add_key /* 4280 */
+ PTR sys_request_key
+- PTR sys_keyctl
++ PTR compat_sys_keyctl
+ PTR sys_set_thread_area
+ PTR sys_inotify_init
+ PTR sys_inotify_add_watch /* 4285 */
--- /dev/null
+From 86a574de4590ffe6fd3f3ca34cdcf655a78e36ec Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Sun, 3 Jul 2016 17:01:26 -0400
+Subject: random: strengthen input validation for RNDADDTOENTCNT
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 86a574de4590ffe6fd3f3ca34cdcf655a78e36ec upstream.
+
+Don't allow RNDADDTOENTCNT or RNDADDENTROPY to accept a negative
+entropy value. It doesn't make any sense to subtract from the entropy
+counter, and it can trigger a warning:
+
+random: negative entropy/overflow: pool input count -40000
+------------[ cut here ]------------
+WARNING: CPU: 3 PID: 6828 at drivers/char/random.c:670[< none
+ >] credit_entropy_bits+0x21e/0xad0 drivers/char/random.c:670
+Modules linked in:
+CPU: 3 PID: 6828 Comm: a.out Not tainted 4.7.0-rc4+ #4
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
+ ffffffff880b58e0 ffff88005dd9fcb0 ffffffff82cc838f ffffffff87158b40
+ fffffbfff1016b1c 0000000000000000 0000000000000000 ffffffff87158b40
+ ffffffff83283dae 0000000000000009 ffff88005dd9fcf8 ffffffff8136d27f
+Call Trace:
+ [< inline >] __dump_stack lib/dump_stack.c:15
+ [<ffffffff82cc838f>] dump_stack+0x12e/0x18f lib/dump_stack.c:51
+ [<ffffffff8136d27f>] __warn+0x19f/0x1e0 kernel/panic.c:516
+ [<ffffffff8136d48c>] warn_slowpath_null+0x2c/0x40 kernel/panic.c:551
+ [<ffffffff83283dae>] credit_entropy_bits+0x21e/0xad0 drivers/char/random.c:670
+ [< inline >] credit_entropy_bits_safe drivers/char/random.c:734
+ [<ffffffff8328785d>] random_ioctl+0x21d/0x250 drivers/char/random.c:1546
+ [< inline >] vfs_ioctl fs/ioctl.c:43
+ [<ffffffff8185316c>] do_vfs_ioctl+0x18c/0xff0 fs/ioctl.c:674
+ [< inline >] SYSC_ioctl fs/ioctl.c:689
+ [<ffffffff8185405f>] SyS_ioctl+0x8f/0xc0 fs/ioctl.c:680
+ [<ffffffff86a995c0>] entry_SYSCALL_64_fastpath+0x23/0xc1
+arch/x86/entry/entry_64.S:207
+---[ end trace 5d4902b2ba842f1f ]---
+
+This was triggered using the test program:
+
+// autogenerated by syzkaller (http://github.com/google/syzkaller)
+
+int main() {
+ int fd = open("/dev/random", O_RDWR);
+ int val = -5000;
+ ioctl(fd, RNDADDTOENTCNT, &val);
+ return 0;
+}
+
+It's harmless in that (a) only root can trigger it, and (b) after
+complaining the code never actually lets the entropy count go negative, but
+it's better to simply not allow userspace to pass in a negative entropy
+value in the first place.
+
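+For comparison, a well-formed use of the ioctl interface (a sketch for
+illustration only; the add_entropy() helper below is not from the report)
+passes a non-negative entropy count in struct rand_pool_info:
+
+	#include <fcntl.h>
+	#include <string.h>
+	#include <unistd.h>
+	#include <sys/ioctl.h>
+	#include <linux/random.h>
+
+	/* Credit 'bits' of entropy while mixing 'len' bytes from 'buf'. */
+	int add_entropy(const void *buf, int len, int bits)
+	{
+		struct {
+			struct rand_pool_info info;
+			unsigned char data[512];
+		} req;
+		int fd, ret;
+
+		if (len < 0 || len > (int)sizeof(req.data) || bits < 0)
+			return -1;	/* negative counts now get -EINVAL */
+		fd = open("/dev/random", O_WRONLY);
+		if (fd < 0)
+			return -1;
+		req.info.entropy_count = bits;
+		req.info.buf_size = len;
+		memcpy(req.info.buf, buf, len);
+		ret = ioctl(fd, RNDADDENTROPY, &req);	/* needs CAP_SYS_ADMIN */
+		close(fd);
+		return ret;
+	}
+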
+Google-Bug-Id: #29575089
+Reported-By: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/random.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -722,15 +722,18 @@ retry:
+ }
+ }
+
+-static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
++static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+ {
+ const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
+
++ if (nbits < 0)
++ return -EINVAL;
++
+ /* Cap the value to avoid overflows */
+ nbits = min(nbits, nbits_max);
+- nbits = max(nbits, -nbits_max);
+
+ credit_entropy_bits(r, nbits);
++ return 0;
+ }
+
+ /*********************************************************************
+@@ -1542,8 +1545,7 @@ static long random_ioctl(struct file *f,
+ return -EPERM;
+ if (get_user(ent_count, p))
+ return -EFAULT;
+- credit_entropy_bits_safe(&input_pool, ent_count);
+- return 0;
++ return credit_entropy_bits_safe(&input_pool, ent_count);
+ case RNDADDENTROPY:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+@@ -1557,8 +1559,7 @@ static long random_ioctl(struct file *f,
+ size);
+ if (retval < 0)
+ return retval;
+- credit_entropy_bits_safe(&input_pool, ent_count);
+- return 0;
++ return credit_entropy_bits_safe(&input_pool, ent_count);
+ case RNDZAPENTCNT:
+ case RNDCLEARPOOL:
+ /*
--- /dev/null
+From 43160ffd12c8d1d331362362eea3c70e04b6f9c4 Mon Sep 17 00:00:00 2001
+From: Axel Lin <axel.lin@ingics.com>
+Date: Wed, 15 Jun 2016 10:21:34 +0800
+Subject: regulator: qcom_smd: Remove list_voltage callback for rpm_smps_ldo_ops_fixed
+
+From: Axel Lin <axel.lin@ingics.com>
+
+commit 43160ffd12c8d1d331362362eea3c70e04b6f9c4 upstream.
+
+Using regulator_list_voltage_linear_range in rpm_smps_ldo_ops_fixed is
+wrong because it is used for fixed regulators without any linear range.
+The rpm_smps_ldo_ops_fixed is used for pm8941_lnldo, which has fixed_uV
+set and n_voltages = 1. In this case, regulator_list_voltage() can return
+rdev->desc->fixed_uV without a .list_voltage implementation.
+
+Fixes: 3bfbb4d1a480 ("regulator: qcom_smd: add list_voltage callback")
+Signed-off-by: Axel Lin <axel.lin@ingics.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/regulator/qcom_smd-regulator.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/regulator/qcom_smd-regulator.c
++++ b/drivers/regulator/qcom_smd-regulator.c
+@@ -140,7 +140,6 @@ static const struct regulator_ops rpm_sm
+ .enable = rpm_reg_enable,
+ .disable = rpm_reg_disable,
+ .is_enabled = rpm_reg_is_enabled,
+- .list_voltage = regulator_list_voltage_linear_range,
+
+ .get_voltage = rpm_reg_get_voltage,
+ .set_voltage = rpm_reg_set_voltage,
qed-fix-setting-clearing-bit-in-completion-bitmap.patch
macsec-ensure-rx_sa-is-set-when-validation-is-disabled.patch
tcp-consider-recv-buf-for-the-initial-window-scale.patch
+i2c-i801-allow-acpi-systemio-opregion-to-conflict-with-pci-bar.patch
+arm-oabi-compat-add-missing-access-checks.patch
+keys-64-bit-mips-needs-to-use-compat_sys_keyctl-for-32-bit-userspace.patch
+ib-hfi1-correct-issues-with-sc5-computation.patch
+ib-hfi1-fix-deadlock-with-txreq-allocation-slow-path.patch
+apparmor-fix-ref-count-leak-when-profile-sha1-hash-is-read.patch
+regulator-qcom_smd-remove-list_voltage-callback-for-rpm_smps_ldo_ops_fixed.patch
+random-strengthen-input-validation-for-rndaddtoentcnt.patch
+x86-mm-pat-add-support-of-non-default-pat-msr-setting.patch
+x86-mm-pat-add-pat_disable-interface.patch
+x86-mm-pat-replace-cpu_has_pat-with-boot_cpu_has.patch
+x86-mtrr-fix-xorg-crashes-in-qemu-sessions.patch
+x86-mtrr-fix-pat-init-handling-when-mtrr-is-disabled.patch
+x86-xen-pat-remove-pat-table-init-code-from-xen.patch
+x86-pat-document-the-pat-initialization-sequence.patch
+x86-mm-pat-fix-bug_on-in-mmap_mem-on-qemu-i386.patch
+udf-prevent-stack-overflow-on-corrupted-filesystem-mount.patch
--- /dev/null
+From a47241cdeee2689ee7089ec95cadfcf66588fbdb Mon Sep 17 00:00:00 2001
+From: Alden Tondettar <alden.tondettar@gmail.com>
+Date: Mon, 25 Apr 2016 19:27:56 -0700
+Subject: udf: Prevent stack overflow on corrupted filesystem mount
+
+From: Alden Tondettar <alden.tondettar@gmail.com>
+
+commit a47241cdeee2689ee7089ec95cadfcf66588fbdb upstream.
+
+Presently, a corrupted or malicious UDF filesystem containing a very large
+number (or cycle) of Logical Volume Integrity Descriptor extent
+indirections may trigger a stack overflow and kernel panic in
+udf_load_logicalvolint() on mount.
+
+Replace the unnecessary recursion in udf_load_logicalvolint() with
+simple iteration. Set an arbitrary limit of 1000 indirections (which would
+have almost certainly overflowed the stack without this fix), and treat
+such cases as if there were no LVID.
+
+Signed-off-by: Alden Tondettar <alden.tondettar@gmail.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/udf/super.c | 69 ++++++++++++++++++++++++++++++++++++---------------------
+ 1 file changed, 44 insertions(+), 25 deletions(-)
+
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -78,6 +78,15 @@
+ #define VSD_FIRST_SECTOR_OFFSET 32768
+ #define VSD_MAX_SECTOR_OFFSET 0x800000
+
++/*
++ * Maximum number of Terminating Descriptor / Logical Volume Integrity
++ * Descriptor redirections. The chosen numbers are arbitrary - just that we
++ * hopefully don't limit any real use of rewritten inode on write-once media
++ * but avoid looping for too long on corrupted media.
++ */
++#define UDF_MAX_TD_NESTING 64
++#define UDF_MAX_LVID_NESTING 1000
++
+ enum { UDF_MAX_LINKS = 0xffff };
+
+ /* These are the "meat" - everything else is stuffing */
+@@ -1541,42 +1550,52 @@ out_bh:
+ }
+
+ /*
+- * udf_load_logicalvolint
+- *
++ * Find the prevailing Logical Volume Integrity Descriptor.
+ */
+ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
+ {
+- struct buffer_head *bh = NULL;
++ struct buffer_head *bh, *final_bh;
+ uint16_t ident;
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ struct logicalVolIntegrityDesc *lvid;
++ int indirections = 0;
+
+- while (loc.extLength > 0 &&
+- (bh = udf_read_tagged(sb, loc.extLocation,
+- loc.extLocation, &ident)) &&
+- ident == TAG_IDENT_LVID) {
+- sbi->s_lvid_bh = bh;
+- lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
+-
+- if (lvid->nextIntegrityExt.extLength)
+- udf_load_logicalvolint(sb,
+- leea_to_cpu(lvid->nextIntegrityExt));
++ while (++indirections <= UDF_MAX_LVID_NESTING) {
++ final_bh = NULL;
++ while (loc.extLength > 0 &&
++ (bh = udf_read_tagged(sb, loc.extLocation,
++ loc.extLocation, &ident))) {
++ if (ident != TAG_IDENT_LVID) {
++ brelse(bh);
++ break;
++ }
+
+- if (sbi->s_lvid_bh != bh)
+- brelse(bh);
+- loc.extLength -= sb->s_blocksize;
+- loc.extLocation++;
++ brelse(final_bh);
++ final_bh = bh;
++
++ loc.extLength -= sb->s_blocksize;
++ loc.extLocation++;
++ }
++
++ if (!final_bh)
++ return;
++
++ brelse(sbi->s_lvid_bh);
++ sbi->s_lvid_bh = final_bh;
++
++ lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
++ if (lvid->nextIntegrityExt.extLength == 0)
++ return;
++
++ loc = leea_to_cpu(lvid->nextIntegrityExt);
+ }
+- if (sbi->s_lvid_bh != bh)
+- brelse(bh);
++
++ udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
++ UDF_MAX_LVID_NESTING);
++ brelse(sbi->s_lvid_bh);
++ sbi->s_lvid_bh = NULL;
+ }
+
+-/*
+- * Maximum number of Terminating Descriptor redirections. The chosen number is
+- * arbitrary - just that we hopefully don't limit any real use of rewritten
+- * inode on write-once media but avoid looping for too long on corrupted media.
+- */
+-#define UDF_MAX_TD_NESTING 64
+
+ /*
+ * Process a main/reserve volume descriptor sequence.
--- /dev/null
+From 224bb1e5d67ba0f2872c98002d6a6f991ac6fd4a Mon Sep 17 00:00:00 2001
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Wed, 23 Mar 2016 15:41:58 -0600
+Subject: x86/mm/pat: Add pat_disable() interface
+
+From: Toshi Kani <toshi.kani@hpe.com>
+
+commit 224bb1e5d67ba0f2872c98002d6a6f991ac6fd4a upstream.
+
+In preparation for fixing a regression caused by:
+
+ 9cd25aac1f44 ("x86/mm/pat: Emulate PAT when it is disabled")
+
+... PAT needs to provide an interface that prevents the OS from
+initializing the PAT MSR.
+
+PAT MSR initialization must be done on all CPUs using the specific
+sequence of operations defined in the Intel SDM. This requires MTRRs
+to be enabled since pat_init() is called as part of MTRR init
+from mtrr_rendezvous_handler().
+
+Make pat_disable() the interface that prevents the OS from
+initializing the PAT MSR. MTRR will call this interface when it
+cannot provide the SDM-defined sequence to initialize PAT.
+
+This also ensures that pat_disable() called from pat_bsp_init()
+will set the PAT table properly when the CPU does not support PAT.
+
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Luis R. Rodriguez <mcgrof@suse.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Robert Elliott <elliott@hpe.com>
+Cc: Toshi Kani <toshi.kani@hp.com>
+Cc: konrad.wilk@oracle.com
+Cc: paul.gortmaker@windriver.com
+Cc: xen-devel@lists.xenproject.org
+Link: http://lkml.kernel.org/r/1458769323-24491-3-git-send-email-toshi.kani@hpe.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/pat.h | 1 +
+ arch/x86/mm/pat.c | 13 ++++++++++++-
+ 2 files changed, 13 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/pat.h
++++ b/arch/x86/include/asm/pat.h
+@@ -5,6 +5,7 @@
+ #include <asm/pgtable_types.h>
+
+ bool pat_enabled(void);
++void pat_disable(const char *reason);
+ extern void pat_init(void);
+ void __init_cache_modes(u64);
+
+--- a/arch/x86/mm/pat.c
++++ b/arch/x86/mm/pat.c
+@@ -40,11 +40,22 @@
+ static bool boot_cpu_done;
+
+ static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
++static void init_cache_modes(void);
+
+-static inline void pat_disable(const char *reason)
++void pat_disable(const char *reason)
+ {
++ if (!__pat_enabled)
++ return;
++
++ if (boot_cpu_done) {
++ WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
++ return;
++ }
++
+ __pat_enabled = 0;
+ pr_info("x86/PAT: %s\n", reason);
++
++ init_cache_modes();
+ }
+
+ static int __init nopat(char *str)
--- /dev/null
+From 02f037d641dc6672be5cfe7875a48ab99b95b154 Mon Sep 17 00:00:00 2001
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Wed, 23 Mar 2016 15:41:57 -0600
+Subject: x86/mm/pat: Add support of non-default PAT MSR setting
+
+From: Toshi Kani <toshi.kani@hpe.com>
+
+commit 02f037d641dc6672be5cfe7875a48ab99b95b154 upstream.
+
+In preparation for fixing a regression caused by:
+
+  9cd25aac1f44 ("x86/mm/pat: Emulate PAT when it is disabled")
+
+... PAT needs to support the case where the PAT MSR is initialized
+with a non-default value.
+
+When pat_init() is called and PAT is disabled, it initializes the
+PAT table with the BIOS default value. Xen, however, sets the PAT MSR
+with a non-default value to enable WC. This causes an inconsistency
+between the PAT table and the PAT MSR when PAT is set to disabled on Xen.
+
+Change pat_init() to handle the PAT disable cases properly. Add
+init_cache_modes() to handle the two cases when PAT is set to disabled.
+
+ 1. CPU supports PAT: Set PAT table to be consistent with PAT MSR.
+ 2. CPU does not support PAT: Set PAT table to be consistent with
+ PWT and PCD bits in a PTE.
+
+Note, __init_cache_modes(), renamed from pat_init_cache_modes(),
+will be changed to a static function in a later patch.
+
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Luis R. Rodriguez <mcgrof@suse.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Toshi Kani <toshi.kani@hp.com>
+Cc: elliott@hpe.com
+Cc: konrad.wilk@oracle.com
+Cc: paul.gortmaker@windriver.com
+Cc: xen-devel@lists.xenproject.org
+Link: http://lkml.kernel.org/r/1458769323-24491-2-git-send-email-toshi.kani@hpe.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/pat.h | 2 -
+ arch/x86/mm/pat.c | 73 ++++++++++++++++++++++++++++++++-------------
+ arch/x86/xen/enlighten.c | 2 -
+ 3 files changed, 55 insertions(+), 22 deletions(-)
+
+--- a/arch/x86/include/asm/pat.h
++++ b/arch/x86/include/asm/pat.h
+@@ -6,7 +6,7 @@
+
+ bool pat_enabled(void);
+ extern void pat_init(void);
+-void pat_init_cache_modes(u64);
++void __init_cache_modes(u64);
+
+ extern int reserve_memtype(u64 start, u64 end,
+ enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
+--- a/arch/x86/mm/pat.c
++++ b/arch/x86/mm/pat.c
+@@ -181,7 +181,7 @@ static enum page_cache_mode pat_get_cach
+ * configuration.
+ * Using lower indices is preferred, so we start with highest index.
+ */
+-void pat_init_cache_modes(u64 pat)
++void __init_cache_modes(u64 pat)
+ {
+ enum page_cache_mode cache;
+ char pat_msg[33];
+@@ -207,9 +207,6 @@ static void pat_bsp_init(u64 pat)
+ return;
+ }
+
+- if (!pat_enabled())
+- goto done;
+-
+ rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
+ if (!tmp_pat) {
+ pat_disable("PAT MSR is 0, disabled.");
+@@ -218,15 +215,11 @@ static void pat_bsp_init(u64 pat)
+
+ wrmsrl(MSR_IA32_CR_PAT, pat);
+
+-done:
+- pat_init_cache_modes(pat);
++ __init_cache_modes(pat);
+ }
+
+ static void pat_ap_init(u64 pat)
+ {
+- if (!pat_enabled())
+- return;
+-
+ if (!cpu_has_pat) {
+ /*
+ * If this happens we are on a secondary CPU, but switched to
+@@ -238,18 +231,32 @@ static void pat_ap_init(u64 pat)
+ wrmsrl(MSR_IA32_CR_PAT, pat);
+ }
+
+-void pat_init(void)
++static void init_cache_modes(void)
+ {
+- u64 pat;
+- struct cpuinfo_x86 *c = &boot_cpu_data;
++ u64 pat = 0;
++ static int init_cm_done;
+
+- if (!pat_enabled()) {
++ if (init_cm_done)
++ return;
++
++ if (boot_cpu_has(X86_FEATURE_PAT)) {
++ /*
++ * CPU supports PAT. Set PAT table to be consistent with
++ * PAT MSR. This case supports "nopat" boot option, and
++ * virtual machine environments which support PAT without
++ * MTRRs. In specific, Xen has unique setup to PAT MSR.
++ *
++ * If PAT MSR returns 0, it is considered invalid and emulates
++ * as No PAT.
++ */
++ rdmsrl(MSR_IA32_CR_PAT, pat);
++ }
++
++ if (!pat) {
+ /*
+ * No PAT. Emulate the PAT table that corresponds to the two
+- * cache bits, PWT (Write Through) and PCD (Cache Disable). This
+- * setup is the same as the BIOS default setup when the system
+- * has PAT but the "nopat" boot option has been specified. This
+- * emulated PAT table is used when MSR_IA32_CR_PAT returns 0.
++ * cache bits, PWT (Write Through) and PCD (Cache Disable).
++ * This setup is also the same as the BIOS default setup.
+ *
+ * PTE encoding:
+ *
+@@ -266,10 +273,36 @@ void pat_init(void)
+ */
+ pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
+ PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
++ }
++
++ __init_cache_modes(pat);
++
++ init_cm_done = 1;
++}
++
++/**
++ * pat_init - Initialize PAT MSR and PAT table
++ *
++ * This function initializes PAT MSR and PAT table with an OS-defined value
++ * to enable additional cache attributes, WC and WT.
++ *
++ * This function must be called on all CPUs using the specific sequence of
++ * operations defined in Intel SDM. mtrr_rendezvous_handler() provides this
++ * procedure for PAT.
++ */
++void pat_init(void)
++{
++ u64 pat;
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++
++ if (!pat_enabled()) {
++ init_cache_modes();
++ return;
++ }
+
+- } else if ((c->x86_vendor == X86_VENDOR_INTEL) &&
+- (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
+- ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
++ if ((c->x86_vendor == X86_VENDOR_INTEL) &&
++ (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
++ ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
+ /*
+ * PAT support with the lower four entries. Intel Pentium 2,
+ * 3, M, and 4 are affected by PAT errata, which makes the
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1623,7 +1623,7 @@ asmlinkage __visible void __init xen_sta
+ * configuration.
+ */
+ rdmsrl(MSR_IA32_CR_PAT, pat);
+- pat_init_cache_modes(pat);
++ __init_cache_modes(pat);
+
+ /* keep using Xen gdt for now; no urgent need to change it */
+
--- /dev/null
+From 1886297ce0c8d563a08c8a8c4c0b97743e06cd37 Mon Sep 17 00:00:00 2001
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Mon, 11 Apr 2016 13:36:00 -0600
+Subject: x86/mm/pat: Fix BUG_ON() in mmap_mem() on QEMU/i386
+
+From: Toshi Kani <toshi.kani@hpe.com>
+
+commit 1886297ce0c8d563a08c8a8c4c0b97743e06cd37 upstream.
+
+The following BUG_ON() crash was reported on QEMU/i386:
+
+ kernel BUG at arch/x86/mm/physaddr.c:79!
+ Call Trace:
+ phys_mem_access_prot_allowed
+ mmap_mem
+ ? mmap_region
+ mmap_region
+ do_mmap
+ vm_mmap_pgoff
+ SyS_mmap_pgoff
+ do_int80_syscall_32
+ entry_INT80_32
+
+after commit:
+
+ edfe63ec97ed ("x86/mtrr: Fix Xorg crashes in Qemu sessions")
+
+PAT is now set to the disabled state when MTRRs are disabled,
+thus reactivating the __pa(high_memory) check in
+phys_mem_access_prot_allowed().
+
+When CONFIG_DEBUG_VIRTUAL is set, __pa() calls __phys_addr(),
+which in turn calls slow_virt_to_phys() for 'high_memory'.
+Because 'high_memory' is set to (the max direct mapped virt
+addr + 1), it is not a valid virtual address. Hence,
+slow_virt_to_phys() returns 0 and hits the BUG_ON. Using
+__pa_nodebug() instead of __pa() would fix this BUG_ON.
+
+However, this code block, originally written for Pentiums and
+earlier, is no longer adequate since a 32-bit Xen guest has
+MTRRs disabled and supports ZONE_HIGHMEM. In this setup,
+this code sets the UC attribute for accessing RAM in the high
+memory range.
+
+Delete this code block as it has been unused for a long time.
+
+Reported-by: kernel test robot <ying.huang@linux.intel.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: David Vrabel <david.vrabel@citrix.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: xen-devel@lists.xenproject.org
+Link: http://lkml.kernel.org/r/1460403360-25441-1-git-send-email-toshi.kani@hpe.com
+Link: https://lkml.org/lkml/2016/4/1/608
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/pat.c | 19 -------------------
+ 1 file changed, 19 deletions(-)
+
+--- a/arch/x86/mm/pat.c
++++ b/arch/x86/mm/pat.c
+@@ -778,25 +778,6 @@ int phys_mem_access_prot_allowed(struct
+ if (file->f_flags & O_DSYNC)
+ pcm = _PAGE_CACHE_MODE_UC_MINUS;
+
+-#ifdef CONFIG_X86_32
+- /*
+- * On the PPro and successors, the MTRRs are used to set
+- * memory types for physical addresses outside main memory,
+- * so blindly setting UC or PWT on those pages is wrong.
+- * For Pentiums and earlier, the surround logic should disable
+- * caching for the high addresses through the KEN pin, but
+- * we maintain the tradition of paranoia in this code.
+- */
+- if (!pat_enabled() &&
+- !(boot_cpu_has(X86_FEATURE_MTRR) ||
+- boot_cpu_has(X86_FEATURE_K6_MTRR) ||
+- boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
+- boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
+- (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
+- pcm = _PAGE_CACHE_MODE_UC;
+- }
+-#endif
+-
+ *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
+ cachemode2protval(pcm));
+ return 1;
--- /dev/null
+From d63dcf49cf5ae5605f4d14229e3888e104f294b1 Mon Sep 17 00:00:00 2001
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Wed, 23 Mar 2016 15:41:59 -0600
+Subject: x86/mm/pat: Replace cpu_has_pat with boot_cpu_has()
+
+From: Toshi Kani <toshi.kani@hpe.com>
+
+commit d63dcf49cf5ae5605f4d14229e3888e104f294b1 upstream.
+
+Borislav Petkov suggested:
+
+ > Please use on init paths boot_cpu_has(X86_FEATURE_PAT) and on fast
+ > paths static_cpu_has(X86_FEATURE_PAT). No more of that cpu_has_XXX
+ > ugliness.
+
+Replace the use of cpu_has_pat on init paths with boot_cpu_has().
+
+Suggested-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Luis R. Rodriguez <mcgrof@suse.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Robert Elliott <elliott@hpe.com>
+Cc: Toshi Kani <toshi.kani@hp.com>
+Cc: konrad.wilk@oracle.com
+Cc: paul.gortmaker@windriver.com
+Cc: xen-devel@lists.xenproject.org
+Link: http://lkml.kernel.org/r/1458769323-24491-4-git-send-email-toshi.kani@hpe.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/pat.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/mm/pat.c
++++ b/arch/x86/mm/pat.c
+@@ -213,7 +213,7 @@ static void pat_bsp_init(u64 pat)
+ {
+ u64 tmp_pat;
+
+- if (!cpu_has_pat) {
++ if (!boot_cpu_has(X86_FEATURE_PAT)) {
+ pat_disable("PAT not supported by CPU.");
+ return;
+ }
+@@ -231,7 +231,7 @@ static void pat_bsp_init(u64 pat)
+
+ static void pat_ap_init(u64 pat)
+ {
+- if (!cpu_has_pat) {
++ if (!boot_cpu_has(X86_FEATURE_PAT)) {
+ /*
+ * If this happens we are on a secondary CPU, but switched to
+ * PAT on the boot CPU. We have no way to undo PAT.
--- /dev/null
+From ad025a73f0e9344ac73ffe1b74c184033e08e7d5 Mon Sep 17 00:00:00 2001
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Wed, 23 Mar 2016 15:42:01 -0600
+Subject: x86/mtrr: Fix PAT init handling when MTRR is disabled
+
+From: Toshi Kani <toshi.kani@hpe.com>
+
+commit ad025a73f0e9344ac73ffe1b74c184033e08e7d5 upstream.
+
+get_mtrr_state() calls pat_init() on the BSP even if MTRR is disabled.
+This results in pat_init() being called on the BSP only, since the APs
+do not call pat_init() when MTRR is disabled. This inconsistency between
+the BSP and the APs leads to undefined behavior.
+
+Make the BSP's condition for calling pat_init() consistent with the APs'
+in mtrr_ap_init() and mtrr_aps_init().
+
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Luis R. Rodriguez <mcgrof@suse.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Toshi Kani <toshi.kani@hp.com>
+Cc: elliott@hpe.com
+Cc: konrad.wilk@oracle.com
+Cc: paul.gortmaker@windriver.com
+Cc: xen-devel@lists.xenproject.org
+Link: http://lkml.kernel.org/r/1458769323-24491-6-git-send-email-toshi.kani@hpe.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/mtrr/generic.c | 24 ++++++++++++++----------
+ arch/x86/kernel/cpu/mtrr/main.c | 3 +++
+ arch/x86/kernel/cpu/mtrr/mtrr.h | 1 +
+ 3 files changed, 18 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kernel/cpu/mtrr/generic.c
++++ b/arch/x86/kernel/cpu/mtrr/generic.c
+@@ -444,11 +444,24 @@ static void __init print_mtrr_state(void
+ pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
+ }
+
++/* PAT setup for BP. We need to go through sync steps here */
++void __init mtrr_bp_pat_init(void)
++{
++ unsigned long flags;
++
++ local_irq_save(flags);
++ prepare_set();
++
++ pat_init();
++
++ post_set();
++ local_irq_restore(flags);
++}
++
+ /* Grab all of the MTRR state for this CPU into *state */
+ bool __init get_mtrr_state(void)
+ {
+ struct mtrr_var_range *vrs;
+- unsigned long flags;
+ unsigned lo, dummy;
+ unsigned int i;
+
+@@ -481,15 +494,6 @@ bool __init get_mtrr_state(void)
+
+ mtrr_state_set = 1;
+
+- /* PAT setup for BP. We need to go through sync steps here */
+- local_irq_save(flags);
+- prepare_set();
+-
+- pat_init();
+-
+- post_set();
+- local_irq_restore(flags);
+-
+ return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
+ }
+
+--- a/arch/x86/kernel/cpu/mtrr/main.c
++++ b/arch/x86/kernel/cpu/mtrr/main.c
+@@ -752,6 +752,9 @@ void __init mtrr_bp_init(void)
+ /* BIOS may override */
+ __mtrr_enabled = get_mtrr_state();
+
++ if (mtrr_enabled())
++ mtrr_bp_pat_init();
++
+ if (mtrr_cleanup(phys_addr)) {
+ changed_by_mtrr_cleanup = 1;
+ mtrr_if->set_all();
+--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
++++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
+@@ -52,6 +52,7 @@ void set_mtrr_prepare_save(struct set_mt
+ void fill_mtrr_var_range(unsigned int index,
+ u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
+ bool get_mtrr_state(void);
++void mtrr_bp_pat_init(void);
+
+ extern void set_mtrr_ops(const struct mtrr_ops *ops);
+
--- /dev/null
+From edfe63ec97ed8d4496225f7ba54c9ce4207c5431 Mon Sep 17 00:00:00 2001
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Wed, 23 Mar 2016 15:42:00 -0600
+Subject: x86/mtrr: Fix Xorg crashes in Qemu sessions
+
+From: Toshi Kani <toshi.kani@hpe.com>
+
+commit edfe63ec97ed8d4496225f7ba54c9ce4207c5431 upstream.
+
+A Xorg failure on qemu32 was reported as a regression [1] caused by
+commit 9cd25aac1f44 ("x86/mm/pat: Emulate PAT when it is disabled").
+
+This patch fixes the Xorg crash.
+
+Negative effects of this regression were the following two failures [2]
+in Xorg on QEMU with QEMU CPU model "qemu32" (-cpu qemu32), which were
+triggered by the fact that its virtual CPU does not support MTRRs.
+
+ #1. copy_process() failed in the check in reserve_pfn_range()
+
+ copy_process
+ copy_mm
+ dup_mm
+ dup_mmap
+ copy_page_range
+ track_pfn_copy
+ reserve_pfn_range
+
+ A WC map request was tracked as WC in memtype, which set a PTE as
+ UC (pgprot) per __cachemode2pte_tbl[]. This led to this error in
+ reserve_pfn_range() called from track_pfn_copy(), which obtained
+ a pgprot from a PTE. It converts pgprot to page_cache_mode, which
+ does not necessarily result in the original page_cache_mode since
+ __cachemode2pte_tbl[] redirects multiple types to UC.
+
+ #2. error path in copy_process() then hit WARN_ON_ONCE in
+ untrack_pfn().
+
+ x86/PAT: Xorg:509 map pfn expected mapping type uncached-
+ minus for [mem 0xfd000000-0xfdffffff], got write-combining
+ Call Trace:
+ dump_stack
+ warn_slowpath_common
+ ? untrack_pfn
+ ? untrack_pfn
+ warn_slowpath_null
+ untrack_pfn
+ ? __kunmap_atomic
+ unmap_single_vma
+ ? pagevec_move_tail_fn
+ unmap_vmas
+ exit_mmap
+ mmput
+ copy_process.part.47
+ _do_fork
+ SyS_clone
+ do_syscall_32_irqs_on
+ entry_INT80_32
+
+These negative effects are caused by two separate bugs, but they
+can be addressed in separate patches. Fixing the pat_init() issue
+described below addresses the root cause and prevents Xorg from
+hitting these cases.
+
+When the CPU does not support MTRRs, MTRR does not call pat_init(),
+which leaves PAT enabled without initializing PAT. This pat_init()
+issue is a long-standing issue, but manifested as issue #1 (and then
+hit issue #2) with the above-mentioned commit because the memtype
+now tracks cache attribute with 'page_cache_mode'.
+
+This pat_init() issue existed before the commit, but we used pgprot
+in memtype. Hence, we did not have issue #1 before. But a WC request
+resulted in WT in effect, because the WC pgprot is actually WT when PAT
+is not initialized. This is not how it was designed to work. When
+PAT is properly set to disabled, WC is converted to UC. The use of
+WT can result in a system crash if the target range does not support
+WT. Fortunately, nobody ran into such an issue before.
+
+To fix this pat_init() issue, the PAT code has been enhanced to provide
+a pat_disable() interface. Call this interface when MTRRs are disabled.
+By properly setting PAT to disabled, PAT bypasses the memtype check
+and avoids issue #1.
+
+ [1]: https://lkml.org/lkml/2016/3/3/828
+ [2]: https://lkml.org/lkml/2016/3/4/775
+
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Luis R. Rodriguez <mcgrof@suse.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Toshi Kani <toshi.kani@hp.com>
+Cc: elliott@hpe.com
+Cc: konrad.wilk@oracle.com
+Cc: paul.gortmaker@windriver.com
+Cc: xen-devel@lists.xenproject.org
+Link: http://lkml.kernel.org/r/1458769323-24491-5-git-send-email-toshi.kani@hpe.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/mtrr.h | 6 +++++-
+ arch/x86/kernel/cpu/mtrr/main.c | 10 +++++++++-
+ 2 files changed, 14 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/mtrr.h
++++ b/arch/x86/include/asm/mtrr.h
+@@ -24,6 +24,7 @@
+ #define _ASM_X86_MTRR_H
+
+ #include <uapi/asm/mtrr.h>
++#include <asm/pat.h>
+
+
+ /*
+@@ -83,9 +84,12 @@ static inline int mtrr_trim_uncached_mem
+ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
+ {
+ }
++static inline void mtrr_bp_init(void)
++{
++ pat_disable("MTRRs disabled, skipping PAT initialization too.");
++}
+
+ #define mtrr_ap_init() do {} while (0)
+-#define mtrr_bp_init() do {} while (0)
+ #define set_mtrr_aps_delayed_init() do {} while (0)
+ #define mtrr_aps_init() do {} while (0)
+ #define mtrr_bp_restore() do {} while (0)
+--- a/arch/x86/kernel/cpu/mtrr/main.c
++++ b/arch/x86/kernel/cpu/mtrr/main.c
+@@ -759,8 +759,16 @@ void __init mtrr_bp_init(void)
+ }
+ }
+
+- if (!mtrr_enabled())
++ if (!mtrr_enabled()) {
+ pr_info("MTRR: Disabled\n");
++
++ /*
++ * PAT initialization relies on MTRR's rendezvous handler.
++ * Skip PAT init until the handler can initialize both
++ * features independently.
++ */
++ pat_disable("MTRRs disabled, skipping PAT initialization too.");
++ }
+ }
+
+ void mtrr_ap_init(void)
--- /dev/null
+From b6350c21cfe8aa9d65e189509a23c0ea4b8362c2 Mon Sep 17 00:00:00 2001
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Wed, 23 Mar 2016 15:42:03 -0600
+Subject: x86/pat: Document the PAT initialization sequence
+
+From: Toshi Kani <toshi.kani@hpe.com>
+
+commit b6350c21cfe8aa9d65e189509a23c0ea4b8362c2 upstream.
+
+Update PAT documentation to describe how PAT is initialized under
+various configurations.
+
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Luis R. Rodriguez <mcgrof@suse.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Toshi Kani <toshi.kani@hp.com>
+Cc: elliott@hpe.com
+Cc: konrad.wilk@oracle.com
+Cc: paul.gortmaker@windriver.com
+Cc: xen-devel@lists.xenproject.org
+Link: http://lkml.kernel.org/r/1458769323-24491-8-git-send-email-toshi.kani@hpe.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/x86/pat.txt | 32 ++++++++++++++++++++++++++++++++
+ 1 file changed, 32 insertions(+)
+
+--- a/Documentation/x86/pat.txt
++++ b/Documentation/x86/pat.txt
+@@ -196,3 +196,35 @@ Another, more verbose way of getting PAT
+ "debugpat" boot parameter. With this parameter, various debug messages are
+ printed to dmesg log.
+
++PAT Initialization
++------------------
++
++The following table describes how PAT is initialized under various
++configurations. The PAT MSR must be updated by Linux in order to support WC
++and WT attributes. Otherwise, the PAT MSR has the value programmed in it
++by the firmware. Note, Xen enables WC attribute in the PAT MSR for guests.
++
++ MTRR PAT Call Sequence PAT State PAT MSR
++ =========================================================
++ E E MTRR -> PAT init Enabled OS
++ E D MTRR -> PAT init Disabled -
++ D E MTRR -> PAT disable Disabled BIOS
++ D D MTRR -> PAT disable Disabled -
++ - np/E PAT -> PAT disable Disabled BIOS
++ - np/D PAT -> PAT disable Disabled -
++ E !P/E MTRR -> PAT init Disabled BIOS
++ D !P/E MTRR -> PAT disable Disabled BIOS
++ !M !P/E MTRR stub -> PAT disable Disabled BIOS
++
++ Legend
++ ------------------------------------------------
++ E Feature enabled in CPU
++ D Feature disabled/unsupported in CPU
++ np "nopat" boot option specified
++ !P CONFIG_X86_PAT option unset
++ !M CONFIG_MTRR option unset
++ Enabled PAT state set to enabled
++ Disabled PAT state set to disabled
++ OS PAT initializes PAT MSR with OS setting
++ BIOS PAT keeps PAT MSR with BIOS setting
++
--- /dev/null
+From 88ba281108ed0c25c9d292b48bd3f272fcb90dd0 Mon Sep 17 00:00:00 2001
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Wed, 23 Mar 2016 15:42:02 -0600
+Subject: x86/xen, pat: Remove PAT table init code from Xen
+
+From: Toshi Kani <toshi.kani@hpe.com>
+
+commit 88ba281108ed0c25c9d292b48bd3f272fcb90dd0 upstream.
+
+Xen supports PAT without MTRRs for its guests. In order to
+enable the WC attribute, it was necessary for xen_start_kernel()
+to call pat_init_cache_modes() to update the PAT table before
+starting the guest kernel.
+
+Now that the kernel initializes the PAT table to the BIOS handoff
+state when MTRR is disabled, this Xen-specific PAT init code
+is no longer necessary. Delete it from xen_start_kernel().
+
+Also change __init_cache_modes() to a static function since
+PAT table should not be tweaked by other modules.
+
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Juergen Gross <jgross@suse.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Luis R. Rodriguez <mcgrof@suse.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Toshi Kani <toshi.kani@hp.com>
+Cc: elliott@hpe.com
+Cc: paul.gortmaker@windriver.com
+Cc: xen-devel@lists.xenproject.org
+Link: http://lkml.kernel.org/r/1458769323-24491-7-git-send-email-toshi.kani@hpe.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/pat.h | 1 -
+ arch/x86/mm/pat.c | 2 +-
+ arch/x86/xen/enlighten.c | 9 ---------
+ 3 files changed, 1 insertion(+), 11 deletions(-)
+
+--- a/arch/x86/include/asm/pat.h
++++ b/arch/x86/include/asm/pat.h
+@@ -7,7 +7,6 @@
+ bool pat_enabled(void);
+ void pat_disable(const char *reason);
+ extern void pat_init(void);
+-void __init_cache_modes(u64);
+
+ extern int reserve_memtype(u64 start, u64 end,
+ enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
+--- a/arch/x86/mm/pat.c
++++ b/arch/x86/mm/pat.c
+@@ -192,7 +192,7 @@ static enum page_cache_mode pat_get_cach
+ * configuration.
+ * Using lower indices is preferred, so we start with highest index.
+ */
+-void __init_cache_modes(u64 pat)
++static void __init_cache_modes(u64 pat)
+ {
+ enum page_cache_mode cache;
+ char pat_msg[33];
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -75,7 +75,6 @@
+ #include <asm/mach_traps.h>
+ #include <asm/mwait.h>
+ #include <asm/pci_x86.h>
+-#include <asm/pat.h>
+ #include <asm/cpu.h>
+
+ #ifdef CONFIG_ACPI
+@@ -1511,7 +1510,6 @@ asmlinkage __visible void __init xen_sta
+ {
+ struct physdev_set_iopl set_iopl;
+ unsigned long initrd_start = 0;
+- u64 pat;
+ int rc;
+
+ if (!xen_start_info)
+@@ -1618,13 +1616,6 @@ asmlinkage __visible void __init xen_sta
+ xen_start_info->nr_pages);
+ xen_reserve_special_pages();
+
+- /*
+- * Modify the cache mode translation tables to match Xen's PAT
+- * configuration.
+- */
+- rdmsrl(MSR_IA32_CR_PAT, pat);
+- __init_cache_modes(pat);
+-
+ /* keep using Xen gdt for now; no urgent need to change it */
+
+ #ifdef CONFIG_X86_32