--- /dev/null
+From 462859aa7bbe1ac83ec4377a0a06fe60778f3f27 Mon Sep 17 00:00:00 2001
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+Date: Wed, 8 Jul 2015 13:21:55 +0100
+Subject: ARM: 8404/1: dma-mapping: fix off-by-one error in bitmap size check
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+commit 462859aa7bbe1ac83ec4377a0a06fe60778f3f27 upstream.
+
+The nr_bitmaps member of the mapping structure stores the number of
+already allocated bitmaps and is also used as the loop iterator (it
+starts from 0, not from 1), so the comparison against the number of
+possible bitmap extensions must account for this. This patch fixes the
+extension failure condition accordingly. This issue was introduced by
+commit 4d852ef8c2544ce21ae41414099a7504c61164a0 ("arm: dma-mapping: Add
+support to extend DMA IOMMU mappings").
+
+Reported-by: Hyungwon Hwang <human.hwang@samsung.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: Hyungwon Hwang <human.hwang@samsung.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
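+To illustrate the off-by-one, a minimal standalone model of the bounds
+check (illustrative names and sizes, not the kernel structures):
+
+  #include <stdio.h>
+
+  #define EXTENSIONS 4
+
+  static int nr_bitmaps;                  /* bitmaps allocated so far     */
+  static int bitmaps[EXTENSIONS];         /* valid slots: 0..EXTENSIONS-1 */
+
+  static int extend(void)
+  {
+          if (nr_bitmaps >= EXTENSIONS)   /* '>' would admit a 5th call   */
+                  return -1;
+          bitmaps[nr_bitmaps++] = 1;      /* nr_bitmaps is the next slot  */
+          return 0;
+  }
+
+  int main(void)
+  {
+          for (int i = 0; i < EXTENSIONS + 1; i++)
+                  printf("extend %d -> %d\n", i, extend());
+          return 0;
+  }
+
+With '>' the fifth call would write bitmaps[4], one element past the end;
+'>=' correctly rejects it.
+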
+---
+ arch/arm/mm/dma-mapping.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -1953,7 +1953,7 @@ static int extend_iommu_mapping(struct d
+ {
+ int next_bitmap;
+
+- if (mapping->nr_bitmaps > mapping->extensions)
++ if (mapping->nr_bitmaps >= mapping->extensions)
+ return -EINVAL;
+
+ next_bitmap = mapping->nr_bitmaps;
--- /dev/null
+From 9ab402aed38b95d9ce453108622be0fc6f167568 Mon Sep 17 00:00:00 2001
+From: Roger Quadros <rogerq@ti.com>
+Date: Wed, 17 Jun 2015 17:52:43 +0300
+Subject: ARM: dts: am57xx-beagle-x15: Provide supply for usb2_phy2
+
+From: Roger Quadros <rogerq@ti.com>
+
+commit 9ab402aed38b95d9ce453108622be0fc6f167568 upstream.
+
+Without this, USB2 breaks if USB1 is disabled or if USB1
+initializes after USB2, e.g. due to deferred probing.
+
+Fixes: 5a0f93c6576a ("ARM: dts: Add am57xx-beagle-x15")
+Signed-off-by: Roger Quadros <rogerq@ti.com>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/am57xx-beagle-x15.dts | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
++++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
+@@ -544,6 +544,10 @@
+ phy-supply = <&ldousb_reg>;
+ };
+
++&usb2_phy2 {
++ phy-supply = <&ldousb_reg>;
++};
++
+ &usb1 {
+ dr_mode = "host";
+ pinctrl-names = "default";
--- /dev/null
+From 2acb5c301edf39ab6d066687ce70da1166e4de9e Mon Sep 17 00:00:00 2001
+From: Roger Quadros <rogerq@ti.com>
+Date: Tue, 7 Jul 2015 17:27:57 +0300
+Subject: ARM: dts: dra7x-evm: Prevent glitch on DCAN1 pinmux
+
+From: Roger Quadros <rogerq@ti.com>
+
+commit 2acb5c301edf39ab6d066687ce70da1166e4de9e upstream.
+
+The driver core sets the "default" pinmux on probe and the CAN
+driver sets the "sleep" pinmux during registration. This causes a
+small window where the CAN pins are in the "default" state while
+the DCAN module is disabled.
+
+Change the "default" state to be like sleep so this glitch is
+avoided. Add a new "active" state that is used by the driver
+when CAN is actually active.
+
+Signed-off-by: Roger Quadros <rogerq@ti.com>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/dra7-evm.dts | 5 +++--
+ arch/arm/boot/dts/dra72-evm.dts | 5 +++--
+ 2 files changed, 6 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/boot/dts/dra7-evm.dts
++++ b/arch/arm/boot/dts/dra7-evm.dts
+@@ -686,7 +686,8 @@
+
+ &dcan1 {
+ status = "ok";
+- pinctrl-names = "default", "sleep";
+- pinctrl-0 = <&dcan1_pins_default>;
++ pinctrl-names = "default", "sleep", "active";
++ pinctrl-0 = <&dcan1_pins_sleep>;
+ pinctrl-1 = <&dcan1_pins_sleep>;
++ pinctrl-2 = <&dcan1_pins_default>;
+ };
+--- a/arch/arm/boot/dts/dra72-evm.dts
++++ b/arch/arm/boot/dts/dra72-evm.dts
+@@ -497,9 +497,10 @@
+
+ &dcan1 {
+ status = "ok";
+- pinctrl-names = "default", "sleep";
+- pinctrl-0 = <&dcan1_pins_default>;
++ pinctrl-names = "default", "sleep", "active";
++ pinctrl-0 = <&dcan1_pins_sleep>;
+ pinctrl-1 = <&dcan1_pins_sleep>;
++ pinctrl-2 = <&dcan1_pins_default>;
+ };
+
+ &qspi {
--- /dev/null
+From d438462c20a300139c2e5e65b96cadaa21b58d9a Mon Sep 17 00:00:00 2001
+From: Lucas Stach <l.stach@pengutronix.de>
+Date: Tue, 30 Jun 2015 14:48:24 +0200
+Subject: ARM: imx6: gpc: always enable PU domain if CONFIG_PM is not set
+
+From: Lucas Stach <l.stach@pengutronix.de>
+
+commit d438462c20a300139c2e5e65b96cadaa21b58d9a upstream.
+
+If CONFIG_PM is not set, the PU power domain always needs to be enabled;
+otherwise there are two failure scenarios which will hang the system if
+one of the devices in the PU domain is accessed.
+
+1. New DTs (4.1+) drop the "always-on" property from the PU regulator, so
+if it isn't properly enabled by the GPC code it will be disabled at the
+end of boot.
+
+2. If the bootloader already disabled the PU domain, the GPC explicitly
+needs to enable it again, even if the kernel doesn't do any power
+management. This is a bit hypothetical, as it requires booting a
+mainline kernel with a downstream bootloader; no mainline bootloader
+disables the PM domains.
+
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Acked-by: Philipp Zabel <p.zabel@pengutronix.de>
+Signed-off-by: Shawn Guo <shawn.guo@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-imx/gpc.c | 27 ++++++---------------------
+ 1 file changed, 6 insertions(+), 21 deletions(-)
+
+--- a/arch/arm/mach-imx/gpc.c
++++ b/arch/arm/mach-imx/gpc.c
+@@ -291,8 +291,6 @@ void __init imx_gpc_check_dt(void)
+ }
+ }
+
+-#ifdef CONFIG_PM_GENERIC_DOMAINS
+-
+ static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
+ {
+ int iso, iso2sw;
+@@ -399,7 +397,6 @@ static struct genpd_onecell_data imx_gpc
+ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
+ {
+ struct clk *clk;
+- bool is_off;
+ int i;
+
+ imx6q_pu_domain.reg = pu_reg;
+@@ -416,18 +413,13 @@ static int imx_gpc_genpd_init(struct dev
+ }
+ imx6q_pu_domain.num_clks = i;
+
+- is_off = IS_ENABLED(CONFIG_PM);
+- if (is_off) {
+- _imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
+- } else {
+- /*
+- * Enable power if compiled without CONFIG_PM in case the
+- * bootloader disabled it.
+- */
+- imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
+- }
++ /* Enable power always in case bootloader disabled it. */
++ imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
+
+- pm_genpd_init(&imx6q_pu_domain.base, NULL, is_off);
++ if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
++ return 0;
++
++ pm_genpd_init(&imx6q_pu_domain.base, NULL, false);
+ return of_genpd_add_provider_onecell(dev->of_node,
+ &imx_gpc_onecell_data);
+
+@@ -437,13 +429,6 @@ clk_err:
+ return -EINVAL;
+ }
+
+-#else
+-static inline int imx_gpc_genpd_init(struct device *dev, struct regulator *reg)
+-{
+- return 0;
+-}
+-#endif /* CONFIG_PM_GENERIC_DOMAINS */
+-
+ static int imx_gpc_probe(struct platform_device *pdev)
+ {
+ struct regulator *pu_reg;
--- /dev/null
+From a927ef895e288e79f1bfed221f27d7bfa37e907f Mon Sep 17 00:00:00 2001
+From: Robert Jarzmik <robert.jarzmik@free.fr>
+Date: Sat, 11 Jul 2015 21:33:06 +0200
+Subject: ARM: pxa: fix dm9000 platform data regression
+
+From: Robert Jarzmik <robert.jarzmik@free.fr>
+
+commit a927ef895e288e79f1bfed221f27d7bfa37e907f upstream.
+
+Since the dm9000 driver added support for a vcc regulator, platform-data
+based platforms have their ethernet broken, as claiming the regulator
+returns -EPROBE_DEFER and prevents the dm9000 driver from loading.
+
+This patch fixes this for all pxa boards using the dm9000, by calling
+the regulator_has_full_constraints() function.
+
+This was discovered and tested on the cm-x300 board.
+
+Fixes: 7994fe55a4a2 ("dm9000: Add regulator and reset support to dm9000")
+Signed-off-by: Robert Jarzmik <robert.jarzmik@free.fr>
+Acked-by: Igor Grinberg <grinberg@compulab.co.il>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
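+A sketch of the consumer side of the problem (illustrative, not the
+dm9000 source): if a supply is missing and the regulator core has not
+been told that all constraints are known, the lookup keeps deferring:
+
+  struct regulator *vcc = devm_regulator_get(dev, "vcc");
+
+  if (IS_ERR(vcc))
+          return PTR_ERR(vcc);    /* -EPROBE_DEFER on boards that never
+                                     register a vcc supply */
+
+Calling regulator_has_full_constraints() from the board init code tells
+the core that no further regulators will appear, so such lookups return a
+dummy regulator instead and probing can complete.
+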
+---
+ arch/arm/mach-pxa/capc7117.c | 3 +++
+ arch/arm/mach-pxa/cm-x2xx.c | 3 +++
+ arch/arm/mach-pxa/cm-x300.c | 2 ++
+ arch/arm/mach-pxa/colibri-pxa270.c | 3 +++
+ arch/arm/mach-pxa/em-x270.c | 2 ++
+ arch/arm/mach-pxa/icontrol.c | 3 +++
+ arch/arm/mach-pxa/trizeps4.c | 3 +++
+ arch/arm/mach-pxa/vpac270.c | 3 +++
+ arch/arm/mach-pxa/zeus.c | 2 ++
+ 9 files changed, 24 insertions(+)
+
+--- a/arch/arm/mach-pxa/capc7117.c
++++ b/arch/arm/mach-pxa/capc7117.c
+@@ -24,6 +24,7 @@
+ #include <linux/ata_platform.h>
+ #include <linux/serial_8250.h>
+ #include <linux/gpio.h>
++#include <linux/regulator/machine.h>
+
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+@@ -144,6 +145,8 @@ static void __init capc7117_init(void)
+
+ capc7117_uarts_init();
+ capc7117_ide_init();
++
++ regulator_has_full_constraints();
+ }
+
+ MACHINE_START(CAPC7117,
+--- a/arch/arm/mach-pxa/cm-x2xx.c
++++ b/arch/arm/mach-pxa/cm-x2xx.c
+@@ -13,6 +13,7 @@
+ #include <linux/syscore_ops.h>
+ #include <linux/irq.h>
+ #include <linux/gpio.h>
++#include <linux/regulator/machine.h>
+
+ #include <linux/dm9000.h>
+ #include <linux/leds.h>
+@@ -466,6 +467,8 @@ static void __init cmx2xx_init(void)
+ cmx2xx_init_ac97();
+ cmx2xx_init_touchscreen();
+ cmx2xx_init_leds();
++
++ regulator_has_full_constraints();
+ }
+
+ static void __init cmx2xx_init_irq(void)
+--- a/arch/arm/mach-pxa/cm-x300.c
++++ b/arch/arm/mach-pxa/cm-x300.c
+@@ -835,6 +835,8 @@ static void __init cm_x300_init(void)
+ cm_x300_init_ac97();
+ cm_x300_init_wi2wi();
+ cm_x300_init_bl();
++
++ regulator_has_full_constraints();
+ }
+
+ static void __init cm_x300_fixup(struct tag *tags, char **cmdline)
+--- a/arch/arm/mach-pxa/colibri-pxa270.c
++++ b/arch/arm/mach-pxa/colibri-pxa270.c
+@@ -18,6 +18,7 @@
+ #include <linux/mtd/partitions.h>
+ #include <linux/mtd/physmap.h>
+ #include <linux/platform_device.h>
++#include <linux/regulator/machine.h>
+ #include <linux/ucb1400.h>
+
+ #include <asm/mach/arch.h>
+@@ -294,6 +295,8 @@ static void __init colibri_pxa270_init(v
+ printk(KERN_ERR "Illegal colibri_pxa270_baseboard type %d\n",
+ colibri_pxa270_baseboard);
+ }
++
++ regulator_has_full_constraints();
+ }
+
+ /* The "Income s.r.o. SH-Dmaster PXA270 SBC" board can be booted either
+--- a/arch/arm/mach-pxa/em-x270.c
++++ b/arch/arm/mach-pxa/em-x270.c
+@@ -1306,6 +1306,8 @@ static void __init em_x270_init(void)
+ em_x270_init_i2c();
+ em_x270_init_camera();
+ em_x270_userspace_consumers_init();
++
++ regulator_has_full_constraints();
+ }
+
+ MACHINE_START(EM_X270, "Compulab EM-X270")
+--- a/arch/arm/mach-pxa/icontrol.c
++++ b/arch/arm/mach-pxa/icontrol.c
+@@ -26,6 +26,7 @@
+ #include <linux/spi/spi.h>
+ #include <linux/spi/pxa2xx_spi.h>
+ #include <linux/can/platform/mcp251x.h>
++#include <linux/regulator/machine.h>
+
+ #include "generic.h"
+
+@@ -185,6 +186,8 @@ static void __init icontrol_init(void)
+ mxm_8x10_mmc_init();
+
+ icontrol_can_init();
++
++ regulator_has_full_constraints();
+ }
+
+ MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM")
+--- a/arch/arm/mach-pxa/trizeps4.c
++++ b/arch/arm/mach-pxa/trizeps4.c
+@@ -26,6 +26,7 @@
+ #include <linux/dm9000.h>
+ #include <linux/mtd/physmap.h>
+ #include <linux/mtd/partitions.h>
++#include <linux/regulator/machine.h>
+ #include <linux/i2c/pxa-i2c.h>
+
+ #include <asm/types.h>
+@@ -534,6 +535,8 @@ static void __init trizeps4_init(void)
+
+ BCR_writew(trizeps_conxs_bcr);
+ board_backlight_power(1);
++
++ regulator_has_full_constraints();
+ }
+
+ static void __init trizeps4_map_io(void)
+--- a/arch/arm/mach-pxa/vpac270.c
++++ b/arch/arm/mach-pxa/vpac270.c
+@@ -24,6 +24,7 @@
+ #include <linux/dm9000.h>
+ #include <linux/ucb1400.h>
+ #include <linux/ata_platform.h>
++#include <linux/regulator/machine.h>
+ #include <linux/regulator/max1586.h>
+ #include <linux/i2c/pxa-i2c.h>
+
+@@ -711,6 +712,8 @@ static void __init vpac270_init(void)
+ vpac270_ts_init();
+ vpac270_rtc_init();
+ vpac270_ide_init();
++
++ regulator_has_full_constraints();
+ }
+
+ MACHINE_START(VPAC270, "Voipac PXA270")
+--- a/arch/arm/mach-pxa/zeus.c
++++ b/arch/arm/mach-pxa/zeus.c
+@@ -868,6 +868,8 @@ static void __init zeus_init(void)
+ i2c_register_board_info(0, ARRAY_AND_SIZE(zeus_i2c_devices));
+ pxa2xx_set_spi_info(3, &pxa2xx_spi_ssp3_master_info);
+ spi_register_board_info(zeus_spi_board_info, ARRAY_SIZE(zeus_spi_board_info));
++
++ regulator_has_full_constraints();
+ }
+
+ static struct map_desc zeus_io_desc[] __initdata = {
--- /dev/null
+From 033365191136c97f88c81b7bd0011414db28bb4e Mon Sep 17 00:00:00 2001
+From: "J.D. Schroeder" <jay.schroeder@garmin.com>
+Date: Wed, 8 Jul 2015 14:38:12 +0300
+Subject: can: c_can: Fix default pinmux glitch at init
+
+From: "J.D. Schroeder" <jay.schroeder@garmin.com>
+
+commit 033365191136c97f88c81b7bd0011414db28bb4e upstream.
+
+The previous change 3973c526ae9c (net: can: c_can: Disable pins when CAN
+interface is down) causes a slight glitch on the pinctrl settings when used.
+Since commit ab78029 (drivers/pinctrl: grab default handles from device core),
+the device core will automatically set the default pins. This causes the pins
+to be momentarily set to the default and then to the sleep state in
+register_c_can_dev(). By adding an optional "active" state, boards can set the
+default pin state to be disabled and avoid the glitch when the switch from
+default to sleep first occurs. If the "active" state is not available, the
+driver falls back to using the "default" pinctrl state.
+
+[Roger Q] - Forward port to v4.2 and use pinctrl_get_select().
+
+Signed-off-by: J.D. Schroeder <jay.schroeder@garmin.com>
+Signed-off-by: Roger Quadros <rogerq@ti.com>
+Reviewed-by: Grygorii Strashko <grygorii.strashko@ti.com>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
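+For reference, pinctrl_get_select() is roughly the composition of three
+core pinctrl calls (a sketch, error handling omitted):
+
+  struct pinctrl *p = pinctrl_get(dev);
+  struct pinctrl_state *s = pinctrl_lookup_state(p, "active");
+
+  pinctrl_select_state(p, s);
+
+If the board's DT does not define an "active" state, the lookup fails and
+pinctrl_get_select() returns an error pointer, which is what lets the
+driver fall back to the "default" state.
+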
+---
+ drivers/net/can/c_can/c_can.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -592,6 +592,7 @@ static int c_can_start(struct net_device
+ {
+ struct c_can_priv *priv = netdev_priv(dev);
+ int err;
++ struct pinctrl *p;
+
+ /* basic c_can configuration */
+ err = c_can_chip_config(dev);
+@@ -604,8 +605,13 @@ static int c_can_start(struct net_device
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+- /* activate pins */
+- pinctrl_pm_select_default_state(dev->dev.parent);
++ /* Attempt to use "active" if available else use "default" */
++ p = pinctrl_get_select(priv->device, "active");
++ if (!IS_ERR(p))
++ pinctrl_put(p);
++ else
++ pinctrl_pm_select_default_state(priv->device);
++
+ return 0;
+ }
+
--- /dev/null
+From 25b401c1816ae64bcc5dcb1d39ab41812522a0ce Mon Sep 17 00:00:00 2001
+From: Stefan Agner <stefan@agner.ch>
+Date: Mon, 18 May 2015 18:33:27 +0200
+Subject: can: mcp251x: fix resume when device is down
+
+From: Stefan Agner <stefan@agner.ch>
+
+commit 25b401c1816ae64bcc5dcb1d39ab41812522a0ce upstream.
+
+If a valid power regulator or a dummy regulator is used (which
+happens to be the case when no regulator is specified), restart_work
+is queued regardless of whether the device was running at suspend
+time. Since the work queue is initialized in the ndo_open callback,
+resuming a device that was never opened leads to a NULL pointer
+dereference.
+
+Reverse exactly the steps executed at suspend time:
+- Enable the power regulator in any case
+- Enable the transceiver regulator if the device was running, even in
+ case we have a power regulator
+- Queue restart_work only in case the device was running
+
+Fixes: bf66f3736a94 ("can: mcp251x: Move to threaded interrupts instead of workqueues.")
+Signed-off-by: Stefan Agner <stefan@agner.ch>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
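+For context, the suspend path records what has to be undone roughly like
+this (a simplified sketch of the existing driver logic, not a verbatim
+quote):
+
+  if (netif_running(net)) {
+          mcp251x_power_enable(priv->transceiver, 0);
+          priv->after_suspend = AFTER_SUSPEND_UP;
+  } else {
+          priv->after_suspend = AFTER_SUSPEND_DOWN;
+  }
+
+  if (priv->power) {
+          regulator_disable(priv->power);
+          priv->after_suspend |= AFTER_SUSPEND_POWER;
+  }
+
+The fixed resume path now mirrors this: enable the power regulator
+whenever AFTER_SUSPEND_POWER is set, and enable the transceiver and queue
+restart_work only when AFTER_SUSPEND_UP is set.
+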
+---
+ drivers/net/can/spi/mcp251x.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -1221,17 +1221,16 @@ static int __maybe_unused mcp251x_can_re
+ struct spi_device *spi = to_spi_device(dev);
+ struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+- if (priv->after_suspend & AFTER_SUSPEND_POWER) {
++ if (priv->after_suspend & AFTER_SUSPEND_POWER)
+ mcp251x_power_enable(priv->power, 1);
++
++ if (priv->after_suspend & AFTER_SUSPEND_UP) {
++ mcp251x_power_enable(priv->transceiver, 1);
+ queue_work(priv->wq, &priv->restart_work);
+ } else {
+- if (priv->after_suspend & AFTER_SUSPEND_UP) {
+- mcp251x_power_enable(priv->transceiver, 1);
+- queue_work(priv->wq, &priv->restart_work);
+- } else {
+- priv->after_suspend = 0;
+- }
++ priv->after_suspend = 0;
+ }
++
+ priv->force_quit = 0;
+ enable_irq(spi->irq);
+ return 0;
--- /dev/null
+From 5e63e6baa159fa8c787cf783dbf3d77fbea97331 Mon Sep 17 00:00:00 2001
+From: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+Date: Sat, 20 Jun 2015 03:32:46 +0300
+Subject: can: rcar_can: fix IRQ check
+
+From: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+
+commit 5e63e6baa159fa8c787cf783dbf3d77fbea97331 upstream.
+
+rcar_can_probe() regards 0 as an invalid IRQ #, even though platform_get_irq(),
+which it calls, returns a negative error code in that case. This leads to the
+following
+being printed to the console when attempting to open the device:
+
+error requesting interrupt fffffffa
+
+because rcar_can_open() calls request_irq() with a negative IRQ #, and that
+function naturally fails with -EINVAL.
+
+Check for negative error codes instead and propagate them upstream rather
+than just returning -ENODEV.
+
+Fixes: fd1159318e55 ("can: add Renesas R-Car CAN driver")
+Signed-off-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
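+The hex value in the log is simply a negative errno printed with the
+unsigned "%x" format: -6 (presumably -ENXIO from platform_get_irq())
+is 0xfffffffa as a 32-bit value. A standalone illustration:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          int irq = -6;   /* a platform_get_irq() style error code */
+          printf("error requesting interrupt %x\n", irq);
+          return 0;
+  }
+
+This prints "error requesting interrupt fffffffa", matching the report.
+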
+---
+ drivers/net/can/rcar_can.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/can/rcar_can.c
++++ b/drivers/net/can/rcar_can.c
+@@ -758,8 +758,9 @@ static int rcar_can_probe(struct platfor
+ }
+
+ irq = platform_get_irq(pdev, 0);
+- if (!irq) {
++ if (irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
++ err = irq;
+ goto fail;
+ }
+
--- /dev/null
+From c1a4c87b06fa564d6e2760a12d4e5a09badc684b Mon Sep 17 00:00:00 2001
+From: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+Date: Sat, 20 Jun 2015 03:33:53 +0300
+Subject: can: rcar_can: print signed IRQ #
+
+From: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+
+commit c1a4c87b06fa564d6e2760a12d4e5a09badc684b upstream.
+
+Printing the IRQ # using the "%x" and "%u" unsigned formats isn't quite
+correct, as 'ndev->irq' is of type *int*, so the "%d" format needs to be
+used instead.
+
+While fixing this, beautify the dev_info() message in rcar_can_probe() a bit.
+
+Fixes: fd1159318e55 ("can: add Renesas R-Car CAN driver")
+Signed-off-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/rcar_can.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/can/rcar_can.c
++++ b/drivers/net/can/rcar_can.c
+@@ -526,7 +526,7 @@ static int rcar_can_open(struct net_devi
+ napi_enable(&priv->napi);
+ err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
+ if (err) {
+- netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq);
++ netdev_err(ndev, "error requesting interrupt %d\n", ndev->irq);
+ goto out_close;
+ }
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+@@ -824,7 +824,7 @@ static int rcar_can_probe(struct platfor
+
+ devm_can_led_init(ndev);
+
+- dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
++ dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
+ priv->regs, ndev->irq);
+
+ return 0;
--- /dev/null
+From d3b58c47d330de8c29898fe9746f7530408f8a59 Mon Sep 17 00:00:00 2001
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+Date: Fri, 26 Jun 2015 11:58:19 +0200
+Subject: can: replace timestamp as unique skb attribute
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+commit d3b58c47d330de8c29898fe9746f7530408f8a59 upstream.
+
+Commit 514ac99c64b ("can: fix multiple delivery of a single CAN frame for
+overlapping CAN filters") requires the skb->tstamp to be set to check for
+identical CAN skbs.
+
+Since timestamping was not required by user space applications, this
+timestamp was not generated, which led to commit 36c01245eb8 ("can: fix
+loss of CAN frames in raw_rcv"), which forces the timestamp to be set in
+all CAN related skbuffs by introducing several __net_timestamp() calls.
+
+This forces e.g. out-of-tree drivers which are not using alloc_can{,fd}_skb()
+to add __net_timestamp() after skbuff creation to prevent the frame loss fixed
+in mainline Linux.
+
+This patch removes the timestamp dependency and uses an atomic counter to
+create a unique identifier together with the skbuff pointer.
+
+Note: the new skbcnt element introduced in struct can_skb_priv also has to
+be initialized to zero in out-of-tree drivers which are not using
+alloc_can{,fd}_skb().
+
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
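+A standalone model of the identifier scheme (C11 atomics instead of the
+kernel's atomic_t): the id is assigned lazily on first reception, is
+never zero, and together with the skb pointer uniquely tags a frame even
+when the pointer is recycled:
+
+  #include <stdatomic.h>
+  #include <stdio.h>
+
+  static atomic_int skbcounter;
+
+  struct frame { int skbcnt; };
+
+  static void tag_frame(struct frame *f)
+  {
+          /* the loop skips the value 0 if the counter ever wraps */
+          while (!f->skbcnt)
+                  f->skbcnt = atomic_fetch_add(&skbcounter, 1) + 1;
+  }
+
+  int main(void)
+  {
+          struct frame a = {0}, b = {0};
+
+          tag_frame(&a);
+          tag_frame(&b);
+          printf("a=%d b=%d\n", a.skbcnt, b.skbcnt);  /* a=1 b=2 */
+          return 0;
+  }
+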
+---
+ drivers/net/can/dev.c | 7 ++-----
+ drivers/net/can/slcan.c | 2 +-
+ drivers/net/can/vcan.c | 3 ---
+ include/linux/can/skb.h | 2 ++
+ net/can/af_can.c | 12 +++++++-----
+ net/can/bcm.c | 2 ++
+ net/can/raw.c | 7 ++++---
+ 7 files changed, 18 insertions(+), 17 deletions(-)
+
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -440,9 +440,6 @@ unsigned int can_get_echo_skb(struct net
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u8 dlc = cf->can_dlc;
+
+- if (!(skb->tstamp.tv64))
+- __net_timestamp(skb);
+-
+ netif_rx(priv->echo_skb[idx]);
+ priv->echo_skb[idx] = NULL;
+
+@@ -578,7 +575,6 @@ struct sk_buff *alloc_can_skb(struct net
+ if (unlikely(!skb))
+ return NULL;
+
+- __net_timestamp(skb);
+ skb->protocol = htons(ETH_P_CAN);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+@@ -589,6 +585,7 @@ struct sk_buff *alloc_can_skb(struct net
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
++ can_skb_prv(skb)->skbcnt = 0;
+
+ *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+ memset(*cf, 0, sizeof(struct can_frame));
+@@ -607,7 +604,6 @@ struct sk_buff *alloc_canfd_skb(struct n
+ if (unlikely(!skb))
+ return NULL;
+
+- __net_timestamp(skb);
+ skb->protocol = htons(ETH_P_CANFD);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+@@ -618,6 +614,7 @@ struct sk_buff *alloc_canfd_skb(struct n
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
++ can_skb_prv(skb)->skbcnt = 0;
+
+ *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
+ memset(*cfd, 0, sizeof(struct canfd_frame));
+--- a/drivers/net/can/slcan.c
++++ b/drivers/net/can/slcan.c
+@@ -207,7 +207,6 @@ static void slc_bump(struct slcan *sl)
+ if (!skb)
+ return;
+
+- __net_timestamp(skb);
+ skb->dev = sl->dev;
+ skb->protocol = htons(ETH_P_CAN);
+ skb->pkt_type = PACKET_BROADCAST;
+@@ -215,6 +214,7 @@ static void slc_bump(struct slcan *sl)
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = sl->dev->ifindex;
++ can_skb_prv(skb)->skbcnt = 0;
+
+ memcpy(skb_put(skb, sizeof(struct can_frame)),
+ &cf, sizeof(struct can_frame));
+--- a/drivers/net/can/vcan.c
++++ b/drivers/net/can/vcan.c
+@@ -78,9 +78,6 @@ static void vcan_rx(struct sk_buff *skb,
+ skb->dev = dev;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+- if (!(skb->tstamp.tv64))
+- __net_timestamp(skb);
+-
+ netif_rx_ni(skb);
+ }
+
+--- a/include/linux/can/skb.h
++++ b/include/linux/can/skb.h
+@@ -27,10 +27,12 @@
+ /**
+ * struct can_skb_priv - private additional data inside CAN sk_buffs
+ * @ifindex: ifindex of the first interface the CAN frame appeared on
++ * @skbcnt: atomic counter to have an unique id together with skb pointer
+ * @cf: align to the following CAN frame at skb->data
+ */
+ struct can_skb_priv {
+ int ifindex;
++ int skbcnt;
+ struct can_frame cf[0];
+ };
+
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -89,6 +89,8 @@ struct timer_list can_stattimer; /* ti
+ struct s_stats can_stats; /* packet statistics */
+ struct s_pstats can_pstats; /* receive list statistics */
+
++static atomic_t skbcounter = ATOMIC_INIT(0);
++
+ /*
+ * af_can socket functions
+ */
+@@ -310,12 +312,8 @@ int can_send(struct sk_buff *skb, int lo
+ return err;
+ }
+
+- if (newskb) {
+- if (!(newskb->tstamp.tv64))
+- __net_timestamp(newskb);
+-
++ if (newskb)
+ netif_rx_ni(newskb);
+- }
+
+ /* update statistics */
+ can_stats.tx_frames++;
+@@ -683,6 +681,10 @@ static void can_receive(struct sk_buff *
+ can_stats.rx_frames++;
+ can_stats.rx_frames_delta++;
+
++ /* create non-zero unique skb identifier together with *skb */
++ while (!(can_skb_prv(skb)->skbcnt))
++ can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);
++
+ rcu_read_lock();
+
+ /* deliver the packet to sockets listening on all devices */
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -261,6 +261,7 @@ static void bcm_can_tx(struct bcm_op *op
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
++ can_skb_prv(skb)->skbcnt = 0;
+
+ memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
+
+@@ -1217,6 +1218,7 @@ static int bcm_tx_send(struct msghdr *ms
+ }
+
+ can_skb_prv(skb)->ifindex = dev->ifindex;
++ can_skb_prv(skb)->skbcnt = 0;
+ skb->dev = dev;
+ can_skb_set_owner(skb, sk);
+ err = can_send(skb, 1); /* send with loopback */
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -75,7 +75,7 @@ MODULE_ALIAS("can-proto-1");
+ */
+
+ struct uniqframe {
+- ktime_t tstamp;
++ int skbcnt;
+ const struct sk_buff *skb;
+ unsigned int join_rx_count;
+ };
+@@ -133,7 +133,7 @@ static void raw_rcv(struct sk_buff *oskb
+
+ /* eliminate multiple filter matches for the same skb */
+ if (this_cpu_ptr(ro->uniq)->skb == oskb &&
+- ktime_equal(this_cpu_ptr(ro->uniq)->tstamp, oskb->tstamp)) {
++ this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
+ if (ro->join_filters) {
+ this_cpu_inc(ro->uniq->join_rx_count);
+ /* drop frame until all enabled filters matched */
+@@ -144,7 +144,7 @@ static void raw_rcv(struct sk_buff *oskb
+ }
+ } else {
+ this_cpu_ptr(ro->uniq)->skb = oskb;
+- this_cpu_ptr(ro->uniq)->tstamp = oskb->tstamp;
++ this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
+ this_cpu_ptr(ro->uniq)->join_rx_count = 1;
+ /* drop first frame to check all enabled filters? */
+ if (ro->join_filters && ro->count > 1)
+@@ -749,6 +749,7 @@ static int raw_sendmsg(struct socket *so
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
++ can_skb_prv(skb)->skbcnt = 0;
+
+ err = memcpy_from_msg(skb_put(skb, size), msg, size);
+ if (err < 0)
--- /dev/null
+From 2c069a118fe1d80c47dca84e1561045fc7f3cc9e Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 10 Jul 2015 09:04:25 +1000
+Subject: cxl: Check if afu is not null in cxl_slbia
+
+From: Daniel Axtens <dja@axtens.net>
+
+commit 2c069a118fe1d80c47dca84e1561045fc7f3cc9e upstream.
+
+The pointer to an AFU in the adapter's list of AFUs can be null
+if we're in the process of removing AFUs. The afu_list_lock
+doesn't guard against this.
+
+Say we have 2 slices, and we're in the process of removing cxl.
+ - We remove the AFUs in order (see cxl_remove). In cxl_remove_afu
+ for AFU 0, we take the lock, set adapter->afu[0] = NULL, and
+ release the lock.
+ - Then we get an slbia. In cxl_slbia we take the lock, and set
+ afu = adapter->afu[0], which is NULL.
+ - Therefore our attempt to check afu->enabled will blow up.
+
+Therefore, check if afu is a null pointer before dereferencing it.
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Acked-by: Michael Neuling <mikey@neuling.org>
+Acked-by: Ian Munsie <imunsie@au1.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/cxl/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/misc/cxl/main.c
++++ b/drivers/misc/cxl/main.c
+@@ -73,7 +73,7 @@ static inline void cxl_slbia_core(struct
+ spin_lock(&adapter->afu_list_lock);
+ for (slice = 0; slice < adapter->slices; slice++) {
+ afu = adapter->afu[slice];
+- if (!afu->enabled)
++ if (!afu || !afu->enabled)
+ continue;
+ rcu_read_lock();
+ idr_for_each_entry(&afu->contexts_idr, ctx, id)
--- /dev/null
+From 10a5894f2dedd8a26b3132497445b314c0d952c4 Mon Sep 17 00:00:00 2001
+From: Ian Munsie <imunsie@au1.ibm.com>
+Date: Tue, 7 Jul 2015 15:45:45 +1000
+Subject: cxl: Fix off by one error allowing subsequent mmap page to be accessed
+
+From: Ian Munsie <imunsie@au1.ibm.com>
+
+commit 10a5894f2dedd8a26b3132497445b314c0d952c4 upstream.
+
+It was discovered that if a process mmaped their problem state area they
+were able to access one page more than expected, potentially allowing
+them to access the problem state area of an unrelated process.
+
+This was due to a simple off by one error in the mmap fault handler
+introduced in 0712dc7e73e59d79bcead5d5520acf4e9e917e87 ("cxl: Fix issues
+when unmapping contexts"), which is fixed in this patch.
+
+Fixes: 0712dc7e73e5 ("cxl: Fix issues when unmapping contexts")
+Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
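+A standalone model of the bounds check (illustrative sizes, not the cxl
+structures): fault offsets are page-granular, so with a 4-page area the
+valid offsets are 0..3*PAGE_SIZE and 'offset > size' wrongly admits the
+page at offset == size:
+
+  #include <stdio.h>
+
+  #define PAGE_SIZE 4096UL
+
+  int main(void)
+  {
+          unsigned long ps_size = 4 * PAGE_SIZE;
+
+          for (unsigned long off = 0; off <= ps_size; off += PAGE_SIZE)
+                  printf("offset %5lu: '>' says %s, '>=' says %s\n", off,
+                         off > ps_size ? "SIGBUS" : "map",
+                         off >= ps_size ? "SIGBUS" : "map");
+          return 0;
+  }
+
+At off == ps_size the old check still maps a page; the new check raises
+SIGBUS as intended.
+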
+---
+ drivers/misc/cxl/context.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/misc/cxl/context.c
++++ b/drivers/misc/cxl/context.c
+@@ -113,11 +113,11 @@ static int cxl_mmap_fault(struct vm_area
+
+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+ area = ctx->afu->psn_phys;
+- if (offset > ctx->afu->adapter->ps_size)
++ if (offset >= ctx->afu->adapter->ps_size)
+ return VM_FAULT_SIGBUS;
+ } else {
+ area = ctx->psn_phys;
+- if (offset > ctx->psn_size)
++ if (offset >= ctx->psn_size)
+ return VM_FAULT_SIGBUS;
+ }
+
--- /dev/null
+From cccf34e9411c41b0cbfb41980fe55fc8e7c98fd2 Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Fri, 10 Jul 2015 09:29:10 +0100
+Subject: MIPS: c-r4k: Fix cache flushing for MT cores
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit cccf34e9411c41b0cbfb41980fe55fc8e7c98fd2 upstream.
+
+MT_SMP is not the only SMP option for MT cores. The MT_SMP option
+allows more than one VPE per core to appear as a secondary CPU in the
+system. Because of how the CM works, it propagates the address-based
+cache ops to the secondary cores but not the index-based ones.
+Because of that, the code does not use IPIs to flush the L1 caches on
+secondary cores, because the CM would have done that already. However,
+the CM functionality is independent of the type of SMP kernel, so even
+in non-MT kernels IPIs are not necessary. As a result, we change the
+conditional to depend on the CM presence. Moreover, since VPEs on the
+same core share the same L1 caches, there is no need to send an IPI to
+all of them, so we calculate a suitable cpumask with only one VPE per
+core.
+
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/10654/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
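+A standalone model of the mask calculation (illustrative types, not the
+kernel cpumask API): keep only the first online CPU seen for each
+(package, core) pair, since sibling VPEs share the L1 caches:
+
+  #include <stdio.h>
+
+  struct cpu { int package, core; };
+
+  int main(void)
+  {
+          /* e.g. 2 cores with 2 VPEs each */
+          struct cpu cpus[] = { {0, 0}, {0, 0}, {0, 1}, {0, 1} };
+          int n = 4, mask = 0;
+
+          for (int i = 0; i < n; i++) {
+                  int core_present = 0;
+
+                  for (int k = 0; k < n; k++)
+                          if ((mask & (1 << k)) &&
+                              cpus[i].package == cpus[k].package &&
+                              cpus[i].core == cpus[k].core)
+                                  core_present = 1;
+                  if (!core_present)
+                          mask |= 1 << i;
+          }
+          printf("cpu_foreign_map = 0x%x\n", mask);  /* prints 0x5 */
+          return 0;
+  }
+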
+---
+ arch/mips/include/asm/smp.h | 1 +
+ arch/mips/kernel/smp.c | 44 +++++++++++++++++++++++++++++++++++++++++++-
+ arch/mips/mm/c-r4k.c | 14 +++++++++++---
+ 3 files changed, 55 insertions(+), 4 deletions(-)
+
+--- a/arch/mips/include/asm/smp.h
++++ b/arch/mips/include/asm/smp.h
+@@ -23,6 +23,7 @@
+ extern int smp_num_siblings;
+ extern cpumask_t cpu_sibling_map[];
+ extern cpumask_t cpu_core_map[];
++extern cpumask_t cpu_foreign_map;
+
+ #define raw_smp_processor_id() (current_thread_info()->cpu)
+
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -63,6 +63,13 @@ EXPORT_SYMBOL(cpu_sibling_map);
+ cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+ EXPORT_SYMBOL(cpu_core_map);
+
++/*
++ * A logcal cpu mask containing only one VPE per core to
++ * reduce the number of IPIs on large MT systems.
++ */
++cpumask_t cpu_foreign_map __read_mostly;
++EXPORT_SYMBOL(cpu_foreign_map);
++
+ /* representing cpus for which sibling maps can be computed */
+ static cpumask_t cpu_sibling_setup_map;
+
+@@ -103,6 +110,29 @@ static inline void set_cpu_core_map(int
+ }
+ }
+
++/*
++ * Calculate a new cpu_foreign_map mask whenever a
++ * new cpu appears or disappears.
++ */
++static inline void calculate_cpu_foreign_map(void)
++{
++ int i, k, core_present;
++ cpumask_t temp_foreign_map;
++
++ /* Re-calculate the mask */
++ for_each_online_cpu(i) {
++ core_present = 0;
++ for_each_cpu(k, &temp_foreign_map)
++ if (cpu_data[i].package == cpu_data[k].package &&
++ cpu_data[i].core == cpu_data[k].core)
++ core_present = 1;
++ if (!core_present)
++ cpumask_set_cpu(i, &temp_foreign_map);
++ }
++
++ cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
++}
++
+ struct plat_smp_ops *mp_ops;
+ EXPORT_SYMBOL(mp_ops);
+
+@@ -146,6 +176,8 @@ asmlinkage void start_secondary(void)
+ set_cpu_sibling_map(cpu);
+ set_cpu_core_map(cpu);
+
++ calculate_cpu_foreign_map();
++
+ cpumask_set_cpu(cpu, &cpu_callin_map);
+
+ synchronise_count_slave(cpu);
+@@ -173,9 +205,18 @@ void __irq_entry smp_call_function_inter
+ static void stop_this_cpu(void *dummy)
+ {
+ /*
+- * Remove this CPU:
++ * Remove this CPU. Be a bit slow here and
++ * set the bits for every online CPU so we don't miss
++ * any IPI whilst taking this VPE down.
+ */
++
++ cpumask_copy(&cpu_foreign_map, cpu_online_mask);
++
++ /* Make it visible to every other CPU */
++ smp_mb();
++
+ set_cpu_online(smp_processor_id(), false);
++ calculate_cpu_foreign_map();
+ local_irq_disable();
+ while (1);
+ }
+@@ -197,6 +238,7 @@ void __init smp_prepare_cpus(unsigned in
+ mp_ops->prepare_cpus(max_cpus);
+ set_cpu_sibling_map(0);
+ set_cpu_core_map(0);
++ calculate_cpu_foreign_map();
+ #ifndef CONFIG_HOTPLUG_CPU
+ init_cpu_present(cpu_possible_mask);
+ #endif
+--- a/arch/mips/mm/c-r4k.c
++++ b/arch/mips/mm/c-r4k.c
+@@ -37,6 +37,7 @@
+ #include <asm/cacheflush.h> /* for run_uncached() */
+ #include <asm/traps.h>
+ #include <asm/dma-coherence.h>
++#include <asm/mips-cm.h>
+
+ /*
+ * Special Variant of smp_call_function for use by cache functions:
+@@ -51,9 +52,16 @@ static inline void r4k_on_each_cpu(void
+ {
+ preempt_disable();
+
+-#ifndef CONFIG_MIPS_MT_SMP
+- smp_call_function(func, info, 1);
+-#endif
++ /*
++ * The Coherent Manager propagates address-based cache ops to other
++ * cores but not index-based ops. However, r4k_on_each_cpu is used
++ * in both cases so there is no easy way to tell what kind of op is
++ * executed to the other cores. The best we can probably do is
++ * to restrict that call when a CM is not present because both
++ * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
++ */
++ if (!mips_cm_present())
++ smp_call_function_many(&cpu_foreign_map, func, info, 1);
+ func(info);
+ preempt_enable();
+ }
--- /dev/null
+From b677bc03d757c7d749527cccdd2afcf34ebeeb07 Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Wed, 1 Jul 2015 09:13:33 +0100
+Subject: MIPS: cps-vec: Use macros for various arithmetics and memory operations
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit b677bc03d757c7d749527cccdd2afcf34ebeeb07 upstream.
+
+Replace lw/sw and various arithmetic instructions with macros so the
+code can work on 64-bit kernels as well.
+
+Reviewed-by: Paul Burton <paul.burton@imgtec.com>
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/10591/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
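+For reference, the PTR_* macros come from arch/mips/include/asm/asm.h and
+select the pointer-width variant of each operation, roughly (an abridged
+sketch of the header, not a verbatim quote):
+
+  #if (_MIPS_SZPTR == 32)
+  #define PTR_ADD         add
+  #define PTR_ADDU        addu
+  #define PTR_SUBU        subu
+  #define PTR_L           lw
+  #define PTR_S           sw
+  #endif
+
+  #if (_MIPS_SZPTR == 64)
+  #define PTR_ADD         dadd
+  #define PTR_ADDU        daddu
+  #define PTR_SUBU        dsubu
+  #define PTR_L           ld
+  #define PTR_S           sd
+  #endif
+
+so the same assembly source performs 32-bit operations on 32-bit kernels
+and 64-bit operations on 64-bit kernels.
+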
+---
+ arch/mips/kernel/cps-vec.S | 32 ++++++++++++++++----------------
+ 1 file changed, 16 insertions(+), 16 deletions(-)
+
+--- a/arch/mips/kernel/cps-vec.S
++++ b/arch/mips/kernel/cps-vec.S
+@@ -108,9 +108,9 @@ not_nmi:
+ mul t1, t1, t2
+
+ li a0, CKSEG0
+- add a1, a0, t1
++ PTR_ADD a1, a0, t1
+ 1: cache Index_Store_Tag_I, 0(a0)
+- add a0, a0, t0
++ PTR_ADD a0, a0, t0
+ bne a0, a1, 1b
+ nop
+ icache_done:
+@@ -135,11 +135,11 @@ icache_done:
+ mul t1, t1, t2
+
+ li a0, CKSEG0
+- addu a1, a0, t1
+- subu a1, a1, t0
++ PTR_ADDU a1, a0, t1
++ PTR_SUBU a1, a1, t0
+ 1: cache Index_Store_Tag_D, 0(a0)
+ bne a0, a1, 1b
+- add a0, a0, t0
++ PTR_ADD a0, a0, t0
+ dcache_done:
+
+ /* Set Kseg0 CCA to that in s0 */
+@@ -152,7 +152,7 @@ dcache_done:
+
+ /* Enter the coherent domain */
+ li t0, 0xff
+- sw t0, GCR_CL_COHERENCE_OFS(v1)
++ PTR_S t0, GCR_CL_COHERENCE_OFS(v1)
+ ehb
+
+ /* Jump to kseg0 */
+@@ -178,9 +178,9 @@ dcache_done:
+ nop
+
+ /* Off we go! */
+- lw t1, VPEBOOTCFG_PC(v0)
+- lw gp, VPEBOOTCFG_GP(v0)
+- lw sp, VPEBOOTCFG_SP(v0)
++ PTR_L t1, VPEBOOTCFG_PC(v0)
++ PTR_L gp, VPEBOOTCFG_GP(v0)
++ PTR_L sp, VPEBOOTCFG_SP(v0)
+ jr t1
+ nop
+ END(mips_cps_core_entry)
+@@ -299,15 +299,15 @@ LEAF(mips_cps_core_init)
+ LEAF(mips_cps_boot_vpes)
+ /* Retrieve CM base address */
+ PTR_LA t0, mips_cm_base
+- lw t0, 0(t0)
++ PTR_L t0, 0(t0)
+
+ /* Calculate a pointer to this cores struct core_boot_config */
+- lw t0, GCR_CL_ID_OFS(t0)
++ PTR_L t0, GCR_CL_ID_OFS(t0)
+ li t1, COREBOOTCFG_SIZE
+ mul t0, t0, t1
+ PTR_LA t1, mips_cps_core_bootcfg
+- lw t1, 0(t1)
+- addu t0, t0, t1
++ PTR_L t1, 0(t1)
++ PTR_ADDU t0, t0, t1
+
+ /* Calculate this VPEs ID. If the core doesn't support MT use 0 */
+ has_mt ta2, 1f
+@@ -334,8 +334,8 @@ LEAF(mips_cps_boot_vpes)
+ 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */
+ li t1, VPEBOOTCFG_SIZE
+ mul v0, t9, t1
+- lw ta3, COREBOOTCFG_VPECONFIG(t0)
+- addu v0, v0, ta3
++ PTR_L ta3, COREBOOTCFG_VPECONFIG(t0)
++ PTR_ADDU v0, v0, ta3
+
+ #ifdef CONFIG_MIPS_MT
+
+@@ -360,7 +360,7 @@ LEAF(mips_cps_boot_vpes)
+ ehb
+
+ /* Loop through each VPE */
+- lw ta2, COREBOOTCFG_VPEMASK(t0)
++ PTR_L ta2, COREBOOTCFG_VPEMASK(t0)
+ move t8, ta2
+ li ta1, 0
+
--- /dev/null
+From 143fefc8f315cd10e046e6860913c421c3385cb1 Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Wed, 24 Jun 2015 09:52:01 +0100
+Subject: MIPS: Fix erroneous JR emulation for MIPS R6
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit 143fefc8f315cd10e046e6860913c421c3385cb1 upstream.
+
+Commit 5f9f41c474befb4ebbc40b27f65bb7d649241581 ("MIPS: kernel: Prepare
+the JR instruction for emulation on MIPS R6") added support for
+emulating the JR instruction on MIPS R6 cores but that introduced a bug
+which could be triggered when hitting a JALR opcode because the code used
+the wrong field in the 'r_format' struct to determine the instruction
+opcode. This led to crashes because an emulated JALR instruction was
+treated as a JR one when the R6 emulator was turned off.
+
+Fixes: 5f9f41c474be ("MIPS: kernel: Prepare the JR instruction for emulation on MIPS R6")
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/10583/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
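+For reference, JR and JALR are both R-format instructions under the
+SPECIAL major opcode (a sketch of the encoding):
+
+  31      26 25    21 20    16 15    11 10     6 5       0
+  [ opcode ] [  rs  ] [  rt  ] [  rd  ] [  sa  ] [ func  ]
+
+Both carry opcode == 0 (SPECIAL) and are distinguished by the function
+field: func == 0x08 for JR (jr_op) and func == 0x09 for JALR (jalr_op).
+Testing insn.r_format.opcode against jr_op therefore can never identify
+the instruction; the func field must be used, as the fix does.
+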
+---
+ arch/mips/math-emu/cp1emu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -451,7 +451,7 @@ static int isBranchInstr(struct pt_regs
+ /* Fall through */
+ case jr_op:
+ /* For R6, JR already emulated in jalr_op */
+- if (NO_R6EMU && insn.r_format.opcode == jr_op)
++ if (NO_R6EMU && insn.r_format.func == jr_op)
+ break;
+ *contpc = regs->regs[insn.r_format.rs];
+ return 1;
--- /dev/null
+From fcc53b5f6c38acbf5d311ffc3e0da517491c6f7b Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Thu, 16 Jul 2015 15:30:04 +0100
+Subject: MIPS: fpu.h: Allow 64-bit FPU on a 64-bit MIPS R6 CPU
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit fcc53b5f6c38acbf5d311ffc3e0da517491c6f7b upstream.
+
+Commit 6134d94923d0 ("MIPS: asm: fpu: Allow 64-bit FPU on MIPS32 R6")
+added support for a 64-bit FPU on a 32-bit MIPS R6 processor, but it
+missed the 64-bit CPU case, leading to FPU failures when requesting FR=1
+mode (which is always the case for MIPS R6 userland) while running a
+32-bit kernel on a 64-bit CPU. We also fix the MIPS R2 case.
+
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Fixes: 6134d94923d0 ("MIPS: asm: fpu: Allow 64-bit FPU on MIPS32 R6")
+Reviewed-by: Paul Burton <paul.burton@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/10734/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/fpu.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/include/asm/fpu.h
++++ b/arch/mips/include/asm/fpu.h
+@@ -74,7 +74,7 @@ static inline int __enable_fpu(enum fpu_
+ goto fr_common;
+
+ case FPU_64BIT:
+-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
++#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) \
+ || defined(CONFIG_64BIT))
+ /* we only have a 32-bit FPU */
+ return SIGFPE;
--- /dev/null
+From 717f14255a52ad445d6f0eca7d0f22f59d6ba1f8 Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Wed, 1 Jul 2015 09:13:32 +0100
+Subject: MIPS: kernel: cps-vec: Replace KSEG0 with CKSEG0
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit 717f14255a52ad445d6f0eca7d0f22f59d6ba1f8 upstream.
+
+In preparation for 64-bit CPS support, we replace KSEG0 with CKSEG0
+so the code also works on 64-bit kernels.
+
+Reviewed-by: Paul Burton <paul.burton@imgtec.com>
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/10590/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
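+For reference (a sketch of the asm/addrspace.h semantics, not a verbatim
+quote): CKSEG0 is the sign-extended compatibility segment, 0x80000000 on
+32-bit kernels but 0xffffffff80000000 on 64-bit ones, which is where a
+64-bit kernel actually runs its cached unmapped code; the plain KSEG0
+constant only yields a usable address on 32-bit builds.
+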
+---
+ arch/mips/kernel/cps-vec.S | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/kernel/cps-vec.S
++++ b/arch/mips/kernel/cps-vec.S
+@@ -107,7 +107,7 @@ not_nmi:
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+- li a0, KSEG0
++ li a0, CKSEG0
+ add a1, a0, t1
+ 1: cache Index_Store_Tag_I, 0(a0)
+ add a0, a0, t0
+@@ -134,7 +134,7 @@ icache_done:
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+- li a0, KSEG0
++ li a0, CKSEG0
+ addu a1, a0, t1
+ subu a1, a1, t0
+ 1: cache Index_Store_Tag_D, 0(a0)
--- /dev/null
+From 81a02e34ded906357deac7003fbb0d36b6cc503f Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Wed, 1 Jul 2015 09:13:29 +0100
+Subject: MIPS: kernel: cps-vec: Replace 'la' macro with PTR_LA
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit 81a02e34ded906357deac7003fbb0d36b6cc503f upstream.
+
+The PTR_LA macro will pick the correct "la" or "dla" macro to
+load an address into a register. This gets rid of the following
+warnings (and others) when building a 64-bit CPS kernel:
+
+arch/mips/kernel/cps-vec.S:63: Warning: la used to load 64-bit address
+arch/mips/kernel/cps-vec.S:159: Warning: la used to load 64-bit address
+arch/mips/kernel/cps-vec.S:220: Warning: la used to load 64-bit address
+arch/mips/kernel/cps-vec.S:240: Warning: la used to load 64-bit address
+[...]
+
+Reviewed-by: Paul Burton <paul.burton@imgtec.com>
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/10587/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
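+For reference, PTR_LA is defined alongside the other PTR_* macros in
+arch/mips/include/asm/asm.h, roughly (an abridged sketch):
+
+  #if (_MIPS_SZPTR == 32)
+  #define PTR_LA          la
+  #endif
+
+  #if (_MIPS_SZPTR == 64)
+  #define PTR_LA          dla
+  #endif
+
+so 64-bit builds emit "dla", which loads a full 64-bit address and keeps
+the assembler quiet.
+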
+---
+ arch/mips/kernel/cps-vec.S | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/arch/mips/kernel/cps-vec.S
++++ b/arch/mips/kernel/cps-vec.S
+@@ -60,7 +60,7 @@ LEAF(mips_cps_core_entry)
+ nop
+
+ /* This is an NMI */
+- la k0, nmi_handler
++ PTR_LA k0, nmi_handler
+ jr k0
+ nop
+
+@@ -156,7 +156,7 @@ dcache_done:
+ ehb
+
+ /* Jump to kseg0 */
+- la t0, 1f
++ PTR_LA t0, 1f
+ jr t0
+ nop
+
+@@ -217,7 +217,7 @@ LEAF(excep_intex)
+
+ .org 0x480
+ LEAF(excep_ejtag)
+- la k0, ejtag_debug_handler
++ PTR_LA k0, ejtag_debug_handler
+ jr k0
+ nop
+ END(excep_ejtag)
+@@ -237,7 +237,7 @@ LEAF(mips_cps_core_init)
+
+ /* ...and for the moment only 1 VPE */
+ dvpe
+- la t1, 1f
++ PTR_LA t1, 1f
+ jr.hb t1
+ nop
+
+@@ -298,14 +298,14 @@ LEAF(mips_cps_core_init)
+
+ LEAF(mips_cps_boot_vpes)
+ /* Retrieve CM base address */
+- la t0, mips_cm_base
++ PTR_LA t0, mips_cm_base
+ lw t0, 0(t0)
+
+ /* Calculate a pointer to this cores struct core_boot_config */
+ lw t0, GCR_CL_ID_OFS(t0)
+ li t1, COREBOOTCFG_SIZE
+ mul t0, t0, t1
+- la t1, mips_cps_core_bootcfg
++ PTR_LA t1, mips_cps_core_bootcfg
+ lw t1, 0(t1)
+ addu t0, t0, t1
+
+@@ -351,7 +351,7 @@ LEAF(mips_cps_boot_vpes)
+
+ 1: /* Enter VPE configuration state */
+ dvpe
+- la t1, 1f
++ PTR_LA t1, 1f
+ jr.hb t1
+ nop
+ 1: mfc0 t1, CP0_MVPCONTROL
+@@ -445,7 +445,7 @@ LEAF(mips_cps_boot_vpes)
+ /* This VPE should be offline, halt the TC */
+ li t0, TCHALT_H
+ mtc0 t0, CP0_TCHALT
+- la t0, 1f
++ PTR_LA t0, 1f
+ 1: jr.hb t0
+ nop
+
+@@ -466,10 +466,10 @@ LEAF(mips_cps_boot_vpes)
+ .set noat
+ lw $1, TI_CPU(gp)
+ sll $1, $1, LONGLOG
+- la \dest, __per_cpu_offset
++ PTR_LA \dest, __per_cpu_offset
+ addu $1, $1, \dest
+ lw $1, 0($1)
+- la \dest, cps_cpu_state
++ PTR_LA \dest, cps_cpu_state
+ addu \dest, \dest, $1
+ .set pop
+ .endm
--- /dev/null
+From 977e043d5ea1270ce985e4c165724ff91dc3c3e2 Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Wed, 1 Jul 2015 09:13:30 +0100
+Subject: MIPS: kernel: cps-vec: Replace mips32r2 ISA level with mips64r2
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit 977e043d5ea1270ce985e4c165724ff91dc3c3e2 upstream.
+
+mips32r2 is a subset of mips64r2, so we replace mips32r2 with mips64r2
+in preparation for 64-bit CPS support.
+
+Reviewed-by: Paul Burton <paul.burton@imgtec.com>
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/10588/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/cps-vec.S | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/kernel/cps-vec.S
++++ b/arch/mips/kernel/cps-vec.S
+@@ -229,7 +229,7 @@ LEAF(mips_cps_core_init)
+ nop
+
+ .set push
+- .set mips32r2
++ .set mips64r2
+ .set mt
+
+ /* Only allow 1 TC per VPE to execute... */
+@@ -346,7 +346,7 @@ LEAF(mips_cps_boot_vpes)
+ nop
+
+ .set push
+- .set mips32r2
++ .set mips64r2
+ .set mt
+
+ 1: /* Enter VPE configuration state */
--- /dev/null
+From 0586ac75cd0746a4d5c43372dabcea8739ae0176 Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Wed, 1 Jul 2015 09:13:31 +0100
+Subject: MIPS: kernel: cps-vec: Use ta0-ta3 pseudo-registers for 64-bit
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit 0586ac75cd0746a4d5c43372dabcea8739ae0176 upstream.
+
+The cps-vec code assumes the O32 ABI and uses t4-t7 in quite a few places.
+This breaks the build on 64-bit. As a result, use the ta0-ta3
+pseudo-registers to make the code compatible with 64-bit.
+
+Reviewed-by: Paul Burton <paul.burton@imgtec.com>
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/10589/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
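+For reference, a sketch of how the ta0-ta3 aliases map per ABI (following
+arch/mips/include/asm/regdef.h; the t4-t7 names simply do not exist under
+the 64-bit ABIs, which is what breaks the build):
+
+  alias   O32 mapping    N32/N64 mapping
+  ta0     t4 ($12)       a4 ($8)
+  ta1     t5 ($13)       a5 ($9)
+  ta2     t6 ($14)       a6 ($10)
+  ta3     t7 ($15)       a7 ($11)
+
+Either way these are caller-clobbered registers the CPS entry code may
+freely use, so naming them ta0-ta3 assembles under both ABIs.
+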
+---
+ arch/mips/kernel/cps-vec.S | 42 +++++++++++++++++++++---------------------
+ 1 file changed, 21 insertions(+), 21 deletions(-)
+
+--- a/arch/mips/kernel/cps-vec.S
++++ b/arch/mips/kernel/cps-vec.S
+@@ -250,25 +250,25 @@ LEAF(mips_cps_core_init)
+ mfc0 t0, CP0_MVPCONF0
+ srl t0, t0, MVPCONF0_PVPE_SHIFT
+ andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
+- addiu t7, t0, 1
++ addiu ta3, t0, 1
+
+ /* If there's only 1, we're done */
+ beqz t0, 2f
+ nop
+
+ /* Loop through each VPE within this core */
+- li t5, 1
++ li ta1, 1
+
+ 1: /* Operate on the appropriate TC */
+- mtc0 t5, CP0_VPECONTROL
++ mtc0 ta1, CP0_VPECONTROL
+ ehb
+
+ /* Bind TC to VPE (1:1 TC:VPE mapping) */
+- mttc0 t5, CP0_TCBIND
++ mttc0 ta1, CP0_TCBIND
+
+ /* Set exclusive TC, non-active, master */
+ li t0, VPECONF0_MVP
+- sll t1, t5, VPECONF0_XTC_SHIFT
++ sll t1, ta1, VPECONF0_XTC_SHIFT
+ or t0, t0, t1
+ mttc0 t0, CP0_VPECONF0
+
+@@ -280,8 +280,8 @@ LEAF(mips_cps_core_init)
+ mttc0 t0, CP0_TCHALT
+
+ /* Next VPE */
+- addiu t5, t5, 1
+- slt t0, t5, t7
++ addiu ta1, ta1, 1
++ slt t0, ta1, ta3
+ bnez t0, 1b
+ nop
+
+@@ -310,7 +310,7 @@ LEAF(mips_cps_boot_vpes)
+ addu t0, t0, t1
+
+ /* Calculate this VPEs ID. If the core doesn't support MT use 0 */
+- has_mt t6, 1f
++ has_mt ta2, 1f
+ li t9, 0
+
+ /* Find the number of VPEs present in the core */
+@@ -334,13 +334,13 @@ LEAF(mips_cps_boot_vpes)
+ 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */
+ li t1, VPEBOOTCFG_SIZE
+ mul v0, t9, t1
+- lw t7, COREBOOTCFG_VPECONFIG(t0)
+- addu v0, v0, t7
++ lw ta3, COREBOOTCFG_VPECONFIG(t0)
++ addu v0, v0, ta3
+
+ #ifdef CONFIG_MIPS_MT
+
+ /* If the core doesn't support MT then return */
+- bnez t6, 1f
++ bnez ta2, 1f
+ nop
+ jr ra
+ nop
+@@ -360,12 +360,12 @@ LEAF(mips_cps_boot_vpes)
+ ehb
+
+ /* Loop through each VPE */
+- lw t6, COREBOOTCFG_VPEMASK(t0)
+- move t8, t6
+- li t5, 0
++ lw ta2, COREBOOTCFG_VPEMASK(t0)
++ move t8, ta2
++ li ta1, 0
+
+ /* Check whether the VPE should be running. If not, skip it */
+-1: andi t0, t6, 1
++1: andi t0, ta2, 1
+ beqz t0, 2f
+ nop
+
+@@ -373,7 +373,7 @@ LEAF(mips_cps_boot_vpes)
+ mfc0 t0, CP0_VPECONTROL
+ ori t0, t0, VPECONTROL_TARGTC
+ xori t0, t0, VPECONTROL_TARGTC
+- or t0, t0, t5
++ or t0, t0, ta1
+ mtc0 t0, CP0_VPECONTROL
+ ehb
+
+@@ -384,8 +384,8 @@ LEAF(mips_cps_boot_vpes)
+
+ /* Calculate a pointer to the VPEs struct vpe_boot_config */
+ li t0, VPEBOOTCFG_SIZE
+- mul t0, t0, t5
+- addu t0, t0, t7
++ mul t0, t0, ta1
++ addu t0, t0, ta3
+
+ /* Set the TC restart PC */
+ lw t1, VPEBOOTCFG_PC(t0)
+@@ -423,9 +423,9 @@ LEAF(mips_cps_boot_vpes)
+ mttc0 t0, CP0_VPECONF0
+
+ /* Next VPE */
+-2: srl t6, t6, 1
+- addiu t5, t5, 1
+- bnez t6, 1b
++2: srl ta2, ta2, 1
++ addiu ta1, ta1, 1
++ bnez ta2, 1b
+ nop
+
+ /* Leave VPE configuration state */
--- /dev/null
+From fd5ed3066bb2f47814fe53cdc56d11a678551ae1 Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Wed, 1 Jul 2015 09:13:28 +0100
+Subject: MIPS: kernel: smp-cps: Fix 64-bit compatibility errors due to pointer casting
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit fd5ed3066bb2f47814fe53cdc56d11a678551ae1 upstream.
+
+Commit 1d8f1f5a780a ("MIPS: smp-cps: hotplug support") added hotplug
+support in the SMP/CPS implementation, but it introduced a few build
+problems on 64-bit kernels due to pointers being cast to and from 'int' C
+types. We fix this problem by using 'unsigned long' instead, which matches
+the pointer size on both 32-bit and 64-bit kernels. Finally, we fix the
+comment, since the CM base address is loaded into v1 ($3) rather than v0.
+
+Fixes the following build problems:
+
+arch/mips/kernel/smp-cps.c: In function 'wait_for_sibling_halt':
+arch/mips/kernel/smp-cps.c:366:17: error: cast from pointer to integer of
+different size [-Werror=pointer-to-int-cast]
+[...]
+arch/mips/kernel/smp-cps.c: In function 'cps_cpu_die':
+arch/mips/kernel/smp-cps.c:427:13: error: cast to pointer
+from integer of different size [-Werror=int-to-pointer-cast]
+
+cc1: all warnings being treated as errors
+
+Fixes: 1d8f1f5a780a ("MIPS: smp-cps: hotplug support")
+Reviewed-by: Paul Burton <paul.burton@imgtec.com>
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/10586/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
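+The underlying C rule, shown as a standalone sketch (not kernel code): a
+direct cast between 'int' and a pointer truncates or widens whenever
+their sizes differ, which is exactly what the warnings flag on 64-bit;
+round-tripping through 'unsigned long' is lossless because it has pointer
+width on both 32-bit and 64-bit Linux ABIs:
+
+  #include <stdio.h>
+
+  static void wait_for_sibling(void *ptr_cpu)
+  {
+          /* recover the small integer: pointer -> unsigned long -> int */
+          unsigned int cpu = (unsigned long)ptr_cpu;
+
+          printf("waiting for cpu %u\n", cpu);
+  }
+
+  int main(void)
+  {
+          unsigned int cpu = 3;
+
+          /* widen to unsigned long first, then convert to a pointer */
+          wait_for_sibling((void *)(unsigned long)cpu);
+          return 0;
+  }
+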
+---
+ arch/mips/kernel/smp-cps.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/mips/kernel/smp-cps.c
++++ b/arch/mips/kernel/smp-cps.c
+@@ -133,7 +133,7 @@ static void __init cps_prepare_cpus(unsi
+ /*
+ * Patch the start of mips_cps_core_entry to provide:
+ *
+- * v0 = CM base address
++ * v1 = CM base address
+ * s0 = kseg0 CCA
+ */
+ entry_code = (u32 *)&mips_cps_core_entry;
+@@ -369,7 +369,7 @@ void play_dead(void)
+
+ static void wait_for_sibling_halt(void *ptr_cpu)
+ {
+- unsigned cpu = (unsigned)ptr_cpu;
++ unsigned cpu = (unsigned long)ptr_cpu;
+ unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+ unsigned halted;
+ unsigned long flags;
+@@ -430,7 +430,7 @@ static void cps_cpu_die(unsigned int cpu
+ */
+ err = smp_call_function_single(cpu_death_sibling,
+ wait_for_sibling_halt,
+- (void *)cpu, 1);
++ (void *)(unsigned long)cpu, 1);
+ if (err)
+ panic("Failed to call remote sibling CPU\n");
+ }
--- /dev/null
+From 4e9d324d4288b082497c30bc55b8ad13acc7cf01 Mon Sep 17 00:00:00 2001
+From: Paul Burton <paul.burton@imgtec.com>
+Date: Fri, 10 Jul 2015 16:00:24 +0100
+Subject: MIPS: Require O32 FP64 support for MIPS64 with O32 compat
+
+From: Paul Burton <paul.burton@imgtec.com>
+
+commit 4e9d324d4288b082497c30bc55b8ad13acc7cf01 upstream.
+
+MIPS32r6 code requires FP64 (ie. FR=1) support. Building a kernel with
+support for MIPS32r6 binaries but without support for O32 with FP64 is
+therefore a problem which can lead to incorrectly executed userland.
+
+CONFIG_MIPS_O32_FP64_SUPPORT is already selected when the kernel is
+configured for MIPS32r6, but not when the kernel is configured for
+MIPS64r6 with O32 compat support. Select CONFIG_MIPS_O32_FP64_SUPPORT in
+such configurations to prevent building kernels which execute MIPS32r6
+userland incorrectly.
+
+Signed-off-by: Paul Burton <paul.burton@imgtec.com>
+Cc: Markos Chandras <markos.chandras@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Cc: Matthew Fortune <matthew.fortune@imgtec.com>
+Cc: linux-kernel@vger.kernel.org
+Patchwork: https://patchwork.linux-mips.org/patch/10674/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1417,6 +1417,7 @@ config CPU_MIPS64_R6
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_MSA
+ select GENERIC_CSUM
++ select MIPS_O32_FP64_SUPPORT if MIPS32_O32
+ help
+ Choose this option to build a kernel for release 6 or later of the
+ MIPS64 architecture. New MIPS processors, starting with the Warrior
--- /dev/null
+From 01ab60570427caa24b9debc369e452e86cd9beb4 Mon Sep 17 00:00:00 2001
+From: John David Anglin <dave.anglin@bell.net>
+Date: Wed, 1 Jul 2015 17:18:37 -0400
+Subject: parisc: Fix some PTE/TLB race conditions and optimize __flush_tlb_range based on timing results
+
+From: John David Anglin <dave.anglin@bell.net>
+
+commit 01ab60570427caa24b9debc369e452e86cd9beb4 upstream.
+
+The increased use of pdtlb/pitlb instructions seemed to increase the
+frequency of random segmentation faults building packages. Further, we
+had a number of cases where TLB inserts would repeatedly fail and all
+forward progress would stop. The Haskell ghc package caused a lot of
+trouble in this area. The final indication of a race in pte handling was
+this syslog entry on sibaris (C8000):
+
+ swap_free: Unused swap offset entry 00000004
+ BUG: Bad page map in process mysqld pte:00000100 pmd:019bbec5
+ addr:00000000ec464000 vm_flags:00100073 anon_vma:0000000221023828 mapping: (null) index:ec464
+ CPU: 1 PID: 9176 Comm: mysqld Not tainted 4.0.0-2-parisc64-smp #1 Debian 4.0.5-1
+ Backtrace:
+ [<0000000040173eb0>] show_stack+0x20/0x38
+ [<0000000040444424>] dump_stack+0x9c/0x110
+ [<00000000402a0d38>] print_bad_pte+0x1a8/0x278
+ [<00000000402a28b8>] unmap_single_vma+0x3d8/0x770
+ [<00000000402a4090>] zap_page_range+0xf0/0x198
+ [<00000000402ba2a4>] SyS_madvise+0x404/0x8c0
+
+Note that the pte value is 0 except for the accessed bit 0x100. This bit
+shouldn't be set without the present bit.
+
+It should be noted that the madvise system call is probably a trigger for many
+of the random segmentation faults.
+
+In looking at the kernel code, I found the following problems:
+
+1) The pte_clear define didn't take the TLB lock when clearing a pte.
+2) We didn't test the pte present bit inside the lock in the exception
+support.
+3) The pte and tlb locks needed to be merged in order to ensure
+consistency between the page table and the TLB. This also has the
+effect of serializing TLB broadcasts on SMP systems (the resulting
+locking pattern is sketched below).
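+
+The essence of the fix (see the set_pte_at() hunk in pgtable.h below)
+is to make the PTE update and the TLB purge one critical section under
+a single lock, roughly:
+
+ spin_lock_irqsave(&pa_tlb_lock, flags);
+ old_pte = *ptep;
+ set_pte(ptep, pteval);
+ if (pte_inserted(old_pte))      /* present and accessed */
+         purge_tlb_entries(mm, addr);
+ spin_unlock_irqrestore(&pa_tlb_lock, flags);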
+
+The attached change implements the above and a few other tweaks to try
+to improve performance. Based on the timing code, TLB purges are very
+slow (e.g., ~209 cycles per page on rp3440). Thus, I think it is
+beneficial to test the split_tlb variable to avoid duplicate purges
+(sketched below). Probably, all PA 2.0 machines have combined TLBs.
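+
+A sketch of the new purge_tlb_entries() (also in the pgtable.h hunk
+below): the pitlb purge is only issued when the TLBs are actually
+split:
+
+ mtsp(mm->context, 1);
+ pdtlb(addr);
+ if (unlikely(split_tlb))
+         pitlb(addr);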
+
+I dropped using __flush_tlb_range in flush_tlb_mm as I realized all
+applications and most threads have a stack size that is too large to
+make this useful. I added some comments to this effect.
+
+Since implementing 1 through 3, I haven't had any random segmentation
+faults on mx3210 (rp3440) in about one week of building code and running
+as a Debian buildd.
+
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/pgtable.h | 57 ++++++++----
+ arch/parisc/include/asm/tlbflush.h | 53 ++++++------
+ arch/parisc/kernel/cache.c | 105 +++++++++++++++--------
+ arch/parisc/kernel/entry.S | 163 +++++++++++++++++--------------------
+ arch/parisc/kernel/traps.c | 4
+ 5 files changed, 213 insertions(+), 169 deletions(-)
+
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -16,7 +16,7 @@
+ #include <asm/processor.h>
+ #include <asm/cache.h>
+
+-extern spinlock_t pa_dbit_lock;
++extern spinlock_t pa_tlb_lock;
+
+ /*
+ * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+@@ -33,6 +33,19 @@ extern spinlock_t pa_dbit_lock;
+ */
+ #define kern_addr_valid(addr) (1)
+
++/* Purge data and instruction TLB entries. Must be called holding
++ * the pa_tlb_lock. The TLB purge instructions are slow on SMP
++ * machines since the purge must be broadcast to all CPUs.
++ */
++
++static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
++{
++ mtsp(mm->context, 1);
++ pdtlb(addr);
++ if (unlikely(split_tlb))
++ pitlb(addr);
++}
++
+ /* Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+@@ -42,15 +55,20 @@ extern spinlock_t pa_dbit_lock;
+ *(pteptr) = (pteval); \
+ } while(0)
+
+-extern void purge_tlb_entries(struct mm_struct *, unsigned long);
+-
+-#define set_pte_at(mm, addr, ptep, pteval) \
+- do { \
++#define pte_inserted(x) \
++ ((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED)) \
++ == (_PAGE_PRESENT|_PAGE_ACCESSED))
++
++#define set_pte_at(mm, addr, ptep, pteval) \
++ do { \
++ pte_t old_pte; \
+ unsigned long flags; \
+- spin_lock_irqsave(&pa_dbit_lock, flags); \
+- set_pte(ptep, pteval); \
+- purge_tlb_entries(mm, addr); \
+- spin_unlock_irqrestore(&pa_dbit_lock, flags); \
++ spin_lock_irqsave(&pa_tlb_lock, flags); \
++ old_pte = *ptep; \
++ set_pte(ptep, pteval); \
++ if (pte_inserted(old_pte)) \
++ purge_tlb_entries(mm, addr); \
++ spin_unlock_irqrestore(&pa_tlb_lock, flags); \
+ } while (0)
+
+ #endif /* !__ASSEMBLY__ */
+@@ -268,7 +286,7 @@ extern unsigned long *empty_zero_page;
+
+ #define pte_none(x) (pte_val(x) == 0)
+ #define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+-#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0)
++#define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0))
+
+ #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
+ #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
+@@ -435,15 +453,15 @@ static inline int ptep_test_and_clear_yo
+ if (!pte_young(*ptep))
+ return 0;
+
+- spin_lock_irqsave(&pa_dbit_lock, flags);
++ spin_lock_irqsave(&pa_tlb_lock, flags);
+ pte = *ptep;
+ if (!pte_young(pte)) {
+- spin_unlock_irqrestore(&pa_dbit_lock, flags);
++ spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ return 0;
+ }
+ set_pte(ptep, pte_mkold(pte));
+ purge_tlb_entries(vma->vm_mm, addr);
+- spin_unlock_irqrestore(&pa_dbit_lock, flags);
++ spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ return 1;
+ }
+
+@@ -453,11 +471,12 @@ static inline pte_t ptep_get_and_clear(s
+ pte_t old_pte;
+ unsigned long flags;
+
+- spin_lock_irqsave(&pa_dbit_lock, flags);
++ spin_lock_irqsave(&pa_tlb_lock, flags);
+ old_pte = *ptep;
+- pte_clear(mm,addr,ptep);
+- purge_tlb_entries(mm, addr);
+- spin_unlock_irqrestore(&pa_dbit_lock, flags);
++ set_pte(ptep, __pte(0));
++ if (pte_inserted(old_pte))
++ purge_tlb_entries(mm, addr);
++ spin_unlock_irqrestore(&pa_tlb_lock, flags);
+
+ return old_pte;
+ }
+@@ -465,10 +484,10 @@ static inline pte_t ptep_get_and_clear(s
+ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+ unsigned long flags;
+- spin_lock_irqsave(&pa_dbit_lock, flags);
++ spin_lock_irqsave(&pa_tlb_lock, flags);
+ set_pte(ptep, pte_wrprotect(*ptep));
+ purge_tlb_entries(mm, addr);
+- spin_unlock_irqrestore(&pa_dbit_lock, flags);
++ spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ }
+
+ #define pte_same(A,B) (pte_val(A) == pte_val(B))
+--- a/arch/parisc/include/asm/tlbflush.h
++++ b/arch/parisc/include/asm/tlbflush.h
+@@ -13,6 +13,9 @@
+ * active at any one time on the Merced bus. This tlb purge
+ * synchronisation is fairly lightweight and harmless so we activate
+ * it on all systems not just the N class.
++
++ * It is also used to ensure PTE updates are atomic and consistent
++ * with the TLB.
+ */
+ extern spinlock_t pa_tlb_lock;
+
+@@ -24,20 +27,24 @@ extern void flush_tlb_all_local(void *);
+
+ #define smp_flush_tlb_all() flush_tlb_all()
+
++int __flush_tlb_range(unsigned long sid,
++ unsigned long start, unsigned long end);
++
++#define flush_tlb_range(vma, start, end) \
++ __flush_tlb_range((vma)->vm_mm->context, start, end)
++
++#define flush_tlb_kernel_range(start, end) \
++ __flush_tlb_range(0, start, end)
++
+ /*
+ * flush_tlb_mm()
+ *
+- * XXX This code is NOT valid for HP-UX compatibility processes,
+- * (although it will probably work 99% of the time). HP-UX
+- * processes are free to play with the space id's and save them
+- * over long periods of time, etc. so we have to preserve the
+- * space and just flush the entire tlb. We need to check the
+- * personality in order to do that, but the personality is not
+- * currently being set correctly.
+- *
+- * Of course, Linux processes could do the same thing, but
+- * we don't support that (and the compilers, dynamic linker,
+- * etc. do not do that).
++ * The code to switch to a new context is NOT valid for processes
++ * which play with the space id's. Thus, we have to preserve the
++ * space and just flush the entire tlb. However, the compilers,
++ * dynamic linker, etc, do not manipulate space id's, so there
++ * could be a significant performance benefit in switching contexts
++ * and not flushing the whole tlb.
+ */
+
+ static inline void flush_tlb_mm(struct mm_struct *mm)
+@@ -45,10 +52,18 @@ static inline void flush_tlb_mm(struct m
+ BUG_ON(mm == &init_mm); /* Should never happen */
+
+ #if 1 || defined(CONFIG_SMP)
++ /* Except for very small threads, flushing the whole TLB is
++ * faster than using __flush_tlb_range. The pdtlb and pitlb
++ * instructions are very slow because of the TLB broadcast.
++ * It might be faster to do local range flushes on all CPUs
++ * on PA 2.0 systems.
++ */
+ flush_tlb_all();
+ #else
+ /* FIXME: currently broken, causing space id and protection ids
+- * to go out of sync, resulting in faults on userspace accesses.
++ * to go out of sync, resulting in faults on userspace accesses.
++ * This approach needs further investigation since running many
++ * small applications (e.g., GCC testsuite) is faster on HP-UX.
+ */
+ if (mm) {
+ if (mm->context != 0)
+@@ -65,22 +80,12 @@ static inline void flush_tlb_page(struct
+ {
+ unsigned long flags, sid;
+
+- /* For one page, it's not worth testing the split_tlb variable */
+-
+- mb();
+ sid = vma->vm_mm->context;
+ purge_tlb_start(flags);
+ mtsp(sid, 1);
+ pdtlb(addr);
+- pitlb(addr);
++ if (unlikely(split_tlb))
++ pitlb(addr);
+ purge_tlb_end(flags);
+ }
+-
+-void __flush_tlb_range(unsigned long sid,
+- unsigned long start, unsigned long end);
+-
+-#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)
+-
+-#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
+-
+ #endif
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -342,12 +342,15 @@ EXPORT_SYMBOL(flush_data_cache_local);
+ EXPORT_SYMBOL(flush_kernel_icache_range_asm);
+
+ #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
+-int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
++static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
++
++#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
++static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
+
+ void __init parisc_setup_cache_timing(void)
+ {
+ unsigned long rangetime, alltime;
+- unsigned long size;
++ unsigned long size, start;
+
+ alltime = mfctl(16);
+ flush_data_cache();
+@@ -364,14 +367,43 @@ void __init parisc_setup_cache_timing(vo
+ /* Racy, but if we see an intermediate value, it's ok too... */
+ parisc_cache_flush_threshold = size * alltime / rangetime;
+
+- parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
++ parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
+ if (!parisc_cache_flush_threshold)
+ parisc_cache_flush_threshold = FLUSH_THRESHOLD;
+
+ if (parisc_cache_flush_threshold > cache_info.dc_size)
+ parisc_cache_flush_threshold = cache_info.dc_size;
+
+- printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
++ printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
++ parisc_cache_flush_threshold/1024);
++
++ /* calculate TLB flush threshold */
++
++ alltime = mfctl(16);
++ flush_tlb_all();
++ alltime = mfctl(16) - alltime;
++
++ size = PAGE_SIZE;
++ start = (unsigned long) _text;
++ rangetime = mfctl(16);
++ while (start < (unsigned long) _end) {
++ flush_tlb_kernel_range(start, start + PAGE_SIZE);
++ start += PAGE_SIZE;
++ size += PAGE_SIZE;
++ }
++ rangetime = mfctl(16) - rangetime;
++
++ printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
++ alltime, size, rangetime);
++
++ parisc_tlb_flush_threshold = size * alltime / rangetime;
++ parisc_tlb_flush_threshold *= num_online_cpus();
++ parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
++ if (!parisc_tlb_flush_threshold)
++ parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
++
++ printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
++ parisc_tlb_flush_threshold/1024);
+ }
+
+ extern void purge_kernel_dcache_page_asm(unsigned long);
+@@ -403,48 +435,45 @@ void copy_user_page(void *vto, void *vfr
+ }
+ EXPORT_SYMBOL(copy_user_page);
+
+-void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
++/* __flush_tlb_range()
++ *
++ * returns 1 if all TLBs were flushed.
++ */
++int __flush_tlb_range(unsigned long sid, unsigned long start,
++ unsigned long end)
+ {
+- unsigned long flags;
+-
+- /* Note: purge_tlb_entries can be called at startup with
+- no context. */
++ unsigned long flags, size;
+
+- purge_tlb_start(flags);
+- mtsp(mm->context, 1);
+- pdtlb(addr);
+- pitlb(addr);
+- purge_tlb_end(flags);
+-}
+-EXPORT_SYMBOL(purge_tlb_entries);
+-
+-void __flush_tlb_range(unsigned long sid, unsigned long start,
+- unsigned long end)
+-{
+- unsigned long npages;
+-
+- npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+- if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
++ size = (end - start);
++ if (size >= parisc_tlb_flush_threshold) {
+ flush_tlb_all();
+- else {
+- unsigned long flags;
++ return 1;
++ }
++
++ /* Purge TLB entries for small ranges using the pdtlb and
++ pitlb instructions. These instructions execute locally
++ but cause a purge request to be broadcast to other TLBs. */
++ if (likely(!split_tlb)) {
++ while (start < end) {
++ purge_tlb_start(flags);
++ mtsp(sid, 1);
++ pdtlb(start);
++ purge_tlb_end(flags);
++ start += PAGE_SIZE;
++ }
++ return 0;
++ }
+
++ /* split TLB case */
++ while (start < end) {
+ purge_tlb_start(flags);
+ mtsp(sid, 1);
+- if (split_tlb) {
+- while (npages--) {
+- pdtlb(start);
+- pitlb(start);
+- start += PAGE_SIZE;
+- }
+- } else {
+- while (npages--) {
+- pdtlb(start);
+- start += PAGE_SIZE;
+- }
+- }
++ pdtlb(start);
++ pitlb(start);
+ purge_tlb_end(flags);
++ start += PAGE_SIZE;
+ }
++ return 0;
+ }
+
+ static void cacheflush_h_tmp_function(void *dummy)
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -45,7 +45,7 @@
+ .level 2.0
+ #endif
+
+- .import pa_dbit_lock,data
++ .import pa_tlb_lock,data
+
+ /* space_to_prot macro creates a prot id from a space id */
+
+@@ -420,8 +420,8 @@
+ SHLREG %r9,PxD_VALUE_SHIFT,\pmd
+ extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+ dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+- shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
+- LDREG %r0(\pmd),\pte /* pmd is now pte */
++ shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
++ LDREG %r0(\pmd),\pte
+ bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
+ .endm
+
+@@ -453,57 +453,53 @@
+ L2_ptep \pgd,\pte,\index,\va,\fault
+ .endm
+
+- /* Acquire pa_dbit_lock lock. */
+- .macro dbit_lock spc,tmp,tmp1
++ /* Acquire pa_tlb_lock lock and recheck page is still present. */
++ .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
+ #ifdef CONFIG_SMP
+ cmpib,COND(=),n 0,\spc,2f
+- load32 PA(pa_dbit_lock),\tmp
++ load32 PA(pa_tlb_lock),\tmp
+ 1: LDCW 0(\tmp),\tmp1
+ cmpib,COND(=) 0,\tmp1,1b
+ nop
++ LDREG 0(\ptp),\pte
++ bb,<,n \pte,_PAGE_PRESENT_BIT,2f
++ b \fault
++ stw \spc,0(\tmp)
+ 2:
+ #endif
+ .endm
+
+- /* Release pa_dbit_lock lock without reloading lock address. */
+- .macro dbit_unlock0 spc,tmp
++ /* Release pa_tlb_lock lock without reloading lock address. */
++ .macro tlb_unlock0 spc,tmp
+ #ifdef CONFIG_SMP
+ or,COND(=) %r0,\spc,%r0
+ stw \spc,0(\tmp)
+ #endif
+ .endm
+
+- /* Release pa_dbit_lock lock. */
+- .macro dbit_unlock1 spc,tmp
++ /* Release pa_tlb_lock lock. */
++ .macro tlb_unlock1 spc,tmp
+ #ifdef CONFIG_SMP
+- load32 PA(pa_dbit_lock),\tmp
+- dbit_unlock0 \spc,\tmp
++ load32 PA(pa_tlb_lock),\tmp
++ tlb_unlock0 \spc,\tmp
+ #endif
+ .endm
+
+ /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
+ * don't needlessly dirty the cache line if it was already set */
+- .macro update_ptep spc,ptep,pte,tmp,tmp1
+-#ifdef CONFIG_SMP
+- or,COND(=) %r0,\spc,%r0
+- LDREG 0(\ptep),\pte
+-#endif
++ .macro update_accessed ptp,pte,tmp,tmp1
+ ldi _PAGE_ACCESSED,\tmp1
+ or \tmp1,\pte,\tmp
+ and,COND(<>) \tmp1,\pte,%r0
+- STREG \tmp,0(\ptep)
++ STREG \tmp,0(\ptp)
+ .endm
+
+ /* Set the dirty bit (and accessed bit). No need to be
+ * clever, this is only used from the dirty fault */
+- .macro update_dirty spc,ptep,pte,tmp
+-#ifdef CONFIG_SMP
+- or,COND(=) %r0,\spc,%r0
+- LDREG 0(\ptep),\pte
+-#endif
++ .macro update_dirty ptp,pte,tmp
+ ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
+ or \tmp,\pte,\pte
+- STREG \pte,0(\ptep)
++ STREG \pte,0(\ptp)
+ .endm
+
+ /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
+@@ -1148,14 +1144,14 @@ dtlb_miss_20w:
+
+ L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
+
+- dbit_lock spc,t0,t1
+- update_ptep spc,ptp,pte,t0,t1
++ tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
++ update_accessed ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ idtlbt pte,prot
+- dbit_unlock1 spc,t0
+
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1174,14 +1170,14 @@ nadtlb_miss_20w:
+
+ L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
+
+- dbit_lock spc,t0,t1
+- update_ptep spc,ptp,pte,t0,t1
++ tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
++ update_accessed ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ idtlbt pte,prot
+- dbit_unlock1 spc,t0
+
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1202,20 +1198,20 @@ dtlb_miss_11:
+
+ L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
+
+- dbit_lock spc,t0,t1
+- update_ptep spc,ptp,pte,t0,t1
++ tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
++ update_accessed ptp,pte,t0,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
++ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ idtlba pte,(%sr1,va)
+ idtlbp prot,(%sr1,va)
+
+- mtsp t0, %sr1 /* Restore sr1 */
+- dbit_unlock1 spc,t0
++ mtsp t1, %sr1 /* Restore sr1 */
+
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1235,21 +1231,20 @@ nadtlb_miss_11:
+
+ L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
+
+- dbit_lock spc,t0,t1
+- update_ptep spc,ptp,pte,t0,t1
++ tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
++ update_accessed ptp,pte,t0,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+-
+- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
++ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ idtlba pte,(%sr1,va)
+ idtlbp prot,(%sr1,va)
+
+- mtsp t0, %sr1 /* Restore sr1 */
+- dbit_unlock1 spc,t0
++ mtsp t1, %sr1 /* Restore sr1 */
+
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1269,16 +1264,16 @@ dtlb_miss_20:
+
+ L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
+
+- dbit_lock spc,t0,t1
+- update_ptep spc,ptp,pte,t0,t1
++ tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
++ update_accessed ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+- f_extend pte,t0
++ f_extend pte,t1
+
+ idtlbt pte,prot
+- dbit_unlock1 spc,t0
+
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1297,16 +1292,16 @@ nadtlb_miss_20:
+
+ L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
+
+- dbit_lock spc,t0,t1
+- update_ptep spc,ptp,pte,t0,t1
++ tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
++ update_accessed ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+- f_extend pte,t0
++ f_extend pte,t1
+
+- idtlbt pte,prot
+- dbit_unlock1 spc,t0
++ idtlbt pte,prot
+
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1406,14 +1401,14 @@ itlb_miss_20w:
+
+ L3_ptep ptp,pte,t0,va,itlb_fault
+
+- dbit_lock spc,t0,t1
+- update_ptep spc,ptp,pte,t0,t1
++ tlb_lock spc,ptp,pte,t0,t1,itlb_fault
++ update_accessed ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ iitlbt pte,prot
+- dbit_unlock1 spc,t0
+
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1430,14 +1425,14 @@ naitlb_miss_20w:
+
+ L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
+
+- dbit_lock spc,t0,t1
+- update_ptep spc,ptp,pte,t0,t1
++ tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
++ update_accessed ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ iitlbt pte,prot
+- dbit_unlock1 spc,t0
+
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1458,20 +1453,20 @@ itlb_miss_11:
+
+ L2_ptep ptp,pte,t0,va,itlb_fault
+
+- dbit_lock spc,t0,t1
+- update_ptep spc,ptp,pte,t0,t1
++ tlb_lock spc,ptp,pte,t0,t1,itlb_fault
++ update_accessed ptp,pte,t0,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
++ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ iitlba pte,(%sr1,va)
+ iitlbp prot,(%sr1,va)
+
+- mtsp t0, %sr1 /* Restore sr1 */
+- dbit_unlock1 spc,t0
++ mtsp t1, %sr1 /* Restore sr1 */
+
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1482,20 +1477,20 @@ naitlb_miss_11:
+
+ L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
+
+- dbit_lock spc,t0,t1
+- update_ptep spc,ptp,pte,t0,t1
++ tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
++ update_accessed ptp,pte,t0,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
++ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ iitlba pte,(%sr1,va)
+ iitlbp prot,(%sr1,va)
+
+- mtsp t0, %sr1 /* Restore sr1 */
+- dbit_unlock1 spc,t0
++ mtsp t1, %sr1 /* Restore sr1 */
+
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1516,16 +1511,16 @@ itlb_miss_20:
+
+ L2_ptep ptp,pte,t0,va,itlb_fault
+
+- dbit_lock spc,t0,t1
+- update_ptep spc,ptp,pte,t0,t1
++ tlb_lock spc,ptp,pte,t0,t1,itlb_fault
++ update_accessed ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+- f_extend pte,t0
++ f_extend pte,t1
+
+ iitlbt pte,prot
+- dbit_unlock1 spc,t0
+
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1536,16 +1531,16 @@ naitlb_miss_20:
+
+ L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
+
+- dbit_lock spc,t0,t1
+- update_ptep spc,ptp,pte,t0,t1
++ tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
++ update_accessed ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+- f_extend pte,t0
++ f_extend pte,t1
+
+ iitlbt pte,prot
+- dbit_unlock1 spc,t0
+
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1568,14 +1563,14 @@ dbit_trap_20w:
+
+ L3_ptep ptp,pte,t0,va,dbit_fault
+
+- dbit_lock spc,t0,t1
+- update_dirty spc,ptp,pte,t1
++ tlb_lock spc,ptp,pte,t0,t1,dbit_fault
++ update_dirty ptp,pte,t1
+
+ make_insert_tlb spc,pte,prot
+
+ idtlbt pte,prot
+- dbit_unlock0 spc,t0
+
++ tlb_unlock0 spc,t0
+ rfir
+ nop
+ #else
+@@ -1588,8 +1583,8 @@ dbit_trap_11:
+
+ L2_ptep ptp,pte,t0,va,dbit_fault
+
+- dbit_lock spc,t0,t1
+- update_dirty spc,ptp,pte,t1
++ tlb_lock spc,ptp,pte,t0,t1,dbit_fault
++ update_dirty ptp,pte,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+@@ -1600,8 +1595,8 @@ dbit_trap_11:
+ idtlbp prot,(%sr1,va)
+
+ mtsp t1, %sr1 /* Restore sr1 */
+- dbit_unlock0 spc,t0
+
++ tlb_unlock0 spc,t0
+ rfir
+ nop
+
+@@ -1612,16 +1607,16 @@ dbit_trap_20:
+
+ L2_ptep ptp,pte,t0,va,dbit_fault
+
+- dbit_lock spc,t0,t1
+- update_dirty spc,ptp,pte,t1
++ tlb_lock spc,ptp,pte,t0,t1,dbit_fault
++ update_dirty ptp,pte,t1
+
+ make_insert_tlb spc,pte,prot
+
+ f_extend pte,t1
+
+- idtlbt pte,prot
+- dbit_unlock0 spc,t0
++ idtlbt pte,prot
+
++ tlb_unlock0 spc,t0
+ rfir
+ nop
+ #endif
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -43,10 +43,6 @@
+
+ #include "../math-emu/math-emu.h" /* for handle_fpe() */
+
+-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+-DEFINE_SPINLOCK(pa_dbit_lock);
+-#endif
+-
+ static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
+ struct pt_regs *regs);
+
--- /dev/null
+From 4c4ac9a48ac512c6b5a6cca06cfad2ad96e8caaa Mon Sep 17 00:00:00 2001
+From: Christophe Jaillet <christophe.jaillet@wanadoo.fr>
+Date: Mon, 13 Jul 2015 11:32:43 +0200
+Subject: parisc: mm: Fix a memory leak related to pmd not attached to the pgd
+
+From: Christophe Jaillet <christophe.jaillet@wanadoo.fr>
+
+commit 4c4ac9a48ac512c6b5a6cca06cfad2ad96e8caaa upstream.
+
+Commit 0e0da48dee8d ("parisc: mm: don't count preallocated pmds")
+introduced a memory leak.
+
+After this commit, the 'return' statement in pmd_free() is executed in
+all cases, even for pmds that are not attached to the pgd. So
+'free_pages' can never be called anymore, leading to a memory leak.
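+
+Schematically, the buggy control flow looks like this (a sketch of
+pmd_free() before this fix; without braces the 'if' only guards the
+first statement after the comment):
+
+ if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+         /* ... comment ... */
+         mm_inc_nr_pmds(mm);
+         return;         /* executed unconditionally */
+ free_pages((unsigned long)pmd, PMD_ORDER);      /* never reached */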
+
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Acked-by: Mikulas Patocka <mpatocka@redhat.com>
+Acked-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/pgalloc.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/parisc/include/asm/pgalloc.h
++++ b/arch/parisc/include/asm/pgalloc.h
+@@ -72,7 +72,7 @@ static inline pmd_t *pmd_alloc_one(struc
+
+ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+ {
+- if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
++ if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
+ /*
+ * This is the permanent pmd attached to the pgd;
+ * cannot free it.
+@@ -81,6 +81,7 @@ static inline void pmd_free(struct mm_st
+ */
+ mm_inc_nr_pmds(mm);
+ return;
++ }
+ free_pages((unsigned long)pmd, PMD_ORDER);
+ }
+
--- /dev/null
+From b32aadc1a8ed84afbe924cd2ced31cd6a2e67074 Mon Sep 17 00:00:00 2001
+From: "Shreyas B. Prabhu" <shreyas@linux.vnet.ibm.com>
+Date: Tue, 7 Jul 2015 01:39:23 +0530
+Subject: powerpc/powernv: Fix race in updating core_idle_state
+
+From: "Shreyas B. Prabhu" <shreyas@linux.vnet.ibm.com>
+
+commit b32aadc1a8ed84afbe924cd2ced31cd6a2e67074 upstream.
+
+core_idle_state is maintained for each core. Bits 0-7 track whether a
+thread in the core has entered fastsleep or winkle, and bit 8 is used
+as a lock bit.
+The lock bit is set in these two scenarios:
+ - The thread is the first in the subcore to wake up from sleep/winkle.
+ - The thread is the last in the core about to enter sleep/winkle.
+
+While the lock bit is set, any other thread in the core that wakes up
+loops until the lock bit is cleared before proceeding in the wakeup
+path. This helps prevent races w.r.t. the fastsleep workaround and
+prevents threads from switching to process context before core/subcore
+resources are restored.
+
+But in the path to sleep/winkle entry we currently don't check for the
+lock bit. This exposes us to the following race when running with
+subcores on:
+
+First thread in the subcore          Another thread in the same
+waking up                            core entering sleep/winkle
+
+lwarx   r15,0,r14
+ori     r15,r15,PNV_CORE_IDLE_LOCK_BIT
+stwcx.  r15,0,r14
+[Code to restore subcore state]
+
+                                     lwarx   r15,0,r14
+                                     [clear thread bit]
+                                     stwcx.  r15,0,r14
+
+andi.   r15,r15,PNV_CORE_IDLE_THREAD_BITS
+stw     r15,0(r14)
+
+Here, after the thread entering sleep clears its thread bit in
+core_idle_state, the value is overwritten by the thread waking up. When
+the core then enters fastsleep, the code mistakes the idle thread for a
+running one. Because of this, the first thread waking up from
+fastsleep, which is supposed to resync the timebase, skips the resync,
+and we can end up with a core running on a stale timebase value.
+
+This patch fixes the above race by looping on the lock bit even while
+entering the idle states.
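+
+A rough C analogue of the fixed entry path (the lwarx_loop1 hunk
+below), modelling lwarx/stwcx. with compare-and-swap; the helper names
+here are illustrative, not kernel API:
+
+ #include <stdatomic.h>
+
+ #define PNV_CORE_IDLE_LOCK_BIT 0x100
+
+ /* Spin (HMT_LOW in the real code) until the lock bit clears. */
+ static unsigned int wait_for_lock_clear(atomic_uint *state)
+ {
+         unsigned int s;
+
+         while ((s = atomic_load(state)) & PNV_CORE_IDLE_LOCK_BIT)
+                 ;
+         return s;
+ }
+
+ /* Clear our thread bit on idle entry, honouring the lock bit. */
+ static void clear_thread_bit_on_entry(atomic_uint *state,
+                                       unsigned int thread_bit)
+ {
+         unsigned int old, new;
+
+         do {
+                 old = wait_for_lock_clear(state);  /* the fix */
+                 new = old & ~thread_bit;
+         } while (!atomic_compare_exchange_weak(state, &old, new));
+ }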
+
+Signed-off-by: Shreyas B. Prabhu <shreyas@linux.vnet.ibm.com>
+Fixes: 7b54e9f213f76 'powernv/powerpc: Add winkle support for offline cpus'
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/idle_power7.S | 31 +++++++++++++++++++++----------
+ 1 file changed, 21 insertions(+), 10 deletions(-)
+
+--- a/arch/powerpc/kernel/idle_power7.S
++++ b/arch/powerpc/kernel/idle_power7.S
+@@ -52,6 +52,22 @@
+ .text
+
+ /*
++ * Used by threads when the lock bit of core_idle_state is set.
++ * Threads will spin in HMT_LOW until the lock bit is cleared.
++ * r14 - pointer to core_idle_state
++ * r15 - used to load contents of core_idle_state
++ */
++
++core_idle_lock_held:
++ HMT_LOW
++3: lwz r15,0(r14)
++ andi. r15,r15,PNV_CORE_IDLE_LOCK_BIT
++ bne 3b
++ HMT_MEDIUM
++ lwarx r15,0,r14
++ blr
++
++/*
+ * Pass requested state in r3:
+ * r3 - PNV_THREAD_NAP/SLEEP/WINKLE
+ *
+@@ -150,6 +166,10 @@ power7_enter_nap_mode:
+ ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
+ lwarx_loop1:
+ lwarx r15,0,r14
++
++ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
++ bnel core_idle_lock_held
++
+ andc r15,r15,r7 /* Clear thread bit */
+
+ andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS
+@@ -294,7 +314,7 @@ lwarx_loop2:
+ * workaround undo code or resyncing timebase or restoring context
+ * In either case loop until the lock bit is cleared.
+ */
+- bne core_idle_lock_held
++ bnel core_idle_lock_held
+
+ cmpwi cr2,r15,0
+ lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
+@@ -319,15 +339,6 @@ lwarx_loop2:
+ isync
+ b common_exit
+
+-core_idle_lock_held:
+- HMT_LOW
+-core_idle_lock_loop:
+- lwz r15,0(14)
+- andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+- bne core_idle_lock_loop
+- HMT_MEDIUM
+- b lwarx_loop2
+-
+ first_thread_in_subcore:
+ /* First thread in subcore to wakeup */
+ ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
--- /dev/null
+From dbf3c370862d73fcd2c74ca55e254bb02143238d Mon Sep 17 00:00:00 2001
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Date: Fri, 10 Jul 2015 10:11:07 -0700
+Subject: Revert "Input: synaptics - allocate 3 slots to keep stability in image sensors"
+
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+commit dbf3c370862d73fcd2c74ca55e254bb02143238d upstream.
+
+This reverts commit 63c4fda3c0bb841b1aad1298fc7fe94058fc79f8 as it
+causes issues with detecting 3-finger taps.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=100481
+Acked-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+
+---
+ drivers/input/mouse/synaptics.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -1199,7 +1199,7 @@ static void set_input_params(struct psmo
+ ABS_MT_POSITION_Y);
+ /* Image sensors can report per-contact pressure */
+ input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
+- input_mt_init_slots(dev, 3, INPUT_MT_POINTER | INPUT_MT_TRACK);
++ input_mt_init_slots(dev, 2, INPUT_MT_POINTER | INPUT_MT_TRACK);
+
+ /* Image sensors can signal 4 and 5 finger clicks */
+ __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
--- /dev/null
+From 1c885357da2d3cf62132e611c0beaf4cdf607dd9 Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Wed, 1 Jul 2015 09:31:14 +0100
+Subject: Revert "MIPS: Kconfig: Disable SMP/CPS for 64-bit"
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit 1c885357da2d3cf62132e611c0beaf4cdf607dd9 upstream.
+
+This reverts commit 6ca716f2e5571d25a3899c6c5c91ff72ea6d6f5e.
+
+SMP/CPS is now supported on 64-bit cores.
+
+Reviewed-by: Paul Burton <paul.burton@imgtec.com>
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/10592/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2220,7 +2220,7 @@ config MIPS_CMP
+
+ config MIPS_CPS
+ bool "MIPS Coherent Processing System support"
+- depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
++ depends on SYS_SUPPORTS_MIPS_CPS
+ select MIPS_CM
+ select MIPS_CPC
+ select MIPS_CPS_PM if HOTPLUG_CPU
--- /dev/null
+cxl-fix-off-by-one-error-allowing-subsequent-mmap-page-to-be-accessed.patch
+cxl-check-if-afu-is-not-null-in-cxl_slbia.patch
+powerpc-powernv-fix-race-in-updating-core_idle_state.patch
+revert-input-synaptics-allocate-3-slots-to-keep-stability-in-image-sensors.patch
+parisc-fix-some-pte-tlb-race-conditions-and-optimize-__flush_tlb_range-based-on-timing-results.patch
+parisc-mm-fix-a-memory-leak-related-to-pmd-not-attached-to-the-pgd.patch
+arm-pxa-fix-dm9000-platform-data-regression.patch
+arm-dts-dra7x-evm-prevent-glitch-on-dcan1-pinmux.patch
+arm-dts-am57xx-beagle-x15-provide-supply-for-usb2_phy2.patch
+arm-8404-1-dma-mapping-fix-off-by-one-error-in-bitmap-size-check.patch
+arm-imx6-gpc-always-enable-pu-domain-if-config_pm-is-not-set.patch
+revert-mips-kconfig-disable-smp-cps-for-64-bit.patch
+mips-fix-erroneous-jr-emulation-for-mips-r6.patch
+mips-c-r4k-fix-cache-flushing-for-mt-cores.patch
+mips-kernel-smp-cps-fix-64-bit-compatibility-errors-due-to-pointer-casting.patch
+mips-kernel-cps-vec-replace-la-macro-with-ptr_la.patch
+mips-kernel-cps-vec-replace-mips32r2-isa-level-with-mips64r2.patch
+mips-kernel-cps-vec-use-ta0-ta3-pseudo-registers-for-64-bit.patch
+mips-kernel-cps-vec-replace-kseg0-with-ckseg0.patch
+mips-cps-vec-use-macros-for-various-arithmetics-and-memory-operations.patch
+mips-require-o32-fp64-support-for-mips64-with-o32-compat.patch
+mips-fpu.h-allow-64-bit-fpu-on-a-64-bit-mips-r6-cpu.patch
+can-replace-timestamp-as-unique-skb-attribute.patch
+can-rcar_can-fix-irq-check.patch
+can-c_can-fix-default-pinmux-glitch-at-init.patch
+can-rcar_can-print-signed-irq.patch
+can-mcp251x-fix-resume-when-device-is-down.patch