--- /dev/null
+From dc9e0a9347e932e3fd3cd03e7ff241022ed6ea8a Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Thu, 15 Mar 2018 19:49:14 -0700
+Subject: acpi, numa: fix pxm to online numa node associations
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit dc9e0a9347e932e3fd3cd03e7ff241022ed6ea8a upstream.
+
+Commit 99759869faf1 "acpi: Add acpi_map_pxm_to_online_node()" added
+support for mapping a given proximity domain to its nearest online node,
+by SLIT distance. However, it sometimes returns unexpected results because,
+once a closer node has been found, the loop starts measuring distances
+against that node instead of against the original PXM node.
+
+ for_each_online_node(n) {
+ dist = node_distance(node, n);
+ if (dist < min_dist) {
+ min_dist = dist;
+ node = n; <---- from this point we're using the
+ wrong node for node_distance()
+
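+A hypothetical SLIT (distances invented purely for illustration) shows
+how the stale comparison can pick the wrong node. Suppose the PXM maps to
+offline node 2 and nodes 0 and 1 are online:
+
+        /*
+         * node_distance(2, 0) = 21
+         * node_distance(2, 1) = 12   <-- true nearest online node
+         * node_distance(0, 1) = 31
+         *
+         * n = 0: dist = node_distance(2, 0) = 21 -> node = 0
+         * n = 1: dist = node_distance(0, 1) = 31, measured against node 0
+         *        instead of the PXM node, so node 1 is never selected and
+         *        node 0 is returned.
+         */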
+
+Fixes: 99759869faf1 ("acpi: Add acpi_map_pxm_to_online_node()")
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Toshi Kani <toshi.kani@hp.com>
+Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/numa.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/acpi/numa.c
++++ b/drivers/acpi/numa.c
+@@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm)
+ */
+ int acpi_map_pxm_to_online_node(int pxm)
+ {
+- int node, n, dist, min_dist;
++ int node, min_node;
+
+ node = acpi_map_pxm_to_node(pxm);
+
+ if (node == NUMA_NO_NODE)
+ node = 0;
+
++ min_node = node;
+ if (!node_online(node)) {
+- min_dist = INT_MAX;
++ int min_dist = INT_MAX, dist, n;
++
+ for_each_online_node(n) {
+ dist = node_distance(node, n);
+ if (dist < min_dist) {
+ min_dist = dist;
+- node = n;
++ min_node = n;
+ }
+ }
+ }
+
+- return node;
++ return min_node;
+ }
+ EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
+
--- /dev/null
+From b1abf6fc49829d89660c961fafe3f90f3d843c55 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 19 Mar 2018 14:51:49 +0100
+Subject: ACPI / watchdog: Fix off-by-one error at resource assignment
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit b1abf6fc49829d89660c961fafe3f90f3d843c55 upstream.
+
+The resource allocation in the WDAT watchdog has an off-by-one error: it
+sets the resource end one byte past the actual end address. This may
+eventually lead to unexpected resource conflicts.
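+
+As a concrete illustration (addresses invented), a resource that is 4
+bytes wide and starts at 0x1000 must end at 0x1003, not 0x1004:
+
+        res.start = 0x1000;
+        res.end   = res.start + 4;      /* old: ends at 0x1004, 5 bytes */
+        res.end   = res.start + 4 - 1;  /* new: ends at 0x1003, 4 bytes */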
+
+Fixes: 058dfc767008 (ACPI / watchdog: Add support for WDAT hardware watchdog)
+Cc: 4.9+ <stable@vger.kernel.org> # 4.9+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Acked-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/acpi_watchdog.c | 4 ++--
+ drivers/watchdog/wdat_wdt.c | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/acpi/acpi_watchdog.c
++++ b/drivers/acpi/acpi_watchdog.c
+@@ -74,10 +74,10 @@ void __init acpi_watchdog_init(void)
+ res.start = gas->address;
+ if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ res.flags = IORESOURCE_MEM;
+- res.end = res.start + ALIGN(gas->access_width, 4);
++ res.end = res.start + ALIGN(gas->access_width, 4) - 1;
+ } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ res.flags = IORESOURCE_IO;
+- res.end = res.start + gas->access_width;
++ res.end = res.start + gas->access_width - 1;
+ } else {
+ pr_warn("Unsupported address space: %u\n",
+ gas->space_id);
+--- a/drivers/watchdog/wdat_wdt.c
++++ b/drivers/watchdog/wdat_wdt.c
+@@ -392,7 +392,7 @@ static int wdat_wdt_probe(struct platfor
+
+ memset(&r, 0, sizeof(r));
+ r.start = gas->address;
+- r.end = r.start + gas->access_width;
++ r.end = r.start + gas->access_width - 1;
+ if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ r.flags = IORESOURCE_MEM;
+ } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
--- /dev/null
+From 455f3e76cfc0d893585a5f358b9ddbe9c1e1e53b Mon Sep 17 00:00:00 2001
+From: Arend Van Spriel <arend.vanspriel@broadcom.com>
+Date: Wed, 28 Feb 2018 21:15:20 +0100
+Subject: brcmfmac: fix P2P_DEVICE ethernet address generation
+
+From: Arend Van Spriel <arend.vanspriel@broadcom.com>
+
+commit 455f3e76cfc0d893585a5f358b9ddbe9c1e1e53b upstream.
+
+The firmware has a requirement that the P2P_DEVICE address should
+be different from the address of the primary interface. When not
+specified by user-space, the driver generates the MAC address for
+the P2P_DEVICE interface using the MAC address of the primary
+interface and setting the locally administered bit. However, the MAC
+address of the primary interface may already have that bit set causing
+the creation of the P2P_DEVICE interface to fail with -EBUSY. Fix this
+by instead using a random address to determine the P2P_DEVICE address.
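+
+For example (addresses invented for illustration), if the primary
+interface already carries a locally administered MAC, the old derivation
+produced a duplicate:
+
+        /* primary mac_addr: 02:11:22:33:44:55, bit 0x02 already set   */
+        /* old: dev_addr[0] |= 0x02 -> 02:11:22:33:44:55 == primary,   */
+        /*      so the firmware rejects the P2P_DEVICE with -EBUSY     */
+        /* new: eth_random_addr(dev_addr) -> e.g. 7a:de:ad:be:ef:01    */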
+
+Cc: stable@vger.kernel.org # 3.10.y
+Reported-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Hante Meuleman <hante.meuleman@broadcom.com>
+Reviewed-by: Pieter-Paul Giesberts <pieter-paul.giesberts@broadcom.com>
+Reviewed-by: Franky Lin <franky.lin@broadcom.com>
+Signed-off-by: Arend van Spriel <arend.vanspriel@broadcom.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 24 +++++++----------
+ 1 file changed, 11 insertions(+), 13 deletions(-)
+
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+@@ -462,25 +462,23 @@ static int brcmf_p2p_set_firmware(struct
+ * @dev_addr: optional device address.
+ *
+ * P2P needs mac addresses for P2P device and interface. If no device
+- * address it specified, these are derived from the primary net device, ie.
+- * the permanent ethernet address of the device.
++ * address it specified, these are derived from a random ethernet
++ * address.
+ */
+ static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
+ {
+- struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+- bool local_admin = false;
++ bool random_addr = false;
+
+- if (!dev_addr || is_zero_ether_addr(dev_addr)) {
+- dev_addr = pri_ifp->mac_addr;
+- local_admin = true;
+- }
++ if (!dev_addr || is_zero_ether_addr(dev_addr))
++ random_addr = true;
+
+- /* Generate the P2P Device Address. This consists of the device's
+- * primary MAC address with the locally administered bit set.
++ /* Generate the P2P Device Address obtaining a random ethernet
++ * address with the locally administered bit set.
+ */
+- memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
+- if (local_admin)
+- p2p->dev_addr[0] |= 0x02;
++ if (random_addr)
++ eth_random_addr(p2p->dev_addr);
++ else
++ memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
+
+ /* Generate the P2P Interface Address. If the discovery and connection
+ * BSSCFGs need to simultaneously co-exist, then this address must be
--- /dev/null
+From 746201235b3f876792099079f4c6fea941d76183 Mon Sep 17 00:00:00 2001
+From: Andri Yngvason <andri.yngvason@marel.com>
+Date: Wed, 14 Mar 2018 11:52:57 +0000
+Subject: can: cc770: Fix queue stall & dropped RTR reply
+
+From: Andri Yngvason <andri.yngvason@marel.com>
+
+commit 746201235b3f876792099079f4c6fea941d76183 upstream.
+
+While waiting for the TX object to send an RTR, an external message with a
+matching id can overwrite the TX data. In this case we must call the rx
+routine and then try again to transmit the message that was overwritten.
+
+The queue was being stalled because the RX event did not generate an
+interrupt to wake up the queue again and the TX event did not happen
+because the TXRQST flag is reset by the chip when new data is received.
+
+According to the CC770 datasheet the id of a message object should not be
+changed while the MSGVAL bit is set. This has been fixed by resetting the
+MSGVAL bit before modifying the object in the transmit function and setting
+it after. It is not enough to set & reset CPUUPD.
+
+It is important to keep the MSGVAL bit reset while the message object is
+being modified. Otherwise, during RTR transmission, a frame with matching
+id could trigger an rx-interrupt, which would cause a race condition
+between the interrupt routine and the transmit function.
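+
+The resulting register sequence, as implemented by cc770_tx() in the
+hunks below (summarised as a sketch, not literal code):
+
+        ctrl0: MSGVAL_RES                        /* invalidate the object first  */
+        ctrl1: CPUUPD_SET                        /* mark object as being updated */
+               ... write id, config and data registers ...
+        ctrl1: TXRQST_SET | CPUUPD_RES           /* request transmission         */
+        ctrl0: MSGVAL_SET | TXIE_SET | RXIE_SET  /* re-validate, enable IRQs     */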
+
+Signed-off-by: Andri Yngvason <andri.yngvason@marel.com>
+Tested-by: Richard Weinberger <richard@nod.at>
+Cc: linux-stable <stable@vger.kernel.org>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/cc770/cc770.c | 94 +++++++++++++++++++++++++++++-------------
+ drivers/net/can/cc770/cc770.h | 2
+ 2 files changed, 68 insertions(+), 28 deletions(-)
+
+--- a/drivers/net/can/cc770/cc770.c
++++ b/drivers/net/can/cc770/cc770.c
+@@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const
+ return 0;
+ }
+
+-static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static void cc770_tx(struct net_device *dev, int mo)
+ {
+ struct cc770_priv *priv = netdev_priv(dev);
+- struct net_device_stats *stats = &dev->stats;
+- struct can_frame *cf = (struct can_frame *)skb->data;
+- unsigned int mo = obj2msgobj(CC770_OBJ_TX);
++ struct can_frame *cf = (struct can_frame *)priv->tx_skb->data;
+ u8 dlc, rtr;
+ u32 id;
+ int i;
+
+- if (can_dropped_invalid_skb(dev, skb))
+- return NETDEV_TX_OK;
+-
+- if ((cc770_read_reg(priv,
+- msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
+- netdev_err(dev, "TX register is still occupied!\n");
+- return NETDEV_TX_BUSY;
+- }
+-
+- netif_stop_queue(dev);
+-
+ dlc = cf->can_dlc;
+ id = cf->can_id;
+- if (cf->can_id & CAN_RTR_FLAG)
+- rtr = 0;
+- else
+- rtr = MSGCFG_DIR;
++ rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR;
++
++ cc770_write_reg(priv, msgobj[mo].ctrl0,
++ MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
+- cc770_write_reg(priv, msgobj[mo].ctrl0,
+- MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
++
+ if (id & CAN_EFF_FLAG) {
+ id &= CAN_EFF_MASK;
+ cc770_write_reg(priv, msgobj[mo].config,
+@@ -439,13 +425,30 @@ static netdev_tx_t cc770_start_xmit(stru
+ for (i = 0; i < dlc; i++)
+ cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
+
+- /* Store echo skb before starting the transfer */
+- can_put_echo_skb(skb, dev, 0);
+-
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+- RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
++ RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
++ cc770_write_reg(priv, msgobj[mo].ctrl0,
++ MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC);
++}
++
++static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct cc770_priv *priv = netdev_priv(dev);
++ unsigned int mo = obj2msgobj(CC770_OBJ_TX);
++
++ if (can_dropped_invalid_skb(dev, skb))
++ return NETDEV_TX_OK;
++
++ netif_stop_queue(dev);
++
++ if ((cc770_read_reg(priv,
++ msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
++ netdev_err(dev, "TX register is still occupied!\n");
++ return NETDEV_TX_BUSY;
++ }
+
+- stats->tx_bytes += dlc;
++ priv->tx_skb = skb;
++ cc770_tx(dev, mo);
+
+ return NETDEV_TX_OK;
+ }
+@@ -671,13 +674,47 @@ static void cc770_tx_interrupt(struct ne
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned int mo = obj2msgobj(o);
++ struct can_frame *cf;
++ u8 ctrl1;
++
++ ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
+
+- /* Nothing more to send, switch off interrupts */
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
++ cc770_write_reg(priv, msgobj[mo].ctrl1,
++ RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES);
+
+- stats->tx_packets++;
++ if (unlikely(!priv->tx_skb)) {
++ netdev_err(dev, "missing tx skb in tx interrupt\n");
++ return;
++ }
++
++ if (unlikely(ctrl1 & MSGLST_SET)) {
++ stats->rx_over_errors++;
++ stats->rx_errors++;
++ }
++
++ /* When the CC770 is sending an RTR message and it receives a regular
++ * message that matches the id of the RTR message, it will overwrite the
++ * outgoing message in the TX register. When this happens we must
++ * process the received message and try to transmit the outgoing skb
++ * again.
++ */
++ if (unlikely(ctrl1 & NEWDAT_SET)) {
++ cc770_rx(dev, mo, ctrl1);
++ cc770_tx(dev, mo);
++ return;
++ }
++
++ can_put_echo_skb(priv->tx_skb, dev, 0);
+ can_get_echo_skb(dev, 0);
++
++ cf = (struct can_frame *)priv->tx_skb->data;
++ stats->tx_bytes += cf->can_dlc;
++ stats->tx_packets++;
++
++ priv->tx_skb = NULL;
++
+ netif_wake_queue(dev);
+ }
+
+@@ -789,6 +826,7 @@ struct net_device *alloc_cc770dev(int si
+ priv->can.do_set_bittiming = cc770_set_bittiming;
+ priv->can.do_set_mode = cc770_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
++ priv->tx_skb = NULL;
+
+ memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
+
+--- a/drivers/net/can/cc770/cc770.h
++++ b/drivers/net/can/cc770/cc770.h
+@@ -193,6 +193,8 @@ struct cc770_priv {
+ u8 cpu_interface; /* CPU interface register */
+ u8 clkout; /* Clock out register */
+ u8 bus_config; /* Bus conffiguration register */
++
++ struct sk_buff *tx_skb;
+ };
+
+ struct net_device *alloc_cc770dev(int sizeof_priv);
--- /dev/null
+From f4353daf4905c0099fd25fa742e2ffd4a4bab26a Mon Sep 17 00:00:00 2001
+From: Andri Yngvason <andri.yngvason@marel.com>
+Date: Wed, 14 Mar 2018 11:52:56 +0000
+Subject: can: cc770: Fix stalls on rt-linux, remove redundant IRQ ack
+
+From: Andri Yngvason <andri.yngvason@marel.com>
+
+commit f4353daf4905c0099fd25fa742e2ffd4a4bab26a upstream.
+
+The redundant IRQ acknowledgment removed by this patch has been reported
+to cause stalls on rt-linux.
+
+Suggested-by: Richard Weinberger <richard@nod.at>
+Tested-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Andri Yngvason <andri.yngvason@marel.com>
+Cc: linux-stable <stable@vger.kernel.org>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/cc770/cc770.c | 15 ---------------
+ 1 file changed, 15 deletions(-)
+
+--- a/drivers/net/can/cc770/cc770.c
++++ b/drivers/net/can/cc770/cc770.c
+@@ -447,15 +447,6 @@ static netdev_tx_t cc770_start_xmit(stru
+
+ stats->tx_bytes += dlc;
+
+-
+- /*
+- * HM: We had some cases of repeated IRQs so make sure the
+- * INT is acknowledged I know it's already further up, but
+- * doing again fixed the issue
+- */
+- cc770_write_reg(priv, msgobj[mo].ctrl0,
+- MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+-
+ return NETDEV_TX_OK;
+ }
+
+@@ -684,12 +675,6 @@ static void cc770_tx_interrupt(struct ne
+ /* Nothing more to send, switch off interrupts */
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
+- /*
+- * We had some cases of repeated IRQ so make sure the
+- * INT is acknowledged
+- */
+- cc770_write_reg(priv, msgobj[mo].ctrl0,
+- MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+
+ stats->tx_packets++;
+ can_get_echo_skb(dev, 0);
--- /dev/null
+From 9ffd7503944ec7c0ef41c3245d1306c221aef2be Mon Sep 17 00:00:00 2001
+From: Andri Yngvason <andri.yngvason@marel.com>
+Date: Thu, 15 Mar 2018 18:23:17 +0000
+Subject: can: cc770: Fix use after free in cc770_tx_interrupt()
+
+From: Andri Yngvason <andri.yngvason@marel.com>
+
+commit 9ffd7503944ec7c0ef41c3245d1306c221aef2be upstream.
+
+This fixes a use-after-free introduced by the previous cc770 patch.
+
+Signed-off-by: Andri Yngvason <andri.yngvason@marel.com>
+Fixes: 746201235b3f ("can: cc770: Fix queue stall & dropped RTR reply")
+Cc: linux-stable <stable@vger.kernel.org>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/cc770/cc770.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/can/cc770/cc770.c
++++ b/drivers/net/can/cc770/cc770.c
+@@ -706,13 +706,12 @@ static void cc770_tx_interrupt(struct ne
+ return;
+ }
+
+- can_put_echo_skb(priv->tx_skb, dev, 0);
+- can_get_echo_skb(dev, 0);
+-
+ cf = (struct can_frame *)priv->tx_skb->data;
+ stats->tx_bytes += cf->can_dlc;
+ stats->tx_packets++;
+
++ can_put_echo_skb(priv->tx_skb, dev, 0);
++ can_get_echo_skb(dev, 0);
+ priv->tx_skb = NULL;
+
+ netif_wake_queue(dev);
--- /dev/null
+From 591d65d5b15496af8d05e252bc1da611c66c0b79 Mon Sep 17 00:00:00 2001
+From: Marek Vasut <marex@denx.de>
+Date: Mon, 5 Mar 2018 21:29:52 +0100
+Subject: can: ifi: Check core revision upon probe
+
+From: Marek Vasut <marex@denx.de>
+
+commit 591d65d5b15496af8d05e252bc1da611c66c0b79 upstream.
+
+Older versions of the core are not compatible with the driver due
+to various intrusive fixes of the core. Read out the VER register,
+check the core revision bitfield and verify if the core in use is
+new enough (rev 2.1 or newer) to work correctly with this driver.
+
+Signed-off-by: Marek Vasut <marex@denx.de>
+Cc: Heiko Schocher <hs@denx.de>
+Cc: Markus Marb <markus@marb.org>
+Cc: Marc Kleine-Budde <mkl@pengutronix.de>
+Cc: linux-stable <stable@vger.kernel.org>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/ifi_canfd/ifi_canfd.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
++++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
+@@ -144,6 +144,8 @@
+ #define IFI_CANFD_SYSCLOCK 0x50
+
+ #define IFI_CANFD_VER 0x54
++#define IFI_CANFD_VER_REV_MASK 0xff
++#define IFI_CANFD_VER_REV_MIN_SUPPORTED 0x15
+
+ #define IFI_CANFD_IP_ID 0x58
+ #define IFI_CANFD_IP_ID_VALUE 0xD073CAFD
+@@ -943,7 +945,7 @@ static int ifi_canfd_plat_probe(struct p
+ struct resource *res;
+ void __iomem *addr;
+ int irq, ret;
+- u32 id;
++ u32 id, rev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = devm_ioremap_resource(dev, res);
+@@ -957,6 +959,13 @@ static int ifi_canfd_plat_probe(struct p
+ return -EINVAL;
+ }
+
++ rev = readl(addr + IFI_CANFD_VER) & IFI_CANFD_VER_REV_MASK;
++ if (rev < IFI_CANFD_VER_REV_MIN_SUPPORTED) {
++ dev_err(dev, "This block is too old (rev %i), minimum supported is rev %i\n",
++ rev, IFI_CANFD_VER_REV_MIN_SUPPORTED);
++ return -EINVAL;
++ }
++
+ ndev = alloc_candev(sizeof(*priv), 1);
+ if (!ndev)
+ return -ENOMEM;
--- /dev/null
+From 880dd464b4304583c557c4e5f5ecebfd55d232b1 Mon Sep 17 00:00:00 2001
+From: Marek Vasut <marex@denx.de>
+Date: Thu, 1 Mar 2018 19:34:00 +0100
+Subject: can: ifi: Repair the error handling
+
+From: Marek Vasut <marex@denx.de>
+
+commit 880dd464b4304583c557c4e5f5ecebfd55d232b1 upstream.
+
+The new version of the IFI CANFD core has significantly less complex
+error state indication logic. In particular, the warning/error state
+bits are no longer all over the place, but are all present in the
+STATUS register. Moreover, there is a new IRQ register bit indicating
+transition between error states (active/warning/passive/busoff).
+
+This patch makes use of this bit to weed out the obscure selective
+INTERRUPT register clearing, which was used to carry over the error
+state indication into the poll function. While at it, this patch
+fixes the handling of the ACTIVE state, since the hardware provides
+indication of the core being in ACTIVE state and that in turn fixes
+the state transition indication toward userspace. Finally, register
+reads in the poll function are moved to the matching subfunctions
+since those are also no longer needed in the poll function.
+
+Signed-off-by: Marek Vasut <marex@denx.de>
+Cc: Heiko Schocher <hs@denx.de>
+Cc: Markus Marb <markus@marb.org>
+Cc: Marc Kleine-Budde <mkl@pengutronix.de>
+Cc: linux-stable <stable@vger.kernel.org>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/ifi_canfd/ifi_canfd.c | 64 +++++++++++++++++++---------------
+ 1 file changed, 37 insertions(+), 27 deletions(-)
+
+--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
++++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
+@@ -30,6 +30,7 @@
+ #define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2)
+ #define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3)
+ #define IFI_CANFD_STCMD_BUSOFF BIT(4)
++#define IFI_CANFD_STCMD_ERROR_WARNING BIT(5)
+ #define IFI_CANFD_STCMD_BUSMONITOR BIT(16)
+ #define IFI_CANFD_STCMD_LOOPBACK BIT(18)
+ #define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24)
+@@ -52,7 +53,10 @@
+ #define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13)
+
+ #define IFI_CANFD_INTERRUPT 0xc
++#define IFI_CANFD_INTERRUPT_ERROR_BUSOFF BIT(0)
+ #define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1)
++#define IFI_CANFD_INTERRUPT_ERROR_STATE_CHG BIT(2)
++#define IFI_CANFD_INTERRUPT_ERROR_REC_TEC_INC BIT(3)
+ #define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10)
+ #define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16)
+ #define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22)
+@@ -61,6 +65,10 @@
+ #define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31))
+
+ #define IFI_CANFD_IRQMASK 0x10
++#define IFI_CANFD_IRQMASK_ERROR_BUSOFF BIT(0)
++#define IFI_CANFD_IRQMASK_ERROR_WARNING BIT(1)
++#define IFI_CANFD_IRQMASK_ERROR_STATE_CHG BIT(2)
++#define IFI_CANFD_IRQMASK_ERROR_REC_TEC_INC BIT(3)
+ #define IFI_CANFD_IRQMASK_SET_ERR BIT(7)
+ #define IFI_CANFD_IRQMASK_SET_TS BIT(15)
+ #define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16)
+@@ -220,7 +228,10 @@ static void ifi_canfd_irq_enable(struct
+
+ if (enable) {
+ enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY |
+- IFI_CANFD_IRQMASK_RXFIFO_NEMPTY;
++ IFI_CANFD_IRQMASK_RXFIFO_NEMPTY |
++ IFI_CANFD_IRQMASK_ERROR_STATE_CHG |
++ IFI_CANFD_IRQMASK_ERROR_WARNING |
++ IFI_CANFD_IRQMASK_ERROR_BUSOFF;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER;
+ }
+@@ -361,12 +372,13 @@ static int ifi_canfd_handle_lost_msg(str
+ return 1;
+ }
+
+-static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr)
++static int ifi_canfd_handle_lec_err(struct net_device *ndev)
+ {
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
++ u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
+ const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST |
+ IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST |
+ IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST |
+@@ -449,6 +461,11 @@ static int ifi_canfd_handle_state_change
+
+ switch (new_state) {
+ case CAN_STATE_ERROR_ACTIVE:
++ /* error active state */
++ priv->can.can_stats.error_warning++;
++ priv->can.state = CAN_STATE_ERROR_ACTIVE;
++ break;
++ case CAN_STATE_ERROR_WARNING:
+ /* error warning state */
+ priv->can.can_stats.error_warning++;
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+@@ -477,7 +494,7 @@ static int ifi_canfd_handle_state_change
+ ifi_canfd_get_berr_counter(ndev, &bec);
+
+ switch (new_state) {
+- case CAN_STATE_ERROR_ACTIVE:
++ case CAN_STATE_ERROR_WARNING:
+ /* error warning state */
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (bec.txerr > bec.rxerr) ?
+@@ -510,22 +527,21 @@ static int ifi_canfd_handle_state_change
+ return 1;
+ }
+
+-static int ifi_canfd_handle_state_errors(struct net_device *ndev, u32 stcmd)
++static int ifi_canfd_handle_state_errors(struct net_device *ndev)
+ {
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
++ u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
+ int work_done = 0;
+- u32 isr;
+
+- /*
+- * The ErrWarn condition is a little special, since the bit is
+- * located in the INTERRUPT register instead of STCMD register.
+- */
+- isr = readl(priv->base + IFI_CANFD_INTERRUPT);
+- if ((isr & IFI_CANFD_INTERRUPT_ERROR_WARNING) &&
++ if ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) &&
++ (priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
++ netdev_dbg(ndev, "Error, entered active state\n");
++ work_done += ifi_canfd_handle_state_change(ndev,
++ CAN_STATE_ERROR_ACTIVE);
++ }
++
++ if ((stcmd & IFI_CANFD_STCMD_ERROR_WARNING) &&
+ (priv->can.state != CAN_STATE_ERROR_WARNING)) {
+- /* Clear the interrupt */
+- writel(IFI_CANFD_INTERRUPT_ERROR_WARNING,
+- priv->base + IFI_CANFD_INTERRUPT);
+ netdev_dbg(ndev, "Error, entered warning state\n");
+ work_done += ifi_canfd_handle_state_change(ndev,
+ CAN_STATE_ERROR_WARNING);
+@@ -552,18 +568,11 @@ static int ifi_canfd_poll(struct napi_st
+ {
+ struct net_device *ndev = napi->dev;
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+- const u32 stcmd_state_mask = IFI_CANFD_STCMD_ERROR_PASSIVE |
+- IFI_CANFD_STCMD_BUSOFF;
+- int work_done = 0;
+-
+- u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
+ u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
+- u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
++ int work_done = 0;
+
+ /* Handle bus state changes */
+- if ((stcmd & stcmd_state_mask) ||
+- ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) == 0))
+- work_done += ifi_canfd_handle_state_errors(ndev, stcmd);
++ work_done += ifi_canfd_handle_state_errors(ndev);
+
+ /* Handle lost messages on RX */
+ if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW)
+@@ -571,7 +580,7 @@ static int ifi_canfd_poll(struct napi_st
+
+ /* Handle lec errors on the bus */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+- work_done += ifi_canfd_handle_lec_err(ndev, errctr);
++ work_done += ifi_canfd_handle_lec_err(ndev);
+
+ /* Handle normal messages on RX */
+ if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY))
+@@ -592,12 +601,13 @@ static irqreturn_t ifi_canfd_isr(int irq
+ struct net_device_stats *stats = &ndev->stats;
+ const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
+ IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER |
++ IFI_CANFD_INTERRUPT_ERROR_COUNTER |
++ IFI_CANFD_INTERRUPT_ERROR_STATE_CHG |
+ IFI_CANFD_INTERRUPT_ERROR_WARNING |
+- IFI_CANFD_INTERRUPT_ERROR_COUNTER;
++ IFI_CANFD_INTERRUPT_ERROR_BUSOFF;
+ const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY |
+ IFI_CANFD_INTERRUPT_TXFIFO_REMOVE;
+- const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ |
+- IFI_CANFD_INTERRUPT_ERROR_WARNING));
++ const u32 clr_irq_mask = ~((u32)IFI_CANFD_INTERRUPT_SET_IRQ);
+ u32 isr;
+
+ isr = readl(priv->base + IFI_CANFD_INTERRUPT);
--- /dev/null
+From e6048a00cfd0863d32f53b226e0b9a3633fc3332 Mon Sep 17 00:00:00 2001
+From: Stephane Grosjean <s.grosjean@peak-system.com>
+Date: Thu, 8 Mar 2018 09:30:28 +0100
+Subject: can: peak/pcie_fd: fix echo_skb is occupied! bug
+
+From: Stephane Grosjean <s.grosjean@peak-system.com>
+
+commit e6048a00cfd0863d32f53b226e0b9a3633fc3332 upstream.
+
+This patch makes the handling of the linux-can echo_skb array and the
+network tx queue atomic. This prevents the "BUG! echo_skb is occupied!"
+message from being printed by the linux-can core in SMP environments.
+
+Reported-by: Diana Burgess <diana@peloton-tech.com>
+Signed-off-by: Stephane Grosjean <s.grosjean@peak-system.com>
+Cc: linux-stable <stable@vger.kernel.org>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/peak_canfd/peak_canfd.c | 12 ++++++------
+ drivers/net/can/peak_canfd/peak_pciefd_main.c | 8 ++++++--
+ 2 files changed, 12 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/can/peak_canfd/peak_canfd.c
++++ b/drivers/net/can/peak_canfd/peak_canfd.c
+@@ -262,7 +262,6 @@ static int pucan_handle_can_rx(struct pe
+
+ spin_lock_irqsave(&priv->echo_lock, flags);
+ can_get_echo_skb(priv->ndev, msg->client);
+- spin_unlock_irqrestore(&priv->echo_lock, flags);
+
+ /* count bytes of the echo instead of skb */
+ stats->tx_bytes += cf_len;
+@@ -271,6 +270,7 @@ static int pucan_handle_can_rx(struct pe
+ /* restart tx queue (a slot is free) */
+ netif_wake_queue(priv->ndev);
+
++ spin_unlock_irqrestore(&priv->echo_lock, flags);
+ return 0;
+ }
+
+@@ -726,11 +726,6 @@ static netdev_tx_t peak_canfd_start_xmit
+ */
+ should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]);
+
+- spin_unlock_irqrestore(&priv->echo_lock, flags);
+-
+- /* write the skb on the interface */
+- priv->write_tx_msg(priv, msg);
+-
+ /* stop network tx queue if not enough room to save one more msg too */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ should_stop_tx_queue |= (room_left <
+@@ -742,6 +737,11 @@ static netdev_tx_t peak_canfd_start_xmit
+ if (should_stop_tx_queue)
+ netif_stop_queue(ndev);
+
++ spin_unlock_irqrestore(&priv->echo_lock, flags);
++
++ /* write the skb on the interface */
++ priv->write_tx_msg(priv, msg);
++
+ return NETDEV_TX_OK;
+ }
+
+--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
++++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
+@@ -349,8 +349,12 @@ static irqreturn_t pciefd_irq_handler(in
+ priv->tx_pages_free++;
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+- /* wake producer up */
+- netif_wake_queue(priv->ucan.ndev);
++ /* wake producer up (only if enough room in echo_skb array) */
++ spin_lock_irqsave(&priv->ucan.echo_lock, flags);
++ if (!priv->ucan.can.echo_skb[priv->ucan.echo_idx])
++ netif_wake_queue(priv->ucan.ndev);
++
++ spin_unlock_irqrestore(&priv->ucan.echo_lock, flags);
+ }
+
+ /* re-enable Rx DMA transfer for this CAN */
--- /dev/null
+From ffd137f7043cb30067e1bff6fe62a073ae190b23 Mon Sep 17 00:00:00 2001
+From: Stephane Grosjean <s.grosjean@peak-system.com>
+Date: Thu, 8 Mar 2018 09:30:29 +0100
+Subject: can: peak/pcie_fd: remove useless code when interface starts
+
+From: Stephane Grosjean <s.grosjean@peak-system.com>
+
+commit ffd137f7043cb30067e1bff6fe62a073ae190b23 upstream.
+
+When an interface starts, the echo_skb array is empty and the network
+queue only needs to be started. This patch removes the useless code and
+locking that ran when the internal RX_BARRIER message was received from
+the IP core, telling the driver that tx may start, and simply starts the
+queue instead.
+
+Signed-off-by: Stephane Grosjean <s.grosjean@peak-system.com>
+Cc: linux-stable <stable@vger.kernel.org>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/peak_canfd/peak_canfd.c | 13 ++-----------
+ 1 file changed, 2 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/can/peak_canfd/peak_canfd.c
++++ b/drivers/net/can/peak_canfd/peak_canfd.c
+@@ -333,7 +333,6 @@ static int pucan_handle_status(struct pe
+
+ /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */
+ if (pucan_status_is_rx_barrier(msg)) {
+- unsigned long flags;
+
+ if (priv->enable_tx_path) {
+ int err = priv->enable_tx_path(priv);
+@@ -342,16 +341,8 @@ static int pucan_handle_status(struct pe
+ return err;
+ }
+
+- /* restart network queue only if echo skb array is free */
+- spin_lock_irqsave(&priv->echo_lock, flags);
+-
+- if (!priv->can.echo_skb[priv->echo_idx]) {
+- spin_unlock_irqrestore(&priv->echo_lock, flags);
+-
+- netif_wake_queue(ndev);
+- } else {
+- spin_unlock_irqrestore(&priv->echo_lock, flags);
+- }
++ /* start network queue (echo_skb array is empty) */
++ netif_start_queue(ndev);
+
+ return 0;
+ }
--- /dev/null
+From 731a373698c9675d5aed8a30d8c9861bea9c41a2 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Wed, 7 Mar 2018 13:45:33 -0500
+Subject: drm/amd/display: Add one to EDID's audio channel count when passing to DC
+
+From: Harry Wentland <harry.wentland@amd.com>
+
+commit 731a373698c9675d5aed8a30d8c9861bea9c41a2 upstream.
+
+DC takes channel count to mean the actual count. cea_sad's channels
+represent it as number of channels - 1.
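+
+For example, a stereo Short Audio Descriptor is encoded with
+sad->channels == 1 (CEA-861 stores "max channels - 1"), so DC must be
+handed sad->channels + 1 == 2.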
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -109,7 +109,7 @@ enum dc_edid_status dm_helpers_parse_edi
+ struct cea_sad *sad = &sads[i];
+
+ edid_caps->audio_modes[i].format_code = sad->format;
+- edid_caps->audio_modes[i].channel_count = sad->channels;
++ edid_caps->audio_modes[i].channel_count = sad->channels + 1;
+ edid_caps->audio_modes[i].sample_rate = sad->freq;
+ edid_caps->audio_modes[i].sample_size = sad->byte2;
+ }
--- /dev/null
+From 509648fcf0ce8650184649b43ad039f78dde155f Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Tue, 6 Mar 2018 11:14:12 -0500
+Subject: drm/amd/display: We shouldn't set format_default on plane as atomic driver
+
+From: Harry Wentland <harry.wentland@amd.com>
+
+commit 509648fcf0ce8650184649b43ad039f78dde155f upstream.
+
+This is still a leftover from the early atomic bringup days.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3090,8 +3090,6 @@ static int amdgpu_dm_plane_init(struct a
+
+ switch (aplane->base.type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+- aplane->base.format_default = true;
+-
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
--- /dev/null
+From 2681bc79eeb640562c932007bfebbbdc55bf6a7d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Wed, 14 Mar 2018 18:14:04 +0100
+Subject: drm/radeon: Don't turn off DP sink when disconnected
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michel Dänzer <michel.daenzer@amd.com>
+
+commit 2681bc79eeb640562c932007bfebbbdc55bf6a7d upstream.
+
+Turning off the sink in this case causes various issues, because
+userspace expects it to stay on until it turns it off explicitly.
+
+Instead, turn the sink off and back on when a display is connected
+again. This dance seems necessary for link training to work correctly.
+
+Bugzilla: https://bugs.freedesktop.org/105308
+Cc: stable@vger.kernel.org
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_connectors.c | 29 +++++++++++------------------
+ 1 file changed, 11 insertions(+), 18 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -90,25 +90,18 @@ void radeon_connector_hotplug(struct drm
+ /* don't do anything if sink is not display port, i.e.,
+ * passive dp->(dvi|hdmi) adaptor
+ */
+- if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+- int saved_dpms = connector->dpms;
+- /* Only turn off the display if it's physically disconnected */
+- if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+- } else if (radeon_dp_needs_link_train(radeon_connector)) {
+- /* Don't try to start link training before we
+- * have the dpcd */
+- if (!radeon_dp_getdpcd(radeon_connector))
+- return;
++ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
++ radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
++ radeon_dp_needs_link_train(radeon_connector)) {
++ /* Don't start link training before we have the DPCD */
++ if (!radeon_dp_getdpcd(radeon_connector))
++ return;
+
+- /* set it to OFF so that drm_helper_connector_dpms()
+- * won't return immediately since the current state
+- * is ON at this point.
+- */
+- connector->dpms = DRM_MODE_DPMS_OFF;
+- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+- }
+- connector->dpms = saved_dpms;
++ /* Turn the connector off and back on immediately, which
++ * will trigger link training
++ */
++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
+ }
+ }
--- /dev/null
+From b24791fe00f8b089d5b10cb7bcc4e1ae88b4831b Mon Sep 17 00:00:00 2001
+From: Daniel Stone <daniels@collabora.com>
+Date: Tue, 20 Mar 2018 22:58:39 +0000
+Subject: drm: Reject getfb for multi-plane framebuffers
+
+From: Daniel Stone <daniels@collabora.com>
+
+commit b24791fe00f8b089d5b10cb7bcc4e1ae88b4831b upstream.
+
+getfb can only return a single plane, so reject attempts to use it with
+multi-plane framebuffers.
+
+Signed-off-by: Daniel Stone <daniels@collabora.com>
+Reported-by: Daniel van Vugt <daniel.van.vugt@canonical.com>
+Reviewed-by: Rob Clark <robdclark@gmail.com>
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Fixes: 308e5bcbdb10 ("drm: add an fb creation ioctl that takes a pixel format v5")
+Cc: stable@vger.kernel.org # v3.3+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105518
+Link: https://patchwork.freedesktop.org/patch/msgid/20180320225839.30905-1-daniels@collabora.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_framebuffer.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/gpu/drm/drm_framebuffer.c
++++ b/drivers/gpu/drm/drm_framebuffer.c
+@@ -458,6 +458,12 @@ int drm_mode_getfb(struct drm_device *de
+ if (!fb)
+ return -ENOENT;
+
++ /* Multi-planar framebuffers need getfb2. */
++ if (fb->format->num_planes > 1) {
++ ret = -EINVAL;
++ goto out;
++ }
++
+ r->height = fb->height;
+ r->width = fb->width;
+ r->depth = fb->format->depth;
+@@ -481,6 +487,7 @@ int drm_mode_getfb(struct drm_device *de
+ ret = -ENODEV;
+ }
+
++out:
+ drm_framebuffer_put(fb);
+
+ return ret;
--- /dev/null
+From 3b82a4db8eaccce735dffd50b4d4e1578099b8e8 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Wed, 21 Mar 2018 16:45:53 +0100
+Subject: drm: udl: Properly check framebuffer mmap offsets
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+commit 3b82a4db8eaccce735dffd50b4d4e1578099b8e8 upstream.
+
+The mmap offsets sent to the udl framebuffer driver were not being
+checked for all sets of possible crazy values. Fix this up by properly
+bounding the allowed values.
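+
+One way the old check could be defeated (values invented for
+illustration, 64-bit unsigned long and PAGE_SHIFT == 12 assumed):
+
+        vma->vm_pgoff = 1UL << 52;
+        offset = vma->vm_pgoff << PAGE_SHIFT;   /* 1 << 64 wraps to 0 */
+        /* "offset + size > info->fix.smem_len" no longer triggers even
+         * though the caller asked for an offset far past the framebuffer;
+         * checking vm_pgoff > (~0UL >> PAGE_SHIFT) rejects this up front.
+         */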
+
+Reported-by: Eyal Itkin <eyalit@checkpoint.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180321154553.GA18454@kroah.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/udl/udl_fb.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/udl/udl_fb.c
++++ b/drivers/gpu/drm/udl/udl_fb.c
+@@ -159,10 +159,15 @@ static int udl_fb_mmap(struct fb_info *i
+ {
+ unsigned long start = vma->vm_start;
+ unsigned long size = vma->vm_end - vma->vm_start;
+- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
++ unsigned long offset;
+ unsigned long page, pos;
+
+- if (offset + size > info->fix.smem_len)
++ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
++ return -EINVAL;
++
++ offset = vma->vm_pgoff << PAGE_SHIFT;
++
++ if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)
+ return -EINVAL;
+
+ pos = (unsigned long)info->fix.smem_start + offset;
--- /dev/null
+From 73a88250b70954a8f27c2444e1c2411bba3c29d9 Mon Sep 17 00:00:00 2001
+From: Thomas Hellstrom <thellstrom@vmware.com>
+Date: Wed, 21 Mar 2018 10:18:38 +0100
+Subject: drm/vmwgfx: Fix a destroy-while-held mutex problem.
+
+From: Thomas Hellstrom <thellstrom@vmware.com>
+
+commit 73a88250b70954a8f27c2444e1c2411bba3c29d9 upstream.
+
+When validating legacy surfaces, the backup bo might be destroyed at
+surface validate time. However, the kms resource validation code may have
+the bo reserved, so we will destroy a locked mutex. While there shouldn't
+be any other users of that mutex when it is destroyed, it causes a lock
+leak and thus throws a lockdep error.
+
+Fix this by having the kms resource validation code hold a reference to
+the bo while we have it reserved. We do this by introducing a validation
+context which might come in handy when the kms code is extended to validate
+multiple resources or buffers.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
+Reviewed-by: Brian Paul <brianp@vmware.com>
+Reviewed-by: Sinclair Yeh <syeh@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 28 +++++++++++++++++++---------
+ drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 12 +++++++++---
+ drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 5 +++--
+ drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 5 +++--
+ 4 files changed, 34 insertions(+), 16 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -31,7 +31,6 @@
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_rect.h>
+
+-
+ /* Might need a hrtimer here? */
+ #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
+
+@@ -2531,9 +2530,12 @@ void vmw_kms_helper_buffer_finish(struct
+ * Helper to be used if an error forces the caller to undo the actions of
+ * vmw_kms_helper_resource_prepare.
+ */
+-void vmw_kms_helper_resource_revert(struct vmw_resource *res)
++void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
+ {
+- vmw_kms_helper_buffer_revert(res->backup);
++ struct vmw_resource *res = ctx->res;
++
++ vmw_kms_helper_buffer_revert(ctx->buf);
++ vmw_dmabuf_unreference(&ctx->buf);
+ vmw_resource_unreserve(res, false, NULL, 0);
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+ }
+@@ -2550,10 +2552,14 @@ void vmw_kms_helper_resource_revert(stru
+ * interrupted by a signal.
+ */
+ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+- bool interruptible)
++ bool interruptible,
++ struct vmw_validation_ctx *ctx)
+ {
+ int ret = 0;
+
++ ctx->buf = NULL;
++ ctx->res = res;
++
+ if (interruptible)
+ ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
+ else
+@@ -2572,6 +2578,8 @@ int vmw_kms_helper_resource_prepare(stru
+ res->dev_priv->has_mob);
+ if (ret)
+ goto out_unreserve;
++
++ ctx->buf = vmw_dmabuf_reference(res->backup);
+ }
+ ret = vmw_resource_validate(res);
+ if (ret)
+@@ -2579,7 +2587,7 @@ int vmw_kms_helper_resource_prepare(stru
+ return 0;
+
+ out_revert:
+- vmw_kms_helper_buffer_revert(res->backup);
++ vmw_kms_helper_buffer_revert(ctx->buf);
+ out_unreserve:
+ vmw_resource_unreserve(res, false, NULL, 0);
+ out_unlock:
+@@ -2595,11 +2603,13 @@ out_unlock:
+ * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
+ * ref-counted fence pointer is returned here.
+ */
+-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+- struct vmw_fence_obj **out_fence)
++void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
++ struct vmw_fence_obj **out_fence)
+ {
+- if (res->backup || out_fence)
+- vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
++ struct vmw_resource *res = ctx->res;
++
++ if (ctx->buf || out_fence)
++ vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
+ out_fence, NULL);
+
+ vmw_resource_unreserve(res, false, NULL, 0);
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+@@ -240,6 +240,11 @@ struct vmw_display_unit {
+ int set_gui_y;
+ };
+
++struct vmw_validation_ctx {
++ struct vmw_resource *res;
++ struct vmw_dma_buffer *buf;
++};
++
+ #define vmw_crtc_to_du(x) \
+ container_of(x, struct vmw_display_unit, crtc)
+ #define vmw_connector_to_du(x) \
+@@ -296,9 +301,10 @@ void vmw_kms_helper_buffer_finish(struct
+ struct drm_vmw_fence_rep __user *
+ user_fence_rep);
+ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+- bool interruptible);
+-void vmw_kms_helper_resource_revert(struct vmw_resource *res);
+-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
++ bool interruptible,
++ struct vmw_validation_ctx *ctx);
++void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
++void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
+ struct vmw_fence_obj **out_fence);
+ int vmw_kms_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+@@ -909,12 +909,13 @@ int vmw_kms_sou_do_surface_dirty(struct
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(framebuffer, typeof(*vfbs), base);
+ struct vmw_kms_sou_surface_dirty sdirty;
++ struct vmw_validation_ctx ctx;
+ int ret;
+
+ if (!srf)
+ srf = &vfbs->surface->res;
+
+- ret = vmw_kms_helper_resource_prepare(srf, true);
++ ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
+ if (ret)
+ return ret;
+
+@@ -933,7 +934,7 @@ int vmw_kms_sou_do_surface_dirty(struct
+ ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+ dest_x, dest_y, num_clips, inc,
+ &sdirty.base);
+- vmw_kms_helper_resource_finish(srf, out_fence);
++ vmw_kms_helper_resource_finish(&ctx, out_fence);
+
+ return ret;
+ }
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -980,12 +980,13 @@ int vmw_kms_stdu_surface_dirty(struct vm
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(framebuffer, typeof(*vfbs), base);
+ struct vmw_stdu_dirty sdirty;
++ struct vmw_validation_ctx ctx;
+ int ret;
+
+ if (!srf)
+ srf = &vfbs->surface->res;
+
+- ret = vmw_kms_helper_resource_prepare(srf, true);
++ ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
+ if (ret)
+ return ret;
+
+@@ -1008,7 +1009,7 @@ int vmw_kms_stdu_surface_dirty(struct vm
+ dest_x, dest_y, num_clips, inc,
+ &sdirty.base);
+ out_finish:
+- vmw_kms_helper_resource_finish(srf, out_fence);
++ vmw_kms_helper_resource_finish(&ctx, out_fence);
+
+ return ret;
+ }
--- /dev/null
+From 140bcaa23a1c37b694910424075a15e009120dbe Mon Sep 17 00:00:00 2001
+From: Thomas Hellstrom <thellstrom@vmware.com>
+Date: Thu, 8 Mar 2018 10:07:37 +0100
+Subject: drm/vmwgfx: Fix black screen and device errors when running without fbdev
+
+From: Thomas Hellstrom <thellstrom@vmware.com>
+
+commit 140bcaa23a1c37b694910424075a15e009120dbe upstream.
+
+When we are running without fbdev, transitioning from the login screen to
+X or gnome-shell/wayland will cause a vt switch and the driver will disable
+svga mode, losing all modesetting resources. However, the kms atomic state
+does not reflect that and may think that a crtc is still turned on, which
+will cause device errors when we try to bind an fb to the crtc, and the
+screen will remain black.
+
+Fix this by turning off all kms resources before disabling svga mode.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
+Reviewed-by: Sinclair Yeh <syeh@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 13 +++++++++++++
+ drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1 +
+ drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 11 +++++++++++
+ drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 1 -
+ 4 files changed, 25 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -1337,6 +1337,19 @@ static void __vmw_svga_disable(struct vm
+ */
+ void vmw_svga_disable(struct vmw_private *dev_priv)
+ {
++ /*
++ * Disabling SVGA will turn off device modesetting capabilities, so
++ * notify KMS about that so that it doesn't cache atomic state that
++ * isn't valid anymore, for example crtcs turned on.
++ * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
++ * but vmw_kms_lost_device() takes the reservation sem and thus we'll
++ * end up with lock order reversal. Thus, a master may actually perform
++ * a new modeset just after we call vmw_kms_lost_device() and race with
++ * vmw_svga_disable(), but that should at worst cause atomic KMS state
++ * to be inconsistent with the device, causing modesetting problems.
++ *
++ */
++ vmw_kms_lost_device(dev_priv->dev);
+ ttm_write_lock(&dev_priv->reservation_sem, false);
+ spin_lock(&dev_priv->svga_lock);
+ if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -938,6 +938,7 @@ int vmw_kms_present(struct vmw_private *
+ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
++void vmw_kms_lost_device(struct drm_device *dev);
+
+ int vmw_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -2865,3 +2865,14 @@ int vmw_kms_set_config(struct drm_mode_s
+
+ return drm_atomic_helper_set_config(set, ctx);
+ }
++
++
++/**
++ * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
++ *
++ * @dev: Pointer to the drm device
++ */
++void vmw_kms_lost_device(struct drm_device *dev)
++{
++ drm_atomic_helper_shutdown(dev);
++}
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+@@ -439,5 +439,4 @@ int vmw_kms_stdu_dma(struct vmw_private
+
+ int vmw_kms_set_config(struct drm_mode_set *set,
+ struct drm_modeset_acquire_ctx *ctx);
+-
+ #endif
--- /dev/null
+From 1705f7c534163594f8b05e060cb49fbea86ca70b Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Thu, 22 Mar 2018 16:17:17 -0700
+Subject: h8300: remove extraneous __BIG_ENDIAN definition
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 1705f7c534163594f8b05e060cb49fbea86ca70b upstream.
+
+A bugfix I did earlier caused a build regression on h8300, which defines
+the __BIG_ENDIAN macro in a slightly different way than the generic
+code:
+
+ arch/h8300/include/asm/byteorder.h:5:0: warning: "__BIG_ENDIAN" redefined
+
+We don't need to define it here, as the same macro is already provided
+by the linux/byteorder/big_endian.h, and that version does not conflict.
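+
+The clash is textual rather than numeric: both definitions expand to the
+same value, but the preprocessor warns whenever a macro is redefined with
+a different token sequence. Roughly (a sketch, not the exact headers):
+
+        /* arch/h8300/include/asm/byteorder.h (before this patch) */
+        #define __BIG_ENDIAN __ORDER_BIG_ENDIAN__   /* compiler builtin, 4321 */
+
+        /* generic linux/byteorder/big_endian.h definition */
+        #define __BIG_ENDIAN 4321                   /* same value, other tokens */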
+
+While this is a v4.16 regression, my earlier patch also got backported
+to the 4.14 and 4.15 stable kernels, so we need the fixup there as well.
+
+Link: http://lkml.kernel.org/r/20180313120752.2645129-1-arnd@arndb.de
+Fixes: 101110f6271c ("Kbuild: always define endianess in kconfig.h")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/h8300/include/asm/byteorder.h | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/arch/h8300/include/asm/byteorder.h
++++ b/arch/h8300/include/asm/byteorder.h
+@@ -2,7 +2,6 @@
+ #ifndef __H8300_BYTEORDER_H__
+ #define __H8300_BYTEORDER_H__
+
+-#define __BIG_ENDIAN __ORDER_BIG_ENDIAN__
+ #include <linux/byteorder/big_endian.h>
+
+ #endif
--- /dev/null
+From 63489f8e821144000e0bdca7e65a8d1cc23a7ee7 Mon Sep 17 00:00:00 2001
+From: Mike Kravetz <mike.kravetz@oracle.com>
+Date: Thu, 22 Mar 2018 16:17:13 -0700
+Subject: hugetlbfs: check for pgoff value overflow
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+commit 63489f8e821144000e0bdca7e65a8d1cc23a7ee7 upstream.
+
+A vma with vm_pgoff large enough to overflow a loff_t type when
+converted to a byte offset can be passed via the remap_file_pages system
+call. The hugetlbfs mmap routine uses the byte offset to calculate
+reservations and file size.
+
+A sequence such as:
+
+ mmap(0x20a00000, 0x600000, 0, 0x66033, -1, 0);
+ remap_file_pages(0x20a00000, 0x600000, 0, 0x20000000000000, 0);
+
+will result in the following when task exits/file closed,
+
+ kernel BUG at mm/hugetlb.c:749!
+ Call Trace:
+ hugetlbfs_evict_inode+0x2f/0x40
+ evict+0xcb/0x190
+ __dentry_kill+0xcb/0x150
+ __fput+0x164/0x1e0
+ task_work_run+0x84/0xa0
+ exit_to_usermode_loop+0x7d/0x80
+ do_syscall_64+0x18b/0x190
+ entry_SYSCALL_64_after_hwframe+0x3d/0xa2
+
+The overflowed pgoff value causes hugetlbfs to try to set up a mapping
+with a negative range (end < start) that leaves invalid state which
+causes the BUG.
+
+The previous overflow fix to this code was incomplete and did not take
+the remap_file_pages system call into account.
+
+[mike.kravetz@oracle.com: v3]
+ Link: http://lkml.kernel.org/r/20180309002726.7248-1-mike.kravetz@oracle.com
+[akpm@linux-foundation.org: include mmdebug.h]
+[akpm@linux-foundation.org: fix -ve left shift count on sh]
+Link: http://lkml.kernel.org/r/20180308210502.15952-1-mike.kravetz@oracle.com
+Fixes: 045c7a3f53d9 ("hugetlbfs: fix offset overflow in hugetlbfs mmap")
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Reported-by: Nic Losby <blurbdust@gmail.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Yisheng Xie <xieyisheng1@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/hugetlbfs/inode.c | 17 ++++++++++++++---
+ mm/hugetlb.c | 7 +++++++
+ 2 files changed, 21 insertions(+), 3 deletions(-)
+
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -118,6 +118,16 @@ static void huge_pagevec_release(struct
+ pagevec_reinit(pvec);
+ }
+
++/*
++ * Mask used when checking the page offset value passed in via system
++ * calls. This value will be converted to a loff_t which is signed.
++ * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
++ * value. The extra bit (- 1 in the shift value) is to take the sign
++ * bit into account.
++ */
++#define PGOFF_LOFFT_MAX \
++ (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
++
+ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ {
+ struct inode *inode = file_inode(file);
+@@ -137,12 +147,13 @@ static int hugetlbfs_file_mmap(struct fi
+ vma->vm_ops = &hugetlb_vm_ops;
+
+ /*
+- * Offset passed to mmap (before page shift) could have been
+- * negative when represented as a (l)off_t.
++ * page based offset in vm_pgoff could be sufficiently large to
++ * overflow a (l)off_t when converted to byte offset.
+ */
+- if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
++ if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
+ return -EINVAL;
+
++ /* must be huge page aligned */
+ if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
+ return -EINVAL;
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -18,6 +18,7 @@
+ #include <linux/bootmem.h>
+ #include <linux/sysfs.h>
+ #include <linux/slab.h>
++#include <linux/mmdebug.h>
+ #include <linux/sched/signal.h>
+ #include <linux/rmap.h>
+ #include <linux/string_helpers.h>
+@@ -4354,6 +4355,12 @@ int hugetlb_reserve_pages(struct inode *
+ struct resv_map *resv_map;
+ long gbl_reserve;
+
++ /* This should never happen */
++ if (from > to) {
++ VM_WARN(1, "%s called with a negative range\n", __func__);
++ return -EINVAL;
++ }
++
+ /*
+ * Only apply hugepage reservation if asked. At fault time, an
+ * attempt will be made for VM_NORESERVE to allocate a page
--- /dev/null
+From 3ffb0ba9b567a8efb9a04ed3d1ec15ff333ada22 Mon Sep 17 00:00:00 2001
+From: Vishal Verma <vishal.l.verma@intel.com>
+Date: Mon, 5 Mar 2018 16:56:13 -0700
+Subject: libnvdimm, {btt, blk}: do integrity setup before add_disk()
+
+From: Vishal Verma <vishal.l.verma@intel.com>
+
+commit 3ffb0ba9b567a8efb9a04ed3d1ec15ff333ada22 upstream.
+
+Prior to 25520d55cdb6 ("block: Inline blk_integrity in struct gendisk")
+we needed to temporarily add a zero-capacity disk before registering for
+blk-integrity. But adding a zero-capacity disk caused the partition
+table scanning to bail early, and this resulted in partitions not coming
+up after a probe of the BTT or blk namespaces.
+
+We can now register for integrity before the disk has been added, and
+this fixes the rescan problems.
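+
+The resulting order, sketched from the BTT hunk below (the blk case is
+analogous):
+
+        nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
+        set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
+        device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
+        revalidate_disk(btt->btt_disk);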
+
+Fixes: 25520d55cdb6 ("block: Inline blk_integrity in struct gendisk")
+Reported-by: Dariusz Dokupil <dariusz.dokupil@intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvdimm/blk.c | 3 +--
+ drivers/nvdimm/btt.c | 3 +--
+ 2 files changed, 2 insertions(+), 4 deletions(-)
+
+--- a/drivers/nvdimm/blk.c
++++ b/drivers/nvdimm/blk.c
+@@ -278,8 +278,6 @@ static int nsblk_attach_disk(struct nd_n
+ disk->queue = q;
+ disk->flags = GENHD_FL_EXT_DEVT;
+ nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);
+- set_capacity(disk, 0);
+- device_add_disk(dev, disk);
+
+ if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk))
+ return -ENOMEM;
+@@ -292,6 +290,7 @@ static int nsblk_attach_disk(struct nd_n
+ }
+
+ set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
++ device_add_disk(dev, disk);
+ revalidate_disk(disk);
+ return 0;
+ }
+--- a/drivers/nvdimm/btt.c
++++ b/drivers/nvdimm/btt.c
+@@ -1545,8 +1545,6 @@ static int btt_blk_init(struct btt *btt)
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
+ btt->btt_queue->queuedata = btt;
+
+- set_capacity(btt->btt_disk, 0);
+- device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
+ if (btt_meta_size(btt)) {
+ int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
+
+@@ -1558,6 +1556,7 @@ static int btt_blk_init(struct btt *btt)
+ }
+ }
+ set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
++ device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
+ btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
+ revalidate_disk(btt->btt_disk);
+
--- /dev/null
+From fece2029a9e65b9a990831afe2a2b83290cbbe26 Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Thu, 22 Mar 2018 16:17:28 -0700
+Subject: mm/khugepaged.c: convert VM_BUG_ON() to collapse fail
+
+From: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+
+commit fece2029a9e65b9a990831afe2a2b83290cbbe26 upstream.
+
+khugepaged is not yet able to convert PTE-mapped huge pages back to
+PMD-mapped ones, so we do not collapse such pages. See the check in
+khugepaged_scan_pmd().
+
+But if, between khugepaged_scan_pmd() and __collapse_huge_page_isolate(),
+somebody manages to instantiate a THP in the range and then split the PMD
+back to PTEs, we have a problem:
+VM_BUG_ON_PAGE(PageCompound(page)) gets triggered.
+
+This is possible because we drop mmap_sem during collapse and re-take it
+for write.
+
+Replace the VM_BUG_ON() with graceful collapse fail.
+
+Link: http://lkml.kernel.org/r/20180315152353.27989-1-kirill.shutemov@linux.intel.com
+Fixes: b1caa957ae6d ("khugepaged: ignore pmd tables with THP mapped with ptes")
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Laura Abbott <labbott@redhat.com>
+Cc: Jerome Marchand <jmarchan@redhat.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/khugepaged.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -530,7 +530,12 @@ static int __collapse_huge_page_isolate(
+ goto out;
+ }
+
+- VM_BUG_ON_PAGE(PageCompound(page), page);
++ /* TODO: teach khugepaged to collapse THP mapped with pte */
++ if (PageCompound(page)) {
++ result = SCAN_PAGE_COMPOUND;
++ goto out;
++ }
++
+ VM_BUG_ON_PAGE(!PageAnon(page), page);
+
+ /*
--- /dev/null
+From b3cd54b257ad95d344d121dc563d943ca39b0921 Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Thu, 22 Mar 2018 16:17:35 -0700
+Subject: mm/shmem: do not wait for lock_page() in shmem_unused_huge_shrink()
+
+From: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+
+commit b3cd54b257ad95d344d121dc563d943ca39b0921 upstream.
+
+shmem_unused_huge_shrink() gets called from reclaim path. Waiting for
+page lock may lead to deadlock there.
+
+There was a bug report that may be attributed to this:
+
+ http://lkml.kernel.org/r/alpine.LRH.2.11.1801242349220.30642@mail.ewheeler.net
+
+Replace lock_page() with trylock_page() and skip the page if we failed
+to lock it. We will get to the page on the next scan.
+
+We can test PageTransHuge() outside the page lock as we only need
+protection against splitting the page under us. Holding a pin on the
+page is enough for this.
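+
+The resulting flow in shmem_unused_huge_shrink() is roughly (a sketch of
+the relevant part only; mapping and index stand in for the real
+expressions):
+
+	page = find_get_page(mapping, index);	/* pin only, no lock */
+	if (!PageTransHuge(page)) {
+		put_page(page);
+		goto drop;		/* no huge page to split */
+	}
+	if (!trylock_page(page)) {
+		put_page(page);
+		goto leave;		/* retry on the next scan */
+	}
+	ret = split_huge_page(page);
+	unlock_page(page);
+	put_page(page);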
+
+Link: http://lkml.kernel.org/r/20180316210830.43738-1-kirill.shutemov@linux.intel.com
+Fixes: 779750d20b93 ("shmem: split huge pages beyond i_size under memory pressure")
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reported-by: Eric Wheeler <linux-mm@lists.ewheeler.net>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: <stable@vger.kernel.org> [4.8+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/shmem.c | 31 ++++++++++++++++++++-----------
+ 1 file changed, 20 insertions(+), 11 deletions(-)
+
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -493,36 +493,45 @@ next:
+ info = list_entry(pos, struct shmem_inode_info, shrinklist);
+ inode = &info->vfs_inode;
+
+- if (nr_to_split && split >= nr_to_split) {
+- iput(inode);
+- continue;
+- }
++ if (nr_to_split && split >= nr_to_split)
++ goto leave;
+
+- page = find_lock_page(inode->i_mapping,
++ page = find_get_page(inode->i_mapping,
+ (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
+ if (!page)
+ goto drop;
+
++ /* No huge page at the end of the file: nothing to split */
+ if (!PageTransHuge(page)) {
+- unlock_page(page);
+ put_page(page);
+ goto drop;
+ }
+
++ /*
++ * Leave the inode on the list if we failed to lock
++ * the page at this time.
++ *
++ * Waiting for the lock may lead to deadlock in the
++ * reclaim path.
++ */
++ if (!trylock_page(page)) {
++ put_page(page);
++ goto leave;
++ }
++
+ ret = split_huge_page(page);
+ unlock_page(page);
+ put_page(page);
+
+- if (ret) {
+- /* split failed: leave it on the list */
+- iput(inode);
+- continue;
+- }
++ /* If split failed leave the inode on the list */
++ if (ret)
++ goto leave;
+
+ split++;
+ drop:
+ list_del_init(&info->shrinklist);
+ removed++;
++leave:
+ iput(inode);
+ }
+
--- /dev/null
+From fa41b900c30b45fab03783724932dc30cd46a6be Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Thu, 22 Mar 2018 16:17:31 -0700
+Subject: mm/thp: do not wait for lock_page() in deferred_split_scan()
+
+From: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+
+commit fa41b900c30b45fab03783724932dc30cd46a6be upstream.
+
+deferred_split_scan() gets called from reclaim path. Waiting for page
+lock may lead to deadlock there.
+
+Replace lock_page() with trylock_page() and skip the page if we failed
+to lock it. We will get to the page on the next scan.
+
+Link: http://lkml.kernel.org/r/20180315150747.31945-1-kirill.shutemov@linux.intel.com
+Fixes: 9a982250f773 ("thp: introduce deferred_split_huge_page()")
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/huge_memory.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2791,11 +2791,13 @@ static unsigned long deferred_split_scan
+
+ list_for_each_safe(pos, next, &list) {
+ page = list_entry((void *)pos, struct page, mapping);
+- lock_page(page);
++ if (!trylock_page(page))
++ goto next;
+ /* split_huge_page() removes page from list on success */
+ if (!split_huge_page(page))
+ split++;
+ unlock_page(page);
++next:
+ put_page(page);
+ }
+
--- /dev/null
+From b6bdb7517c3d3f41f20e5c2948d6bc3f8897394e Mon Sep 17 00:00:00 2001
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Thu, 22 Mar 2018 16:17:20 -0700
+Subject: mm/vmalloc: add interfaces to free unmapped page table
+
+From: Toshi Kani <toshi.kani@hpe.com>
+
+commit b6bdb7517c3d3f41f20e5c2948d6bc3f8897394e upstream.
+
+On architectures with CONFIG_HAVE_ARCH_HUGE_VMAP set, ioremap() may
+create pud/pmd mappings. A kernel panic was observed on arm64 systems
+with Cortex-A75 in the following steps as described by Hanjun Guo.
+
+ 1. ioremap a 4K region; a valid page table is built,
+ 2. iounmap it; pte0 is set to 0,
+ 3. ioremap the same address with a 2M size; pgd/pmd are unchanged,
+    then a new value is set for the pmd,
+ 4. pte0 is leaked,
+ 5. the CPU may take an exception because the old pmd is still in the
+    TLB, which leads to a kernel panic.
+
+This panic is not reproducible on x86, because INVLPG, called from
+iounmap, purges all levels of entries associated with the purged
+address. x86 still has the memory leak, however.
+
+The patch changes the ioremap path to free unmapped page table(s) since
+doing so in the unmap path has the following issues:
+
+ - The iounmap() path is shared with vunmap(). Since vmap() only
+ supports pte mappings, making vunmap() to free a pte page is an
+ overhead for regular vmap users as they do not need a pte page freed
+ up.
+
+ - Checking if all entries in a pte page are cleared in the unmap path
+ is racy, and serializing this check is expensive.
+
+ - The unmap path calls free_vmap_area_noflush() to do lazy TLB purges.
+ Clearing a pud/pmd entry before the lazy TLB purges needs extra TLB
+ purge.
+
+Add two interfaces, pud_free_pmd_page() and pmd_free_pte_page(), which
+clear a given pud/pmd entry and free up a page for the lower level
+entries.
+
+This patch implements stub versions of them on x86 and arm64, which act
+as a workaround.
+
+[akpm@linux-foundation.org: fix typo in pmd_free_pte_page() stub]
+Link: http://lkml.kernel.org/r/20180314180155.19492-2-toshi.kani@hpe.com
+Fixes: e61ce6ade404e ("mm: change ioremap to set up huge I/O mappings")
+Reported-by: Lei Li <lious.lilei@hisilicon.com>
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Wang Xuefeng <wxf.wang@hisilicon.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Hanjun Guo <guohanjun@huawei.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Chintan Pandya <cpandya@codeaurora.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/mmu.c | 10 ++++++++++
+ arch/x86/mm/pgtable.c | 24 ++++++++++++++++++++++++
+ include/asm-generic/pgtable.h | 10 ++++++++++
+ lib/ioremap.c | 6 ++++--
+ 4 files changed, 48 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -941,3 +941,13 @@ int pmd_clear_huge(pmd_t *pmd)
+ pmd_clear(pmd);
+ return 1;
+ }
++
++int pud_free_pmd_page(pud_t *pud)
++{
++ return pud_none(*pud);
++}
++
++int pmd_free_pte_page(pmd_t *pmd)
++{
++ return pmd_none(*pmd);
++}
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -702,4 +702,28 @@ int pmd_clear_huge(pmd_t *pmd)
+
+ return 0;
+ }
++
++/**
++ * pud_free_pmd_page - Clear pud entry and free pmd page.
++ * @pud: Pointer to a PUD.
++ *
++ * Context: The pud range has been unmapped and TLB purged.
++ * Return: 1 if clearing the entry succeeded. 0 otherwise.
++ */
++int pud_free_pmd_page(pud_t *pud)
++{
++ return pud_none(*pud);
++}
++
++/**
++ * pmd_free_pte_page - Clear pmd entry and free pte page.
++ * @pmd: Pointer to a PMD.
++ *
++ * Context: The pmd range has been unmapped and TLB purged.
++ * Return: 1 if clearing the entry succeeded. 0 otherwise.
++ */
++int pmd_free_pte_page(pmd_t *pmd)
++{
++ return pmd_none(*pmd);
++}
+ #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -976,6 +976,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t
+ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+ int pud_clear_huge(pud_t *pud);
+ int pmd_clear_huge(pmd_t *pmd);
++int pud_free_pmd_page(pud_t *pud);
++int pmd_free_pte_page(pmd_t *pmd);
+ #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+ static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+ {
+@@ -1001,6 +1003,14 @@ static inline int pmd_clear_huge(pmd_t *
+ {
+ return 0;
+ }
++static inline int pud_free_pmd_page(pud_t *pud)
++{
++ return 0;
++}
++static inline int pmd_free_pte_page(pmd_t *pmd)
++{
++ return 0;
++}
+ #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
+ #ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+--- a/lib/ioremap.c
++++ b/lib/ioremap.c
+@@ -91,7 +91,8 @@ static inline int ioremap_pmd_range(pud_
+
+ if (ioremap_pmd_enabled() &&
+ ((next - addr) == PMD_SIZE) &&
+- IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
++ IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
++ pmd_free_pte_page(pmd)) {
+ if (pmd_set_huge(pmd, phys_addr + addr, prot))
+ continue;
+ }
+@@ -117,7 +118,8 @@ static inline int ioremap_pud_range(p4d_
+
+ if (ioremap_pud_enabled() &&
+ ((next - addr) == PUD_SIZE) &&
+- IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
++ IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
++ pud_free_pmd_page(pud)) {
+ if (pud_set_huge(pud, phys_addr + addr, prot))
+ continue;
+ }
--- /dev/null
+From 1c610d5f93c709df56787f50b3576704ac271826 Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Thu, 22 Mar 2018 16:17:42 -0700
+Subject: mm/vmscan: wake up flushers for legacy cgroups too
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit 1c610d5f93c709df56787f50b3576704ac271826 upstream.
+
+Commit 726d061fbd36 ("mm: vmscan: kick flushers when we encounter dirty
+pages on the LRU") added flusher invocation to shrink_inactive_list()
+when many dirty pages on the LRU are encountered.
+
+However, shrink_inactive_list() doesn't wake up flushers for legacy
+cgroup reclaim, so the next commit bbef938429f5 ("mm: vmscan: remove old
+flusher wakeup from direct reclaim path") removed the only source of
+flusher wakeups in the legacy memcg reclaim path.
+
+This leads to premature OOM if there are too many dirty pages in the cgroup:
+ # mkdir /sys/fs/cgroup/memory/test
+ # echo $$ > /sys/fs/cgroup/memory/test/tasks
+ # echo 50M > /sys/fs/cgroup/memory/test/memory.limit_in_bytes
+ # dd if=/dev/zero of=tmp_file bs=1M count=100
+ Killed
+
+ dd invoked oom-killer: gfp_mask=0x14000c0(GFP_KERNEL), nodemask=(null), order=0, oom_score_adj=0
+
+ Call Trace:
+ dump_stack+0x46/0x65
+ dump_header+0x6b/0x2ac
+ oom_kill_process+0x21c/0x4a0
+ out_of_memory+0x2a5/0x4b0
+ mem_cgroup_out_of_memory+0x3b/0x60
+ mem_cgroup_oom_synchronize+0x2ed/0x330
+ pagefault_out_of_memory+0x24/0x54
+ __do_page_fault+0x521/0x540
+ page_fault+0x45/0x50
+
+ Task in /test killed as a result of limit of /test
+ memory: usage 51200kB, limit 51200kB, failcnt 73
+ memory+swap: usage 51200kB, limit 9007199254740988kB, failcnt 0
+ kmem: usage 296kB, limit 9007199254740988kB, failcnt 0
+ Memory cgroup stats for /test: cache:49632KB rss:1056KB rss_huge:0KB shmem:0KB
+ mapped_file:0KB dirty:49500KB writeback:0KB swap:0KB inactive_anon:0KB
+ active_anon:1168KB inactive_file:24760KB active_file:24960KB unevictable:0KB
+ Memory cgroup out of memory: Kill process 3861 (bash) score 88 or sacrifice child
+ Killed process 3876 (dd) total-vm:8484kB, anon-rss:1052kB, file-rss:1720kB, shmem-rss:0kB
+ oom_reaper: reaped process 3876 (dd), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB
+
+Wake up flushers in legacy cgroup reclaim too.
+
+Link: http://lkml.kernel.org/r/20180315164553.17856-1-aryabinin@virtuozzo.com
+Fixes: bbef938429f5 ("mm: vmscan: remove old flusher wakeup from direct reclaim path")
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Tested-by: Shakeel Butt <shakeelb@google.com>
+Acked-by: Michal Hocko <mhocko@suse.cz>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/vmscan.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1847,6 +1847,20 @@ shrink_inactive_list(unsigned long nr_to
+ set_bit(PGDAT_WRITEBACK, &pgdat->flags);
+
+ /*
++ * If dirty pages are scanned that are not queued for IO, it
++ * implies that flushers are not doing their job. This can
++ * happen when memory pressure pushes dirty pages to the end of
++ * the LRU before the dirty limits are breached and the dirty
++ * data has expired. It can also happen when the proportion of
++ * dirty pages grows not through writes but through memory
++ * pressure reclaiming all the clean cache. And in some cases,
++ * the flushers simply cannot keep up with the allocation
++ * rate. Nudge the flusher threads in case they are asleep.
++ */
++ if (stat.nr_unqueued_dirty == nr_taken)
++ wakeup_flusher_threads(WB_REASON_VMSCAN);
++
++ /*
+ * Legacy memcg will stall in page writeback so avoid forcibly
+ * stalling here.
+ */
+@@ -1858,22 +1872,9 @@ shrink_inactive_list(unsigned long nr_to
+ if (stat.nr_dirty && stat.nr_dirty == stat.nr_congested)
+ set_bit(PGDAT_CONGESTED, &pgdat->flags);
+
+- /*
+- * If dirty pages are scanned that are not queued for IO, it
+- * implies that flushers are not doing their job. This can
+- * happen when memory pressure pushes dirty pages to the end of
+- * the LRU before the dirty limits are breached and the dirty
+- * data has expired. It can also happen when the proportion of
+- * dirty pages grows not through writes but through memory
+- * pressure reclaiming all the clean cache. And in some cases,
+- * the flushers simply cannot keep up with the allocation
+- * rate. Nudge the flusher threads in case they are asleep, but
+- * also allow kswapd to start writing pages during reclaim.
+- */
+- if (stat.nr_unqueued_dirty == nr_taken) {
+- wakeup_flusher_threads(WB_REASON_VMSCAN);
++ /* Allow kswapd to start writing pages during reclaim. */
++ if (stat.nr_unqueued_dirty == nr_taken)
+ set_bit(PGDAT_DIRTY, &pgdat->flags);
+- }
+
+ /*
+ * If kswapd scans pages marked marked for immediate
--- /dev/null
+From 3f553b308bb004eb730da8e00a28150c157c7724 Mon Sep 17 00:00:00 2001
+From: Leon Yu <chianglungyu@gmail.com>
+Date: Tue, 6 Mar 2018 23:16:24 +0800
+Subject: module: propagate error in modules_open()
+
+From: Leon Yu <chianglungyu@gmail.com>
+
+commit 3f553b308bb004eb730da8e00a28150c157c7724 upstream.
+
+otherwise the kernel can oops later in seq_release() due to dereferencing
+a NULL file->private_data, which is only set if seq_open() succeeds.
+
+BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
+IP: seq_release+0xc/0x30
+Call Trace:
+ close_pdeo+0x37/0xd0
+ proc_reg_release+0x5d/0x60
+ __fput+0x9d/0x1d0
+ ____fput+0x9/0x10
+ task_work_run+0x75/0x90
+ do_exit+0x252/0xa00
+ do_group_exit+0x36/0xb0
+ SyS_exit_group+0xf/0x10
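+
+With the fix, modules_open() roughly reads as follows (reconstructed here
+for illustration from the hunk below, not quoted verbatim from the
+kernel):
+
+	static int modules_open(struct inode *inode, struct file *file)
+	{
+		int err = seq_open(file, &modules_op);
+
+		if (!err) {
+			struct seq_file *m = file->private_data;
+
+			m->private = kallsyms_show_value() ? NULL : (void *)8ul;
+		}
+
+		return err;	/* was a hard-coded 0 that hid seq_open() failures */
+	}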
+
+Fixes: 516fb7f2e73d ("/proc/module: use the same logic as /proc/kallsyms for address exposure")
+Cc: Jessica Yu <jeyu@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: stable@vger.kernel.org # 4.15+
+Signed-off-by: Leon Yu <chianglungyu@gmail.com>
+Signed-off-by: Jessica Yu <jeyu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/module.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -4223,7 +4223,7 @@ static int modules_open(struct inode *in
+ m->private = kallsyms_show_value() ? NULL : (void *)8ul;
+ }
+
+- return 0;
++ return err;
+ }
+
+ static const struct file_operations proc_modules_operations = {
--- /dev/null
+From 843c3a59997f18060848b8632607dd04781b52d1 Mon Sep 17 00:00:00 2001
+From: Jagdish Gediya <jagdish.gediya@nxp.com>
+Date: Wed, 21 Mar 2018 05:51:46 +0530
+Subject: mtd: nand: fsl_ifc: Fix eccstat array overflow for IFC ver >= 2.0.0
+
+From: Jagdish Gediya <jagdish.gediya@nxp.com>
+
+commit 843c3a59997f18060848b8632607dd04781b52d1 upstream.
+
+The number of ECC status registers (ECCSTATx) has increased in IFC
+version 2.0.0 due to the larger SRAM size. This causes the eccstat
+array to overflow.
+
+So, replace the eccstat array with a u32 variable to make the code
+fail-safe and independent of the number of ECC status registers or the
+SRAM size.
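+
+For reference, each 32-bit ECCSTATx register packs the status of four
+sectors, one byte per sector with sector 0 in the most significant byte,
+and the 4-bit error count in the low nibble of that byte (a value of 15
+triggers the driver's blank-page check). A small userspace illustration
+of the extraction, not driver code:
+
+	#include <stdio.h>
+	#include <stdint.h>
+
+	static int ecc_errors(uint32_t eccstat, unsigned int sector)
+	{
+		return (eccstat >> ((3 - sector % 4) * 8)) & 15;
+	}
+
+	int main(void)
+	{
+		uint32_t reg = 0x0102030f;	/* sectors 0..3 of one register */
+		unsigned int s;
+
+		for (s = 0; s < 4; s++)
+			printf("sector %u: %d errors\n", s, ecc_errors(reg, s));
+		return 0;	/* prints 1, 2, 3 and 15 errors */
+	}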
+
+Fixes: bccb06c353af ("mtd: nand: ifc: update bufnum mask for ver >= 2.0.0")
+Cc: stable@vger.kernel.org # 3.18+
+Signed-off-by: Prabhakar Kushwaha <prabhakar.kushwaha@nxp.com>
+Signed-off-by: Jagdish Gediya <jagdish.gediya@nxp.com>
+Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/fsl_ifc_nand.c | 23 ++++++++++-------------
+ 1 file changed, 10 insertions(+), 13 deletions(-)
+
+--- a/drivers/mtd/nand/fsl_ifc_nand.c
++++ b/drivers/mtd/nand/fsl_ifc_nand.c
+@@ -173,14 +173,9 @@ static void set_addr(struct mtd_info *mt
+
+ /* returns nonzero if entire page is blank */
+ static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
+- u32 *eccstat, unsigned int bufnum)
++ u32 eccstat, unsigned int bufnum)
+ {
+- u32 reg = eccstat[bufnum / 4];
+- int errors;
+-
+- errors = (reg >> ((3 - bufnum % 4) * 8)) & 15;
+-
+- return errors;
++ return (eccstat >> ((3 - bufnum % 4) * 8)) & 15;
+ }
+
+ /*
+@@ -193,7 +188,7 @@ static void fsl_ifc_run_command(struct m
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+- u32 eccstat[4];
++ u32 eccstat;
+ int i;
+
+ /* set the chip select for NAND Transaction */
+@@ -228,8 +223,8 @@ static void fsl_ifc_run_command(struct m
+ if (nctrl->eccread) {
+ int errors;
+ int bufnum = nctrl->page & priv->bufnum_mask;
+- int sector = bufnum * chip->ecc.steps;
+- int sector_end = sector + chip->ecc.steps - 1;
++ int sector_start = bufnum * chip->ecc.steps;
++ int sector_end = sector_start + chip->ecc.steps - 1;
+ __be32 *eccstat_regs;
+
+ if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
+@@ -237,10 +232,12 @@ static void fsl_ifc_run_command(struct m
+ else
+ eccstat_regs = ifc->ifc_nand.v1_nand_eccstat;
+
+- for (i = sector / 4; i <= sector_end / 4; i++)
+- eccstat[i] = ifc_in32(&eccstat_regs[i]);
++ eccstat = ifc_in32(&eccstat_regs[sector_start / 4]);
++
++ for (i = sector_start; i <= sector_end; i++) {
++ if (i != sector_start && !(i % 4))
++ eccstat = ifc_in32(&eccstat_regs[i / 4]);
+
+- for (i = sector; i <= sector_end; i++) {
+ errors = check_read_ecc(mtd, ctrl, eccstat, i);
+
+ if (errors == 15) {
--- /dev/null
+From fa8e6d58c5bc260f4369c6699683d69695daed0a Mon Sep 17 00:00:00 2001
+From: Jagdish Gediya <jagdish.gediya@nxp.com>
+Date: Wed, 21 Mar 2018 04:31:36 +0530
+Subject: mtd: nand: fsl_ifc: Fix nand waitfunc return value
+
+From: Jagdish Gediya <jagdish.gediya@nxp.com>
+
+commit fa8e6d58c5bc260f4369c6699683d69695daed0a upstream.
+
+As per the IFC hardware manual, the most significant 2 bytes of the
+nand_fsr register hold the outcome of the NAND READ STATUS command.
+
+So the status value needs to be shifted and aligned as the NAND
+framework expects.
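+
+For example, with illustrative values only: if nand_fsr reads 0xc0abcdef,
+the READ STATUS byte is 0xc0, so the wait function now returns
+
+	status = nand_fsr >> 24;		/* 0xc0 */
+	return status | NAND_STATUS_WP;		/* keep forcing the WP bit, as before */
+
+instead of handing the raw register value back to the NAND core.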
+
+Fixes: 82771882d960 ("NAND Machine support for Integrated Flash Controller")
+Cc: stable@vger.kernel.org # v3.18+
+Signed-off-by: Jagdish Gediya <jagdish.gediya@nxp.com>
+Reviewed-by: Prabhakar Kushwaha <prabhakar.kushwaha@nxp.com>
+Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/fsl_ifc_nand.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/mtd/nand/fsl_ifc_nand.c
++++ b/drivers/mtd/nand/fsl_ifc_nand.c
+@@ -626,6 +626,7 @@ static int fsl_ifc_wait(struct mtd_info
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+ u32 nand_fsr;
++ int status;
+
+ /* Use READ_STATUS command, but wait for the device to be ready */
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+@@ -640,12 +641,12 @@ static int fsl_ifc_wait(struct mtd_info
+ fsl_ifc_run_command(mtd);
+
+ nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
+-
++ status = nand_fsr >> 24;
+ /*
+ * The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+- return nand_fsr | NAND_STATUS_WP;
++ return status | NAND_STATUS_WP;
+ }
+
+ /*
--- /dev/null
+From 6b00c35138b404be98b85f4a703be594cbed501c Mon Sep 17 00:00:00 2001
+From: Jagdish Gediya <jagdish.gediya@nxp.com>
+Date: Thu, 22 Mar 2018 01:08:10 +0530
+Subject: mtd: nand: fsl_ifc: Read ECCSTAT0 and ECCSTAT1 registers for IFC 2.0
+
+From: Jagdish Gediya <jagdish.gediya@nxp.com>
+
+commit 6b00c35138b404be98b85f4a703be594cbed501c upstream.
+
+Due to missing information in the hardware manual, the current
+implementation doesn't read the ECCSTAT0 and ECCSTAT1 registers
+for IFC 2.0.
+
+Add support for reading the ECCSTAT0 and ECCSTAT1 registers during
+the ECC check for IFC 2.0.
+
+Fixes: 656441478ed5 ("mtd: nand: ifc: Fix location of eccstat registers for IFC V1.0")
+Cc: stable@vger.kernel.org # v3.18+
+Signed-off-by: Jagdish Gediya <jagdish.gediya@nxp.com>
+Reviewed-by: Prabhakar Kushwaha <prabhakar.kushwaha@nxp.com>
+Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/fsl_ifc_nand.c | 6 +-----
+ include/linux/fsl_ifc.h | 6 +-----
+ 2 files changed, 2 insertions(+), 10 deletions(-)
+
+--- a/drivers/mtd/nand/fsl_ifc_nand.c
++++ b/drivers/mtd/nand/fsl_ifc_nand.c
+@@ -227,11 +227,7 @@ static void fsl_ifc_run_command(struct m
+ int sector_end = sector_start + chip->ecc.steps - 1;
+ __be32 *eccstat_regs;
+
+- if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
+- eccstat_regs = ifc->ifc_nand.v2_nand_eccstat;
+- else
+- eccstat_regs = ifc->ifc_nand.v1_nand_eccstat;
+-
++ eccstat_regs = ifc->ifc_nand.nand_eccstat;
+ eccstat = ifc_in32(&eccstat_regs[sector_start / 4]);
+
+ for (i = sector_start; i <= sector_end; i++) {
+--- a/include/linux/fsl_ifc.h
++++ b/include/linux/fsl_ifc.h
+@@ -734,11 +734,7 @@ struct fsl_ifc_nand {
+ u32 res19[0x10];
+ __be32 nand_fsr;
+ u32 res20;
+- /* The V1 nand_eccstat is actually 4 words that overlaps the
+- * V2 nand_eccstat.
+- */
+- __be32 v1_nand_eccstat[2];
+- __be32 v2_nand_eccstat[6];
++ __be32 nand_eccstat[8];
+ u32 res21[0x1c];
+ __be32 nanndcr;
+ u32 res22[0x2];
--- /dev/null
+From 6de564939e14327148e31ddcf769e34105176447 Mon Sep 17 00:00:00 2001
+From: OuYang ZhiZhong <ouyzz@yealink.com>
+Date: Sun, 11 Mar 2018 15:59:07 +0800
+Subject: mtdchar: fix usage of mtd_ooblayout_ecc()
+
+From: OuYang ZhiZhong <ouyzz@yealink.com>
+
+commit 6de564939e14327148e31ddcf769e34105176447 upstream.
+
+The section index was not being advanced, so the returned OOB region
+always described ECC section 0. Since we want information about all of
+the ECC bytes in the OOB area, we should call
+mtd_ooblayout_ecc(mtd, section++, &oobregion) until it returns -ERANGE.
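+
+In other words, callers are expected to walk every ECC section until the
+layout helper reports the end, along these lines (sketch only):
+
+	section = 0;
+	while (!mtd_ooblayout_ecc(mtd, section++, &oobregion)) {
+		/* record oobregion.offset / oobregion.length for this section */
+	}
+	/* the loop ends when mtd_ooblayout_ecc() fails; -ERANGE marks the
+	 * end of the ECC sections, any other error is a real failure */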
+
+Fixes: c2b78452a9db ("mtd: use mtd_ooblayout_xxx() helpers where appropriate")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: OuYang ZhiZhong <ouyzz@yealink.com>
+Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/mtdchar.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/mtd/mtdchar.c
++++ b/drivers/mtd/mtdchar.c
+@@ -479,7 +479,7 @@ static int shrink_ecclayout(struct mtd_i
+ for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
+ u32 eccpos;
+
+- ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
++ ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
+ if (ret < 0) {
+ if (ret != -ERANGE)
+ return ret;
+@@ -526,7 +526,7 @@ static int get_oobinfo(struct mtd_info *
+ for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
+ u32 eccpos;
+
+- ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
++ ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
+ if (ret < 0) {
+ if (ret != -ERANGE)
+ return ret;
--- /dev/null
+From f59f1caf72ba00d519c793c3deb32cd3be32edc2 Mon Sep 17 00:00:00 2001
+From: Daniel Vacek <neelx@redhat.com>
+Date: Thu, 22 Mar 2018 16:17:38 -0700
+Subject: Revert "mm: page_alloc: skip over regions of invalid pfns where possible"
+
+From: Daniel Vacek <neelx@redhat.com>
+
+commit f59f1caf72ba00d519c793c3deb32cd3be32edc2 upstream.
+
+This reverts commit b92df1de5d28 ("mm: page_alloc: skip over regions of
+invalid pfns where possible"). The commit is meant to be a boot init
+speed up skipping the loop in memmap_init_zone() for invalid pfns.
+
+But given some specific memory mapping on x86_64 (or more generally
+theoretically anywhere but on arm with CONFIG_HAVE_ARCH_PFN_VALID) the
+implementation also skips valid pfns which is plain wrong and causes
+'kernel BUG at mm/page_alloc.c:1389!'
+
+ crash> log | grep -e BUG -e RIP -e Call.Trace -e move_freepages_block -e rmqueue -e freelist -A1
+ kernel BUG at mm/page_alloc.c:1389!
+ invalid opcode: 0000 [#1] SMP
+ --
+ RIP: 0010: move_freepages+0x15e/0x160
+ --
+ Call Trace:
+ move_freepages_block+0x73/0x80
+ __rmqueue+0x263/0x460
+ get_page_from_freelist+0x7e1/0x9e0
+ __alloc_pages_nodemask+0x176/0x420
+ --
+
+ crash> page_init_bug -v | grep RAM
+ <struct resource 0xffff88067fffd2f8> 1000 - 9bfff System RAM (620.00 KiB)
+ <struct resource 0xffff88067fffd3a0> 100000 - 430bffff System RAM ( 1.05 GiB = 1071.75 MiB = 1097472.00 KiB)
+ <struct resource 0xffff88067fffd410> 4b0c8000 - 4bf9cfff System RAM ( 14.83 MiB = 15188.00 KiB)
+ <struct resource 0xffff88067fffd480> 4bfac000 - 646b1fff System RAM (391.02 MiB = 400408.00 KiB)
+ <struct resource 0xffff88067fffd560> 7b788000 - 7b7fffff System RAM (480.00 KiB)
+ <struct resource 0xffff88067fffd640> 100000000 - 67fffffff System RAM ( 22.00 GiB)
+
+ crash> page_init_bug | head -6
+ <struct resource 0xffff88067fffd560> 7b788000 - 7b7fffff System RAM (480.00 KiB)
+ <struct page 0xffffea0001ede200> 1fffff00000000 0 <struct pglist_data 0xffff88047ffd9000> 1 <struct zone 0xffff88047ffd9800> DMA32 4096 1048575
+ <struct page 0xffffea0001ede200> 505736 505344 <struct page 0xffffea0001ed8000> 505855 <struct page 0xffffea0001edffc0>
+ <struct page 0xffffea0001ed8000> 0 0 <struct pglist_data 0xffff88047ffd9000> 0 <struct zone 0xffff88047ffd9000> DMA 1 4095
+ <struct page 0xffffea0001edffc0> 1fffff00000400 0 <struct pglist_data 0xffff88047ffd9000> 1 <struct zone 0xffff88047ffd9800> DMA32 4096 1048575
+ BUG, zones differ!
+
+ crash> kmem -p 77fff000 78000000 7b5ff000 7b600000 7b787000 7b788000
+ PAGE PHYSICAL MAPPING INDEX CNT FLAGS
+ ffffea0001e00000 78000000 0 0 0 0
+ ffffea0001ed7fc0 7b5ff000 0 0 0 0
+ ffffea0001ed8000 7b600000 0 0 0 0 <<<<
+ ffffea0001ede1c0 7b787000 0 0 0 0
+ ffffea0001ede200 7b788000 0 0 1 1fffff00000000
+
+Link: http://lkml.kernel.org/r/20180316143855.29838-1-neelx@redhat.com
+Fixes: b92df1de5d28 ("mm: page_alloc: skip over regions of invalid pfns where possible")
+Signed-off-by: Daniel Vacek <neelx@redhat.com>
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Pavel Tatashin <pasha.tatashin@oracle.com>
+Cc: Paul Burton <paul.burton@imgtec.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/memblock.h | 1 -
+ mm/memblock.c | 28 ----------------------------
+ mm/page_alloc.c | 11 +----------
+ 3 files changed, 1 insertion(+), 39 deletions(-)
+
+--- a/include/linux/memblock.h
++++ b/include/linux/memblock.h
+@@ -187,7 +187,6 @@ int memblock_search_pfn_nid(unsigned lon
+ unsigned long *end_pfn);
+ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
+ unsigned long *out_end_pfn, int *out_nid);
+-unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);
+
+ /**
+ * for_each_mem_pfn_range - early memory pfn range iterator
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -1101,34 +1101,6 @@ void __init_memblock __next_mem_pfn_rang
+ *out_nid = r->nid;
+ }
+
+-unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
+- unsigned long max_pfn)
+-{
+- struct memblock_type *type = &memblock.memory;
+- unsigned int right = type->cnt;
+- unsigned int mid, left = 0;
+- phys_addr_t addr = PFN_PHYS(++pfn);
+-
+- do {
+- mid = (right + left) / 2;
+-
+- if (addr < type->regions[mid].base)
+- right = mid;
+- else if (addr >= (type->regions[mid].base +
+- type->regions[mid].size))
+- left = mid + 1;
+- else {
+- /* addr is within the region, so pfn is valid */
+- return pfn;
+- }
+- } while (left < right);
+-
+- if (right == type->cnt)
+- return -1UL;
+- else
+- return PHYS_PFN(type->regions[right].base);
+-}
+-
+ /**
+ * memblock_set_node - set node ID on memblock regions
+ * @base: base of area to set node ID for
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5348,17 +5348,8 @@ void __meminit memmap_init_zone(unsigned
+ if (context != MEMMAP_EARLY)
+ goto not_early;
+
+- if (!early_pfn_valid(pfn)) {
+-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+- /*
+- * Skip to the pfn preceding the next valid one (or
+- * end_pfn), such that we hit a valid pfn (or end_pfn)
+- * on our next iteration of the loop.
+- */
+- pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
+-#endif
++ if (!early_pfn_valid(pfn))
+ continue;
+- }
+ if (!early_pfn_in_nid(pfn, nid))
+ continue;
+ if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
--- /dev/null
+From 78dc897b7ee67205423dbbc6b56be49fb18d15b5 Mon Sep 17 00:00:00 2001
+From: Larry Finger <Larry.Finger@lwfinger.net>
+Date: Thu, 22 Feb 2018 14:28:59 -0600
+Subject: rtlwifi: rtl8723be: Fix loss of signal
+
+From: Larry Finger <Larry.Finger@lwfinger.net>
+
+commit 78dc897b7ee67205423dbbc6b56be49fb18d15b5 upstream.
+
+In commit c713fb071edc ("rtlwifi: rtl8821ae: Fix connection lost problem
+correctly") a problem in rtl8821ae that caused loss of signal was fixed.
+That same problem has now been reported for rtl8723be. Accordingly,
+the ASPM L1 latency has been increased from 0 to 7 to fix the instability.
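+
+Assuming ASPM_L1_LATENCY is 7, as the changelog states, the byte written
+to configuration space offset 0x70f becomes
+
+	tmp8 | BIT(7) | (7 << 3)	/* i.e. tmp8 | 0x80 | 0x38 */
+
+rather than just tmp8 | BIT(7).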
+
+Signed-off-by: Larry Finger <Larry.Finger@lwfinger.net>
+Cc: Stable <stable@vger.kernel.org>
+Tested-by: James Cameron <quozl@laptop.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+@@ -1124,7 +1124,8 @@ static void _rtl8723be_enable_aspm_back_
+
+ /* Configuration Space offset 0x70f BIT7 is used to control L0S */
+ tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f);
+- _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7));
++ _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7) |
++ ASPM_L1_LATENCY << 3);
+
+ /* Configuration Space offset 0x719 Bit3 is for L1
+ * BIT4 is for clock request
cgroup-fix-rule-checking-for-threaded-mode-switching.patch
nfsd-remove-blocked-locks-on-client-teardown.patch
media-tegra-cec-reset-rx_buf_cnt-when-start-bit-detected.patch
+hugetlbfs-check-for-pgoff-value-overflow.patch
+h8300-remove-extraneous-__big_endian-definition.patch
+mm-vmalloc-add-interfaces-to-free-unmapped-page-table.patch
+x86-mm-implement-free-pmd-pte-page-interfaces.patch
+mm-khugepaged.c-convert-vm_bug_on-to-collapse-fail.patch
+mm-thp-do-not-wait-for-lock_page-in-deferred_split_scan.patch
+mm-shmem-do-not-wait-for-lock_page-in-shmem_unused_huge_shrink.patch
+revert-mm-page_alloc-skip-over-regions-of-invalid-pfns-where-possible.patch
+drm-vmwgfx-fix-black-screen-and-device-errors-when-running-without-fbdev.patch
+drm-vmwgfx-fix-a-destoy-while-held-mutex-problem.patch
+drm-radeon-don-t-turn-off-dp-sink-when-disconnected.patch
+drm-amd-display-we-shouldn-t-set-format_default-on-plane-as-atomic-driver.patch
+drm-amd-display-add-one-to-edid-s-audio-channel-count-when-passing-to-dc.patch
+drm-reject-getfb-for-multi-plane-framebuffers.patch
+drm-udl-properly-check-framebuffer-mmap-offsets.patch
+mm-vmscan-wake-up-flushers-for-legacy-cgroups-too.patch
+module-propagate-error-in-modules_open.patch
+acpi-numa-fix-pxm-to-online-numa-node-associations.patch
+acpi-watchdog-fix-off-by-one-error-at-resource-assignment.patch
+libnvdimm-btt-blk-do-integrity-setup-before-add_disk.patch
+brcmfmac-fix-p2p_device-ethernet-address-generation.patch
+rtlwifi-rtl8723be-fix-loss-of-signal.patch
+tracing-probeevent-fix-to-support-minus-offset-from-symbol.patch
+mtdchar-fix-usage-of-mtd_ooblayout_ecc.patch
+mtd-nand-fsl_ifc-fix-nand-waitfunc-return-value.patch
+mtd-nand-fsl_ifc-fix-eccstat-array-overflow-for-ifc-ver-2.0.0.patch
+mtd-nand-fsl_ifc-read-eccstat0-and-eccstat1-registers-for-ifc-2.0.patch
+staging-ncpfs-memory-corruption-in-ncp_read_kernel.patch
+can-peak-pcie_fd-fix-echo_skb-is-occupied-bug.patch
+can-peak-pcie_fd-remove-useless-code-when-interface-starts.patch
+can-ifi-repair-the-error-handling.patch
+can-ifi-check-core-revision-upon-probe.patch
+can-cc770-fix-stalls-on-rt-linux-remove-redundant-irq-ack.patch
+can-cc770-fix-queue-stall-dropped-rtr-reply.patch
+can-cc770-fix-use-after-free-in-cc770_tx_interrupt.patch
--- /dev/null
+From 4c41aa24baa4ed338241d05494f2c595c885af8f Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Mon, 19 Mar 2018 14:07:45 +0300
+Subject: staging: ncpfs: memory corruption in ncp_read_kernel()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 4c41aa24baa4ed338241d05494f2c595c885af8f upstream.
+
+If the server is malicious then *bytes_read could be larger than the
+size of the "target" buffer. It would lead to memory corruption when we
+do the memcpy().
+
+Reported-by: Dr Silvio Cesare of InfoSect <silvio.cesare@gmail.com>
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ncpfs/ncplib_kernel.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/ncpfs/ncplib_kernel.c
++++ b/fs/ncpfs/ncplib_kernel.c
+@@ -981,6 +981,10 @@ ncp_read_kernel(struct ncp_server *serve
+ goto out;
+ }
+ *bytes_read = ncp_reply_be16(server, 0);
++ if (*bytes_read > to_read) {
++ result = -EINVAL;
++ goto out;
++ }
+ source = ncp_reply_data(server, 2 + (offset & 1));
+
+ memcpy(target, source, *bytes_read);
--- /dev/null
+From c5d343b6b7badd1f5fe0873eff2e8d63a193e732 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Sat, 17 Mar 2018 21:38:10 +0900
+Subject: tracing: probeevent: Fix to support minus offset from symbol
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit c5d343b6b7badd1f5fe0873eff2e8d63a193e732 upstream.
+
+In Documentation/trace/kprobetrace.txt, it says
+
+ @SYM[+|-offs] : Fetch memory at SYM +|- offs (SYM should be a data symbol)
+
+However, the parser doesn't parse minus offset correctly, since
+commit 2fba0c8867af ("tracing/kprobes: Fix probe offset to be
+unsigned") drops minus ("-") offset support for kprobe probe
+address usage.
+
+This fixes traceprobe_split_symbol_offset() to parse a minus offset
+again, with a check on the offset range, and adds a minus offset check
+in kprobe probe address usage.
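+
+A small userspace illustration of the new split logic (the kernel uses
+strpbrk() plus kstrtol(); this is only a sketch with a hypothetical
+symbol name, not the kernel code):
+
+	#include <stdio.h>
+	#include <stdlib.h>
+	#include <string.h>
+
+	int main(void)
+	{
+		char sym[] = "my_data_symbol-16";	/* hypothetical symbol */
+		char *tmp = strpbrk(sym, "+-");
+		long offset = 0;
+
+		if (tmp) {
+			offset = strtol(tmp, NULL, 0);
+			*tmp = '\0';
+		}
+		printf("symbol=%s offset=%ld\n", sym, offset);
+		/* prints: symbol=my_data_symbol offset=-16 */
+		return 0;
+	}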
+
+Link: http://lkml.kernel.org/r/152129028983.31874.13419301530285775521.stgit@devbox
+
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Tom Zanussi <tom.zanussi@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
+Cc: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
+Cc: stable@vger.kernel.org
+Fixes: 2fba0c8867af ("tracing/kprobes: Fix probe offset to be unsigned")
+Acked-by: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_kprobe.c | 4 ++--
+ kernel/trace/trace_probe.c | 8 +++-----
+ kernel/trace/trace_probe.h | 2 +-
+ 3 files changed, 6 insertions(+), 8 deletions(-)
+
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -635,7 +635,7 @@ static int create_trace_kprobe(int argc,
+ char *symbol = NULL, *event = NULL, *group = NULL;
+ int maxactive = 0;
+ char *arg;
+- unsigned long offset = 0;
++ long offset = 0;
+ void *addr = NULL;
+ char buf[MAX_EVENT_NAME_LEN];
+
+@@ -723,7 +723,7 @@ static int create_trace_kprobe(int argc,
+ symbol = argv[1];
+ /* TODO: support .init module functions */
+ ret = traceprobe_split_symbol_offset(symbol, &offset);
+- if (ret) {
++ if (ret || offset < 0 || offset > UINT_MAX) {
+ pr_info("Failed to parse either an address or a symbol.\n");
+ return ret;
+ }
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -320,7 +320,7 @@ static fetch_func_t get_fetch_size_funct
+ }
+
+ /* Split symbol and offset. */
+-int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
++int traceprobe_split_symbol_offset(char *symbol, long *offset)
+ {
+ char *tmp;
+ int ret;
+@@ -328,13 +328,11 @@ int traceprobe_split_symbol_offset(char
+ if (!offset)
+ return -EINVAL;
+
+- tmp = strchr(symbol, '+');
++ tmp = strpbrk(symbol, "+-");
+ if (tmp) {
+- /* skip sign because kstrtoul doesn't accept '+' */
+- ret = kstrtoul(tmp + 1, 0, offset);
++ ret = kstrtol(tmp, 0, offset);
+ if (ret)
+ return ret;
+-
+ *tmp = '\0';
+ } else
+ *offset = 0;
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -353,7 +353,7 @@ extern int traceprobe_conflict_field_nam
+ extern void traceprobe_update_arg(struct probe_arg *arg);
+ extern void traceprobe_free_probe_arg(struct probe_arg *arg);
+
+-extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset);
++extern int traceprobe_split_symbol_offset(char *symbol, long *offset);
+
+ /* Sum up total data length for dynamic arraies (strings) */
+ static nokprobe_inline int
--- /dev/null
+From 28ee90fe6048fa7b7ceaeb8831c0e4e454a4cf89 Mon Sep 17 00:00:00 2001
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Thu, 22 Mar 2018 16:17:24 -0700
+Subject: x86/mm: implement free pmd/pte page interfaces
+
+From: Toshi Kani <toshi.kani@hpe.com>
+
+commit 28ee90fe6048fa7b7ceaeb8831c0e4e454a4cf89 upstream.
+
+Implement pud_free_pmd_page() and pmd_free_pte_page() on x86, which
+clear a given pud/pmd entry and free up lower level page table(s).
+
+The address range associated with the pud/pmd entry must have been
+purged by INVLPG.
+
+Link: http://lkml.kernel.org/r/20180314180155.19492-3-toshi.kani@hpe.com
+Fixes: e61ce6ade404e ("mm: change ioremap to set up huge I/O mappings")
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Reported-by: Lei Li <lious.lilei@hisilicon.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/pgtable.c | 28 ++++++++++++++++++++++++++--
+ 1 file changed, 26 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -712,7 +712,22 @@ int pmd_clear_huge(pmd_t *pmd)
+ */
+ int pud_free_pmd_page(pud_t *pud)
+ {
+- return pud_none(*pud);
++ pmd_t *pmd;
++ int i;
++
++ if (pud_none(*pud))
++ return 1;
++
++ pmd = (pmd_t *)pud_page_vaddr(*pud);
++
++ for (i = 0; i < PTRS_PER_PMD; i++)
++ if (!pmd_free_pte_page(&pmd[i]))
++ return 0;
++
++ pud_clear(pud);
++ free_page((unsigned long)pmd);
++
++ return 1;
+ }
+
+ /**
+@@ -724,6 +739,15 @@ int pud_free_pmd_page(pud_t *pud)
+ */
+ int pmd_free_pte_page(pmd_t *pmd)
+ {
+- return pmd_none(*pmd);
++ pte_t *pte;
++
++ if (pmd_none(*pmd))
++ return 1;
++
++ pte = (pte_t *)pmd_page_vaddr(*pmd);
++ pmd_clear(pmd);
++ free_page((unsigned long)pte);
++
++ return 1;
+ }
+ #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */