--- /dev/null
+From 2be147f7459db5bbf292e0a6f135037b55e20b39 Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Thu, 3 May 2018 13:17:12 -0500
+Subject: atm: zatm: Fix potential Spectre v1
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+commit 2be147f7459db5bbf292e0a6f135037b55e20b39 upstream.
+
+pool can be indirectly controlled by user-space, potentially leading to
+exploitation of the Spectre variant 1 vulnerability.
+
+This issue was detected with the help of Smatch:
+
+drivers/atm/zatm.c:1462 zatm_ioctl() warn: potential spectre issue
+'zatm_dev->pool_info' (local cap)
+
+Fix this by sanitizing pool before using it to index
+zatm_dev->pool_info.
+
+Notice that given that speculation windows are large, the policy is
+to kill the speculation on the first load and not worry if it can be
+completed with a dependent load/store [1].
+
+[1] https://marc.info/?l=linux-kernel&m=152449131114778&w=2
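+
+For illustration only (the helper and names below are made up, not taken
+from the zatm driver), the general pattern the patch follows is: bounds
+check first, then clamp the index under speculation with
+array_index_nospec() before the dependent load:
+
+    #include <linux/nospec.h>
+
+    /* idx comes from userspace; nr is the number of valid entries */
+    static int get_entry(const int *table, int nr, int idx, int *out)
+    {
+            if (idx < 0 || idx >= nr)
+                    return -EINVAL;
+            /* clamp idx even under speculation before the dependent load */
+            idx = array_index_nospec(idx, nr);
+            *out = table[idx];
+            return 0;
+    }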
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/atm/zatm.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/atm/zatm.c
++++ b/drivers/atm/zatm.c
+@@ -28,6 +28,7 @@
+ #include <asm/io.h>
+ #include <linux/atomic.h>
+ #include <linux/uaccess.h>
++#include <linux/nospec.h>
+
+ #include "uPD98401.h"
+ #include "uPD98402.h"
+@@ -1458,6 +1459,8 @@ static int zatm_ioctl(struct atm_dev *de
+ return -EFAULT;
+ if (pool < 0 || pool > ZATM_LAST_POOL)
+ return -EINVAL;
++ pool = array_index_nospec(pool,
++ ZATM_LAST_POOL + 1);
+ spin_lock_irqsave(&zatm_dev->lock, flags);
+ info = zatm_dev->pool_info[pool];
+ if (cmd == ZATM_GETPOOLZ) {
--- /dev/null
+From 0e030a373df3b8792b8991740fc31fe0629c6e58 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= <u.kleine-koenig@pengutronix.de>
+Date: Wed, 25 Apr 2018 16:50:39 +0200
+Subject: can: flexcan: fix endianess detection
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+commit 0e030a373df3b8792b8991740fc31fe0629c6e58 upstream.
+
+In commit 88462d2a7830 ("can: flexcan: Remodel FlexCAN register r/w APIs
+for big endian FlexCAN controllers.") the following logic was
+implemented:
+
+ if the dt property "big-endian" is given or
+ the device is compatible to "fsl,p1010-flexcan":
+ use big-endian mode;
+ else
+ use little-endian mode;
+
+This relies on commit d50f4630c2e1 ("arm: dts: Remove p1010-flexcan
+compatible from imx series dts") which was applied a few commits later.
+Without this commit (or an old device tree used for booting a new
+kernel) the flexcan devices on i.MX25, i.MX28, i.MX35 and i.MX53 match
+the 'the device is compatible to "fsl,p1010-flexcan"' test and so are
+switched erroneously to big endian mode.
+
+Instead of the check above, put a quirk in the devtype data and rely on
+of_match_device() yielding the most specific compatible match.
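+
+As an illustrative sketch (simplified, not the exact probe code), the
+devtype data is now taken from the most specific compatible, so an old
+i.MX25 device tree that still lists "fsl,p1010-flexcan" as a fallback
+resolves to the i.MX25 entry and stays little-endian unless "big-endian"
+or the new quirk says otherwise:
+
+    const struct of_device_id *of_id =
+            of_match_device(flexcan_of_match, &pdev->dev);
+    const struct flexcan_devtype_data *devtype_data;
+
+    if (!of_id)
+            return -ENODEV;
+    devtype_data = of_id->data;
+
+    if (of_property_read_bool(pdev->dev.of_node, "big-endian") ||
+        (devtype_data->quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN)) {
+            /* use the big-endian register accessors */
+    } else {
+            /* use the little-endian register accessors */
+    }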
+
+Fixes: 88462d2a7830 ("can: flexcan: Remodel FlexCAN register r/w APIs for big endian FlexCAN controllers.")
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Tested-by: Gavin Schenk <g.schenk@eckelmann.de>
+Cc: linux-stable <stable@vger.kernel.org> # >= v4.16
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/flexcan.c | 26 ++++++++++++++------------
+ 1 file changed, 14 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -200,6 +200,7 @@
+ #define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */
+ #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */
+ #define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */
++#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7) /* default to BE register access */
+
+ /* Structure of the message buffer */
+ struct flexcan_mb {
+@@ -288,6 +289,12 @@ struct flexcan_priv {
+
+ static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
++ FLEXCAN_QUIRK_BROKEN_PERR_STATE |
++ FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN,
++};
++
++static const struct flexcan_devtype_data fsl_imx25_devtype_data = {
++ .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+ };
+
+@@ -1251,9 +1258,9 @@ static void unregister_flexcandev(struct
+ static const struct of_device_id flexcan_of_match[] = {
+ { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
+ { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
+- { .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, },
+- { .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, },
+- { .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, },
++ { .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, },
++ { .compatible = "fsl,imx35-flexcan", .data = &fsl_imx25_devtype_data, },
++ { .compatible = "fsl,imx25-flexcan", .data = &fsl_imx25_devtype_data, },
+ { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
+ { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
+ { .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
+@@ -1337,18 +1344,13 @@ static int flexcan_probe(struct platform
+
+ priv = netdev_priv(dev);
+
+- if (of_property_read_bool(pdev->dev.of_node, "big-endian")) {
++ if (of_property_read_bool(pdev->dev.of_node, "big-endian") ||
++ devtype_data->quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) {
+ priv->read = flexcan_read_be;
+ priv->write = flexcan_write_be;
+ } else {
+- if (of_device_is_compatible(pdev->dev.of_node,
+- "fsl,p1010-flexcan")) {
+- priv->read = flexcan_read_be;
+- priv->write = flexcan_write_be;
+- } else {
+- priv->read = flexcan_read_le;
+- priv->write = flexcan_write_le;
+- }
++ priv->read = flexcan_read_le;
++ priv->write = flexcan_write_le;
+ }
+
+ priv->can.clock.freq = clock_freq;
--- /dev/null
+From 5cec9425b41dcf834c3d48776900d6acb7e96f38 Mon Sep 17 00:00:00 2001
+From: Lukas Wunner <lukas@wunner.de>
+Date: Wed, 9 May 2018 14:38:43 +0200
+Subject: can: hi311x: Acquire SPI lock on ->do_get_berr_counter
+
+From: Lukas Wunner <lukas@wunner.de>
+
+commit 5cec9425b41dcf834c3d48776900d6acb7e96f38 upstream.
+
+hi3110_get_berr_counter() may run concurrently with the rest of the driver
+but neglects to acquire the lock protecting access to the SPI device.
+As a result, it and the rest of the driver may clobber each other's tx
+and rx buffers.
+
+We became aware of this issue because transmission of packets with
+"cangen -g 0 -i -x" frequently hung. It turns out that agetty executes
+->do_get_berr_counter every few seconds via the following call stack:
+
+ CPU: 2 PID: 1605 Comm: agetty
+ [<7f3f7500>] (hi3110_get_berr_counter [hi311x])
+ [<7f130204>] (can_fill_info [can_dev])
+ [<80693bc0>] (rtnl_fill_ifinfo)
+ [<806949ec>] (rtnl_dump_ifinfo)
+ [<806b4834>] (netlink_dump)
+ [<806b4bc8>] (netlink_recvmsg)
+ [<8065f180>] (sock_recvmsg)
+ [<80660f90>] (___sys_recvmsg)
+ [<80661e7c>] (__sys_recvmsg)
+ [<80661ec0>] (SyS_recvmsg)
+ [<80108b20>] (ret_fast_syscall+0x0/0x1c)
+
+agetty listens to netlink messages in order to update the login prompt
+when IP addresses change (if /etc/issue contains \4 or \6 escape codes):
+https://git.kernel.org/pub/scm/utils/util-linux/util-linux.git/commit/?id=e36deb6424e8
+
+It's a useful feature, though it seems questionable that it causes CAN
+bit error statistics to be queried.
+
+Be that as it may, if hi3110_get_berr_counter() is invoked while a frame
+is sent by hi3110_hw_tx(), bogus SPI transfers like the following may
+occur:
+
+ => 12 00 (hi3110_get_berr_counter() wanted to transmit
+ EC 00 to query the transmit error counter,
+ but the first byte was overwritten by
+ hi3110_hw_tx_frame())
+
+ => EA 00 3E 80 01 FB (hi3110_hw_tx_frame() wanted to transmit a
+ frame, but the first byte was overwritten by
+ hi3110_get_berr_counter() because it wanted
+ to query the receive error counter)
+
+This sequence hangs the transmission because the driver believes it has
+sent a frame and waits for the interrupt signaling completion, but in
+reality the chip has never sent away the frame since the commands it
+received were malformed.
+
+Fix by acquiring the SPI lock in hi3110_get_berr_counter().
+
+I've scrutinized the entire driver for further unlocked SPI accesses but
+found no others.
+
+Cc: Mathias Duckeck <m.duckeck@kunbus.de>
+Cc: Akshay Bhat <akshay.bhat@timesys.com>
+Cc: Casey Fitzpatrick <casey.fitzpatrick@timesys.com>
+Cc: Stef Walter <stefw@redhat.com>
+Cc: Karel Zak <kzak@redhat.com>
+Cc: stable@vger.kernel.org # v4.12+
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Reviewed-by: Akshay Bhat <akshay.bhat@timesys.com>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/spi/hi311x.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/can/spi/hi311x.c
++++ b/drivers/net/can/spi/hi311x.c
+@@ -427,8 +427,10 @@ static int hi3110_get_berr_counter(const
+ struct hi3110_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
+
++ mutex_lock(&priv->hi3110_lock);
+ bec->txerr = hi3110_read(spi, HI3110_READ_TEC);
+ bec->rxerr = hi3110_read(spi, HI3110_READ_REC);
++ mutex_unlock(&priv->hi3110_lock);
+
+ return 0;
+ }
--- /dev/null
+From 32bee8f48fa048a3198109de50e51c092507ff52 Mon Sep 17 00:00:00 2001
+From: Lukas Wunner <lukas@wunner.de>
+Date: Wed, 9 May 2018 14:43:43 +0200
+Subject: can: hi311x: Work around TX complete interrupt erratum
+
+From: Lukas Wunner <lukas@wunner.de>
+
+commit 32bee8f48fa048a3198109de50e51c092507ff52 upstream.
+
+When sending packets as fast as possible using "cangen -g 0 -i -x", the
+HI-3110 occasionally latches the interrupt pin high on completion of a
+packet, but doesn't set the TXCPLT bit in the INTF register. The INTF
+register contains 0x00 as if no interrupt has occurred. Even waiting
+for a few milliseconds after the interrupt doesn't help.
+
+Work around this apparent erratum by instead checking the TXMTY bit in
+the STATF register ("TX FIFO empty"). We know that we've queued up a
+packet for transmission if priv->tx_len is nonzero. If the TX FIFO is
+empty, transmission of that packet must have completed.
+
+Note that this is congruent with our handling of received packets, which
+likewise gleans from the STATF register whether a packet is waiting in
+the RX FIFO, instead of looking at the INTF register.
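+
+As a rough sketch of the resulting check in the interrupt handler (using
+only names that appear in the patch below, surrounding code omitted):
+
+    if (priv->tx_len && (statf & HI3110_STAT_TXMTY)) {
+            /* the queued frame has left the TX FIFO: account it and
+             * wake the queue even though INTF never reported TXCPLT */
+    }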
+
+Cc: Mathias Duckeck <m.duckeck@kunbus.de>
+Cc: Akshay Bhat <akshay.bhat@timesys.com>
+Cc: Casey Fitzpatrick <casey.fitzpatrick@timesys.com>
+Cc: stable@vger.kernel.org # v4.12+
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Acked-by: Akshay Bhat <akshay.bhat@timesys.com>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/spi/hi311x.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/can/spi/hi311x.c
++++ b/drivers/net/can/spi/hi311x.c
+@@ -91,6 +91,7 @@
+ #define HI3110_STAT_BUSOFF BIT(2)
+ #define HI3110_STAT_ERRP BIT(3)
+ #define HI3110_STAT_ERRW BIT(4)
++#define HI3110_STAT_TXMTY BIT(7)
+
+ #define HI3110_BTR0_SJW_SHIFT 6
+ #define HI3110_BTR0_BRP_SHIFT 0
+@@ -737,10 +738,7 @@ static irqreturn_t hi3110_can_ist(int ir
+ }
+ }
+
+- if (intf == 0)
+- break;
+-
+- if (intf & HI3110_INT_TXCPLT) {
++ if (priv->tx_len && statf & HI3110_STAT_TXMTY) {
+ net->stats.tx_packets++;
+ net->stats.tx_bytes += priv->tx_len - 1;
+ can_led_event(net, CAN_LED_EVENT_TX);
+@@ -750,6 +748,9 @@ static irqreturn_t hi3110_can_ist(int ir
+ }
+ netif_wake_queue(net);
+ }
++
++ if (intf == 0)
++ break;
+ }
+ mutex_unlock(&priv->hi3110_lock);
+ return IRQ_HANDLED;
--- /dev/null
+From 6ee00865ffe4e8c8ba4a68d26db53c7ec09bbb89 Mon Sep 17 00:00:00 2001
+From: Jimmy Assarsson <extja@kvaser.com>
+Date: Fri, 20 Apr 2018 14:38:46 +0200
+Subject: can: kvaser_usb: Increase correct stats counter in kvaser_usb_rx_can_msg()
+
+From: Jimmy Assarsson <extja@kvaser.com>
+
+commit 6ee00865ffe4e8c8ba4a68d26db53c7ec09bbb89 upstream.
+
+Increase rx_dropped, not tx_dropped, if alloc_can_skb() fails.
+
+Signed-off-by: Jimmy Assarsson <extja@kvaser.com>
+Cc: linux-stable <stable@vger.kernel.org>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/usb/kvaser_usb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -1179,7 +1179,7 @@ static void kvaser_usb_rx_can_msg(const
+
+ skb = alloc_can_skb(priv->netdev, &cf);
+ if (!skb) {
+- stats->tx_dropped++;
++ stats->rx_dropped++;
+ return;
+ }
+
--- /dev/null
+From 3a15b38fd2efc1d648cb33186bf71e9138c93491 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Thu, 3 May 2018 16:10:09 +0200
+Subject: ceph: fix rsize/wsize capping in ceph_direct_read_write()
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit 3a15b38fd2efc1d648cb33186bf71e9138c93491 upstream.
+
+rsize/wsize cap should be applied before ceph_osdc_new_request() is
+called. Otherwise, if the size is limited by the cap instead of the
+stripe unit, ceph_osdc_new_request() would set up an extent op that is
+bigger than what dio_get_pages_alloc() would pin and add to the page
+vector, triggering asserts in the messenger.
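+
+A hedged sketch of the intended ordering (argument lists trimmed to the
+relevant parts):
+
+    size = min_t(u64, size, write ? fsc->mount_options->wsize
+                                  : fsc->mount_options->rsize);
+    req = ceph_osdc_new_request(..., &size, ...);  /* extent op sized to the cap */
+    pages = dio_get_pages_alloc(iter, size, ...);  /* pins at most 'size' bytes */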
+
+Cc: stable@vger.kernel.org
+Fixes: 95cca2b44e54 ("ceph: limit osd write size")
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: "Yan, Zheng" <zyan@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ceph/file.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -878,6 +878,11 @@ ceph_direct_read_write(struct kiocb *ioc
+ size_t start = 0;
+ ssize_t len;
+
++ if (write)
++ size = min_t(u64, size, fsc->mount_options->wsize);
++ else
++ size = min_t(u64, size, fsc->mount_options->rsize);
++
+ vino = ceph_vino(inode);
+ req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
+ vino, pos, &size, 0,
+@@ -893,11 +898,6 @@ ceph_direct_read_write(struct kiocb *ioc
+ break;
+ }
+
+- if (write)
+- size = min_t(u64, size, fsc->mount_options->wsize);
+- else
+- size = min_t(u64, size, fsc->mount_options->rsize);
+-
+ len = size;
+ pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
+ if (IS_ERR(pages)) {
--- /dev/null
+From 0a0b98734479aa5b3c671d5190e86273372cab95 Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Fri, 11 May 2018 02:19:01 +0200
+Subject: compat: fix 4-byte infoleak via uninitialized struct field
+
+From: Jann Horn <jannh@google.com>
+
+commit 0a0b98734479aa5b3c671d5190e86273372cab95 upstream.
+
+Commit 3a4d44b61625 ("ntp: Move adjtimex related compat syscalls to
+native counterparts") removed the memset() in compat_get_timex(). Since
+then, the compat adjtimex syscall can invoke do_adjtimex() with an
+uninitialized ->tai.
+
+If do_adjtimex() doesn't write to ->tai (e.g. because the arguments are
+invalid), compat_put_timex() then copies the uninitialized ->tai field
+to userspace.
+
+Fix it by adding the memset() back.
+
+Fixes: 3a4d44b61625 ("ntp: Move adjtimex related compat syscalls to native counterparts")
+Signed-off-by: Jann Horn <jannh@google.com>
+Acked-by: Kees Cook <keescook@chromium.org>
+Acked-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/compat.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/compat.c
++++ b/kernel/compat.c
+@@ -34,6 +34,7 @@ int compat_get_timex(struct timex *txc,
+ {
+ struct compat_timex tx32;
+
++ memset(txc, 0, sizeof(struct timex));
+ if (copy_from_user(&tx32, utp, sizeof(struct compat_timex)))
+ return -EFAULT;
+
--- /dev/null
+From fc8cec113904a47396bf0a1afc62920d66319d36 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 17 Apr 2018 18:32:26 -0400
+Subject: dm integrity: use kvfree for kvmalloc'd memory
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit fc8cec113904a47396bf0a1afc62920d66319d36 upstream.
+
+Use kvfree instead of kfree because the array is allocated with kvmalloc.
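+
+As a general reminder (an illustrative sketch, not code from this patch):
+anything obtained with kvmalloc()/kvmalloc_array() may be vmalloc-backed
+and therefore must be released with kvfree():
+
+    struct scatterlist **sl;
+
+    sl = kvmalloc_array(n, sizeof(*sl), GFP_KERNEL | __GFP_ZERO);
+    if (!sl)
+            return NULL;
+    /* ... fill in and use sl ... */
+    kvfree(sl);             /* kfree(sl) would be a bug if vmalloc backed it */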
+
+Fixes: 7eada909bfd7a ("dm: add integrity target")
+Cc: stable@vger.kernel.org # v4.12+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-integrity.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -2440,7 +2440,7 @@ static void dm_integrity_free_journal_sc
+ unsigned i;
+ for (i = 0; i < ic->journal_sections; i++)
+ kvfree(sl[i]);
+- kfree(sl);
++ kvfree(sl);
+ }
+
+ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
--- /dev/null
+From f0b408eebc993310bea3f2daae286c40bd3f063b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Wed, 2 May 2018 21:32:47 +0300
+Subject: drm/atomic: Clean old_state/new_state in drm_atomic_state_default_clear()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit f0b408eebc993310bea3f2daae286c40bd3f063b upstream.
+
+Clear the old_state and new_state pointers for every object in
+drm_atomic_state_default_clear(). Otherwise
+drm_atomic_get_{new,old}_*_state() will hand out stale pointers to
+anyone who hasn't first confirmed that the object is in fact part of
+the current atomic transcation, if they are called after we've done
+the ww backoff dance while hanging on to the same drm_atomic_state.
+
+For example, handle_conflicting_encoders() looks like it could hit
+this since it iterates the full connector list and just calls
+drm_atomic_get_new_connector_state() for each.
+
+And I believe we have now witnessed this happening at least once in
+i915 check_digital_port_conflicts(). Commit 8b69449d2663 ("drm/i915:
+Remove last references to drm_atomic_get_existing* macros") changed
+the safe drm_atomic_get_existing_connector_state() to the unsafe
+drm_atomic_get_new_connector_state(), which opened the doors for
+this particular bug there as well.
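+
+A sketch of the hazard (simplified): after a ww backoff the same
+drm_atomic_state is cleared and reused, so without this patch a later
+
+    conn_state = drm_atomic_get_new_connector_state(state, connector);
+    if (conn_state) {
+            /* may be a stale pointer left over from before the backoff,
+             * not NULL, for a connector that is no longer part of the
+             * transaction */
+    }
+
+could dereference state that does not belong to the current commit.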
+
+v2: Split private objs out to a separate patch (Daniel)
+
+Cc: stable@vger.kernel.org
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Cc: Abhay Kumar <abhay.kumar@intel.com>
+Fixes: 581e49fe6b41 ("drm/atomic: Add new iterators over all state, v3.")
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180502183247.5746-1-ville.syrjala@linux.intel.com
+Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Sean Paul <seanpaul@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_atomic.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -155,6 +155,8 @@ void drm_atomic_state_default_clear(stru
+ state->connectors[i].state);
+ state->connectors[i].ptr = NULL;
+ state->connectors[i].state = NULL;
++ state->connectors[i].old_state = NULL;
++ state->connectors[i].new_state = NULL;
+ drm_connector_put(connector);
+ }
+
+@@ -169,6 +171,8 @@ void drm_atomic_state_default_clear(stru
+
+ state->crtcs[i].ptr = NULL;
+ state->crtcs[i].state = NULL;
++ state->crtcs[i].old_state = NULL;
++ state->crtcs[i].new_state = NULL;
+ }
+
+ for (i = 0; i < config->num_total_plane; i++) {
+@@ -181,6 +185,8 @@ void drm_atomic_state_default_clear(stru
+ state->planes[i].state);
+ state->planes[i].ptr = NULL;
+ state->planes[i].state = NULL;
++ state->planes[i].old_state = NULL;
++ state->planes[i].new_state = NULL;
+ }
+
+ for (i = 0; i < state->num_private_objs; i++) {
--- /dev/null
+From b5cb2e5a1f64d882a155add7522247ab0523051e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Wed, 2 May 2018 21:32:47 +0300
+Subject: drm/atomic: Clean private obj old_state/new_state in drm_atomic_state_default_clear()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit b5cb2e5a1f64d882a155add7522247ab0523051e upstream.
+
+Clear the old_state and new_state pointers for private objects
+in drm_atomic_state_default_clear(). We don't actually have
+functions to get the new/old state for private objects so
+getting access to the potentially stale pointers requires a
+bit more manual labour than for other object types. But let's
+clear the pointers for private objects as well, if only to
+avoid future surprises when someone decides to add the functions
+to get at them.
+
+v2: Split private objs to a separate patch (Daniel)
+
+Cc: <stable@vger.kernel.org> # v4.14+
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Cc: Abhay Kumar <abhay.kumar@intel.com>
+Fixes: a4370c777406 (drm/atomic: Make private objs proper objects)
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180502183247.5746-1-ville.syrjala@linux.intel.com
+Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Sean Paul <seanpaul@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_atomic.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -196,6 +196,8 @@ void drm_atomic_state_default_clear(stru
+ state->private_objs[i].state);
+ state->private_objs[i].ptr = NULL;
+ state->private_objs[i].state = NULL;
++ state->private_objs[i].old_state = NULL;
++ state->private_objs[i].new_state = NULL;
+ }
+ state->num_private_objs = 0;
+
--- /dev/null
+From 9d219554d9bf59875b4e571a0392d620e8954879 Mon Sep 17 00:00:00 2001
+From: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Date: Wed, 2 May 2018 10:52:55 -0700
+Subject: drm/i915: Adjust eDP's logical vco in a reliable place.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Rodrigo Vivi <rodrigo.vivi@intel.com>
+
+commit 9d219554d9bf59875b4e571a0392d620e8954879 upstream.
+
+In intel_dp_compute_config() we were calculating the VCO needed
+for eDP on gen9 and stashing it in
+intel_atomic_state.cdclk.logical.vco.
+
+However, a few moments later in intel_modeset_checks() we replace
+the entire intel_atomic_state.cdclk.logical with
+dev_priv->cdclk.logical, overwriting the desired logical
+vco for eDP on gen9.
+
+So, with the wrong VCO value we end up with the wrong desired cdclk,
+and it also raises a lot of WARNs: on gen9, when we read
+CDCLK_CTL to verify that we configured the desired frequency
+properly, the CD Frequency Select bits [27:26] == 10b can mean
+337.5 or 308.57 MHz depending on the VCO. So if we have the wrong
+VCO value stashed we will believe the frequency selection didn't
+stick and start to raise WARNs of cdclk mismatch.
+
+[ 42.857519] [drm:intel_dump_cdclk_state [i915]] Changing CDCLK to 308571 kHz, VCO 8640000 kHz, ref 24000 kHz, bypass 24000 kHz, voltage level 0
+[ 42.897269] cdclk state doesn't match!
+[ 42.901052] WARNING: CPU: 5 PID: 1116 at drivers/gpu/drm/i915/intel_cdclk.c:2084 intel_set_cdclk+0x5d/0x110 [i915]
+[ 42.938004] RIP: 0010:intel_set_cdclk+0x5d/0x110 [i915]
+[ 43.155253] WARNING: CPU: 5 PID: 1116 at drivers/gpu/drm/i915/intel_cdclk.c:2084 intel_set_cdclk+0x5d/0x110 [i915]
+[ 43.170277] [drm:intel_dump_cdclk_state [i915]] [hw state] 337500 kHz, VCO 8100000 kHz, ref 24000 kHz, bypass 24000 kHz, voltage level 0
+[ 43.182566] [drm:intel_dump_cdclk_state [i915]] [sw state] 308571 kHz, VCO 8640000 kHz, ref 24000 kHz, bypass 24000 kHz, voltage level 0
+
+v2: Move the entire eDP's vco logical adjustment to inside
+ the skl_modeset_calc_cdclk as suggested by Ville.
+
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Fixes: bb0f4aab0e76 ("drm/i915: Track full cdclk state for the logical and actual cdclk frequencies")
+Cc: <stable@vger.kernel.org> # v4.12+
+Link: https://patchwork.freedesktop.org/patch/msgid/20180502175255.5344-1-rodrigo.vivi@intel.com
+(cherry picked from commit 3297234a05ab1e90091b0574db4c397ef0e90d5f)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_cdclk.c | 41 +++++++++++++++++++++++++++++++++----
+ drivers/gpu/drm/i915/intel_dp.c | 20 ------------------
+ 2 files changed, 37 insertions(+), 24 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/intel_cdclk.c
+@@ -2108,9 +2108,44 @@ static int bdw_modeset_calc_cdclk(struct
+ return 0;
+ }
+
++static int skl_dpll0_vco(struct intel_atomic_state *intel_state)
++{
++ struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
++ struct intel_crtc *crtc;
++ struct intel_crtc_state *crtc_state;
++ int vco, i;
++
++ vco = intel_state->cdclk.logical.vco;
++ if (!vco)
++ vco = dev_priv->skl_preferred_vco_freq;
++
++ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
++ if (!crtc_state->base.enable)
++ continue;
++
++ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
++ continue;
++
++ /*
++ * DPLL0 VCO may need to be adjusted to get the correct
++ * clock for eDP. This will affect cdclk as well.
++ */
++ switch (crtc_state->port_clock / 2) {
++ case 108000:
++ case 216000:
++ vco = 8640000;
++ break;
++ default:
++ vco = 8100000;
++ break;
++ }
++ }
++
++ return vco;
++}
++
+ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
+ {
+- struct drm_i915_private *dev_priv = to_i915(state->dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ int min_cdclk, cdclk, vco;
+
+@@ -2118,9 +2153,7 @@ static int skl_modeset_calc_cdclk(struct
+ if (min_cdclk < 0)
+ return min_cdclk;
+
+- vco = intel_state->cdclk.logical.vco;
+- if (!vco)
+- vco = dev_priv->skl_preferred_vco_freq;
++ vco = skl_dpll0_vco(intel_state);
+
+ /*
+ * FIXME should also account for plane ratio
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -1794,26 +1794,6 @@ found:
+ reduce_m_n);
+ }
+
+- /*
+- * DPLL0 VCO may need to be adjusted to get the correct
+- * clock for eDP. This will affect cdclk as well.
+- */
+- if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
+- int vco;
+-
+- switch (pipe_config->port_clock / 2) {
+- case 108000:
+- case 216000:
+- vco = 8640000;
+- break;
+- default:
+- vco = 8100000;
+- break;
+- }
+-
+- to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
+- }
+-
+ if (!HAS_DDI(dev_priv))
+ intel_dp_set_clock(encoder, pipe_config);
+
--- /dev/null
+From e8f48f96db7e482995743f461b3e8a5c1a102533 Mon Sep 17 00:00:00 2001
+From: Florent Flament <contact@florentflament.com>
+Date: Thu, 19 Apr 2018 19:07:00 +0300
+Subject: drm/i915: Fix drm:intel_enable_lvds ERROR message in kernel log
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Florent Flament <contact@florentflament.com>
+
+commit e8f48f96db7e482995743f461b3e8a5c1a102533 upstream.
+
+Fix `[drm:intel_enable_lvds] *ERROR* timed out waiting for panel to
+power on` in kernel log at boot time.
+
+Toshiba Satellite Z930 laptops need between 1 and 2 seconds to power
+on their screens during Intel i915 DRM initialization. This currently
+results in a `[drm:intel_enable_lvds] *ERROR* timed out waiting for
+panel to power on` message appearing in the kernel log during boot
+time and when stopping the machine.
+
+This change increases the timeout of the `intel_enable_lvds` function
+from 1 to 5 seconds, leaving enough time for the Satellite Z930 LCD
+screen to power on, and suppressing the error message from the kernel
+log.
+
+This patch has been successfully tested on Linux 4.14 running on a
+Toshiba Satellite Z930.
+
+[vsyrjala: bump the timeout from 2 to 5 seconds to match the DP
+ code and properly cover the max hw timeout of ~4 seconds, and
+ drop the comment about the specific machine since this is not
+ a particularly surprising issue, nor specific to that one machine]
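+
+For reference, the last argument of intel_wait_for_register() is a
+timeout in milliseconds, so the change below bumps the wait from 1000 ms
+to 5000 ms (a sketch of the resulting call):
+
+    /* 5000 ms covers the ~4 s worst-case panel power-up time with margin */
+    if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
+            DRM_ERROR("timed out waiting for panel to power on\n");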
+
+Signed-off-by: Florent Flament <contact@florentflament.com>
+Cc: stable@vger.kernel.org
+Cc: Pavel Petrovic <ppetrovic@acm.org>
+Cc: Sérgio M. Basto <sergio@serjux.com>
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103414
+References: https://bugzilla.kernel.org/show_bug.cgi?id=57591
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180419160700.19828-1-ville.syrjala@linux.intel.com
+Reviewed-by: Jani Nikula <jani.nikula@intel.com>
+(cherry picked from commit 280b54ade5914d3b4abe4f0ebe083ddbd4603246)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_lvds.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -319,7 +319,8 @@ static void intel_enable_lvds(struct int
+
+ I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
+ POSTING_READ(lvds_encoder->reg);
+- if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
++
++ if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
+ DRM_ERROR("timed out waiting for panel to power on\n");
+
+ intel_panel_enable_backlight(pipe_config, conn_state);
--- /dev/null
+From 352672db857290ab5b0e2b6a99c414f92bee024c Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Wed, 2 May 2018 19:38:48 -0400
+Subject: drm/nouveau: Fix deadlock in nv50_mstm_register_connector()
+
+From: Lyude Paul <lyude@redhat.com>
+
+commit 352672db857290ab5b0e2b6a99c414f92bee024c upstream.
+
+Currently, we're grabbing all of the modesetting locks before adding MST
+connectors to fbdev. This isn't actually necessary, and causes a
+deadlock as well:
+
+======================================================
+WARNING: possible circular locking dependency detected
+4.17.0-rc3Lyude-Test+ #1 Tainted: G O
+------------------------------------------------------
+kworker/1:0/18 is trying to acquire lock:
+00000000c832f62d (&helper->lock){+.+.}, at: drm_fb_helper_add_one_connector+0x2a/0x60 [drm_kms_helper]
+
+but task is already holding lock:
+00000000942e28e2 (crtc_ww_class_mutex){+.+.}, at: drm_modeset_backoff+0x8e/0x1c0 [drm]
+
+which lock already depends on the new lock.
+
+the existing dependency chain (in reverse order) is:
+
+-> #3 (crtc_ww_class_mutex){+.+.}:
+ ww_mutex_lock+0x43/0x80
+ drm_modeset_lock+0x71/0x130 [drm]
+ drm_helper_probe_single_connector_modes+0x7d/0x6b0 [drm_kms_helper]
+ drm_setup_crtcs+0x15e/0xc90 [drm_kms_helper]
+ __drm_fb_helper_initial_config_and_unlock+0x29/0x480 [drm_kms_helper]
+ nouveau_fbcon_init+0x138/0x1a0 [nouveau]
+ nouveau_drm_load+0x173/0x7e0 [nouveau]
+ drm_dev_register+0x134/0x1c0 [drm]
+ drm_get_pci_dev+0x8e/0x160 [drm]
+ nouveau_drm_probe+0x1a9/0x230 [nouveau]
+ pci_device_probe+0xcd/0x150
+ driver_probe_device+0x30b/0x480
+ __driver_attach+0xbc/0xe0
+ bus_for_each_dev+0x67/0x90
+ bus_add_driver+0x164/0x260
+ driver_register+0x57/0xc0
+ do_one_initcall+0x4d/0x323
+ do_init_module+0x5b/0x1f8
+ load_module+0x20e5/0x2ac0
+ __do_sys_finit_module+0xb7/0xd0
+ do_syscall_64+0x60/0x1b0
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+-> #2 (crtc_ww_class_acquire){+.+.}:
+ drm_helper_probe_single_connector_modes+0x58/0x6b0 [drm_kms_helper]
+ drm_setup_crtcs+0x15e/0xc90 [drm_kms_helper]
+ __drm_fb_helper_initial_config_and_unlock+0x29/0x480 [drm_kms_helper]
+ nouveau_fbcon_init+0x138/0x1a0 [nouveau]
+ nouveau_drm_load+0x173/0x7e0 [nouveau]
+ drm_dev_register+0x134/0x1c0 [drm]
+ drm_get_pci_dev+0x8e/0x160 [drm]
+ nouveau_drm_probe+0x1a9/0x230 [nouveau]
+ pci_device_probe+0xcd/0x150
+ driver_probe_device+0x30b/0x480
+ __driver_attach+0xbc/0xe0
+ bus_for_each_dev+0x67/0x90
+ bus_add_driver+0x164/0x260
+ driver_register+0x57/0xc0
+ do_one_initcall+0x4d/0x323
+ do_init_module+0x5b/0x1f8
+ load_module+0x20e5/0x2ac0
+ __do_sys_finit_module+0xb7/0xd0
+ do_syscall_64+0x60/0x1b0
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+-> #1 (&dev->mode_config.mutex){+.+.}:
+ drm_setup_crtcs+0x10c/0xc90 [drm_kms_helper]
+ __drm_fb_helper_initial_config_and_unlock+0x29/0x480 [drm_kms_helper]
+ nouveau_fbcon_init+0x138/0x1a0 [nouveau]
+ nouveau_drm_load+0x173/0x7e0 [nouveau]
+ drm_dev_register+0x134/0x1c0 [drm]
+ drm_get_pci_dev+0x8e/0x160 [drm]
+ nouveau_drm_probe+0x1a9/0x230 [nouveau]
+ pci_device_probe+0xcd/0x150
+ driver_probe_device+0x30b/0x480
+ __driver_attach+0xbc/0xe0
+ bus_for_each_dev+0x67/0x90
+ bus_add_driver+0x164/0x260
+ driver_register+0x57/0xc0
+ do_one_initcall+0x4d/0x323
+ do_init_module+0x5b/0x1f8
+ load_module+0x20e5/0x2ac0
+ __do_sys_finit_module+0xb7/0xd0
+ do_syscall_64+0x60/0x1b0
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+-> #0 (&helper->lock){+.+.}:
+ __mutex_lock+0x70/0x9d0
+ drm_fb_helper_add_one_connector+0x2a/0x60 [drm_kms_helper]
+ nv50_mstm_register_connector+0x2c/0x50 [nouveau]
+ drm_dp_add_port+0x2f5/0x420 [drm_kms_helper]
+ drm_dp_send_link_address+0x155/0x1e0 [drm_kms_helper]
+ drm_dp_add_port+0x33f/0x420 [drm_kms_helper]
+ drm_dp_send_link_address+0x155/0x1e0 [drm_kms_helper]
+ drm_dp_check_and_send_link_address+0x87/0xd0 [drm_kms_helper]
+ drm_dp_mst_link_probe_work+0x4d/0x80 [drm_kms_helper]
+ process_one_work+0x20d/0x650
+ worker_thread+0x3a/0x390
+ kthread+0x11e/0x140
+ ret_from_fork+0x3a/0x50
+
+other info that might help us debug this:
+Chain exists of:
+ &helper->lock --> crtc_ww_class_acquire --> crtc_ww_class_mutex
+ Possible unsafe locking scenario:
+ CPU0 CPU1
+ ---- ----
+ lock(crtc_ww_class_mutex);
+ lock(crtc_ww_class_acquire);
+ lock(crtc_ww_class_mutex);
+ lock(&helper->lock);
+
+ *** DEADLOCK ***
+5 locks held by kworker/1:0/18:
+ #0: 000000004a05cd50 ((wq_completion)"events_long"){+.+.}, at: process_one_work+0x187/0x650
+ #1: 00000000601c11d1 ((work_completion)(&mgr->work)){+.+.}, at: process_one_work+0x187/0x650
+ #2: 00000000586ca0df (&dev->mode_config.mutex){+.+.}, at: drm_modeset_lock_all+0x3a/0x1b0 [drm]
+ #3: 00000000d3ca0ffa (crtc_ww_class_acquire){+.+.}, at: drm_modeset_lock_all+0x44/0x1b0 [drm]
+ #4: 00000000942e28e2 (crtc_ww_class_mutex){+.+.}, at: drm_modeset_backoff+0x8e/0x1c0 [drm]
+
+stack backtrace:
+CPU: 1 PID: 18 Comm: kworker/1:0 Tainted: G O 4.17.0-rc3Lyude-Test+ #1
+Hardware name: Gateway FX6840/FX6840, BIOS P01-A3 05/17/2010
+Workqueue: events_long drm_dp_mst_link_probe_work [drm_kms_helper]
+Call Trace:
+ dump_stack+0x85/0xcb
+ print_circular_bug.isra.38+0x1ce/0x1db
+ __lock_acquire+0x128f/0x1350
+ ? lock_acquire+0x9f/0x200
+ ? lock_acquire+0x9f/0x200
+ ? __ww_mutex_lock.constprop.13+0x8f/0x1000
+ lock_acquire+0x9f/0x200
+ ? drm_fb_helper_add_one_connector+0x2a/0x60 [drm_kms_helper]
+ ? drm_fb_helper_add_one_connector+0x2a/0x60 [drm_kms_helper]
+ __mutex_lock+0x70/0x9d0
+ ? drm_fb_helper_add_one_connector+0x2a/0x60 [drm_kms_helper]
+ ? ww_mutex_lock+0x43/0x80
+ ? _cond_resched+0x15/0x30
+ ? ww_mutex_lock+0x43/0x80
+ ? drm_modeset_lock+0xb2/0x130 [drm]
+ ? drm_fb_helper_add_one_connector+0x2a/0x60 [drm_kms_helper]
+ drm_fb_helper_add_one_connector+0x2a/0x60 [drm_kms_helper]
+ nv50_mstm_register_connector+0x2c/0x50 [nouveau]
+ drm_dp_add_port+0x2f5/0x420 [drm_kms_helper]
+ ? mark_held_locks+0x50/0x80
+ ? kfree+0xcf/0x2a0
+ ? drm_dp_check_mstb_guid+0xd6/0x120 [drm_kms_helper]
+ ? trace_hardirqs_on_caller+0xed/0x180
+ ? drm_dp_check_mstb_guid+0xd6/0x120 [drm_kms_helper]
+ drm_dp_send_link_address+0x155/0x1e0 [drm_kms_helper]
+ drm_dp_add_port+0x33f/0x420 [drm_kms_helper]
+ ? nouveau_connector_aux_xfer+0x7c/0xb0 [nouveau]
+ ? find_held_lock+0x2d/0x90
+ ? drm_dp_dpcd_access+0xd9/0xf0 [drm_kms_helper]
+ ? __mutex_unlock_slowpath+0x3b/0x280
+ ? drm_dp_dpcd_access+0xd9/0xf0 [drm_kms_helper]
+ drm_dp_send_link_address+0x155/0x1e0 [drm_kms_helper]
+ drm_dp_check_and_send_link_address+0x87/0xd0 [drm_kms_helper]
+ drm_dp_mst_link_probe_work+0x4d/0x80 [drm_kms_helper]
+ process_one_work+0x20d/0x650
+ worker_thread+0x3a/0x390
+ ? process_one_work+0x650/0x650
+ kthread+0x11e/0x140
+ ? kthread_create_worker_on_cpu+0x50/0x50
+ ret_from_fork+0x3a/0x50
+
+Taking example from i915, the only time we need to hold any modesetting
+locks is when changing the port on the mstc, and in that case we only
+need to hold the connection mutex.
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Cc: Karol Herbst <kherbst@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/nouveau/nv50_display.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -3272,10 +3272,11 @@ nv50_mstm_destroy_connector(struct drm_d
+
+ drm_connector_unregister(&mstc->connector);
+
+- drm_modeset_lock_all(drm->dev);
+ drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
++
++ drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
+ mstc->port = NULL;
+- drm_modeset_unlock_all(drm->dev);
++ drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
+
+ drm_connector_unreference(&mstc->connector);
+ }
+@@ -3285,9 +3286,7 @@ nv50_mstm_register_connector(struct drm_
+ {
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+
+- drm_modeset_lock_all(drm->dev);
+ drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
+- drm_modeset_unlock_all(drm->dev);
+
+ drm_connector_register(connector);
+ }
--- /dev/null
+From 0d5a03c3d9254813ca76d7886ff9ed76a0aea545 Mon Sep 17 00:00:00 2001
+From: Ben Skeggs <bskeggs@redhat.com>
+Date: Tue, 8 May 2018 20:39:47 +1000
+Subject: drm/nouveau/ttm: don't dereference nvbo::cli, it can outlive client
+
+From: Ben Skeggs <bskeggs@redhat.com>
+
+commit 0d5a03c3d9254813ca76d7886ff9ed76a0aea545 upstream.
+
+Potentially responsible for some random OOPSes.
+
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Cc: stable@vger.kernel.org [v4.15+]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/nouveau/nouveau_bo.c | 1 -
+ drivers/gpu/drm/nouveau/nouveau_bo.h | 2 --
+ drivers/gpu/drm/nouveau/nouveau_ttm.c | 6 +++---
+ 3 files changed, 3 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -214,7 +214,6 @@ nouveau_bo_new(struct nouveau_cli *cli,
+ INIT_LIST_HEAD(&nvbo->entry);
+ INIT_LIST_HEAD(&nvbo->vma_list);
+ nvbo->bo.bdev = &drm->ttm.bdev;
+- nvbo->cli = cli;
+
+ /* This is confusing, and doesn't actually mean we want an uncached
+ * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
+@@ -26,8 +26,6 @@ struct nouveau_bo {
+
+ struct list_head vma_list;
+
+- struct nouveau_cli *cli;
+-
+ unsigned contig:1;
+ unsigned page:5;
+ unsigned kind:8;
+--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
+@@ -63,7 +63,7 @@ nouveau_vram_manager_new(struct ttm_mem_
+ struct ttm_mem_reg *reg)
+ {
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+- struct nouveau_drm *drm = nvbo->cli->drm;
++ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *mem;
+ int ret;
+
+@@ -103,7 +103,7 @@ nouveau_gart_manager_new(struct ttm_mem_
+ struct ttm_mem_reg *reg)
+ {
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+- struct nouveau_drm *drm = nvbo->cli->drm;
++ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *mem;
+ int ret;
+
+@@ -131,7 +131,7 @@ nv04_gart_manager_new(struct ttm_mem_typ
+ struct ttm_mem_reg *reg)
+ {
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+- struct nouveau_drm *drm = nvbo->cli->drm;
++ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *mem;
+ int ret;
+
--- /dev/null
+From da291320baec914f0bb4e65a9dccb86bd6c728f2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Wed, 25 Apr 2018 17:32:10 +0200
+Subject: drm/ttm: Use GFP_TRANSHUGE_LIGHT for allocating huge pages
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michel Dänzer <michel.daenzer@amd.com>
+
+commit da291320baec914f0bb4e65a9dccb86bd6c728f2 upstream.
+
+GFP_TRANSHUGE tries very hard to allocate huge pages, which can result
+in long delays with high memory pressure. I have observed firefox
+freezing for up to around a minute due to this while restic was taking
+a full system backup.
+
+Since we don't really need huge pages, use GFP_TRANSHUGE_LIGHT |
+__GFP_NORETRY instead, in order to fail quickly when there are no huge
+pages available.
+
+Set __GFP_KSWAPD_RECLAIM as well, in order for huge pages to be freed
+up in the background if necessary.
+
+With these changes, I'm no longer seeing freezes during a restic backup.
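+
+Roughly, the intent of the flag combination used in the patch below (a
+sketch, not a complete description of the allocator semantics):
+
+    gfp_t huge_flags = GFP_TRANSHUGE_LIGHT   /* opportunistic THP, no direct reclaim/compaction stalls */
+                     | __GFP_NORETRY         /* fail fast if no huge page is readily available */
+                     | __GFP_KSWAPD_RECLAIM; /* still wake kswapd to free huge pages in the background */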
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/ttm/ttm_page_alloc.c | 11 ++++++++---
+ drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 3 ++-
+ 2 files changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -904,7 +904,8 @@ static int ttm_get_pages(struct page **p
+ while (npages >= HPAGE_PMD_NR) {
+ gfp_t huge_flags = gfp_flags;
+
+- huge_flags |= GFP_TRANSHUGE;
++ huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
++ __GFP_KSWAPD_RECLAIM;
+ huge_flags &= ~__GFP_MOVABLE;
+ huge_flags &= ~__GFP_COMP;
+ p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
+@@ -1021,11 +1022,15 @@ int ttm_page_alloc_init(struct ttm_mem_g
+ GFP_USER | GFP_DMA32, "uc dma", 0);
+
+ ttm_page_pool_init_locked(&_manager->wc_pool_huge,
+- GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
++ (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
++ __GFP_KSWAPD_RECLAIM) &
++ ~(__GFP_MOVABLE | __GFP_COMP),
+ "wc huge", order);
+
+ ttm_page_pool_init_locked(&_manager->uc_pool_huge,
+- GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
++ (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
++ __GFP_KSWAPD_RECLAIM) &
++ ~(__GFP_MOVABLE | __GFP_COMP)
+ , "uc huge", order);
+
+ _manager->options.max_size = max_pages;
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+@@ -915,7 +915,8 @@ static gfp_t ttm_dma_pool_gfp_flags(stru
+ gfp_flags |= __GFP_ZERO;
+
+ if (huge) {
+- gfp_flags |= GFP_TRANSHUGE;
++ gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
++ __GFP_KSWAPD_RECLAIM;
+ gfp_flags &= ~__GFP_MOVABLE;
+ gfp_flags &= ~__GFP_COMP;
+ }
--- /dev/null
+From 9a0e9802217291e54c4dd1fc5462f189a4be14ec Mon Sep 17 00:00:00 2001
+From: Boris Brezillon <boris.brezillon@bootlin.com>
+Date: Mon, 7 May 2018 14:13:03 +0200
+Subject: drm/vc4: Fix scaling of uni-planar formats
+
+From: Boris Brezillon <boris.brezillon@bootlin.com>
+
+commit 9a0e9802217291e54c4dd1fc5462f189a4be14ec upstream.
+
+When using uni-planar formats (like RGB), the scaling parameters are
+stored in plane 0, not plane 1.
+
+Fixes: fc04023fafec ("drm/vc4: Add support for YUV planes.")
+Cc: stable@vger.kernel.org
+Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
+Reviewed-by: Eric Anholt <eric@anholt.net>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180507121303.5610-1-boris.brezillon@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vc4/vc4_plane.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -557,7 +557,7 @@ static int vc4_plane_mode_set(struct drm
+ * the scl fields here.
+ */
+ if (num_planes == 1) {
+- scl0 = vc4_get_scl_field(state, 1);
++ scl0 = vc4_get_scl_field(state, 0);
+ scl1 = scl0;
+ } else {
+ scl0 = vc4_get_scl_field(state, 1);
--- /dev/null
+From f241632fd087d3d9fbd5450f4d8c8604badd8348 Mon Sep 17 00:00:00 2001
+From: Govert Overgaauw <govert.overgaauw@prodrive-technologies.com>
+Date: Fri, 6 Apr 2018 14:41:35 +0200
+Subject: gpio: fix aspeed_gpio unmask irq
+
+From: Govert Overgaauw <govert.overgaauw@prodrive-technologies.com>
+
+commit f241632fd087d3d9fbd5450f4d8c8604badd8348 upstream.
+
+The unmask function disables all interrupts in a bank when unmasking an
+interrupt. Only disable the given interrupt.
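+
+Illustration of the difference (values made up): with reg = 0xff and
+bit = BIT(3), the two statements behave as follows:
+
+    reg &= bit;     /* old code: reg becomes 0x08, every other line in the
+                     * bank is disabled as a side effect */
+    reg &= ~bit;    /* fixed code: reg becomes 0xf7, only BIT(3) is cleared */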
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Govert Overgaauw <govert.overgaauw@prodrive-technologies.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpio-aspeed.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpio-aspeed.c
++++ b/drivers/gpio/gpio-aspeed.c
+@@ -384,7 +384,7 @@ static void aspeed_gpio_irq_set_mask(str
+ if (set)
+ reg |= bit;
+ else
+- reg &= bit;
++ reg &= ~bit;
+ iowrite32(reg, addr);
+
+ spin_unlock_irqrestore(&gpio->lock, flags);
--- /dev/null
+From f001cc351ad3309ec8736c374e90e5a4bc472d41 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= <u.kleine-koenig@pengutronix.de>
+Date: Mon, 16 Apr 2018 13:17:53 +0200
+Subject: gpio: fix error path in lineevent_create
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+commit f001cc351ad3309ec8736c374e90e5a4bc472d41 upstream.
+
+If gpiod_request() fails the cleanup must not call gpiod_free().
+
+Cc: stable@vger.kernel.org
+Fixes: 61f922db7221 ("gpio: userspace ABI for reading GPIO line events")
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpiolib.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -852,7 +852,7 @@ static int lineevent_create(struct gpio_
+ desc = &gdev->descs[offset];
+ ret = gpiod_request(desc, le->label);
+ if (ret)
+- goto out_free_desc;
++ goto out_free_label;
+ le->desc = desc;
+ le->eflags = eflags;
+
--- /dev/null
+From ab3dbcf78f60f46d6a0ad63b1f4b690b7a427140 Mon Sep 17 00:00:00 2001
+From: Timur Tabi <timur@codeaurora.org>
+Date: Thu, 29 Mar 2018 13:29:12 -0500
+Subject: gpioib: do not free unrequested descriptors
+
+From: Timur Tabi <timur@codeaurora.org>
+
+commit ab3dbcf78f60f46d6a0ad63b1f4b690b7a427140 upstream.
+
+If the main loop in linehandle_create() encounters an error, it
+unwinds completely by freeing all previously requested GPIO
+descriptors. However, if the error occurs in the beginning of
+the loop before that GPIO is requested, then the exit code
+attempts to free a null descriptor. If extrachecks is enabled,
+gpiod_free() triggers a WARN_ON.
+
+Instead, keep a separate count of legitimate GPIOs so that only
+those are freed.
+
+Cc: stable@vger.kernel.org
+Fixes: d7c51b47ac11 ("gpio: userspace ABI for reading/writing GPIO lines")
+Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Timur Tabi <timur@codeaurora.org>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpiolib.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -446,7 +446,7 @@ static int linehandle_create(struct gpio
+ struct gpiohandle_request handlereq;
+ struct linehandle_state *lh;
+ struct file *file;
+- int fd, i, ret;
++ int fd, i, count = 0, ret;
+ u32 lflags;
+
+ if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
+@@ -507,6 +507,7 @@ static int linehandle_create(struct gpio
+ if (ret)
+ goto out_free_descs;
+ lh->descs[i] = desc;
++ count = i;
+
+ if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+@@ -577,7 +578,7 @@ static int linehandle_create(struct gpio
+ out_put_unused_fd:
+ put_unused_fd(fd);
+ out_free_descs:
+- for (; i >= 0; i--)
++ for (i = 0; i < count; i++)
+ gpiod_free(lh->descs[i]);
+ kfree(lh->label);
+ out_free_lh:
--- /dev/null
+From 184add2ca23ce5edcac0ab9c3b9be13f91e7b567 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Thu, 26 Apr 2018 22:32:21 +0200
+Subject: libata: Apply NOLPM quirk for SanDisk SD7UB3Q*G1001 SSDs
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 184add2ca23ce5edcac0ab9c3b9be13f91e7b567 upstream.
+
+Richard Jones has reported that using med_power_with_dipm on a T450s
+with a Sandisk SD7UB3Q256G1001 SSD (firmware version X2180501) is
+causing the machine to hang.
+
+Switching the LPM to max_performance fixes this, so it seems that
+this Sandisk SSD does not handle LPM well.
+
+Note in the past there have been bug-reports about the following
+Sandisk models not working with min_power, so we may need to extend
+the quirk list in the future: name - firmware
+Sandisk SD6SB2M512G1022I - X210400
+Sandisk SD6PP4M-256G-1006 - A200906
+
+Cc: stable@vger.kernel.org
+Cc: Richard W.M. Jones <rjones@redhat.com>
+Reported-and-tested-by: Richard W.M. Jones <rjones@redhat.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ata/libata-core.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4549,6 +4549,9 @@ static const struct ata_blacklist_entry
+ ATA_HORKAGE_ZERO_AFTER_TRIM |
+ ATA_HORKAGE_NOLPM, },
+
++ /* Sandisk devices which are known to not handle LPM well */
++ { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
++
+ /* devices that don't properly handle queued TRIM commands */
+ { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
--- /dev/null
+From 27ae357fa82be5ab73b2ef8d39dcb8ca2563483a Mon Sep 17 00:00:00 2001
+From: David Rientjes <rientjes@google.com>
+Date: Fri, 11 May 2018 16:02:04 -0700
+Subject: mm, oom: fix concurrent munlock and oom reaper unmap, v3
+
+From: David Rientjes <rientjes@google.com>
+
+commit 27ae357fa82be5ab73b2ef8d39dcb8ca2563483a upstream.
+
+Since exit_mmap() is done without the protection of mm->mmap_sem, it is
+possible for the oom reaper to concurrently operate on an mm until
+MMF_OOM_SKIP is set.
+
+This allows munlock_vma_pages_all() to concurrently run while the oom
+reaper is operating on a vma. Since munlock_vma_pages_range() depends
+on clearing VM_LOCKED from vm_flags before actually doing the munlock to
+determine if any other vmas are locking the same memory, the check for
+VM_LOCKED in the oom reaper is racy.
+
+This is especially noticeable on architectures such as powerpc where
+clearing a huge pmd requires serialize_against_pte_lookup(). If the pmd
+is zapped by the oom reaper during follow_page_mask() after the check
+for pmd_none() is bypassed, this ends up dereferencing a NULL ptl or
+causing a kernel oops.
+
+Fix this by manually freeing all possible memory from the mm before
+doing the munlock and then setting MMF_OOM_SKIP. The oom reaper can not
+run on the mm anymore so the munlock is safe to do in exit_mmap(). It
+also matches the logic that the oom reaper currently uses for
+determining when to set MMF_OOM_SKIP itself, so there's no new risk of
+excessive oom killing.
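+
+A sketch of the resulting exit_mmap() ordering for oom victims (condensed
+from the diff below):
+
+    mutex_lock(&oom_lock);
+    __oom_reap_task_mm(mm);                 /* free what we can ourselves */
+    mutex_unlock(&oom_lock);
+
+    set_bit(MMF_OOM_SKIP, &mm->flags);
+    down_write(&mm->mmap_sem);              /* fences out a reaper that got in first */
+    up_write(&mm->mmap_sem);
+
+    /* only now: munlock_vma_pages_all(), unmap_vmas(), free_pgtables() */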
+
+This issue fixes CVE-2018-1000200.
+
+Link: http://lkml.kernel.org/r/alpine.DEB.2.21.1804241526320.238665@chino.kir.corp.google.com
+Fixes: 212925802454 ("mm: oom: let oom_reap_task and exit_mmap run concurrently")
+Signed-off-by: David Rientjes <rientjes@google.com>
+Suggested-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: <stable@vger.kernel.org> [4.14+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/oom.h | 2 +
+ mm/mmap.c | 44 ++++++++++++++++------------
+ mm/oom_kill.c | 81 +++++++++++++++++++++++++++-------------------------
+ 3 files changed, 71 insertions(+), 56 deletions(-)
+
+--- a/include/linux/oom.h
++++ b/include/linux/oom.h
+@@ -95,6 +95,8 @@ static inline int check_stable_address_s
+ return 0;
+ }
+
++void __oom_reap_task_mm(struct mm_struct *mm);
++
+ extern unsigned long oom_badness(struct task_struct *p,
+ struct mem_cgroup *memcg, const nodemask_t *nodemask,
+ unsigned long totalpages);
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2997,6 +2997,32 @@ void exit_mmap(struct mm_struct *mm)
+ /* mm's last user has gone, and its about to be pulled down */
+ mmu_notifier_release(mm);
+
++ if (unlikely(mm_is_oom_victim(mm))) {
++ /*
++ * Manually reap the mm to free as much memory as possible.
++ * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
++ * this mm from further consideration. Taking mm->mmap_sem for
++ * write after setting MMF_OOM_SKIP will guarantee that the oom
++ * reaper will not run on this mm again after mmap_sem is
++ * dropped.
++ *
++ * Nothing can be holding mm->mmap_sem here and the above call
++ * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
++ * __oom_reap_task_mm() will not block.
++ *
++ * This needs to be done before calling munlock_vma_pages_all(),
++ * which clears VM_LOCKED, otherwise the oom reaper cannot
++ * reliably test it.
++ */
++ mutex_lock(&oom_lock);
++ __oom_reap_task_mm(mm);
++ mutex_unlock(&oom_lock);
++
++ set_bit(MMF_OOM_SKIP, &mm->flags);
++ down_write(&mm->mmap_sem);
++ up_write(&mm->mmap_sem);
++ }
++
+ if (mm->locked_vm) {
+ vma = mm->mmap;
+ while (vma) {
+@@ -3018,24 +3044,6 @@ void exit_mmap(struct mm_struct *mm)
+ /* update_hiwater_rss(mm) here? but nobody should be looking */
+ /* Use -1 here to ensure all VMAs in the mm are unmapped */
+ unmap_vmas(&tlb, vma, 0, -1);
+-
+- if (unlikely(mm_is_oom_victim(mm))) {
+- /*
+- * Wait for oom_reap_task() to stop working on this
+- * mm. Because MMF_OOM_SKIP is already set before
+- * calling down_read(), oom_reap_task() will not run
+- * on this "mm" post up_write().
+- *
+- * mm_is_oom_victim() cannot be set from under us
+- * either because victim->mm is already set to NULL
+- * under task_lock before calling mmput and oom_mm is
+- * set not NULL by the OOM killer only if victim->mm
+- * is found not NULL while holding the task_lock.
+- */
+- set_bit(MMF_OOM_SKIP, &mm->flags);
+- down_write(&mm->mmap_sem);
+- up_write(&mm->mmap_sem);
+- }
+ free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
+ tlb_finish_mmu(&tlb, 0, -1);
+
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -474,7 +474,6 @@ bool process_shares_mm(struct task_struc
+ return false;
+ }
+
+-
+ #ifdef CONFIG_MMU
+ /*
+ * OOM Reaper kernel thread which tries to reap the memory used by the OOM
+@@ -485,16 +484,54 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_reape
+ static struct task_struct *oom_reaper_list;
+ static DEFINE_SPINLOCK(oom_reaper_lock);
+
+-static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
++void __oom_reap_task_mm(struct mm_struct *mm)
+ {
+- struct mmu_gather tlb;
+ struct vm_area_struct *vma;
++
++ /*
++ * Tell all users of get_user/copy_from_user etc... that the content
++ * is no longer stable. No barriers really needed because unmapping
++ * should imply barriers already and the reader would hit a page fault
++ * if it stumbled over a reaped memory.
++ */
++ set_bit(MMF_UNSTABLE, &mm->flags);
++
++ for (vma = mm->mmap ; vma; vma = vma->vm_next) {
++ if (!can_madv_dontneed_vma(vma))
++ continue;
++
++ /*
++ * Only anonymous pages have a good chance to be dropped
++ * without additional steps which we cannot afford as we
++ * are OOM already.
++ *
++ * We do not even care about fs backed pages because all
++ * which are reclaimable have already been reclaimed and
++ * we do not want to block exit_mmap by keeping mm ref
++ * count elevated without a good reason.
++ */
++ if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
++ const unsigned long start = vma->vm_start;
++ const unsigned long end = vma->vm_end;
++ struct mmu_gather tlb;
++
++ tlb_gather_mmu(&tlb, mm, start, end);
++ mmu_notifier_invalidate_range_start(mm, start, end);
++ unmap_page_range(&tlb, vma, start, end, NULL);
++ mmu_notifier_invalidate_range_end(mm, start, end);
++ tlb_finish_mmu(&tlb, start, end);
++ }
++ }
++}
++
++static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
++{
+ bool ret = true;
+
+ /*
+ * We have to make sure to not race with the victim exit path
+ * and cause premature new oom victim selection:
+- * __oom_reap_task_mm exit_mm
++ * oom_reap_task_mm exit_mm
+ * mmget_not_zero
+ * mmput
+ * atomic_dec_and_test
+@@ -539,39 +576,8 @@ static bool __oom_reap_task_mm(struct ta
+
+ trace_start_task_reaping(tsk->pid);
+
+- /*
+- * Tell all users of get_user/copy_from_user etc... that the content
+- * is no longer stable. No barriers really needed because unmapping
+- * should imply barriers already and the reader would hit a page fault
+- * if it stumbled over a reaped memory.
+- */
+- set_bit(MMF_UNSTABLE, &mm->flags);
+-
+- for (vma = mm->mmap ; vma; vma = vma->vm_next) {
+- if (!can_madv_dontneed_vma(vma))
+- continue;
++ __oom_reap_task_mm(mm);
+
+- /*
+- * Only anonymous pages have a good chance to be dropped
+- * without additional steps which we cannot afford as we
+- * are OOM already.
+- *
+- * We do not even care about fs backed pages because all
+- * which are reclaimable have already been reclaimed and
+- * we do not want to block exit_mmap by keeping mm ref
+- * count elevated without a good reason.
+- */
+- if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
+- const unsigned long start = vma->vm_start;
+- const unsigned long end = vma->vm_end;
+-
+- tlb_gather_mmu(&tlb, mm, start, end);
+- mmu_notifier_invalidate_range_start(mm, start, end);
+- unmap_page_range(&tlb, vma, start, end, NULL);
+- mmu_notifier_invalidate_range_end(mm, start, end);
+- tlb_finish_mmu(&tlb, start, end);
+- }
+- }
+ pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
+ task_pid_nr(tsk), tsk->comm,
+ K(get_mm_counter(mm, MM_ANONPAGES)),
+@@ -592,13 +598,12 @@ static void oom_reap_task(struct task_st
+ struct mm_struct *mm = tsk->signal->oom_mm;
+
+ /* Retry the down_read_trylock(mmap_sem) a few times */
+- while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
++ while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
+ schedule_timeout_idle(HZ/10);
+
+ if (attempts <= MAX_OOM_REAP_RETRIES)
+ goto done;
+
+-
+ pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
+ task_pid_nr(tsk), tsk->comm);
+ debug_show_all_locks();
--- /dev/null
+From 27227c733852f71008e9bf165950bb2edaed3a90 Mon Sep 17 00:00:00 2001
+From: Pavel Tatashin <pasha.tatashin@oracle.com>
+Date: Fri, 11 May 2018 16:01:50 -0700
+Subject: mm: sections are not offlined during memory hotremove
+
+From: Pavel Tatashin <pasha.tatashin@oracle.com>
+
+commit 27227c733852f71008e9bf165950bb2edaed3a90 upstream.
+
+Memory hotplug and hotremove operate with per-block granularity. If the
+machine has a large amount of memory (more than 64G), the size of a
+memory block can span multiple sections. By mistake, during hotremove
+we set only the first section to offline state.
+
+The bug was discovered because kernel selftest started to fail:
+ https://lkml.kernel.org/r/20180423011247.GK5563@yexl-desktop
+
+The failures started after commit "mm/memory_hotplug: optimize probe
+routine", but the bug itself is older than that commit. That optimization
+added a check that sections are in the proper state during hotplug
+operations, which is what exposed the problem.
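+
+As an illustration (assuming the common x86_64 configuration of 128 MiB
+sections and a 2 GiB memory block size on machines of that scale), one
+block spans 16 sections, yet the per-block loop only ever updated the
+section containing start_pfn:
+
+  for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+          /* buggy: always the section of start_pfn, i.e. the first one */
+          unsigned long section_nr = pfn_to_section_nr(start_pfn);
+          ...
+  }
+
+Deriving section_nr from pfn instead, as in the hunk below, marks every
+section covered by the block.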
+
+Link: http://lkml.kernel.org/r/20180427145257.15222-1-pasha.tatashin@oracle.com
+Fixes: 2d070eab2e82 ("mm: consider zone which is not fully populated to have holes")
+Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Steven Sistare <steven.sistare@oracle.com>
+Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/sparse.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -666,7 +666,7 @@ void offline_mem_sections(unsigned long
+ unsigned long pfn;
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+- unsigned long section_nr = pfn_to_section_nr(start_pfn);
++ unsigned long section_nr = pfn_to_section_nr(pfn);
+ struct mem_section *ms;
+
+ /*
--- /dev/null
+From 3057fcef385348fe85173f1b0c824d89f1176f72 Mon Sep 17 00:00:00 2001
+From: Boris Brezillon <boris.brezillon@bootlin.com>
+Date: Fri, 4 May 2018 21:24:31 +0200
+Subject: mtd: rawnand: Make sure we wait tWB before polling the STATUS reg
+
+From: Boris Brezillon <boris.brezillon@bootlin.com>
+
+commit 3057fcef385348fe85173f1b0c824d89f1176f72 upstream.
+
+NAND chips require a bit of time to take the NAND operation into
+account and set the BUSY bit in the STATUS reg. Make sure we don't poll
+the STATUS reg too early in nand_soft_waitrdy().
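+
+A minimal sketch of the intended ordering (condensed from the change
+below; the timings struct and PSEC_TO_NSEC() come from the raw NAND core):
+
+  const struct nand_sdr_timings *timings =
+          nand_get_sdr_timings(&chip->data_interface);
+
+  /* give the chip tWB to assert BUSY before the first STATUS poll */
+  ndelay(PSEC_TO_NSEC(timings->tWB_max));
+  nand_status_op(chip, NULL);
+
+Without the delay, a STATUS read issued right after the command can still
+observe the previous "ready" state, and nand_soft_waitrdy() returns while
+the operation is in fact ongoing.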
+
+Fixes: 8878b126df76 ("mtd: nand: add ->exec_op() implementation")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
+Acked-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/nand_base.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -707,12 +707,17 @@ static void nand_wait_status_ready(struc
+ */
+ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+ {
++ const struct nand_sdr_timings *timings;
+ u8 status = 0;
+ int ret;
+
+ if (!chip->exec_op)
+ return -ENOTSUPP;
+
++ /* Wait tWB before polling the STATUS reg. */
++ timings = nand_get_sdr_timings(&chip->data_interface);
++ ndelay(PSEC_TO_NSEC(timings->tWB_max));
++
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
--- /dev/null
+From a2ee41fd953e7c3ff6c55a3038c80354d191a318 Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+Date: Thu, 3 May 2018 12:00:27 +0200
+Subject: mtd: rawnand: marvell: fix command xtype in BCH write hook
+
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+
+commit a2ee41fd953e7c3ff6c55a3038c80354d191a318 upstream.
+
+One layout supported by the Marvell NAND controller handles 2048-byte
+NAND pages in a single chunk when using BCH with a strength of 4 bits per
+512 bytes. In this case, instead of the generic
+XTYPE_WRITE_DISPATCH/XTYPE_LAST_NAKED_RW pair, the controller expects to
+receive XTYPE_MONOLITHIC_RW.
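+
+The first-chunk setup thus becomes layout dependent (sketch of the logic
+added below, in marvell_nfc_hw_ecc_bch_write_chunk()):
+
+  if (lt->nchunks == 1)
+          xtype = XTYPE_MONOLITHIC_RW;   /* whole page in one chunk */
+  else
+          xtype = XTYPE_WRITE_DISPATCH;  /* followed by naked writes */
+
+  nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype);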
+
+This fixes problems at boot like:
+
+[ 1.315475] Scanning device for bad blocks
+[ 3.203108] marvell-nfc f10d0000.flash: Timeout waiting for RB signal
+[ 3.209564] nand_bbt: error while writing BBT block -110
+[ 4.243106] marvell-nfc f10d0000.flash: Timeout waiting for RB signal
+[ 5.283106] marvell-nfc f10d0000.flash: Timeout waiting for RB signal
+[ 5.289562] nand_bbt: error -110 while marking block 2047 bad
+[ 6.323106] marvell-nfc f10d0000.flash: Timeout waiting for RB signal
+[ 6.329559] nand_bbt: error while writing BBT block -110
+[ 7.363106] marvell-nfc f10d0000.flash: Timeout waiting for RB signal
+[ 8.403105] marvell-nfc f10d0000.flash: Timeout waiting for RB signal
+[ 8.409559] nand_bbt: error -110 while marking block 2046 bad
+...
+
+Fixes: 02f26ecf8c772 ("mtd: nand: add reworked Marvell NAND controller driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Tested-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
+Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/marvell_nand.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/nand/marvell_nand.c
++++ b/drivers/mtd/nand/marvell_nand.c
+@@ -1404,6 +1404,7 @@ marvell_nfc_hw_ecc_bch_write_chunk(struc
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
++ u32 xtype;
+ int ret;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
+@@ -1419,7 +1420,12 @@ marvell_nfc_hw_ecc_bch_write_chunk(struc
+ * last naked write.
+ */
+ if (chunk == 0) {
+- nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) |
++ if (lt->nchunks == 1)
++ xtype = XTYPE_MONOLITHIC_RW;
++ else
++ xtype = XTYPE_WRITE_DISPATCH;
++
++ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_CMD1(NAND_CMD_SEQIN);
+ nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
--- /dev/null
+From b76401fc4ba720f0f38f7b1f9d54d5c2308bc18d Mon Sep 17 00:00:00 2001
+From: Chris Packham <chris.packham@alliedtelesis.co.nz>
+Date: Thu, 3 May 2018 14:21:28 +1200
+Subject: mtd: rawnand: marvell: pass ms delay to wait_op
+
+From: Chris Packham <chris.packham@alliedtelesis.co.nz>
+
+commit b76401fc4ba720f0f38f7b1f9d54d5c2308bc18d upstream.
+
+marvell_nfc_wait_op() expects the delay to be expressed in milliseconds
+but nand_sdr_timings uses picoseconds. Use PSEC_TO_MSEC when passing
+tPROG_max to marvell_nfc_wait_op().
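+
+Since 1 ms = 10^9 ps, passing the raw picosecond value where milliseconds
+are expected inflates the timeout by nine orders of magnitude: assuming a
+typical tPROG_max of 600 us (600,000,000 ps), the driver would wait up to
+roughly 600,000 seconds, about a week, before declaring a timeout, rather
+than the sub-millisecond bound the timing actually calls for.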
+
+Fixes: 02f26ecf8c772 ("mtd: nand: add reworked Marvell NAND controller driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
+Reviewed-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/marvell_nand.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/mtd/nand/marvell_nand.c
++++ b/drivers/mtd/nand/marvell_nand.c
+@@ -1070,7 +1070,7 @@ static int marvell_nfc_hw_ecc_hmg_do_wri
+ return ret;
+
+ ret = marvell_nfc_wait_op(chip,
+- chip->data_interface.timings.sdr.tPROG_max);
++ PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
+ return ret;
+ }
+
+@@ -1490,7 +1490,7 @@ static int marvell_nfc_hw_ecc_bch_write_
+ }
+
+ ret = marvell_nfc_wait_op(chip,
+- chip->data_interface.timings.sdr.tPROG_max);
++ PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
+
+ marvell_nfc_disable_hw_ecc(chip);
+
--- /dev/null
+From acf784bd0ce257fe43da7ca266f7a10b837479d2 Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Thu, 3 May 2018 13:45:58 -0500
+Subject: net: atm: Fix potential Spectre v1
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+commit acf784bd0ce257fe43da7ca266f7a10b837479d2 upstream.
+
+ioc_data.dev_num can be controlled by user-space, hence leading to
+a potential exploitation of the Spectre variant 1 vulnerability.
+
+This issue was detected with the help of Smatch:
+net/atm/lec.c:702 lec_vcc_attach() warn: potential spectre issue
+'dev_lec'
+
+Fix this by sanitizing ioc_data.dev_num before using it to index
+dev_lec. Also, notice that there is another instance in which array
+dev_lec is being indexed using ioc_data.dev_num at line 705:
+lec_vcc_added(netdev_priv(dev_lec[ioc_data.dev_num]),
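+
+The resulting check-then-clamp sequence (condensed from the hunk below)
+follows the usual <linux/nospec.h> pattern:
+
+  if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
+          return -EINVAL;
+  /* clamp the index under speculation before it is used */
+  ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF);
+  if (!dev_lec[ioc_data.dev_num])
+          return -EINVAL;
+
+Because dev_num is clamped once, both this use and the later
+lec_vcc_added() call at line 705 index dev_lec with a sanitized value.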
+
+Notice that given that speculation windows are large, the policy is
+to kill the speculation on the first load and not worry if it can be
+completed with a dependent load/store [1].
+
+[1] https://marc.info/?l=linux-kernel&m=152449131114778&w=2
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/atm/lec.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -41,6 +41,9 @@ static unsigned char bridge_ula_lec[] =
+ #include <linux/module.h>
+ #include <linux/init.h>
+
++/* Hardening for Spectre-v1 */
++#include <linux/nospec.h>
++
+ #include "lec.h"
+ #include "lec_arpc.h"
+ #include "resources.h"
+@@ -687,8 +690,10 @@ static int lec_vcc_attach(struct atm_vcc
+ bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
+ if (bytes_left != 0)
+ pr_info("copy from user failed for %d bytes\n", bytes_left);
+- if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
+- !dev_lec[ioc_data.dev_num])
++ if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
++ return -EINVAL;
++ ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF);
++ if (!dev_lec[ioc_data.dev_num])
+ return -EINVAL;
+ vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
+ if (!vpriv)
--- /dev/null
+From 4bf01ca21e2e0e4561d1a03c48c3d740418702db Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Thu, 26 Apr 2018 09:31:52 +0200
+Subject: rfkill: gpio: fix memory leak in probe error path
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 4bf01ca21e2e0e4561d1a03c48c3d740418702db upstream.
+
+Make sure to free the rfkill device in case registration fails during
+probe.
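+
+A condensed view of the probe error handling after this change (the rfkill
+device comes from rfkill_alloc() and is not resource-managed, so it has to
+be released by hand on failure):
+
+  ret = rfkill_register(rfkill->rfkill_dev);
+  if (ret < 0)
+          goto err_destroy;
+  ...
+  return 0;
+
+  err_destroy:
+          rfkill_destroy(rfkill->rfkill_dev);
+          return ret;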
+
+Fixes: 5e7ca3937fbe ("net: rfkill: gpio: convert to resource managed allocation")
+Cc: stable <stable@vger.kernel.org> # 3.13
+Cc: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/rfkill/rfkill-gpio.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/net/rfkill/rfkill-gpio.c
++++ b/net/rfkill/rfkill-gpio.c
+@@ -137,13 +137,18 @@ static int rfkill_gpio_probe(struct plat
+
+ ret = rfkill_register(rfkill->rfkill_dev);
+ if (ret < 0)
+- return ret;
++ goto err_destroy;
+
+ platform_set_drvdata(pdev, rfkill);
+
+ dev_info(&pdev->dev, "%s device registered.\n", rfkill->name);
+
+ return 0;
++
++err_destroy:
++ rfkill_destroy(rfkill->rfkill_dev);
++
++ return ret;
+ }
+
+ static int rfkill_gpio_remove(struct platform_device *pdev)
bdi-wake-up-concurrent-wb_shutdown-callers.patch
bdi-fix-use-after-free-bug-in-debugfs_remove.patch
bdi-fix-oops-in-wb_workfn.patch
+compat-fix-4-byte-infoleak-via-uninitialized-struct-field.patch
+gpioib-do-not-free-unrequested-descriptors.patch
+gpio-fix-aspeed_gpio-unmask-irq.patch
+gpio-fix-error-path-in-lineevent_create.patch
+rfkill-gpio-fix-memory-leak-in-probe-error-path.patch
+libata-apply-nolpm-quirk-for-sandisk-sd7ub3q-g1001-ssds.patch
+dm-integrity-use-kvfree-for-kvmalloc-d-memory.patch
+tracing-fix-regex_match_front-to-not-over-compare-the-test-string.patch
+z3fold-fix-reclaim-lock-ups.patch
+mm-sections-are-not-offlined-during-memory-hotremove.patch
+mm-oom-fix-concurrent-munlock-and-oom-reaper-unmap-v3.patch
+ceph-fix-rsize-wsize-capping-in-ceph_direct_read_write.patch
+can-flexcan-fix-endianess-detection.patch
+can-kvaser_usb-increase-correct-stats-counter-in-kvaser_usb_rx_can_msg.patch
+can-hi311x-acquire-spi-lock-on-do_get_berr_counter.patch
+can-hi311x-work-around-tx-complete-interrupt-erratum.patch
+mtd-rawnand-marvell-pass-ms-delay-to-wait_op.patch
+mtd-rawnand-marvell-fix-command-xtype-in-bch-write-hook.patch
+mtd-rawnand-make-sure-we-wait-twb-before-polling-the-status-reg.patch
+drm-vc4-fix-scaling-of-uni-planar-formats.patch
+drm-ttm-use-gfp_transhuge_light-for-allocating-huge-pages.patch
+drm-i915-fix-drm-intel_enable_lvds-error-message-in-kernel-log.patch
+drm-i915-adjust-edp-s-logical-vco-in-a-reliable-place.patch
+drm-nouveau-fix-deadlock-in-nv50_mstm_register_connector.patch
+drm-nouveau-ttm-don-t-dereference-nvbo-cli-it-can-outlive-client.patch
+drm-atomic-clean-old_state-new_state-in-drm_atomic_state_default_clear.patch
+drm-atomic-clean-private-obj-old_state-new_state-in-drm_atomic_state_default_clear.patch
+net-atm-fix-potential-spectre-v1.patch
+atm-zatm-fix-potential-spectre-v1.patch
--- /dev/null
+From dc432c3d7f9bceb3de6f5b44fb9c657c9810ed6d Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Wed, 9 May 2018 11:59:32 -0400
+Subject: tracing: Fix regex_match_front() to not over compare the test string
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit dc432c3d7f9bceb3de6f5b44fb9c657c9810ed6d upstream.
+
+The regex match function regex_match_front() in the tracing filter logic
+was fixed to compare only the pattern length instead of the entire test
+string. That is, it went from strncmp(str, r->pattern, len) to
+strncmp(str, r->pattern, r->len).
+
+The issue is that str is not guaranteed to be nul terminated, and if r->len
+is greater than the length of str, it can access more memory than is
+allocated.
+
+The solution is to add a simple test if (len < r->len) return 0.
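+
+As a concrete example, with a 6-byte pattern (r->len == 6) matched against
+a 4-byte string field that is not nul terminated, the old code would run
+strncmp() over 6 bytes and read 2 bytes past the field; with the added
+check, len (4) is smaller than r->len (6), so the function returns 0
+before any memory is touched.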
+
+Cc: stable@vger.kernel.org
+Fixes: 285caad415f45 ("tracing/filters: Fix MATCH_FRONT_ONLY filter matching")
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events_filter.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -338,6 +338,9 @@ static int regex_match_full(char *str, s
+
+ static int regex_match_front(char *str, struct regex *r, int len)
+ {
++ if (len < r->len)
++ return 0;
++
+ if (strncmp(str, r->pattern, r->len) == 0)
+ return 1;
+ return 0;
--- /dev/null
+From 6098d7e136692f9c6e23ae362c62ec822343e4d5 Mon Sep 17 00:00:00 2001
+From: Vitaly Wool <vitalywool@gmail.com>
+Date: Fri, 11 May 2018 16:01:46 -0700
+Subject: z3fold: fix reclaim lock-ups
+
+From: Vitaly Wool <vitalywool@gmail.com>
+
+commit 6098d7e136692f9c6e23ae362c62ec822343e4d5 upstream.
+
+Do not try to optimize in-page object layout while the page is under
+reclaim. This fixes lock-ups on reclaim and improves reclaim
+performance at the same time.
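+
+The mechanism, condensed from the diff below, is a new per-page
+UNDER_RECLAIM flag: z3fold_reclaim_page() sets it before working on the
+page, and z3fold_free() refuses to queue the page for compaction while it
+is set:
+
+  /* in z3fold_free(), before the NEEDS_COMPACTING handling */
+  if (test_bit(UNDER_RECLAIM, &page->private)) {
+          z3fold_page_unlock(zhdr);
+          return;
+  }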
+
+[akpm@linux-foundation.org: coding-style fixes]
+Link: http://lkml.kernel.org/r/20180430125800.444cae9706489f412ad12621@gmail.com
+Signed-off-by: Vitaly Wool <vitaly.vul@sony.com>
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Cc: <Oleksiy.Avramchenko@sony.com>
+Cc: Matthew Wilcox <mawilcox@microsoft.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/z3fold.c | 42 ++++++++++++++++++++++++++++++------------
+ 1 file changed, 30 insertions(+), 12 deletions(-)
+
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -144,7 +144,8 @@ enum z3fold_page_flags {
+ PAGE_HEADLESS = 0,
+ MIDDLE_CHUNK_MAPPED,
+ NEEDS_COMPACTING,
+- PAGE_STALE
++ PAGE_STALE,
++ UNDER_RECLAIM
+ };
+
+ /*****************
+@@ -173,6 +174,7 @@ static struct z3fold_header *init_z3fold
+ clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
+ clear_bit(NEEDS_COMPACTING, &page->private);
+ clear_bit(PAGE_STALE, &page->private);
++ clear_bit(UNDER_RECLAIM, &page->private);
+
+ spin_lock_init(&zhdr->page_lock);
+ kref_init(&zhdr->refcount);
+@@ -748,6 +750,10 @@ static void z3fold_free(struct z3fold_po
+ atomic64_dec(&pool->pages_nr);
+ return;
+ }
++ if (test_bit(UNDER_RECLAIM, &page->private)) {
++ z3fold_page_unlock(zhdr);
++ return;
++ }
+ if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
+ z3fold_page_unlock(zhdr);
+ return;
+@@ -832,6 +838,8 @@ static int z3fold_reclaim_page(struct z3
+ kref_get(&zhdr->refcount);
+ list_del_init(&zhdr->buddy);
+ zhdr->cpu = -1;
++ set_bit(UNDER_RECLAIM, &page->private);
++ break;
+ }
+
+ list_del_init(&page->lru);
+@@ -879,25 +887,35 @@ static int z3fold_reclaim_page(struct z3
+ goto next;
+ }
+ next:
+- spin_lock(&pool->lock);
+ if (test_bit(PAGE_HEADLESS, &page->private)) {
+ if (ret == 0) {
+- spin_unlock(&pool->lock);
+ free_z3fold_page(page);
+ return 0;
+ }
+- } else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
+- atomic64_dec(&pool->pages_nr);
++ spin_lock(&pool->lock);
++ list_add(&page->lru, &pool->lru);
++ spin_unlock(&pool->lock);
++ } else {
++ z3fold_page_lock(zhdr);
++ clear_bit(UNDER_RECLAIM, &page->private);
++ if (kref_put(&zhdr->refcount,
++ release_z3fold_page_locked)) {
++ atomic64_dec(&pool->pages_nr);
++ return 0;
++ }
++ /*
++ * if we are here, the page is still not completely
++ * free. Take the global pool lock then to be able
++ * to add it back to the lru list
++ */
++ spin_lock(&pool->lock);
++ list_add(&page->lru, &pool->lru);
+ spin_unlock(&pool->lock);
+- return 0;
++ z3fold_page_unlock(zhdr);
+ }
+
+- /*
+- * Add to the beginning of LRU.
+- * Pool lock has to be kept here to ensure the page has
+- * not already been released
+- */
+- list_add(&page->lru, &pool->lru);
++ /* We started off locked to we need to lock the pool back */
++ spin_lock(&pool->lock);
+ }
+ spin_unlock(&pool->lock);
+ return -EAGAIN;