--- /dev/null
+From aabbe8f1a561dd8318e693830d9ae377c9a04d2b Mon Sep 17 00:00:00 2001
+From: "ludovic.desroches@atmel.com" <ludovic.desroches@atmel.com>
+Date: Mon, 8 Jun 2015 15:55:48 +0200
+Subject: ARM: at91/dt: sama5d4: fix dma conf for aes, sha and tdes nodes
+
+From: "ludovic.desroches@atmel.com" <ludovic.desroches@atmel.com>
+
+commit aabbe8f1a561dd8318e693830d9ae377c9a04d2b upstream.
+
+The xdmac channel configuration is done in one cell not two. This error
+prevents from probing devices correctly.
+
+Signed-off-by: Ludovic Desroches <ludovic.desroches@atmel.com>
+Fixes: 83906783b766 ("ARM: at91/dt: sama5d4: add aes, sha and tdes nodes")
+Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Kevin Hilman <khilman@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/sama5d4.dtsi | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/arch/arm/boot/dts/sama5d4.dtsi
++++ b/arch/arm/boot/dts/sama5d4.dtsi
+@@ -1125,10 +1125,10 @@
+ compatible = "atmel,at91sam9g46-aes";
+ reg = <0xfc044000 0x100>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>;
+- dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+- AT91_XDMAC_DT_PERID(41)>,
+- <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+- AT91_XDMAC_DT_PERID(40)>;
++ dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
++ | AT91_XDMAC_DT_PERID(41))>,
++ <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
++ | AT91_XDMAC_DT_PERID(40))>;
+ dma-names = "tx", "rx";
+ clocks = <&aes_clk>;
+ clock-names = "aes_clk";
+@@ -1139,10 +1139,10 @@
+ compatible = "atmel,at91sam9g46-tdes";
+ reg = <0xfc04c000 0x100>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH 0>;
+- dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+- AT91_XDMAC_DT_PERID(42)>,
+- <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+- AT91_XDMAC_DT_PERID(43)>;
++ dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
++ | AT91_XDMAC_DT_PERID(42))>,
++ <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
++ | AT91_XDMAC_DT_PERID(43))>;
+ dma-names = "tx", "rx";
+ clocks = <&tdes_clk>;
+ clock-names = "tdes_clk";
+@@ -1153,8 +1153,8 @@
+ compatible = "atmel,at91sam9g46-sha";
+ reg = <0xfc050000 0x100>;
+ interrupts = <15 IRQ_TYPE_LEVEL_HIGH 0>;
+- dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+- AT91_XDMAC_DT_PERID(44)>;
++ dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
++ | AT91_XDMAC_DT_PERID(44))>;
+ dma-names = "tx";
+ clocks = <&sha_clk>;
+ clock-names = "sha_clk";
--- /dev/null
+From ea25525ce0d195724fead07fe6562fe478a3bf6f Mon Sep 17 00:00:00 2001
+From: Ludovic Desroches <ludovic.desroches@atmel.com>
+Date: Thu, 28 May 2015 11:55:16 +0200
+Subject: ARM: at91/dt: sama5d4ek: mci0 uses slot 0
+
+From: Ludovic Desroches <ludovic.desroches@atmel.com>
+
+commit ea25525ce0d195724fead07fe6562fe478a3bf6f upstream.
+
+Mci0 uses slot 0 not 1.
+
+Signed-off-by: Ludovic Desroches <ludovic.desroches@atmel.com>
+Fixes: 7a4752677c44 ("ARM: at91: dt: add device tree file for SAMA5D4ek board")
+Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/at91-sama5d4ek.dts | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
+@@ -108,8 +108,8 @@
+ mmc0: mmc@f8000000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3 &pinctrl_mmc0_cd>;
+- slot@1 {
+- reg = <1>;
++ slot@0 {
++ reg = <0>;
+ bus-width = <4>;
+ cd-gpios = <&pioE 5 0>;
+ };
--- /dev/null
+From 50f0a44991516b5b9744ecb2c080c2ec6ad21b25 Mon Sep 17 00:00:00 2001
+From: Nicolas Ferre <nicolas.ferre@atmel.com>
+Date: Wed, 17 Jun 2015 10:59:04 +0200
+Subject: ARM: at91/dt: trivial: fix USB udc compatible string
+
+From: Nicolas Ferre <nicolas.ferre@atmel.com>
+
+commit 50f0a44991516b5b9744ecb2c080c2ec6ad21b25 upstream.
+
+To please checkpatch and the tiresome reader, add the "atmel," prefix to the
+USB udc compatible string.
+
+Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Kevin Hilman <khilman@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/devicetree/bindings/usb/atmel-usb.txt | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/Documentation/devicetree/bindings/usb/atmel-usb.txt
++++ b/Documentation/devicetree/bindings/usb/atmel-usb.txt
+@@ -60,9 +60,9 @@ Atmel High-Speed USB device controller
+
+ Required properties:
+ - compatible: Should be one of the following
+- "at91sam9rl-udc"
+- "at91sam9g45-udc"
+- "sama5d3-udc"
++ "atmel,at91sam9rl-udc"
++ "atmel,at91sam9g45-udc"
++ "atmel,sama5d3-udc"
+ - reg: Address and length of the register set for the device
+ - interrupts: Should contain usba interrupt
+ - ep childnode: To specify the number of endpoints and their properties.
--- /dev/null
+From 6540165cf41655810ee67b78f01537af022a636a Mon Sep 17 00:00:00 2001
+From: Boris Brezillon <boris.brezillon@free-electrons.com>
+Date: Wed, 17 Jun 2015 10:59:05 +0200
+Subject: ARM: at91/dt: update udc compatible strings
+
+From: Boris Brezillon <boris.brezillon@free-electrons.com>
+
+commit 6540165cf41655810ee67b78f01537af022a636a upstream.
+
+at91sam9g45, at91sam9x5 and sama5 SoCs should not use
+"atmel,at91sam9rl-udc" for their USB device compatible property since
+this compatible is attached to a specific hardware bug fix.
+
+Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+Acked-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+Tested-by: Bo Shen <voice.shen@atmel.com>
+Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Kevin Hilman <khilman@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/at91sam9g45.dtsi | 2 +-
+ arch/arm/boot/dts/at91sam9x5.dtsi | 2 +-
+ arch/arm/boot/dts/sama5d3.dtsi | 2 +-
+ arch/arm/boot/dts/sama5d4.dtsi | 2 +-
+ 4 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/boot/dts/at91sam9g45.dtsi
++++ b/arch/arm/boot/dts/at91sam9g45.dtsi
+@@ -1148,7 +1148,7 @@
+ usb2: gadget@fff78000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- compatible = "atmel,at91sam9rl-udc";
++ compatible = "atmel,at91sam9g45-udc";
+ reg = <0x00600000 0x80000
+ 0xfff78000 0x400>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH 0>;
+--- a/arch/arm/boot/dts/at91sam9x5.dtsi
++++ b/arch/arm/boot/dts/at91sam9x5.dtsi
+@@ -1062,7 +1062,7 @@
+ usb2: gadget@f803c000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- compatible = "atmel,at91sam9rl-udc";
++ compatible = "atmel,at91sam9g45-udc";
+ reg = <0x00500000 0x80000
+ 0xf803c000 0x400>;
+ interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
+--- a/arch/arm/boot/dts/sama5d3.dtsi
++++ b/arch/arm/boot/dts/sama5d3.dtsi
+@@ -1321,7 +1321,7 @@
+ usb0: gadget@00500000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- compatible = "atmel,at91sam9rl-udc";
++ compatible = "atmel,sama5d3-udc";
+ reg = <0x00500000 0x100000
+ 0xf8030000 0x4000>;
+ interrupts = <33 IRQ_TYPE_LEVEL_HIGH 2>;
+--- a/arch/arm/boot/dts/sama5d4.dtsi
++++ b/arch/arm/boot/dts/sama5d4.dtsi
+@@ -123,7 +123,7 @@
+ usb0: gadget@00400000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- compatible = "atmel,at91sam9rl-udc";
++ compatible = "atmel,sama5d3-udc";
+ reg = <0x00400000 0x100000
+ 0xfc02c000 0x4000>;
+ interrupts = <47 IRQ_TYPE_LEVEL_HIGH 2>;
--- /dev/null
+From f3f5da624e0a891c34d8cd513c57f1d9b0c7dadc Mon Sep 17 00:00:00 2001
+From: "Martin K. Petersen" <martin.petersen@oracle.com>
+Date: Wed, 22 Jul 2015 07:57:12 -0400
+Subject: block: Do a full clone when splitting discard bios
+
+From: "Martin K. Petersen" <martin.petersen@oracle.com>
+
+commit f3f5da624e0a891c34d8cd513c57f1d9b0c7dadc upstream.
+
+This fixes a data corruption bug when using discard on top of MD linear,
+raid0 and raid10 personalities.
+
+Commit 20d0189b1012 "block: Introduce new bio_split()" permits sharing
+the bio_vec between the two resulting bios. That is fine for read/write
+requests where the bio_vec is immutable. For discards, however, we need
+to be able to attach a payload and update the bio_vec so the page can
+get mapped to a scatterlist entry. Therefore the bio_vec can not be
+shared when splitting discards and we must do a full clone.
+
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Reported-by: Seunguk Shin <seunguk.shin@samsung.com>
+Tested-by: Seunguk Shin <seunguk.shin@samsung.com>
+Cc: Seunguk Shin <seunguk.shin@samsung.com>
+Cc: Jens Axboe <axboe@fb.com>
+Cc: Kent Overstreet <kent.overstreet@gmail.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/bio.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1814,8 +1814,9 @@ EXPORT_SYMBOL(bio_endio_nodec);
+ * Allocates and returns a new bio which represents @sectors from the start of
+ * @bio, and updates @bio to represent the remaining sectors.
+ *
+- * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
+- * responsibility to ensure that @bio is not freed before the split.
++ * Unless this is a discard request the newly allocated bio will point
++ * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
++ * @bio is not freed before the split.
+ */
+ struct bio *bio_split(struct bio *bio, int sectors,
+ gfp_t gfp, struct bio_set *bs)
+@@ -1825,7 +1826,15 @@ struct bio *bio_split(struct bio *bio, i
+ BUG_ON(sectors <= 0);
+ BUG_ON(sectors >= bio_sectors(bio));
+
+- split = bio_clone_fast(bio, gfp, bs);
++ /*
++ * Discards need a mutable bio_vec to accommodate the payload
++ * required by the DSM TRIM and UNMAP commands.
++ */
++ if (bio->bi_rw & REQ_DISCARD)
++ split = bio_clone_bioset(bio, gfp, bs);
++ else
++ split = bio_clone_fast(bio, gfp, bs);
++
+ if (!split)
+ return NULL;
+
--- /dev/null
+From 4d4e41aef9429872ea3b105e83426941f7185ab6 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@canonical.com>
+Date: Tue, 5 May 2015 19:49:55 +0800
+Subject: block: loop: avoiding too many pending per work I/O
+
+From: Ming Lei <ming.lei@canonical.com>
+
+commit 4d4e41aef9429872ea3b105e83426941f7185ab6 upstream.
+
+If there are too many pending per work I/O, too many
+high priority work thread can be generated so that
+system performance can be affected.
+
+This patch limits the max_active parameter of workqueue as 16.
+
+This patch fixes Fedora 22 live booting performance
+regression when it is booted from squashfs over dm
+based on loop, and looks the following reasons are
+related with the problem:
+
+- not like other filesystems (such as ext4), squashfs
+is a bit special, and I observed that increasing I/O jobs
+to access file in squashfs only improve I/O performance a
+little, but it can make big difference for ext4
+
+- nested loop: both squashfs.img and ext3fs.img are mounted
+as loop block, and ext3fs.img is inside the squashfs
+
+- during booting, lots of tasks may run concurrently
+
+Fixes: b5dd2f6047ca108001328aac0e8588edd15f1778
+Cc: Justin M. Forbes <jforbes@fedoraproject.org>
+Signed-off-by: Ming Lei <ming.lei@canonical.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/loop.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -725,7 +725,7 @@ static int loop_set_fd(struct loop_devic
+ goto out_putf;
+ error = -ENOMEM;
+ lo->wq = alloc_workqueue("kloopd%d",
+- WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 0,
++ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 16,
+ lo->lo_number);
+ if (!lo->wq)
+ goto out_putf;
--- /dev/null
+From f4aa4c7bbac6c4afdd4adccf90898c1a3685396d Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@canonical.com>
+Date: Tue, 5 May 2015 19:49:54 +0800
+Subject: block: loop: convert to per-device workqueue
+
+From: Ming Lei <ming.lei@canonical.com>
+
+commit f4aa4c7bbac6c4afdd4adccf90898c1a3685396d upstream.
+
+Documentation/workqueue.txt:
+ If there is dependency among multiple work items used
+ during memory reclaim, they should be queued to separate
+ wq each with WQ_MEM_RECLAIM.
+
+Loop devices can be stacked, so we have to convert to per-device
+workqueue. One example is Fedora live CD.
+
+Fixes: b5dd2f6047ca108001328aac0e8588edd15f1778
+Cc: Justin M. Forbes <jforbes@fedoraproject.org>
+Signed-off-by: Ming Lei <ming.lei@canonical.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/loop.c | 30 ++++++++++++++----------------
+ drivers/block/loop.h | 1 +
+ 2 files changed, 15 insertions(+), 16 deletions(-)
+
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -86,8 +86,6 @@ static DEFINE_MUTEX(loop_index_mutex);
+ static int max_part;
+ static int part_shift;
+
+-static struct workqueue_struct *loop_wq;
+-
+ static int transfer_xor(struct loop_device *lo, int cmd,
+ struct page *raw_page, unsigned raw_off,
+ struct page *loop_page, unsigned loop_off,
+@@ -725,6 +723,12 @@ static int loop_set_fd(struct loop_devic
+ size = get_loop_size(lo, file);
+ if ((loff_t)(sector_t)size != size)
+ goto out_putf;
++ error = -ENOMEM;
++ lo->wq = alloc_workqueue("kloopd%d",
++ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 0,
++ lo->lo_number);
++ if (!lo->wq)
++ goto out_putf;
+
+ error = 0;
+
+@@ -872,6 +876,8 @@ static int loop_clr_fd(struct loop_devic
+ lo->lo_flags = 0;
+ if (!part_shift)
+ lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
++ destroy_workqueue(lo->wq);
++ lo->wq = NULL;
+ mutex_unlock(&lo->lo_ctl_mutex);
+ /*
+ * Need not hold lo_ctl_mutex to fput backing file.
+@@ -1425,9 +1431,13 @@ static int loop_queue_rq(struct blk_mq_h
+ const struct blk_mq_queue_data *bd)
+ {
+ struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
++ struct loop_device *lo = cmd->rq->q->queuedata;
+
+ blk_mq_start_request(bd->rq);
+
++ if (lo->lo_state != Lo_bound)
++ return -EIO;
++
+ if (cmd->rq->cmd_flags & REQ_WRITE) {
+ struct loop_device *lo = cmd->rq->q->queuedata;
+ bool need_sched = true;
+@@ -1441,9 +1451,9 @@ static int loop_queue_rq(struct blk_mq_h
+ spin_unlock_irq(&lo->lo_lock);
+
+ if (need_sched)
+- queue_work(loop_wq, &lo->write_work);
++ queue_work(lo->wq, &lo->write_work);
+ } else {
+- queue_work(loop_wq, &cmd->read_work);
++ queue_work(lo->wq, &cmd->read_work);
+ }
+
+ return BLK_MQ_RQ_QUEUE_OK;
+@@ -1455,9 +1465,6 @@ static void loop_handle_cmd(struct loop_
+ struct loop_device *lo = cmd->rq->q->queuedata;
+ int ret = -EIO;
+
+- if (lo->lo_state != Lo_bound)
+- goto failed;
+-
+ if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
+ goto failed;
+
+@@ -1806,13 +1813,6 @@ static int __init loop_init(void)
+ goto misc_out;
+ }
+
+- loop_wq = alloc_workqueue("kloopd",
+- WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 0);
+- if (!loop_wq) {
+- err = -ENOMEM;
+- goto misc_out;
+- }
+-
+ blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
+ THIS_MODULE, loop_probe, NULL, NULL);
+
+@@ -1850,8 +1850,6 @@ static void __exit loop_exit(void)
+ blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
+ unregister_blkdev(LOOP_MAJOR, "loop");
+
+- destroy_workqueue(loop_wq);
+-
+ misc_deregister(&loop_misc);
+ }
+
+--- a/drivers/block/loop.h
++++ b/drivers/block/loop.h
+@@ -54,6 +54,7 @@ struct loop_device {
+ gfp_t old_gfp_mask;
+
+ spinlock_t lo_lock;
++ struct workqueue_struct *wq;
+ struct list_head write_cmd_head;
+ struct work_struct write_work;
+ bool write_started;
--- /dev/null
+From a18f8e97fe69195823d7fb5c68a8d6565f39db4b Mon Sep 17 00:00:00 2001
+From: Pawel Moll <pawel.moll@arm.com>
+Date: Thu, 2 Apr 2015 18:50:32 +0100
+Subject: bus: arm-ccn: Fix node->XP config conversion
+
+From: Pawel Moll <pawel.moll@arm.com>
+
+commit a18f8e97fe69195823d7fb5c68a8d6565f39db4b upstream.
+
+Events defined as watchpoints on nodes must have their config values
+converted so that they apply to the respective node's XP. The
+function setting new values was using wrong mask for the "port" field,
+resulting in corrupted value. Fixed now.
+
+Signed-off-by: Pawel Moll <pawel.moll@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/bus/arm-ccn.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/bus/arm-ccn.c
++++ b/drivers/bus/arm-ccn.c
+@@ -212,7 +212,7 @@ static int arm_ccn_node_to_xp_port(int n
+
+ static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
+ {
+- *config &= ~((0xff << 0) | (0xff << 8) | (0xff << 24));
++ *config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24));
+ *config |= (node_xp << 0) | (type << 8) | (port << 24);
+ }
+
--- /dev/null
+From 4c7e309340ff85072e96f529582d159002c36734 Mon Sep 17 00:00:00 2001
+From: Dennis Yang <shinrairis@gmail.com>
+Date: Fri, 26 Jun 2015 15:25:48 +0100
+Subject: dm btree remove: fix bug in redistribute3
+
+From: Dennis Yang <shinrairis@gmail.com>
+
+commit 4c7e309340ff85072e96f529582d159002c36734 upstream.
+
+redistribute3() shares entries out across 3 nodes. Some entries were
+being moved the wrong way, breaking the ordering. This manifested as a
+BUG() in dm-btree-remove.c:shift() when entries were removed from the
+btree.
+
+For additional context see:
+https://www.redhat.com/archives/dm-devel/2015-May/msg00113.html
+
+Signed-off-by: Dennis Yang <shinrairis@gmail.com>
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/persistent-data/dm-btree-remove.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -309,8 +309,8 @@ static void redistribute3(struct dm_btre
+
+ if (s < 0 && nr_center < -s) {
+ /* not enough in central node */
+- shift(left, center, nr_center);
+- s = nr_center - target;
++ shift(left, center, -nr_center);
++ s += nr_center;
+ shift(left, right, s);
+ nr_right += s;
+ } else
+@@ -323,7 +323,7 @@ static void redistribute3(struct dm_btre
+ if (s > 0 && nr_center < s) {
+ /* not enough in central node */
+ shift(center, right, nr_center);
+- s = target - nr_center;
++ s -= nr_center;
+ shift(left, right, s);
+ nr_left -= s;
+ } else
--- /dev/null
+From 1c7518794a3647eb345d59ee52844e8a40405198 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Fri, 3 Jul 2015 14:51:32 +0100
+Subject: dm btree: silence lockdep lock inversion in dm_btree_del()
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 1c7518794a3647eb345d59ee52844e8a40405198 upstream.
+
+Allocate memory using GFP_NOIO when deleting a btree. dm_btree_del()
+can be called via an ioctl and we don't want to recurse into the FS or
+block layer.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/persistent-data/dm-btree.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *i
+ int r;
+ struct del_stack *s;
+
+- s = kmalloc(sizeof(*s), GFP_KERNEL);
++ s = kmalloc(sizeof(*s), GFP_NOIO);
+ if (!s)
+ return -ENOMEM;
+ s->info = info;
--- /dev/null
+From fb4100ae7f312c3d614b37621c2b17b3b7cf65f8 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Wed, 20 May 2015 10:30:32 +0100
+Subject: dm cache: fix race when issuing a POLICY_REPLACE operation
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit fb4100ae7f312c3d614b37621c2b17b3b7cf65f8 upstream.
+
+There is a race between a policy deciding to replace a cache entry,
+the core target writing back any dirty data from this block, and other
+IO threads doing IO to the same block.
+
+This sort of problem is avoided most of the time by the core target
+grabbing a bio prison cell before making the request to the policy.
+But for a demotion the core target doesn't know which block will be
+demoted, so can't do this in advance.
+
+Fix this demotion race by introducing a callback to the policy interface
+that allows the policy to grab the cell on behalf of the core target.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-policy-cleaner.c | 3 +
+ drivers/md/dm-cache-policy-internal.h | 5 +-
+ drivers/md/dm-cache-policy-mq.c | 41 ++++++++++++++++--------
+ drivers/md/dm-cache-policy.h | 15 ++++++++
+ drivers/md/dm-cache-target.c | 58 ++++++++++++++++++++++------------
+ 5 files changed, 85 insertions(+), 37 deletions(-)
+
+--- a/drivers/md/dm-cache-policy-cleaner.c
++++ b/drivers/md/dm-cache-policy-cleaner.c
+@@ -171,7 +171,8 @@ static void remove_cache_hash_entry(stru
+ /* Public interface (see dm-cache-policy.h */
+ static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+- struct bio *bio, struct policy_result *result)
++ struct bio *bio, struct policy_locker *locker,
++ struct policy_result *result)
+ {
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+--- a/drivers/md/dm-cache-policy-internal.h
++++ b/drivers/md/dm-cache-policy-internal.h
+@@ -16,9 +16,10 @@
+ */
+ static inline int policy_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+- struct bio *bio, struct policy_result *result)
++ struct bio *bio, struct policy_locker *locker,
++ struct policy_result *result)
+ {
+- return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, result);
++ return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, locker, result);
+ }
+
+ static inline int policy_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
+--- a/drivers/md/dm-cache-policy-mq.c
++++ b/drivers/md/dm-cache-policy-mq.c
+@@ -693,9 +693,10 @@ static void requeue(struct mq_policy *mq
+ * - set the hit count to a hard coded value other than 1, eg, is it better
+ * if it goes in at level 2?
+ */
+-static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
++static int demote_cblock(struct mq_policy *mq,
++ struct policy_locker *locker, dm_oblock_t *oblock)
+ {
+- struct entry *demoted = pop(mq, &mq->cache_clean);
++ struct entry *demoted = peek(&mq->cache_clean);
+
+ if (!demoted)
+ /*
+@@ -707,6 +708,13 @@ static int demote_cblock(struct mq_polic
+ */
+ return -ENOSPC;
+
++ if (locker->fn(locker, demoted->oblock))
++ /*
++ * We couldn't lock the demoted block.
++ */
++ return -EBUSY;
++
++ del(mq, demoted);
+ *oblock = demoted->oblock;
+ free_entry(&mq->cache_pool, demoted);
+
+@@ -795,6 +803,7 @@ static int cache_entry_found(struct mq_p
+ * finding which cache block to use.
+ */
+ static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
++ struct policy_locker *locker,
+ struct policy_result *result)
+ {
+ int r;
+@@ -803,11 +812,12 @@ static int pre_cache_to_cache(struct mq_
+ /* Ensure there's a free cblock in the cache */
+ if (epool_empty(&mq->cache_pool)) {
+ result->op = POLICY_REPLACE;
+- r = demote_cblock(mq, &result->old_oblock);
++ r = demote_cblock(mq, locker, &result->old_oblock);
+ if (r) {
+ result->op = POLICY_MISS;
+ return 0;
+ }
++
+ } else
+ result->op = POLICY_NEW;
+
+@@ -829,7 +839,8 @@ static int pre_cache_to_cache(struct mq_
+
+ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
+ bool can_migrate, bool discarded_oblock,
+- int data_dir, struct policy_result *result)
++ int data_dir, struct policy_locker *locker,
++ struct policy_result *result)
+ {
+ int r = 0;
+
+@@ -842,7 +853,7 @@ static int pre_cache_entry_found(struct
+
+ else {
+ requeue(mq, e);
+- r = pre_cache_to_cache(mq, e, result);
++ r = pre_cache_to_cache(mq, e, locker, result);
+ }
+
+ return r;
+@@ -872,6 +883,7 @@ static void insert_in_pre_cache(struct m
+ }
+
+ static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
++ struct policy_locker *locker,
+ struct policy_result *result)
+ {
+ int r;
+@@ -879,7 +891,7 @@ static void insert_in_cache(struct mq_po
+
+ if (epool_empty(&mq->cache_pool)) {
+ result->op = POLICY_REPLACE;
+- r = demote_cblock(mq, &result->old_oblock);
++ r = demote_cblock(mq, locker, &result->old_oblock);
+ if (unlikely(r)) {
+ result->op = POLICY_MISS;
+ insert_in_pre_cache(mq, oblock);
+@@ -907,11 +919,12 @@ static void insert_in_cache(struct mq_po
+
+ static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
+ bool can_migrate, bool discarded_oblock,
+- int data_dir, struct policy_result *result)
++ int data_dir, struct policy_locker *locker,
++ struct policy_result *result)
+ {
+ if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
+ if (can_migrate)
+- insert_in_cache(mq, oblock, result);
++ insert_in_cache(mq, oblock, locker, result);
+ else
+ return -EWOULDBLOCK;
+ } else {
+@@ -928,7 +941,8 @@ static int no_entry_found(struct mq_poli
+ */
+ static int map(struct mq_policy *mq, dm_oblock_t oblock,
+ bool can_migrate, bool discarded_oblock,
+- int data_dir, struct policy_result *result)
++ int data_dir, struct policy_locker *locker,
++ struct policy_result *result)
+ {
+ int r = 0;
+ struct entry *e = hash_lookup(mq, oblock);
+@@ -942,11 +956,11 @@ static int map(struct mq_policy *mq, dm_
+
+ else if (e)
+ r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
+- data_dir, result);
++ data_dir, locker, result);
+
+ else
+ r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
+- data_dir, result);
++ data_dir, locker, result);
+
+ if (r == -EWOULDBLOCK)
+ result->op = POLICY_MISS;
+@@ -1012,7 +1026,8 @@ static void copy_tick(struct mq_policy *
+
+ static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+- struct bio *bio, struct policy_result *result)
++ struct bio *bio, struct policy_locker *locker,
++ struct policy_result *result)
+ {
+ int r;
+ struct mq_policy *mq = to_mq_policy(p);
+@@ -1028,7 +1043,7 @@ static int mq_map(struct dm_cache_policy
+
+ iot_examine_bio(&mq->tracker, bio);
+ r = map(mq, oblock, can_migrate, discarded_oblock,
+- bio_data_dir(bio), result);
++ bio_data_dir(bio), locker, result);
+
+ mutex_unlock(&mq->lock);
+
+--- a/drivers/md/dm-cache-policy.h
++++ b/drivers/md/dm-cache-policy.h
+@@ -70,6 +70,18 @@ enum policy_operation {
+ };
+
+ /*
++ * When issuing a POLICY_REPLACE the policy needs to make a callback to
++ * lock the block being demoted. This doesn't need to occur during a
++ * writeback operation since the block remains in the cache.
++ */
++struct policy_locker;
++typedef int (*policy_lock_fn)(struct policy_locker *l, dm_oblock_t oblock);
++
++struct policy_locker {
++ policy_lock_fn fn;
++};
++
++/*
+ * This is the instruction passed back to the core target.
+ */
+ struct policy_result {
+@@ -122,7 +134,8 @@ struct dm_cache_policy {
+ */
+ int (*map)(struct dm_cache_policy *p, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+- struct bio *bio, struct policy_result *result);
++ struct bio *bio, struct policy_locker *locker,
++ struct policy_result *result);
+
+ /*
+ * Sometimes we want to see if a block is in the cache, without
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -1445,16 +1445,43 @@ static void inc_miss_counter(struct cach
+ &cache->stats.read_miss : &cache->stats.write_miss);
+ }
+
++/*----------------------------------------------------------------*/
++
++struct old_oblock_lock {
++ struct policy_locker locker;
++ struct cache *cache;
++ struct prealloc *structs;
++ struct dm_bio_prison_cell *cell;
++};
++
++static int null_locker(struct policy_locker *locker, dm_oblock_t b)
++{
++ /* This should never be called */
++ BUG();
++ return 0;
++}
++
++static int cell_locker(struct policy_locker *locker, dm_oblock_t b)
++{
++ struct old_oblock_lock *l = container_of(locker, struct old_oblock_lock, locker);
++ struct dm_bio_prison_cell *cell_prealloc = prealloc_get_cell(l->structs);
++
++ return bio_detain(l->cache, b, NULL, cell_prealloc,
++ (cell_free_fn) prealloc_put_cell,
++ l->structs, &l->cell);
++}
++
+ static void process_bio(struct cache *cache, struct prealloc *structs,
+ struct bio *bio)
+ {
+ int r;
+ bool release_cell = true;
+ dm_oblock_t block = get_bio_block(cache, bio);
+- struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
++ struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
+ struct policy_result lookup_result;
+ bool passthrough = passthrough_mode(&cache->features);
+ bool discarded_block, can_migrate;
++ struct old_oblock_lock ool;
+
+ /*
+ * Check to see if that block is currently migrating.
+@@ -1469,8 +1496,12 @@ static void process_bio(struct cache *ca
+ discarded_block = is_discarded_oblock(cache, block);
+ can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
+
++ ool.locker.fn = cell_locker;
++ ool.cache = cache;
++ ool.structs = structs;
++ ool.cell = NULL;
+ r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
+- bio, &lookup_result);
++ bio, &ool.locker, &lookup_result);
+
+ if (r == -EWOULDBLOCK)
+ /* migration has been denied */
+@@ -1527,27 +1558,11 @@ static void process_bio(struct cache *ca
+ break;
+
+ case POLICY_REPLACE:
+- cell_prealloc = prealloc_get_cell(structs);
+- r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
+- (cell_free_fn) prealloc_put_cell,
+- structs, &old_ocell);
+- if (r > 0) {
+- /*
+- * We have to be careful to avoid lock inversion of
+- * the cells. So we back off, and wait for the
+- * old_ocell to become free.
+- */
+- policy_force_mapping(cache->policy, block,
+- lookup_result.old_oblock);
+- atomic_inc(&cache->stats.cache_cell_clash);
+- break;
+- }
+ atomic_inc(&cache->stats.demotion);
+ atomic_inc(&cache->stats.promotion);
+-
+ demote_then_promote(cache, structs, lookup_result.old_oblock,
+ block, lookup_result.cblock,
+- old_ocell, new_ocell);
++ ool.cell, new_ocell);
+ release_cell = false;
+ break;
+
+@@ -2595,6 +2610,9 @@ static int __cache_map(struct cache *cac
+ bool discarded_block;
+ struct policy_result lookup_result;
+ struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
++ struct old_oblock_lock ool;
++
++ ool.locker.fn = null_locker;
+
+ if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
+ /*
+@@ -2633,7 +2651,7 @@ static int __cache_map(struct cache *cac
+ discarded_block = is_discarded_oblock(cache, block);
+
+ r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
+- bio, &lookup_result);
++ bio, &ool.locker, &lookup_result);
+ if (r == -EWOULDBLOCK) {
+ cell_defer(cache, *cell, true);
+ return DM_MAPIO_SUBMITTED;
--- /dev/null
+From 6096d91af0b65a3967139b32d5adbb3647858a26 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Wed, 17 Jun 2015 13:35:19 +0100
+Subject: dm space map metadata: fix occasional leak of a metadata block on resize
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 6096d91af0b65a3967139b32d5adbb3647858a26 upstream.
+
+The metadata space map has a simplified 'bootstrap' mode that is
+operational when extending the space maps. Whilst in this mode it's
+possible for some refcount decrement operations to become queued (eg, as
+a result of shadowing one of the bitmap indexes). These decrements were
+not being applied when switching out of bootstrap mode.
+
+The effect of this bug was the leaking of a 4k metadata block. This is
+detected by the latest version of thin_check as a non fatal error.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/persistent-data/dm-space-map-metadata.c | 50 ++++++++++++++-------
+ 1 file changed, 35 insertions(+), 15 deletions(-)
+
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -204,6 +204,27 @@ static void in(struct sm_metadata *smm)
+ smm->recursion_count++;
+ }
+
++static int apply_bops(struct sm_metadata *smm)
++{
++ int r = 0;
++
++ while (!brb_empty(&smm->uncommitted)) {
++ struct block_op bop;
++
++ r = brb_pop(&smm->uncommitted, &bop);
++ if (r) {
++ DMERR("bug in bop ring buffer");
++ break;
++ }
++
++ r = commit_bop(smm, &bop);
++ if (r)
++ break;
++ }
++
++ return r;
++}
++
+ static int out(struct sm_metadata *smm)
+ {
+ int r = 0;
+@@ -216,21 +237,8 @@ static int out(struct sm_metadata *smm)
+ return -ENOMEM;
+ }
+
+- if (smm->recursion_count == 1) {
+- while (!brb_empty(&smm->uncommitted)) {
+- struct block_op bop;
+-
+- r = brb_pop(&smm->uncommitted, &bop);
+- if (r) {
+- DMERR("bug in bop ring buffer");
+- break;
+- }
+-
+- r = commit_bop(smm, &bop);
+- if (r)
+- break;
+- }
+- }
++ if (smm->recursion_count == 1)
++ apply_bops(smm);
+
+ smm->recursion_count--;
+
+@@ -704,6 +712,12 @@ static int sm_metadata_extend(struct dm_
+ }
+ old_len = smm->begin;
+
++ r = apply_bops(smm);
++ if (r) {
++ DMERR("%s: apply_bops failed", __func__);
++ goto out;
++ }
++
+ r = sm_ll_commit(&smm->ll);
+ if (r)
+ goto out;
+@@ -773,6 +787,12 @@ int dm_sm_metadata_create(struct dm_spac
+ if (r)
+ return r;
+
++ r = apply_bops(smm);
++ if (r) {
++ DMERR("%s: apply_bops failed", __func__);
++ return r;
++ }
++
+ return sm_metadata_commit(sm);
+ }
+
--- /dev/null
+From dd4c1b7d0c95be1c9245118a3accc41a16f1db67 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Fri, 5 Jun 2015 09:50:42 -0400
+Subject: dm stats: fix divide by zero if 'number_of_areas' arg is zero
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit dd4c1b7d0c95be1c9245118a3accc41a16f1db67 upstream.
+
+If the number_of_areas argument was zero the kernel would crash on
+div-by-zero. Add better input validation.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-stats.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -795,6 +795,8 @@ static int message_stats_create(struct m
+ return -EINVAL;
+
+ if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
++ if (!divisor)
++ return -EINVAL;
+ step = end - start;
+ if (do_div(step, divisor))
+ step++;
--- /dev/null
+From a822c83e47d97cdef38c4352e1ef62d9f46cfe98 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Fri, 3 Jul 2015 10:22:42 +0100
+Subject: dm thin: allocate the cell_sort_array dynamically
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit a822c83e47d97cdef38c4352e1ef62d9f46cfe98 upstream.
+
+Given the pool's cell_sort_array holds 8192 pointers it triggers an
+order 5 allocation via kmalloc. This order 5 allocation is prone to
+failure as system memory gets more fragmented over time.
+
+Fix this by allocating the cell_sort_array using vmalloc.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -18,6 +18,7 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/vmalloc.h>
+ #include <linux/sort.h>
+ #include <linux/rbtree.h>
+
+@@ -260,7 +261,7 @@ struct pool {
+ process_mapping_fn process_prepared_mapping;
+ process_mapping_fn process_prepared_discard;
+
+- struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
++ struct dm_bio_prison_cell **cell_sort_array;
+ };
+
+ static enum pool_mode get_pool_mode(struct pool *pool);
+@@ -2499,6 +2500,7 @@ static void __pool_destroy(struct pool *
+ {
+ __pool_table_remove(pool);
+
++ vfree(pool->cell_sort_array);
+ if (dm_pool_metadata_close(pool->pmd) < 0)
+ DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
+
+@@ -2611,6 +2613,13 @@ static struct pool *pool_create(struct m
+ goto bad_mapping_pool;
+ }
+
++ pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
++ if (!pool->cell_sort_array) {
++ *error = "Error allocating cell sort array";
++ err_p = ERR_PTR(-ENOMEM);
++ goto bad_sort_array;
++ }
++
+ pool->ref_count = 1;
+ pool->last_commit_jiffies = jiffies;
+ pool->pool_md = pool_md;
+@@ -2619,6 +2628,8 @@ static struct pool *pool_create(struct m
+
+ return pool;
+
++bad_sort_array:
++ mempool_destroy(pool->mapping_pool);
+ bad_mapping_pool:
+ dm_deferred_set_destroy(pool->all_io_ds);
+ bad_all_io_ds:
--- /dev/null
+From 01447e9f04ba1c49a9534ae6a5a6f26c2bb05226 Mon Sep 17 00:00:00 2001
+From: Zhao Junwang <zhjwpku@gmail.com>
+Date: Tue, 7 Jul 2015 17:08:35 +0800
+Subject: drm: add a check for x/y in drm_mode_setcrtc
+
+From: Zhao Junwang <zhjwpku@gmail.com>
+
+commit 01447e9f04ba1c49a9534ae6a5a6f26c2bb05226 upstream.
+
+legacy setcrtc ioctl does take a 32 bit value which might indeed
+overflow
+
+the checks of crtc_req->x > INT_MAX and crtc_req->y > INT_MAX aren't
+needed any more with this
+
+v2: -polish the annotation according to Daniel's comment
+
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Zhao Junwang <zhjwpku@gmail.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_crtc.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -2749,8 +2749,11 @@ int drm_mode_setcrtc(struct drm_device *
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+- /* For some reason crtc x/y offsets are signed internally. */
+- if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
++ /*
++ * Universal plane src offsets are only 16.16, prevent havoc for
++ * drivers using universal plane code internally.
++ */
++ if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
+ return -ERANGE;
+
+ drm_modeset_lock_all(dev);
--- /dev/null
+From 60f207a5b6d8f23c2e8388b415e8d5c7311cc79d Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <a.ryabinin@samsung.com>
+Date: Mon, 25 May 2015 13:29:44 +0300
+Subject: drm/atomic: fix out of bounds read in for_each_*_in_state helpers
+
+From: Andrey Ryabinin <a.ryabinin@samsung.com>
+
+commit 60f207a5b6d8f23c2e8388b415e8d5c7311cc79d upstream.
+
+for_each_*_in_state validate array index after
+access to array elements, thus perform out of bounds read.
+
+Fix this by validating index in the first place and read
+array element iff validation was successful.
+
+Fixes: df63b9994eaf ("drm/atomic: Add for_each_{connector,crtc,plane}_in_state helper macros")
+Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/drm/drm_atomic.h | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/include/drm/drm_atomic.h
++++ b/include/drm/drm_atomic.h
+@@ -77,26 +77,26 @@ int __must_check drm_atomic_async_commit
+
+ #define for_each_connector_in_state(state, connector, connector_state, __i) \
+ for ((__i) = 0; \
+- (connector) = (state)->connectors[__i], \
+- (connector_state) = (state)->connector_states[__i], \
+- (__i) < (state)->num_connector; \
++ (__i) < (state)->num_connector && \
++ ((connector) = (state)->connectors[__i], \
++ (connector_state) = (state)->connector_states[__i], 1); \
+ (__i)++) \
+ if (connector)
+
+ #define for_each_crtc_in_state(state, crtc, crtc_state, __i) \
+ for ((__i) = 0; \
+- (crtc) = (state)->crtcs[__i], \
+- (crtc_state) = (state)->crtc_states[__i], \
+- (__i) < (state)->dev->mode_config.num_crtc; \
++ (__i) < (state)->dev->mode_config.num_crtc && \
++ ((crtc) = (state)->crtcs[__i], \
++ (crtc_state) = (state)->crtc_states[__i], 1); \
+ (__i)++) \
+ if (crtc_state)
+
+-#define for_each_plane_in_state(state, plane, plane_state, __i) \
+- for ((__i) = 0; \
+- (plane) = (state)->planes[__i], \
+- (plane_state) = (state)->plane_states[__i], \
+- (__i) < (state)->dev->mode_config.num_total_plane; \
+- (__i)++) \
++#define for_each_plane_in_state(state, plane, plane_state, __i) \
++ for ((__i) = 0; \
++ (__i) < (state)->dev->mode_config.num_total_plane && \
++ ((plane) = (state)->planes[__i], \
++ (plane_state) = (state)->plane_states[__i], 1); \
++ (__i)++) \
+ if (plane_state)
+
+ #endif /* DRM_ATOMIC_H_ */
--- /dev/null
+From dad3c3503462f59c6bec7edfa19dbde1857962c0 Mon Sep 17 00:00:00 2001
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+Date: Tue, 5 May 2015 18:32:17 +0200
+Subject: drm/bridge: ptn3460: Include linux/gpio/consumer.h
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+
+commit dad3c3503462f59c6bec7edfa19dbde1857962c0 upstream.
+
+If GPIOLIB=n and asm-generic/gpio.h is not used:
+
+ drivers/gpu/drm/bridge/ptn3460.c: In function ‘ptn3460_pre_enable’:
+ drivers/gpu/drm/bridge/ptn3460.c:135: error: implicit declaration of function ‘gpiod_set_value’
+ drivers/gpu/drm/bridge/ptn3460.c: In function ‘ptn3460_probe’:
+ drivers/gpu/drm/bridge/ptn3460.c:333: error: implicit declaration of function ‘devm_gpiod_get’
+ drivers/gpu/drm/bridge/ptn3460.c:333: warning: assignment makes pointer from integer without a cast
+ drivers/gpu/drm/bridge/ptn3460.c:340: error: implicit declaration of function ‘gpiod_direction_output’
+ drivers/gpu/drm/bridge/ptn3460.c:346: warning: assignment makes pointer from integer without a cast
+
+Add the missing #include <linux/gpio/consumer.h> to fix this.
+
+Fixes: af478d8823 ("drm/bridge: ptn3460: use gpiod interface")
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: David Airlie <airlied@linux.ie>
+Cc: dri-devel@lists.freedesktop.org
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/bridge/ptn3460.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/bridge/ptn3460.c
++++ b/drivers/gpu/drm/bridge/ptn3460.c
+@@ -15,6 +15,7 @@
+
+ #include <linux/delay.h>
+ #include <linux/gpio.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/i2c.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
--- /dev/null
+From 6b8eeca65b18ae77e175cc2b6571731f0ee413bf Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Mon, 15 Jun 2015 10:34:28 +1000
+Subject: drm/dp/mst: close deadlock in connector destruction.
+
+From: Dave Airlie <airlied@redhat.com>
+
+commit 6b8eeca65b18ae77e175cc2b6571731f0ee413bf upstream.
+
+I've only seen this once, and I failed to capture the
+lockdep backtrace, but I did some investigations.
+
+If we are calling into the MST layer from EDID probing,
+we have the mode_config mutex held, if during that EDID
+probing, the MST hub goes away, then we can get a deadlock
+where the connector destruction function in the driver
+tries to retake the mode config mutex.
+
+This offloads connector destruction to a workqueue,
+and avoids the subsequent lock ordering issue.
+
+Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_dp_mst_topology.c | 40 ++++++++++++++++++++++++++++++++--
+ include/drm/drm_crtc.h | 2 +
+ include/drm/drm_dp_mst_helper.h | 4 +++
+ 3 files changed, 44 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -867,8 +867,16 @@ static void drm_dp_destroy_port(struct k
+ port->vcpi.num_slots = 0;
+
+ kfree(port->cached_edid);
+- if (port->connector)
+- (*port->mgr->cbs->destroy_connector)(mgr, port->connector);
++
++ /* we can't destroy the connector here, as
++ we might be holding the mode_config.mutex
++ from an EDID retrieval */
++ if (port->connector) {
++ mutex_lock(&mgr->destroy_connector_lock);
++ list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
++ mutex_unlock(&mgr->destroy_connector_lock);
++ schedule_work(&mgr->destroy_connector_work);
++ }
+ drm_dp_port_teardown_pdt(port, port->pdt);
+
+ if (!port->input && port->vcpi.vcpi > 0)
+@@ -2632,6 +2640,30 @@ static void drm_dp_tx_work(struct work_s
+ mutex_unlock(&mgr->qlock);
+ }
+
++static void drm_dp_destroy_connector_work(struct work_struct *work)
++{
++ struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
++ struct drm_connector *connector;
++
++ /*
++ * Not a regular list traverse as we have to drop the destroy
++ * connector lock before destroying the connector, to avoid AB->BA
++ * ordering between this lock and the config mutex.
++ */
++ for (;;) {
++ mutex_lock(&mgr->destroy_connector_lock);
++ connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
++ if (!connector) {
++ mutex_unlock(&mgr->destroy_connector_lock);
++ break;
++ }
++ list_del(&connector->destroy_list);
++ mutex_unlock(&mgr->destroy_connector_lock);
++
++ mgr->cbs->destroy_connector(mgr, connector);
++ }
++}
++
+ /**
+ * drm_dp_mst_topology_mgr_init - initialise a topology manager
+ * @mgr: manager struct to initialise
+@@ -2651,10 +2683,13 @@ int drm_dp_mst_topology_mgr_init(struct
+ mutex_init(&mgr->lock);
+ mutex_init(&mgr->qlock);
+ mutex_init(&mgr->payload_lock);
++ mutex_init(&mgr->destroy_connector_lock);
+ INIT_LIST_HEAD(&mgr->tx_msg_upq);
+ INIT_LIST_HEAD(&mgr->tx_msg_downq);
++ INIT_LIST_HEAD(&mgr->destroy_connector_list);
+ INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
+ INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
++ INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
+ init_waitqueue_head(&mgr->tx_waitq);
+ mgr->dev = dev;
+ mgr->aux = aux;
+@@ -2679,6 +2714,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_in
+ */
+ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
+ {
++ flush_work(&mgr->destroy_connector_work);
+ mutex_lock(&mgr->payload_lock);
+ kfree(mgr->payloads);
+ mgr->payloads = NULL;
+--- a/include/drm/drm_crtc.h
++++ b/include/drm/drm_crtc.h
+@@ -731,6 +731,8 @@ struct drm_connector {
+ uint8_t num_h_tile, num_v_tile;
+ uint8_t tile_h_loc, tile_v_loc;
+ uint16_t tile_h_size, tile_v_size;
++
++ struct list_head destroy_list;
+ };
+
+ /**
+--- a/include/drm/drm_dp_mst_helper.h
++++ b/include/drm/drm_dp_mst_helper.h
+@@ -463,6 +463,10 @@ struct drm_dp_mst_topology_mgr {
+ struct work_struct work;
+
+ struct work_struct tx_work;
++
++ struct list_head destroy_connector_list;
++ struct mutex destroy_connector_lock;
++ struct work_struct destroy_connector_work;
+ };
+
+ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
--- /dev/null
+From 9254ec496a1dbdddeab50021a8138dc627a8166a Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Mon, 22 Jun 2015 17:31:59 +1000
+Subject: drm/dp/mst: make sure mst_primary mstb is valid in work function
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit 9254ec496a1dbdddeab50021a8138dc627a8166a upstream.
+
+This validates the mst_primary under the lock, and then calls
+into the check and send function. This makes the code a lot
+easier to understand the locking rules in.
+
+Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_dp_mst_topology.c | 24 +++++++++++++++++++-----
+ 1 file changed, 19 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1200,7 +1200,7 @@ static void drm_dp_check_and_send_link_a
+ struct drm_dp_mst_branch *mstb)
+ {
+ struct drm_dp_mst_port *port;
+-
++ struct drm_dp_mst_branch *mstb_child;
+ if (!mstb->link_address_sent) {
+ drm_dp_send_link_address(mgr, mstb);
+ mstb->link_address_sent = true;
+@@ -1215,17 +1215,31 @@ static void drm_dp_check_and_send_link_a
+ if (!port->available_pbn)
+ drm_dp_send_enum_path_resources(mgr, mstb, port);
+
+- if (port->mstb)
+- drm_dp_check_and_send_link_address(mgr, port->mstb);
++ if (port->mstb) {
++ mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
++ if (mstb_child) {
++ drm_dp_check_and_send_link_address(mgr, mstb_child);
++ drm_dp_put_mst_branch_device(mstb_child);
++ }
++ }
+ }
+ }
+
+ static void drm_dp_mst_link_probe_work(struct work_struct *work)
+ {
+ struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
++ struct drm_dp_mst_branch *mstb;
+
+- drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
+-
++ mutex_lock(&mgr->lock);
++ mstb = mgr->mst_primary;
++ if (mstb) {
++ kref_get(&mstb->kref);
++ }
++ mutex_unlock(&mgr->lock);
++ if (mstb) {
++ drm_dp_check_and_send_link_address(mgr, mstb);
++ drm_dp_put_mst_branch_device(mstb);
++ }
+ }
+
+ static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
--- /dev/null
+From 9eb1e57f564d4e6e10991402726cc83fe0b9172f Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Mon, 22 Jun 2015 14:40:44 +1000
+Subject: drm/dp/mst: take lock around looking up the branch device on hpd irq
+
+From: Dave Airlie <airlied@redhat.com>
+
+commit 9eb1e57f564d4e6e10991402726cc83fe0b9172f upstream.
+
+If we are doing an MST transaction and we've gotten HPD and we
+lookup the device from the incoming msg, we should take the mgr
+lock around it, so that mst_primary and mstb->ports are valid.
+
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_dp_mst_topology.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1171,6 +1171,8 @@ static struct drm_dp_mst_branch *drm_dp_
+ struct drm_dp_mst_port *port;
+ int i;
+ /* find the port by iterating down */
++
++ mutex_lock(&mgr->lock);
+ mstb = mgr->mst_primary;
+
+ for (i = 0; i < lct - 1; i++) {
+@@ -1190,6 +1192,7 @@ static struct drm_dp_mst_branch *drm_dp_
+ }
+ }
+ kref_get(&mstb->kref);
++ mutex_unlock(&mgr->lock);
+ return mstb;
+ }
+
--- /dev/null
+From 19ee835cdb0b5a8eb11a68f25a51b8039d564488 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Mon, 29 Jun 2015 14:01:19 +0100
+Subject: drm/i915: Declare the swizzling unknown for L-shaped configurations
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 19ee835cdb0b5a8eb11a68f25a51b8039d564488 upstream.
+
+The old style of memory interleaving swizzled up to the end of the
+first even bank of memory, and then used the remainder as unswizzled on
+the unpaired bank - i.e. swizzling is not constant for all memory. This
+causes problems when we try to migrate memory and so the kernel prevents
+migration at all when we detect L-shaped inconsistent swizzling.
+However, this issue also extends to userspace who try to manually detile
+into memory as the swizzling for an individual page is unknown (it
+depends on its physical address only known to the kernel), userspace
+cannot correctly swizzle objects.
+
+v2: Mark the global swizzling as unknown rather than adjust the value
+reported to userspace.
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=91105
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_gem_tiling.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
+@@ -183,8 +183,18 @@ i915_gem_detect_bit_6_swizzle(struct drm
+ if (IS_GEN4(dev)) {
+ uint32_t ddc2 = I915_READ(DCC2);
+
+- if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
++ if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) {
++ /* Since the swizzling may vary within an
++ * object, we have no idea what the swizzling
++ * is for any page in particular. Thus we
++ * cannot migrate tiled pages using the GPU,
++ * nor can we tell userspace what the exact
++ * swizzling is for any object.
++ */
+ dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
++ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
++ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
++ }
+ }
+
+ if (dcc == 0xffffffff) {
--- /dev/null
+From 2059ac3b1304cb6a82f9d90762dea9f556831627 Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Fri, 26 Jun 2015 14:18:56 +0300
+Subject: drm/i915: fix backlight after resume on 855gm
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+commit 2059ac3b1304cb6a82f9d90762dea9f556831627 upstream.
+
+Some 855gm models (at least ThinkPad X40) regressed because of
+
+commit b0cd324faed23d10d66ba6ade66579c681feef6f
+Author: Jani Nikula <jani.nikula@intel.com>
+Date: Wed Nov 12 16:25:43 2014 +0200
+
+ drm/i915: don't save/restore backlight hist ctl registers
+
+which tried to make our driver more robust by not blindly saving and
+restoring registers, but it failed to take into account
+
+commit 0eb96d6ed38430b72897adde58f5477a6b71757a
+Author: Jesse Barnes <jbarnes@virtuousgeek.org>
+Date: Wed Oct 14 12:33:41 2009 -0700
+
+ drm/i915: save/restore BLC histogram control reg across suspend/resume
+
+Fix the regression by enabling hist ctl on gen2.
+
+v2: Improved the comment.
+
+v3: Improved the comment, again.
+
+Reported-and-tested-by: Philipp Gesang <phg@phi-gamma.net>
+References: http://mid.gmane.org/20150623222648.GD12335@acheron
+Fixes: b0cd324faed2 ("drm/i915: don't save/restore backlight hist ctl registers")
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_reg.h | 1 +
+ drivers/gpu/drm/i915/intel_panel.c | 8 ++++++++
+ 2 files changed, 9 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -3209,6 +3209,7 @@ enum skl_disp_power_wells {
+ #define BLM_POLARITY_PNV (1 << 0) /* pnv only */
+
+ #define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260)
++#define BLM_HISTOGRAM_ENABLE (1 << 31)
+
+ /* New registers for PCH-split platforms. Safe where new bits show up, the
+ * register layout machtes with gen4 BLC_PWM_CTL[12]. */
+--- a/drivers/gpu/drm/i915/intel_panel.c
++++ b/drivers/gpu/drm/i915/intel_panel.c
+@@ -879,6 +879,14 @@ static void i9xx_enable_backlight(struct
+
+ /* XXX: combine this into above write? */
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
++
++ /*
++ * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
++ * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
++ * that has backlight.
++ */
++ if (IS_GEN2(dev))
++ I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
+ }
+
+ static void i965_enable_backlight(struct intel_connector *connector)
--- /dev/null
+From ac88cd738425e04dbed3706621cf613a00708834 Mon Sep 17 00:00:00 2001
+From: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Date: Thu, 28 May 2015 11:07:11 -0700
+Subject: drm/i915: Fix IPS related flicker
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Rodrigo Vivi <rodrigo.vivi@intel.com>
+
+commit ac88cd738425e04dbed3706621cf613a00708834 upstream.
+
+We cannot let IPS enabled with no plane on the pipe:
+
+BSpec: "IPS cannot be enabled until after at least one plane has
+been enabled for at least one vertical blank." and "IPS must be
+disabled while there is still at least one plane enabled on the
+same pipe as IPS." This restriction apply to HSW and BDW.
+
+However a shortcut path on update primary plane function
+to make primary plane invisible by setting DSPCTRL to 0
+was leting IPS enabled while there was no
+other plane enabled on the pipe causing flickerings that we were
+believing that it was caused by that other restriction where
+ips cannot be used when pixel rate is greater than 95% of cdclok.
+
+v2: Don't mess with Atomic path as pointed out by Ville.
+
+Reference: https://bugs.freedesktop.org/show_bug.cgi?id=85583
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_display.c | 13 +++++++++++++
+ drivers/gpu/drm/i915/intel_drv.h | 1 +
+ 2 files changed, 14 insertions(+)
+
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -12499,6 +12499,16 @@ intel_check_primary_plane(struct drm_pla
+ intel_crtc->atomic.wait_vblank = true;
+ }
+
++ /*
++ * FIXME: Actually if we will still have any other plane enabled
++ * on the pipe we could let IPS enabled still, but for
++ * now lets consider that when we make primary invisible
++ * by setting DSPCNTR to 0 on update_primary_plane function
++ * IPS needs to be disable.
++ */
++ if (!state->visible || !fb)
++ intel_crtc->atomic.disable_ips = true;
++
+ intel_crtc->atomic.fb_bits |=
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
+
+@@ -12590,6 +12600,9 @@ static void intel_begin_crtc_commit(stru
+ if (intel_crtc->atomic.disable_fbc)
+ intel_fbc_disable(dev);
+
++ if (intel_crtc->atomic.disable_ips)
++ hsw_disable_ips(intel_crtc);
++
+ if (intel_crtc->atomic.pre_disable_primary)
+ intel_pre_disable_primary(crtc);
+
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -424,6 +424,7 @@ struct intel_crtc_atomic_commit {
+ /* Sleepable operations to perform before commit */
+ bool wait_for_flips;
+ bool disable_fbc;
++ bool disable_ips;
+ bool pre_disable_primary;
+ bool update_wm;
+ unsigned disabled_planes;
--- /dev/null
+From ac7e7ab1c3243b10b41653cc8d8536088d83b152 Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Date: Mon, 13 Jul 2015 16:51:39 +0100
+Subject: drm/i915: Forward all core DRM ioctls to core compat handling
+
+From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+
+commit ac7e7ab1c3243b10b41653cc8d8536088d83b152 upstream.
+
+Previously only core DRM ioctls under the DRM_COMMAND_BASE were being
+forwarded, but the drm.h header suggests (and reality confirms) ones
+after (and including) DRM_COMMAND_END should be forwarded as well.
+
+We need this to correctly forward the compat ioctl for the botched-up
+addfb2.1 extension.
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: Daniel Vetter <daniel.vetter@intel.com>
+[danvet: Explain why this is suddenly needed and add cc: stable.]
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_ioc32.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_ioc32.c
++++ b/drivers/gpu/drm/i915/i915_ioc32.c
+@@ -204,7 +204,7 @@ long i915_compat_ioctl(struct file *filp
+ drm_ioctl_compat_t *fn = NULL;
+ int ret;
+
+- if (nr < DRM_COMMAND_BASE)
++ if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
+ return drm_compat_ioctl(filp, cmd, arg);
+
+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
--- /dev/null
+From 00245266b4be4fbe989ee073663f56716da6c1f3 Mon Sep 17 00:00:00 2001
+From: Michel Thierry <michel.thierry@intel.com>
+Date: Thu, 25 Jun 2015 12:59:38 +0100
+Subject: drm/i915/ppgtt: Break loop in gen8_ppgtt_clear_range failure path
+
+From: Michel Thierry <michel.thierry@intel.com>
+
+commit 00245266b4be4fbe989ee073663f56716da6c1f3 upstream.
+
+If for some reason [1], the page directory/table does not exist, clear_range
+would end up in an infinite while loop.
+
+Introduced by commit 06fda602dbca ("drm/i915: Create page table allocators").
+
+[1] This is already being addressed in one of Mika's patches:
+http://mid.gmane.org/1432314314-23530-17-git-send-email-mika.kuoppala@intel.com
+
+Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Reported-by: John Harrison <john.c.harrison@intel.com>
+Signed-off-by: Michel Thierry <michel.thierry@intel.com>
+Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_gem_gtt.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
+@@ -502,17 +502,17 @@ static void gen8_ppgtt_clear_range(struc
+ struct page *page_table;
+
+ if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
+- continue;
++ break;
+
+ pd = ppgtt->pdp.page_directory[pdpe];
+
+ if (WARN_ON(!pd->page_table[pde]))
+- continue;
++ break;
+
+ pt = pd->page_table[pde];
+
+ if (WARN_ON(!pt->page))
+- continue;
++ break;
+
+ page_table = pt->page;
+
--- /dev/null
+From 94f7bbe1509731bdef651d7fb235b2c31fb23be8 Mon Sep 17 00:00:00 2001
+From: Tomas Elf <tomas.elf@intel.com>
+Date: Thu, 9 Jul 2015 15:30:57 +0100
+Subject: drm/i915: Snapshot seqno of most recently submitted request.
+
+From: Tomas Elf <tomas.elf@intel.com>
+
+commit 94f7bbe1509731bdef651d7fb235b2c31fb23be8 upstream.
+
+The hang checker needs to inspect whether or not the ring request list is empty
+as well as if the given engine has reached or passed the most recently
+submitted request. The problem with this is that the hang checker cannot grab
+the struct_mutex, which is required in order to safely inspect requests since
+requests might be deallocated during inspection. In the past we've had kernel
+panics due to this very unsynchronized access in the hang checker.
+
+One solution to this problem is to not inspect the requests directly since
+we're only interested in the seqno of the most recently submitted request - not
+the request itself. Instead the seqno of the most recently submitted request is
+stored separately, which the hang checker then inspects, circumventing the
+issue of synchronization from the hang checker entirely.
+
+This fixes a regression introduced in
+
+commit 44cdd6d219bc64f6810b8ed0023a4d4db9e0fe68
+Author: John Harrison <John.C.Harrison@Intel.com>
+Date: Mon Nov 24 18:49:40 2014 +0000
+
+ drm/i915: Convert 'ring_idle()' to use requests not seqnos
+
+v2 (Chris Wilson):
+- Pass current engine seqno to ring_idle() from i915_hangcheck_elapsed() rather
+than compute it over again.
+- Remove extra whitespace.
+
+Issue: VIZ-5998
+Signed-off-by: Tomas Elf <tomas.elf@intel.com>
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+[danvet: Add regressing commit citation provided by Chris.]
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_gem.c | 1 +
+ drivers/gpu/drm/i915/i915_irq.c | 13 +++----------
+ drivers/gpu/drm/i915/intel_ringbuffer.h | 7 +++++++
+ 3 files changed, 11 insertions(+), 10 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2401,6 +2401,7 @@ int __i915_add_request(struct intel_engi
+ }
+
+ request->emitted_jiffies = jiffies;
++ ring->last_submitted_seqno = request->seqno;
+ list_add_tail(&request->list, &ring->request_list);
+ request->file_priv = NULL;
+
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -2650,18 +2650,11 @@ static void gen8_disable_vblank(struct d
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+
+-static struct drm_i915_gem_request *
+-ring_last_request(struct intel_engine_cs *ring)
+-{
+- return list_entry(ring->request_list.prev,
+- struct drm_i915_gem_request, list);
+-}
+-
+ static bool
+-ring_idle(struct intel_engine_cs *ring)
++ring_idle(struct intel_engine_cs *ring, u32 seqno)
+ {
+ return (list_empty(&ring->request_list) ||
+- i915_gem_request_completed(ring_last_request(ring), false));
++ i915_seqno_passed(seqno, ring->last_submitted_seqno));
+ }
+
+ static bool
+@@ -2883,7 +2876,7 @@ static void i915_hangcheck_elapsed(struc
+ acthd = intel_ring_get_active_head(ring);
+
+ if (ring->hangcheck.seqno == seqno) {
+- if (ring_idle(ring)) {
++ if (ring_idle(ring, seqno)) {
+ ring->hangcheck.action = HANGCHECK_IDLE;
+
+ if (waitqueue_active(&ring->irq_queue)) {
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
+@@ -266,6 +266,13 @@ struct intel_engine_cs {
+ * Do we have some not yet emitted requests outstanding?
+ */
+ struct drm_i915_gem_request *outstanding_lazy_request;
++ /**
++ * Seqno of request most recently submitted to request_list.
++ * Used exclusively by hang checker to avoid grabbing lock while
++ * inspecting request list.
++ */
++ u32 last_submitted_seqno;
++
+ bool gpu_caches_dirty;
+
+ wait_queue_head_t irq_queue;
--- /dev/null
+From 648a9bc5308d952f2c80772301b339f73026f013 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Thu, 16 Jul 2015 12:37:56 +0100
+Subject: drm/i915: Use two 32bit reads for select 64bit REG_READ ioctls
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 648a9bc5308d952f2c80772301b339f73026f013 upstream.
+
+Since the hardware sometimes mysteriously totally flummoxes the 64bit
+read of a 64bit register when read using a single instruction, split the
+read into two instructions. Since the read here is of automatically
+incrementing timestamp counters, we also have to be very careful in
+order to make sure that it does not increment between the two
+instructions.
+
+However, since userspace tried to work around this issue and so enshrined
+this ABI for a broken hardware read and in the process neglected that
+the read only fails in some environments, we have to introduce a new
+uABI flag for userspace to request the 2x32 bit accurate read of the
+timestamp.
+
+v2: Fix alignment check and include details of the workaround for
+userspace.
+
+Reported-by: Karol Herbst <freedesktop@karolherbst.de>
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=91317
+Testcase: igt/gem_reg_read
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Michał Winiarski <michal.winiarski@intel.com>
+Tested-by: Michał Winiarski <michal.winiarski@intel.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_uncore.c | 26 +++++++++++++++++++-------
+ include/uapi/drm/i915_drm.h | 8 ++++++++
+ 2 files changed, 27 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_uncore.c
++++ b/drivers/gpu/drm/i915/intel_uncore.c
+@@ -1220,10 +1220,12 @@ int i915_reg_read_ioctl(struct drm_devic
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_reg_read *reg = data;
+ struct register_whitelist const *entry = whitelist;
++ unsigned size;
++ u64 offset;
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+- if (entry->offset == reg->offset &&
++ if (entry->offset == (reg->offset & -entry->size) &&
+ (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+ break;
+ }
+@@ -1231,23 +1233,33 @@ int i915_reg_read_ioctl(struct drm_devic
+ if (i == ARRAY_SIZE(whitelist))
+ return -EINVAL;
+
++ /* We use the low bits to encode extra flags as the register should
++ * be naturally aligned (and those that are not so aligned merely
++ * limit the available flags for that register).
++ */
++ offset = entry->offset;
++ size = entry->size;
++ size |= reg->offset ^ offset;
++
+ intel_runtime_pm_get(dev_priv);
+
+- switch (entry->size) {
++ switch (size) {
++ case 8 | 1:
++ reg->val = I915_READ64_2x32(offset, offset+4);
++ break;
+ case 8:
+- reg->val = I915_READ64(reg->offset);
++ reg->val = I915_READ64(offset);
+ break;
+ case 4:
+- reg->val = I915_READ(reg->offset);
++ reg->val = I915_READ(offset);
+ break;
+ case 2:
+- reg->val = I915_READ16(reg->offset);
++ reg->val = I915_READ16(offset);
+ break;
+ case 1:
+- reg->val = I915_READ8(reg->offset);
++ reg->val = I915_READ8(offset);
+ break;
+ default:
+- MISSING_CASE(entry->size);
+ ret = -EINVAL;
+ goto out;
+ }
+--- a/include/uapi/drm/i915_drm.h
++++ b/include/uapi/drm/i915_drm.h
+@@ -1065,6 +1065,14 @@ struct drm_i915_reg_read {
+ __u64 offset;
+ __u64 val; /* Return value */
+ };
++/* Known registers:
++ *
++ * Render engine timestamp - 0x2358 + 64bit - gen7+
++ * - Note this register returns an invalid value if using the default
++ * single instruction 8byte read, in order to workaround that use
++ * offset (0x2358 | 1) instead.
++ *
++ */
+
+ struct drm_i915_reset_stats {
+ __u32 ctx_id;
--- /dev/null
+From c631d5f90e7ee246536c72f80ade86e9ef4d2f13 Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Date: Tue, 14 Jul 2015 11:13:08 +0100
+Subject: drm: Provide compat ioctl for addfb2.1
+
+From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+
+commit c631d5f90e7ee246536c72f80ade86e9ef4d2f13 upstream.
+
+Frame buffer modifiers extensions provided in;
+
+ commit e3eb3250d84ef97b766312345774367b6a310db8
+ Author: Rob Clark <robdclark@gmail.com>
+ Date: Thu Feb 5 14:41:52 2015 +0000
+
+ drm: add support for tiled/compressed/etc modifier in addfb2
+
+Missed the structure packing/alignment problem where 64-bit
+members were added after the odd number of 32-bit ones. This
+makes the compiler produce structures of different sizes under
+32- and 64-bit x86 targets and makes the ioctl need explicit
+compat handling.
+
+v2: Removed the typedef. (Daniel Vetter)
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: dri-devel@lists.freedesktop.org
+Cc: Rob Clark <robdclark@gmail.com>
+Cc: Daniel Stone <daniels@collabora.com>
+Cc: Daniel Vetter <daniel.vetter@intel.com>
+[danvet: Squash in compile fix from Mika.]
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_ioc32.c | 60 ++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 60 insertions(+)
+
+--- a/drivers/gpu/drm/drm_ioc32.c
++++ b/drivers/gpu/drm/drm_ioc32.c
+@@ -70,6 +70,8 @@
+
+ #define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
+
++#define DRM_IOCTL_MODE_ADDFB232 DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
++
+ typedef struct drm_version_32 {
+ int version_major; /**< Major version */
+ int version_minor; /**< Minor version */
+@@ -1016,6 +1018,63 @@ static int compat_drm_wait_vblank(struct
+ return 0;
+ }
+
++typedef struct drm_mode_fb_cmd232 {
++ u32 fb_id;
++ u32 width;
++ u32 height;
++ u32 pixel_format;
++ u32 flags;
++ u32 handles[4];
++ u32 pitches[4];
++ u32 offsets[4];
++ u64 modifier[4];
++} __attribute__((packed)) drm_mode_fb_cmd232_t;
++
++static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg;
++ struct drm_mode_fb_cmd232 req32;
++ struct drm_mode_fb_cmd2 __user *req64;
++ int i;
++ int err;
++
++ if (copy_from_user(&req32, argp, sizeof(req32)))
++ return -EFAULT;
++
++ req64 = compat_alloc_user_space(sizeof(*req64));
++
++ if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64))
++ || __put_user(req32.width, &req64->width)
++ || __put_user(req32.height, &req64->height)
++ || __put_user(req32.pixel_format, &req64->pixel_format)
++ || __put_user(req32.flags, &req64->flags))
++ return -EFAULT;
++
++ for (i = 0; i < 4; i++) {
++ if (__put_user(req32.handles[i], &req64->handles[i]))
++ return -EFAULT;
++ if (__put_user(req32.pitches[i], &req64->pitches[i]))
++ return -EFAULT;
++ if (__put_user(req32.offsets[i], &req64->offsets[i]))
++ return -EFAULT;
++ if (__put_user(req32.modifier[i], &req64->modifier[i]))
++ return -EFAULT;
++ }
++
++ err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64);
++ if (err)
++ return err;
++
++ if (__get_user(req32.fb_id, &req64->fb_id))
++ return -EFAULT;
++
++ if (copy_to_user(argp, &req32, sizeof(req32)))
++ return -EFAULT;
++
++ return 0;
++}
++
+ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
+@@ -1048,6 +1107,7 @@ static drm_ioctl_compat_t *drm_compat_io
+ [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
+ #endif
+ [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
++ [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
+ };
+
+ /**
--- /dev/null
+From 2fa19535ca6abcbfd1ccc9ef694db52f49f77747 Mon Sep 17 00:00:00 2001
+From: Frediano Ziglio <fziglio@redhat.com>
+Date: Wed, 3 Jun 2015 12:09:09 +0100
+Subject: drm/qxl: Do not cause spice-server to clean our objects
+
+From: Frediano Ziglio <fziglio@redhat.com>
+
+commit 2fa19535ca6abcbfd1ccc9ef694db52f49f77747 upstream.
+
+If objects are moved back from system memory to VRAM (and the spice id
+is created again) the memory is already initialized, so we need to set
+a flag to not clear the memory.
+If you don't do this, after a while using the desktop many images turn
+black or transparent.
+
+Signed-off-by: Frediano Ziglio <fziglio@redhat.com>
+Reviewed-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/qxl/qxl_cmd.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/qxl/qxl_cmd.c
++++ b/drivers/gpu/drm/qxl/qxl_cmd.c
+@@ -505,6 +505,7 @@ int qxl_hw_surface_alloc(struct qxl_devi
+
+ cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+ cmd->type = QXL_SURFACE_CMD_CREATE;
++ cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
+ cmd->u.surface_create.format = surf->surf.format;
+ cmd->u.surface_create.width = surf->surf.width;
+ cmd->u.surface_create.height = surf->surf.height;
--- /dev/null
+From 8451cc964c1d193b989c41a44e5e77109cc696f8 Mon Sep 17 00:00:00 2001
+From: Frediano Ziglio <fziglio@redhat.com>
+Date: Wed, 3 Jun 2015 12:09:10 +0100
+Subject: drm/qxl: Do not leak memory if qxl_release_list_add fails
+
+From: Frediano Ziglio <fziglio@redhat.com>
+
+commit 8451cc964c1d193b989c41a44e5e77109cc696f8 upstream.
+
+If the function fails, the reference counter of the object is not
+decremented, causing leaks.
+This is hard to spot as it happens only on very low memory situations.
+
+Signed-off-by: Frediano Ziglio <fziglio@redhat.com>
+Reviewed-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/qxl/qxl_ioctl.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -122,8 +122,10 @@ static struct qxl_bo *qxlhw_handle_to_bo
+ qobj = gem_to_qxl_bo(gobj);
+
+ ret = qxl_release_list_add(release, qobj);
+- if (ret)
++ if (ret) {
++ drm_gem_object_unreference_unlocked(gobj);
+ return NULL;
++ }
+
+ return qobj;
+ }
--- /dev/null
+From 5dfc71bc44d91d1620505c064fa22b0b3db58a9d Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 9 Jul 2015 21:08:17 -0400
+Subject: drm/radeon: add a dpm quirk for Sapphire Radeon R9 270X 2GB GDDR5
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 5dfc71bc44d91d1620505c064fa22b0b3db58a9d upstream.
+
+bug:
+https://bugs.freedesktop.org/show_bug.cgi?id=76490
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/si_dpm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2925,6 +2925,7 @@ static struct si_dpm_quirk si_dpm_quirk_
+ /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
++ { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+ { 0, 0, 0, 0 },
+ };
+
--- /dev/null
+From bda5e3e97ffe80c5a793383df5681d3581d46ac8 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 16 Jul 2015 10:17:09 -0400
+Subject: drm/radeon/ci: silence a harmless PCC warning
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit bda5e3e97ffe80c5a793383df5681d3581d46ac8 upstream.
+
+This has been a source of confusion. Make it debug only.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/ci_dpm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/radeon/ci_dpm.c
++++ b/drivers/gpu/drm/radeon/ci_dpm.c
+@@ -5818,7 +5818,7 @@ int ci_dpm_init(struct radeon_device *rd
+ tmp |= DPM_ENABLED;
+ break;
+ default:
+- DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
++ DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
+ break;
+ }
+ WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
--- /dev/null
+From 010621936103fcfc15375ccdc92c0f583923d489 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 18 May 2015 11:11:48 -0400
+Subject: drm/radeon: clean up radeon_audio_enable
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 010621936103fcfc15375ccdc92c0f583923d489 upstream.
+
+- make it static
+- fix mask/bool handling for last param
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_audio.c | 18 +++++++++---------
+ drivers/gpu/drm/radeon/radeon_audio.h | 2 --
+ 2 files changed, 9 insertions(+), 11 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_audio.c
++++ b/drivers/gpu/drm/radeon/radeon_audio.c
+@@ -242,6 +242,13 @@ static struct radeon_audio_funcs dce6_dp
+ .dpms = evergreen_dp_enable,
+ };
+
++static void radeon_audio_enable(struct radeon_device *rdev,
++ struct r600_audio_pin *pin, u8 enable_mask)
++{
++ if (rdev->audio.funcs->enable)
++ rdev->audio.funcs->enable(rdev, pin, enable_mask);
++}
++
+ static void radeon_audio_interface_init(struct radeon_device *rdev)
+ {
+ if (ASIC_IS_DCE6(rdev)) {
+@@ -307,7 +314,7 @@ int radeon_audio_init(struct radeon_devi
+
+ /* disable audio. it will be set up later */
+ for (i = 0; i < rdev->audio.num_pins; i++)
+- radeon_audio_enable(rdev, &rdev->audio.pin[i], false);
++ radeon_audio_enable(rdev, &rdev->audio.pin[i], 0);
+
+ return 0;
+ }
+@@ -443,13 +450,6 @@ static void radeon_audio_select_pin(stru
+ radeon_encoder->audio->select_pin(encoder);
+ }
+
+-void radeon_audio_enable(struct radeon_device *rdev,
+- struct r600_audio_pin *pin, u8 enable_mask)
+-{
+- if (rdev->audio.funcs->enable)
+- rdev->audio.funcs->enable(rdev, pin, enable_mask);
+-}
+-
+ void radeon_audio_detect(struct drm_connector *connector,
+ enum drm_connector_status status)
+ {
+@@ -502,7 +502,7 @@ void radeon_audio_fini(struct radeon_dev
+ return;
+
+ for (i = 0; i < rdev->audio.num_pins; i++)
+- radeon_audio_enable(rdev, &rdev->audio.pin[i], false);
++ radeon_audio_enable(rdev, &rdev->audio.pin[i], 0);
+
+ rdev->audio.enabled = false;
+ }
+--- a/drivers/gpu/drm/radeon/radeon_audio.h
++++ b/drivers/gpu/drm/radeon/radeon_audio.h
+@@ -74,8 +74,6 @@ u32 radeon_audio_endpoint_rreg(struct ra
+ void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
+ u32 offset, u32 reg, u32 v);
+ struct r600_audio_pin *radeon_audio_get_pin(struct drm_encoder *encoder);
+-void radeon_audio_enable(struct radeon_device *rdev,
+- struct r600_audio_pin *pin, u8 enable_mask);
+ void radeon_audio_fini(struct radeon_device *rdev);
+ void radeon_audio_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
--- /dev/null
+From cd404af0c930104462aa91344f07d002cf8248ed Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Tue, 7 Jul 2015 16:27:28 +0900
+Subject: drm/radeon: Clean up reference counting and pinning of the cursor BOs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+
+commit cd404af0c930104462aa91344f07d002cf8248ed upstream.
+
+Take a GEM reference for and pin the new cursor BO, unpin and drop the
+GEM reference for the old cursor BO in radeon_crtc_cursor_set2, and use
+radeon_crtc->cursor_addr in radeon_set_cursor.
+
+This fixes radeon_cursor_reset accidentally incrementing the cursor BO
+pin count, and cleans up the code a little.
+
+Reviewed-by: Grigori Goronzy <greg@chown.ath.cx>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_cursor.c | 84 ++++++++++++++-------------------
+ drivers/gpu/drm/radeon/radeon_mode.h | 1
+ 2 files changed, 37 insertions(+), 48 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_cursor.c
++++ b/drivers/gpu/drm/radeon/radeon_cursor.c
+@@ -205,8 +205,9 @@ static int radeon_cursor_move_locked(str
+ | (x << 16)
+ | y));
+ /* offset is from DISP(2)_BASE_ADDRESS */
+- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
+- (yorigin * 256)));
++ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
++ radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +
++ yorigin * 256);
+ }
+
+ radeon_crtc->cursor_x = x;
+@@ -227,51 +228,32 @@ int radeon_crtc_cursor_move(struct drm_c
+ return ret;
+ }
+
+-static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
++static void radeon_set_cursor(struct drm_crtc *crtc)
+ {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
+- struct radeon_bo *robj = gem_to_radeon_bo(obj);
+- uint64_t gpu_addr;
+- int ret;
+-
+- ret = radeon_bo_reserve(robj, false);
+- if (unlikely(ret != 0))
+- goto fail;
+- /* Only 27 bit offset for legacy cursor */
+- ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+- ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+- &gpu_addr);
+- radeon_bo_unreserve(robj);
+- if (ret)
+- goto fail;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+- upper_32_bits(gpu_addr));
++ upper_32_bits(radeon_crtc->cursor_addr));
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+- gpu_addr & 0xffffffff);
++ lower_32_bits(radeon_crtc->cursor_addr));
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->family >= CHIP_RV770) {
+ if (radeon_crtc->crtc_id)
+- WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
++ WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,
++ upper_32_bits(radeon_crtc->cursor_addr));
+ else
+- WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
++ WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,
++ upper_32_bits(radeon_crtc->cursor_addr));
+ }
+ WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+- gpu_addr & 0xffffffff);
++ lower_32_bits(radeon_crtc->cursor_addr));
+ } else {
+- radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
+ /* offset is from DISP(2)_BASE_ADDRESS */
+- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
++ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
++ radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);
+ }
+-
+- return 0;
+-
+-fail:
+- drm_gem_object_unreference_unlocked(obj);
+-
+- return ret;
+ }
+
+ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
+@@ -283,7 +265,9 @@ int radeon_crtc_cursor_set2(struct drm_c
+ int32_t hot_y)
+ {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
++ struct radeon_device *rdev = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
++ struct radeon_bo *robj;
+ int ret;
+
+ if (!handle) {
+@@ -305,6 +289,23 @@ int radeon_crtc_cursor_set2(struct drm_c
+ return -ENOENT;
+ }
+
++ robj = gem_to_radeon_bo(obj);
++ ret = radeon_bo_reserve(robj, false);
++ if (ret != 0) {
++ drm_gem_object_unreference_unlocked(obj);
++ return ret;
++ }
++ /* Only 27 bit offset for legacy cursor */
++ ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
++ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
++ &radeon_crtc->cursor_addr);
++ radeon_bo_unreserve(robj);
++ if (ret) {
++ DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
++ drm_gem_object_unreference_unlocked(obj);
++ return ret;
++ }
++
+ radeon_crtc->cursor_width = width;
+ radeon_crtc->cursor_height = height;
+
+@@ -323,13 +324,8 @@ int radeon_crtc_cursor_set2(struct drm_c
+ radeon_crtc->cursor_hot_y = hot_y;
+ }
+
+- ret = radeon_set_cursor(crtc, obj);
+-
+- if (ret)
+- DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
+- ret);
+- else
+- radeon_show_cursor(crtc);
++ radeon_set_cursor(crtc);
++ radeon_show_cursor(crtc);
+
+ radeon_lock_cursor(crtc, false);
+
+@@ -341,8 +337,7 @@ unpin:
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
+- if (radeon_crtc->cursor_bo != obj)
+- drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
++ drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+ }
+
+ radeon_crtc->cursor_bo = obj;
+@@ -360,7 +355,6 @@ unpin:
+ void radeon_cursor_reset(struct drm_crtc *crtc)
+ {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+- int ret;
+
+ if (radeon_crtc->cursor_bo) {
+ radeon_lock_cursor(crtc, true);
+@@ -368,12 +362,8 @@ void radeon_cursor_reset(struct drm_crtc
+ radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
+ radeon_crtc->cursor_y);
+
+- ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
+- if (ret)
+- DRM_ERROR("radeon_set_cursor returned %d, not showing "
+- "cursor\n", ret);
+- else
+- radeon_show_cursor(crtc);
++ radeon_set_cursor(crtc);
++ radeon_show_cursor(crtc);
+
+ radeon_lock_cursor(crtc, false);
+ }
+--- a/drivers/gpu/drm/radeon/radeon_mode.h
++++ b/drivers/gpu/drm/radeon/radeon_mode.h
+@@ -343,7 +343,6 @@ struct radeon_crtc {
+ int max_cursor_width;
+ int max_cursor_height;
+ uint32_t legacy_display_base_addr;
+- uint32_t legacy_cursor_offset;
+ enum radeon_rmx_type rmx_type;
+ u8 h_border;
+ u8 v_border;
--- /dev/null
+From 161569deaa03cf3c00ed63352006193f250b0648 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Glisse?= <jglisse@redhat.com>
+Date: Fri, 19 Jun 2015 10:32:15 -0400
+Subject: drm/radeon: compute ring fix hibernation (CI GPU family) v2.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Glisse?= <jglisse@redhat.com>
+
+commit 161569deaa03cf3c00ed63352006193f250b0648 upstream.
+
+In order for hibernation to work reliably we need to clean up the
+compute ring more thoroughly. Hibernation is different from suspend/
+resume: when we resume from hibernation the hardware is first fully
+initialized by the regular kernel, then the freeze callback happens
+(which corresponds to a suspend inside the radeon kernel driver)
+and turns off each of the blocks. It turns out we were not cleanly
+shutting down the compute ring. This patch fixes that.
+
+Hibernation and suspend to ram were tested (several times) on :
+Bonaire
+Hawaii
+Mullins
+Kaveri
+Kabini
+
+Changed since v1:
+ - Factor the ring stop logic into a function taking ring as arg.
+
+Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/cik.c | 34 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 34 insertions(+)
+
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -4579,6 +4579,31 @@ void cik_compute_set_wptr(struct radeon_
+ WDOORBELL32(ring->doorbell_index, ring->wptr);
+ }
+
++static void cik_compute_stop(struct radeon_device *rdev,
++ struct radeon_ring *ring)
++{
++ u32 j, tmp;
++
++ cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
++ /* Disable wptr polling. */
++ tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
++ tmp &= ~WPTR_POLL_EN;
++ WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
++ /* Disable HQD. */
++ if (RREG32(CP_HQD_ACTIVE) & 1) {
++ WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
++ for (j = 0; j < rdev->usec_timeout; j++) {
++ if (!(RREG32(CP_HQD_ACTIVE) & 1))
++ break;
++ udelay(1);
++ }
++ WREG32(CP_HQD_DEQUEUE_REQUEST, 0);
++ WREG32(CP_HQD_PQ_RPTR, 0);
++ WREG32(CP_HQD_PQ_WPTR, 0);
++ }
++ cik_srbm_select(rdev, 0, 0, 0, 0);
++}
++
+ /**
+ * cik_cp_compute_enable - enable/disable the compute CP MEs
+ *
+@@ -4592,6 +4617,15 @@ static void cik_cp_compute_enable(struct
+ if (enable)
+ WREG32(CP_MEC_CNTL, 0);
+ else {
++ /*
++ * To make hibernation reliable we need to clear compute ring
++ * configuration before halting the compute ring.
++ */
++ mutex_lock(&rdev->srbm_mutex);
++ cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
++ cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
++ mutex_unlock(&rdev->srbm_mutex);
++
+ WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
--- /dev/null
+From 233709d2cd6bbaaeda0aeb8d11f6ca7f98563b39 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Fri, 3 Jul 2015 10:02:27 +0900
+Subject: drm/radeon: Don't flush the GART TLB if rdev->gart.ptr == NULL
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+
+commit 233709d2cd6bbaaeda0aeb8d11f6ca7f98563b39 upstream.
+
+This can be the case when the GPU is powered off, e.g. via vgaswitcheroo
+or runpm. When the GPU is powered up again, radeon_gart_table_vram_pin
+flushes the TLB after setting rdev->gart.ptr to non-NULL.
+
+Fixes panic on powering off R7xx GPUs.
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=61529
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_gart.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_gart.c
++++ b/drivers/gpu/drm/radeon/radeon_gart.c
+@@ -260,8 +260,10 @@ void radeon_gart_unbind(struct radeon_de
+ }
+ }
+ }
+- mb();
+- radeon_gart_tlb_flush(rdev);
++ if (rdev->gart.ptr) {
++ mb();
++ radeon_gart_tlb_flush(rdev);
++ }
+ }
+
+ /**
+@@ -306,8 +308,10 @@ int radeon_gart_bind(struct radeon_devic
+ page_base += RADEON_GPU_PAGE_SIZE;
+ }
+ }
+- mb();
+- radeon_gart_tlb_flush(rdev);
++ if (rdev->gart.ptr) {
++ mb();
++ radeon_gart_tlb_flush(rdev);
++ }
+ return 0;
+ }
+
--- /dev/null
+From 54e03986133468e02cb01b76215e4d53a9cf6380 Mon Sep 17 00:00:00 2001
+From: Grigori Goronzy <greg@chown.ath.cx>
+Date: Fri, 3 Jul 2015 01:54:11 +0200
+Subject: drm/radeon: fix HDP flushing
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Grigori Goronzy <greg@chown.ath.cx>
+
+commit 54e03986133468e02cb01b76215e4d53a9cf6380 upstream.
+
+This was regressed by commit 39e7f6f8, although I don't know of any
+actual issues caused by it.
+
+The storage domain is read without TTM locking now, but the lock
+never helped to prevent any races.
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Grigori Goronzy <greg@chown.ath.cx>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_gem.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/radeon/radeon_gem.c
++++ b/drivers/gpu/drm/radeon/radeon_gem.c
+@@ -471,6 +471,7 @@ int radeon_gem_wait_idle_ioctl(struct dr
+ r = ret;
+
+ /* Flush HDP cache via MMIO if necessary */
++ cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+ if (rdev->asic->mmio_hdp_flush &&
+ radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
+ robj->rdev->asic->mmio_hdp_flush(rdev);
--- /dev/null
+From 12f1384da650bdb835fff63e66fe815ea882fc0e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 14 Jul 2015 15:58:30 +0200
+Subject: drm/radeon: fix user ptr race condition
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+
+commit 12f1384da650bdb835fff63e66fe815ea882fc0e upstream.
+
+Port of amdgpu patch 9298e52f8b51d1e4acd68f502832f3a97f8cf892.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_gem.c | 1 +
+ drivers/gpu/drm/radeon/radeon_object.c | 1 -
+ 2 files changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_gem.c
++++ b/drivers/gpu/drm/radeon/radeon_gem.c
+@@ -36,6 +36,7 @@ void radeon_gem_object_free(struct drm_g
+ if (robj) {
+ if (robj->gem_base.import_attach)
+ drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
++ radeon_mn_unregister(robj);
+ radeon_bo_unref(&robj);
+ }
+ }
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -75,7 +75,6 @@ static void radeon_ttm_bo_destroy(struct
+ bo = container_of(tbo, struct radeon_bo, tbo);
+
+ radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
+- radeon_mn_unregister(bo);
+
+ mutex_lock(&bo->rdev->gem.mutex);
+ list_del_init(&bo->list);
--- /dev/null
+From 07f18f0bb8d8d65badd8b4988b40d329fc0cc6dc Mon Sep 17 00:00:00 2001
+From: Mario Kleiner <mario.kleiner.de@gmail.com>
+Date: Fri, 3 Jul 2015 06:03:06 +0200
+Subject: drm/radeon: Handle irqs only based on irq ring, not irq status regs.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mario Kleiner <mario.kleiner.de@gmail.com>
+
+commit 07f18f0bb8d8d65badd8b4988b40d329fc0cc6dc upstream.
+
+Trying to resolve issues with missed vblanks and impossible
+values inside delivered kms pageflip completion events showed
+that radeon's irq handling sometimes doesn't handle valid irqs,
+but silently skips them. This was observed for vblank interrupts.
+
+Although those irqs have corresponding events queued in the gpu's
+irq ring at time of interrupt, and therefore the corresponding
+handling code gets triggered by these events, the handling code
+sometimes silently skipped processing the irq. The reason for those
+skips is that the handling code double-checks for each irq event if
+the corresponding irq status bits in the irq status registers
+are set. Sometimes those bits are not set at time of check
+for valid irqs, maybe due to some hardware race on some setups?
+
+The problem only seems to happen on some machine + card combos
+sometimes, e.g., never happened during my testing of different PC
+cards of the DCE-2/3/4 generation a year ago, but happens consistently
+now on two different Apple Mac cards (RV730, DCE-3, Apple iMac and
+Evergreen JUNIPER, DCE-4 in a Apple MacPro). It also doesn't happen
+at each interrupt but only occasionally every couple of
+hundred or thousand vblank interrupts.
+
+This results in XOrg warning messages like
+
+"[ 7084.472] (WW) RADEON(0): radeon_dri2_flip_event_handler:
+Pageflip completion event has impossible msc 420120 < target_msc 420121"
+
+as well as skipped frames and problems for applications that
+use kms pageflip events or vblank events, e.g., users of DRI2 and
+DRI3/Present, Wayland's Weston compositor, etc. See also
+
+https://bugs.freedesktop.org/show_bug.cgi?id=85203
+
+After some talking to Alex and Michel, we decided to fix this
+by turning the double-check for asserted irq status bits into a
+warning. Whenever a irq event is queued in the IH ring, always
+execute the corresponding interrupt handler. Still check the irq
+status bits, but only to log a DRM_DEBUG message on a mismatch.
+
+This fixed the problems reliably on both previously failing
+cards, RV-730 dual-head tested on both crtcs (pipes D1 and D2)
+and a triple-output Juniper HD-5770 card tested on all three
+available crtcs (D1/D2/D3). The r600 and evergreen irq handling
+is therefore tested, but the cik and si handling is only compile
+tested due to lack of hw.
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Mario Kleiner <mario.kleiner.de@gmail.com>
+CC: Michel Dänzer <michel.daenzer@amd.com>
+CC: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/cik.c | 348 ++++++++++++++++++-------------
+ drivers/gpu/drm/radeon/evergreen.c | 404 ++++++++++++++++++++-----------------
+ drivers/gpu/drm/radeon/r600.c | 159 ++++++++------
+ drivers/gpu/drm/radeon/si.c | 348 ++++++++++++++++++-------------
+ 4 files changed, 708 insertions(+), 551 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -7939,23 +7939,27 @@ restart_ih:
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+- if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[0]) {
+- drm_handle_vblank(rdev->ddev, 0);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[0]))
+- radeon_crtc_handle_vblank(rdev, 0);
+- rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D1 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[0]) {
++ drm_handle_vblank(rdev->ddev, 0);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[0]))
++ radeon_crtc_handle_vblank(rdev, 0);
++ rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D1 vblank\n");
++
+ break;
+ case 1: /* D1 vline */
+- if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D1 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D1 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -7965,23 +7969,27 @@ restart_ih:
+ case 2: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+- if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[1]) {
+- drm_handle_vblank(rdev->ddev, 1);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[1]))
+- radeon_crtc_handle_vblank(rdev, 1);
+- rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D2 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[1]) {
++ drm_handle_vblank(rdev->ddev, 1);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[1]))
++ radeon_crtc_handle_vblank(rdev, 1);
++ rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D2 vblank\n");
++
+ break;
+ case 1: /* D2 vline */
+- if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D2 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D2 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -7991,23 +7999,27 @@ restart_ih:
+ case 3: /* D3 vblank/vline */
+ switch (src_data) {
+ case 0: /* D3 vblank */
+- if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[2]) {
+- drm_handle_vblank(rdev->ddev, 2);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[2]))
+- radeon_crtc_handle_vblank(rdev, 2);
+- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D3 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[2]) {
++ drm_handle_vblank(rdev->ddev, 2);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[2]))
++ radeon_crtc_handle_vblank(rdev, 2);
++ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D3 vblank\n");
++
+ break;
+ case 1: /* D3 vline */
+- if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D3 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D3 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -8017,23 +8029,27 @@ restart_ih:
+ case 4: /* D4 vblank/vline */
+ switch (src_data) {
+ case 0: /* D4 vblank */
+- if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[3]) {
+- drm_handle_vblank(rdev->ddev, 3);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[3]))
+- radeon_crtc_handle_vblank(rdev, 3);
+- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D4 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[3]) {
++ drm_handle_vblank(rdev->ddev, 3);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[3]))
++ radeon_crtc_handle_vblank(rdev, 3);
++ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D4 vblank\n");
++
+ break;
+ case 1: /* D4 vline */
+- if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D4 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D4 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -8043,23 +8059,27 @@ restart_ih:
+ case 5: /* D5 vblank/vline */
+ switch (src_data) {
+ case 0: /* D5 vblank */
+- if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[4]) {
+- drm_handle_vblank(rdev->ddev, 4);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[4]))
+- radeon_crtc_handle_vblank(rdev, 4);
+- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D5 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[4]) {
++ drm_handle_vblank(rdev->ddev, 4);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[4]))
++ radeon_crtc_handle_vblank(rdev, 4);
++ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D5 vblank\n");
++
+ break;
+ case 1: /* D5 vline */
+- if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D5 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D5 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -8069,23 +8089,27 @@ restart_ih:
+ case 6: /* D6 vblank/vline */
+ switch (src_data) {
+ case 0: /* D6 vblank */
+- if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[5]) {
+- drm_handle_vblank(rdev->ddev, 5);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[5]))
+- radeon_crtc_handle_vblank(rdev, 5);
+- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D6 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[5]) {
++ drm_handle_vblank(rdev->ddev, 5);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[5]))
++ radeon_crtc_handle_vblank(rdev, 5);
++ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D6 vblank\n");
++
+ break;
+ case 1: /* D6 vline */
+- if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D6 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D6 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -8105,88 +8129,112 @@ restart_ih:
+ case 42: /* HPD hotplug */
+ switch (src_data) {
+ case 0:
+- if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD1\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD1\n");
++
+ break;
+ case 1:
+- if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD2\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD2\n");
++
+ break;
+ case 2:
+- if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD3\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD3\n");
++
+ break;
+ case 3:
+- if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD4\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD4\n");
++
+ break;
+ case 4:
+- if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD5\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD5\n");
++
+ break;
+ case 5:
+- if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD6\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD6\n");
++
+ break;
+ case 6:
+- if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 1\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 1\n");
++
+ break;
+ case 7:
+- if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 2\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 2\n");
++
+ break;
+ case 8:
+- if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 3\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 3\n");
++
+ break;
+ case 9:
+- if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 4\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 4\n");
++
+ break;
+ case 10:
+- if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 5\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 5\n");
++
+ break;
+ case 11:
+- if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 6\n");
+- }
++ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 6\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -4855,7 +4855,7 @@ restart_ih:
+ return IRQ_NONE;
+
+ rptr = rdev->ih.rptr;
+- DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
++ DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+ /* Order reading of wptr vs. reading of IH ring data */
+ rmb();
+@@ -4873,23 +4873,27 @@ restart_ih:
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[0]) {
+- drm_handle_vblank(rdev->ddev, 0);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[0]))
+- radeon_crtc_handle_vblank(rdev, 0);
+- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D1 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[0]) {
++ drm_handle_vblank(rdev->ddev, 0);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[0]))
++ radeon_crtc_handle_vblank(rdev, 0);
++ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D1 vblank\n");
++
+ break;
+ case 1: /* D1 vline */
+- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D1 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D1 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -4899,23 +4903,27 @@ restart_ih:
+ case 2: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[1]) {
+- drm_handle_vblank(rdev->ddev, 1);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[1]))
+- radeon_crtc_handle_vblank(rdev, 1);
+- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D2 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[1]) {
++ drm_handle_vblank(rdev->ddev, 1);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[1]))
++ radeon_crtc_handle_vblank(rdev, 1);
++ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D2 vblank\n");
++
+ break;
+ case 1: /* D2 vline */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D2 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D2 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -4925,23 +4933,27 @@ restart_ih:
+ case 3: /* D3 vblank/vline */
+ switch (src_data) {
+ case 0: /* D3 vblank */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[2]) {
+- drm_handle_vblank(rdev->ddev, 2);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[2]))
+- radeon_crtc_handle_vblank(rdev, 2);
+- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D3 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[2]) {
++ drm_handle_vblank(rdev->ddev, 2);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[2]))
++ radeon_crtc_handle_vblank(rdev, 2);
++ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D3 vblank\n");
++
+ break;
+ case 1: /* D3 vline */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D3 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D3 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -4951,23 +4963,27 @@ restart_ih:
+ case 4: /* D4 vblank/vline */
+ switch (src_data) {
+ case 0: /* D4 vblank */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[3]) {
+- drm_handle_vblank(rdev->ddev, 3);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[3]))
+- radeon_crtc_handle_vblank(rdev, 3);
+- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D4 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[3]) {
++ drm_handle_vblank(rdev->ddev, 3);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[3]))
++ radeon_crtc_handle_vblank(rdev, 3);
++ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D4 vblank\n");
++
+ break;
+ case 1: /* D4 vline */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D4 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D4 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -4977,23 +4993,27 @@ restart_ih:
+ case 5: /* D5 vblank/vline */
+ switch (src_data) {
+ case 0: /* D5 vblank */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[4]) {
+- drm_handle_vblank(rdev->ddev, 4);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[4]))
+- radeon_crtc_handle_vblank(rdev, 4);
+- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D5 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[4]) {
++ drm_handle_vblank(rdev->ddev, 4);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[4]))
++ radeon_crtc_handle_vblank(rdev, 4);
++ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D5 vblank\n");
++
+ break;
+ case 1: /* D5 vline */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D5 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D5 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -5003,23 +5023,27 @@ restart_ih:
+ case 6: /* D6 vblank/vline */
+ switch (src_data) {
+ case 0: /* D6 vblank */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[5]) {
+- drm_handle_vblank(rdev->ddev, 5);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[5]))
+- radeon_crtc_handle_vblank(rdev, 5);
+- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D6 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[5]) {
++ drm_handle_vblank(rdev->ddev, 5);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[5]))
++ radeon_crtc_handle_vblank(rdev, 5);
++ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D6 vblank\n");
++
+ break;
+ case 1: /* D6 vline */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D6 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D6 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -5039,88 +5063,100 @@ restart_ih:
+ case 42: /* HPD hotplug */
+ switch (src_data) {
+ case 0:
+- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD1\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD1\n");
+ break;
+ case 1:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD2\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD2\n");
+ break;
+ case 2:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD3\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD3\n");
+ break;
+ case 3:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD4\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD4\n");
+ break;
+ case 4:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD5\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD5\n");
+ break;
+ case 5:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD6\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD6\n");
+ break;
+ case 6:
+- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 1\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 1\n");
+ break;
+ case 7:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 2\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 2\n");
+ break;
+ case 8:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 3\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 3\n");
+ break;
+ case 9:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 4\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 4\n");
+ break;
+ case 10:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 5\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 5\n");
+ break;
+ case 11:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 6\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 6\n");
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -5130,46 +5166,52 @@ restart_ih:
+ case 44: /* hdmi */
+ switch (src_data) {
+ case 0:
+- if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+- rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+- queue_hdmi = true;
+- DRM_DEBUG("IH: HDMI0\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
++ queue_hdmi = true;
++ DRM_DEBUG("IH: HDMI0\n");
+ break;
+ case 1:
+- if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+- rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+- queue_hdmi = true;
+- DRM_DEBUG("IH: HDMI1\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
++ queue_hdmi = true;
++ DRM_DEBUG("IH: HDMI1\n");
+ break;
+ case 2:
+- if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+- rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+- queue_hdmi = true;
+- DRM_DEBUG("IH: HDMI2\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
++ queue_hdmi = true;
++ DRM_DEBUG("IH: HDMI2\n");
+ break;
+ case 3:
+- if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+- rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+- queue_hdmi = true;
+- DRM_DEBUG("IH: HDMI3\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
++ queue_hdmi = true;
++ DRM_DEBUG("IH: HDMI3\n");
+ break;
+ case 4:
+- if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+- rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+- queue_hdmi = true;
+- DRM_DEBUG("IH: HDMI4\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
++ queue_hdmi = true;
++ DRM_DEBUG("IH: HDMI4\n");
+ break;
+ case 5:
+- if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+- rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+- queue_hdmi = true;
+- DRM_DEBUG("IH: HDMI5\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
++ queue_hdmi = true;
++ DRM_DEBUG("IH: HDMI5\n");
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -4039,23 +4039,27 @@ restart_ih:
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+- if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[0]) {
+- drm_handle_vblank(rdev->ddev, 0);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[0]))
+- radeon_crtc_handle_vblank(rdev, 0);
+- rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D1 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[0]) {
++ drm_handle_vblank(rdev->ddev, 0);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[0]))
++ radeon_crtc_handle_vblank(rdev, 0);
++ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D1 vblank\n");
++
+ break;
+ case 1: /* D1 vline */
+- if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D1 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D1 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -4065,23 +4069,27 @@ restart_ih:
+ case 5: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+- if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[1]) {
+- drm_handle_vblank(rdev->ddev, 1);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[1]))
+- radeon_crtc_handle_vblank(rdev, 1);
+- rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D2 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[1]) {
++ drm_handle_vblank(rdev->ddev, 1);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[1]))
++ radeon_crtc_handle_vblank(rdev, 1);
++ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D2 vblank\n");
++
+ break;
+ case 1: /* D1 vline */
+- if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D2 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D2 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -4101,46 +4109,53 @@ restart_ih:
+ case 19: /* HPD/DAC hotplug */
+ switch (src_data) {
+ case 0:
+- if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+- rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD1\n");
+- }
++ if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
++ DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD1\n");
+ break;
+ case 1:
+- if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+- rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD2\n");
+- }
++ if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
++ DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD2\n");
+ break;
+ case 4:
+- if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+- rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD3\n");
+- }
++ if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
++ DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD3\n");
+ break;
+ case 5:
+- if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+- rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD4\n");
+- }
++ if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
++ DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD4\n");
+ break;
+ case 10:
+- if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+- rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD5\n");
+- }
++ if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
++ DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD5\n");
+ break;
+ case 12:
+- if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+- rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD6\n");
+- }
++ if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
++ DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD6\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -4150,18 +4165,22 @@ restart_ih:
+ case 21: /* hdmi */
+ switch (src_data) {
+ case 4:
+- if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+- rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+- queue_hdmi = true;
+- DRM_DEBUG("IH: HDMI0\n");
+- }
++ if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
++ DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
++ queue_hdmi = true;
++ DRM_DEBUG("IH: HDMI0\n");
++
+ break;
+ case 5:
+- if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+- rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+- queue_hdmi = true;
+- DRM_DEBUG("IH: HDMI1\n");
+- }
++ if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
++ DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
++ queue_hdmi = true;
++ DRM_DEBUG("IH: HDMI1\n");
++
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -6466,23 +6466,27 @@ restart_ih:
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[0]) {
+- drm_handle_vblank(rdev->ddev, 0);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[0]))
+- radeon_crtc_handle_vblank(rdev, 0);
+- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D1 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[0]) {
++ drm_handle_vblank(rdev->ddev, 0);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[0]))
++ radeon_crtc_handle_vblank(rdev, 0);
++ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D1 vblank\n");
++
+ break;
+ case 1: /* D1 vline */
+- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D1 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D1 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -6492,23 +6496,27 @@ restart_ih:
+ case 2: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[1]) {
+- drm_handle_vblank(rdev->ddev, 1);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[1]))
+- radeon_crtc_handle_vblank(rdev, 1);
+- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D2 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[1]) {
++ drm_handle_vblank(rdev->ddev, 1);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[1]))
++ radeon_crtc_handle_vblank(rdev, 1);
++ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D2 vblank\n");
++
+ break;
+ case 1: /* D2 vline */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D2 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D2 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -6518,23 +6526,27 @@ restart_ih:
+ case 3: /* D3 vblank/vline */
+ switch (src_data) {
+ case 0: /* D3 vblank */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[2]) {
+- drm_handle_vblank(rdev->ddev, 2);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[2]))
+- radeon_crtc_handle_vblank(rdev, 2);
+- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D3 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[2]) {
++ drm_handle_vblank(rdev->ddev, 2);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[2]))
++ radeon_crtc_handle_vblank(rdev, 2);
++ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D3 vblank\n");
++
+ break;
+ case 1: /* D3 vline */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D3 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D3 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -6544,23 +6556,27 @@ restart_ih:
+ case 4: /* D4 vblank/vline */
+ switch (src_data) {
+ case 0: /* D4 vblank */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[3]) {
+- drm_handle_vblank(rdev->ddev, 3);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[3]))
+- radeon_crtc_handle_vblank(rdev, 3);
+- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D4 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[3]) {
++ drm_handle_vblank(rdev->ddev, 3);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[3]))
++ radeon_crtc_handle_vblank(rdev, 3);
++ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D4 vblank\n");
++
+ break;
+ case 1: /* D4 vline */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D4 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D4 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -6570,23 +6586,27 @@ restart_ih:
+ case 5: /* D5 vblank/vline */
+ switch (src_data) {
+ case 0: /* D5 vblank */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[4]) {
+- drm_handle_vblank(rdev->ddev, 4);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[4]))
+- radeon_crtc_handle_vblank(rdev, 4);
+- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D5 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[4]) {
++ drm_handle_vblank(rdev->ddev, 4);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[4]))
++ radeon_crtc_handle_vblank(rdev, 4);
++ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D5 vblank\n");
++
+ break;
+ case 1: /* D5 vline */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D5 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D5 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -6596,23 +6616,27 @@ restart_ih:
+ case 6: /* D6 vblank/vline */
+ switch (src_data) {
+ case 0: /* D6 vblank */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+- if (rdev->irq.crtc_vblank_int[5]) {
+- drm_handle_vblank(rdev->ddev, 5);
+- rdev->pm.vblank_sync = true;
+- wake_up(&rdev->irq.vblank_queue);
+- }
+- if (atomic_read(&rdev->irq.pflip[5]))
+- radeon_crtc_handle_vblank(rdev, 5);
+- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+- DRM_DEBUG("IH: D6 vblank\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ if (rdev->irq.crtc_vblank_int[5]) {
++ drm_handle_vblank(rdev->ddev, 5);
++ rdev->pm.vblank_sync = true;
++ wake_up(&rdev->irq.vblank_queue);
++ }
++ if (atomic_read(&rdev->irq.pflip[5]))
++ radeon_crtc_handle_vblank(rdev, 5);
++ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
++ DRM_DEBUG("IH: D6 vblank\n");
++
+ break;
+ case 1: /* D6 vline */
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+- DRM_DEBUG("IH: D6 vline\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
++ DRM_DEBUG("IH: D6 vline\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -6632,88 +6656,112 @@ restart_ih:
+ case 42: /* HPD hotplug */
+ switch (src_data) {
+ case 0:
+- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD1\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD1\n");
++
+ break;
+ case 1:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD2\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD2\n");
++
+ break;
+ case 2:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD3\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD3\n");
++
+ break;
+ case 3:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD4\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD4\n");
++
+ break;
+ case 4:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD5\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD5\n");
++
+ break;
+ case 5:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+- queue_hotplug = true;
+- DRM_DEBUG("IH: HPD6\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
++ queue_hotplug = true;
++ DRM_DEBUG("IH: HPD6\n");
++
+ break;
+ case 6:
+- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 1\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 1\n");
++
+ break;
+ case 7:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 2\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 2\n");
++
+ break;
+ case 8:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 3\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 3\n");
++
+ break;
+ case 9:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 4\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 4\n");
++
+ break;
+ case 10:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 5\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 5\n");
++
+ break;
+ case 11:
+- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+- queue_dp = true;
+- DRM_DEBUG("IH: HPD_RX 6\n");
+- }
++ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
++ queue_dp = true;
++ DRM_DEBUG("IH: HPD_RX 6\n");
++
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
--- /dev/null
+From 479e9a95120aaae0bf0d3e0b5b26b36ac4a347b6 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 30 Jun 2015 09:30:01 -0400
+Subject: drm/radeon: only check the sink type on DP connectors
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 479e9a95120aaae0bf0d3e0b5b26b36ac4a347b6 upstream.
+
+Avoids a crash on pre-DP asics that support HDMI.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_audio.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_audio.c
++++ b/drivers/gpu/drm/radeon/radeon_audio.c
+@@ -469,22 +469,22 @@ void radeon_audio_detect(struct drm_conn
+ dig = radeon_encoder->enc_priv;
+
+ if (status == connector_status_connected) {
+- struct radeon_connector *radeon_connector;
+- int sink_type;
+-
+ if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ radeon_encoder->audio = NULL;
+ return;
+ }
+
+- radeon_connector = to_radeon_connector(connector);
+- sink_type = radeon_dp_getsinktype(radeon_connector);
++ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
++ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
+- sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+- radeon_encoder->audio = rdev->audio.dp_funcs;
+- else
++ if (radeon_dp_getsinktype(radeon_connector) ==
++ CONNECTOR_OBJECT_ID_DISPLAYPORT)
++ radeon_encoder->audio = rdev->audio.dp_funcs;
++ else
++ radeon_encoder->audio = rdev->audio.hdmi_funcs;
++ } else {
+ radeon_encoder->audio = rdev->audio.hdmi_funcs;
++ }
+
+ dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
+ radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
--- /dev/null
+From 2ba8d1bb8f6b589037f7db1f01144fc80750e8f7 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Glisse?= <jglisse@redhat.com>
+Date: Fri, 19 Jun 2015 10:32:16 -0400
+Subject: drm/radeon: SDMA fix hibernation (CI GPU family).
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Glisse?= <jglisse@redhat.com>
+
+commit 2ba8d1bb8f6b589037f7db1f01144fc80750e8f7 upstream.
+
+In order for hibernation to reliably work we need to properly turn
+off the SDMA block, sadly after numerous attemps i haven't not found
+proper sequence for clean and full shutdown. So simply reset both
+SDMA block, this makes hibernation works reliably on sea island GPU
+family (CI)
+
+Hibernation and suspend to ram were tested (several times) on :
+Bonaire
+Hawaii
+Mullins
+Kaveri
+Kabini
+
+Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/cik_sdma.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/gpu/drm/radeon/cik_sdma.c
++++ b/drivers/gpu/drm/radeon/cik_sdma.c
+@@ -268,6 +268,17 @@ static void cik_sdma_gfx_stop(struct rad
+ }
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
++
++ /* FIXME use something else than big hammer but after few days can not
++ * seem to find good combination so reset SDMA blocks as it seems we
++ * do not shut them down properly. This fix hibernation and does not
++ * affect suspend to ram.
++ */
++ WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
++ (void)RREG32(SRBM_SOFT_RESET);
++ udelay(50);
++ WREG32(SRBM_SOFT_RESET, 0);
++ (void)RREG32(SRBM_SOFT_RESET);
+ }
+
+ /**
--- /dev/null
+From 39fa10f7e21574a70cecf1fed0f9b36535aa68a0 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Fri, 15 May 2015 11:48:52 -0400
+Subject: drm/radeon: take the mode_config mutex when dealing with hpds (v2)
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 39fa10f7e21574a70cecf1fed0f9b36535aa68a0 upstream.
+
+Since we are messing with state in the worker.
+
+v2: drop the changes in the mst worker
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_irq_kms.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -79,10 +79,12 @@ static void radeon_hotplug_work_func(str
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
++ mutex_lock(&mode_config->mutex);
+ if (mode_config->num_connector) {
+ list_for_each_entry(connector, &mode_config->connector_list, head)
+ radeon_connector_hotplug(connector);
+ }
++ mutex_unlock(&mode_config->mutex);
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(dev);
+ }
--- /dev/null
+From f3cbb17bcf676a2fc6aedebe9fbebd59e550c51a Mon Sep 17 00:00:00 2001
+From: Grigori Goronzy <greg@chown.ath.cx>
+Date: Tue, 7 Jul 2015 16:27:29 +0900
+Subject: drm/radeon: unpin cursor BOs on suspend and pin them again on resume (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Grigori Goronzy <greg@chown.ath.cx>
+
+commit f3cbb17bcf676a2fc6aedebe9fbebd59e550c51a upstream.
+
+Everything is evicted from VRAM before suspend, so we need to make
+sure all BOs are unpinned and re-pinned after resume. Fixes broken
+mouse cursor after resume introduced by commit b9729b17.
+
+[Michel Dänzer: Add pinning BOs on resume]
+
+v2:
+[Alex Deucher: merge cursor unpin into fb unpin loop]
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=100541
+Reviewed-by: Christian König <christian.koenig@amd.com> (v1)
+Signed-off-by: Grigori Goronzy <greg@chown.ath.cx>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_device.c | 34 ++++++++++++++++++++++++++++++++-
+ 1 file changed, 33 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1572,11 +1572,21 @@ int radeon_suspend_kms(struct drm_device
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ }
+
+- /* unpin the front buffers */
++ /* unpin the front buffers and cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
+ struct radeon_bo *robj;
+
++ if (radeon_crtc->cursor_bo) {
++ struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
++ r = radeon_bo_reserve(robj, false);
++ if (r == 0) {
++ radeon_bo_unpin(robj);
++ radeon_bo_unreserve(robj);
++ }
++ }
++
+ if (rfb == NULL || rfb->obj == NULL) {
+ continue;
+ }
+@@ -1639,6 +1649,7 @@ int radeon_resume_kms(struct drm_device
+ {
+ struct drm_connector *connector;
+ struct radeon_device *rdev = dev->dev_private;
++ struct drm_crtc *crtc;
+ int r;
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+@@ -1678,6 +1689,27 @@ int radeon_resume_kms(struct drm_device
+
+ radeon_restore_bios_scratch_regs(rdev);
+
++ /* pin cursors */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
++
++ if (radeon_crtc->cursor_bo) {
++ struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
++ r = radeon_bo_reserve(robj, false);
++ if (r == 0) {
++ /* Only 27 bit offset for legacy cursor */
++ r = radeon_bo_pin_restricted(robj,
++ RADEON_GEM_DOMAIN_VRAM,
++ ASIC_IS_AVIVO(rdev) ?
++ 0 : 1 << 27,
++ &radeon_crtc->cursor_addr);
++ if (r != 0)
++ DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
++ radeon_bo_unreserve(robj);
++ }
++ }
++ }
++
+ /* init dig PHYs, disp eng pll */
+ if (rdev->is_atom_bios) {
+ radeon_atom_encoder_init(rdev);
--- /dev/null
+From 41315b793e13f884cda79389f0d5d44d027e57d1 Mon Sep 17 00:00:00 2001
+From: Daniel Kurtz <djkurtz@chromium.org>
+Date: Tue, 7 Jul 2015 17:03:36 +0800
+Subject: drm/rockchip: use drm_gem_mmap helpers
+
+From: Daniel Kurtz <djkurtz@chromium.org>
+
+commit 41315b793e13f884cda79389f0d5d44d027e57d1 upstream.
+
+Rather than (incompletely [0]) re-implementing drm_gem_mmap() and
+drm_gem_mmap_obj() helpers, call them directly from the rockchip mmap
+routines.
+
+Once the core functions return successfully, the rockchip mmap routines
+can still use dma_mmap_attrs() to simply mmap the entire buffer.
+
+[0] Previously, we were performing the mmap() without first taking a
+reference on the underlying gem buffer. This could leak ptes if the gem
+object is destroyed while userspace is still holding the mapping.
+
+Signed-off-by: Daniel Kurtz <djkurtz@chromium.org>
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 67 ++++++++++++++--------------
+ 1 file changed, 34 insertions(+), 33 deletions(-)
+
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+@@ -54,55 +54,56 @@ static void rockchip_gem_free_buf(struct
+ &rk_obj->dma_attrs);
+ }
+
+-int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
+- struct vm_area_struct *vma)
++static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
++ struct vm_area_struct *vma)
++
+ {
++ int ret;
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+ struct drm_device *drm = obj->dev;
+- unsigned long vm_size;
+-
+- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+- vm_size = vma->vm_end - vma->vm_start;
+
+- if (vm_size > obj->size)
+- return -EINVAL;
++ /*
++ * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear
++ * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
++ */
++ vma->vm_flags &= ~VM_PFNMAP;
+
+- return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
++ ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
+ obj->size, &rk_obj->dma_attrs);
++ if (ret)
++ drm_gem_vm_close(vma);
++
++ return ret;
+ }
+
+-/* drm driver mmap file operations */
+-int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
++int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
++ struct vm_area_struct *vma)
+ {
+- struct drm_file *priv = filp->private_data;
+- struct drm_device *dev = priv->minor->dev;
+- struct drm_gem_object *obj;
+- struct drm_vma_offset_node *node;
++ struct drm_device *drm = obj->dev;
+ int ret;
+
+- if (drm_device_is_unplugged(dev))
+- return -ENODEV;
++ mutex_lock(&drm->struct_mutex);
++ ret = drm_gem_mmap_obj(obj, obj->size, vma);
++ mutex_unlock(&drm->struct_mutex);
++ if (ret)
++ return ret;
+
+- mutex_lock(&dev->struct_mutex);
++ return rockchip_drm_gem_object_mmap(obj, vma);
++}
+
+- node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+- vma->vm_pgoff,
+- vma_pages(vma));
+- if (!node) {
+- mutex_unlock(&dev->struct_mutex);
+- DRM_ERROR("failed to find vma node.\n");
+- return -EINVAL;
+- } else if (!drm_vma_node_is_allowed(node, filp)) {
+- mutex_unlock(&dev->struct_mutex);
+- return -EACCES;
+- }
++/* drm driver mmap file operations */
++int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ struct drm_gem_object *obj;
++ int ret;
+
+- obj = container_of(node, struct drm_gem_object, vma_node);
+- ret = rockchip_gem_mmap_buf(obj, vma);
++ ret = drm_gem_mmap(filp, vma);
++ if (ret)
++ return ret;
+
+- mutex_unlock(&dev->struct_mutex);
++ obj = vma->vm_private_data;
+
+- return ret;
++ return rockchip_drm_gem_object_mmap(obj, vma);
+ }
+
+ struct rockchip_gem_object *
--- /dev/null
+From 5677d67ae3949f09f57357241b88222d49b8c782 Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Thu, 16 Jul 2015 16:47:50 +0200
+Subject: drm: Stop resetting connector state to unknown
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit 5677d67ae3949f09f57357241b88222d49b8c782 upstream.
+
+It's causing piles of issues since we've stopped forcing full detect
+cycles in the sysfs interfaces with
+
+commit c484f02d0f02fbbfc6decc945a69aae011041a27
+Author: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Fri Mar 6 12:36:42 2015 +0000
+
+ drm: Lighten sysfs connector 'status'
+
+The original justification for this was that the hpd handlers could
+use the unknown state as a hint to force a full detection. But current
+i915 code isn't doing that any more, and no one else really uses reset
+on resume. So instead just keep the old state around.
+
+References: http://article.gmane.org/gmane.comp.freedesktop.xorg.drivers.intel/62584
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=100641
+Cc: Rui Matos <tiagomatos@gmail.com>
+Cc: Julien Wajsberg <felash@gmail.com>
+Cc: kuddel.mail@gmx.de
+Cc: Lennart Poettering <mzxreary@0pointer.de>
+Acked-by: Rob Clark <robdclark@gmail.com>
+Tested-by: Rui Tiago Cação Matos <tiagomatos@gmail.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_crtc.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -5051,12 +5051,9 @@ void drm_mode_config_reset(struct drm_de
+ if (encoder->funcs->reset)
+ encoder->funcs->reset(encoder);
+
+- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+- connector->status = connector_status_unknown;
+-
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+- }
+ }
+ EXPORT_SYMBOL(drm_mode_config_reset);
+
--- /dev/null
+From 3c1dae0a07c651526f8e878d223a88f82caa5a50 Mon Sep 17 00:00:00 2001
+From: Thierry Reding <treding@nvidia.com>
+Date: Thu, 11 Jun 2015 18:33:48 +0200
+Subject: drm/tegra: dpaux: Fix transfers larger than 4 bytes
+
+From: Thierry Reding <treding@nvidia.com>
+
+commit 3c1dae0a07c651526f8e878d223a88f82caa5a50 upstream.
+
+The DPAUX read/write FIFO registers aren't sequential in the register
+space, causing transfers larger than 4 bytes to cause accesses to non-
+existing FIFO registers.
+
+Fixes: 6b6b604215c6 ("drm/tegra: Add eDP support")
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/tegra/dpaux.c | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+--- a/drivers/gpu/drm/tegra/dpaux.c
++++ b/drivers/gpu/drm/tegra/dpaux.c
+@@ -72,34 +72,32 @@ static inline void tegra_dpaux_writel(st
+ static void tegra_dpaux_write_fifo(struct tegra_dpaux *dpaux, const u8 *buffer,
+ size_t size)
+ {
+- unsigned long offset = DPAUX_DP_AUXDATA_WRITE(0);
+ size_t i, j;
+
+- for (i = 0; i < size; i += 4) {
+- size_t num = min_t(size_t, size - i, 4);
++ for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
++ size_t num = min_t(size_t, size - i * 4, 4);
+ unsigned long value = 0;
+
+ for (j = 0; j < num; j++)
+- value |= buffer[i + j] << (j * 8);
++ value |= buffer[i * 4 + j] << (j * 8);
+
+- tegra_dpaux_writel(dpaux, value, offset++);
++ tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXDATA_WRITE(i));
+ }
+ }
+
+ static void tegra_dpaux_read_fifo(struct tegra_dpaux *dpaux, u8 *buffer,
+ size_t size)
+ {
+- unsigned long offset = DPAUX_DP_AUXDATA_READ(0);
+ size_t i, j;
+
+- for (i = 0; i < size; i += 4) {
+- size_t num = min_t(size_t, size - i, 4);
++ for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
++ size_t num = min_t(size_t, size - i * 4, 4);
+ unsigned long value;
+
+- value = tegra_dpaux_readl(dpaux, offset++);
++ value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXDATA_READ(i));
+
+ for (j = 0; j < num; j++)
+- buffer[i + j] = value >> (j * 8);
++ buffer[i * 4 + j] = value >> (j * 8);
+ }
+ }
+
--- /dev/null
+From fa2f97dd33c2c32a06a5ea7f6e87af06a2e26baa Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Tue, 23 Jun 2015 16:35:06 +0200
+Subject: drm/vgem: Set unique to "vgem"
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit fa2f97dd33c2c32a06a5ea7f6e87af06a2e26baa upstream.
+
+Since there's only one global instance ever we don't need to have
+anything fancy. Stops a WARNING in the get_unique ioctl that the
+unique name isn't set.
+
+Reported-and-tested-by: Fabio Coatti <fabio.coatti@gmail.com>
+Cc: Fabio Coatti <fabio.coatti@gmail.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vgem/vgem_drv.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/vgem/vgem_drv.c
++++ b/drivers/gpu/drm/vgem/vgem_drv.c
+@@ -328,6 +328,8 @@ static int __init vgem_init(void)
+ goto out;
+ }
+
++ drm_dev_set_unique(vgem_device, "vgem");
++
+ ret = drm_dev_register(vgem_device, 0);
+
+ if (ret)
--- /dev/null
+From 9098f84cced870f54d8c410dd2444cfa61467fa0 Mon Sep 17 00:00:00 2001
+From: Tomas Winkler <tomas.winkler@intel.com>
+Date: Thu, 16 Jul 2015 15:50:45 +0200
+Subject: mmc: block: Add missing mmc_blk_put() in power_ro_lock_show()
+
+From: Tomas Winkler <tomas.winkler@intel.com>
+
+commit 9098f84cced870f54d8c410dd2444cfa61467fa0 upstream.
+
+Enclosing mmc_blk_put() is missing in power_ro_lock_show() sysfs handler,
+let's add it.
+
+Fixes: add710eaa886 ("mmc: boot partition ro lock support")
+Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/card/block.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+
++ mmc_blk_put(md);
++
+ return ret;
+ }
+
--- /dev/null
+From d82c0ba6e306f079407f07003e53c262d683397b Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Tue, 14 Jul 2015 12:29:27 +0200
+Subject: Revert "drm/i915: Declare the swizzling unknown for L-shaped configurations"
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit d82c0ba6e306f079407f07003e53c262d683397b upstream.
+
+This reverts commit 19ee835cdb0b5a8eb11a68f25a51b8039d564488.
+
+It breaks existing old userspace which doesn't handle UNKNOWN
+swizzling correct. Yes UNKNOWN was a thing back in 2009 and probably
+still is on some other platforms, but it still pretty clearly broke
+the testers machine. If we want this we need to extend the ioctl with
+new paramters that only new userspace looks at.
+
+Cc: Harald Arnesen <harald@skogtun.org>
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Reported-by: Harald Arnesen <harald@skogtun.org>
+Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_gem_tiling.c | 12 +-----------
+ 1 file changed, 1 insertion(+), 11 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
+@@ -183,18 +183,8 @@ i915_gem_detect_bit_6_swizzle(struct drm
+ if (IS_GEN4(dev)) {
+ uint32_t ddc2 = I915_READ(DCC2);
+
+- if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+- /* Since the swizzling may vary within an
+- * object, we have no idea what the swizzling
+- * is for any page in particular. Thus we
+- * cannot migrate tiled pages using the GPU,
+- * nor can we tell userspace what the exact
+- * swizzling is for any object.
+- */
++ if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+ dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+- swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+- swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+- }
+ }
+
+ if (dcc == 0xffffffff) {
--- /dev/null
+From ac9134906b3f5c2b45dc80dab0fee792bd516d52 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 29 Jun 2015 11:09:11 -0400
+Subject: Revert "drm/radeon: dont switch vt on suspend"
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit ac9134906b3f5c2b45dc80dab0fee792bd516d52 upstream.
+
+This reverts commit b9729b17a414f99c61f4db9ac9f9ed987fa0cbfe.
+
+This seems to break the cursor on resume for lots of systems.
+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_fb.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_fb.c
++++ b/drivers/gpu/drm/radeon/radeon_fb.c
+@@ -257,7 +257,6 @@ static int radeonfb_create(struct drm_fb
+ }
+
+ info->par = rfbdev;
+- info->skip_vt_switch = true;
+
+ ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+ if (ret) {
usb-ohci-fix-race-between-ed-unlink-and-urb-submission.patch
usb-core-lpm-set-lpm_capable-for-root-hub-device.patch
usb-xhci-bugfix-for-null-pointer-deference-in-xhci_endpoint_init-function.patch
+dm-cache-fix-race-when-issuing-a-policy_replace-operation.patch
+dm-stats-fix-divide-by-zero-if-number_of_areas-arg-is-zero.patch
+dm-space-map-metadata-fix-occasional-leak-of-a-metadata-block-on-resize.patch
+dm-btree-remove-fix-bug-in-redistribute3.patch
+dm-thin-allocate-the-cell_sort_array-dynamically.patch
+dm-btree-silence-lockdep-lock-inversion-in-dm_btree_del.patch
+mmc-block-add-missing-mmc_blk_put-in-power_ro_lock_show.patch
+block-loop-convert-to-per-device-workqueue.patch
+block-loop-avoiding-too-many-pending-per-work-i-o.patch
+block-do-a-full-clone-when-splitting-discard-bios.patch
+arm-at91-dt-sama5d4ek-mci0-uses-slot-0.patch
+arm-at91-dt-sama5d4-fix-dma-conf-for-aes-sha-and-tdes-nodes.patch
+tty-serial-at91-rs485-mode-0-is-valid-for-delay_rts_after_send.patch
+arm-at91-dt-trivial-fix-usb-udc-compatible-string.patch
+arm-at91-dt-update-udc-compatible-strings.patch
+bus-arm-ccn-fix-node-xp-config-conversion.patch
+drm-vgem-set-unique-to-vgem.patch
+drm-dp-mst-close-deadlock-in-connector-destruction.patch
+drm-dp-mst-take-lock-around-looking-up-the-branch-device-on-hpd-irq.patch
+drm-dp-mst-make-sure-mst_primary-mstb-is-valid-in-work-function.patch
+drm-tegra-dpaux-fix-transfers-larger-than-4-bytes.patch
+drm-qxl-do-not-cause-spice-server-to-clean-our-objects.patch
+drm-qxl-do-not-leak-memory-if-qxl_release_list_add-fails.patch
+drm-bridge-ptn3460-include-linux-gpio-consumer.h.patch
+drm-atomic-fix-out-of-bounds-read-in-for_each_-_in_state-helpers.patch
+drm-radeon-take-the-mode_config-mutex-when-dealing-with-hpds-v2.patch
+drm-radeon-clean-up-radeon_audio_enable.patch
+drm-i915-ppgtt-break-loop-in-gen8_ppgtt_clear_range-failure-path.patch
+drm-i915-fix-ips-related-flicker.patch
+drm-i915-fix-backlight-after-resume-on-855gm.patch
+drm-i915-declare-the-swizzling-unknown-for-l-shaped-configurations.patch
+drm-i915-snapshot-seqno-of-most-recently-submitted-request.patch
+drm-i915-forward-all-core-drm-ioctls-to-core-compat-handling.patch
+revert-drm-i915-declare-the-swizzling-unknown-for-l-shaped-configurations.patch
+drm-i915-use-two-32bit-reads-for-select-64bit-reg_read-ioctls.patch
+drm-radeon-compute-ring-fix-hibernation-ci-gpu-family-v2.patch
+drm-radeon-sdma-fix-hibernation-ci-gpu-family.patch
+revert-drm-radeon-dont-switch-vt-on-suspend.patch
+drm-radeon-only-check-the-sink-type-on-dp-connectors.patch
+drm-radeon-fix-hdp-flushing.patch
+drm-radeon-handle-irqs-only-based-on-irq-ring-not-irq-status-regs.patch
+drm-radeon-clean-up-reference-counting-and-pinning-of-the-cursor-bos.patch
+drm-radeon-unpin-cursor-bos-on-suspend-and-pin-them-again-on-resume-v2.patch
+drm-radeon-don-t-flush-the-gart-tlb-if-rdev-gart.ptr-null.patch
+drm-radeon-add-a-dpm-quirk-for-sapphire-radeon-r9-270x-2gb-gddr5.patch
+drm-radeon-fix-user-ptr-race-condition.patch
+drm-radeon-ci-silence-a-harmless-pcc-warning.patch
+drm-rockchip-use-drm_gem_mmap-helpers.patch
+drm-add-a-check-for-x-y-in-drm_mode_setcrtc.patch
+drm-provide-compat-ioctl-for-addfb2.1.patch
+drm-stop-resetting-connector-state-to-unknown.patch
--- /dev/null
+From 8687634b7908c42eb700e0469e110e02833611d1 Mon Sep 17 00:00:00 2001
+From: Nicolas Ferre <nicolas.ferre@atmel.com>
+Date: Mon, 11 May 2015 13:00:31 +0200
+Subject: tty/serial: at91: RS485 mode: 0 is valid for delay_rts_after_send
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Nicolas Ferre <nicolas.ferre@atmel.com>
+
+commit 8687634b7908c42eb700e0469e110e02833611d1 upstream.
+
+In RS485 mode, we may want to set the delay_rts_after_send value to 0.
+In the datasheet, the 0 value is said to "disable" the Transmitter Timeguard but
+this is exactly the expected behavior if we want no delay...
+
+Moreover, if the value was set to non-zero value by device-tree or earlier
+ioctl command, it was impossible to change it back to zero.
+
+Reported-by: Sami Pietikäinen <Sami.Pietikainen@wapice.com>
+Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/atmel_serial.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -315,8 +315,7 @@ static int atmel_config_rs485(struct uar
+ if (rs485conf->flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+- if ((rs485conf->delay_rts_after_send) > 0)
+- UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
++ UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
+ mode |= ATMEL_US_USMODE_RS485;
+ } else {
+ dev_dbg(port->dev, "Setting UART to RS232\n");
+@@ -354,8 +353,7 @@ static void atmel_set_mctrl(struct uart_
+
+ /* override mode to RS485 if needed, otherwise keep the current mode */
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+- if ((port->rs485.delay_rts_after_send) > 0)
+- UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
++ UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
+ mode &= ~ATMEL_US_USMODE;
+ mode |= ATMEL_US_USMODE_RS485;
+ }
+@@ -2061,8 +2059,7 @@ static void atmel_set_termios(struct uar
+
+ /* mode */
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+- if ((port->rs485.delay_rts_after_send) > 0)
+- UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
++ UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
+ mode |= ATMEL_US_USMODE_RS485;
+ } else if (termios->c_cflag & CRTSCTS) {
+ /* RS232 with hardware handshake (RTS/CTS) */