git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 24 May 2018 07:37:22 +0000 (09:37 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 24 May 2018 07:37:22 +0000 (09:37 +0200)
added patches:
dmaengine-ensure-dmaengine-helpers-check-valid-callback.patch
gpio-rcar-add-runtime-pm-handling-for-interrupts.patch
scsi-libsas-defer-ata-device-eh-commands-to-libata.patch
scsi-sg-allocate-with-__gfp_zero-in-sg_build_indirect.patch
scsi-zfcp-fix-infinite-iteration-on-erp-ready-list.patch
time-fix-clock_monotonic_raw-sub-nanosecond-accounting.patch

queue-4.4/dmaengine-ensure-dmaengine-helpers-check-valid-callback.patch [new file with mode: 0644]
queue-4.4/gpio-rcar-add-runtime-pm-handling-for-interrupts.patch [new file with mode: 0644]
queue-4.4/scsi-libsas-defer-ata-device-eh-commands-to-libata.patch [new file with mode: 0644]
queue-4.4/scsi-sg-allocate-with-__gfp_zero-in-sg_build_indirect.patch [new file with mode: 0644]
queue-4.4/scsi-zfcp-fix-infinite-iteration-on-erp-ready-list.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/time-fix-clock_monotonic_raw-sub-nanosecond-accounting.patch [new file with mode: 0644]

diff --git a/queue-4.4/dmaengine-ensure-dmaengine-helpers-check-valid-callback.patch b/queue-4.4/dmaengine-ensure-dmaengine-helpers-check-valid-callback.patch
new file mode 100644 (file)
index 0000000..10a3591
--- /dev/null
@@ -0,0 +1,94 @@
+From 757d12e5849be549076901b0d33c60d5f360269c Mon Sep 17 00:00:00 2001
+From: Vinod Koul <vinod.koul@intel.com>
+Date: Tue, 12 Apr 2016 21:07:06 +0530
+Subject: dmaengine: ensure dmaengine helpers check valid callback
+
+From: Vinod Koul <vinod.koul@intel.com>
+
+commit 757d12e5849be549076901b0d33c60d5f360269c upstream.
+
+dmaengine has various device callbacks and exposes helper
+functions to invoke these. These helpers should check that the channel,
+device and callback are valid before invoking them.
+
+Reported-by: Jon Hunter <jonathanh@nvidia.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Fabrizio Castro <fabrizio.castro@bp.renesas.com>
+Signed-off-by: Jianming Qiao <jianming.qiao@bp.renesas.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/dmaengine.h |   20 +++++++++++++++++++-
+ 1 file changed, 19 insertions(+), 1 deletion(-)
+
+--- a/include/linux/dmaengine.h
++++ b/include/linux/dmaengine.h
+@@ -767,6 +767,9 @@ static inline struct dma_async_tx_descri
+       sg_dma_address(&sg) = buf;
+       sg_dma_len(&sg) = len;
++      if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
++              return NULL;
++
+       return chan->device->device_prep_slave_sg(chan, &sg, 1,
+                                                 dir, flags, NULL);
+ }
+@@ -775,6 +778,9 @@ static inline struct dma_async_tx_descri
+       struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+       enum dma_transfer_direction dir, unsigned long flags)
+ {
++      if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
++              return NULL;
++
+       return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+                                                 dir, flags, NULL);
+ }
+@@ -786,6 +792,9 @@ static inline struct dma_async_tx_descri
+       enum dma_transfer_direction dir, unsigned long flags,
+       struct rio_dma_ext *rio_ext)
+ {
++      if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
++              return NULL;
++
+       return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+                                                 dir, flags, rio_ext);
+ }
+@@ -796,6 +805,9 @@ static inline struct dma_async_tx_descri
+               size_t period_len, enum dma_transfer_direction dir,
+               unsigned long flags)
+ {
++      if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
++              return NULL;
++
+       return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
+                                               period_len, dir, flags);
+ }
+@@ -804,6 +816,9 @@ static inline struct dma_async_tx_descri
+               struct dma_chan *chan, struct dma_interleaved_template *xt,
+               unsigned long flags)
+ {
++      if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
++              return NULL;
++
+       return chan->device->device_prep_interleaved_dma(chan, xt, flags);
+ }
+@@ -811,7 +826,7 @@ static inline struct dma_async_tx_descri
+               struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+               unsigned long flags)
+ {
+-      if (!chan || !chan->device)
++      if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
+               return NULL;
+       return chan->device->device_prep_dma_memset(chan, dest, value,
+@@ -824,6 +839,9 @@ static inline struct dma_async_tx_descri
+               struct scatterlist *src_sg, unsigned int src_nents,
+               unsigned long flags)
+ {
++      if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
++              return NULL;
++
+       return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
+                       src_sg, src_nents, flags);
+ }
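
The checks above only help callers that test the helper's return value. A minimal caller sketch (illustrative only, not part of the patch; my_start_tx and its parameters are invented) of the pattern that now fails cleanly when a channel lacks the callback:

#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Illustrative caller: with the NULL checks above, a prep helper returns
 * NULL instead of dereferencing a missing device_prep_slave_sg callback,
 * so the caller can back out gracefully. */
static int my_start_tx(struct dma_chan *chan, dma_addr_t buf, size_t len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(chan, buf, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)	/* also covers a channel without slave_sg support */
		return -ENXIO;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
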
diff --git a/queue-4.4/gpio-rcar-add-runtime-pm-handling-for-interrupts.patch b/queue-4.4/gpio-rcar-add-runtime-pm-handling-for-interrupts.patch
new file mode 100644 (file)
index 0000000..f6f8c30
--- /dev/null
@@ -0,0 +1,95 @@
+From b26a719bdba9aa926ceaadecc66e07623d2b8a53 Mon Sep 17 00:00:00 2001
+From: Geert Uytterhoeven <geert+renesas@glider.be>
+Date: Thu, 18 Feb 2016 17:06:30 +0100
+Subject: gpio: rcar: Add Runtime PM handling for interrupts
+
+From: Geert Uytterhoeven <geert+renesas@glider.be>
+
+commit b26a719bdba9aa926ceaadecc66e07623d2b8a53 upstream.
+
+The R-Car GPIO driver handles Runtime PM for requested GPIOs only.
+
+When using a GPIO purely as an interrupt source, no Runtime PM handling
+is done, and the GPIO module's clock may not be enabled.
+
+To fix this:
+  - Add .irq_request_resources() and .irq_release_resources() callbacks
+    to handle Runtime PM when an interrupt is requested,
+  - Add irq_bus_lock() and sync_unlock() callbacks to handle Runtime PM
+    when e.g. disabling/enabling an interrupt, or configuring the
+    interrupt type.
+
+Fixes: d5c3d84657db57bd "net: phy: Avoid polling PHY with PHY_IGNORE_INTERRUPTS"
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+[fabrizio: cherry-pick to v4.4.y. Use container_of instead of
+gpiochip_get_data.]
+Signed-off-by: Fabrizio Castro <fabrizio.castro@bp.renesas.com>
+Reviewed-by: Biju Das <biju.das@bp.renesas.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpio-rcar.c |   46 ++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 46 insertions(+)
+
+--- a/drivers/gpio/gpio-rcar.c
++++ b/drivers/gpio/gpio-rcar.c
+@@ -200,6 +200,48 @@ static int gpio_rcar_irq_set_wake(struct
+       return 0;
+ }
++static void gpio_rcar_irq_bus_lock(struct irq_data *d)
++{
++      struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++      struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
++                                              gpio_chip);
++
++      pm_runtime_get_sync(&p->pdev->dev);
++}
++
++static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
++{
++      struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++      struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
++                                              gpio_chip);
++
++      pm_runtime_put(&p->pdev->dev);
++}
++
++
++static int gpio_rcar_irq_request_resources(struct irq_data *d)
++{
++      struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++      struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
++                                              gpio_chip);
++      int error;
++
++      error = pm_runtime_get_sync(&p->pdev->dev);
++      if (error < 0)
++              return error;
++
++      return 0;
++}
++
++static void gpio_rcar_irq_release_resources(struct irq_data *d)
++{
++      struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++      struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
++                                              gpio_chip);
++
++      pm_runtime_put(&p->pdev->dev);
++}
++
+ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
+ {
+       struct gpio_rcar_priv *p = dev_id;
+@@ -460,6 +502,10 @@ static int gpio_rcar_probe(struct platfo
+       irq_chip->irq_unmask = gpio_rcar_irq_enable;
+       irq_chip->irq_set_type = gpio_rcar_irq_set_type;
+       irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
++      irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
++      irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
++      irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
++      irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
+       irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
+       ret = gpiochip_add(gpio_chip);
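
For context, a consumer-side sketch (hypothetical driver; my_card_detect_irq and my_request_card_detect are invented names) of the case the patch addresses: a gpio-rcar line used purely as an interrupt source and never requested as a GPIO. With the resource callbacks above, requesting the interrupt alone is enough to keep the GPIO block clocked:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t my_card_detect_irq(int irq, void *dev_id)
{
	/* the GPIO module is powered here because the driver's
	 * irq_request_resources callback took a Runtime PM reference */
	return IRQ_HANDLED;
}

static int my_request_card_detect(struct platform_device *pdev, int irq)
{
	/* no gpio_request() on the line, only the interrupt */
	return devm_request_irq(&pdev->dev, irq, my_card_detect_irq,
				IRQF_TRIGGER_BOTH, "my-card-detect", pdev);
}
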
diff --git a/queue-4.4/scsi-libsas-defer-ata-device-eh-commands-to-libata.patch b/queue-4.4/scsi-libsas-defer-ata-device-eh-commands-to-libata.patch
new file mode 100644 (file)
index 0000000..741cfa7
--- /dev/null
@@ -0,0 +1,132 @@
+From 318aaf34f1179b39fa9c30fa0f3288b645beee39 Mon Sep 17 00:00:00 2001
+From: Jason Yan <yanaijie@huawei.com>
+Date: Thu, 8 Mar 2018 10:34:53 +0800
+Subject: scsi: libsas: defer ata device eh commands to libata
+
+From: Jason Yan <yanaijie@huawei.com>
+
+commit 318aaf34f1179b39fa9c30fa0f3288b645beee39 upstream.
+
+When an ata device is doing EH, some commands still attached to tasks are
+not passed to libata when abort or recovery fails, so libata does not
+handle these commands. After these commands are done, the sas task is
+freed, but the ata qc is not. This causes an ata qc leak and triggers a
+warning like the one below:
+
+WARNING: CPU: 0 PID: 28512 at drivers/ata/libata-eh.c:4037
+ata_eh_finish+0xb4/0xcc
+CPU: 0 PID: 28512 Comm: kworker/u32:2 Tainted: G     W  OE 4.14.0#1
+......
+Call trace:
+[<ffff0000088b7bd0>] ata_eh_finish+0xb4/0xcc
+[<ffff0000088b8420>] ata_do_eh+0xc4/0xd8
+[<ffff0000088b8478>] ata_std_error_handler+0x44/0x8c
+[<ffff0000088b8068>] ata_scsi_port_error_handler+0x480/0x694
+[<ffff000008875fc4>] async_sas_ata_eh+0x4c/0x80
+[<ffff0000080f6be8>] async_run_entry_fn+0x4c/0x170
+[<ffff0000080ebd70>] process_one_work+0x144/0x390
+[<ffff0000080ec100>] worker_thread+0x144/0x418
+[<ffff0000080f2c98>] kthread+0x10c/0x138
+[<ffff0000080855dc>] ret_from_fork+0x10/0x18
+
+If too many ata qcs are leaked, ata tag allocation will fail and I/O will
+be blocked forever.
+
+As suggested by Dan Williams, defer ata device commands to libata and
+merge sas_eh_finish_cmd() with sas_eh_defer_cmd(). libata will handle
+ata qcs correctly after this.
+
+Signed-off-by: Jason Yan <yanaijie@huawei.com>
+CC: Xiaofei Tan <tanxiaofei@huawei.com>
+CC: John Garry <john.garry@huawei.com>
+CC: Dan Williams <dan.j.williams@intel.com>
+Reviewed-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Cc: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/libsas/sas_scsi_host.c |   33 +++++++++++++--------------------
+ 1 file changed, 13 insertions(+), 20 deletions(-)
+
+--- a/drivers/scsi/libsas/sas_scsi_host.c
++++ b/drivers/scsi/libsas/sas_scsi_host.c
+@@ -222,6 +222,7 @@ out_done:
+ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
+ {
+       struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
++      struct domain_device *dev = cmd_to_domain_dev(cmd);
+       struct sas_task *task = TO_SAS_TASK(cmd);
+       /* At this point, we only get called following an actual abort
+@@ -230,6 +231,14 @@ static void sas_eh_finish_cmd(struct scs
+        */
+       sas_end_task(cmd, task);
++      if (dev_is_sata(dev)) {
++              /* defer commands to libata so that libata EH can
++               * handle ata qcs correctly
++               */
++              list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
++              return;
++      }
++
+       /* now finish the command and move it on to the error
+        * handler done list, this also takes it off the
+        * error handler pending list.
+@@ -237,22 +246,6 @@ static void sas_eh_finish_cmd(struct scs
+       scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
+ }
+-static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
+-{
+-      struct domain_device *dev = cmd_to_domain_dev(cmd);
+-      struct sas_ha_struct *ha = dev->port->ha;
+-      struct sas_task *task = TO_SAS_TASK(cmd);
+-
+-      if (!dev_is_sata(dev)) {
+-              sas_eh_finish_cmd(cmd);
+-              return;
+-      }
+-
+-      /* report the timeout to libata */
+-      sas_end_task(cmd, task);
+-      list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
+-}
+-
+ static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
+ {
+       struct scsi_cmnd *cmd, *n;
+@@ -260,7 +253,7 @@ static void sas_scsi_clear_queue_lu(stru
+       list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
+               if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
+                   cmd->device->lun == my_cmd->device->lun)
+-                      sas_eh_defer_cmd(cmd);
++                      sas_eh_finish_cmd(cmd);
+       }
+ }
+@@ -622,12 +615,12 @@ static void sas_eh_handle_sas_errors(str
+               case TASK_IS_DONE:
+                       SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
+                                   task);
+-                      sas_eh_defer_cmd(cmd);
++                      sas_eh_finish_cmd(cmd);
+                       continue;
+               case TASK_IS_ABORTED:
+                       SAS_DPRINTK("%s: task 0x%p is aborted\n",
+                                   __func__, task);
+-                      sas_eh_defer_cmd(cmd);
++                      sas_eh_finish_cmd(cmd);
+                       continue;
+               case TASK_IS_AT_LU:
+                       SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
+@@ -638,7 +631,7 @@ static void sas_eh_handle_sas_errors(str
+                                           "recovered\n",
+                                           SAS_ADDR(task->dev),
+                                           cmd->device->lun);
+-                              sas_eh_defer_cmd(cmd);
++                              sas_eh_finish_cmd(cmd);
+                               sas_scsi_clear_queue_lu(work_q, cmd);
+                               goto Again;
+                       }
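
Pieced together from the hunks above, the merged helper in drivers/scsi/libsas/sas_scsi_host.c reads roughly as follows after the patch (a reconstruction for readability, comments condensed, not an additional change):

static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task = TO_SAS_TASK(cmd);

	/* only reached after an actual abort attempt */
	sas_end_task(cmd, task);

	if (dev_is_sata(dev)) {
		/* defer to libata so its EH can finish the ata qc */
		list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
		return;
	}

	/* finish the command and move it to the EH done list */
	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}

Every former caller of sas_eh_defer_cmd() now calls this single function, so SATA commands always take the libata path regardless of how EH reached them.
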
diff --git a/queue-4.4/scsi-sg-allocate-with-__gfp_zero-in-sg_build_indirect.patch b/queue-4.4/scsi-sg-allocate-with-__gfp_zero-in-sg_build_indirect.patch
new file mode 100644 (file)
index 0000000..a38ccfd
--- /dev/null
@@ -0,0 +1,35 @@
+From a45b599ad808c3c982fdcdc12b0b8611c2f92824 Mon Sep 17 00:00:00 2001
+From: Alexander Potapenko <glider@google.com>
+Date: Fri, 18 May 2018 16:23:18 +0200
+Subject: scsi: sg: allocate with __GFP_ZERO in sg_build_indirect()
+
+From: Alexander Potapenko <glider@google.com>
+
+commit a45b599ad808c3c982fdcdc12b0b8611c2f92824 upstream.
+
+This shall help avoid copying uninitialized memory to user space when
+calling ioctl(fd, SG_IO) with an empty command.
+
+Reported-by: syzbot+7d26fc1eea198488deab@syzkaller.appspotmail.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Potapenko <glider@google.com>
+Acked-by: Douglas Gilbert <dgilbert@interlog.com>
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/sg.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1903,7 +1903,7 @@ retry:
+               num = (rem_sz > scatter_elem_sz_prev) ?
+                       scatter_elem_sz_prev : rem_sz;
+-              schp->pages[k] = alloc_pages(gfp_mask, order);
++              schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
+               if (!schp->pages[k])
+                       goto out;
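
The underlying rule the one-liner applies is general: buffers that may reach user space only partially filled should be zeroed at allocation time. A minimal sketch of that pattern (illustrative helper, not from the patch):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative: allocate pages destined for user-visible I/O buffers.
 * __GFP_ZERO guarantees no stale kernel data can leak through a short
 * or empty transfer that is later copied to user space. */
static struct page *alloc_io_buffer_pages(unsigned int order)
{
	return alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
}

In sg_build_indirect() the same effect is obtained by OR-ing __GFP_ZERO into the gfp_mask already computed for the scatter list, as the hunk above shows.
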
diff --git a/queue-4.4/scsi-zfcp-fix-infinite-iteration-on-erp-ready-list.patch b/queue-4.4/scsi-zfcp-fix-infinite-iteration-on-erp-ready-list.patch
new file mode 100644 (file)
index 0000000..26ebcf7
--- /dev/null
@@ -0,0 +1,182 @@
+From fa89adba1941e4f3b213399b81732a5c12fd9131 Mon Sep 17 00:00:00 2001
+From: Jens Remus <jremus@linux.ibm.com>
+Date: Thu, 3 May 2018 13:52:47 +0200
+Subject: scsi: zfcp: fix infinite iteration on ERP ready list
+
+From: Jens Remus <jremus@linux.ibm.com>
+
+commit fa89adba1941e4f3b213399b81732a5c12fd9131 upstream.
+
+zfcp_erp_adapter_reopen() schedules blocking of all of the adapter's
+rports via zfcp_scsi_schedule_rports_block() and enqueues a reopen
+adapter ERP action via zfcp_erp_action_enqueue(). Both are separately
+processed asynchronously and concurrently.
+
+Blocking of rports is done in a kworker by zfcp_scsi_rport_work(). It
+calls zfcp_scsi_rport_block(), which then traces a DBF REC "scpdely" via
+zfcp_dbf_rec_trig().  zfcp_dbf_rec_trig() acquires the DBF REC spin lock
+and then iterates with list_for_each() over the adapter's ERP ready list
+without holding the ERP lock. This opens a race window in which the
+current list entry can be moved to another list, causing list_for_each()
+to iterate forever on the wrong list, as the erp_ready_head is never
+encountered as terminal condition.
+
+Meanwhile the ERP action can be processed in the ERP thread by
+zfcp_erp_thread(). It calls zfcp_erp_strategy(), which acquires the ERP
+lock and then calls zfcp_erp_action_to_running() to move the ERP action
+from the ready to the running list.  zfcp_erp_action_to_running() can
+move the ERP action using list_move() just during the aforementioned
+race window. It then traces a REC RUN "erator1" via zfcp_dbf_rec_run().
+zfcp_dbf_rec_run() tries to acquire the DBF REC spin lock. If this is
+held by the infinitely looping kworker, it effectively spins forever.
+
+Example Sequence Diagram:
+
+Process                ERP Thread             rport_work
+-------------------    -------------------    -------------------
+zfcp_erp_adapter_reopen()
+zfcp_erp_adapter_block()
+zfcp_scsi_schedule_rports_block()
+lock ERP                                      zfcp_scsi_rport_work()
+zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER)
+list_add_tail() on ready                      !(rport_task==RPORT_ADD)
+wake_up() ERP thread                          zfcp_scsi_rport_block()
+zfcp_dbf_rec_trig()    zfcp_erp_strategy()    zfcp_dbf_rec_trig()
+unlock ERP                                    lock DBF REC
+zfcp_erp_wait()        lock ERP
+|                      zfcp_erp_action_to_running()
+|                                             list_for_each() ready
+|                      list_move()              current entry
+|                        ready to running
+|                      zfcp_dbf_rec_run()       endless loop over running
+|                      zfcp_dbf_rec_run_lvl()
+|                      lock DBF REC spins forever
+
+Any adapter recovery can trigger this, such as setting the device offline
+or reboot.
+
+V4.9 commit 4eeaa4f3f1d6 ("zfcp: close window with unblocked rport
+during rport gone") introduced additional tracing of (un)blocking of
+rports. It missed that the adapter->erp_lock must be held when calling
+zfcp_dbf_rec_trig().
+
+This fix uses the approach formerly introduced by commit aa0fec62391c
+("[SCSI] zfcp: Fix sparse warning by providing new entry in dbf") that got
+later removed by commit ae0904f60fab ("[SCSI] zfcp: Redesign of the debug
+tracing for recovery actions.").
+
+Introduce zfcp_dbf_rec_trig_lock(), a wrapper for zfcp_dbf_rec_trig() that
+acquires and releases the adapter->erp_lock for read.
+
+Reported-by: Sebastian Ott <sebott@linux.ibm.com>
+Signed-off-by: Jens Remus <jremus@linux.ibm.com>
+Fixes: 4eeaa4f3f1d6 ("zfcp: close window with unblocked rport during rport gone")
+Cc: <stable@vger.kernel.org> # 2.6.32+
+Reviewed-by: Benjamin Block <bblock@linux.vnet.ibm.com>
+Signed-off-by: Steffen Maier <maier@linux.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/scsi/zfcp_dbf.c  |   23 ++++++++++++++++++++++-
+ drivers/s390/scsi/zfcp_ext.h  |    5 ++++-
+ drivers/s390/scsi/zfcp_scsi.c |   14 +++++++-------
+ 3 files changed, 33 insertions(+), 9 deletions(-)
+
+--- a/drivers/s390/scsi/zfcp_dbf.c
++++ b/drivers/s390/scsi/zfcp_dbf.c
+@@ -3,7 +3,7 @@
+  *
+  * Debug traces for zfcp.
+  *
+- * Copyright IBM Corp. 2002, 2017
++ * Copyright IBM Corp. 2002, 2018
+  */
+ #define KMSG_COMPONENT "zfcp"
+@@ -287,6 +287,27 @@ void zfcp_dbf_rec_trig(char *tag, struct
+       spin_unlock_irqrestore(&dbf->rec_lock, flags);
+ }
++/**
++ * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
++ * @tag: identifier for event
++ * @adapter: adapter on which the erp_action should run
++ * @port: remote port involved in the erp_action
++ * @sdev: scsi device involved in the erp_action
++ * @want: wanted erp_action
++ * @need: required erp_action
++ *
++ * The adapter->erp_lock must not be held.
++ */
++void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
++                          struct zfcp_port *port, struct scsi_device *sdev,
++                          u8 want, u8 need)
++{
++      unsigned long flags;
++
++      read_lock_irqsave(&adapter->erp_lock, flags);
++      zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
++      read_unlock_irqrestore(&adapter->erp_lock, flags);
++}
+ /**
+  * zfcp_dbf_rec_run_lvl - trace event related to running recovery
+--- a/drivers/s390/scsi/zfcp_ext.h
++++ b/drivers/s390/scsi/zfcp_ext.h
+@@ -3,7 +3,7 @@
+  *
+  * External function declarations.
+  *
+- * Copyright IBM Corp. 2002, 2016
++ * Copyright IBM Corp. 2002, 2018
+  */
+ #ifndef ZFCP_EXT_H
+@@ -34,6 +34,9 @@ extern int zfcp_dbf_adapter_register(str
+ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
+ extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
+                             struct zfcp_port *, struct scsi_device *, u8, u8);
++extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
++                                 struct zfcp_port *port,
++                                 struct scsi_device *sdev, u8 want, u8 need);
+ extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+ extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
+                                struct zfcp_erp_action *erp);
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -3,7 +3,7 @@
+  *
+  * Interface to Linux SCSI midlayer.
+  *
+- * Copyright IBM Corp. 2002, 2017
++ * Copyright IBM Corp. 2002, 2018
+  */
+ #define KMSG_COMPONENT "zfcp"
+@@ -616,9 +616,9 @@ static void zfcp_scsi_rport_register(str
+       ids.port_id = port->d_id;
+       ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+-      zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
+-                        ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+-                        ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
++      zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
++                             ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
++                             ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
+       rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
+       if (!rport) {
+               dev_err(&port->adapter->ccw_device->dev,
+@@ -640,9 +640,9 @@ static void zfcp_scsi_rport_block(struct
+       struct fc_rport *rport = port->rport;
+       if (rport) {
+-              zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
+-                                ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+-                                ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
++              zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
++                                     ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
++                                     ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
+               fc_remote_port_delete(rport);
+               port->rport = NULL;
+       }
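
The fix follows a general pattern: a helper that walks a lock-protected list must either require that the caller hold the lock or be reached through a wrapper that takes it. A condensed sketch with invented names (my_adapter, trace_ready_list) showing the shape of the zfcp_dbf_rec_trig()/zfcp_dbf_rec_trig_lock() pair:

#include <linux/spinlock.h>
#include <linux/list.h>

struct my_adapter {
	rwlock_t erp_lock;
	struct list_head erp_ready_head;
};

/* Caller must hold erp_lock: otherwise the current entry can be moved to
 * another list mid-walk and the loop never sees the list head again. */
static void trace_ready_list(struct my_adapter *ad)
{
	struct list_head *pos;

	list_for_each(pos, &ad->erp_ready_head)
		; /* inspect entries, e.g. to pick a trace level */
}

/* Wrapper for callers that do not already hold erp_lock. */
static void trace_ready_list_lock(struct my_adapter *ad)
{
	unsigned long flags;

	read_lock_irqsave(&ad->erp_lock, flags);
	trace_ready_list(ad);
	read_unlock_irqrestore(&ad->erp_lock, flags);
}
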
diff --git a/queue-4.4/series b/queue-4.4/series
index 8c26dcc93f29712b896b1ef71212c42ef537b674..54d4e782747de73d1024ac8e677c3fa00bc8d1b2 100644 (file)
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -81,3 +81,9 @@ s390-kernel-use-expoline-for-indirect-branches.patch
 s390-move-spectre-sysfs-attribute-code.patch
 s390-extend-expoline-to-bc-instructions.patch
 s390-use-expoline-thunks-in-the-bpf-jit.patch
+scsi-libsas-defer-ata-device-eh-commands-to-libata.patch
+scsi-sg-allocate-with-__gfp_zero-in-sg_build_indirect.patch
+scsi-zfcp-fix-infinite-iteration-on-erp-ready-list.patch
+dmaengine-ensure-dmaengine-helpers-check-valid-callback.patch
+time-fix-clock_monotonic_raw-sub-nanosecond-accounting.patch
+gpio-rcar-add-runtime-pm-handling-for-interrupts.patch
diff --git a/queue-4.4/time-fix-clock_monotonic_raw-sub-nanosecond-accounting.patch b/queue-4.4/time-fix-clock_monotonic_raw-sub-nanosecond-accounting.patch
new file mode 100644 (file)
index 0000000..5b11ab9
--- /dev/null
@@ -0,0 +1,113 @@
+From 3d88d56c5873f6eebe23e05c3da701960146b801 Mon Sep 17 00:00:00 2001
+From: John Stultz <john.stultz@linaro.org>
+Date: Thu, 8 Jun 2017 16:44:21 -0700
+Subject: time: Fix CLOCK_MONOTONIC_RAW sub-nanosecond accounting
+
+From: John Stultz <john.stultz@linaro.org>
+
+commit 3d88d56c5873f6eebe23e05c3da701960146b801 upstream.
+
+Due to how the MONOTONIC_RAW accumulation logic was handled,
+there is the potential for a 1ns discontinuity when we do
+accumulations. This small discontinuity has for the most part
+gone unnoticed, but since ARM64 enabled CLOCK_MONOTONIC_RAW
+in its vDSO clock_gettime implementation, we've seen failures
+with the inconsistency-check test in kselftest.
+
+This patch addresses the issue by using the same sub-ns
+accumulation handling that CLOCK_MONOTONIC uses, which avoids
+the issue for in-kernel users.
+
+Since the ARM64 vDSO implementation has its own clock_gettime
+calculation logic, this patch reduces the frequency of errors,
+but failures are still seen. The ARM64 vDSO will need to be
+updated to include the sub-nanosecond xtime_nsec values in its
+calculation for this issue to be completely fixed.
+
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Tested-by: Daniel Mentz <danielmentz@google.com>
+Cc: Prarit Bhargava <prarit@redhat.com>
+Cc: Kevin Brodsky <kevin.brodsky@arm.com>
+Cc: Richard Cochran <richardcochran@gmail.com>
+Cc: Stephen Boyd <stephen.boyd@linaro.org>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: "stable #4 . 8+" <stable@vger.kernel.org>
+Cc: Miroslav Lichvar <mlichvar@redhat.com>
+Link: http://lkml.kernel.org/r/1496965462-20003-3-git-send-email-john.stultz@linaro.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+[fabrizio: cherry-pick to 4.4. Kept cycle_t type for function
+logarithmic_accumulation local variable "interval". Dropped
+casting of "interval" variable]
+Signed-off-by: Fabrizio Castro <fabrizio.castro@bp.renesas.com>
+Signed-off-by: Biju Das <biju.das@bp.renesas.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/timekeeper_internal.h |    4 ++--
+ kernel/time/timekeeping.c           |   20 ++++++++++----------
+ 2 files changed, 12 insertions(+), 12 deletions(-)
+
+--- a/include/linux/timekeeper_internal.h
++++ b/include/linux/timekeeper_internal.h
+@@ -56,7 +56,7 @@ struct tk_read_base {
+  *                    interval.
+  * @xtime_remainder:  Shifted nano seconds left over when rounding
+  *                    @cycle_interval
+- * @raw_interval:     Raw nano seconds accumulated per NTP interval.
++ * @raw_interval:     Shifted raw nano seconds accumulated per NTP interval.
+  * @ntp_error:                Difference between accumulated time and NTP time in ntp
+  *                    shifted nano seconds.
+  * @ntp_error_shift:  Shift conversion between clock shifted nano seconds and
+@@ -97,7 +97,7 @@ struct timekeeper {
+       cycle_t                 cycle_interval;
+       u64                     xtime_interval;
+       s64                     xtime_remainder;
+-      u32                     raw_interval;
++      u64                     raw_interval;
+       /* The ntp_tick_length() value currently being used.
+        * This cached copy ensures we consistently apply the tick
+        * length for an entire tick, as ntp_tick_length may change
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -277,8 +277,7 @@ static void tk_setup_internals(struct ti
+       /* Go back from cycles -> shifted ns */
+       tk->xtime_interval = (u64) interval * clock->mult;
+       tk->xtime_remainder = ntpinterval - tk->xtime_interval;
+-      tk->raw_interval =
+-              ((u64) interval * clock->mult) >> clock->shift;
++      tk->raw_interval = interval * clock->mult;
+        /* if changing clocks, convert xtime_nsec shift units */
+       if (old_clock) {
+@@ -1767,7 +1766,7 @@ static cycle_t logarithmic_accumulation(
+                                               unsigned int *clock_set)
+ {
+       cycle_t interval = tk->cycle_interval << shift;
+-      u64 raw_nsecs;
++      u64 snsec_per_sec;
+       /* If the offset is smaller than a shifted interval, do nothing */
+       if (offset < interval)
+@@ -1782,14 +1781,15 @@ static cycle_t logarithmic_accumulation(
+       *clock_set |= accumulate_nsecs_to_secs(tk);
+       /* Accumulate raw time */
+-      raw_nsecs = (u64)tk->raw_interval << shift;
+-      raw_nsecs += tk->raw_time.tv_nsec;
+-      if (raw_nsecs >= NSEC_PER_SEC) {
+-              u64 raw_secs = raw_nsecs;
+-              raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
+-              tk->raw_time.tv_sec += raw_secs;
++      tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
++      tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
++      snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
++      while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
++              tk->tkr_raw.xtime_nsec -= snsec_per_sec;
++              tk->raw_time.tv_sec++;
+       }
+-      tk->raw_time.tv_nsec = raw_nsecs;
++      tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
++      tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
+       /* Accumulate error between NTP and clock interval */
+       tk->ntp_error += tk->ntp_tick << shift;
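
A stand-alone illustration (ordinary user-space C; the mult, shift and cycles_per_tick values are purely made up) of the sub-nanosecond bookkeeping the patch adopts: truncating interval * mult >> shift on every tick silently drops the fractional nanoseconds, while accumulating in clocksource-shifted units and converting only when reading keeps them:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* clocksource-style constants, values are illustrative only */
	const uint64_t mult = 699050667, shift = 31, cycles_per_tick = 1000000;
	uint64_t truncated_ns = 0, shifted_acc = 0;

	for (int tick = 0; tick < 1000; tick++) {
		/* old scheme: pre-truncated raw_interval accumulated in ns */
		truncated_ns += (cycles_per_tick * mult) >> shift;
		/* new scheme: accumulate shifted units, keep the remainder */
		shifted_acc += cycles_per_tick * mult;
	}

	printf("truncated: %llu ns\n", (unsigned long long)truncated_ns);
	printf("shifted  : %llu ns\n",
	       (unsigned long long)(shifted_acc >> shift));
	return 0;
}

The two totals drift apart by up to one nanosecond per accumulation step, which is the discontinuity the in-kernel change removes by carrying the remainder in tkr_raw.xtime_nsec.
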