--- /dev/null
+From foo@baz Thu Jun 15 15:00:53 CEST 2017
+From: "Y.C. Chen" <yc_chen@aspeedtech.com>
+Date: Thu, 26 Jan 2017 09:45:40 +0800
+Subject: drm/ast: Fixed system hanged if disable P2A
+
+From: "Y.C. Chen" <yc_chen@aspeedtech.com>
+
+
+[ Upstream commit 6c971c09f38704513c426ba6515f22fb3d6c87d5 ]
+
+The original ast driver will access some BMC configuration through P2A bridge
+that can be disabled since AST2300 and after.
+It will cause the system to hang if the P2A bridge is disabled.
+Here is the update to fix it.
+
+Signed-off-by: Y.C. Chen <yc_chen@aspeedtech.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/ast/ast_drv.h | 1
+ drivers/gpu/drm/ast/ast_main.c | 157 +++++++++++++++++++++--------------------
+ drivers/gpu/drm/ast/ast_post.c | 18 +++-
+ 3 files changed, 97 insertions(+), 79 deletions(-)
+
+--- a/drivers/gpu/drm/ast/ast_drv.h
++++ b/drivers/gpu/drm/ast/ast_drv.h
+@@ -113,6 +113,7 @@ struct ast_private {
+ struct ttm_bo_kmap_obj cache_kmap;
+ int next_cursor;
+ bool support_wide_screen;
++ bool DisableP2A;
+
+ enum ast_tx_chip tx_chip_type;
+ u8 dp501_maxclk;
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_de
+ } else
+ *need_post = false;
+
++ /* Check P2A Access */
++ ast->DisableP2A = true;
++ data = ast_read32(ast, 0xf004);
++ if (data != 0xFFFFFFFF)
++ ast->DisableP2A = false;
++
+ /* Check if we support wide screen */
+ switch (ast->chip) {
+ case AST1180:
+@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_de
+ ast->support_wide_screen = true;
+ else {
+ ast->support_wide_screen = false;
+- /* Read SCU7c (silicon revision register) */
+- ast_write32(ast, 0xf004, 0x1e6e0000);
+- ast_write32(ast, 0xf000, 0x1);
+- data = ast_read32(ast, 0x1207c);
+- data &= 0x300;
+- if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+- ast->support_wide_screen = true;
+- if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+- ast->support_wide_screen = true;
++ if (ast->DisableP2A == false) {
++ /* Read SCU7c (silicon revision register) */
++ ast_write32(ast, 0xf004, 0x1e6e0000);
++ ast_write32(ast, 0xf000, 0x1);
++ data = ast_read32(ast, 0x1207c);
++ data &= 0x300;
++ if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
++ ast->support_wide_screen = true;
++ if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
++ ast->support_wide_screen = true;
++ }
+ }
+ break;
+ }
+@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_
+ uint32_t data, data2;
+ uint32_t denum, num, div, ref_pll;
+
+- ast_write32(ast, 0xf004, 0x1e6e0000);
+- ast_write32(ast, 0xf000, 0x1);
+-
+-
+- ast_write32(ast, 0x10000, 0xfc600309);
+-
+- do {
+- if (pci_channel_offline(dev->pdev))
+- return -EIO;
+- } while (ast_read32(ast, 0x10000) != 0x01);
+- data = ast_read32(ast, 0x10004);
+-
+- if (data & 0x40)
++ if (ast->DisableP2A)
++ {
+ ast->dram_bus_width = 16;
++ ast->dram_type = AST_DRAM_1Gx16;
++ ast->mclk = 396;
++ }
+ else
+- ast->dram_bus_width = 32;
++ {
++ ast_write32(ast, 0xf004, 0x1e6e0000);
++ ast_write32(ast, 0xf000, 0x1);
++ data = ast_read32(ast, 0x10004);
++
++ if (data & 0x40)
++ ast->dram_bus_width = 16;
++ else
++ ast->dram_bus_width = 32;
++
++ if (ast->chip == AST2300 || ast->chip == AST2400) {
++ switch (data & 0x03) {
++ case 0:
++ ast->dram_type = AST_DRAM_512Mx16;
++ break;
++ default:
++ case 1:
++ ast->dram_type = AST_DRAM_1Gx16;
++ break;
++ case 2:
++ ast->dram_type = AST_DRAM_2Gx16;
++ break;
++ case 3:
++ ast->dram_type = AST_DRAM_4Gx16;
++ break;
++ }
++ } else {
++ switch (data & 0x0c) {
++ case 0:
++ case 4:
++ ast->dram_type = AST_DRAM_512Mx16;
++ break;
++ case 8:
++ if (data & 0x40)
++ ast->dram_type = AST_DRAM_1Gx16;
++ else
++ ast->dram_type = AST_DRAM_512Mx32;
++ break;
++ case 0xc:
++ ast->dram_type = AST_DRAM_1Gx32;
++ break;
++ }
++ }
+
+- if (ast->chip == AST2300 || ast->chip == AST2400) {
+- switch (data & 0x03) {
+- case 0:
+- ast->dram_type = AST_DRAM_512Mx16;
+- break;
+- default:
+- case 1:
+- ast->dram_type = AST_DRAM_1Gx16;
+- break;
+- case 2:
+- ast->dram_type = AST_DRAM_2Gx16;
+- break;
++ data = ast_read32(ast, 0x10120);
++ data2 = ast_read32(ast, 0x10170);
++ if (data2 & 0x2000)
++ ref_pll = 14318;
++ else
++ ref_pll = 12000;
++
++ denum = data & 0x1f;
++ num = (data & 0x3fe0) >> 5;
++ data = (data & 0xc000) >> 14;
++ switch (data) {
+ case 3:
+- ast->dram_type = AST_DRAM_4Gx16;
+- break;
+- }
+- } else {
+- switch (data & 0x0c) {
+- case 0:
+- case 4:
+- ast->dram_type = AST_DRAM_512Mx16;
++ div = 0x4;
+ break;
+- case 8:
+- if (data & 0x40)
+- ast->dram_type = AST_DRAM_1Gx16;
+- else
+- ast->dram_type = AST_DRAM_512Mx32;
++ case 2:
++ case 1:
++ div = 0x2;
+ break;
+- case 0xc:
+- ast->dram_type = AST_DRAM_1Gx32;
++ default:
++ div = 0x1;
+ break;
+ }
++ ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+ }
+-
+- data = ast_read32(ast, 0x10120);
+- data2 = ast_read32(ast, 0x10170);
+- if (data2 & 0x2000)
+- ref_pll = 14318;
+- else
+- ref_pll = 12000;
+-
+- denum = data & 0x1f;
+- num = (data & 0x3fe0) >> 5;
+- data = (data & 0xc000) >> 14;
+- switch (data) {
+- case 3:
+- div = 0x4;
+- break;
+- case 2:
+- case 1:
+- div = 0x2;
+- break;
+- default:
+- div = 0x1;
+- break;
+- }
+- ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+ return 0;
+ }
+
+--- a/drivers/gpu/drm/ast/ast_post.c
++++ b/drivers/gpu/drm/ast/ast_post.c
+@@ -375,12 +375,20 @@ void ast_post_gpu(struct drm_device *dev
+ ast_enable_mmio(dev);
+ ast_set_def_ext_reg(dev);
+
+- if (ast->chip == AST2300 || ast->chip == AST2400)
+- ast_init_dram_2300(dev);
+- else
+- ast_init_dram_reg(dev);
++ if (ast->DisableP2A == false)
++ {
++ if (ast->chip == AST2300 || ast->chip == AST2400)
++ ast_init_dram_2300(dev);
++ else
++ ast_init_dram_reg(dev);
+
+- ast_init_3rdtx(dev);
++ ast_init_3rdtx(dev);
++ }
++ else
++ {
++ if (ast->tx_chip_type != AST_TX_NONE)
++ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
++ }
+ }
+
+ /* AST 2300 DRAM settings */
--- /dev/null
+From foo@baz Thu Jun 15 15:00:52 CEST 2017
+From: Lyude Paul <lyude@redhat.com>
+Date: Wed, 11 Jan 2017 21:25:23 -0500
+Subject: drm/nouveau: Don't enabling polling twice on runtime resume
+
+From: Lyude Paul <lyude@redhat.com>
+
+
+[ Upstream commit cae9ff036eea577856d5b12860b4c79c5e71db4a ]
+
+As it turns out, on cards that actually have CRTCs on them we're already
+calling drm_kms_helper_poll_enable(drm_dev) from
+nouveau_display_resume() before we call it in
+nouveau_pmops_runtime_resume(). This leads us to accidentally trying to
+enable polling twice, which results in a potential deadlock between the
+RPM locks and drm_dev->mode_config.mutex if we end up trying to enable
+polling the second time while output_poll_execute is running and holding
+the mode_config lock. As such, make sure we only enable polling in
+nouveau_pmops_runtime_resume() if we need to.
+
+This fixes hangs observed on the ThinkPad W541
+
+Signed-off-by: Lyude <lyude@redhat.com>
+Cc: Hans de Goede <hdegoede@redhat.com>
+Cc: Kilian Singer <kilian.singer@quantumtechnology.info>
+Cc: Lukas Wunner <lukas@wunner.de>
+Cc: David Airlie <airlied@redhat.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_display.c | 3 ++-
+ drivers/gpu/drm/nouveau/nouveau_drm.c | 5 ++++-
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/nouveau_display.c
++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
+@@ -421,7 +421,8 @@ nouveau_display_init(struct drm_device *
+ return ret;
+
+ /* enable polling for external displays */
+- drm_kms_helper_poll_enable(dev);
++ if (!dev->mode_config.poll_enabled)
++ drm_kms_helper_poll_enable(dev);
+
+ /* enable hotplug interrupts */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -761,7 +761,10 @@ nouveau_pmops_runtime_resume(struct devi
+ pci_set_master(pdev);
+
+ ret = nouveau_do_resume(drm_dev, true);
+- drm_kms_helper_poll_enable(drm_dev);
++
++ if (!drm_dev->mode_config.poll_enabled)
++ drm_kms_helper_poll_enable(drm_dev);
++
+ /* do magic */
+ nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
+ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
--- /dev/null
+From foo@baz Thu Jun 15 15:00:52 CEST 2017
+From: Peter Ujfalusi <peter.ujfalusi@ti.com>
+Date: Mon, 15 May 2017 12:04:31 +0300
+Subject: drm/nouveau: Fix drm poll_helper handling
+
+From: Peter Ujfalusi <peter.ujfalusi@ti.com>
+
+
+[ Upstream commit 9a2eba337cacefc95b97c2726e3efdd435b3460e ]
+
+Commit cae9ff036eea effectively disabled the drm poll_helper by checking
+the wrong flag to see if the driver should enable the poll or not:
+mode_config.poll_enabled is only set to true by poll_init and it is not
+indicating if the poll is enabled or not.
+nouveau_display_create() will initialize the poll and going to disable it
+right away. After poll_init() the mode_config.poll_enabled will be true,
+but the poll itself is disabled.
+
+To avoid the race caused by calling the poll_enable() from different paths,
+this patch will enable the poll from one place, in the
+nouveau_display_hpd_work().
+
+In case the pm_runtime is disabled we will enable the poll in
+nouveau_drm_load() once.
+
+Fixes: cae9ff036eea ("drm/nouveau: Don't enabling polling twice on runtime resume")
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
+Reviewed-by: Lyude <lyude@redhat.com>
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_display.c | 6 ++----
+ drivers/gpu/drm/nouveau/nouveau_drm.c | 6 +++---
+ 2 files changed, 5 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/nouveau_display.c
++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
+@@ -367,6 +367,8 @@ nouveau_display_hpd_work(struct work_str
+ pm_runtime_get_sync(drm->dev->dev);
+
+ drm_helper_hpd_irq_event(drm->dev);
++ /* enable polling for external displays */
++ drm_kms_helper_poll_enable(drm->dev);
+
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_sync(drm->dev->dev);
+@@ -420,10 +422,6 @@ nouveau_display_init(struct drm_device *
+ if (ret)
+ return ret;
+
+- /* enable polling for external displays */
+- if (!dev->mode_config.poll_enabled)
+- drm_kms_helper_poll_enable(dev);
+-
+ /* enable hotplug interrupts */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -483,6 +483,9 @@ nouveau_drm_load(struct drm_device *dev,
+ pm_runtime_allow(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put(dev->dev);
++ } else {
++ /* enable polling for external displays */
++ drm_kms_helper_poll_enable(dev);
+ }
+ return 0;
+
+@@ -762,9 +765,6 @@ nouveau_pmops_runtime_resume(struct devi
+
+ ret = nouveau_do_resume(drm_dev, true);
+
+- if (!drm_dev->mode_config.poll_enabled)
+- drm_kms_helper_poll_enable(drm_dev);
+-
+ /* do magic */
+ nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
+ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
--- /dev/null
+From foo@baz Thu Jun 15 15:00:53 CEST 2017
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Thu, 26 Jan 2017 15:14:52 -0500
+Subject: nfs: Fix "Don't increment lock sequence ID after NFS4ERR_MOVED"
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+
+[ Upstream commit 406dab8450ec76eca88a1af2fc15d18a2b36ca49 ]
+
+Lock sequence IDs are bumped in decode_lock by calling
+nfs_increment_seqid(). nfs_increment_seqid() does not use the
+seqid_mutating_err() function fixed in commit 059aa7348241 ("Don't
+increment lock sequence ID after NFS4ERR_MOVED").
+
+Fixes: 059aa7348241 ("Don't increment lock sequence ID after ...")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Tested-by: Xuan Qi <xuan.qi@oracle.com>
+Cc: stable@vger.kernel.org # v3.7+
+Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfs/nfs4state.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1079,6 +1079,7 @@ static void nfs_increment_seqid(int stat
+ case -NFS4ERR_BADXDR:
+ case -NFS4ERR_RESOURCE:
+ case -NFS4ERR_NOFILEHANDLE:
++ case -NFS4ERR_MOVED:
+ /* Non-seqid mutating errors */
+ return;
+ };
--- /dev/null
+From foo@baz Thu Jun 15 15:00:53 CEST 2017
+From: Parav Pandit <parav@mellanox.com>
+Date: Thu, 19 Jan 2017 09:55:08 -0600
+Subject: nvmet-rdma: Fix missing dma sync to nvme data structures
+
+From: Parav Pandit <parav@mellanox.com>
+
+
+[ Upstream commit 748ff8408f8e208f279ba221e5c12612fbb4dddb ]
+
+This patch performs dma sync operations on nvme_command
+and nvme_completion.
+
+nvme_command is synced
+(a) on receiving of the recv queue completion for cpu access.
+(b) before posting recv wqe back to rdma adapter for device access.
+
+nvme_completion is synced
+(a) on receiving of the recv queue completion of associated
+nvme_command for cpu access.
+(b) before posting send wqe to rdma adapter for device access.
+
+This patch is generated for git://git.infradead.org/nvme-fabrics.git
+Branch: nvmf-4.10
+
+Signed-off-by: Parav Pandit <parav@mellanox.com>
+Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/target/rdma.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct n
+ {
+ struct ib_recv_wr *bad_wr;
+
++ ib_dma_sync_single_for_device(ndev->device,
++ cmd->sge[0].addr, cmd->sge[0].length,
++ DMA_FROM_DEVICE);
++
+ if (ndev->srq)
+ return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
+ return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
+@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(st
+ first_wr = &rsp->send_wr;
+
+ nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
++
++ ib_dma_sync_single_for_device(rsp->queue->dev->device,
++ rsp->send_sge.addr, rsp->send_sge.length,
++ DMA_TO_DEVICE);
++
+ if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
+ pr_err("sending cmd response failed\n");
+ nvmet_rdma_release_rsp(rsp);
+@@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(st
+ cmd->n_rdma = 0;
+ cmd->req.port = queue->port;
+
++
++ ib_dma_sync_single_for_cpu(queue->dev->device,
++ cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
++ DMA_FROM_DEVICE);
++ ib_dma_sync_single_for_cpu(queue->dev->device,
++ cmd->send_sge.addr, cmd->send_sge.length,
++ DMA_TO_DEVICE);
++
+ if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
+ &queue->nvme_sq, &nvmet_rdma_ops))
+ return;
--- /dev/null
+From foo@baz Thu Jun 15 15:00:53 CEST 2017
+From: hayeswang <hayeswang@realtek.com>
+Date: Thu, 26 Jan 2017 09:38:31 +0800
+Subject: r8152: avoid start_xmit to call napi_schedule during autosuspend
+
+From: hayeswang <hayeswang@realtek.com>
+
+
+[ Upstream commit 26afec39306926654e9cd320f19bbf3685bb0997 ]
+
+Adjust the setting of the flag of SELECTIVE_SUSPEND to prevent start_xmit()
+from calling napi_schedule() directly during runtime suspend.
+
+After calling napi_disable() or clearing the flag of WORK_ENABLE,
+scheduling the napi is useless.
+
+Signed-off-by: Hayes Wang <hayeswang@realtek.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/r8152.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -3583,10 +3583,15 @@ static int rtl8152_rumtime_suspend(struc
+ struct net_device *netdev = tp->netdev;
+ int ret = 0;
+
++ set_bit(SELECTIVE_SUSPEND, &tp->flags);
++ smp_mb__after_atomic();
++
+ if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
+ u32 rcr = 0;
+
+ if (delay_autosuspend(tp)) {
++ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
++ smp_mb__after_atomic();
+ ret = -EBUSY;
+ goto out1;
+ }
+@@ -3603,6 +3608,8 @@ static int rtl8152_rumtime_suspend(struc
+ if (!(ocp_data & RXFIFO_EMPTY)) {
+ rxdy_gated_en(tp, false);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
++ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
++ smp_mb__after_atomic();
+ ret = -EBUSY;
+ goto out1;
+ }
+@@ -3622,8 +3629,6 @@ static int rtl8152_rumtime_suspend(struc
+ }
+ }
+
+- set_bit(SELECTIVE_SUSPEND, &tp->flags);
+-
+ out1:
+ return ret;
+ }
+@@ -3679,12 +3684,13 @@ static int rtl8152_resume(struct usb_int
+ if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ tp->rtl_ops.autosuspend_en(tp, false);
+- clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ napi_disable(&tp->napi);
+ set_bit(WORK_ENABLE, &tp->flags);
+ if (netif_carrier_ok(tp->netdev))
+ rtl_start_rx(tp);
+ napi_enable(&tp->napi);
++ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
++ smp_mb__after_atomic();
+ } else {
+ tp->rtl_ops.up(tp);
+ netif_carrier_off(tp->netdev);
--- /dev/null
+From foo@baz Thu Jun 15 15:00:53 CEST 2017
+From: hayeswang <hayeswang@realtek.com>
+Date: Thu, 26 Jan 2017 09:38:34 +0800
+Subject: r8152: check rx after napi is enabled
+
+From: hayeswang <hayeswang@realtek.com>
+
+
+[ Upstream commit 7489bdadb7d17d3c81e39b85688500f700beb790 ]
+
+Schedule the napi after napi_enable() for rx, if it is necessary.
+
+If the rx is completed when napi is disabled, the scheduling of napi
+would be lost. Then, no one handles the rx packet until next napi
+is scheduled.
+
+Signed-off-by: Hayes Wang <hayeswang@realtek.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/r8152.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -32,7 +32,7 @@
+ #define NETNEXT_VERSION "08"
+
+ /* Information for net */
+-#define NET_VERSION "7"
++#define NET_VERSION "8"
+
+ #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
+ #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
+@@ -3552,6 +3552,9 @@ static int rtl8152_post_reset(struct usb
+
+ napi_enable(&tp->napi);
+
++ if (!list_empty(&tp->rx_done))
++ napi_schedule(&tp->napi);
++
+ return 0;
+ }
+
+@@ -3691,6 +3694,8 @@ static int rtl8152_resume(struct usb_int
+ napi_enable(&tp->napi);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
++ if (!list_empty(&tp->rx_done))
++ napi_schedule(&tp->napi);
+ } else {
+ tp->rtl_ops.up(tp);
+ netif_carrier_off(tp->netdev);
--- /dev/null
+From foo@baz Thu Jun 15 15:00:53 CEST 2017
+From: hayeswang <hayeswang@realtek.com>
+Date: Fri, 20 Jan 2017 14:33:55 +0800
+Subject: r8152: fix rtl8152_post_reset function
+
+From: hayeswang <hayeswang@realtek.com>
+
+
+[ Upstream commit 2c561b2b728ca4013e76d6439bde2c137503745e ]
+
+The rtl8152_post_reset() should submit rx urb and interrupt transfer,
+otherwise the rx wouldn't work and the linking change couldn't be
+detected.
+
+Signed-off-by: Hayes Wang <hayeswang@realtek.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/r8152.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -3548,12 +3548,14 @@ static int rtl8152_post_reset(struct usb
+ if (netif_carrier_ok(netdev)) {
+ mutex_lock(&tp->control);
+ tp->rtl_ops.enable(tp);
++ rtl_start_rx(tp);
+ rtl8152_set_rx_mode(netdev);
+ mutex_unlock(&tp->control);
+ netif_wake_queue(netdev);
+ }
+
+ napi_enable(&tp->napi);
++ usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(&tp->napi);
--- /dev/null
+From foo@baz Thu Jun 15 15:00:53 CEST 2017
+From: hayeswang <hayeswang@realtek.com>
+Date: Thu, 26 Jan 2017 09:38:33 +0800
+Subject: r8152: re-schedule napi for tx
+
+From: hayeswang <hayeswang@realtek.com>
+
+
+[ Upstream commit 248b213ad908b88db15941202ef7cb7eb137c1a0 ]
+
+Re-schedule napi after napi_complete() for tx, if it is necessary.
+
+In r8152_poll(), if the tx is completed after tx_bottom() and before
+napi_complete(), the scheduling of napi would be lost. Then, no
+one handles the next tx until the next napi_schedule() is called.
+
+Signed-off-by: Hayes Wang <hayeswang@realtek.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/r8152.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct
+ napi_complete(napi);
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(napi);
++ else if (!skb_queue_empty(&tp->tx_queue) &&
++ !list_empty(&tp->tx_free))
++ napi_schedule(napi);
+ }
+
+ return work_done;
--- /dev/null
+From foo@baz Thu Jun 15 15:00:53 CEST 2017
+From: Kazuya Mizuguchi <kazuya.mizuguchi.ks@renesas.com>
+Date: Thu, 26 Jan 2017 14:29:27 +0100
+Subject: ravb: unmap descriptors when freeing rings
+
+From: Kazuya Mizuguchi <kazuya.mizuguchi.ks@renesas.com>
+
+
+[ Upstream commit a47b70ea86bdeb3091341f5ae3ef580f1a1ad822 ]
+
+"swiotlb buffer is full" errors occur after repeated initialisation of a
+device - f.e. suspend/resume or ip link set up/down. This is because memory
+mapped using dma_map_single() in ravb_ring_format() and ravb_start_xmit()
+is not released. Resolve this problem by unmapping descriptors when
+freeing rings.
+
+Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper")
+Signed-off-by: Kazuya Mizuguchi <kazuya.mizuguchi.ks@renesas.com>
+[simon: reworked]
+Signed-off-by: Simon Horman <horms+renesas@verge.net.au>
+Acked-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+
+Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/renesas/ravb_main.c | 112 +++++++++++++++++--------------
+ 1 file changed, 64 insertions(+), 48 deletions(-)
+
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -179,6 +179,49 @@ static struct mdiobb_ops bb_ops = {
+ .get_mdio_data = ravb_get_mdio_data,
+ };
+
++/* Free TX skb function for AVB-IP */
++static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
++{
++ struct ravb_private *priv = netdev_priv(ndev);
++ struct net_device_stats *stats = &priv->stats[q];
++ struct ravb_tx_desc *desc;
++ int free_num = 0;
++ int entry;
++ u32 size;
++
++ for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
++ bool txed;
++
++ entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
++ NUM_TX_DESC);
++ desc = &priv->tx_ring[q][entry];
++ txed = desc->die_dt == DT_FEMPTY;
++ if (free_txed_only && !txed)
++ break;
++ /* Descriptor type must be checked before all other reads */
++ dma_rmb();
++ size = le16_to_cpu(desc->ds_tagl) & TX_DS;
++ /* Free the original skb. */
++ if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
++ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
++ size, DMA_TO_DEVICE);
++ /* Last packet descriptor? */
++ if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
++ entry /= NUM_TX_DESC;
++ dev_kfree_skb_any(priv->tx_skb[q][entry]);
++ priv->tx_skb[q][entry] = NULL;
++ if (txed)
++ stats->tx_packets++;
++ }
++ free_num++;
++ }
++ if (txed)
++ stats->tx_bytes += size;
++ desc->die_dt = DT_EEMPTY;
++ }
++ return free_num;
++}
++
+ /* Free skb's and DMA buffers for Ethernet AVB */
+ static void ravb_ring_free(struct net_device *ndev, int q)
+ {
+@@ -194,19 +237,21 @@ static void ravb_ring_free(struct net_de
+ kfree(priv->rx_skb[q]);
+ priv->rx_skb[q] = NULL;
+
+- /* Free TX skb ringbuffer */
+- if (priv->tx_skb[q]) {
+- for (i = 0; i < priv->num_tx_ring[q]; i++)
+- dev_kfree_skb(priv->tx_skb[q][i]);
+- }
+- kfree(priv->tx_skb[q]);
+- priv->tx_skb[q] = NULL;
+-
+ /* Free aligned TX buffers */
+ kfree(priv->tx_align[q]);
+ priv->tx_align[q] = NULL;
+
+ if (priv->rx_ring[q]) {
++ for (i = 0; i < priv->num_rx_ring[q]; i++) {
++ struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
++
++ if (!dma_mapping_error(ndev->dev.parent,
++ le32_to_cpu(desc->dptr)))
++ dma_unmap_single(ndev->dev.parent,
++ le32_to_cpu(desc->dptr),
++ PKT_BUF_SZ,
++ DMA_FROM_DEVICE);
++ }
+ ring_size = sizeof(struct ravb_ex_rx_desc) *
+ (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
+@@ -215,12 +260,20 @@ static void ravb_ring_free(struct net_de
+ }
+
+ if (priv->tx_ring[q]) {
++ ravb_tx_free(ndev, q, false);
++
+ ring_size = sizeof(struct ravb_tx_desc) *
+ (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
+ priv->tx_desc_dma[q]);
+ priv->tx_ring[q] = NULL;
+ }
++
++ /* Free TX skb ringbuffer.
++ * SKBs are freed by ravb_tx_free() call above.
++ */
++ kfree(priv->tx_skb[q]);
++ priv->tx_skb[q] = NULL;
+ }
+
+ /* Format skb and descriptor buffer for Ethernet AVB */
+@@ -431,44 +484,6 @@ static int ravb_dmac_init(struct net_dev
+ return 0;
+ }
+
+-/* Free TX skb function for AVB-IP */
+-static int ravb_tx_free(struct net_device *ndev, int q)
+-{
+- struct ravb_private *priv = netdev_priv(ndev);
+- struct net_device_stats *stats = &priv->stats[q];
+- struct ravb_tx_desc *desc;
+- int free_num = 0;
+- int entry;
+- u32 size;
+-
+- for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+- entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+- NUM_TX_DESC);
+- desc = &priv->tx_ring[q][entry];
+- if (desc->die_dt != DT_FEMPTY)
+- break;
+- /* Descriptor type must be checked before all other reads */
+- dma_rmb();
+- size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+- /* Free the original skb. */
+- if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+- size, DMA_TO_DEVICE);
+- /* Last packet descriptor? */
+- if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+- entry /= NUM_TX_DESC;
+- dev_kfree_skb_any(priv->tx_skb[q][entry]);
+- priv->tx_skb[q][entry] = NULL;
+- stats->tx_packets++;
+- }
+- free_num++;
+- }
+- stats->tx_bytes += size;
+- desc->die_dt = DT_EEMPTY;
+- }
+- return free_num;
+-}
+-
+ static void ravb_get_tx_tstamp(struct net_device *ndev)
+ {
+ struct ravb_private *priv = netdev_priv(ndev);
+@@ -902,7 +917,7 @@ static int ravb_poll(struct napi_struct
+ spin_lock_irqsave(&priv->lock, flags);
+ /* Clear TX interrupt */
+ ravb_write(ndev, ~mask, TIS);
+- ravb_tx_free(ndev, q);
++ ravb_tx_free(ndev, q, true);
+ netif_wake_subqueue(ndev, q);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+@@ -1571,7 +1586,8 @@ static netdev_tx_t ravb_start_xmit(struc
+
+ priv->cur_tx[q] += NUM_TX_DESC;
+ if (priv->cur_tx[q] - priv->dirty_tx[q] >
+- (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
++ (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
++ !ravb_tx_free(ndev, q, true))
+ netif_stop_subqueue(ndev, q);
+
+ exit:
drm-nouveau-intercept-acpi_video_notify_probe.patch
drm-nouveau-rename-acpi_work-to-hpd_work.patch
drm-nouveau-handle-fbcon-suspend-resume-in-seperate-worker.patch
+drm-nouveau-don-t-enabling-polling-twice-on-runtime-resume.patch
+drm-nouveau-fix-drm-poll_helper-handling.patch
+drm-ast-fixed-system-hanged-if-disable-p2a.patch
+ravb-unmap-descriptors-when-freeing-rings.patch
+nfs-fix-don-t-increment-lock-sequence-id-after-nfs4err_moved.patch
+nvmet-rdma-fix-missing-dma-sync-to-nvme-data-structures.patch
+r8152-avoid-start_xmit-to-call-napi_schedule-during-autosuspend.patch
+r8152-check-rx-after-napi-is-enabled.patch
+r8152-re-schedule-napi-for-tx.patch
+r8152-fix-rtl8152_post_reset-function.patch