From: Greg Kroah-Hartman Date: Thu, 15 Jun 2017 13:03:21 +0000 (+0200) Subject: 4.4-stable patches X-Git-Tag: v4.9.33~11 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=989cf03629199e0f98a60543ab5a9441bd4602da;p=thirdparty%2Fkernel%2Fstable-queue.git 4.4-stable patches added patches: drm-ast-fixed-system-hanged-if-disable-p2a.patch drm-nouveau-don-t-enabling-polling-twice-on-runtime-resume.patch drm-nouveau-fence-g84-protect-against-concurrent-access-to-semaphore-buffers.patch gianfar-synchronize-dma-api-usage-by-free_skb_rx_queue-w-gfar_new_page.patch net-adaptec-starfire-add-checks-for-dma-mapping-errors.patch net-mlx4_core-avoid-command-timeouts-during-vf-driver-device-shutdown.patch nfs-fix-don-t-increment-lock-sequence-id-after-nfs4err_moved.patch parisc-parport_gsc-fixes-for-printk-continuation-lines.patch pinctrl-berlin-bg4ct-fix-the-value-for-sd1a-of-pin-scrd0_crd_pres.patch r8152-fix-rtl8152_post_reset-function.patch r8152-re-schedule-napi-for-tx.patch ravb-unmap-descriptors-when-freeing-rings.patch --- diff --git a/queue-4.4/drm-ast-fixed-system-hanged-if-disable-p2a.patch b/queue-4.4/drm-ast-fixed-system-hanged-if-disable-p2a.patch new file mode 100644 index 00000000000..33f42c86821 --- /dev/null +++ b/queue-4.4/drm-ast-fixed-system-hanged-if-disable-p2a.patch @@ -0,0 +1,252 @@ +From foo@baz Thu Jun 15 15:01:15 CEST 2017 +From: "Y.C. Chen" +Date: Thu, 26 Jan 2017 09:45:40 +0800 +Subject: drm/ast: Fixed system hanged if disable P2A + +From: "Y.C. Chen" + + +[ Upstream commit 6c971c09f38704513c426ba6515f22fb3d6c87d5 ] + +The original ast driver will access some BMC configuration through P2A bridge +that can be disabled since AST2300 and after. +It will cause system hanged if P2A bridge is disabled. +Here is the update to fix it. + +Signed-off-by: Y.C. 
Chen +Signed-off-by: Dave Airlie +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/ast/ast_drv.h | 1 + drivers/gpu/drm/ast/ast_main.c | 157 +++++++++++++++++++++-------------------- + drivers/gpu/drm/ast/ast_post.c | 18 +++- + 3 files changed, 97 insertions(+), 79 deletions(-) + +--- a/drivers/gpu/drm/ast/ast_drv.h ++++ b/drivers/gpu/drm/ast/ast_drv.h +@@ -113,6 +113,7 @@ struct ast_private { + struct ttm_bo_kmap_obj cache_kmap; + int next_cursor; + bool support_wide_screen; ++ bool DisableP2A; + + enum ast_tx_chip tx_chip_type; + u8 dp501_maxclk; +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_de + } else + *need_post = false; + ++ /* Check P2A Access */ ++ ast->DisableP2A = true; ++ data = ast_read32(ast, 0xf004); ++ if (data != 0xFFFFFFFF) ++ ast->DisableP2A = false; ++ + /* Check if we support wide screen */ + switch (ast->chip) { + case AST1180: +@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_de + ast->support_wide_screen = true; + else { + ast->support_wide_screen = false; +- /* Read SCU7c (silicon revision register) */ +- ast_write32(ast, 0xf004, 0x1e6e0000); +- ast_write32(ast, 0xf000, 0x1); +- data = ast_read32(ast, 0x1207c); +- data &= 0x300; +- if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ +- ast->support_wide_screen = true; +- if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ +- ast->support_wide_screen = true; ++ if (ast->DisableP2A == false) { ++ /* Read SCU7c (silicon revision register) */ ++ ast_write32(ast, 0xf004, 0x1e6e0000); ++ ast_write32(ast, 0xf000, 0x1); ++ data = ast_read32(ast, 0x1207c); ++ data &= 0x300; ++ if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ ++ ast->support_wide_screen = true; ++ if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ ++ ast->support_wide_screen = true; ++ } + } + break; + } +@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_ + uint32_t data, data2; + uint32_t denum, num, div, ref_pll; + +- ast_write32(ast, 0xf004, 0x1e6e0000); +- ast_write32(ast, 0xf000, 0x1); +- +- +- ast_write32(ast, 0x10000, 0xfc600309); +- +- do { +- if (pci_channel_offline(dev->pdev)) +- return -EIO; +- } while (ast_read32(ast, 0x10000) != 0x01); +- data = ast_read32(ast, 0x10004); +- +- if (data & 0x40) ++ if (ast->DisableP2A) ++ { + ast->dram_bus_width = 16; ++ ast->dram_type = AST_DRAM_1Gx16; ++ ast->mclk = 396; ++ } + else +- ast->dram_bus_width = 32; ++ { ++ ast_write32(ast, 0xf004, 0x1e6e0000); ++ ast_write32(ast, 0xf000, 0x1); ++ data = ast_read32(ast, 0x10004); ++ ++ if (data & 0x40) ++ ast->dram_bus_width = 16; ++ else ++ ast->dram_bus_width = 32; ++ ++ if (ast->chip == AST2300 || ast->chip == AST2400) { ++ switch (data & 0x03) { ++ case 0: ++ ast->dram_type = AST_DRAM_512Mx16; ++ break; ++ default: ++ case 1: ++ ast->dram_type = AST_DRAM_1Gx16; ++ break; ++ case 2: ++ ast->dram_type = AST_DRAM_2Gx16; ++ break; ++ case 3: ++ ast->dram_type = AST_DRAM_4Gx16; ++ break; ++ } ++ } else { ++ switch (data & 0x0c) { ++ case 0: ++ case 4: ++ ast->dram_type = AST_DRAM_512Mx16; ++ break; ++ case 8: ++ if (data & 0x40) ++ ast->dram_type = AST_DRAM_1Gx16; ++ else ++ ast->dram_type = AST_DRAM_512Mx32; ++ break; ++ case 0xc: ++ ast->dram_type = AST_DRAM_1Gx32; ++ break; ++ } ++ } + +- if (ast->chip == AST2300 || ast->chip == AST2400) { +- switch (data & 0x03) { +- case 0: +- ast->dram_type = AST_DRAM_512Mx16; +- break; +- default: +- case 1: +- ast->dram_type = AST_DRAM_1Gx16; +- break; +- case 
2: +- ast->dram_type = AST_DRAM_2Gx16; +- break; ++ data = ast_read32(ast, 0x10120); ++ data2 = ast_read32(ast, 0x10170); ++ if (data2 & 0x2000) ++ ref_pll = 14318; ++ else ++ ref_pll = 12000; ++ ++ denum = data & 0x1f; ++ num = (data & 0x3fe0) >> 5; ++ data = (data & 0xc000) >> 14; ++ switch (data) { + case 3: +- ast->dram_type = AST_DRAM_4Gx16; +- break; +- } +- } else { +- switch (data & 0x0c) { +- case 0: +- case 4: +- ast->dram_type = AST_DRAM_512Mx16; ++ div = 0x4; + break; +- case 8: +- if (data & 0x40) +- ast->dram_type = AST_DRAM_1Gx16; +- else +- ast->dram_type = AST_DRAM_512Mx32; ++ case 2: ++ case 1: ++ div = 0x2; + break; +- case 0xc: +- ast->dram_type = AST_DRAM_1Gx32; ++ default: ++ div = 0x1; + break; + } ++ ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); + } +- +- data = ast_read32(ast, 0x10120); +- data2 = ast_read32(ast, 0x10170); +- if (data2 & 0x2000) +- ref_pll = 14318; +- else +- ref_pll = 12000; +- +- denum = data & 0x1f; +- num = (data & 0x3fe0) >> 5; +- data = (data & 0xc000) >> 14; +- switch (data) { +- case 3: +- div = 0x4; +- break; +- case 2: +- case 1: +- div = 0x2; +- break; +- default: +- div = 0x1; +- break; +- } +- ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); + return 0; + } + +--- a/drivers/gpu/drm/ast/ast_post.c ++++ b/drivers/gpu/drm/ast/ast_post.c +@@ -375,12 +375,20 @@ void ast_post_gpu(struct drm_device *dev + ast_enable_mmio(dev); + ast_set_def_ext_reg(dev); + +- if (ast->chip == AST2300 || ast->chip == AST2400) +- ast_init_dram_2300(dev); +- else +- ast_init_dram_reg(dev); ++ if (ast->DisableP2A == false) ++ { ++ if (ast->chip == AST2300 || ast->chip == AST2400) ++ ast_init_dram_2300(dev); ++ else ++ ast_init_dram_reg(dev); + +- ast_init_3rdtx(dev); ++ ast_init_3rdtx(dev); ++ } ++ else ++ { ++ if (ast->tx_chip_type != AST_TX_NONE) ++ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ ++ } + } + + /* AST 2300 DRAM settings */ diff --git a/queue-4.4/drm-nouveau-don-t-enabling-polling-twice-on-runtime-resume.patch b/queue-4.4/drm-nouveau-don-t-enabling-polling-twice-on-runtime-resume.patch new file mode 100644 index 00000000000..3cf0e5f5e1c --- /dev/null +++ b/queue-4.4/drm-nouveau-don-t-enabling-polling-twice-on-runtime-resume.patch @@ -0,0 +1,61 @@ +From foo@baz Thu Jun 15 15:01:15 CEST 2017 +From: Lyude Paul +Date: Wed, 11 Jan 2017 21:25:23 -0500 +Subject: drm/nouveau: Don't enabling polling twice on runtime resume + +From: Lyude Paul + + +[ Upstream commit cae9ff036eea577856d5b12860b4c79c5e71db4a ] + +As it turns out, on cards that actually have CRTCs on them we're already +calling drm_kms_helper_poll_enable(drm_dev) from +nouveau_display_resume() before we call it in +nouveau_pmops_runtime_resume(). This leads us to accidentally trying to +enable polling twice, which results in a potential deadlock between the +RPM locks and drm_dev->mode_config.mutex if we end up trying to enable +polling the second time while output_poll_execute is running and holding +the mode_config lock. As such, make sure we only enable polling in +nouveau_pmops_runtime_resume() if we need to. 
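+
+To make the ordering concrete, here is a stand-alone sketch of the guard the
+patch adds (all names below are invented for illustration; this is not the
+nouveau/DRM code itself, and the real helper takes more locks than shown):
+
+    #include <pthread.h>
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    /* Hypothetical device: a lock plus the flag the fix checks. */
+    struct fake_dev {
+            pthread_mutex_t mode_config_lock;
+            bool poll_enabled;
+    };
+
+    /* Stand-in for drm_kms_helper_poll_enable(): takes the lock, sets the flag. */
+    static void poll_enable(struct fake_dev *dev)
+    {
+            pthread_mutex_lock(&dev->mode_config_lock);
+            dev->poll_enabled = true;
+            pthread_mutex_unlock(&dev->mode_config_lock);
+    }
+
+    /* Resume path: only call the lock-taking helper when polling is still off. */
+    static void runtime_resume(struct fake_dev *dev)
+    {
+            if (!dev->poll_enabled)
+                    poll_enable(dev);
+    }
+
+    int main(void)
+    {
+            struct fake_dev dev = { PTHREAD_MUTEX_INITIALIZER, false };
+
+            poll_enable(&dev);      /* display init already enabled polling */
+            runtime_resume(&dev);   /* second caller backs off instead of re-locking */
+            printf("poll_enabled=%d\n", dev.poll_enabled);
+            return 0;
+    }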
+ +This fixes hangs observed on the ThinkPad W541 + +Signed-off-by: Lyude +Cc: Hans de Goede +Cc: Kilian Singer +Cc: Lukas Wunner +Cc: David Airlie +Signed-off-by: Dave Airlie +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/nouveau/nouveau_display.c | 3 ++- + drivers/gpu/drm/nouveau/nouveau_drm.c | 5 ++++- + 2 files changed, 6 insertions(+), 2 deletions(-) + +--- a/drivers/gpu/drm/nouveau/nouveau_display.c ++++ b/drivers/gpu/drm/nouveau/nouveau_display.c +@@ -370,7 +370,8 @@ nouveau_display_init(struct drm_device * + return ret; + + /* enable polling for external displays */ +- drm_kms_helper_poll_enable(dev); ++ if (!dev->mode_config.poll_enabled) ++ drm_kms_helper_poll_enable(dev); + + /* enable hotplug interrupts */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +--- a/drivers/gpu/drm/nouveau/nouveau_drm.c ++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c +@@ -743,7 +743,10 @@ nouveau_pmops_runtime_resume(struct devi + pci_set_master(pdev); + + ret = nouveau_do_resume(drm_dev, true); +- drm_kms_helper_poll_enable(drm_dev); ++ ++ if (!drm_dev->mode_config.poll_enabled) ++ drm_kms_helper_poll_enable(drm_dev); ++ + /* do magic */ + nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); + vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); diff --git a/queue-4.4/drm-nouveau-fence-g84-protect-against-concurrent-access-to-semaphore-buffers.patch b/queue-4.4/drm-nouveau-fence-g84-protect-against-concurrent-access-to-semaphore-buffers.patch new file mode 100644 index 00000000000..2f22c3d0646 --- /dev/null +++ b/queue-4.4/drm-nouveau-fence-g84-protect-against-concurrent-access-to-semaphore-buffers.patch @@ -0,0 +1,64 @@ +From foo@baz Thu Jun 15 13:08:00 CEST 2017 +From: Ben Skeggs +Date: Tue, 23 May 2017 21:54:09 -0400 +Subject: drm/nouveau/fence/g84-: protect against concurrent access to semaphore buffers + +From: Ben Skeggs + + +[ Upstream commit 96692b097ba76d0c637ae8af47b29c73da33c9d0 ] + +Signed-off-by: Ben Skeggs +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/nouveau/nouveau_fence.h | 1 + + drivers/gpu/drm/nouveau/nv84_fence.c | 6 ++++++ + 2 files changed, 7 insertions(+) + +--- a/drivers/gpu/drm/nouveau/nouveau_fence.h ++++ b/drivers/gpu/drm/nouveau/nouveau_fence.h +@@ -99,6 +99,7 @@ struct nv84_fence_priv { + struct nouveau_bo *bo; + struct nouveau_bo *bo_gart; + u32 *suspend; ++ struct mutex mutex; + }; + + u64 nv84_fence_crtc(struct nouveau_channel *, int); +--- a/drivers/gpu/drm/nouveau/nv84_fence.c ++++ b/drivers/gpu/drm/nouveau/nv84_fence.c +@@ -121,8 +121,10 @@ nv84_fence_context_del(struct nouveau_ch + } + + nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence); ++ mutex_lock(&priv->mutex); + nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); + nouveau_bo_vma_del(priv->bo, &fctx->vma); ++ mutex_unlock(&priv->mutex); + nouveau_fence_context_del(&fctx->base); + chan->fence = NULL; + nouveau_fence_context_free(&fctx->base); +@@ -148,11 +150,13 @@ nv84_fence_context_new(struct nouveau_ch + fctx->base.sync32 = nv84_fence_sync32; + fctx->base.sequence = nv84_fence_read(chan); + ++ mutex_lock(&priv->mutex); + ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); + if (ret == 0) { + ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, + &fctx->vma_gart); + } ++ mutex_unlock(&priv->mutex); + + /* map display semaphore buffers into channel's vm */ + for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) { +@@ -232,6 +236,8 @@ nv84_fence_create(struct 
nouveau_drm *dr + priv->base.context_base = fence_context_alloc(priv->base.contexts); + priv->base.uevent = true; + ++ mutex_init(&priv->mutex); ++ + /* Use VRAM if there is any ; otherwise fallback to system memory */ + domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM : + /* diff --git a/queue-4.4/gianfar-synchronize-dma-api-usage-by-free_skb_rx_queue-w-gfar_new_page.patch b/queue-4.4/gianfar-synchronize-dma-api-usage-by-free_skb_rx_queue-w-gfar_new_page.patch new file mode 100644 index 00000000000..a22f9b63d3e --- /dev/null +++ b/queue-4.4/gianfar-synchronize-dma-api-usage-by-free_skb_rx_queue-w-gfar_new_page.patch @@ -0,0 +1,89 @@ +From foo@baz Thu Jun 15 13:08:00 CEST 2017 +From: Arseny Solokha +Date: Sun, 29 Jan 2017 19:52:20 +0700 +Subject: gianfar: synchronize DMA API usage by free_skb_rx_queue w/ gfar_new_page + +From: Arseny Solokha + + +[ Upstream commit 4af0e5bb95ee3ba5ea4bd7dbb94e1648a5279cc9 ] + +In spite of switching to paged allocation of Rx buffers, the driver still +called dma_unmap_single() in the Rx queues tear-down path. + +The DMA region unmapping code in free_skb_rx_queue() basically predates +the introduction of paged allocation to the driver. While being refactored, +it apparently hasn't reflected the change in the DMA API usage by its +counterpart gfar_new_page(). + +As a result, setting an interface to the DOWN state now yields the following: + + # ip link set eth2 down + fsl-gianfar ffe24000.ethernet: DMA-API: device driver frees DMA memory with wrong function [device address=0x000000001ecd0000] [size=40] + ------------[ cut here ]------------ + WARNING: CPU: 1 PID: 189 at lib/dma-debug.c:1123 check_unmap+0x8e0/0xa28 + CPU: 1 PID: 189 Comm: ip Tainted: G O 4.9.5 #1 + task: dee73400 task.stack: dede2000 + NIP: c02101e8 LR: c02101e8 CTR: c0260d74 + REGS: dede3bb0 TRAP: 0700 Tainted: G O (4.9.5) + MSR: 00021000 CR: 28002222 XER: 00000000 + + GPR00: c02101e8 dede3c60 dee73400 000000b6 dfbd033c dfbd36c4 1f622000 dede2000 + GPR08: 00000007 c05b1634 1f622000 00000000 22002484 100a9904 00000000 00000000 + GPR16: 00000000 db4c849c 00000002 db4c8480 00000001 df142240 db4c84bc 00000000 + GPR24: c0706148 c0700000 00029000 c07552e8 c07323b4 dede3cb8 c07605e0 db535540 + NIP [c02101e8] check_unmap+0x8e0/0xa28 + LR [c02101e8] check_unmap+0x8e0/0xa28 + Call Trace: + [dede3c60] [c02101e8] check_unmap+0x8e0/0xa28 (unreliable) + [dede3cb0] [c02103b8] debug_dma_unmap_page+0x88/0x9c + [dede3d30] [c02dffbc] free_skb_resources+0x2c4/0x404 + [dede3d80] [c02e39b4] gfar_close+0x24/0xc8 + [dede3da0] [c0361550] __dev_close_many+0xa0/0xf8 + [dede3dd0] [c03616f0] __dev_close+0x2c/0x4c + [dede3df0] [c036b1b8] __dev_change_flags+0xa0/0x174 + [dede3e10] [c036b2ac] dev_change_flags+0x20/0x60 + [dede3e30] [c03e130c] devinet_ioctl+0x540/0x824 + [dede3e90] [c0347dcc] sock_ioctl+0x134/0x298 + [dede3eb0] [c0111814] do_vfs_ioctl+0xac/0x854 + [dede3f20] [c0111ffc] SyS_ioctl+0x40/0x74 + [dede3f40] [c000f290] ret_from_syscall+0x0/0x3c + --- interrupt: c01 at 0xff45da0 + LR = 0xff45cd0 + Instruction dump: + 811d001c 7c66482e 813d0020 9061000c 807f000c 5463103a 7cc6182e 3c60c052 + 386309ac 90c10008 4cc63182 4826b845 <0fe00000> 4bfffa60 3c80c052 388402c4 + ---[ end trace 695ae6d7ac1d0c47 ]--- + Mapped at: + [] gfar_alloc_rx_buffs+0x178/0x248 + [] startup_gfar+0x368/0x570 + [] __dev_open+0xdc/0x150 + [] __dev_change_flags+0xa0/0x174 + [] dev_change_flags+0x20/0x60 + +Even though the issue was discovered in 4.9 kernel, the code in question +is identical in the current net and net-next trees. 
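+
+The rule the fix restores is simply that a buffer has to be unmapped with the
+variant matching the way it was mapped, which is what CONFIG_DMA_API_DEBUG is
+checking when it prints the splat above.  A rough user-space analogue of that
+bookkeeping (every name here is made up; none of this is the kernel DMA API):
+
+    #include <assert.h>
+    #include <stdio.h>
+
+    /* Invented stand-in for the dma-debug "how was this mapped" record. */
+    enum map_kind { MAP_NONE, MAP_SINGLE, MAP_PAGE };
+
+    struct mapping { enum map_kind kind; };
+
+    static void map_page(struct mapping *m)   { m->kind = MAP_PAGE; }
+    static void map_single(struct mapping *m) { m->kind = MAP_SINGLE; }
+
+    static void unmap_page(struct mapping *m)
+    {
+            /* dma-debug's check: freeing with the wrong function is an error */
+            assert(m->kind == MAP_PAGE && "DMA memory freed with wrong function");
+            m->kind = MAP_NONE;
+    }
+
+    static void unmap_single(struct mapping *m)
+    {
+            assert(m->kind == MAP_SINGLE && "DMA memory freed with wrong function");
+            m->kind = MAP_NONE;
+    }
+
+    int main(void)
+    {
+            struct mapping rxb = { MAP_NONE }, txb = { MAP_NONE };
+
+            map_page(&rxb);    /* gfar_new_page() maps a whole page...          */
+            unmap_page(&rxb);  /* ...so teardown must use the page variant too. */
+            /* calling unmap_single(&rxb) instead would trip the assert,
+             * just like the warning quoted above */
+
+            map_single(&txb);  /* single-mapped buffers keep the old pairing */
+            unmap_single(&txb);
+            printf("ok\n");
+            return 0;
+    }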
+ +Fixes: 75354148ce69 ("gianfar: Add paged allocation and Rx S/G") +Signed-off-by: Arseny Solokha +Acked-by: Claudiu Manoil +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/freescale/gianfar.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/net/ethernet/freescale/gianfar.c ++++ b/drivers/net/ethernet/freescale/gianfar.c +@@ -1999,8 +1999,8 @@ static void free_skb_rx_queue(struct gfa + if (!rxb->page) + continue; + +- dma_unmap_single(rx_queue->dev, rxb->dma, +- PAGE_SIZE, DMA_FROM_DEVICE); ++ dma_unmap_page(rx_queue->dev, rxb->dma, ++ PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(rxb->page); + + rxb->page = NULL; diff --git a/queue-4.4/net-adaptec-starfire-add-checks-for-dma-mapping-errors.patch b/queue-4.4/net-adaptec-starfire-add-checks-for-dma-mapping-errors.patch new file mode 100644 index 00000000000..17838784851 --- /dev/null +++ b/queue-4.4/net-adaptec-starfire-add-checks-for-dma-mapping-errors.patch @@ -0,0 +1,115 @@ +From foo@baz Thu Jun 15 14:39:14 CEST 2017 +From: Alexey Khoroshilov +Date: Sat, 28 Jan 2017 01:07:30 +0300 +Subject: net: adaptec: starfire: add checks for dma mapping errors + +From: Alexey Khoroshilov + + +[ Upstream commit d1156b489fa734d1af763d6a07b1637c01bb0aed ] + +init_ring(), refill_rx_ring() and start_tx() don't check +if mapping dma memory succeed. +The patch adds the checks and failure handling. + +Found by Linux Driver Verification project (linuxtesting.org). + +Signed-off-by: Alexey Khoroshilov +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/adaptec/starfire.c | 45 ++++++++++++++++++++++++++++++-- + 1 file changed, 43 insertions(+), 2 deletions(-) + +--- a/drivers/net/ethernet/adaptec/starfire.c ++++ b/drivers/net/ethernet/adaptec/starfire.c +@@ -1153,6 +1153,12 @@ static void init_ring(struct net_device + if (skb == NULL) + break; + np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); ++ if (pci_dma_mapping_error(np->pci_dev, ++ np->rx_info[i].mapping)) { ++ dev_kfree_skb(skb); ++ np->rx_info[i].skb = NULL; ++ break; ++ } + /* Grrr, we cannot offset to correctly align the IP header. 
*/ + np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); + } +@@ -1183,8 +1189,9 @@ static netdev_tx_t start_tx(struct sk_bu + { + struct netdev_private *np = netdev_priv(dev); + unsigned int entry; ++ unsigned int prev_tx; + u32 status; +- int i; ++ int i, j; + + /* + * be cautious here, wrapping the queue has weird semantics +@@ -1202,6 +1209,7 @@ static netdev_tx_t start_tx(struct sk_bu + } + #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ + ++ prev_tx = np->cur_tx; + entry = np->cur_tx % TX_RING_SIZE; + for (i = 0; i < skb_num_frags(skb); i++) { + int wrap_ring = 0; +@@ -1235,6 +1243,11 @@ static netdev_tx_t start_tx(struct sk_bu + skb_frag_size(this_frag), + PCI_DMA_TODEVICE); + } ++ if (pci_dma_mapping_error(np->pci_dev, ++ np->tx_info[entry].mapping)) { ++ dev->stats.tx_dropped++; ++ goto err_out; ++ } + + np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); + np->tx_ring[entry].status = cpu_to_le32(status); +@@ -1269,8 +1282,30 @@ static netdev_tx_t start_tx(struct sk_bu + netif_stop_queue(dev); + + return NETDEV_TX_OK; +-} + ++err_out: ++ entry = prev_tx % TX_RING_SIZE; ++ np->tx_info[entry].skb = NULL; ++ if (i > 0) { ++ pci_unmap_single(np->pci_dev, ++ np->tx_info[entry].mapping, ++ skb_first_frag_len(skb), ++ PCI_DMA_TODEVICE); ++ np->tx_info[entry].mapping = 0; ++ entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE; ++ for (j = 1; j < i; j++) { ++ pci_unmap_single(np->pci_dev, ++ np->tx_info[entry].mapping, ++ skb_frag_size( ++ &skb_shinfo(skb)->frags[j-1]), ++ PCI_DMA_TODEVICE); ++ entry++; ++ } ++ } ++ dev_kfree_skb_any(skb); ++ np->cur_tx = prev_tx; ++ return NETDEV_TX_OK; ++} + + /* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +@@ -1570,6 +1605,12 @@ static void refill_rx_ring(struct net_de + break; /* Better luck next round. */ + np->rx_info[entry].mapping = + pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); ++ if (pci_dma_mapping_error(np->pci_dev, ++ np->rx_info[entry].mapping)) { ++ dev_kfree_skb(skb); ++ np->rx_info[entry].skb = NULL; ++ break; ++ } + np->rx_ring[entry].rxaddr = + cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); + } diff --git a/queue-4.4/net-mlx4_core-avoid-command-timeouts-during-vf-driver-device-shutdown.patch b/queue-4.4/net-mlx4_core-avoid-command-timeouts-during-vf-driver-device-shutdown.patch new file mode 100644 index 00000000000..2a431b8ed1d --- /dev/null +++ b/queue-4.4/net-mlx4_core-avoid-command-timeouts-during-vf-driver-device-shutdown.patch @@ -0,0 +1,80 @@ +From foo@baz Thu Jun 15 13:08:00 CEST 2017 +From: Jack Morgenstein +Date: Mon, 30 Jan 2017 15:11:45 +0200 +Subject: net/mlx4_core: Avoid command timeouts during VF driver device shutdown + +From: Jack Morgenstein + + +[ Upstream commit d585df1c5ccf995fcee910705ad7a9cdd11d4152 ] + +Some Hypervisors detach VFs from VMs by instantly causing an FLR event +to be generated for a VF. + +In the mlx4 case, this will cause that VF's comm channel to be disabled +before the VM has an opportunity to invoke the VF device's "shutdown" +method. + +The result is that the VF driver on the VM will experience a command +timeout during the shutdown process when the Hypervisor does not deliver +a command-completion event to the VM. + +To avoid FW command timeouts on the VM when the driver's shutdown method +is invoked, we detect the absence of the VF's comm channel at the very +start of the shutdown process. 
If the comm-channel has already been +disabled, we cause all FW commands during the device shutdown process to +immediately return success (and thus avoid all command timeouts). + +Signed-off-by: Jack Morgenstein +Signed-off-by: Tariq Toukan +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/mellanox/mlx4/catas.c | 2 +- + drivers/net/ethernet/mellanox/mlx4/intf.c | 12 ++++++++++++ + drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1 + + 3 files changed, 14 insertions(+), 1 deletion(-) + +--- a/drivers/net/ethernet/mellanox/mlx4/catas.c ++++ b/drivers/net/ethernet/mellanox/mlx4/catas.c +@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_ + return -ETIMEDOUT; + } + +-static int mlx4_comm_internal_err(u32 slave_read) ++int mlx4_comm_internal_err(u32 slave_read) + { + return (u32)COMM_CHAN_EVENT_INTERNAL_ERR == + (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0; +--- a/drivers/net/ethernet/mellanox/mlx4/intf.c ++++ b/drivers/net/ethernet/mellanox/mlx4/intf.c +@@ -218,6 +218,18 @@ void mlx4_unregister_device(struct mlx4_ + struct mlx4_interface *intf; + + mlx4_stop_catas_poll(dev); ++ if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION && ++ mlx4_is_slave(dev)) { ++ /* In mlx4_remove_one on a VF */ ++ u32 slave_read = ++ swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read)); ++ ++ if (mlx4_comm_internal_err(slave_read)) { ++ mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n", ++ __func__); ++ mlx4_enter_error_state(dev->persist); ++ } ++ } + mutex_lock(&intf_mutex); + + list_for_each_entry(intf, &intf_list, list) +--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h ++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h +@@ -1205,6 +1205,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, + void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); + + void mlx4_enter_error_state(struct mlx4_dev_persistent *persist); ++int mlx4_comm_internal_err(u32 slave_read); + + int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, + enum mlx4_port_type *type); diff --git a/queue-4.4/nfs-fix-don-t-increment-lock-sequence-id-after-nfs4err_moved.patch b/queue-4.4/nfs-fix-don-t-increment-lock-sequence-id-after-nfs4err_moved.patch new file mode 100644 index 00000000000..8ba00d02e9f --- /dev/null +++ b/queue-4.4/nfs-fix-don-t-increment-lock-sequence-id-after-nfs4err_moved.patch @@ -0,0 +1,36 @@ +From foo@baz Thu Jun 15 15:01:15 CEST 2017 +From: Chuck Lever +Date: Thu, 26 Jan 2017 15:14:52 -0500 +Subject: nfs: Fix "Don't increment lock sequence ID after NFS4ERR_MOVED" + +From: Chuck Lever + + +[ Upstream commit 406dab8450ec76eca88a1af2fc15d18a2b36ca49 ] + +Lock sequence IDs are bumped in decode_lock by calling +nfs_increment_seqid(). nfs_increment_sequid() does not use the +seqid_mutating_err() function fixed in commit 059aa7348241 ("Don't +increment lock sequence ID after NFS4ERR_MOVED"). 
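+
+Put differently, the lock path needs the same classification the rest of the
+state machine already has: NFS4ERR_MOVED must count as a non-seqid-mutating
+error so the counter is left untouched.  A stand-alone sketch of that
+classification (error names and values are illustrative, not the NFS code):
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    /* Made-up error codes; the real ones live in include/linux/nfs4.h. */
+    enum { ERR_BADXDR = 1, ERR_MOVED, ERR_DENIED };
+
+    struct seqid { unsigned int counter; };
+
+    /* Mirrors the idea behind nfs_increment_seqid(): some errors mean the
+     * server never processed a valid operation, so the id must not move. */
+    static bool seqid_mutating_error(int err)
+    {
+            switch (err) {
+            case ERR_BADXDR:
+            case ERR_MOVED:         /* the case this fix adds */
+                    return false;
+            default:
+                    return true;
+            }
+    }
+
+    static void increment_seqid(struct seqid *s, int err)
+    {
+            if (seqid_mutating_error(err))
+                    s->counter++;
+    }
+
+    int main(void)
+    {
+            struct seqid s = { 0 };
+
+            increment_seqid(&s, ERR_DENIED);  /* ordinary error: bumps to 1  */
+            increment_seqid(&s, ERR_MOVED);   /* left alone after this patch */
+            printf("seqid=%u\n", s.counter);  /* prints 1 */
+            return 0;
+    }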
+ +Fixes: 059aa7348241 ("Don't increment lock sequence ID after ...") +Signed-off-by: Chuck Lever +Tested-by: Xuan Qi +Cc: stable@vger.kernel.org # v3.7+ +Signed-off-by: Trond Myklebust +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + fs/nfs/nfs4state.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -1072,6 +1072,7 @@ static void nfs_increment_seqid(int stat + case -NFS4ERR_BADXDR: + case -NFS4ERR_RESOURCE: + case -NFS4ERR_NOFILEHANDLE: ++ case -NFS4ERR_MOVED: + /* Non-seqid mutating errors */ + return; + }; diff --git a/queue-4.4/parisc-parport_gsc-fixes-for-printk-continuation-lines.patch b/queue-4.4/parisc-parport_gsc-fixes-for-printk-continuation-lines.patch new file mode 100644 index 00000000000..a07bbd903ba --- /dev/null +++ b/queue-4.4/parisc-parport_gsc-fixes-for-printk-continuation-lines.patch @@ -0,0 +1,48 @@ +From foo@baz Thu Jun 15 14:39:14 CEST 2017 +From: Helge Deller +Date: Tue, 3 Jan 2017 22:55:50 +0100 +Subject: parisc, parport_gsc: Fixes for printk continuation lines + +From: Helge Deller + + +[ Upstream commit 83b5d1e3d3013dbf90645a5d07179d018c8243fa ] + +Signed-off-by: Helge Deller +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/parport/parport_gsc.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/drivers/parport/parport_gsc.c ++++ b/drivers/parport/parport_gsc.c +@@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(u + p->irq = PARPORT_IRQ_NONE; + } + if (p->irq != PARPORT_IRQ_NONE) { +- printk(", irq %d", p->irq); ++ pr_cont(", irq %d", p->irq); + + if (p->dma == PARPORT_DMA_AUTO) { + p->dma = PARPORT_DMA_NONE; +@@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(u + is mandatory (see above) */ + p->dma = PARPORT_DMA_NONE; + +- printk(" ["); +-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}} ++ pr_cont(" ["); ++#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}} + { + int f = 0; + printmode(PCSPP); +@@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(u + // printmode(DMA); + } + #undef printmode +- printk("]\n"); ++ pr_cont("]\n"); + + if (p->irq != PARPORT_IRQ_NONE) { + if (request_irq (p->irq, parport_irq_handler, diff --git a/queue-4.4/pinctrl-berlin-bg4ct-fix-the-value-for-sd1a-of-pin-scrd0_crd_pres.patch b/queue-4.4/pinctrl-berlin-bg4ct-fix-the-value-for-sd1a-of-pin-scrd0_crd_pres.patch new file mode 100644 index 00000000000..7cb1fe6c2a0 --- /dev/null +++ b/queue-4.4/pinctrl-berlin-bg4ct-fix-the-value-for-sd1a-of-pin-scrd0_crd_pres.patch @@ -0,0 +1,31 @@ +From foo@baz Thu Jun 15 14:39:14 CEST 2017 +From: Jisheng Zhang +Date: Mon, 23 Jan 2017 15:15:32 +0800 +Subject: pinctrl: berlin-bg4ct: fix the value for "sd1a" of pin SCRD0_CRD_PRES + +From: Jisheng Zhang + + +[ Upstream commit e82d02580af45663fad6d3596e4344c606e81e10 ] + +This should be a typo. 
+ +Signed-off-by: Jisheng Zhang +Signed-off-by: Linus Walleij +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/pinctrl/berlin/berlin-bg4ct.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/pinctrl/berlin/berlin-bg4ct.c ++++ b/drivers/pinctrl/berlin/berlin-bg4ct.c +@@ -217,7 +217,7 @@ static const struct berlin_desc_group be + BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15, + BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */ + BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */ +- BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */ ++ BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */ + BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18, + BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */ + BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */ diff --git a/queue-4.4/r8152-fix-rtl8152_post_reset-function.patch b/queue-4.4/r8152-fix-rtl8152_post_reset-function.patch new file mode 100644 index 00000000000..70bbe7bd24e --- /dev/null +++ b/queue-4.4/r8152-fix-rtl8152_post_reset-function.patch @@ -0,0 +1,39 @@ +From foo@baz Thu Jun 15 15:01:15 CEST 2017 +From: hayeswang +Date: Fri, 20 Jan 2017 14:33:55 +0800 +Subject: r8152: fix rtl8152_post_reset function + +From: hayeswang + + +[ Upstream commit 2c561b2b728ca4013e76d6439bde2c137503745e ] + +The rtl8152_post_reset() should sumbit rx urb and interrupt transfer, +otherwise the rx wouldn't work and the linking change couldn't be +detected. + +Signed-off-by: Hayes Wang +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/usb/r8152.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -3418,12 +3418,14 @@ static int rtl8152_post_reset(struct usb + if (netif_carrier_ok(netdev)) { + mutex_lock(&tp->control); + tp->rtl_ops.enable(tp); ++ rtl_start_rx(tp); + rtl8152_set_rx_mode(netdev); + mutex_unlock(&tp->control); + netif_wake_queue(netdev); + } + + napi_enable(&tp->napi); ++ usb_submit_urb(tp->intr_urb, GFP_KERNEL); + + return 0; + } diff --git a/queue-4.4/r8152-re-schedule-napi-for-tx.patch b/queue-4.4/r8152-re-schedule-napi-for-tx.patch new file mode 100644 index 00000000000..6472f8ac8d7 --- /dev/null +++ b/queue-4.4/r8152-re-schedule-napi-for-tx.patch @@ -0,0 +1,36 @@ +From foo@baz Thu Jun 15 15:01:15 CEST 2017 +From: hayeswang +Date: Thu, 26 Jan 2017 09:38:33 +0800 +Subject: r8152: re-schedule napi for tx + +From: hayeswang + + +[ Upstream commit 248b213ad908b88db15941202ef7cb7eb137c1a0 ] + +Re-schedule napi after napi_complete() for tx, if it is necessay. + +In r8152_poll(), if the tx is completed after tx_bottom() and before +napi_complete(), the scheduling of napi would be lost. Then, no +one handles the next tx until the next napi_schedule() is called. + +Signed-off-by: Hayes Wang +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/usb/r8152.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -1851,6 +1851,9 @@ static int r8152_poll(struct napi_struct + napi_complete(napi); + if (!list_empty(&tp->rx_done)) + napi_schedule(napi); ++ else if (!skb_queue_empty(&tp->tx_queue) && ++ !list_empty(&tp->tx_free)) ++ napi_schedule(napi); + } + + return work_done; diff --git a/queue-4.4/ravb-unmap-descriptors-when-freeing-rings.patch b/queue-4.4/ravb-unmap-descriptors-when-freeing-rings.patch new file mode 100644 index 00000000000..16476088775 --- /dev/null +++ b/queue-4.4/ravb-unmap-descriptors-when-freeing-rings.patch @@ -0,0 +1,196 @@ +From foo@baz Thu Jun 15 15:01:15 CEST 2017 +From: Kazuya Mizuguchi +Date: Thu, 26 Jan 2017 14:29:27 +0100 +Subject: ravb: unmap descriptors when freeing rings + +From: Kazuya Mizuguchi + + +[ Upstream commit a47b70ea86bdeb3091341f5ae3ef580f1a1ad822 ] + +"swiotlb buffer is full" errors occur after repeated initialisation of a +device - f.e. suspend/resume or ip link set up/down. This is because memory +mapped using dma_map_single() in ravb_ring_format() and ravb_start_xmit() +is not released. Resolve this problem by unmapping descriptors when +freeing rings. + +Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper") +Signed-off-by: Kazuya Mizuguchi +[simon: reworked] +Signed-off-by: Simon Horman +Acked-by: Sergei Shtylyov +Signed-off-by: David S. Miller + +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/renesas/ravb_main.c | 112 +++++++++++++++++-------------- + 1 file changed, 64 insertions(+), 48 deletions(-) + +--- a/drivers/net/ethernet/renesas/ravb_main.c ++++ b/drivers/net/ethernet/renesas/ravb_main.c +@@ -171,6 +171,49 @@ static struct mdiobb_ops bb_ops = { + .get_mdio_data = ravb_get_mdio_data, + }; + ++/* Free TX skb function for AVB-IP */ ++static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) ++{ ++ struct ravb_private *priv = netdev_priv(ndev); ++ struct net_device_stats *stats = &priv->stats[q]; ++ struct ravb_tx_desc *desc; ++ int free_num = 0; ++ int entry; ++ u32 size; ++ ++ for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { ++ bool txed; ++ ++ entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * ++ NUM_TX_DESC); ++ desc = &priv->tx_ring[q][entry]; ++ txed = desc->die_dt == DT_FEMPTY; ++ if (free_txed_only && !txed) ++ break; ++ /* Descriptor type must be checked before all other reads */ ++ dma_rmb(); ++ size = le16_to_cpu(desc->ds_tagl) & TX_DS; ++ /* Free the original skb. */ ++ if (priv->tx_skb[q][entry / NUM_TX_DESC]) { ++ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), ++ size, DMA_TO_DEVICE); ++ /* Last packet descriptor? 
*/ ++ if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { ++ entry /= NUM_TX_DESC; ++ dev_kfree_skb_any(priv->tx_skb[q][entry]); ++ priv->tx_skb[q][entry] = NULL; ++ if (txed) ++ stats->tx_packets++; ++ } ++ free_num++; ++ } ++ if (txed) ++ stats->tx_bytes += size; ++ desc->die_dt = DT_EEMPTY; ++ } ++ return free_num; ++} ++ + /* Free skb's and DMA buffers for Ethernet AVB */ + static void ravb_ring_free(struct net_device *ndev, int q) + { +@@ -186,19 +229,21 @@ static void ravb_ring_free(struct net_de + kfree(priv->rx_skb[q]); + priv->rx_skb[q] = NULL; + +- /* Free TX skb ringbuffer */ +- if (priv->tx_skb[q]) { +- for (i = 0; i < priv->num_tx_ring[q]; i++) +- dev_kfree_skb(priv->tx_skb[q][i]); +- } +- kfree(priv->tx_skb[q]); +- priv->tx_skb[q] = NULL; +- + /* Free aligned TX buffers */ + kfree(priv->tx_align[q]); + priv->tx_align[q] = NULL; + + if (priv->rx_ring[q]) { ++ for (i = 0; i < priv->num_rx_ring[q]; i++) { ++ struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; ++ ++ if (!dma_mapping_error(ndev->dev.parent, ++ le32_to_cpu(desc->dptr))) ++ dma_unmap_single(ndev->dev.parent, ++ le32_to_cpu(desc->dptr), ++ PKT_BUF_SZ, ++ DMA_FROM_DEVICE); ++ } + ring_size = sizeof(struct ravb_ex_rx_desc) * + (priv->num_rx_ring[q] + 1); + dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], +@@ -207,12 +252,20 @@ static void ravb_ring_free(struct net_de + } + + if (priv->tx_ring[q]) { ++ ravb_tx_free(ndev, q, false); ++ + ring_size = sizeof(struct ravb_tx_desc) * + (priv->num_tx_ring[q] * NUM_TX_DESC + 1); + dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], + priv->tx_desc_dma[q]); + priv->tx_ring[q] = NULL; + } ++ ++ /* Free TX skb ringbuffer. ++ * SKBs are freed by ravb_tx_free() call above. ++ */ ++ kfree(priv->tx_skb[q]); ++ priv->tx_skb[q] = NULL; + } + + /* Format skb and descriptor buffer for Ethernet AVB */ +@@ -420,44 +473,6 @@ static int ravb_dmac_init(struct net_dev + return 0; + } + +-/* Free TX skb function for AVB-IP */ +-static int ravb_tx_free(struct net_device *ndev, int q) +-{ +- struct ravb_private *priv = netdev_priv(ndev); +- struct net_device_stats *stats = &priv->stats[q]; +- struct ravb_tx_desc *desc; +- int free_num = 0; +- int entry; +- u32 size; +- +- for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { +- entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * +- NUM_TX_DESC); +- desc = &priv->tx_ring[q][entry]; +- if (desc->die_dt != DT_FEMPTY) +- break; +- /* Descriptor type must be checked before all other reads */ +- dma_rmb(); +- size = le16_to_cpu(desc->ds_tagl) & TX_DS; +- /* Free the original skb. */ +- if (priv->tx_skb[q][entry / NUM_TX_DESC]) { +- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), +- size, DMA_TO_DEVICE); +- /* Last packet descriptor? 
*/ +- if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { +- entry /= NUM_TX_DESC; +- dev_kfree_skb_any(priv->tx_skb[q][entry]); +- priv->tx_skb[q][entry] = NULL; +- stats->tx_packets++; +- } +- free_num++; +- } +- stats->tx_bytes += size; +- desc->die_dt = DT_EEMPTY; +- } +- return free_num; +-} +- + static void ravb_get_tx_tstamp(struct net_device *ndev) + { + struct ravb_private *priv = netdev_priv(ndev); +@@ -797,7 +812,7 @@ static int ravb_poll(struct napi_struct + spin_lock_irqsave(&priv->lock, flags); + /* Clear TX interrupt */ + ravb_write(ndev, ~mask, TIS); +- ravb_tx_free(ndev, q); ++ ravb_tx_free(ndev, q, true); + netif_wake_subqueue(ndev, q); + mmiowb(); + spin_unlock_irqrestore(&priv->lock, flags); +@@ -1393,7 +1408,8 @@ static netdev_tx_t ravb_start_xmit(struc + + priv->cur_tx[q] += NUM_TX_DESC; + if (priv->cur_tx[q] - priv->dirty_tx[q] > +- (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q)) ++ (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && ++ !ravb_tx_free(ndev, q, true)) + netif_stop_subqueue(ndev, q); + + exit: diff --git a/queue-4.4/series b/queue-4.4/series index fe46005affa..c7a5c9a555c 100644 --- a/queue-4.4/series +++ b/queue-4.4/series @@ -24,3 +24,15 @@ fscache-clear-outstanding-writes-when-disabling-a-cookie.patch fs-cache-initialise-stores_lock-in-netfs-cookie.patch ipv6-fix-flow-labels-when-the-traffic-class-is-non-0.patch drm-nouveau-prevent-userspace-from-deleting-client-object.patch +drm-nouveau-fence-g84-protect-against-concurrent-access-to-semaphore-buffers.patch +net-mlx4_core-avoid-command-timeouts-during-vf-driver-device-shutdown.patch +gianfar-synchronize-dma-api-usage-by-free_skb_rx_queue-w-gfar_new_page.patch +pinctrl-berlin-bg4ct-fix-the-value-for-sd1a-of-pin-scrd0_crd_pres.patch +net-adaptec-starfire-add-checks-for-dma-mapping-errors.patch +parisc-parport_gsc-fixes-for-printk-continuation-lines.patch +drm-nouveau-don-t-enabling-polling-twice-on-runtime-resume.patch +drm-ast-fixed-system-hanged-if-disable-p2a.patch +ravb-unmap-descriptors-when-freeing-rings.patch +nfs-fix-don-t-increment-lock-sequence-id-after-nfs4err_moved.patch +r8152-re-schedule-napi-for-tx.patch +r8152-fix-rtl8152_post_reset-function.patch