From: Greg Kroah-Hartman
Date: Tue, 3 Feb 2015 03:01:23 +0000 (-0800)
Subject: 3.18-stable patches
X-Git-Tag: v3.18.6~15
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=47bbd3ee371679b162776f6788db1bc3104fdbc2;p=thirdparty%2Fkernel%2Fstable-queue.git

3.18-stable patches

added patches:
	arm-mvebu-don-t-set-the-pl310-in-i-o-coherency-mode-when-i-o-coherency-is-disabled.patch
	dm-cache-fix-missing-err_ptr-returns-and-handling.patch
	dm-thin-don-t-allow-messages-to-be-sent-to-a-pool-target-in-read_only-or-fail-mode.patch
	drivers-net-cpsw-discard-dual-emac-default-vlan-configuration.patch
	drivers-rtc-rtc-s5m.c-terminate-s5m_rtc_id-array-with-empty-element.patch
	drm-fix-fb-helper-vs-mst-dangling-connector-ptrs-v2.patch
	drm-radeon-restore-gart-table-contents-after-pinning-it-in-vram-v3.patch
	drm-radeon-split-off-gart_get_page_entry-asic-hook-from-set_page_entry.patch
	drm-vmwgfx-replace-the-hw-mutex-with-a-hw-spinlock.patch
	nl80211-fix-per-station-group-key-get-del-and-memory-leak.patch
	pinctrl-at91-allow-to-have-disabled-gpio-bank.patch
	regulator-core-fix-race-condition-in-regulator_put.patch
	spi-pxa2xx-clear-cur_chip-pointer-before-starting-next-message.patch
---

diff --git a/queue-3.18/arm-mvebu-don-t-set-the-pl310-in-i-o-coherency-mode-when-i-o-coherency-is-disabled.patch b/queue-3.18/arm-mvebu-don-t-set-the-pl310-in-i-o-coherency-mode-when-i-o-coherency-is-disabled.patch
new file mode 100644
index 00000000000..aa2bc8a7866
--- /dev/null
+++ b/queue-3.18/arm-mvebu-don-t-set-the-pl310-in-i-o-coherency-mode-when-i-o-coherency-is-disabled.patch
@@ -0,0 +1,55 @@
+From dcad68876c21bac709b01eda24e39d4410dc36a8 Mon Sep 17 00:00:00 2001
+From: Thomas Petazzoni
+Date: Wed, 28 Jan 2015 12:55:45 +0100
+Subject: ARM: mvebu: don't set the PL310 in I/O coherency mode when I/O coherency is disabled
+
+From: Thomas Petazzoni
+
+commit dcad68876c21bac709b01eda24e39d4410dc36a8 upstream.
+
+Since commit f2c3c67f00 (merge commit that adds commit "ARM: mvebu:
+completely disable hardware I/O coherency"), we disable I/O coherency
+on Armada EBU platforms.
+
+However, we continue to initialize the coherency fabric, because this
+coherency fabric is needed on Armada XP for inter-CPU
+coherency. Unfortunately, due to this, we also continued to execute
+the coherency fabric initialization code for Armada 375/38x, which
+switched the PL310 into I/O coherent mode. This has the effect of
+disabling the outer cache sync operation: this is needed when I/O
+coherency is enabled to work around a PCIe/L2 deadlock. But obviously,
+when I/O coherency is disabled, having the outer cache sync operation
+is crucial.
+
+Therefore, this commit fixes the armada_375_380_coherency_init() so
+that the PL310 is switched to I/O coherent mode only if I/O coherency
+is enabled.
+
+Without this fix, all devices using DMA are broken on Armada 375/38x.
+
+Signed-off-by: Thomas Petazzoni
+Acked-by: Gregory CLEMENT
+Tested-by: Gregory CLEMENT
+Signed-off-by: Andrew Lunn
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm/mach-mvebu/coherency.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/arm/mach-mvebu/coherency.c
++++ b/arch/arm/mach-mvebu/coherency.c
+@@ -342,6 +342,13 @@ static void __init armada_375_380_cohere
+ 	arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
+ 
+ 	/*
++	 * We should switch the PL310 to I/O coherency mode only if
++	 * I/O coherency is actually enabled.
++	 */
++	if (!coherency_available())
++		return;
++
++	/*
+ 	 * Add the PL310 property "arm,io-coherent".
This makes sure the + * outer sync operation is not used, which allows to + * workaround the system erratum that causes deadlocks when diff --git a/queue-3.18/dm-cache-fix-missing-err_ptr-returns-and-handling.patch b/queue-3.18/dm-cache-fix-missing-err_ptr-returns-and-handling.patch new file mode 100644 index 00000000000..41d29268787 --- /dev/null +++ b/queue-3.18/dm-cache-fix-missing-err_ptr-returns-and-handling.patch @@ -0,0 +1,56 @@ +From 766a78882ddf79b162243649d7dfdbac1fb6fb88 Mon Sep 17 00:00:00 2001 +From: Joe Thornber +Date: Wed, 28 Jan 2015 12:07:46 +0000 +Subject: dm cache: fix missing ERR_PTR returns and handling + +From: Joe Thornber + +commit 766a78882ddf79b162243649d7dfdbac1fb6fb88 upstream. + +Commit 9b1cc9f251 ("dm cache: share cache-metadata object across +inactive and active DM tables") mistakenly ignored the use of ERR_PTR +returns. Restore missing IS_ERR checks and ERR_PTR returns where +appropriate. + +Reported-by: Dan Carpenter +Signed-off-by: Joe Thornber +Signed-off-by: Mike Snitzer +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/md/dm-cache-metadata.c | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +--- a/drivers/md/dm-cache-metadata.c ++++ b/drivers/md/dm-cache-metadata.c +@@ -683,7 +683,7 @@ static struct dm_cache_metadata *metadat + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + DMERR("could not allocate metadata struct"); +- return NULL; ++ return ERR_PTR(-ENOMEM); + } + + atomic_set(&cmd->ref_count, 1); +@@ -745,7 +745,7 @@ static struct dm_cache_metadata *lookup_ + return cmd; + + cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); +- if (cmd) { ++ if (!IS_ERR(cmd)) { + mutex_lock(&table_lock); + cmd2 = lookup(bdev); + if (cmd2) { +@@ -780,9 +780,10 @@ struct dm_cache_metadata *dm_cache_metad + { + struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, + may_format_device, policy_hint_size); +- if (cmd && !same_params(cmd, data_block_size)) { ++ ++ if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) { + dm_cache_metadata_close(cmd); +- return NULL; ++ return ERR_PTR(-EINVAL); + } + + return cmd; diff --git a/queue-3.18/dm-thin-don-t-allow-messages-to-be-sent-to-a-pool-target-in-read_only-or-fail-mode.patch b/queue-3.18/dm-thin-don-t-allow-messages-to-be-sent-to-a-pool-target-in-read_only-or-fail-mode.patch new file mode 100644 index 00000000000..431e71d38dd --- /dev/null +++ b/queue-3.18/dm-thin-don-t-allow-messages-to-be-sent-to-a-pool-target-in-read_only-or-fail-mode.patch @@ -0,0 +1,38 @@ +From 2a7eaea02b99b6e267b1e89c79acc6e9a51cee3b Mon Sep 17 00:00:00 2001 +From: Joe Thornber +Date: Mon, 26 Jan 2015 11:38:21 +0000 +Subject: dm thin: don't allow messages to be sent to a pool target in READ_ONLY or FAIL mode + +From: Joe Thornber + +commit 2a7eaea02b99b6e267b1e89c79acc6e9a51cee3b upstream. + +You can't modify the metadata in these modes. It's better to fail these +messages immediately than let the block-manager deny write locks on +metadata blocks. Otherwise these failed metadata changes will trigger +'needs_check' to get set in the metadata superblock -- requiring repair +using the thin_check utility. 
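
The guard added below leans on the ordering of dm-thin's pool mode enum: every mode at or above PM_READ_ONLY forbids metadata changes, so a single comparison covers both the READ_ONLY and FAIL cases. An illustrative sketch only, not part of the patch (enum ordering as in dm-thin.c of this era):

	enum pool_mode {
		PM_WRITE,		/* metadata may be changed */
		PM_OUT_OF_DATA_SPACE,	/* metadata may be changed */
		PM_READ_ONLY,		/* metadata may NOT be changed */
		PM_FAIL,		/* all I/O fails */
	};

	/* Fail the message early, before the block manager can deny a
	 * write lock and 'needs_check' gets set in the superblock. */
	if (get_pool_mode(pool) >= PM_READ_ONLY)
		return -EINVAL;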
+ +Signed-off-by: Joe Thornber +Signed-off-by: Mike Snitzer +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/md/dm-thin.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +--- a/drivers/md/dm-thin.c ++++ b/drivers/md/dm-thin.c +@@ -2978,6 +2978,12 @@ static int pool_message(struct dm_target + struct pool_c *pt = ti->private; + struct pool *pool = pt->pool; + ++ if (get_pool_mode(pool) >= PM_READ_ONLY) { ++ DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", ++ dm_device_name(pool->pool_md)); ++ return -EINVAL; ++ } ++ + if (!strcasecmp(argv[0], "create_thin")) + r = process_create_thin_mesg(argc, argv, pool); + diff --git a/queue-3.18/drivers-net-cpsw-discard-dual-emac-default-vlan-configuration.patch b/queue-3.18/drivers-net-cpsw-discard-dual-emac-default-vlan-configuration.patch new file mode 100644 index 00000000000..0baef476919 --- /dev/null +++ b/queue-3.18/drivers-net-cpsw-discard-dual-emac-default-vlan-configuration.patch @@ -0,0 +1,65 @@ +From 02a54164c52ed6eca3089a0d402170fbf34d6cf5 Mon Sep 17 00:00:00 2001 +From: Mugunthan V N +Date: Thu, 22 Jan 2015 15:19:22 +0530 +Subject: drivers: net: cpsw: discard dual emac default vlan configuration + +From: Mugunthan V N + +commit 02a54164c52ed6eca3089a0d402170fbf34d6cf5 upstream. + +In Dual EMAC, the default VLANs are used to segregate Rx packets between +the ports, so adding the same default VLAN to the switch will affect the +normal packet transfers. So returning error on addition of dual EMAC +default VLANs. + +Even if EMAC 0 default port VLAN is added to EMAC 1, it will lead to +break dual EMAC port separations. + +Fixes: d9ba8f9e6298 (driver: net: ethernet: cpsw: dual emac interface implementation) +Reported-by: Felipe Balbi +Signed-off-by: Mugunthan V N +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/net/ethernet/ti/cpsw.c | 22 ++++++++++++++++++++++ + 1 file changed, 22 insertions(+) + +--- a/drivers/net/ethernet/ti/cpsw.c ++++ b/drivers/net/ethernet/ti/cpsw.c +@@ -1676,6 +1676,19 @@ static int cpsw_ndo_vlan_rx_add_vid(stru + if (vid == priv->data.default_vlan) + return 0; + ++ if (priv->data.dual_emac) { ++ /* In dual EMAC, reserved VLAN id should not be used for ++ * creating VLAN interfaces as this can break the dual ++ * EMAC port separation ++ */ ++ int i; ++ ++ for (i = 0; i < priv->data.slaves; i++) { ++ if (vid == priv->slaves[i].port_vlan) ++ return -EINVAL; ++ } ++ } ++ + dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); + return cpsw_add_vlan_ale_entry(priv, vid); + } +@@ -1689,6 +1702,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(str + if (vid == priv->data.default_vlan) + return 0; + ++ if (priv->data.dual_emac) { ++ int i; ++ ++ for (i = 0; i < priv->data.slaves; i++) { ++ if (vid == priv->slaves[i].port_vlan) ++ return -EINVAL; ++ } ++ } ++ + dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); + ret = cpsw_ale_del_vlan(priv->ale, vid, 0); + if (ret != 0) diff --git a/queue-3.18/drivers-rtc-rtc-s5m.c-terminate-s5m_rtc_id-array-with-empty-element.patch b/queue-3.18/drivers-rtc-rtc-s5m.c-terminate-s5m_rtc_id-array-with-empty-element.patch new file mode 100644 index 00000000000..d22a8023f33 --- /dev/null +++ b/queue-3.18/drivers-rtc-rtc-s5m.c-terminate-s5m_rtc_id-array-with-empty-element.patch @@ -0,0 +1,33 @@ +From 45cd15e600ec8006305ce83f62c7208c2cb7a052 Mon Sep 17 00:00:00 2001 +From: Andrey Ryabinin +Date: Mon, 26 Jan 2015 12:58:46 -0800 +Subject: drivers/rtc/rtc-s5m.c: terminate s5m_rtc_id array with empty element + +From: Andrey Ryabinin + +commit 45cd15e600ec8006305ce83f62c7208c2cb7a052 upstream. + +Array of platform_device_id elements should be terminated with empty +element. + +Fixes: 5bccae6ec458 ("rtc: s5m-rtc: add real-time clock driver for s5m8767") +Signed-off-by: Andrey Ryabinin +Reviewed-by: Krzysztof Kozlowski +Signed-off-by: Andrew Morton +Signed-off-by: Linus Torvalds +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/rtc/rtc-s5m.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/rtc/rtc-s5m.c ++++ b/drivers/rtc/rtc-s5m.c +@@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, + static const struct platform_device_id s5m_rtc_id[] = { + { "s5m-rtc", S5M8767X }, + { "s2mps14-rtc", S2MPS14X }, ++ { }, + }; + + static struct platform_driver s5m_rtc_driver = { diff --git a/queue-3.18/drm-fix-fb-helper-vs-mst-dangling-connector-ptrs-v2.patch b/queue-3.18/drm-fix-fb-helper-vs-mst-dangling-connector-ptrs-v2.patch new file mode 100644 index 00000000000..106128a91a6 --- /dev/null +++ b/queue-3.18/drm-fix-fb-helper-vs-mst-dangling-connector-ptrs-v2.patch @@ -0,0 +1,74 @@ +From 2148f18fdb45f31ca269a7787fbc24053cd42e70 Mon Sep 17 00:00:00 2001 +From: Rob Clark +Date: Mon, 26 Jan 2015 10:11:08 -0500 +Subject: drm: fix fb-helper vs MST dangling connector ptrs (v2) + +From: Rob Clark + +commit 2148f18fdb45f31ca269a7787fbc24053cd42e70 upstream. + +VT switch back/forth from console to xserver (for example) has potential +to go horribly wrong if a dynamic DP MST connector ends up in the saved +modeset that is restored when switching back to fbcon. + +When removing a dynamic connector, don't forget to clean up the saved +state. 
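
The hazard being fixed is a dangling-pointer one: struct drm_mode_set holds raw connector pointers, so destroying an MST connector without scrubbing every saved modeset leaves stale pointers for the next restore to trip over. The cleanup below uses the usual remove-and-compact pattern for a small pointer array; a generic sketch of that pattern (plain C for illustration, not the DRM code itself):

	#include <stddef.h>
	#include <string.h>

	/* Drop 'victim' from arr[0..*count), shifting the tail down. */
	static void remove_ptr(void **arr, size_t *count, void *victim)
	{
		size_t i;

		for (i = 0; i < *count; i++) {
			if (arr[i] != victim)
				continue;
			memmove(&arr[i], &arr[i + 1],
				(*count - i - 1) * sizeof(*arr));
			(*count)--;
			return;
		}
	}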
+ +v1: original +v2: null out set->fb if no more connectors to avoid making i915 cranky + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1184968 +Signed-off-by: Rob Clark +Signed-off-by: Dave Airlie +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/drm_fb_helper.c | 30 ++++++++++++++++++++++++++++++ + 1 file changed, 30 insertions(+) + +--- a/drivers/gpu/drm/drm_fb_helper.c ++++ b/drivers/gpu/drm/drm_fb_helper.c +@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(stru + } + EXPORT_SYMBOL(drm_fb_helper_add_one_connector); + ++static void remove_from_modeset(struct drm_mode_set *set, ++ struct drm_connector *connector) ++{ ++ int i, j; ++ ++ for (i = 0; i < set->num_connectors; i++) { ++ if (set->connectors[i] == connector) ++ break; ++ } ++ ++ if (i == set->num_connectors) ++ return; ++ ++ for (j = i + 1; j < set->num_connectors; j++) { ++ set->connectors[j - 1] = set->connectors[j]; ++ } ++ set->num_connectors--; ++ ++ /* because i915 is pissy about this.. ++ * TODO maybe need to makes sure we set it back to !=NULL somewhere? ++ */ ++ if (set->num_connectors == 0) ++ set->fb = NULL; ++} ++ + int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) + { +@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(s + } + fb_helper->connector_count--; + kfree(fb_helper_connector); ++ ++ /* also cleanup dangling references to the connector: */ ++ for (i = 0; i < fb_helper->crtc_count; i++) ++ remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector); ++ + return 0; + } + EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); diff --git a/queue-3.18/drm-radeon-restore-gart-table-contents-after-pinning-it-in-vram-v3.patch b/queue-3.18/drm-radeon-restore-gart-table-contents-after-pinning-it-in-vram-v3.patch new file mode 100644 index 00000000000..0eed0fa02e1 --- /dev/null +++ b/queue-3.18/drm-radeon-restore-gart-table-contents-after-pinning-it-in-vram-v3.patch @@ -0,0 +1,60 @@ +From 5636d2f842c7bd7800002868ead3d6b809d385a0 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Michel=20D=C3=A4nzer?= +Date: Thu, 22 Jan 2015 18:58:46 +0900 +Subject: drm/radeon: Restore GART table contents after pinning it in VRAM v3 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: =?UTF-8?q?Michel=20D=C3=A4nzer?= + +commit 5636d2f842c7bd7800002868ead3d6b809d385a0 upstream. + +The GART table BO has to be moved out of VRAM for suspend/resume. Any +updates to the GART table during that time were silently dropped without +this change. This caused GPU lockups on resume in some cases, see the bug +reports referenced below. + +This might also make GPU reset more robust in some cases, as we no longer +rely on the GART table in VRAM being preserved across the GPU +lockup/reset. 
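
The fix depends on the driver keeping a CPU-side copy of every GART entry (the pages_entry[] array introduced by the companion "Split off gart_get_page_entry" patch queued below), so the table in VRAM can simply be rebuilt after the BO is pinned back. Schematically (condensed from the diff that follows, not a separate implementation):

	/* Replay the CPU-side shadow into the freshly pinned table. */
	for (i = 0; i < rdev->gart.num_gpu_pages; i++)
		radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
	mb();			/* order the table writes before the flush */
	radeon_gart_tlb_flush(rdev);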
+ +v2: Add logic to radeon_gart_table_vram_pin directly instead of + reinstating radeon_gart_restore +v3: Move code after assignment of rdev->gart.table_addr so that the GART + TLB flush can work as intended, add code comment explaining why we're + doing this + +Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=85204 +Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=86267 +Reviewed-by: Christian König +Signed-off-by: Michel Dänzer +Signed-off-by: Alex Deucher +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/radeon/radeon_gart.c | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +--- a/drivers/gpu/drm/radeon/radeon_gart.c ++++ b/drivers/gpu/drm/radeon/radeon_gart.c +@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct ra + radeon_bo_unpin(rdev->gart.robj); + radeon_bo_unreserve(rdev->gart.robj); + rdev->gart.table_addr = gpu_addr; ++ ++ if (!r) { ++ int i; ++ ++ /* We might have dropped some GART table updates while it wasn't ++ * mapped, restore all entries ++ */ ++ for (i = 0; i < rdev->gart.num_gpu_pages; i++) ++ radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]); ++ mb(); ++ radeon_gart_tlb_flush(rdev); ++ } ++ + return r; + } + diff --git a/queue-3.18/drm-radeon-split-off-gart_get_page_entry-asic-hook-from-set_page_entry.patch b/queue-3.18/drm-radeon-split-off-gart_get_page_entry-asic-hook-from-set_page_entry.patch new file mode 100644 index 00000000000..27813cdc7fc --- /dev/null +++ b/queue-3.18/drm-radeon-split-off-gart_get_page_entry-asic-hook-from-set_page_entry.patch @@ -0,0 +1,552 @@ +From cb65890610dca287718a63bd8a5d9ce3dc80c3d7 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Michel=20D=C3=A4nzer?= +Date: Wed, 21 Jan 2015 17:36:35 +0900 +Subject: drm/radeon: Split off gart_get_page_entry ASIC hook from set_page_entry +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: =?UTF-8?q?Michel=20D=C3=A4nzer?= + +commit cb65890610dca287718a63bd8a5d9ce3dc80c3d7 upstream. + +get_page_entry calculates the GART page table entry, which is just written +to the GART page table by set_page_entry. + +This is a prerequisite for the following fix. 
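
In other words, the new hook factors the entry computation out of the entry write, which lets the driver remember each computed entry and replay it later (as the restore fix above does). In outline (illustrative only, simplified from the diff below):

	/* before: compute and write fused into one hook */
	radeon_gart_set_page(rdev, i, addr, flags);

	/* after: compute once, keep a CPU-side shadow, write if mapped */
	entry = radeon_gart_get_page_entry(addr, flags);
	rdev->gart.pages_entry[i] = entry;
	if (rdev->gart.ptr)
		radeon_gart_set_page(rdev, i, entry);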
+ +Reviewed-by: Christian König +Signed-off-by: Michel Dänzer +Signed-off-by: Alex Deucher +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/radeon/r100.c | 10 +++++++- + drivers/gpu/drm/radeon/r300.c | 16 +++++++++----- + drivers/gpu/drm/radeon/radeon.h | 8 +++++-- + drivers/gpu/drm/radeon/radeon_asic.c | 24 +++++++++++++++++++++ + drivers/gpu/drm/radeon/radeon_asic.h | 12 +++++++--- + drivers/gpu/drm/radeon/radeon_device.c | 2 + + drivers/gpu/drm/radeon/radeon_gart.c | 37 ++++++++++++++++++++------------- + drivers/gpu/drm/radeon/rs400.c | 14 ++++++++---- + drivers/gpu/drm/radeon/rs600.c | 14 ++++++++---- + 9 files changed, 100 insertions(+), 37 deletions(-) + +--- a/drivers/gpu/drm/radeon/r100.c ++++ b/drivers/gpu/drm/radeon/r100.c +@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_dev + return r; + rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; + rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; ++ rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; + rdev->asic->gart.set_page = &r100_pci_gart_set_page; + return radeon_gart_table_ram_alloc(rdev); + } +@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon + WREG32(RADEON_AIC_HI_ADDR, 0); + } + ++uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags) ++{ ++ return addr; ++} ++ + void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, +- uint64_t addr, uint32_t flags) ++ uint64_t entry) + { + u32 *gtt = rdev->gart.ptr; +- gtt[i] = cpu_to_le32(lower_32_bits(addr)); ++ gtt[i] = cpu_to_le32(lower_32_bits(entry)); + } + + void r100_pci_gart_fini(struct radeon_device *rdev) +--- a/drivers/gpu/drm/radeon/r300.c ++++ b/drivers/gpu/drm/radeon/r300.c +@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct ra + #define R300_PTE_WRITEABLE (1 << 2) + #define R300_PTE_READABLE (1 << 3) + +-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, +- uint64_t addr, uint32_t flags) ++uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags) + { +- void __iomem *ptr = rdev->gart.ptr; +- + addr = (lower_32_bits(addr) >> 8) | + ((upper_32_bits(addr) & 0xff) << 24); + if (flags & RADEON_GART_PAGE_READ) +@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct rad + addr |= R300_PTE_WRITEABLE; + if (!(flags & RADEON_GART_PAGE_SNOOP)) + addr |= R300_PTE_UNSNOOPED; ++ return addr; ++} ++ ++void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, ++ uint64_t entry) ++{ ++ void __iomem *ptr = rdev->gart.ptr; ++ + /* on x86 we want this to be CPU endian, on powerpc + * on powerpc without HW swappers, it'll get swapped on way + * into VRAM - so no need for cpu_to_le32 on VRAM tables */ +- writel(addr, ((void __iomem *)ptr) + (i * 4)); ++ writel(entry, ((void __iomem *)ptr) + (i * 4)); + } + + int rv370_pcie_gart_init(struct radeon_device *rdev) +@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_d + DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); + rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; + rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; ++ rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; + rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; + return radeon_gart_table_vram_alloc(rdev); + } +--- a/drivers/gpu/drm/radeon/radeon.h ++++ b/drivers/gpu/drm/radeon/radeon.h +@@ -245,6 +245,7 @@ bool radeon_get_bios(struct radeon_devic + * Dummy page + */ + struct radeon_dummy_page { ++ uint64_t entry; + struct page *page; + dma_addr_t addr; + }; +@@ -626,6 +627,7 @@ struct radeon_gart { + 
unsigned table_size; + struct page **pages; + dma_addr_t *pages_addr; ++ uint64_t *pages_entry; + bool ready; + }; + +@@ -1819,8 +1821,9 @@ struct radeon_asic { + /* gart */ + struct { + void (*tlb_flush)(struct radeon_device *rdev); ++ uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags); + void (*set_page)(struct radeon_device *rdev, unsigned i, +- uint64_t addr, uint32_t flags); ++ uint64_t entry); + } gart; + struct { + int (*init)(struct radeon_device *rdev); +@@ -2818,7 +2821,8 @@ static inline void radeon_ring_write(str + #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) + #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) + #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) +-#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f)) ++#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f)) ++#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e)) + #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) + #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) + #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) +--- a/drivers/gpu/drm/radeon/radeon_asic.c ++++ b/drivers/gpu/drm/radeon/radeon_asic.c +@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_de + DRM_INFO("Forcing AGP to PCIE mode\n"); + rdev->flags |= RADEON_IS_PCIE; + rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; ++ rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; + rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; + } else { + DRM_INFO("Forcing AGP to PCI mode\n"); + rdev->flags |= RADEON_IS_PCI; + rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; ++ rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; + rdev->asic->gart.set_page = &r100_pci_gart_set_page; + } + rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; +@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = { + .mc_wait_for_idle = &r100_mc_wait_for_idle, + .gart = { + .tlb_flush = &r100_pci_gart_tlb_flush, ++ .get_page_entry = &r100_pci_gart_get_page_entry, + .set_page = &r100_pci_gart_set_page, + }, + .ring = { +@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = { + .mc_wait_for_idle = &r100_mc_wait_for_idle, + .gart = { + .tlb_flush = &r100_pci_gart_tlb_flush, ++ .get_page_entry = &r100_pci_gart_get_page_entry, + .set_page = &r100_pci_gart_set_page, + }, + .ring = { +@@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = { + .mc_wait_for_idle = &r300_mc_wait_for_idle, + .gart = { + .tlb_flush = &r100_pci_gart_tlb_flush, ++ .get_page_entry = &r100_pci_gart_get_page_entry, + .set_page = &r100_pci_gart_set_page, + }, + .ring = { +@@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie + .mc_wait_for_idle = &r300_mc_wait_for_idle, + .gart = { + .tlb_flush = &rv370_pcie_gart_tlb_flush, ++ .get_page_entry = &rv370_pcie_gart_get_page_entry, + .set_page = &rv370_pcie_gart_set_page, + }, + .ring = { +@@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = { + .mc_wait_for_idle = &r300_mc_wait_for_idle, + .gart = { + .tlb_flush = &rv370_pcie_gart_tlb_flush, ++ .get_page_entry = &rv370_pcie_gart_get_page_entry, + .set_page = &rv370_pcie_gart_set_page, + }, + .ring = { +@@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = { + .mc_wait_for_idle = &rs400_mc_wait_for_idle, + .gart = { + .tlb_flush = &rs400_gart_tlb_flush, ++ 
.get_page_entry = &rs400_gart_get_page_entry, + .set_page = &rs400_gart_set_page, + }, + .ring = { +@@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = { + .mc_wait_for_idle = &rs600_mc_wait_for_idle, + .gart = { + .tlb_flush = &rs600_gart_tlb_flush, ++ .get_page_entry = &rs600_gart_get_page_entry, + .set_page = &rs600_gart_set_page, + }, + .ring = { +@@ -691,6 +700,7 @@ static struct radeon_asic rs690_asic = { + .mc_wait_for_idle = &rs690_mc_wait_for_idle, + .gart = { + .tlb_flush = &rs400_gart_tlb_flush, ++ .get_page_entry = &rs400_gart_get_page_entry, + .set_page = &rs400_gart_set_page, + }, + .ring = { +@@ -759,6 +769,7 @@ static struct radeon_asic rv515_asic = { + .mc_wait_for_idle = &rv515_mc_wait_for_idle, + .gart = { + .tlb_flush = &rv370_pcie_gart_tlb_flush, ++ .get_page_entry = &rv370_pcie_gart_get_page_entry, + .set_page = &rv370_pcie_gart_set_page, + }, + .ring = { +@@ -825,6 +836,7 @@ static struct radeon_asic r520_asic = { + .mc_wait_for_idle = &r520_mc_wait_for_idle, + .gart = { + .tlb_flush = &rv370_pcie_gart_tlb_flush, ++ .get_page_entry = &rv370_pcie_gart_get_page_entry, + .set_page = &rv370_pcie_gart_set_page, + }, + .ring = { +@@ -919,6 +931,7 @@ static struct radeon_asic r600_asic = { + .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .gart = { + .tlb_flush = &r600_pcie_gart_tlb_flush, ++ .get_page_entry = &rs600_gart_get_page_entry, + .set_page = &rs600_gart_set_page, + }, + .ring = { +@@ -1004,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = { + .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .gart = { + .tlb_flush = &r600_pcie_gart_tlb_flush, ++ .get_page_entry = &rs600_gart_get_page_entry, + .set_page = &rs600_gart_set_page, + }, + .ring = { +@@ -1095,6 +1109,7 @@ static struct radeon_asic rs780_asic = { + .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .gart = { + .tlb_flush = &r600_pcie_gart_tlb_flush, ++ .get_page_entry = &rs600_gart_get_page_entry, + .set_page = &rs600_gart_set_page, + }, + .ring = { +@@ -1199,6 +1214,7 @@ static struct radeon_asic rv770_asic = { + .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .gart = { + .tlb_flush = &r600_pcie_gart_tlb_flush, ++ .get_page_entry = &rs600_gart_get_page_entry, + .set_page = &rs600_gart_set_page, + }, + .ring = { +@@ -1317,6 +1333,7 @@ static struct radeon_asic evergreen_asic + .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .gart = { + .tlb_flush = &evergreen_pcie_gart_tlb_flush, ++ .get_page_entry = &rs600_gart_get_page_entry, + .set_page = &rs600_gart_set_page, + }, + .ring = { +@@ -1409,6 +1426,7 @@ static struct radeon_asic sumo_asic = { + .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .gart = { + .tlb_flush = &evergreen_pcie_gart_tlb_flush, ++ .get_page_entry = &rs600_gart_get_page_entry, + .set_page = &rs600_gart_set_page, + }, + .ring = { +@@ -1500,6 +1518,7 @@ static struct radeon_asic btc_asic = { + .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .gart = { + .tlb_flush = &evergreen_pcie_gart_tlb_flush, ++ .get_page_entry = &rs600_gart_get_page_entry, + .set_page = &rs600_gart_set_page, + }, + .ring = { +@@ -1635,6 +1654,7 @@ static struct radeon_asic cayman_asic = + .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .gart = { + .tlb_flush = &cayman_pcie_gart_tlb_flush, ++ .get_page_entry = &rs600_gart_get_page_entry, + .set_page = &rs600_gart_set_page, + }, + .vm = { +@@ -1738,6 +1758,7 @@ static struct radeon_asic trinity_asic = + .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .gart = { + .tlb_flush = 
&cayman_pcie_gart_tlb_flush, ++ .get_page_entry = &rs600_gart_get_page_entry, + .set_page = &rs600_gart_set_page, + }, + .vm = { +@@ -1871,6 +1892,7 @@ static struct radeon_asic si_asic = { + .get_gpu_clock_counter = &si_get_gpu_clock_counter, + .gart = { + .tlb_flush = &si_pcie_gart_tlb_flush, ++ .get_page_entry = &rs600_gart_get_page_entry, + .set_page = &rs600_gart_set_page, + }, + .vm = { +@@ -2032,6 +2054,7 @@ static struct radeon_asic ci_asic = { + .get_gpu_clock_counter = &cik_get_gpu_clock_counter, + .gart = { + .tlb_flush = &cik_pcie_gart_tlb_flush, ++ .get_page_entry = &rs600_gart_get_page_entry, + .set_page = &rs600_gart_set_page, + }, + .vm = { +@@ -2139,6 +2162,7 @@ static struct radeon_asic kv_asic = { + .get_gpu_clock_counter = &cik_get_gpu_clock_counter, + .gart = { + .tlb_flush = &cik_pcie_gart_tlb_flush, ++ .get_page_entry = &rs600_gart_get_page_entry, + .set_page = &rs600_gart_set_page, + }, + .vm = { +--- a/drivers/gpu/drm/radeon/radeon_asic.h ++++ b/drivers/gpu/drm/radeon/radeon_asic.h +@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_de + int r100_asic_reset(struct radeon_device *rdev); + u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); + void r100_pci_gart_tlb_flush(struct radeon_device *rdev); ++uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags); + void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, +- uint64_t addr, uint32_t flags); ++ uint64_t entry); + void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); + int r100_irq_set(struct radeon_device *rdev); + int r100_irq_process(struct radeon_device *rdev); +@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct + struct radeon_fence *fence); + extern int r300_cs_parse(struct radeon_cs_parser *p); + extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); ++extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags); + extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, +- uint64_t addr, uint32_t flags); ++ uint64_t entry); + extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); + extern int rv370_get_pcie_lanes(struct radeon_device *rdev); + extern void r300_set_reg_safe(struct radeon_device *rdev); +@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_dev + extern int rs400_suspend(struct radeon_device *rdev); + extern int rs400_resume(struct radeon_device *rdev); + void rs400_gart_tlb_flush(struct radeon_device *rdev); ++uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags); + void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, +- uint64_t addr, uint32_t flags); ++ uint64_t entry); + uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); + void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); + int rs400_gart_init(struct radeon_device *rdev); +@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_devi + void rs600_irq_disable(struct radeon_device *rdev); + u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); + void rs600_gart_tlb_flush(struct radeon_device *rdev); ++uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags); + void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, +- uint64_t addr, uint32_t flags); ++ uint64_t entry); + uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); + void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); + void rs600_bandwidth_update(struct radeon_device *rdev); +--- 
a/drivers/gpu/drm/radeon/radeon_device.c ++++ b/drivers/gpu/drm/radeon/radeon_device.c +@@ -743,6 +743,8 @@ int radeon_dummy_page_init(struct radeon + rdev->dummy_page.page = NULL; + return -ENOMEM; + } ++ rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr, ++ RADEON_GART_PAGE_DUMMY); + return 0; + } + +--- a/drivers/gpu/drm/radeon/radeon_gart.c ++++ b/drivers/gpu/drm/radeon/radeon_gart.c +@@ -228,7 +228,6 @@ void radeon_gart_unbind(struct radeon_de + unsigned t; + unsigned p; + int i, j; +- u64 page_base; + + if (!rdev->gart.ready) { + WARN(1, "trying to unbind memory from uninitialized GART !\n"); +@@ -240,13 +239,12 @@ void radeon_gart_unbind(struct radeon_de + if (rdev->gart.pages[p]) { + rdev->gart.pages[p] = NULL; + rdev->gart.pages_addr[p] = rdev->dummy_page.addr; +- page_base = rdev->gart.pages_addr[p]; + for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { ++ rdev->gart.pages_entry[t] = rdev->dummy_page.entry; + if (rdev->gart.ptr) { +- radeon_gart_set_page(rdev, t, page_base, +- RADEON_GART_PAGE_DUMMY); ++ radeon_gart_set_page(rdev, t, ++ rdev->dummy_page.entry); + } +- page_base += RADEON_GPU_PAGE_SIZE; + } + } + } +@@ -274,7 +272,7 @@ int radeon_gart_bind(struct radeon_devic + { + unsigned t; + unsigned p; +- uint64_t page_base; ++ uint64_t page_base, page_entry; + int i, j; + + if (!rdev->gart.ready) { +@@ -287,12 +285,14 @@ int radeon_gart_bind(struct radeon_devic + for (i = 0; i < pages; i++, p++) { + rdev->gart.pages_addr[p] = dma_addr[i]; + rdev->gart.pages[p] = pagelist[i]; +- if (rdev->gart.ptr) { +- page_base = rdev->gart.pages_addr[p]; +- for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { +- radeon_gart_set_page(rdev, t, page_base, flags); +- page_base += RADEON_GPU_PAGE_SIZE; ++ page_base = dma_addr[i]; ++ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { ++ page_entry = radeon_gart_get_page_entry(page_base, flags); ++ rdev->gart.pages_entry[t] = page_entry; ++ if (rdev->gart.ptr) { ++ radeon_gart_set_page(rdev, t, page_entry); + } ++ page_base += RADEON_GPU_PAGE_SIZE; + } + } + mb(); +@@ -340,10 +340,17 @@ int radeon_gart_init(struct radeon_devic + radeon_gart_fini(rdev); + return -ENOMEM; + } ++ rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) * ++ rdev->gart.num_gpu_pages); ++ if (rdev->gart.pages_entry == NULL) { ++ radeon_gart_fini(rdev); ++ return -ENOMEM; ++ } + /* set GART entry to point to the dummy page by default */ +- for (i = 0; i < rdev->gart.num_cpu_pages; i++) { ++ for (i = 0; i < rdev->gart.num_cpu_pages; i++) + rdev->gart.pages_addr[i] = rdev->dummy_page.addr; +- } ++ for (i = 0; i < rdev->gart.num_gpu_pages; i++) ++ rdev->gart.pages_entry[i] = rdev->dummy_page.entry; + return 0; + } + +@@ -356,15 +363,17 @@ int radeon_gart_init(struct radeon_devic + */ + void radeon_gart_fini(struct radeon_device *rdev) + { +- if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) { ++ if (rdev->gart.ready) { + /* unbind pages */ + radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); + } + rdev->gart.ready = false; + vfree(rdev->gart.pages); + vfree(rdev->gart.pages_addr); ++ vfree(rdev->gart.pages_entry); + rdev->gart.pages = NULL; + rdev->gart.pages_addr = NULL; ++ rdev->gart.pages_entry = NULL; + + radeon_dummy_page_fini(rdev); + } +--- a/drivers/gpu/drm/radeon/rs400.c ++++ b/drivers/gpu/drm/radeon/rs400.c +@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_devic + #define RS400_PTE_WRITEABLE (1 << 2) + #define RS400_PTE_READABLE (1 << 3) + +-void rs400_gart_set_page(struct 
radeon_device *rdev, unsigned i, +- uint64_t addr, uint32_t flags) ++uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags) + { + uint32_t entry; +- u32 *gtt = rdev->gart.ptr; + + entry = (lower_32_bits(addr) & PAGE_MASK) | + ((upper_32_bits(addr) & 0xff) << 4); +@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_d + entry |= RS400_PTE_WRITEABLE; + if (!(flags & RADEON_GART_PAGE_SNOOP)) + entry |= RS400_PTE_UNSNOOPED; +- entry = cpu_to_le32(entry); +- gtt[i] = entry; ++ return entry; ++} ++ ++void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, ++ uint64_t entry) ++{ ++ u32 *gtt = rdev->gart.ptr; ++ gtt[i] = cpu_to_le32(lower_32_bits(entry)); + } + + int rs400_mc_wait_for_idle(struct radeon_device *rdev) +--- a/drivers/gpu/drm/radeon/rs600.c ++++ b/drivers/gpu/drm/radeon/rs600.c +@@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeo + radeon_gart_table_vram_free(rdev); + } + +-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, +- uint64_t addr, uint32_t flags) ++uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags) + { +- void __iomem *ptr = (void *)rdev->gart.ptr; +- + addr = addr & 0xFFFFFFFFFFFFF000ULL; + addr |= R600_PTE_SYSTEM; + if (flags & RADEON_GART_PAGE_VALID) +@@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_d + addr |= R600_PTE_WRITEABLE; + if (flags & RADEON_GART_PAGE_SNOOP) + addr |= R600_PTE_SNOOPED; +- writeq(addr, ptr + (i * 8)); ++ return addr; ++} ++ ++void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, ++ uint64_t entry) ++{ ++ void __iomem *ptr = (void *)rdev->gart.ptr; ++ writeq(entry, ptr + (i * 8)); + } + + int rs600_irq_set(struct radeon_device *rdev) diff --git a/queue-3.18/drm-vmwgfx-replace-the-hw-mutex-with-a-hw-spinlock.patch b/queue-3.18/drm-vmwgfx-replace-the-hw-mutex-with-a-hw-spinlock.patch new file mode 100644 index 00000000000..a43d861650d --- /dev/null +++ b/queue-3.18/drm-vmwgfx-replace-the-hw-mutex-with-a-hw-spinlock.patch @@ -0,0 +1,523 @@ +From 496eb6fd2c3fd13f4b914e537598e5c86ce4f52a Mon Sep 17 00:00:00 2001 +From: Thomas Hellstrom +Date: Wed, 14 Jan 2015 02:33:39 -0800 +Subject: drm/vmwgfx: Replace the hw mutex with a hw spinlock + +From: Thomas Hellstrom + +commit 496eb6fd2c3fd13f4b914e537598e5c86ce4f52a upstream. + +Fixes a case where we call vmw_fifo_idle() from within a wait function with +task state !TASK_RUNNING, which is illegal. + +In addition, make the locking fine-grained, so that it is performed once +for every read- and write operation. This is of course more costly, but we +don't perform much register access in the timing critical paths anyway. Instead +we have the extra benefit of being sure that we don't forget the hw lock around +register accesses. I think currently the kms code was quite buggy w r t this. 
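
The bug class is worth spelling out: vmw_fifo_idle() is called from a wait loop after the task state has been set to something other than TASK_RUNNING, and taking a mutex there may sleep, which is illegal in that state. A spinlock never sleeps, so it is safe in that context; and since the SVGA device is programmed through an index/value port pair, the two port writes must not interleave between CPUs anyway. A condensed sketch of the accessor pattern the patch adopts (see the diff below for the real version):

	static inline void vmw_write(struct vmw_private *dev_priv,
				     unsigned int offset, u32 value)
	{
		unsigned long irq_flags;

		/* one lock per access: keep the index/value pair atomic */
		spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
		outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
		outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
		spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
	}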
+ +This fixes Red Hat Bugzilla Bug 1180796 + +Signed-off-by: Thomas Hellstrom +Reviewed-by: Jakob Bornecrantz +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 28 ++++---------------------- + drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 25 +++++++++++++++++++---- + drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 18 +---------------- + drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 36 ++++++++++++++-------------------- + drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 8 +++---- + drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 25 ++++++++--------------- + drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2 - + 7 files changed, 56 insertions(+), 86 deletions(-) + +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_priva + if (unlikely(ret != 0)) + --dev_priv->num_3d_resources; + } else if (unhide_svga) { +- mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_ENABLE, + vmw_read(dev_priv, SVGA_REG_ENABLE) & + ~SVGA_REG_ENABLE_HIDE); +- mutex_unlock(&dev_priv->hw_mutex); + } + + mutex_unlock(&dev_priv->release_mutex); +@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_priv + mutex_lock(&dev_priv->release_mutex); + if (unlikely(--dev_priv->num_3d_resources == 0)) + vmw_release_device(dev_priv); +- else if (hide_svga) { +- mutex_lock(&dev_priv->hw_mutex); ++ else if (hide_svga) + vmw_write(dev_priv, SVGA_REG_ENABLE, + vmw_read(dev_priv, SVGA_REG_ENABLE) | + SVGA_REG_ENABLE_HIDE); +- mutex_unlock(&dev_priv->hw_mutex); +- } + + n3d = (int32_t) dev_priv->num_3d_resources; + mutex_unlock(&dev_priv->release_mutex); +@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_de + dev_priv->dev = dev; + dev_priv->vmw_chipset = chipset; + dev_priv->last_read_seqno = (uint32_t) -100; +- mutex_init(&dev_priv->hw_mutex); + mutex_init(&dev_priv->cmdbuf_mutex); + mutex_init(&dev_priv->release_mutex); + mutex_init(&dev_priv->binding_mutex); + rwlock_init(&dev_priv->resource_lock); + ttm_lock_init(&dev_priv->reservation_sem); ++ spin_lock_init(&dev_priv->hw_lock); ++ spin_lock_init(&dev_priv->waiter_lock); ++ spin_lock_init(&dev_priv->cap_lock); + + for (i = vmw_res_context; i < vmw_res_max; ++i) { + idr_init(&dev_priv->res_idr[i]); +@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_de + + dev_priv->enable_fb = enable_fbdev; + +- mutex_lock(&dev_priv->hw_mutex); +- + vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); + svga_id = vmw_read(dev_priv, SVGA_REG_ID); + if (svga_id != SVGA_ID_2) { + ret = -ENOSYS; + DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); +- mutex_unlock(&dev_priv->hw_mutex); + goto out_err0; + } + +@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_de + dev_priv->prim_bb_mem = dev_priv->vram_size; + + ret = vmw_dma_masks(dev_priv); +- if (unlikely(ret != 0)) { +- mutex_unlock(&dev_priv->hw_mutex); ++ if (unlikely(ret != 0)) + goto out_err0; +- } + + /* + * Limit back buffer size to VRAM size. 
Remove this once +@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_de + if (dev_priv->prim_bb_mem > dev_priv->vram_size) + dev_priv->prim_bb_mem = dev_priv->vram_size; + +- mutex_unlock(&dev_priv->hw_mutex); +- + vmw_print_capabilities(dev_priv->capabilities); + + if (dev_priv->capabilities & SVGA_CAP_GMR2) { +@@ -1161,9 +1151,7 @@ static int vmw_master_set(struct drm_dev + if (unlikely(ret != 0)) + return ret; + vmw_kms_save_vga(dev_priv); +- mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_TRACES, 0); +- mutex_unlock(&dev_priv->hw_mutex); + } + + if (active) { +@@ -1197,9 +1185,7 @@ out_no_active_lock: + if (!dev_priv->enable_fb) { + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv, true); +- mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_TRACES, 1); +- mutex_unlock(&dev_priv->hw_mutex); + } + return ret; + } +@@ -1234,9 +1220,7 @@ static void vmw_master_drop(struct drm_d + DRM_ERROR("Unable to clean VRAM on master drop.\n"); + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv, true); +- mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_TRACES, 1); +- mutex_unlock(&dev_priv->hw_mutex); + } + + dev_priv->active_master = &dev_priv->fbdev_master; +@@ -1368,10 +1352,8 @@ static void vmw_pm_complete(struct devic + struct drm_device *dev = pci_get_drvdata(pdev); + struct vmw_private *dev_priv = vmw_priv(dev); + +- mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); + (void) vmw_read(dev_priv, SVGA_REG_ID); +- mutex_unlock(&dev_priv->hw_mutex); + + /** + * Reclaim 3d reference held by fbdev and potentially +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +@@ -399,7 +399,8 @@ struct vmw_private { + uint32_t memory_size; + bool has_gmr; + bool has_mob; +- struct mutex hw_mutex; ++ spinlock_t hw_lock; ++ spinlock_t cap_lock; + + /* + * VGA registers. +@@ -449,8 +450,9 @@ struct vmw_private { + atomic_t marker_seq; + wait_queue_head_t fence_queue; + wait_queue_head_t fifo_queue; +- int fence_queue_waiters; /* Protected by hw_mutex */ +- int goal_queue_waiters; /* Protected by hw_mutex */ ++ spinlock_t waiter_lock; ++ int fence_queue_waiters; /* Protected by waiter_lock */ ++ int goal_queue_waiters; /* Protected by waiter_lock */ + atomic_t fifo_queue_waiters; + uint32_t last_read_seqno; + spinlock_t irq_lock; +@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_mas + return (struct vmw_master *) master->driver_priv; + } + ++/* ++ * The locking here is fine-grained, so that it is performed once ++ * for every read- and write operation. This is of course costly, but we ++ * don't perform much register access in the timing critical paths anyway. ++ * Instead we have the extra benefit of being sure that we don't forget ++ * the hw lock around register accesses. 
++ */ + static inline void vmw_write(struct vmw_private *dev_priv, + unsigned int offset, uint32_t value) + { ++ unsigned long irq_flags; ++ ++ spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); + outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); + outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); ++ spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); + } + + static inline uint32_t vmw_read(struct vmw_private *dev_priv, + unsigned int offset) + { +- uint32_t val; ++ unsigned long irq_flags; ++ u32 val; + ++ spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); + outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); + val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); ++ spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); ++ + return val; + } + +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +@@ -35,7 +35,7 @@ struct vmw_fence_manager { + struct vmw_private *dev_priv; + spinlock_t lock; + struct list_head fence_list; +- struct work_struct work, ping_work; ++ struct work_struct work; + u32 user_fence_size; + u32 fence_size; + u32 event_fence_action_size; +@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timelin + return "svga"; + } + +-static void vmw_fence_ping_func(struct work_struct *work) +-{ +- struct vmw_fence_manager *fman = +- container_of(work, struct vmw_fence_manager, ping_work); +- +- vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC); +-} +- + static bool vmw_fence_enable_signaling(struct fence *f) + { + struct vmw_fence_obj *fence = +@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(s + if (seqno - fence->base.seqno < VMW_FENCE_WRAP) + return false; + +- if (mutex_trylock(&dev_priv->hw_mutex)) { +- vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC); +- mutex_unlock(&dev_priv->hw_mutex); +- } else +- schedule_work(&fman->ping_work); ++ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); + + return true; + } +@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_mana + INIT_LIST_HEAD(&fman->fence_list); + INIT_LIST_HEAD(&fman->cleanup_list); + INIT_WORK(&fman->work, &vmw_fence_work_func); +- INIT_WORK(&fman->ping_work, &vmw_fence_ping_func); + fman->fifo_down = true; + fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); + fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); +@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct v + bool lists_empty; + + (void) cancel_work_sync(&fman->work); +- (void) cancel_work_sync(&fman->ping_work); + + spin_lock_irqsave(&fman->lock, irq_flags); + lists_empty = list_empty(&fman->fence_list) && +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private + if (!dev_priv->has_mob) + return false; + +- mutex_lock(&dev_priv->hw_mutex); ++ spin_lock(&dev_priv->cap_lock); + vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); + result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); +- mutex_unlock(&dev_priv->hw_mutex); ++ spin_unlock(&dev_priv->cap_lock); + + return (result != 0); + } +@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *de + DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); + DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); + +- mutex_lock(&dev_priv->hw_mutex); + dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); + dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); + dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); +@@ -143,7 +142,6 @@ int vmw_fifo_init(struct 
vmw_private *de + mb(); + + vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); +- mutex_unlock(&dev_priv->hw_mutex); + + max = ioread32(fifo_mem + SVGA_FIFO_MAX); + min = ioread32(fifo_mem + SVGA_FIFO_MIN); +@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *de + return vmw_fifo_send_fence(dev_priv, &dummy); + } + +-void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason) ++void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) + { + __le32 __iomem *fifo_mem = dev_priv->mmio_virt; ++ static DEFINE_SPINLOCK(ping_lock); ++ unsigned long irq_flags; + ++ /* ++ * The ping_lock is needed because we don't have an atomic ++ * test-and-set of the SVGA_FIFO_BUSY register. ++ */ ++ spin_lock_irqsave(&ping_lock, irq_flags); + if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { + iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); + vmw_write(dev_priv, SVGA_REG_SYNC, reason); + } +-} +- +-void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) +-{ +- mutex_lock(&dev_priv->hw_mutex); +- +- vmw_fifo_ping_host_locked(dev_priv, reason); +- +- mutex_unlock(&dev_priv->hw_mutex); ++ spin_unlock_irqrestore(&ping_lock, irq_flags); + } + + void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) + { + __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + +- mutex_lock(&dev_priv->hw_mutex); +- + vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); + while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) + ; +@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private + vmw_write(dev_priv, SVGA_REG_TRACES, + dev_priv->traces_state); + +- mutex_unlock(&dev_priv->hw_mutex); + vmw_marker_queue_takedown(&fifo->marker_queue); + + if (likely(fifo->static_buffer != NULL)) { +@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_priv + return vmw_fifo_wait_noirq(dev_priv, bytes, + interruptible, timeout); + +- mutex_lock(&dev_priv->hw_mutex); ++ spin_lock(&dev_priv->waiter_lock); + if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { + spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); + outl(SVGA_IRQFLAG_FIFO_PROGRESS, +@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_priv + vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); + spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); + } +- mutex_unlock(&dev_priv->hw_mutex); ++ spin_unlock(&dev_priv->waiter_lock); + + if (interruptible) + ret = wait_event_interruptible_timeout +@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_priv + else if (likely(ret > 0)) + ret = 0; + +- mutex_lock(&dev_priv->hw_mutex); ++ spin_lock(&dev_priv->waiter_lock); + if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { + spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); + dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; + vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); + spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); + } +- mutex_unlock(&dev_priv->hw_mutex); ++ spin_unlock(&dev_priv->waiter_lock); + + return ret; + } +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vm + (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); + compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; + +- mutex_lock(&dev_priv->hw_mutex); ++ spin_lock(&dev_priv->cap_lock); + for (i = 0; i < max_size; ++i) { + vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); + compat_cap->pairs[i][0] = i; + compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); + } +- 
mutex_unlock(&dev_priv->hw_mutex); ++ spin_unlock(&dev_priv->cap_lock); + + return 0; + } +@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_devi + if (num > SVGA3D_DEVCAP_MAX) + num = SVGA3D_DEVCAP_MAX; + +- mutex_lock(&dev_priv->hw_mutex); ++ spin_lock(&dev_priv->cap_lock); + for (i = 0; i < num; ++i) { + vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); + *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); + } +- mutex_unlock(&dev_priv->hw_mutex); ++ spin_unlock(&dev_priv->cap_lock); + } else if (gb_objects) { + ret = vmw_fill_compat_cap(dev_priv, bounce, size); + if (unlikely(ret != 0)) +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, voi + + static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) + { +- uint32_t busy; + +- mutex_lock(&dev_priv->hw_mutex); +- busy = vmw_read(dev_priv, SVGA_REG_BUSY); +- mutex_unlock(&dev_priv->hw_mutex); +- +- return (busy == 0); ++ return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0); + } + + void vmw_update_seqno(struct vmw_private *dev_priv, +@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private + + void vmw_seqno_waiter_add(struct vmw_private *dev_priv) + { +- mutex_lock(&dev_priv->hw_mutex); ++ spin_lock(&dev_priv->waiter_lock); + if (dev_priv->fence_queue_waiters++ == 0) { + unsigned long irq_flags; + +@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_pri + vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); + spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); + } +- mutex_unlock(&dev_priv->hw_mutex); ++ spin_unlock(&dev_priv->waiter_lock); + } + + void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) + { +- mutex_lock(&dev_priv->hw_mutex); ++ spin_lock(&dev_priv->waiter_lock); + if (--dev_priv->fence_queue_waiters == 0) { + unsigned long irq_flags; + +@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_ + vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); + spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); + } +- mutex_unlock(&dev_priv->hw_mutex); ++ spin_unlock(&dev_priv->waiter_lock); + } + + + void vmw_goal_waiter_add(struct vmw_private *dev_priv) + { +- mutex_lock(&dev_priv->hw_mutex); ++ spin_lock(&dev_priv->waiter_lock); + if (dev_priv->goal_queue_waiters++ == 0) { + unsigned long irq_flags; + +@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_priv + vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); + spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); + } +- mutex_unlock(&dev_priv->hw_mutex); ++ spin_unlock(&dev_priv->waiter_lock); + } + + void vmw_goal_waiter_remove(struct vmw_private *dev_priv) + { +- mutex_lock(&dev_priv->hw_mutex); ++ spin_lock(&dev_priv->waiter_lock); + if (--dev_priv->goal_queue_waiters == 0) { + unsigned long irq_flags; + +@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_p + vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); + spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); + } +- mutex_unlock(&dev_priv->hw_mutex); ++ spin_unlock(&dev_priv->waiter_lock); + } + + int vmw_wait_seqno(struct vmw_private *dev_priv, +@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device + if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) + return; + +- mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); +- mutex_unlock(&dev_priv->hw_mutex); + + status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); + outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); +--- 
a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_conne + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_display_unit *du = vmw_connector_to_du(connector); + +- mutex_lock(&dev_priv->hw_mutex); + num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); +- mutex_unlock(&dev_priv->hw_mutex); + + return ((vmw_connector_to_du(connector)->unit < num_displays && + du->pref_active) ? diff --git a/queue-3.18/nl80211-fix-per-station-group-key-get-del-and-memory-leak.patch b/queue-3.18/nl80211-fix-per-station-group-key-get-del-and-memory-leak.patch new file mode 100644 index 00000000000..833b3fe8ef8 --- /dev/null +++ b/queue-3.18/nl80211-fix-per-station-group-key-get-del-and-memory-leak.patch @@ -0,0 +1,59 @@ +From 0fa7b39131576dd1baa6ca17fca53c65d7f62249 Mon Sep 17 00:00:00 2001 +From: Johannes Berg +Date: Fri, 23 Jan 2015 11:10:12 +0100 +Subject: nl80211: fix per-station group key get/del and memory leak + +From: Johannes Berg + +commit 0fa7b39131576dd1baa6ca17fca53c65d7f62249 upstream. + +In case userspace attempts to obtain key information for or delete a +unicast key, this is currently erroneously rejected unless the driver +sets the WIPHY_FLAG_IBSS_RSN flag. Apparently enough drivers do so it +was never noticed. + +Fix that, and while at it fix a potential memory leak: the error path +in the get_key() function was placed after allocating a message but +didn't free it - move it to a better place. Luckily admin permissions +are needed to call this operation. + +Fixes: e31b82136d1ad ("cfg80211/mac80211: allow per-station GTKs") +Signed-off-by: Johannes Berg +Signed-off-by: Greg Kroah-Hartman + +--- + net/wireless/nl80211.c | 9 ++++----- + 1 file changed, 4 insertions(+), 5 deletions(-) + +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -2805,6 +2805,9 @@ static int nl80211_get_key(struct sk_buf + if (!rdev->ops->get_key) + return -EOPNOTSUPP; + ++ if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) ++ return -ENOENT; ++ + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; +@@ -2824,10 +2827,6 @@ static int nl80211_get_key(struct sk_buf + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr)) + goto nla_put_failure; + +- if (pairwise && mac_addr && +- !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) +- return -ENOENT; +- + err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie, + get_key_callback); + +@@ -2998,7 +2997,7 @@ static int nl80211_del_key(struct sk_buf + wdev_lock(dev->ieee80211_ptr); + err = nl80211_key_allowed(dev->ieee80211_ptr); + +- if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr && ++ if (key.type == NL80211_KEYTYPE_GROUP && mac_addr && + !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) + err = -ENOENT; + diff --git a/queue-3.18/pinctrl-at91-allow-to-have-disabled-gpio-bank.patch b/queue-3.18/pinctrl-at91-allow-to-have-disabled-gpio-bank.patch new file mode 100644 index 00000000000..56b1e97c989 --- /dev/null +++ b/queue-3.18/pinctrl-at91-allow-to-have-disabled-gpio-bank.patch @@ -0,0 +1,257 @@ +From a0b957f306fa4d0a39f4ffe5e5e25e856e6be46e Mon Sep 17 00:00:00 2001 +From: Jean-Christophe PLAGNIOL-VILLARD +Date: Fri, 16 Jan 2015 16:31:05 +0100 +Subject: pinctrl: at91: allow to have disabled gpio bank + +From: Jean-Christophe PLAGNIOL-VILLARD + +commit a0b957f306fa4d0a39f4ffe5e5e25e856e6be46e upstream. 
+
+Today we expect that all the banks are enabled: the number of banks
+used by the pinctrl driver is counted from the enabled ones instead of
+being taken from the id of the last enabled bank.
+
+So switch to the latter, set up the chained IRQ at runtime based on the
+enabled banks, and at probe time wait only for the gpio controllers
+that are actually enabled.
+
+Signed-off-by: Jean-Christophe PLAGNIOL-VILLARD
+Signed-off-by: Ludovic Desroches
+Acked-by: Nicolas Ferre
+Signed-off-by: Linus Walleij
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/pinctrl/pinctrl-at91.c |  108 ++++++++++++++++++++---------------------
+ 1 file changed, 55 insertions(+), 53 deletions(-)
+
+--- a/drivers/pinctrl/pinctrl-at91.c
++++ b/drivers/pinctrl/pinctrl-at91.c
+@@ -179,7 +179,7 @@ struct at91_pinctrl {
+ 	struct device		*dev;
+ 	struct pinctrl_dev	*pctl;
+ 
+-	int			nbanks;
++	int			nactive_banks;
+ 
+ 	uint32_t		*mux_mask;
+ 	int			nmux;
+@@ -655,12 +655,18 @@ static int pin_check_config(struct at91_
+ 	int mux;
+ 
+ 	/* check if it's a valid config */
+-	if (pin->bank >= info->nbanks) {
++	if (pin->bank >= gpio_banks) {
+ 		dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n",
+-			name, index, pin->bank, info->nbanks);
++			name, index, pin->bank, gpio_banks);
+ 		return -EINVAL;
+ 	}
+ 
++	if (!gpio_chips[pin->bank]) {
++		dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n",
++			name, index, pin->bank);
++		return -ENXIO;
++	}
++
+ 	if (pin->pin >= MAX_NB_GPIO_PER_BANK) {
+ 		dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n",
+ 			name, index, pin->pin, MAX_NB_GPIO_PER_BANK);
+@@ -983,7 +989,8 @@ static void at91_pinctrl_child_count(str
+ 
+ 	for_each_child_of_node(np, child) {
+ 		if (of_device_is_compatible(child, gpio_compat)) {
+-			info->nbanks++;
++			if (of_device_is_available(child))
++				info->nactive_banks++;
+ 		} else {
+ 			info->nfunctions++;
+ 			info->ngroups += of_get_child_count(child);
+@@ -1005,11 +1012,11 @@ static int at91_pinctrl_mux_mask(struct
+ 	}
+ 
+ 	size /= sizeof(*list);
+-	if (!size || size % info->nbanks) {
+-		dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks);
++	if (!size || size % gpio_banks) {
++		dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks);
+ 		return -EINVAL;
+ 	}
+-	info->nmux = size / info->nbanks;
++	info->nmux = size / gpio_banks;
+ 
+ 	info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL);
+ 	if (!info->mux_mask) {
+@@ -1133,7 +1140,7 @@ static int at91_pinctrl_probe_dt(struct
+ 		of_match_device(at91_pinctrl_of_match, &pdev->dev)->data;
+ 	at91_pinctrl_child_count(info, np);
+ 
+-	if (info->nbanks < 1) {
++	if (gpio_banks < 1) {
+ 		dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
+ 		return -EINVAL;
+ 	}
+@@ -1146,7 +1153,7 @@ static int at91_pinctrl_probe_dt(struct
+ 
+ 	dev_dbg(&pdev->dev, "mux-mask\n");
+ 	tmp = info->mux_mask;
+-	for (i = 0; i < info->nbanks; i++) {
++	for (i = 0; i < gpio_banks; i++) {
+ 		for (j = 0; j < info->nmux; j++, tmp++) {
+ 			dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]);
+ 		}
+@@ -1164,7 +1171,7 @@ static int at91_pinctrl_probe_dt(struct
+ 	if (!info->groups)
+ 		return -ENOMEM;
+ 
+-	dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks);
++	dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks);
+ 	dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
+ 	dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
+ 
+@@ -1187,7 +1194,7 @@ static int at91_pinctrl_probe(struct pla
+ {
+ 	struct at91_pinctrl *info;
+ 	struct pinctrl_pin_desc *pdesc;
+-	int ret, i, j, k;
++	int ret, i, j, k, ngpio_chips_enabled = 0;
+ 
+ 	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ 	if (!info)
+@@ -1202,23 +1209,27 @@ static int at91_pinctrl_probe(struct pla
+ 	 * to obtain references to the struct gpio_chip * for them, and we
+ 	 * need this to proceed.
+ 	 */
+-	for (i = 0; i < info->nbanks; i++) {
+-		if (!gpio_chips[i]) {
+-			dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
+-			devm_kfree(&pdev->dev, info);
+-			return -EPROBE_DEFER;
+-		}
++	for (i = 0; i < gpio_banks; i++)
++		if (gpio_chips[i])
++			ngpio_chips_enabled++;
++
++	if (ngpio_chips_enabled < info->nactive_banks) {
++		dev_warn(&pdev->dev,
++			 "All GPIO chips are not registered yet (%d/%d)\n",
++			 ngpio_chips_enabled, info->nactive_banks);
++		devm_kfree(&pdev->dev, info);
++		return -EPROBE_DEFER;
+ 	}
+ 
+ 	at91_pinctrl_desc.name = dev_name(&pdev->dev);
+-	at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK;
++	at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK;
+ 	at91_pinctrl_desc.pins = pdesc =
+ 		devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL);
+ 
+ 	if (!at91_pinctrl_desc.pins)
+ 		return -ENOMEM;
+ 
+-	for (i = 0 , k = 0; i < info->nbanks; i++) {
++	for (i = 0, k = 0; i < gpio_banks; i++) {
+ 		for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {
+ 			pdesc->number = k;
+ 			pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j);
+@@ -1236,8 +1247,9 @@ static int at91_pinctrl_probe(struct pla
+ 	}
+ 
+ 	/* We will handle a range of GPIO pins */
+-	for (i = 0; i < info->nbanks; i++)
+-		pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
++	for (i = 0; i < gpio_banks; i++)
++		if (gpio_chips[i])
++			pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
+ 
+ 	dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");
+ 
+@@ -1614,9 +1626,10 @@ static void gpio_irq_handler(unsigned ir
+ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
+ 				  struct at91_gpio_chip *at91_gpio)
+ {
++	struct gpio_chip	*gpiochip_prev = NULL;
+ 	struct at91_gpio_chip   *prev = NULL;
+ 	struct irq_data		*d = irq_get_irq_data(at91_gpio->pioc_virq);
+-	int ret;
++	int ret, i;
+ 
+ 	at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
+ 
+@@ -1642,24 +1655,33 @@ static int at91_gpio_of_irq_setup(struct
+ 		return ret;
+ 	}
+ 
+-	/* Setup chained handler */
+-	if (at91_gpio->pioc_idx)
+-		prev = gpio_chips[at91_gpio->pioc_idx - 1];
+-
+ 	/* The top level handler handles one bank of GPIOs, except
+ 	 * on some SoC it can handle up to three...
+ 	 * We only set up the handler for the first of the list.
+ 	 */
+-	if (prev && prev->next == at91_gpio)
++	gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
++	if (!gpiochip_prev) {
++		/* Then register the chain on the parent IRQ */
++		gpiochip_set_chained_irqchip(&at91_gpio->chip,
++					     &gpio_irqchip,
++					     at91_gpio->pioc_virq,
++					     gpio_irq_handler);
+ 		return 0;
++	}
+ 
+-	/* Then register the chain on the parent IRQ */
+-	gpiochip_set_chained_irqchip(&at91_gpio->chip,
+-				     &gpio_irqchip,
+-				     at91_gpio->pioc_virq,
+-				     gpio_irq_handler);
++	prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip);
+ 
+-	return 0;
++	/* we can only have 2 banks before */
++	for (i = 0; i < 2; i++) {
++		if (prev->next) {
++			prev = prev->next;
++		} else {
++			prev->next = at91_gpio;
++			return 0;
++		}
++	}
++
++	return -EINVAL;
+ }
+ 
+ /* This structure is replicated for each GPIO block allocated at probe time */
+@@ -1676,24 +1698,6 @@ static struct gpio_chip at91_gpio_templa
+ 	.ngpio			= MAX_NB_GPIO_PER_BANK,
+ };
+ 
+-static void at91_gpio_probe_fixup(void)
+-{
+-	unsigned i;
+-	struct at91_gpio_chip *at91_gpio, *last = NULL;
+-
+-	for (i = 0; i < gpio_banks; i++) {
+-		at91_gpio = gpio_chips[i];
+-
+-		/*
+-		 * GPIO controller are grouped on some SoC:
+-		 * PIOC, PIOD and PIOE can share the same IRQ line
+-		 */
+-		if (last && last->pioc_virq == at91_gpio->pioc_virq)
+-			last->next = at91_gpio;
+-		last = at91_gpio;
+-	}
+-}
+-
+ static struct of_device_id at91_gpio_of_match[] = {
+ 	{ .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
+ 	{ .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
+@@ -1806,8 +1810,6 @@ static int at91_gpio_probe(struct platfo
+ 	gpio_chips[alias_idx] = at91_chip;
+ 	gpio_banks = max(gpio_banks, alias_idx + 1);
+ 
+-	at91_gpio_probe_fixup();
+-
+ 	ret = at91_gpio_of_irq_setup(pdev, at91_chip);
+ 	if (ret)
+ 		goto irq_setup_err;
diff --git a/queue-3.18/regulator-core-fix-race-condition-in-regulator_put.patch b/queue-3.18/regulator-core-fix-race-condition-in-regulator_put.patch
new file mode 100644
index 00000000000..26d3fd07fb6
--- /dev/null
+++ b/queue-3.18/regulator-core-fix-race-condition-in-regulator_put.patch
@@ -0,0 +1,55 @@
+From 83b0302d347a49f951e904184afe57ac3723476e Mon Sep 17 00:00:00 2001
+From: Ashay Jaiswal
+Date: Thu, 8 Jan 2015 18:54:25 +0530
+Subject: regulator: core: fix race condition in regulator_put()
+
+From: Ashay Jaiswal
+
+commit 83b0302d347a49f951e904184afe57ac3723476e upstream.
+
+The regulator framework maintains a list of consumer regulators
+for a regulator device and protects it from concurrent access using
+the regulator device's mutex lock.
+
+In the case of regulator_put() the consumer is removed and the
+regulator device's parameters are updated without holding the regulator
+device's mutex. This leads to a race between regulator_put() and any
+function which traverses the consumer list or modifies the device's
+parameters. Fix the race by holding the regulator device's mutex in
+regulator_put().
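+
+As an illustration only (simplified names and plain pthreads rather
+than the regulator API; this sketch is not part of the upstream
+commit), the pattern of the fix is to take the same mutex around the
+unlink that the list walkers already hold:
+
+	#include <pthread.h>
+	#include <stdio.h>
+
+	struct consumer {
+		struct consumer *next;
+		const char *name;
+	};
+
+	static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
+	static struct consumer *consumers;	/* consumer list head */
+
+	/* Unlinking without dev_mutex would let a concurrent walker
+	 * observe a half-updated list; locking here serializes the
+	 * removal with every walker below.
+	 */
+	static void consumer_put(struct consumer *c)
+	{
+		struct consumer **pp;
+
+		pthread_mutex_lock(&dev_mutex);		/* the fix */
+		for (pp = &consumers; *pp; pp = &(*pp)->next) {
+			if (*pp == c) {
+				*pp = c->next;		/* unlink */
+				break;
+			}
+		}
+		pthread_mutex_unlock(&dev_mutex);
+	}
+
+	static void consumer_walk(void)
+	{
+		struct consumer *c;
+
+		pthread_mutex_lock(&dev_mutex);
+		for (c = consumers; c; c = c->next)
+			printf("%s\n", c->name);
+		pthread_mutex_unlock(&dev_mutex);
+	}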
+
+Signed-off-by: Ashay Jaiswal
+Signed-off-by: Mark Brown
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/regulator/core.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1488,7 +1488,7 @@ struct regulator *regulator_get_optional
+ }
+ EXPORT_SYMBOL_GPL(regulator_get_optional);
+ 
+-/* Locks held by regulator_put() */
++/* regulator_list_mutex lock held by regulator_put() */
+ static void _regulator_put(struct regulator *regulator)
+ {
+ 	struct regulator_dev *rdev;
+@@ -1503,12 +1503,14 @@ static void _regulator_put(struct regula
+ 	/* remove any sysfs entries */
+ 	if (regulator->dev)
+ 		sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
++	mutex_lock(&rdev->mutex);
+ 	kfree(regulator->supply_name);
+ 	list_del(&regulator->list);
+ 	kfree(regulator);
+ 
+ 	rdev->open_count--;
+ 	rdev->exclusive = 0;
++	mutex_unlock(&rdev->mutex);
+ 
+ 	module_put(rdev->owner);
+ }
diff --git a/queue-3.18/series b/queue-3.18/series
index 4737c3a5428..7434e71600e 100644
--- a/queue-3.18/series
+++ b/queue-3.18/series
@@ -30,3 +30,16 @@ nfs-fix-dio-deadlock-when-o_direct-flag-is-flipped.patch
 nfsv4.1-fix-an-oops-in-nfs41_walk_client_list.patch
 mac80211-properly-set-cck-flag-in-radiotap.patch
 mac80211-only-roll-back-station-states-for-wds-when-suspending.patch
+nl80211-fix-per-station-group-key-get-del-and-memory-leak.patch
+pinctrl-at91-allow-to-have-disabled-gpio-bank.patch
+arm-mvebu-don-t-set-the-pl310-in-i-o-coherency-mode-when-i-o-coherency-is-disabled.patch
+dm-thin-don-t-allow-messages-to-be-sent-to-a-pool-target-in-read_only-or-fail-mode.patch
+dm-cache-fix-missing-err_ptr-returns-and-handling.patch
+drm-vmwgfx-replace-the-hw-mutex-with-a-hw-spinlock.patch
+drm-radeon-split-off-gart_get_page_entry-asic-hook-from-set_page_entry.patch
+drm-radeon-restore-gart-table-contents-after-pinning-it-in-vram-v3.patch
+spi-pxa2xx-clear-cur_chip-pointer-before-starting-next-message.patch
+drivers-rtc-rtc-s5m.c-terminate-s5m_rtc_id-array-with-empty-element.patch
+regulator-core-fix-race-condition-in-regulator_put.patch
+drivers-net-cpsw-discard-dual-emac-default-vlan-configuration.patch
+drm-fix-fb-helper-vs-mst-dangling-connector-ptrs-v2.patch
diff --git a/queue-3.18/spi-pxa2xx-clear-cur_chip-pointer-before-starting-next-message.patch b/queue-3.18/spi-pxa2xx-clear-cur_chip-pointer-before-starting-next-message.patch
new file mode 100644
index 00000000000..fc5b812ab09
--- /dev/null
+++ b/queue-3.18/spi-pxa2xx-clear-cur_chip-pointer-before-starting-next-message.patch
@@ -0,0 +1,79 @@
+From c957e8f084e0d21febcd6b8a0ea9631eccc92f36 Mon Sep 17 00:00:00 2001
+From: Mika Westerberg
+Date: Mon, 29 Dec 2014 10:33:36 +0200
+Subject: spi/pxa2xx: Clear cur_chip pointer before starting next message
+
+From: Mika Westerberg
+
+commit c957e8f084e0d21febcd6b8a0ea9631eccc92f36 upstream.
+
+Once the current message is finished, the driver notifies the SPI core
+about this by calling spi_finalize_current_message(). This function
+queues the next message to be transferred. If there are more messages
+in the queue, it is possible that the driver is asked to transfer the
+next message at this point.
+
+When spi_finalize_current_message() returns, the driver clears the
+drv_data->cur_chip pointer to NULL. The problem is that if the driver
+has already started the next message, clearing drv_data->cur_chip will
+cause a NULL pointer dereference which crashes the kernel like:
+
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000048
+ IP: [] cs_deassert+0x18/0x70 [spi_pxa2xx_platform]
+ PGD 78bb8067 PUD 37712067 PMD 0
+ Oops: 0000 [#1] SMP
+ Modules linked in:
+ CPU: 1 PID: 11 Comm: ksoftirqd/1 Tainted: G           O   3.18.0-rc4-mjo #5
+ Hardware name: Intel Corp. VALLEYVIEW B3 PLATFORM/NOTEBOOK, BIOS MNW2CRB1.X64.0071.R30.1408131301 08/13/2014
+ task: ffff880077f9f290 ti: ffff88007a820000 task.ti: ffff88007a820000
+ RIP: 0010:[]  [] cs_deassert+0x18/0x70 [spi_pxa2xx_platform]
+ RSP: 0018:ffff88007a823d08  EFLAGS: 00010202
+ RAX: 0000000000000008 RBX: ffff8800379a4430 RCX: 0000000000000026
+ RDX: 0000000000000000 RSI: 0000000000000246 RDI: ffff8800379a4430
+ RBP: ffff88007a823d18 R08: 00000000ffffffff R09: 000000007a9bc65a
+ R10: 000000000000028f R11: 0000000000000005 R12: ffff880070123e98
+ R13: ffff880070123de8 R14: 0000000000000100 R15: ffffc90004888000
+ FS:  0000000000000000(0000) GS:ffff880079a80000(0000) knlGS:0000000000000000
+ CS:  0010 DS: 0000 ES: 0000 CR0: 000000008005003b
+ CR2: 0000000000000048 CR3: 000000007029b000 CR4: 00000000001007e0
+ Stack:
+  ffff88007a823d58 ffff8800379a4430 ffff88007a823d48 ffffffffa0022c89
+  0000000000000000 ffff8800379a4430 0000000000000000 0000000000000006
+  ffff88007a823da8 ffffffffa0023be0 ffff88007a823dd8 ffffffff81076204
+ Call Trace:
+  [] giveback+0x69/0xa0 [spi_pxa2xx_platform]
+  [] pump_transfers+0x710/0x740 [spi_pxa2xx_platform]
+  [] ? pick_next_task_fair+0x744/0x830
+  [] tasklet_action+0xa9/0xe0
+  [] __do_softirq+0xee/0x280
+  [] run_ksoftirqd+0x20/0x40
+  [] smpboot_thread_fn+0xff/0x1b0
+  [] ? SyS_setgroups+0x150/0x150
+  [] kthread+0xcd/0xf0
+  [] ? kthread_create_on_node+0x180/0x180
+  [] ret_from_fork+0x7c/0xb0
+
+Fix this by clearing drv_data->cur_chip before we call
+spi_finalize_current_message().
+
+Reported-by: Martin Oldfield
+Signed-off-by: Mika Westerberg
+Acked-by: Robert Jarzmik
+Signed-off-by: Mark Brown
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/spi/spi-pxa2xx.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -402,8 +402,8 @@ static void giveback(struct driver_data
+ 		cs_deassert(drv_data);
+ 	}
+ 
+-	spi_finalize_current_message(drv_data->master);
+ 	drv_data->cur_chip = NULL;
++	spi_finalize_current_message(drv_data->master);
+ }
+ 
+ static void reset_sccr1(struct driver_data *drv_data)
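
To see the ordering rule the spi/pxa2xx fix relies on in isolation,
here is a minimal compilable sketch in plain C; driver_data, chip and
finalize_current_message() are invented stand-ins, not the driver's
real API:

	#include <stddef.h>

	struct chip { int cs; };

	struct driver_data {
		struct chip *cur_chip;	/* per-message state */
	};

	/* Stand-in for spi_finalize_current_message(): in the real
	 * driver it may immediately schedule the next message (even
	 * from another context), which re-populates cur_chip.
	 */
	static void finalize_current_message(struct driver_data *drv_data)
	{
		(void)drv_data;
	}

	static void giveback(struct driver_data *drv_data)
	{
		/* Reset per-message state first, then let the core
		 * queue the next message. In the reverse order, a
		 * finalize that starts the next message at once would
		 * have its cur_chip wiped by the line below.
		 */
		drv_data->cur_chip = NULL;
		finalize_current_message(drv_data);
	}

	int main(void)
	{
		struct driver_data drv_data = { .cur_chip = NULL };

		giveback(&drv_data);
		return 0;
	}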