--- /dev/null
+From 57fb907da89977640ef183556a621336c1348fa0 Mon Sep 17 00:00:00 2001
+From: Emil Medve <Emilian.Medve@Freescale.com>
+Date: Wed, 25 Mar 2015 00:28:48 -0500
+Subject: iommu/fsl: Really fix init section(s) content
+
+From: Emil Medve <Emilian.Medve@Freescale.com>
+
+commit 57fb907da89977640ef183556a621336c1348fa0 upstream.
+
+'0f1fb99 iommu/fsl: Fix section mismatch' was intended to address the modpost
+warning and the potential crash. Crash which is actually easy to trigger with an
+'unbind' followed by a 'bind' sequence. The fix is wrong as
+fsl_of_pamu_driver.driver gets added by bus_add_driver() to a couple of
+klist(s) which become invalid/corrupted as soon as the init sections are freed.
+Depending on when/how the init sections storage is reused various/random errors
+and crashes will happen
+
+'cd70d46 iommu/fsl: Various cleanups' contains annotations that go further down
+the wrong path laid by '0f1fb99 iommu/fsl: Fix section mismatch'
+
+Now remove all the incorrect annotations from the above mentioned patches (not
+exactly a revert) and those previously existing in the code. This fixes the
+modpost warning(s), the unbind/bind sequence crashes and the random
+errors/crashes
+
+Fixes: 0f1fb99b62ce ("iommu/fsl: Fix section mismatch")
+Fixes: cd70d4659ff3 ("iommu/fsl: Various cleanups")
+Signed-off-by: Emil Medve <Emilian.Medve@Freescale.com>
+Acked-by: Varun Sethi <Varun.Sethi@freescale.com>
+Tested-by: Madalin Bucur <Madalin.Bucur@freescale.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/fsl_pamu.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/drivers/iommu/fsl_pamu.c
++++ b/drivers/iommu/fsl_pamu.c
+@@ -41,7 +41,6 @@ struct pamu_isr_data {
+
+ static struct paace *ppaact;
+ static struct paace *spaact;
+-static struct ome *omt __initdata;
+
+ /*
+ * Table for matching compatible strings, for device tree
+@@ -50,7 +49,7 @@ static struct ome *omt __initdata;
+ * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
+ * string would be used.
+ */
+-static const struct of_device_id guts_device_ids[] __initconst = {
++static const struct of_device_id guts_device_ids[] = {
+ { .compatible = "fsl,qoriq-device-config-1.0", },
+ { .compatible = "fsl,qoriq-device-config-2.0", },
+ {}
+@@ -599,7 +598,7 @@ found_cpu_node:
+ * Memory accesses to QMAN and BMAN private memory need not be coherent, so
+ * clear the PAACE entry coherency attribute for them.
+ */
+-static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
++static void setup_qbman_paace(struct paace *ppaace, int paace_type)
+ {
+ switch (paace_type) {
+ case QMAN_PAACE:
+@@ -629,7 +628,7 @@ static void __init setup_qbman_paace(str
+ * this table to translate device transaction to appropriate corenet
+ * transaction.
+ */
+-static void __init setup_omt(struct ome *omt)
++static void setup_omt(struct ome *omt)
+ {
+ struct ome *ome;
+
+@@ -666,7 +665,7 @@ static void __init setup_omt(struct ome
+ * Get the maximum number of PAACT table entries
+ * and subwindows supported by PAMU
+ */
+-static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
++static void get_pamu_cap_values(unsigned long pamu_reg_base)
+ {
+ u32 pc_val;
+
+@@ -676,9 +675,9 @@ static void __init get_pamu_cap_values(u
+ }
+
+ /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
+-static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+- phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+- phys_addr_t omt_phys)
++static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
++ phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
++ phys_addr_t omt_phys)
+ {
+ u32 *pc;
+ struct pamu_mmap_regs *pamu_regs;
+@@ -720,7 +719,7 @@ static int __init setup_one_pamu(unsigne
+ }
+
+ /* Enable all device LIODNS */
+-static void __init setup_liodns(void)
++static void setup_liodns(void)
+ {
+ int i, len;
+ struct paace *ppaace;
+@@ -846,7 +845,7 @@ struct ccsr_law {
+ /*
+ * Create a coherence subdomain for a given memory block.
+ */
+-static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
++static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
+ {
+ struct device_node *np;
+ const __be32 *iprop;
+@@ -988,7 +987,7 @@ error:
+ static const struct {
+ u32 svr;
+ u32 port_id;
+-} port_id_map[] __initconst = {
++} port_id_map[] = {
+ {(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */
+ {(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */
+ {(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */
+@@ -1006,7 +1005,7 @@ static const struct {
+
+ #define SVR_SECURITY 0x80000 /* The Security (E) bit */
+
+-static int __init fsl_pamu_probe(struct platform_device *pdev)
++static int fsl_pamu_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+ void __iomem *pamu_regs = NULL;
+@@ -1022,6 +1021,7 @@ static int __init fsl_pamu_probe(struct
+ int irq;
+ phys_addr_t ppaact_phys;
+ phys_addr_t spaact_phys;
++ struct ome *omt;
+ phys_addr_t omt_phys;
+ size_t mem_size = 0;
+ unsigned int order = 0;
+@@ -1200,7 +1200,7 @@ error:
+ return ret;
+ }
+
+-static struct platform_driver fsl_of_pamu_driver __initdata = {
++static struct platform_driver fsl_of_pamu_driver = {
+ .driver = {
+ .name = "fsl-of-pamu",
+ },
--- /dev/null
+From cf27ec930be906e142c752f9161197d69ca534d7 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 11 Aug 2015 16:48:32 +0100
+Subject: iommu/io-pgtable-arm: Unmap and free table when overwriting with block
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit cf27ec930be906e142c752f9161197d69ca534d7 upstream.
+
+When installing a block mapping, we unconditionally overwrite a non-leaf
+PTE if we find one. However, this can cause a problem if the following
+sequence of events occurs:
+
+ (1) iommu_map called for a 4k (i.e. PAGE_SIZE) mapping at some address
+ - We initialise the page table all the way down to a leaf entry
+ - No TLB maintenance is required, because we're going from invalid
+ to valid.
+
+ (2) iommu_unmap is called on the mapping installed in (1)
+ - We walk the page table to the final (leaf) entry and zero it
+ - We only changed a valid leaf entry, so we invalidate leaf-only
+
+ (3) iommu_map is called on the same address as (1), but this time for
+     a 2MB (i.e. BLOCK_SIZE) mapping
+ - We walk the page table down to the penultimate level, where we
+ find a table entry
+ - We overwrite the table entry with a block mapping and return
+ without any TLB maintenance and without freeing the memory used
+ by the now-orphaned table.
+
+This last step can lead to a walk-cache caching the overwritten table
+entry, causing unexpected faults when the new mapping is accessed by a
+device. One way to fix this would be to collapse the page table when
+freeing the last page at a given level, but this would require expensive
+iteration on every map call. Instead, this patch detects the case when
+we are overwriting a table entry and explicitly unmaps the table first,
+which takes care of both freeing and TLB invalidation.
+
+Reported-by: Brian Starkey <brian.starkey@arm.com>
+Tested-by: Brian Starkey <brian.starkey@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/io-pgtable-arm.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+--- a/drivers/iommu/io-pgtable-arm.c
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -200,6 +200,10 @@ typedef u64 arm_lpae_iopte;
+
+ static bool selftest_running = false;
+
++static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
++ unsigned long iova, size_t size, int lvl,
++ arm_lpae_iopte *ptep);
++
+ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, phys_addr_t paddr,
+ arm_lpae_iopte prot, int lvl,
+@@ -207,10 +211,21 @@ static int arm_lpae_init_pte(struct arm_
+ {
+ arm_lpae_iopte pte = prot;
+
+- /* We require an unmap first */
+ if (iopte_leaf(*ptep, lvl)) {
++ /* We require an unmap first */
+ WARN_ON(!selftest_running);
+ return -EEXIST;
++ } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
++ /*
++ * We need to unmap and free the old table before
++ * overwriting it with a block entry.
++ */
++ arm_lpae_iopte *tblp;
++ size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
++
++ tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
++ if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
++ return -EINVAL;
+ }
+
+ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
--- /dev/null
+From 11cec15bf3fb498206ef63b1fa26c27689e02d0e Mon Sep 17 00:00:00 2001
+From: Thierry Reding <treding@nvidia.com>
+Date: Thu, 6 Aug 2015 14:20:31 +0200
+Subject: iommu/tegra-smmu: Parameterize number of TLB lines
+
+From: Thierry Reding <treding@nvidia.com>
+
+commit 11cec15bf3fb498206ef63b1fa26c27689e02d0e upstream.
+
+The number of TLB lines was increased from 16 on Tegra30 to 32 on
+Tegra114 and later. Parameterize the value so that the initial default
+can be set accordingly.
+
+On Tegra30, initializing the value to 32 would effectively disable the
+TLB and hence cause massive latencies for memory accesses translated
+through the SMMU. This is especially noticeable for isochronous clients
+such as display, whose FIFOs would continuously underrun.
+
+Fixes: 891846516317 ("memory: Add NVIDIA Tegra memory controller support")
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/tegra-smmu.c | 9 +++++++--
+ drivers/memory/tegra/tegra114.c | 1 +
+ drivers/memory/tegra/tegra124.c | 1 +
+ drivers/memory/tegra/tegra30.c | 1 +
+ include/soc/tegra/mc.h | 1 +
+ 5 files changed, 11 insertions(+), 2 deletions(-)
+
+--- a/drivers/iommu/tegra-smmu.c
++++ b/drivers/iommu/tegra-smmu.c
+@@ -27,6 +27,7 @@ struct tegra_smmu {
+ const struct tegra_smmu_soc *soc;
+
+ unsigned long pfn_mask;
++ unsigned long tlb_mask;
+
+ unsigned long *asids;
+ struct mutex lock;
+@@ -68,7 +69,8 @@ static inline u32 smmu_readl(struct tegr
+ #define SMMU_TLB_CONFIG 0x14
+ #define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
+ #define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
+-#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)
++#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
++ ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
+
+ #define SMMU_PTC_CONFIG 0x18
+ #define SMMU_PTC_CONFIG_ENABLE (1 << 29)
+@@ -816,6 +818,9 @@ struct tegra_smmu *tegra_smmu_probe(stru
+ smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
+ dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
+ mc->soc->num_address_bits, smmu->pfn_mask);
++ smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
++ dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
++ smmu->tlb_mask);
+
+ value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
+
+@@ -825,7 +830,7 @@ struct tegra_smmu *tegra_smmu_probe(stru
+ smmu_writel(smmu, value, SMMU_PTC_CONFIG);
+
+ value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
+- SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
++ SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
+
+ if (soc->supports_round_robin_arbitration)
+ value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
+--- a/drivers/memory/tegra/tegra114.c
++++ b/drivers/memory/tegra/tegra114.c
+@@ -935,6 +935,7 @@ static const struct tegra_smmu_soc tegra
+ .num_swgroups = ARRAY_SIZE(tegra114_swgroups),
+ .supports_round_robin_arbitration = false,
+ .supports_request_limit = false,
++ .num_tlb_lines = 32,
+ .num_asids = 4,
+ .ops = &tegra114_smmu_ops,
+ };
+--- a/drivers/memory/tegra/tegra124.c
++++ b/drivers/memory/tegra/tegra124.c
+@@ -1023,6 +1023,7 @@ static const struct tegra_smmu_soc tegra
+ .num_swgroups = ARRAY_SIZE(tegra124_swgroups),
+ .supports_round_robin_arbitration = true,
+ .supports_request_limit = true,
++ .num_tlb_lines = 32,
+ .num_asids = 128,
+ .ops = &tegra124_smmu_ops,
+ };
+--- a/drivers/memory/tegra/tegra30.c
++++ b/drivers/memory/tegra/tegra30.c
+@@ -957,6 +957,7 @@ static const struct tegra_smmu_soc tegra
+ .num_swgroups = ARRAY_SIZE(tegra30_swgroups),
+ .supports_round_robin_arbitration = false,
+ .supports_request_limit = false,
++ .num_tlb_lines = 16,
+ .num_asids = 4,
+ .ops = &tegra30_smmu_ops,
+ };
+--- a/include/soc/tegra/mc.h
++++ b/include/soc/tegra/mc.h
+@@ -66,6 +66,7 @@ struct tegra_smmu_soc {
+ bool supports_round_robin_arbitration;
+ bool supports_request_limit;
+
++ unsigned int num_tlb_lines;
+ unsigned int num_asids;
+
+ const struct tegra_smmu_ops *ops;
--- /dev/null
+From 4df4eab168c1c4058603be55a3169d4a45779cc0 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <jroedel@suse.de>
+Date: Tue, 25 Aug 2015 10:54:28 +0200
+Subject: iommu/vt-d: Really use upper context table when necessary
+
+From: Joerg Roedel <jroedel@suse.de>
+
+commit 4df4eab168c1c4058603be55a3169d4a45779cc0 upstream.
+
+There is a bug in iommu_context_addr() which will always use
+the lower context table, even when the upper context table
+needs to be used. Fix this issue.
+
+Fixes: 03ecc32c5274 ("iommu/vt-d: support extended root and context entries")
+Reported-by: Xiao, Nan <nan.xiao@hp.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/intel-iommu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -755,6 +755,7 @@ static inline struct context_entry *iomm
+ struct context_entry *context;
+ u64 *entry;
+
++ entry = &root->lo;
+ if (ecs_enabled(iommu)) {
+ if (devfn >= 0x80) {
+ devfn -= 0x80;
+@@ -762,7 +763,6 @@ static inline struct context_entry *iomm
+ }
+ devfn *= 2;
+ }
+- entry = &root->lo;
+ if (*entry & 1)
+ context = phys_to_virt(*entry & VTD_PAGE_MASK);
+ else {
mmc-sdhci-also-get-preset-value-and-driver-type-for-mmc_ddr52.patch
mmc-sdhci-fix-dma-memory-leak-in-sdhci_pre_req.patch
mmc-core-fix-race-condition-in-mmc_wait_data_done.patch
+iommu-fsl-really-fix-init-section-s-content.patch
+iommu-io-pgtable-arm-unmap-and-free-table-when-overwriting-with-block.patch
+iommu-tegra-smmu-parameterize-number-of-tlb-lines.patch
+iommu-vt-d-really-use-upper-context-table-when-necessary.patch