* AMD IOMMU hardware only supports 512 IRTEs despite
* the architectural limit of 2048 entries.
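* The DTE IntTabLen field encodes the table size as a power of two,
* so the value 9 below selects 2^9 = 512 entries.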
*/
-#define DTE_INTTAB_ALIGNMENT 128
#define DTE_INTTABLEN_VALUE 9ULL
#define DTE_INTTABLEN (DTE_INTTABLEN_VALUE << 1)
#define DTE_INTTABLEN_MASK (0xfULL << 1)
/* IVRS indicates that pre-boot remapping was enabled */
extern bool amdr_ivrs_remap_support;
-/* kmem_cache to get tables with 128 byte alignement */
-extern struct kmem_cache *amd_iommu_irq_cache;
-
#define PCI_SBDF_TO_SEGID(sbdf) (((sbdf) >> 16) & 0xffff)
#define PCI_SBDF_TO_DEVID(sbdf) ((sbdf) & 0xffff)
#define PCI_SEG_DEVID_TO_SBDF(seg, devid) ((((u32)(seg) & 0xffff) << 16) | \
					   ((devid) & 0xffff))
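/* e.g. segment 0x0001 and devid 0x00f8 combine to sbdf 0x000100f8 */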
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
-#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
static void __init free_iommu_resources(void)
{
- kmem_cache_destroy(amd_iommu_irq_cache);
- amd_iommu_irq_cache = NULL;
-
free_iommu_all();
free_pci_segments();
}
static int __init early_amd_iommu_init(void)
{
struct acpi_table_header *ivrs_base;
- int remap_cache_sz, ret;
+ int ret;
acpi_status status;
if (!amd_iommu_detected)
if (amd_iommu_irq_remap) {
struct amd_iommu_pci_seg *pci_seg;
- /*
- * Interrupt remapping enabled, create kmem_cache for the
- * remapping tables.
- */
ret = -ENOMEM;
- if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
- remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
- else
- remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
- amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
- remap_cache_sz,
- DTE_INTTAB_ALIGNMENT,
- 0, NULL);
- if (!amd_iommu_irq_cache)
- goto out;
-
for_each_pci_segment(pci_seg) {
if (alloc_irq_lookup_table(pci_seg))
goto out;
*/
DEFINE_IDA(pdom_ids);
-struct kmem_cache *amd_iommu_irq_cache;
-
static int amd_iommu_attach_device(struct iommu_domain *dom,
struct device *dev);
return table;
}
-static struct irq_remap_table *__alloc_irq_table(void)
+static struct irq_remap_table *__alloc_irq_table(int nid, int order)
{
struct irq_remap_table *table;
if (!table)
return NULL;
- table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
+ table->table = iommu_alloc_pages_node(nid, GFP_KERNEL, order);
if (!table->table) {
kfree(table);
return NULL;
}
raw_spin_lock_init(&table->lock);
- if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
- memset(table->table, 0,
- MAX_IRQS_PER_TABLE * sizeof(u32));
- else
- memset(table->table, 0,
- (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
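	/* iommu_alloc_pages_node() returns zeroed pages, so the table needs no explicit memset(). */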
return table;
}
return 0;
}
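/*
 * A legacy IRTE is 32 bits wide; with guest virtual APIC (GA) mode each
 * IRTE is 128 bits, so a full 512-entry table takes 2 KiB or 8 KiB.
 */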
+static inline size_t get_irq_table_size(unsigned int max_irqs)
+{
+ if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+ return max_irqs * sizeof(u32);
+
+ return max_irqs * (sizeof(u64) * 2);
+}
+
static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
u16 devid, struct pci_dev *pdev)
{
struct irq_remap_table *new_table = NULL;
struct amd_iommu_pci_seg *pci_seg;
unsigned long flags;
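	/*
	 * The table is a single page-order allocation, placed on the
	 * IOMMU's NUMA node when known so it stays local to the IOMMU
	 * that reads it.
	 */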
+ int order = get_order(get_irq_table_size(MAX_IRQS_PER_TABLE));
+ int nid = iommu && iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
u16 alias;
spin_lock_irqsave(&iommu_table_lock, flags);
spin_unlock_irqrestore(&iommu_table_lock, flags);
/* Nothing there yet, allocate new irq remapping table */
- new_table = __alloc_irq_table();
+ new_table = __alloc_irq_table(nid, order);
if (!new_table)
return NULL;
spin_unlock_irqrestore(&iommu_table_lock, flags);
if (new_table) {
- kmem_cache_free(amd_iommu_irq_cache, new_table->table);
+ iommu_free_pages(new_table->table, order);
kfree(new_table);
}
return table;