[ Upstream commit 47f218d108950984b24af81f66356ceda380eb74 ]
We already have memory in the union here that is being wasted in AMD's
case; use it to store the nid.
Putting the nid here further isolates the io_pgtable code from the struct
protection_domain.
Fix up protection_domain_alloc so that the NID from the device is
provided; at this point dev is never NULL for AMD, so this will now
allocate the first table pointer on the correct NUMA node.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
Link: https://lore.kernel.org/r/8-v2-831cdc4d00f3+1a315-amd_iopgtbl_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Stable-dep-of: 016991606aa0 ("iommu/amd/pgtbl_v2: Take protection domain lock before invalidating TLB")
Signed-off-by: Sasha Levin <sashal@kernel.org>
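
For readers following the change from outside the AMD driver, the sketch
below is a minimal userspace C illustration of the pattern the patch
applies; every name in it is an illustrative stand-in, not the kernel API.
The NUMA node id moves out of the per-domain structure and into the
page-table configuration that the io_pgtable code already carries, and the
caller seeds it from the device at allocation time, falling back to
NUMA_NO_NODE when no device is known.

/*
 * Minimal userspace sketch of the pattern in this patch. The types and
 * helpers below are illustrative stand-ins, not the kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

#define NUMA_NO_NODE (-1)

/* Per-page-table configuration: the nid now lives in the existing union. */
struct pgtbl_cfg {
	union {
		struct { unsigned long quirk[4]; } other_fmt; /* space other formats already use */
		struct { int nid; } amd;                      /* otherwise unused for AMD */
	};
};

/* The per-domain structure no longer carries its own nid. */
struct domain {
	struct pgtbl_cfg cfg;
};

/* Stand-in for dev_to_node(): the device's NUMA node, or NUMA_NO_NODE. */
static int dev_node(const int *dev)
{
	return dev ? *dev : NUMA_NO_NODE;
}

/* Mirrors protection_domain_alloc(type, nid): the caller supplies the nid. */
static struct domain *domain_alloc(int nid)
{
	struct domain *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	d->cfg.amd.nid = nid;
	return d;
}

/* The page-table code reads the node from the cfg, not from the domain. */
static void *alloc_table_page(const struct pgtbl_cfg *cfg)
{
	/* In the kernel this is iommu_alloc_page_node(cfg->amd.nid, gfp). */
	printf("allocating table page on node %d\n", cfg->amd.nid);
	return calloc(1, 4096);
}

int main(void)
{
	int node_of_dev = 1;	/* pretend the device sits on NUMA node 1 */
	struct domain *d = domain_alloc(dev_node(&node_of_dev));

	if (!d)
		return 1;
	free(alloc_table_page(&d->cfg));
	free(d);
	return 0;
}

Running the sketch prints the node the table page would be allocated on;
in the kernel the same flow ends in iommu_alloc_page_node(), as the hunks
below show.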
extern int amd_iommu_gpt_level;
/* Protection domain ops */
-struct protection_domain *protection_domain_alloc(unsigned int type);
+struct protection_domain *protection_domain_alloc(unsigned int type, int nid);
void protection_domain_free(struct protection_domain *domain);
struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
struct mm_struct *mm);
struct amd_io_pgtable iop;
spinlock_t lock; /* mostly used to lock the page table*/
u16 id; /* the domain id written to the device table */
- int nid; /* Node ID */
enum protection_domain_mode pd_mode; /* Track page table type */
bool dirty_tracking; /* dirty tracking is enabled in the domain */
unsigned dev_cnt; /* devices assigned to this domain */
unsigned long address,
gfp_t gfp)
{
+ struct io_pgtable_cfg *cfg = &domain->iop.pgtbl.cfg;
unsigned long flags;
bool ret = true;
u64 *pte;
- pte = iommu_alloc_page_node(domain->nid, gfp);
+ pte = iommu_alloc_page_node(cfg->amd.nid, gfp);
if (!pte)
return false;
gfp_t gfp,
bool *updated)
{
+ struct io_pgtable_cfg *cfg = &domain->iop.pgtbl.cfg;
int level, end_lvl;
u64 *pte, *page;
if (!IOMMU_PTE_PRESENT(__pte) ||
pte_level == PAGE_MODE_NONE) {
- page = iommu_alloc_page_node(domain->nid, gfp);
+ page = iommu_alloc_page_node(cfg->amd.nid, gfp);
if (!page)
return NULL;
{
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
- pgtable->root = iommu_alloc_page(GFP_KERNEL);
+ pgtable->root = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
if (!pgtable->root)
return NULL;
pgtable->mode = PAGE_MODE_3_LEVEL;
while (mapped_size < size) {
map_size = get_alloc_page_size(pgsize);
- pte = v2_alloc_pte(pdom->nid, pdom->iop.pgd,
+ pte = v2_alloc_pte(cfg->amd.nid, pdom->iop.pgd,
iova, map_size, gfp, &updated);
if (!pte) {
ret = -EINVAL;
static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
- struct protection_domain *pdom = (struct protection_domain *)cookie;
int ias = IOMMU_IN_ADDR_BIT_SIZE;
- pgtable->pgd = iommu_alloc_page_node(pdom->nid, GFP_KERNEL);
+ pgtable->pgd = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
if (!pgtable->pgd)
return NULL;
struct protection_domain *domain)
{
struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+ struct io_pgtable_cfg *cfg = &domain->iop.pgtbl.cfg;
int ret = 0;
/* Update data structures */
list_add(&dev_data->list, &domain->dev_list);
/* Update NUMA Node ID */
- if (domain->nid == NUMA_NO_NODE)
- domain->nid = dev_to_node(dev_data->dev);
+ if (cfg->amd.nid == NUMA_NO_NODE)
+ cfg->amd.nid = dev_to_node(dev_data->dev);
/* Do reference counting */
domain->dev_iommu[iommu->index] += 1;
kfree(domain);
}
-struct protection_domain *protection_domain_alloc(unsigned int type)
+struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
{
struct io_pgtable_ops *pgtbl_ops;
struct protection_domain *domain;
spin_lock_init(&domain->lock);
INIT_LIST_HEAD(&domain->dev_list);
INIT_LIST_HEAD(&domain->dev_data_list);
- domain->nid = NUMA_NO_NODE;
+ domain->iop.pgtbl.cfg.amd.nid = nid;
switch (type) {
/* No need to allocate io pgtable ops in passthrough mode */
if (dirty_tracking && !amd_iommu_hd_support(iommu))
return ERR_PTR(-EOPNOTSUPP);
- domain = protection_domain_alloc(type);
+ domain = protection_domain_alloc(type,
+ dev ? dev_to_node(dev) : NUMA_NO_NODE);
if (!domain)
return ERR_PTR(-ENOMEM);
struct protection_domain *pdom;
int ret;
- pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA);
+ pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA, dev_to_node(dev));
if (!pdom)
return ERR_PTR(-ENOMEM);
u64 ttbr[4];
u32 n_ttbrs;
} apple_dart_cfg;
+
+ struct {
+ int nid;
+ } amd;
};
};
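
A note on the final hunk: struct io_pgtable_cfg already ends in a union of
per-formatter configuration blocks (apple_dart_cfg is the member visible in
the context above), so the new amd.nid member occupies space the union
already reserves for the other formats; this is the otherwise wasted memory
the commit message refers to.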