xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC);
for (i = 0; i < num_vmas; i++) {
+ struct xe_vma *vma = vmas[i];
+ struct xe_vma_preferred_loc *loc = &vma->attr.preferred_loc;
+
/* TODO: Extend attributes to bo based vmas */
- if ((vmas[i]->attr.preferred_loc.devmem_fd == op->preferred_mem_loc.devmem_fd &&
- vmas[i]->attr.preferred_loc.migration_policy ==
- op->preferred_mem_loc.migration_policy) ||
- !xe_vma_is_cpu_addr_mirror(vmas[i])) {
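+ /*
+  * Skip invalidation when the requested preferred location already
+  * matches the VMA's current one, or when the VMA is not a CPU
+  * address mirror and so does not carry these attributes yet.
+  */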
- vmas[i]->skip_invalidation = true;
+ if ((loc->devmem_fd == op->preferred_mem_loc.devmem_fd &&
+ loc->migration_policy == op->preferred_mem_loc.migration_policy) ||
+ !xe_vma_is_cpu_addr_mirror(vma)) {
+ vma->skip_invalidation = true;
} else {
- vmas[i]->skip_invalidation = false;
- vmas[i]->attr.preferred_loc.devmem_fd = op->preferred_mem_loc.devmem_fd;
+ vma->skip_invalidation = false;
+ loc->devmem_fd = op->preferred_mem_loc.devmem_fd;
/* Until multi-device support is added, migration_policy
 * is of no use and can be ignored.
 */
- vmas[i]->attr.preferred_loc.migration_policy =
- op->preferred_mem_loc.migration_policy;
- vmas[i]->attr.preferred_loc.dpagemap = NULL;
+ loc->migration_policy = op->preferred_mem_loc.migration_policy;
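+ /* Preference changed: drop the previously resolved dpagemap */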
+ loc->dpagemap = NULL;
}
}
}
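
For reference, the per-VMA decision above can be read as a standalone
predicate plus an apply step. The sketch below is illustrative only and
not part of the patch: the reduced structs (preferred_loc, vma) and the
is_cpu_addr_mirror flag are hypothetical stand-ins for xe_vma,
xe_vma_preferred_loc and xe_vma_is_cpu_addr_mirror().

#include <stdbool.h>
#include <stdint.h>

/* Reduced stand-ins for xe_vma_preferred_loc / xe_vma (illustrative only) */
struct preferred_loc {
	uint32_t devmem_fd;
	uint32_t migration_policy;
	void *dpagemap;
};

struct vma {
	struct preferred_loc loc;
	bool is_cpu_addr_mirror;	/* stand-in for xe_vma_is_cpu_addr_mirror() */
	bool skip_invalidation;
};

/* True when the requested preference matches what the VMA already has */
static bool preferred_loc_unchanged(const struct vma *vma,
				    const struct preferred_loc *req)
{
	return vma->loc.devmem_fd == req->devmem_fd &&
	       vma->loc.migration_policy == req->migration_policy;
}

static void apply_preferred_loc(struct vma *vma,
				const struct preferred_loc *req)
{
	/* No change needed, or attributes do not apply to this VMA type */
	if (preferred_loc_unchanged(vma, req) || !vma->is_cpu_addr_mirror) {
		vma->skip_invalidation = true;
		return;
	}

	vma->skip_invalidation = false;
	vma->loc.devmem_fd = req->devmem_fd;
	vma->loc.migration_policy = req->migration_policy;
	vma->loc.dpagemap = NULL;	/* preference changed: drop stale pagemap */
}

Hoisting the comparison into a named predicate makes it easier to see
that skip_invalidation is purely a function of the requested attributes
and the VMA's current state.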