	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_vunmap);
+
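+/*
+ * Wrapper for the KUnit tests: takes the reservation lock, updates the madvise
+ * status via drm_gem_shmem_madvise_locked() and drops the lock again.
+ */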
+int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
+{
+	struct drm_gem_object *obj = &shmem->base;
+	int ret;
+
+	ret = dma_resv_lock_interruptible(obj->resv, NULL);
+	if (ret)
+		return ret;
+
+	ret = drm_gem_shmem_madvise_locked(shmem, madv);
+	dma_resv_unlock(obj->resv);
+
+	return ret;
+}
+EXPORT_SYMBOL_IF_KUNIT(drm_gem_shmem_madvise);
#endif
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

-	ret = drm_gem_shmem_madvise_locked(shmem, 1);
+	ret = drm_gem_shmem_madvise(shmem, 1);
	KUNIT_EXPECT_TRUE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, 1);

	/* Set madv to a negative value */
-	ret = drm_gem_shmem_madvise_locked(shmem, -1);
+	ret = drm_gem_shmem_madvise(shmem, -1);
	KUNIT_EXPECT_FALSE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, -1);

	/* Check that madv cannot be set back to a positive value */
-	ret = drm_gem_shmem_madvise_locked(shmem, 0);
+	ret = drm_gem_shmem_madvise(shmem, 0);
	KUNIT_EXPECT_FALSE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, -1);
}
	ret = drm_gem_shmem_is_purgeable(shmem);
	KUNIT_EXPECT_FALSE(test, ret);

-	ret = drm_gem_shmem_madvise_locked(shmem, 1);
+	ret = drm_gem_shmem_madvise(shmem, 1);
	KUNIT_EXPECT_TRUE(test, ret);

	/* The scatter/gather table will be freed by drm_gem_shmem_free */
#if IS_ENABLED(CONFIG_KUNIT)
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map);
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map);
+int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv);
#endif
#endif /* __DRM_GEM_SHMEM_HELPER_H__ */