vfree(ptr);
}
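+/* Run in-bounds and out-of-bounds checks against a vmalloc'ed buffer. */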
+static void vmalloc_oob_helper(struct kunit *test, char *v_ptr, size_t size)
+{
+ /*
+ * We have to be careful not to hit the guard page in vmalloc tests.
+ * The MMU will catch that and crash us.
+ */
+
+ /* Make sure in-bounds accesses are valid. */
+ v_ptr[0] = 0;
+ v_ptr[size - 1] = 0;
+
+ /*
+ * An unaligned access past the requested vmalloc size.
+ * Only generic KASAN can precisely detect these.
+ */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);
+
+ /*
+ * An aligned access into the first out-of-bounds granule. Round up so
+ * the offset stays granule-aligned for any requested size.
+ */
+ size = round_up(size, KASAN_GRANULE_SIZE);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)v_ptr)[size]);
+}
+
static void vmalloc_oob(struct kunit *test)
{
char *v_ptr, *p_ptr;
OPTIMIZER_HIDE_VAR(v_ptr);
- /*
- * We have to be careful not to hit the guard page in vmalloc tests.
- * The MMU will catch that and crash us.
- */
+ vmalloc_oob_helper(test, v_ptr, size);
- /* Make sure in-bounds accesses are valid. */
- v_ptr[0] = 0;
- v_ptr[size - 1] = 0;
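+ /* Shrink the buffer via vrealloc and rerun the checks at the new size. */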
+ size -= KASAN_GRANULE_SIZE + 1;
+ v_ptr = vrealloc(v_ptr, size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
- /*
- * An unaligned access past the requested vmalloc size.
- * Only generic KASAN can precisely detect these.
- */
- if (IS_ENABLED(CONFIG_KASAN_GENERIC))
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);
+ OPTIMIZER_HIDE_VAR(v_ptr);
- /* An aligned access into the first out-of-bounds granule. */
- KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)v_ptr)[size + 5]);
+ vmalloc_oob_helper(test, v_ptr, size);
+
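+ /* Grow the buffer back past its original size and rerun the checks. */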
+ size += 2 * KASAN_GRANULE_SIZE + 2;
+ v_ptr = vrealloc(v_ptr, size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
+ OPTIMIZER_HIDE_VAR(v_ptr);
+
+ vmalloc_oob_helper(test, v_ptr, size);
/* Check that in-bounds accesses to the physical page are valid. */
page = vmalloc_to_page(v_ptr);