* specified new range.
*/
void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
- unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags)
+ unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags)
{
vma_iter_set(vmg->vmi, start);
vmg->start = start;
vmg->end = end;
vmg->pgoff = pgoff;
- vmg->vm_flags = vm_flags;
+ vmg->vma_flags = vma_flags;
vmg->just_expand = false;
vmg->__remove_middle = false;
/* Helper function to set both the VMG range and its anon_vma. */
static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start,
- unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags,
+ unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags,
struct anon_vma *anon_vma)
{
- vmg_set_range(vmg, start, end, pgoff, vm_flags);
+ vmg_set_range(vmg, start, end, pgoff, vma_flags);
vmg->anon_vma = anon_vma;
}
*/
static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
struct vma_merge_struct *vmg, unsigned long start,
- unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags,
+ unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags,
bool *was_merged)
{
struct vm_area_struct *merged;
- vmg_set_range(vmg, start, end, pgoff, vm_flags);
+ vmg_set_range(vmg, start, end, pgoff, vma_flags);
merged = merge_new(vmg);
if (merged) {
ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
- return alloc_and_link_vma(mm, start, end, pgoff, vm_flags);
+ return alloc_and_link_vma(mm, start, end, pgoff, vma_flags);
}
static bool test_simple_merge(void)
{
struct vm_area_struct *vma;
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
+ VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
- struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
- struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vm_flags);
+ struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vma_flags);
+ struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vma_flags);
VMA_ITERATOR(vmi, &mm, 0x1000);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
.start = 0x1000,
.end = 0x2000,
- .vm_flags = vm_flags,
+ .vma_flags = vma_flags,
.pgoff = 1,
};
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x3000);
ASSERT_EQ(vma->vm_pgoff, 0);
- ASSERT_EQ(vma->vm_flags, vm_flags);
+ ASSERT_FLAGS_SAME_MASK(&vma->flags, vma_flags);
detach_free_vma(vma);
mtree_destroy(&mm.mm_mt);
static bool test_simple_modify(void)
{
struct vm_area_struct *vma;
-	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
+			VMA_MAYWRITE_BIT);
+	/*
+	 * vma_modify_flags() still takes legacy vm_flags_t. Keep the exact
+	 * value the test has always passed (VM_READ | VM_MAYREAD, i.e. write
+	 * permission dropped) so this type-conversion refactor does not
+	 * change what the modify/split path is asserted against.
+	 */
+	vm_flags_t legacy_flags = VM_READ | VM_MAYREAD;
struct mm_struct mm = {};
- struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
+ struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vma_flags);
VMA_ITERATOR(vmi, &mm, 0x1000);
- vm_flags_t flags = VM_READ | VM_MAYREAD;
ASSERT_FALSE(attach_vma(&mm, init_vma));
* performs the merge/split only.
*/
vma = vma_modify_flags(&vmi, init_vma, init_vma,
- 0x1000, 0x2000, &flags);
+ 0x1000, 0x2000, &legacy_flags);
ASSERT_NE(vma, NULL);
/* We modify the provided VMA, and on split allocate new VMAs. */
ASSERT_EQ(vma, init_vma);
static bool test_simple_expand(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
+ VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
- struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
+ struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vma_flags);
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.vmi = &vmi,
static bool test_simple_shrink(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
+ VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
- struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
+ struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vma_flags);
VMA_ITERATOR(vmi, &mm, 0);
ASSERT_FALSE(attach_vma(&mm, vma));
static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky, bool c_is_sticky)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
bool merged;
if (is_sticky)
- vm_flags |= VM_STICKY;
+ vma_flags_set_mask(&vma_flags, VMA_STICKY_FLAGS);
/*
* 0123456789abc
* AA B CC
*/
- vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
+ vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
ASSERT_NE(vma_a, NULL);
if (a_is_sticky)
- vm_flags_set(vma_a, VM_STICKY);
+ vma_flags_set_mask(&vma_a->flags, VMA_STICKY_FLAGS);
/* We give each VMA a single avc so we can test anon_vma duplication. */
INIT_LIST_HEAD(&vma_a->anon_vma_chain);
list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
- vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
+ vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
ASSERT_NE(vma_b, NULL);
if (b_is_sticky)
- vm_flags_set(vma_b, VM_STICKY);
+ vma_flags_set_mask(&vma_b->flags, VMA_STICKY_FLAGS);
INIT_LIST_HEAD(&vma_b->anon_vma_chain);
list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
- vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vm_flags);
+ vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vma_flags);
ASSERT_NE(vma_c, NULL);
if (c_is_sticky)
- vm_flags_set(vma_c, VM_STICKY);
+ vma_flags_set_mask(&vma_c->flags, VMA_STICKY_FLAGS);
INIT_LIST_HEAD(&vma_c->anon_vma_chain);
list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
* 0123456789abc
* AA B ** CC
*/
- vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vm_flags, &merged);
+ vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vma_flags, &merged);
ASSERT_NE(vma_d, NULL);
INIT_LIST_HEAD(&vma_d->anon_vma_chain);
list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
*/
vma_a->vm_ops = &vm_ops; /* This should have no impact. */
vma_b->anon_vma = &dummy_anon_vma;
- vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vma_flags, &merged);
ASSERT_EQ(vma, vma_a);
/* Merge with A, delete B. */
ASSERT_TRUE(merged);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 3);
if (is_sticky || a_is_sticky || b_is_sticky)
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Merge to PREVIOUS VMA.
* 0123456789abc
* AAAA* DD CC
*/
- vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vma_flags, &merged);
ASSERT_EQ(vma, vma_a);
/* Extend A. */
ASSERT_TRUE(merged);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 3);
if (is_sticky || a_is_sticky)
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Merge to NEXT VMA.
*/
vma_d->anon_vma = &dummy_anon_vma;
vma_d->vm_ops = &vm_ops; /* This should have no impact. */
- vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vma_flags, &merged);
ASSERT_EQ(vma, vma_d);
/* Prepend. */
ASSERT_TRUE(merged);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 3);
if (is_sticky) /* D uses is_sticky. */
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Merge BOTH sides.
* AAAAA*DDD CC
*/
vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
- vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vma_flags, &merged);
ASSERT_EQ(vma, vma_a);
/* Merge with A, delete D. */
ASSERT_TRUE(merged);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 2);
if (is_sticky || a_is_sticky)
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Merge to NEXT VMA.
* AAAAAAAAA *CC
*/
vma_c->anon_vma = &dummy_anon_vma;
- vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vma_flags, &merged);
ASSERT_EQ(vma, vma_c);
/* Prepend C. */
ASSERT_TRUE(merged);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 2);
if (is_sticky || c_is_sticky)
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Merge BOTH sides.
* 0123456789abc
* AAAAAAAAA*CCC
*/
- vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vm_flags, &merged);
+ vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vma_flags, &merged);
ASSERT_EQ(vma, vma_a);
/* Extend A and delete C. */
ASSERT_TRUE(merged);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 1);
if (is_sticky || a_is_sticky || c_is_sticky)
- ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));
/*
* Final state.
static bool test_vma_merge_special_flags(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
};
- vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
- vm_flags_t all_special_flags = 0;
+ vma_flag_t special_flags[] = { VMA_IO_BIT, VMA_DONTEXPAND_BIT,
+ VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT };
+ vma_flags_t all_special_flags = EMPTY_VMA_FLAGS;
int i;
struct vm_area_struct *vma_left, *vma;
/* Make sure there aren't new VM_SPECIAL flags. */
- for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
- all_special_flags |= special_flags[i];
- }
- ASSERT_EQ(all_special_flags, VM_SPECIAL);
+ for (i = 0; i < ARRAY_SIZE(special_flags); i++)
+ vma_flags_set(&all_special_flags, special_flags[i]);
+ ASSERT_FLAGS_SAME_MASK(&all_special_flags, VMA_SPECIAL_FLAGS);
/*
* 01234
* AAA
*/
- vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
+ vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
ASSERT_NE(vma_left, NULL);
/* 1. Set up new VMA with special flag that would otherwise merge. */
*
* This should merge if not for the VM_SPECIAL flag.
*/
- vmg_set_range(&vmg, 0x3000, 0x4000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x4000, 3, vma_flags);
for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
- vm_flags_t special_flag = special_flags[i];
+ vma_flag_t special_flag = special_flags[i];
+ vma_flags_t flags = vma_flags;
- vm_flags_reset(vma_left, vm_flags | special_flag);
- vmg.vm_flags = vm_flags | special_flag;
+ vma_flags_set(&flags, special_flag);
+ vma_left->flags = flags;
+ vmg.vma_flags = flags;
vma = merge_new(&vmg);
ASSERT_EQ(vma, NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
*
* Create a VMA to modify.
*/
- vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
ASSERT_NE(vma, NULL);
vmg.middle = vma;
for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
- vm_flags_t special_flag = special_flags[i];
+ vma_flag_t special_flag = special_flags[i];
+ vma_flags_t flags = vma_flags;
- vm_flags_reset(vma_left, vm_flags | special_flag);
- vmg.vm_flags = vm_flags | special_flag;
+ vma_flags_set(&flags, special_flag);
+ vma_left->flags = flags;
+ vmg.vma_flags = flags;
vma = merge_existing(&vmg);
ASSERT_EQ(vma, NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
static bool test_vma_merge_with_close(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
* PPPPPPNNN
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
vma_next->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
ASSERT_EQ(merge_new(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_prev->vm_start, 0);
* proceed.
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vma->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
* proceed.
*/
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
vma->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
/*
* PPPVVNNNN
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
vma->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
* PPPPPNNNN
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
vma_next->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
static bool test_vma_merge_new_with_close(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
};
- struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
- struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vm_flags);
+ struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
+ struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vma_flags);
const struct vm_operations_struct vm_ops = {
.close = dummy_close,
};
vma_prev->vm_ops = &vm_ops;
vma_next->vm_ops = &vm_ops;
- vmg_set_range(&vmg, 0x2000, 0x5000, 2, vm_flags);
+ vmg_set_range(&vmg, 0x2000, 0x5000, 2, vma_flags);
vma = merge_new(&vmg);
ASSERT_NE(vma, NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bool next_is_sticky)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
- vm_flags_t prev_flags = vm_flags;
- vm_flags_t next_flags = vm_flags;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
+ vma_flags_t prev_flags = vma_flags;
+ vma_flags_t next_flags = vma_flags;
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vm_area_struct *vma, *vma_prev, *vma_next;
struct anon_vma_chain avc = {};
if (prev_is_sticky)
- prev_flags |= VM_STICKY;
+ vma_flags_set_mask(&prev_flags, VMA_STICKY_FLAGS);
if (middle_is_sticky)
- vm_flags |= VM_STICKY;
+ vma_flags_set_mask(&vma_flags, VMA_STICKY_FLAGS);
if (next_is_sticky)
- next_flags |= VM_STICKY;
+ vma_flags_set_mask(&next_flags, VMA_STICKY_FLAGS);
/*
* Merge right case - partial span.
* 0123456789
* VNNNNNN
*/
- vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vma_flags);
vma->vm_ops = &vm_ops; /* This should have no impact. */
vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
vma_next->vm_ops = &vm_ops; /* This should have no impact. */
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vma_flags, &dummy_anon_vma);
vmg.middle = vma;
vmg.prev = vma;
vma_set_dummy_anon_vma(vma, &avc);
ASSERT_TRUE(vma_write_started(vma_next));
ASSERT_EQ(mm.map_count, 2);
if (middle_is_sticky || next_is_sticky)
- ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma_next->flags, VMA_STICKY_FLAGS));
/* Clear down and reset. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
* 0123456789
* NNNNNNN
*/
- vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vma_flags);
vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
vma_next->vm_ops = &vm_ops; /* This should have no impact. */
- vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vm_flags, &dummy_anon_vma);
+ vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vma_flags, &dummy_anon_vma);
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
ASSERT_EQ(merge_existing(&vmg), vma_next);
ASSERT_TRUE(vma_write_started(vma_next));
ASSERT_EQ(mm.map_count, 1);
if (middle_is_sticky || next_is_sticky)
- ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma_next->flags, VMA_STICKY_FLAGS));
/* Clear down and reset. We should have deleted vma. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
*/
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
- vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
vma->vm_ops = &vm_ops; /* This should have no impact. */
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vma_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 2);
if (prev_is_sticky || middle_is_sticky)
- ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));
/* Clear down and reset. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
*/
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
- vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
ASSERT_TRUE(vma_write_started(vma_prev));
ASSERT_EQ(mm.map_count, 1);
if (prev_is_sticky || middle_is_sticky)
- ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));
/* Clear down and reset. We should have deleted vma. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
*/
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
- vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, next_flags);
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
ASSERT_TRUE(vma_write_started(vma_prev));
ASSERT_EQ(mm.map_count, 1);
if (prev_is_sticky || middle_is_sticky || next_is_sticky)
- ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));
+ ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));
/* Clear down and reset. We should have deleted prev and next. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
*/
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vma_flags);
vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, next_flags);
- vmg_set_range(&vmg, 0x4000, 0x5000, 4, vm_flags);
+ vmg_set_range(&vmg, 0x4000, 0x5000, 4, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
- vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
+ vmg_set_range(&vmg, 0x5000, 0x6000, 5, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
- vmg_set_range(&vmg, 0x6000, 0x7000, 6, vm_flags);
+ vmg_set_range(&vmg, 0x6000, 0x7000, 6, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
- vmg_set_range(&vmg, 0x4000, 0x7000, 4, vm_flags);
+ vmg_set_range(&vmg, 0x4000, 0x7000, 4, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
- vmg_set_range(&vmg, 0x4000, 0x6000, 4, vm_flags);
+ vmg_set_range(&vmg, 0x4000, 0x6000, 4, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
- vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
+ vmg_set_range(&vmg, 0x5000, 0x6000, 5, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
static bool test_anon_vma_non_mergeable(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vm_area_struct *vma, *vma_prev, *vma_next;
* 0123456789
* PPPPPPPNNN
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vma_flags);
/*
* Give both prev and next single anon_vma_chain fields, so they will
*
* However, when prev is compared to next, the merge should fail.
*/
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, NULL);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
* 0123456789
* PPPPPPPNNN
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vma_flags);
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, NULL);
vmg.prev = vma_prev;
vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);
static bool test_dup_anon_vma(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
* This covers new VMA merging, as these operations amount to a VMA
* expand.
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vma_next->anon_vma = &dummy_anon_vma;
- vmg_set_range(&vmg, 0, 0x5000, 0, vm_flags);
+ vmg_set_range(&vmg, 0, 0x5000, 0, vma_flags);
vmg.target = vma_prev;
vmg.next = vma_next;
* extend delete delete
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);
/* Initialise avc so mergeability check passes. */
INIT_LIST_HEAD(&vma_next->anon_vma_chain);
list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
vma_next->anon_vma = &dummy_anon_vma;
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
* extend delete delete
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);
vmg.anon_vma = &dummy_anon_vma;
vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
* extend shrink/delete
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vma_flags);
vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
* shrink/delete extend
*/
- vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vm_flags);
- vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vma_flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);
vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
- vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma;
vmg.middle = vma;
static bool test_vmi_prealloc_fail(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
* the duplicated anon_vma is unlinked.
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vma->anon_vma = &dummy_anon_vma;
- vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vm_flags, &dummy_anon_vma);
+ vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vma_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
* performed in this case too.
*/
- vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
- vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vma->anon_vma = &dummy_anon_vma;
- vmg_set_range(&vmg, 0, 0x5000, 3, vm_flags);
+ vmg_set_range(&vmg, 0, 0x5000, 3, vma_flags);
vmg.target = vma_prev;
vmg.next = vma;
static bool test_merge_extend(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0x1000);
struct vm_area_struct *vma;
- vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vm_flags);
- alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
+ vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vma_flags);
+ alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
/*
* Extend a VMA into the gap between itself and the following VMA.
static bool test_expand_only_mode(void)
{
- vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
+ vm_flags_t legacy_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vm_area_struct *vma_prev, *vma;
- VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vm_flags, 5);
+ VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, legacy_flags, 5);
/*
* Place a VMA prior to the one we're expanding so we assert that we do
* have, through the use of the just_expand flag, indicated we do not
* need to do so.
*/
- alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
+ alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
/*
* We will be positioned at the prev VMA, but looking to expand to
* 0x9000.
*/
vma_iter_set(&vmi, 0x3000);
- vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+ vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
vmg.prev = vma_prev;
vmg.just_expand = true;