static int shstk_pop_sigframe(unsigned long *ssp)
{
- struct vm_area_struct *vma;
unsigned long token_addr;
- bool need_to_check_vma;
- int err = 1;
+ unsigned int seq;
/*
 * It is possible for the SSP to be off the end of a shadow stack by 4
 * or 8 bytes, in which case the restore token may sit on an adjacent
 * page; the VMA check below guards that case.
 * NOTE(review): original comment text was truncated here — confirm
 * wording against upstream arch/x86/kernel/shstk.c.
 */
if (!IS_ALIGNED(*ssp, 8))
return -EINVAL;
- need_to_check_vma = PAGE_ALIGN(*ssp) == *ssp;
+ do {
+ struct vm_area_struct *vma;
+ bool valid_vma;
+ int err;
- if (need_to_check_vma)
if (mmap_read_lock_killable(current->mm))
return -EINTR;
- err = get_shstk_data(&token_addr, (unsigned long __user *)*ssp);
- if (unlikely(err))
- goto out_err;
-
- if (need_to_check_vma) {
vma = find_vma(current->mm, *ssp);
- if (!vma || !(vma->vm_flags & VM_SHADOW_STACK)) {
- err = -EFAULT;
- goto out_err;
- }
-
+ valid_vma = vma && (vma->vm_flags & VM_SHADOW_STACK);
+
+ /*
+ * VMAs can change between get_shstk_data() and find_vma().
+ * Watch for changes and ensure that 'token_addr' comes from
+ * 'vma' by recording a seqcount.
+ *
+ * Ignore the return value of mmap_lock_speculate_try_begin()
+ * because the mmap lock excludes the possibility of writers.
+ */
+ mmap_lock_speculate_try_begin(current->mm, &seq);
mmap_read_unlock(current->mm);
- }
+
+ if (!valid_vma)
+ return -EINVAL;
+
+ err = get_shstk_data(&token_addr, (unsigned long __user *)*ssp);
+ if (err)
+ return err;
+ } while (mmap_lock_speculate_retry(current->mm, seq));
/* Restore SSP aligned? */
if (unlikely(!IS_ALIGNED(token_addr, 8)))
	return -EINVAL;

/* SSP in userspace? */
if (unlikely(token_addr >= TASK_SIZE_MAX))
	return -EINVAL;

*ssp = token_addr;

return 0;
-out_err:
- if (need_to_check_vma)
- mmap_read_unlock(current->mm);
- return err;
}
int setup_signal_shadow_stack(struct ksignal *ksig)