(unsigned long int)(oper.indata + prev_ix),
noinpages,
0, /* read access only for in data */
- 0, /* no force */
inpages,
NULL);
current->mm,
(unsigned long int)oper.cipher_outdata,
nooutpages,
- 1, /* write access for out data */
- 0, /* no force */
+ FOLL_WRITE, /* write access for out data */
outpages,
NULL);
up_read(&current->mm->mmap_sem);
int ret;
ret = get_user_pages(current, current->mm, virt_addr,
- 1, VM_READ, 0, NULL, NULL);
+ 1, FOLL_WRITE, NULL, NULL);
if (ret<=0) {
#ifdef ERR_INJ_DEBUG
printk("Virtual address %lx is not existing.\n",virt_addr);
{
long gup_ret;
int nr_pages = 1;
- int force = 0;
gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
- nr_pages, write, force, NULL, NULL);
+ nr_pages, write ? FOLL_WRITE : 0, NULL, NULL);
/*
* get_user_pages() returns number of pages gotten.
* 0 means we failed to fault in and get anything,
int r;
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+ unsigned int flags = 0;
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ if (write)
+ flags |= FOLL_WRITE;
+
if (current->mm != gtt->usermm)
return -EPERM;
struct page **pages = ttm->pages + pinned;
r = get_user_pages(current, current->mm, userptr, num_pages,
- write, 0, pages, NULL);
+ flags, pages, NULL);
if (r < 0)
goto release_pages;
pvec = drm_malloc_ab(npages, sizeof(struct page *));
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
+ unsigned int flags = 0;
+
+ if (!obj->userptr.read_only)
+ flags |= FOLL_WRITE;
down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
- !obj->userptr.read_only, 0,
+ flags,
pvec + pinned, NULL);
if (ret < 0)
break;
struct page **pages = ttm->pages + pinned;
r = get_user_pages(current, current->mm, userptr, num_pages,
- write, 0, pages, NULL);
+ write ? FOLL_WRITE : 0, pages, NULL);
if (r < 0)
goto release_pages;
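The amdgpu and radeon hunks above show the two conversion idioms used throughout the series: amdgpu builds an explicit flags variable up front, while radeon maps the old write boolean inline with write ? FOLL_WRITE : 0. Neither driver passed force, so no FOLL_FORCE appears.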
ret = get_user_pages(current, current->mm,
(unsigned long)xfer->mem_addr,
vsg->num_pages,
- (vsg->direction == DMA_FROM_DEVICE),
- 0, vsg->pages, NULL);
+ (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
+ vsg->pages, NULL);
up_read(&current->mm->mmap_sem);
if (ret != vsg->num_pages) {
DEFINE_DMA_ATTRS(attrs);
struct scatterlist *sg, *sg_list_start;
int need_release = 0;
+ unsigned int gup_flags = FOLL_WRITE;
if (dmasync)
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
if (ret)
goto out;
+ if (!umem->writable)
+ gup_flags |= FOLL_FORCE;
+
need_release = 1;
sg_list_start = umem->sg_head.sgl;
ret = get_user_pages(current, current->mm, cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
- 1, !umem->writable, page_list, vma_list);
+ gup_flags, page_list, vma_list);
if (ret < 0)
goto out;
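The ib umem hunk is the first to need FOLL_FORCE: the old call passed write = 1 with force = !umem->writable, so gup_flags starts at FOLL_WRITE and gains FOLL_FORCE for read-only registrations, preserving the existing semantics bit for bit.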
u64 off;
int j, k, ret = 0, start_idx, npages = 0;
u64 base_virt_addr;
+ unsigned int flags = 0;
if (access_mask == 0)
return -EINVAL;
goto out_put_task;
}
+ if (access_mask & ODP_WRITE_ALLOWED_BIT)
+ flags |= FOLL_WRITE;
+
start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
k = start_idx;
*/
npages = get_user_pages(owning_process, owning_mm, user_virt,
gup_num_pages,
- access_mask & ODP_WRITE_ALLOWED_BIT, 0,
- local_page_list, NULL);
+ flags, local_page_list, NULL);
up_read(&owning_mm->mmap_sem);
if (npages < 0)
goto out;
}
- ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
- pages, NULL);
+ ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1,
+ FOLL_WRITE, pages, NULL);
if (ret < 0)
goto out;
for (got = 0; got < num_pages; got += ret) {
ret = get_user_pages(current, current->mm,
start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
+ num_pages - got,
+ FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0)
goto bail_release;
int flags;
dma_addr_t pa;
DEFINE_DMA_ATTRS(attrs);
+ unsigned int gup_flags;
if (dmasync)
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
flags = IOMMU_READ | IOMMU_CACHE;
flags |= (writable) ? IOMMU_WRITE : 0;
+ gup_flags = FOLL_WRITE;
+ gup_flags |= (writable) ? 0 : FOLL_FORCE;
cur_base = addr & PAGE_MASK;
ret = 0;
ret = get_user_pages(current, current->mm, cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof(struct page *)),
- 1, !writable, page_list, NULL);
+ gup_flags, page_list, NULL);
if (ret < 0)
goto out;
{
unsigned long first, last;
int err, rw = 0;
+ unsigned int flags = FOLL_FORCE;
dma->direction = direction;
switch (dma->direction) {
if (NULL == dma->pages)
return -ENOMEM;
+ if (rw == READ)
+ flags |= FOLL_WRITE;
+
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
err = get_user_pages(current, current->mm,
data & PAGE_MASK, dma->nr_pages,
- rw == READ, 1, /* force */
- dma->pages, NULL);
+ flags, dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
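The videobuf flags look inverted at first glance but are correct: dma->direction describes the transfer from the device's point of view, so a READ transfer fills the user pages and must pin them with FOLL_WRITE. FOLL_FORCE carries over the old hard-coded 1, /* force */ argument.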
mm,
(u64)addr,
nr_pages,
- !!(prot & SCIF_PROT_WRITE),
- 0,
+ (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
pinned_pages->pages,
NULL);
up_write(&mm->mmap_sem);
*pageshift = PAGE_SHIFT;
#endif
if (get_user_pages
- (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
+ (current, current->mm, vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
return -EFAULT;
*paddr = page_to_phys(page);
put_page(page);
for (got = 0; got < num_pages; got += ret) {
ret = get_user_pages(current, current->mm,
start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
+ num_pages - got, FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0)
goto bail_release;
for (got = 0; got < num_pages; got += ret) {
ret = get_user_pages(current, current->mm,
start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
+ num_pages - got, FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0)
goto bail_release;
down_read(&current->mm->mmap_sem);
num_pinned = get_user_pages(current, current->mm,
param.local_vaddr - lb_offset, num_pages,
- (param.source == -1) ? READ : WRITE,
- 0, pages, NULL);
+ (param.source == -1) ? 0 : FOLL_WRITE,
+ pages, NULL);
up_read(&current->mm->mmap_sem);
if (num_pinned != num_pages) {
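In the fsl hypervisor hunk the old code reused the file-I/O constants READ (0) and WRITE (1) as the write boolean; spelling the choice as 0 versus FOLL_WRITE keeps the behavior while dropping the misleading constants.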
{
struct page *page;
int ret;
+ unsigned int gup_flags = FOLL_FORCE;
#ifdef CONFIG_STACK_GROWSUP
if (write) {
return NULL;
}
#endif
- ret = get_user_pages(current, bprm->mm, pos,
- 1, write, 1, &page, NULL);
+
+ if (write)
+ gup_flags |= FOLL_WRITE;
+
+ ret = get_user_pages(current, bprm->mm, pos, 1, gup_flags,
+ &page, NULL);
if (ret <= 0)
return NULL;
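get_arg_page() previously passed write with force hard-coded to 1, so the converted code starts gup_flags at FOLL_FORCE unconditionally and ORs in FOLL_WRITE only when the caller intends to write into the argument page.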
struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
retry:
/* Read the page with vaddr into memory */
- ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
+ ret = get_user_pages(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page, &vma);
if (ret <= 0)
return ret;
if (likely(result == 0))
goto out;
- result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
+ result = get_user_pages(NULL, mm, vaddr, 1, FOLL_FORCE, &page, NULL);
if (result < 0)
return result;
* FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
*/
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages, int write,
- int force, struct page **pages, struct vm_area_struct **vmas)
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas)
{
- unsigned int flags = FOLL_TOUCH;
-
- if (write)
- flags |= FOLL_WRITE;
- if (force)
- flags |= FOLL_FORCE;
-
return __get_user_pages_locked(tsk, mm, start, nr_pages,
- pages, vmas, NULL, false, flags);
+ pages, vmas, NULL, false,
+ gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
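The mm/gup.c hunk above is the core of the series: get_user_pages() now receives the flag word directly and only ORs in FOLL_TOUCH itself. For any caller still holding a write/force pair, the conversion is mechanical; a minimal sketch follows (the write, force, tsk, mm, start, nr_pages, pages and vmas names are placeholders, not code from the patch):

	unsigned int gup_flags = 0;	/* replaces the old write/force pair */

	if (write)
		gup_flags |= FOLL_WRITE;	/* caller will write to the pages */
	if (force)
		gup_flags |= FOLL_FORCE;	/* override VMA protections */

	/* FOLL_TOUCH is ORed in internally; pass only the access bits. */
	ret = get_user_pages(tsk, mm, start, nr_pages, gup_flags,
			     pages, vmas);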
{
struct vm_area_struct *vma;
void *old_buf = buf;
+ unsigned int flags = FOLL_FORCE;
+
+ if (write)
+ flags |= FOLL_WRITE;
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
struct page *page = NULL;
ret = get_user_pages(tsk, mm, addr, 1,
- write, 1, &page, &vma);
+ flags, &page, &vma);
if (ret <= 0) {
#ifndef CONFIG_HAVE_IOREMAP_PROT
break;
struct page *p;
int err;
- err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
+ err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, &p, NULL);
if (err >= 0) {
err = page_to_nid(p);
put_page(p);
*/
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas)
{
- int flags = 0;
-
- if (write)
- flags |= FOLL_WRITE;
- if (force)
- flags |= FOLL_FORCE;
-
- return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
- NULL);
+ return __get_user_pages(tsk, mm, start, nr_pages,
+ gup_flags, pages, vmas, NULL);
}
EXPORT_SYMBOL(get_user_pages);
unsigned int gup_flags, struct page **pages,
int *locked)
{
- int write = gup_flags & FOLL_WRITE;
- int force = gup_flags & FOLL_FORCE;
-
- return get_user_pages(tsk, mm, start, nr_pages, write, force,
+ return get_user_pages(tsk, mm, start, nr_pages, gup_flags,
pages, NULL);
}
EXPORT_SYMBOL(get_user_pages_locked);
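On the nommu side, get_user_pages_locked() used to decompose gup_flags back into write/force booleans and re-encode them through get_user_pages(), silently dropping any other bits; with the shared gup_flags signature it now forwards the flags unmodified.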
}
/* Same with get_arg_page(bprm, pos, 0) in fs/exec.c */
#ifdef CONFIG_MMU
- if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0)
+ if (get_user_pages(current, bprm->mm, pos, 1,
+ FOLL_FORCE, &page, NULL) <= 0)
return false;
#else
page = bprm->page[pos / PAGE_SIZE];