+// SPDX-License-Identifier: GPL-2.0-or-later
+
/*
* zsmalloc memory allocator
*
set_first_obj_offset(page, off);
- vaddr = kmap_atomic(page);
+ vaddr = kmap_local_page(page);
link = (struct link_free *)vaddr + off / sizeof(*link);
while ((off += class->size) < PAGE_SIZE) {
link->next = -1UL << OBJ_TAG_BITS;
}
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
page = next_page;
off %= PAGE_SIZE;
}
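The hunk above is init_zspage() writing the freelist links, one short-lived mapping per page. The general shape of the converted pattern looks like the sketch below; struct fake_link, init_chain() and the page array are illustrative stand-ins, not zsmalloc's struct link_free or page-chain helpers.

#include <linux/highmem.h>

struct fake_link {                              /* stand-in for struct link_free */
        unsigned long next;
};

/* Sketch: write a freelist terminator into the first slot of each page. */
static void init_chain(struct page **pages, int nr_pages, unsigned int tag_bits)
{
        int i;

        for (i = 0; i < nr_pages; i++) {
                struct fake_link *link;
                void *vaddr;

                vaddr = kmap_local_page(pages[i]);      /* thread-local, short-lived mapping */
                link = vaddr;
                link->next = -1UL << tag_bits;          /* mark the end of the freelist */
                kunmap_local(vaddr);                    /* drop the mapping before the next page */
        }
}

As in the converted code, each mapping is released before the loop advances to the next page, so only one local mapping is ever outstanding.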
sizes[1] = size - sizes[0];
/* copy object to per-cpu buffer */
- addr = kmap_atomic(pages[0]);
+ addr = kmap_local_page(pages[0]);
memcpy(buf, addr + off, sizes[0]);
- kunmap_atomic(addr);
- addr = kmap_atomic(pages[1]);
+ kunmap_local(addr);
+ addr = kmap_local_page(pages[1]);
memcpy(buf + sizes[0], addr, sizes[1]);
- kunmap_atomic(addr);
+ kunmap_local(addr);
out:
return area->vm_buf;
}
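__zs_map_object() copies an object that straddles a page boundary into the per-CPU buffer with two map/copy/unmap sequences. The same copy-out can also be expressed with memcpy_from_page() from include/linux/highmem.h, which wraps kmap_local_page()/memcpy()/kunmap_local() internally; the sketch below (copy_obj_out() is a made-up name and assumes the object really spans both pages) only illustrates the equivalence and is not a proposed change.

#include <linux/highmem.h>

/* Copy an object starting at @off in @pages[0] and spilling into @pages[1]. */
static void copy_obj_out(char *buf, struct page *pages[2], int off, int size)
{
        size_t first = PAGE_SIZE - off;         /* bytes that live in the first page */

        memcpy_from_page(buf, pages[0], off, first);
        memcpy_from_page(buf + first, pages[1], 0, size - first);
}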
sizes[1] = size - sizes[0];
/* copy per-cpu buffer to object */
- addr = kmap_atomic(pages[0]);
+ addr = kmap_local_page(pages[0]);
memcpy(addr + off, buf, sizes[0]);
- kunmap_atomic(addr);
- addr = kmap_atomic(pages[1]);
+ kunmap_local(addr);
+ addr = kmap_local_page(pages[1]);
memcpy(addr, buf + sizes[0], sizes[1]);
- kunmap_atomic(addr);
+ kunmap_local(addr);
out:
- /* enable page faults to match kunmap_atomic() return conditions */
+ /* enable page faults; kunmap_local() does not re-enable them itself */
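The comment above points at the one behavioural difference that matters for this conversion: kmap_atomic() implicitly disabled page faults and preemption, while kmap_local_page() does not (it only keeps the mapping valid for the mapping task). Documentation/mm/highmem.rst therefore recommends keeping such side effects explicit wherever the surrounding code relies on them, which is why the explicit page-fault toggling this comment refers to is kept. A sketch of that recipe; write_into_page() is a made-up example, not zsmalloc code.

#include <linux/highmem.h>
#include <linux/preempt.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Keep kmap_atomic()'s old side effects explicit around the local mapping. */
static void write_into_page(struct page *page, unsigned int off,
                            const void *src, size_t len)
{
        void *vaddr;

        preempt_disable();              /* only if the old code relied on it */
        pagefault_disable();            /* only if the old code relied on it */

        vaddr = kmap_local_page(page);
        memcpy(vaddr + off, src, len);
        kunmap_local(vaddr);

        pagefault_enable();
        preempt_enable();
}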
area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
- area->vm_addr = kmap_atomic(page);
+ area->vm_addr = kmap_local_page(page);
ret = area->vm_addr + off;
goto out;
}
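zs_map_object() stores the mapping in the per-CPU mapping_area and returns a pointer into it, so the mapping outlives the function call. That is fine with kmap_local_page(): the mapping is per task, survives preemption, and only has to be released by the same task with kunmap_local(). A minimal sketch of that begin/end pairing; struct obj_mapping, obj_map_begin() and obj_map_end() are illustrative names, not the real mapping_area API.

#include <linux/highmem.h>

struct obj_mapping {                    /* illustrative, not struct mapping_area */
        void *vm_addr;                  /* base address of the local mapping */
};

/* Map @page and return a pointer to the object at @off; the mapping stays live. */
static void *obj_map_begin(struct obj_mapping *m, struct page *page, int off)
{
        m->vm_addr = kmap_local_page(page);
        return m->vm_addr + off;
}

/* Must be called by the same task that called obj_map_begin(). */
static void obj_map_end(struct obj_mapping *m)
{
        kunmap_local(m->vm_addr);
        m->vm_addr = NULL;
}

The returned pointer is only valid in the mapping thread and must not be handed to another task or context.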
area = this_cpu_ptr(&zs_map_area);
if (off + class->size <= PAGE_SIZE)
- kunmap_atomic(area->vm_addr);
+ kunmap_local(area->vm_addr);
else {
struct page *pages[2];
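On the release side, kunmap_local() shares a convenience with kunmap_atomic(): the argument only has to point somewhere inside the mapped page, since the implementation aligns it down to the page boundary. Passing the stored base address, as above, or a pointer derived from it both work. A small illustrative example; poke_byte() is a made-up name.

#include <linux/highmem.h>

static void poke_byte(struct page *page, unsigned int off, char val)
{
        char *obj = kmap_local_page(page) + off;        /* pointer into the mapping */

        *obj = val;
        kunmap_local(obj);      /* any address within the mapped page is accepted */
}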
for (i = 0; i < nr_page; i++)
m_page = get_next_page(m_page);
- vaddr = kmap_atomic(m_page);
+ vaddr = kmap_local_page(m_page);
link = (struct link_free *)vaddr + m_offset / sizeof(*link);
set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
if (likely(!ZsHugePage(zspage)))
/* record handle in the header of allocated chunk */
link->handle = handle | OBJ_ALLOCATED_TAG;
else
/* record handle to page->index */
zspage->first_page->index = handle | OBJ_ALLOCATED_TAG;
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
mod_zspage_inuse(zspage, 1);
obj = location_to_obj(m_page, obj);
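obj_malloc() only needs the page mapped long enough to pop the head of the in-page freelist and record the handle. A simplified sketch of that pop under a local mapping; struct slot, freelist_pop() and the free_head parameter are made-up simplifications of struct link_free and get_freeobj()/set_freeobj().

#include <linux/highmem.h>

struct slot {                           /* stand-in for struct link_free */
        unsigned long next;             /* next free slot index, shifted by tag bits */
};

/* Pop the free slot at byte offset @off in @page and store @handle in it. */
static void freelist_pop(struct page *page, unsigned int off,
                         unsigned long *free_head, unsigned long handle,
                         unsigned int tag_bits)
{
        struct slot *s;
        void *vaddr;

        vaddr = kmap_local_page(page);
        s = vaddr + off;
        *free_head = s->next >> tag_bits;       /* new head of the freelist */
        s->next = handle;                       /* the slot now carries the handle */
        kunmap_local(vaddr);
}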
f_offset = offset_in_page(class_size * f_objidx);
zspage = get_zspage(f_page);
- vaddr = kmap_atomic(f_page);
+ vaddr = kmap_local_page(f_page);
link = (struct link_free *)(vaddr + f_offset);
/* Insert this object in containing zspage's freelist */
if (likely(!ZsHugePage(zspage)))
link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
else
f_page->index = 0;
set_freeobj(zspage, f_objidx);
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
mod_zspage_inuse(zspage, -1);
}
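Worth noting: on configurations without CONFIG_HIGHMEM (all 64-bit builds), both the old and the new calls reduce to plain address arithmetic, so neither the allocation nor the free path gains any overhead from this conversion. Roughly, the !CONFIG_HIGHMEM fallbacks in include/linux/highmem-internal.h behave like the paraphrased sketch below (the _sketch names are mine, not the kernel's).

#include <linux/mm.h>

/* Paraphrased !CONFIG_HIGHMEM behaviour: every page is permanently mapped. */
static inline void *kmap_local_page_sketch(struct page *page)
{
        return page_address(page);      /* no per-CPU slot, no implicit side effects */
}

static inline void kunmap_local_sketch(const void *vaddr)
{
        /* nothing to undo; at most a cache flush on a few architectures */
}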
if (d_off + class->size > PAGE_SIZE)
d_size = PAGE_SIZE - d_off;
- s_addr = kmap_atomic(s_page);
- d_addr = kmap_atomic(d_page);
+ s_addr = kmap_local_page(s_page);
+ d_addr = kmap_local_page(d_page);
while (1) {
size = min(s_size, d_size);
* Documentation/mm/highmem.rst.
*/
if (s_off >= PAGE_SIZE) {
- kunmap_atomic(d_addr);
- kunmap_atomic(s_addr);
+ kunmap_local(d_addr);
+ kunmap_local(s_addr);
s_page = get_next_page(s_page);
- s_addr = kmap_atomic(s_page);
- d_addr = kmap_atomic(d_page);
+ s_addr = kmap_local_page(s_page);
+ d_addr = kmap_local_page(d_page);
s_size = class->size - written;
s_off = 0;
}
if (d_off >= PAGE_SIZE) {
- kunmap_atomic(d_addr);
+ kunmap_local(d_addr);
d_page = get_next_page(d_page);
- d_addr = kmap_atomic(d_page);
+ d_addr = kmap_local_page(d_page);
d_size = class->size - written;
d_off = 0;
}
}
- kunmap_atomic(d_addr);
- kunmap_atomic(s_addr);
+ kunmap_local(d_addr);
+ kunmap_local(s_addr);
}
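zs_object_copy() is the one place with two local mappings live at once, and the Documentation/mm/highmem.rst reference above is about the rule that comes with that: kmap_local_page() mappings are stacked per task and must be released in reverse order of acquisition. That is why the source-page switch above unmaps d_addr before s_addr and then re-maps s_page before d_page. A compact sketch of the rule; copy_between_pages() is a made-up example.

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy @len bytes from @src_off in @src to @dst_off in @dst. */
static void copy_between_pages(struct page *dst, unsigned int dst_off,
                               struct page *src, unsigned int src_off,
                               size_t len)
{
        char *s = kmap_local_page(src);         /* mapped first */
        char *d = kmap_local_page(dst);         /* mapped second */

        memcpy(d + dst_off, s + src_off, len);

        /* Release strictly in reverse (LIFO) order of the kmap_local_page() calls. */
        kunmap_local(d);                        /* last mapped, first unmapped */
        kunmap_local(s);
}

For a single bounded copy, the memcpy_page() helper in include/linux/highmem.h packages exactly this map/copy/unmap-in-reverse sequence.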
/*
unsigned int offset;
int index = *obj_idx;
unsigned long handle = 0;
- void *addr = kmap_atomic(page);
+ void *addr = kmap_local_page(page);
offset = get_first_obj_offset(page);
offset += class->size * index;
index++;
}
- kunmap_atomic(addr);
+ kunmap_local(addr);
*obj_idx = index;
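find_alloced_obj() maps the page once and then walks every object header in it at class->size strides, which is the right granularity: one kmap_local_page() per page scanned rather than one per object. A sketch of that scan; find_first_allocated() and obj_is_allocated() are made-up stand-ins for the real helpers and the OBJ_ALLOCATED_TAG check.

#include <linux/highmem.h>
#include <linux/types.h>

/* Made-up predicate; zsmalloc checks the allocated-tag bit of the stored handle. */
static bool obj_is_allocated(const void *obj_hdr)
{
        return *(const unsigned long *)obj_hdr & 1UL;
}

/* Return the offset of the first allocated object in @page, or -1 if none. */
static int find_first_allocated(struct page *page, unsigned int first_off,
                                unsigned int obj_size)
{
        char *vaddr = kmap_local_page(page);    /* one mapping for the whole scan */
        unsigned int off;
        int ret = -1;

        for (off = first_off;
             off + sizeof(unsigned long) <= PAGE_SIZE; off += obj_size) {
                if (obj_is_allocated(vaddr + off)) {
                        ret = off;
                        break;
                }
        }

        kunmap_local(vaddr);
        return ret;
}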
migrate_write_lock(zspage);
offset = get_first_obj_offset(page);
- s_addr = kmap_atomic(page);
+ s_addr = kmap_local_page(page);
/*
- * Here, any user cannot access all objects in the zspage so let's move.
+ * At this point no user can access any object in this zspage, so it is
+ * safe to move its contents to the new page.
*/
- d_addr = kmap_atomic(newpage);
+ d_addr = kmap_local_page(newpage);
copy_page(d_addr, s_addr);
- kunmap_atomic(d_addr);
+ kunmap_local(d_addr);
for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
addr += class->size) {
record_obj(handle, new_obj);
}
}
- kunmap_atomic(s_addr);
+ kunmap_local(s_addr);
replace_sub_page(class, zspage, newpage, page);
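For the full-page copy in the migration path, the kernel also provides copy_highpage() in include/linux/highmem.h, which is exactly kmap_local_page() on both pages around copy_page(); zs_page_migrate() open-codes it here because it keeps s_addr mapped afterwards to fix up the object handles. A sketch of that open-coded form; clone_and_keep_src_mapped() is a made-up name.

#include <linux/highmem.h>
#include <linux/mm.h>

/* Clone @src into @dst while keeping the source mapped for later fix-ups. */
static void *clone_and_keep_src_mapped(struct page *dst, struct page *src)
{
        void *s_addr = kmap_local_page(src);
        void *d_addr = kmap_local_page(dst);

        copy_page(d_addr, s_addr);
        kunmap_local(d_addr);           /* dst was mapped last, so it is unmapped first */

        return s_addr;                  /* the caller must kunmap_local() this later */
}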
/*