/*
* OK, we tried to call the file hook for mmap(), but an error
- * arose. The mapping is in an inconsistent state and we most not invoke
+ * arose. The mapping is in an inconsistent state and we must not invoke
* any further hooks on it.
*/
vma->vm_ops = &vma_dummy_vm_ops;
- * madvise_should_skip() - Return if the request is invalid or nothing.
+ * madvise_should_skip() - Return whether the request is invalid or a no-op.
* @start: Start address of madvise-requested address range.
* @len_in: Length of madvise-requested address range.
- * @behavior: Requested madvise behavor.
+ * @behavior: Requested madvise behavior.
* @err: Pointer to store an error code from the check.
*
* If the specified behaviour is invalid or nothing would occur, we skip the
unsigned long start_pfn, end_pfn, mem_size_mb;
int nid, i;
- /* calculate lose page */
+ /* calculate lost pages */
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
if (!numa_valid_node(nid))
nr_pages += end_pfn - start_pfn;
/**
* reserve_mem_release_by_name - Release reserved memory region with a given name
- * @name: The name that is attatched to a reserved memory region
+ * @name: The name that is attached to a reserved memory region
*
- * Forcibly release the pages in the reserved memory region so that those memory
- * can be used as free memory. After released the reserved region size becomes 0.
+ * Forcibly release the pages in the reserved memory region so that the memory
+ * can be used as free memory. After release, the reserved region size becomes 0.
memcg = folio_memcg(old);
/*
* Note that it is normal to see !memcg for a hugetlb folio.
- * For e.g, itt could have been allocated when memory_hugetlb_accounting
+ * For example, it could have been allocated when memory_hugetlb_accounting
* was not selected.
*/
VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
*
- * MF_RECOVERED - The m-f() handler marks the page as PG_hwpoisoned'ed.
+ * MF_RECOVERED - The m-f() handler marks the page as PG_hwpoison'ed.
* The page has been completely isolated, that is, unmapped, taken out of
- * the buddy system, or hole-punnched out of the file mapping.
+ * the buddy system, or hole-punched out of the file mapping.
*/
static const char *action_name[] = {
[MF_IGNORED] = "Ignored",
if (node_memory_types[node].memtype == memtype || !memtype)
node_memory_types[node].map_count--;
/*
- * If we umapped all the attached devices to this node,
+ * If we unmapped all the devices attached to this node,
* clear the node memory type.
*/
if (!node_memory_types[node].map_count) {
else
*last_cpupid = folio_last_cpupid(folio);
- /* Record the current PID acceesing VMA */
+ /* Record the current PID accessing the VMA */
vma_set_access_pid_bit(vma);
count_vm_numa_event(NUMA_HINT_FAULTS);
* Use the maywrite version to indicate that vmf->pte may be
* modified, but since we will use pte_same() to detect the
* change of the !pte_none() entry, there is no need to recheck
- * the pmdval. Here we chooes to pass a dummy variable instead
+ * the pmdval. Here we choose to pass a dummy variable instead
- * of NULL, which helps new user think about why this place is
+ * of NULL, which helps new users think about why this place is
* special.
*/
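/*
 * A minimal sketch of the pattern described above (the helper name is
 * illustrative): the dummy pmd_t is deliberately never read, because
 * pte_same() on the already-present entry is enough to detect any
 * concurrent change.
 */
static void dummy_pmdval_pattern(struct vm_fault *vmf)
{
	pmd_t dummy_pmdval;
	pte_t orig_pte;

	vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd,
					    vmf->address, &dummy_pmdval,
					    &vmf->ptl);
	if (!vmf->pte)
		return;			/* page table gone, fault will be retried */
	orig_pte = ptep_get(vmf->pte);

	/* ... later, after taking vmf->ptl: */
	spin_lock(vmf->ptl);
	if (!pte_same(ptep_get(vmf->pte), orig_pte)) {
		/* the PTE changed under us; back out and retry */
	}
	spin_unlock(vmf->ptl);
	pte_unmap(vmf->pte);
}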
*
* MOVABLE : KERNEL_EARLY
*
- * Whereby KERNEL_EARLY is memory in one of the kernel zones, available sinze
+ * Whereby KERNEL_EARLY is memory in one of the kernel zones, available since
* boot. We base our calculation on KERNEL_EARLY internally, because:
*
* a) Hotplugged memory in one of the kernel zones can sometimes still get
* NODE_DATA is preallocated (free_area_init) but its internal
* state is not allocated completely. Add missing pieces.
* Completely offline nodes stay around and they just need
- * reintialization.
+ * reinitialization.
*/
pgdat = NODE_DATA(nid);
/**
* migrate_device_pfns() - migrate device private pfns to normal memory.
- * @src_pfns: pre-popluated array of source device private pfns to migrate.
+ * @src_pfns: pre-populated array of source device private pfns to migrate.
* @npages: number of pages to migrate.
*
- * Similar to migrate_device_range() but supports non-contiguous pre-popluated
+ * Similar to migrate_device_range() but supports non-contiguous pre-populated
* array of device pages to migrate.
*/
int migrate_device_pfns(unsigned long *src_pfns, unsigned long npages)
/*
* For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
* (total memory/#cpus), and lift it to 25% for other policies
- * to easy the possible lock contention for percpu_counter
+ * to ease the possible lock contention for percpu_counter
* vm_committed_as, while the max limit is INT_MAX
*/
if (overcommit_policy == OVERCOMMIT_NEVER)
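/*
 * Sketch of the arithmetic spelled out above, assuming ram_pages and the
 * CPU count are already known (the helper name is illustrative only):
 * 1/256 of (memory / #cpus) is ~0.4%, 1/4 of it is 25%, and both are
 * clamped to INT_MAX because the percpu_counter batch is an s32.
 */
static s32 committed_as_batch_sketch(unsigned long ram_pages, s32 nr_cpus,
				     int overcommit_policy)
{
	u64 batch;

	if (overcommit_policy == OVERCOMMIT_NEVER)
		batch = min_t(u64, ram_pages / nr_cpus / 256, INT_MAX); /* ~0.4% */
	else
		batch = min_t(u64, ram_pages / nr_cpus / 4, INT_MAX);	 /* 25% */

	return batch;
}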
lru_gen_init_pgdat(pgdat);
}
-/* Any regular or high memory on that node ? */
+/* Any regular or high memory on that node? */
static void __init check_for_memory(pg_data_t *pgdat)
{
enum zone_type zone_type;
* Initialize and free pages.
*
* At this point reserved pages and struct pages that correspond to holes in
- * memblock.memory are already intialized so every free range has a valid
+ * memblock.memory are already initialized so every free range has a valid
* memory map around it.
* This ensures that access of pages that are ahead of the range being
* initialized (computing buddy page in __free_one_page()) always reads a valid
/*
* We don't want to have to go hunting for VMAs from the end of the old
* VMA to the next page table boundary, also we want to make sure the
- * operation is wortwhile.
+ * operation is worthwhile.
*
* So ensure that we only perform this realignment if the end of the
* range being copied reaches or crosses the page table boundary.
/*
- * Will a new address definitely be assigned? This either if the user specifies
+ * Will a new address definitely be assigned? This is so either if the user specifies
* it via MREMAP_FIXED, or if MREMAP_DONTUNMAP is used, indicating we will
- * always detemrine a target address.
+ * always determine a target address.
*/
static bool vrm_implies_new_addr(struct vma_remap_struct *vrm)
{
/*
- * move_vma() need us to stay 4 maps below the threshold, otherwise
+ * move_vma() needs us to stay 4 maps below the threshold, otherwise
* it will bail out at the very beginning.
- * That is a problem if we have already unmaped the regions here
+ * That is a problem if we have already unmapped the regions here
* (new_addr, and old_addr), because userspace will not know the
* state of the vma's after it gets -ENOMEM.
* So, to avoid such scenario we can pre-compute if the whole
* It disallows unmapped regions from start to end whether they exist at the
* start, in the middle, or at the end of the range, or any combination thereof.
*
- * This is because after sealng a range, there's nothing to stop memory mapping
+ * This is because after sealing a range, there's nothing to stop memory mapping
* of ranges in the remaining gaps later, meaning that the user might then
* wrongly consider the entirety of the mseal()'d range to be sealed when it
* in fact isn't.
* -EINVAL:
* invalid input flags.
* start address is not page aligned.
- * Address arange (start + len) overflow.
+ * Address range (start + len) overflow.
* -ENOMEM:
* addr is not a valid address (not allocated).
* end (start + len) is not a valid address.
* We reset memblock back to the top-down direction
* here because if we configured ACPI_NUMA, we have
* parsed SRAT in init_func(). It is ok to have the
- * reset here even if we did't configure ACPI_NUMA
+ * reset here even if we didn't configure ACPI_NUMA
- * or acpi numa init fails and fallbacks to dummy
+ * or acpi numa init fails and falls back to dummy
* numa init.
*/
/*
* As memory initialization might be integrated into KASAN,
- * KASAN unpoisoning and memory initializion code must be
+ * KASAN unpoisoning and memory initialization code must be
* kept together to avoid discrepancies in behavior.
*/
* unsafe in NMI. If spin_trylock() is called from hard IRQ the current
* task may be waiting for one rt_spin_lock, but rt_spin_trylock() will
* mark the task as the owner of another rt_spin_lock which will
- * confuse PI logic, so return immediately if called form hard IRQ or
+ * confuse PI logic, so return immediately if called from hard IRQ or
* NMI.
*
* Note, irqs_disabled() case is ok. This function can be called
VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
/*
- * ->flags can be updated non-atomicially (scan_swap_map_slots),
+ * ->flags can be updated non-atomically (scan_swap_map_slots),
* but that will never affect SWP_FS_OPS, so the data_race
* is safe.
*/
if (data_race(sis->flags & SWP_FS_OPS))
swap_writepage_fs(folio, swap_plug);
/*
- * ->flags can be updated non-atomicially (scan_swap_map_slots),
+ * ->flags can be updated non-atomically (scan_swap_map_slots),
* but that will never affect SWP_SYNCHRONOUS_IO, so the data_race
* is safe.
*/
* pageblock. When not all pageblocks within a page are isolated at the same
* time, free page accounting can go wrong. For example, in the case of
* MAX_PAGE_ORDER = pageblock_order + 1, a MAX_PAGE_ORDER page has two
- * pagelbocks.
+ * pageblocks.
* [ MAX_PAGE_ORDER ]
* [ pageblock0 | pageblock1 ]
* When either pageblock is isolated, if it is a free page, the page is not
continue;
/*
- * If page was not comingled with another page we can
+ * If page was not commingled with another page we can
* consider the result to be "reported" since the page
* hasn't been modified, otherwise we will need to
* report on the new larger page when we make our way
EXPORT_SYMBOL(folio_add_lru);
/**
- * folio_add_lru_vma() - Add a folio to the appropate LRU list for this VMA.
+ * folio_add_lru_vma() - Add a folio to the appropriate LRU list for this VMA.
* @folio: The folio to be added to the LRU.
* @vma: VMA in which the folio is mapped.
*
/*
* All swap cache helpers below require the caller to ensure the swap entries
- * used are valid and stablize the device by any of the following ways:
+ * used are valid and stabilize the device by any of the following ways:
* - Hold a reference by get_swap_device(): this ensures a single entry is
* valid and increases the swap device's refcount.
* - Locking a folio in the swap cache: this ensures the folio's swap entries
* Context: Caller must ensure @entry is valid and protect the swap device
* with reference count or locks.
* Return: Returns the found folio on success, NULL otherwise. The caller
- * must lock nd check if the folio still matches the swap entry before
+ * must lock and check if the folio still matches the swap entry before
* use (e.g., folio_matches_swap_entry).
*/
struct folio *swap_cache_get_folio(swp_entry_t entry)
if (get_swap_device_info(si)) {
if (si->flags & SWP_WRITEOK) {
/*
- * Grab the local lock to be complaint
+ * Grab the local lock to be compliant
* with swap table allocation.
*/
local_lock(&percpu_swap_cluster.lock);
* Use the maywrite version to indicate that dst_pte will be modified,
* since dst_pte needs to be none, the subsequent pte_same() check
* cannot prevent the dst_pte page from being freed concurrently, so we
- * also need to abtain dst_pmdval and recheck pmd_same() later.
+ * also need to obtain dst_pmdval and recheck pmd_same() later.
*/
dst_pte = pte_offset_map_rw_nolock(mm, dst_pmd, dst_addr, &dst_pmdval,
&dst_ptl);
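/*
 * Sketch of the "recheck pmd_same() later" step the comment refers to
 * (error handling is illustrative): once dst_ptl is taken, a stale pmd
 * value means the page table backing dst_pte may already have been freed,
 * so bail out and retry rather than trusting pte_none(*dst_pte).
 */
spin_lock(dst_ptl);
if (unlikely(!pmd_same(dst_pmdval, pmdp_get_lockless(dst_pmd)))) {
	spin_unlock(dst_ptl);
	ret = -EAGAIN;		/* the page table changed under us; retry */
	goto out;
}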
goto out;
}
- /* If PTE changed after we locked the folio them start over */
+ /* If PTE changed after we locked the folio then start over */
if (src_folio && unlikely(!pte_same(src_folio_pte, orig_src_pte))) {
ret = -EAGAIN;
goto out;
return -ENOMEM;
/*
- * Adjust for the gap first so it doesn't interfere with the
- * later alignment. The first step is the minimum needed to
- * fulill the start gap, the next steps is the minimum to align
- * that. It is the minimum needed to fulill both.
+ * Adjust for the gap first so it doesn't interfere with the later
+ * alignment. The first step is the minimum needed to fulfill the start
+ * gap, the next step is the minimum to align that. It is the minimum
+ * needed to fulfill both.
*/
gap = vma_iter_addr(&vmi) + info->start_gap;
gap += (info->align_offset - gap) & info->align_mask;
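/*
 * Worked example of the two steps above (the values are illustrative only):
 * with vma_iter_addr() == 0x10000, start_gap == 0x100, align_mask == 0xfff
 * and align_offset == 0:
 *
 *	gap  = 0x10000 + 0x100			-> 0x10100  (start gap satisfied)
 *	gap += (0 - 0x10100) & 0xfff == 0xf00	-> 0x11000  (aligned)
 *
 * i.e. the lowest address that both leaves the requested gap and meets the
 * requested alignment.
 */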
struct vm_area_struct *prev, struct vm_area_struct *next);
/**
- * vma_modify_flags() - Peform any necessary split/merge in preparation for
+ * vma_modify_flags() - Perform any necessary split/merge in preparation for
* setting VMA flags to *@vm_flags in the range @start to @end contained within
* @vma.
* @vmi: Valid VMA iterator positioned at @vma.
vm_flags_t *vm_flags_ptr);
/**
- * vma_modify_name() - Peform any necessary split/merge in preparation for
+ * vma_modify_name() - Perform any necessary split/merge in preparation for
* setting anonymous VMA name to @new_name in the range @start to @end contained
* within @vma.
* @vmi: Valid VMA iterator positioned at @vma.
struct anon_vma_name *new_name);
/**
- * vma_modify_policy() - Peform any necessary split/merge in preparation for
+ * vma_modify_policy() - Perform any necessary split/merge in preparation for
* setting NUMA policy to @new_pol in the range @start to @end contained
* within @vma.
* @vmi: Valid VMA iterator positioned at @vma.
struct mempolicy *new_pol);
/**
- * vma_modify_flags_uffd() - Peform any necessary split/merge in preparation for
+ * vma_modify_flags_uffd() - Perform any necessary split/merge in preparation for
* setting VMA flags to @vm_flags and UFFD context to @new_ctx in the range
* @start to @end contained within @vma.
* @vmi: Valid VMA iterator positioned at @vma.
/*
* We can "enter_fs" for swap-cache with only __GFP_IO
* providing this isn't SWP_FS_OPS.
- * ->flags can be updated non-atomicially (scan_swap_map_slots),
+ * ->flags can be updated non-atomically (scan_swap_map_slots),
* but that will never affect SWP_FS_OPS, so the data_race
* is safe.
*/
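/*
 * Sketch of the check this comment guards, assuming a helper along the
 * lines of vmscan's may_enter_fs(): a swap-cache folio may be written back
 * with __GFP_IO alone unless its swap device is SWP_FS_OPS.
 */
static bool may_enter_fs_sketch(struct folio *folio, gfp_t gfp_mask)
{
	if (gfp_mask & __GFP_FS)
		return true;
	if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO))
		return false;
	/* data_race(): concurrent ->flags updates never touch SWP_FS_OPS */
	return !data_race(folio_swap_flags(folio) & SWP_FS_OPS);
}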
}
}
-/* Print out the free pages at each order for each migatetype */
+/* Print out the free pages at each order for each migratetype */
static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
int order;
/*
* On systems with 4K page size, this gives 255 size classes! There is a
- * trader-off here:
+ * trade-off here:
- * - Large number of size classes is potentially wasteful as free page are
+ * - Large number of size classes is potentially wasteful as free pages are
* spread across these classes
* - Small number of size classes causes large internal fragmentation