git.ipfire.org Git - thirdparty/linux.git/blobdiff - mm/swapfile.c
mm/swapfile.c: __swap_entry_free() always free 1 entry
[thirdparty/linux.git] / mm / swapfile.c
index c74c9e1dc50d5864cf62ed211e350eeef38a805a..2aa272376cae18652ad7e3712cc6d78062820e7f 100644 (file)
@@ -629,17 +629,15 @@ new_cluster:
        tmp = cluster->next;
        max = min_t(unsigned long, si->max,
                    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
-       if (tmp >= max) {
-               cluster_set_null(&cluster->index);
-               goto new_cluster;
-       }
-       ci = lock_cluster(si, tmp);
-       while (tmp < max) {
-               if (!si->swap_map[tmp])
-                       break;
-               tmp++;
+       if (tmp < max) {
+               ci = lock_cluster(si, tmp);
+               while (tmp < max) {
+                       if (!si->swap_map[tmp])
+                               break;
+                       tmp++;
+               }
+               unlock_cluster(ci);
        }
-       unlock_cluster(ci);
        if (tmp >= max) {
                cluster_set_null(&cluster->index);
                goto new_cluster;
@@ -734,6 +732,7 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
        unsigned long last_in_cluster = 0;
        int latency_ration = LATENCY_LIMIT;
        int n_ret = 0;
+       bool scanned_many = false;
 
        /*
         * We try to cluster swap pages by allocating them sequentially
@@ -865,6 +864,25 @@ checks:
                goto checks;
        }
 
+       /*
+        * Even if there's no free clusters available (fragmented),
+        * try to scan a little more quickly with lock held unless we
+        * have scanned too many slots already.
+        */
+       if (!scanned_many) {
+               unsigned long scan_limit;
+
+               if (offset < scan_base)
+                       scan_limit = scan_base;
+               else
+                       scan_limit = si->highest_bit;
+               for (; offset <= scan_limit && --latency_ration > 0;
+                    offset++) {
+                       if (!si->swap_map[offset])
+                               goto checks;
+               }
+       }
+
 done:
        si->flags -= SWP_SCANNING;
        return n_ret;
@@ -883,6 +901,7 @@ scan:
                if (unlikely(--latency_ration < 0)) {
                        cond_resched();
                        latency_ration = LATENCY_LIMIT;
+                       scanned_many = true;
                }
        }
        offset = si->lowest_bit;
@@ -898,6 +917,7 @@ scan:
                if (unlikely(--latency_ration < 0)) {
                        cond_resched();
                        latency_ration = LATENCY_LIMIT;
+                       scanned_many = true;
                }
                offset++;
        }
@@ -1253,13 +1273,14 @@ unlock_out:
 }
 
 static unsigned char __swap_entry_free(struct swap_info_struct *p,
-                                      swp_entry_t entry, unsigned char usage)
+                                      swp_entry_t entry)
 {
        struct swap_cluster_info *ci;
        unsigned long offset = swp_offset(entry);
+       unsigned char usage;
 
        ci = lock_cluster_or_swap_info(p, offset);
-       usage = __swap_entry_free_locked(p, offset, usage);
+       usage = __swap_entry_free_locked(p, offset, 1);
        unlock_cluster_or_swap_info(p, ci);
        if (!usage)
                free_swap_slot(entry);
@@ -1294,7 +1315,7 @@ void swap_free(swp_entry_t entry)
 
        p = _swap_info_get(entry);
        if (p)
-               __swap_entry_free(p, entry, 1);
+               __swap_entry_free(p, entry);
 }
 
 /*
@@ -1717,7 +1738,7 @@ int free_swap_and_cache(swp_entry_t entry)
 
        p = _swap_info_get(entry);
        if (p) {
-               count = __swap_entry_free(p, entry, 1);
+               count = __swap_entry_free(p, entry);
                if (count == SWAP_HAS_CACHE &&
                    !swap_page_trans_huge_swapped(p, entry))
                        __try_to_reclaim_swap(p, swp_offset(entry),