// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */
/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As it was said, for the UBI sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in the @wl->used, @wl->erroneous, or @wl->scrub
 * RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 * o we don't want to move physical eraseblocks just after we have given them
 *   to the user; instead, we first want to let users fill them up with data;
 *
 * o there is a chance that the user will put the physical eraseblock very
 *   soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for limited time. But the "time" is
 * measured in erase cycles in this case. This is implemented with help of the
 * protection queue. Eraseblocks are put to the tail of this queue when they
 * are returned by 'ubi_wl_get_peb()', and eraseblocks are removed from the
 * head of the queue on each erase operation (for any eraseblock). So the
 * length of the queue defines how many (global) erase cycles PEBs are
 * protected for.
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, the wear-leveling entry of a used physical
 * eraseblock is kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In the future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with a low erase counter and we need to pick a
 * target PEB, we could pick a PEB with the highest EC if our PEB is "old" and
 * a target PEB with an average EC if our PEB is not very "old". There is
 * room here for future re-work of the WL sub-system.
 */
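/*
 * To make the protection-queue aging concrete: suppose, for illustration,
 * that UBI_PROT_QUEUE_LEN is 10. A PEB handed out by 'ubi_wl_get_peb()'
 * is placed at the tail of @wl->pq; each subsequent erase operation
 * (on any PEB) serves one queue slot, so after roughly 10 erase cycles
 * the protected PEB reaches the head, is released by serve_prot_queue(),
 * and moves to the @wl->used tree where the WL worker may pick it again.
 */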
#ifndef __UBOOT__
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#else
#include <ubi_uboot.h>
#endif

#include "ubi.h"
#include "wl.h"
/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
 * does not pick eraseblocks with erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
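/*
 * Illustration with made-up numbers: if the smallest erase counter in
 * @wl->free is 100 and UBI_WL_THRESHOLD is 4096, find_wl_entry() will not
 * return a free PEB whose erase counter exceeds 100 + 2*4096 = 8292,
 * no matter how many such PEBs are available.
 */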
/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);
/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}
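/*
 * A small worked example of the compound key used above: entries with
 * (EC, pnum) pairs (2, 7), (2, 9) and (3, 1) sort in exactly that order -
 * the erase counter decides first and the PEB number only breaks ties,
 * which keeps every key in the tree unique.
 */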
/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function destroys a wear leveling entry and removes
 * the reference from the lookup table.
 */
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	ubi->lookuptbl[e->pnum] = NULL;
	kmem_cache_free(ubi_wl_entry_slab, e);
}
/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes the mutex in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err(ubi, "work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}
/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}
/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
 * be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
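/*
 * The tail computation above is simple modular arithmetic: e.g. with
 * UBI_PROT_QUEUE_LEN == 10 and pq_head == 0, pq_tail wraps around to 9,
 * so a newly protected PEB lands in the slot which serve_prot_queue()
 * will reach last, i.e. only after every other slot has been served once.
 */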
/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
					  struct rb_root *root, int diff)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *prev_e = NULL;
	int max;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			prev_e = e;
			e = e1;
		}
	}

	/* If no fastmap has been written and this WL entry can be used
	 * as anchor PEB, hold it back and return the second best WL entry
	 * such that fastmap can use the anchor PEB later. */
	if (prev_e && !ubi->fm_disabled &&
	    !ubi->fm && e->pnum < UBI_FM_MAX_START)
		return prev_e;

	return e;
}
/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 *
 * This function looks for a wear leveling entry with medium erase counter,
 * but no greater than the lowest erase counter plus %WL_FREE_MAX_DIFF/2.
 */
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
					       struct rb_root *root)
{
	struct ubi_wl_entry *e, *first, *last;

	first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

	if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

		/* If no fastmap has been written and this WL entry can be used
		 * as anchor PEB, hold it back and return the second best
		 * WL entry such that fastmap can use the anchor PEB later. */
		e = may_reserve_for_fm(ubi, e, root);
	} else
		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

	return e;
}
/**
 * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
 * refill_wl_user_pool().
 * @ubi: UBI device description object
 *
 * This function returns a wear leveling entry in case of success and
 * NULL in case of failure.
 */
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_mean_wl_entry(ubi, &ubi->free);
	if (!e) {
		ubi_err(ubi, "no free eraseblocks");
		return NULL;
	}

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Move the physical eraseblock to the protection queue where it will
	 * be protected from being moved for some time.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);

	return e;
}
/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (self_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}
/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = self_check_ec(ubi, e->pnum, e->ec);
	if (err)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}
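/*
 * Note on the "ec += err" arithmetic above: ubi_io_sync_erase() returns
 * the number of erase operations it actually performed (more than one
 * when torturing), so a single call may advance the erase counter by
 * several cycles.
 */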
/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * tail of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process not more than 32 of them in one go.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
			e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
			spin_unlock(&ubi->wl_lock);
			cond_resched();
			goto repeat;
		}
	}

	ubi->pq_head += 1;
	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
		ubi->pq_head = 0;
	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
	spin_unlock(&ubi->wl_lock);
}
void ubi_do_worker(struct ubi_device *ubi)
{
	int err = 0;

	if (list_empty(&ubi->works) || ubi->ro_mode ||
	    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi))
		return;

	spin_lock(&ubi->wl_lock);
	while (!list_empty(&ubi->works)) {
		/*
		 * call do_work, which executes exactly one work from the
		 * queue, including removing it from the work queue.
		 */
		spin_unlock(&ubi->wl_lock);
		err = do_work(ubi);
		spin_lock(&ubi->wl_lock);
		if (err)
			ubi_err(ubi, "%s: work failed with error code %d",
				ubi->bgt_name, err);
	}

	spin_unlock(&ubi->wl_lock);
}
/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if ubi->work_sem is already held in read mode!
 */
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
#ifndef __UBOOT__
	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
		wake_up_process(ubi->bgt_thread);
#endif
	spin_unlock(&ubi->wl_lock);
}
/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	down_read(&ubi->work_sem);
	__schedule_ubi_work(ubi, wrk);
	up_read(&ubi->work_sem);
}
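/*
 * Usage note: schedule_ubi_work() is the variant for callers which do not
 * already hold @ubi->work_sem - it takes the semaphore in read mode around
 * __schedule_ubi_work(), matching the locking contract documented there.
 */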
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown);
/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int vol_id, int lnum, int torture)
{
	struct ubi_work *wl_wrk;

	ubi_assert(e);

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}
/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			 int vol_id, int lnum, int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("sync erase of PEB %i", e->pnum);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	return erase_worker(ubi, wl_wrk, 0);
}
/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL-subsystem is shutting down
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int shutdown)
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
	int anchor = wrk->anchor;
#endif
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);
	if (shutdown)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

#ifdef CONFIG_MTD_UBI_FASTMAP
	/* Check whether we need to produce an anchor PEB */
	if (!anchor)
		anchor = !anchor_pebs_avalible(&ubi->free);

	if (anchor) {
		e1 = find_anchor_wl_entry(&ubi->used);
		if (!e1)
			goto out_cancel;
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
	} else if (!ubi->scrub.rb_node) {
#else
	if (!ubi->scrub.rb_node) {
#endif
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ much enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);

			/* Give the unused PEB back */
			wl_tree_add(e2, &ubi->free);
			ubi->free_count++;
			goto out_cancel;
		}
		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_FF) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes VID headers shortly after the PEB
			 * was given, so we have a situation when it has not
			 * yet had a chance to write it, because it was
			 * preempted. So add this PEB to the protection queue
			 * so far, because presumably more data will be written
			 * there (including the missing VID header), and then
			 * we'll move it.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			protect = 1;
			goto out_not_moved;
		} else if (err == UBI_IO_FF_BITFLIPS) {
			/*
			 * The same situation as %UBI_IO_FF, but bit-flips were
			 * detected. It is better to schedule this PEB for
			 * scrubbing.
			 */
			dbg_wl("PEB %d has no VID header but has bit-flips",
			       e1->pnum);
			scrubbing = 1;
			goto out_not_moved;
		}

		ubi_err(ubi, "error %d while reading VID header from PEB %d",
			err, e1->pnum);
		goto out_error;
	}

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err == MOVE_CANCEL_RACE) {
			/*
			 * The LEB has not been moved because the volume is
			 * being deleted or the PEB has been put meanwhile. We
			 * should prevent this PEB from being selected for
			 * wear-leveling movement again, so put it to the
			 * protection queue.
			 */
			protect = 1;
			goto out_not_moved;
		}
		if (err == MOVE_RETRY) {
			scrubbing = 1;
			goto out_not_moved;
		}
		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
		    err == MOVE_TARGET_RD_ERR) {
			/*
			 * Target PEB had bit-flips or write error - torture it.
			 */
			torture = 1;
			goto out_not_moved;
		}

		if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping corresponding LEB. Instead, just
			 * put this PEB to the @ubi->erroneous list to prevent
			 * UBI from trying to move it over and over again.
			 */
			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
				ubi_err(ubi, "too many erroneous eraseblocks (%d)",
					ubi->erroneous_peb_count);
				goto out_error;
			}
			erroneous = 1;
			goto out_not_moved;
		}

		if (err < 0)
			goto out_error;

		ubi_assert(0);
	}

	/* The PEB has been successfully moved */
	if (scrubbing)
		ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
			e1->pnum, vol_id, lnum, e2->pnum);
	ubi_free_vid_hdr(ubi, vid_hdr);

	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put) {
		wl_tree_add(e2, &ubi->used);
		e2 = NULL;
	}
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
	if (err) {
		if (e2)
			wl_entry_destroy(ubi, e2);
		goto out_ro;
	}

	if (e2) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
		       e2->pnum, vol_id, lnum);
		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
		if (err)
			goto out_ro;
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * For some reasons the LEB was not moved, might be an error, might be
	 * something else. @e1 was not changed, so return it back. @e2 might
	 * have been changed, schedule it for erasure.
	 */
out_not_moved:
	if (vol_id != -1)
		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
		       e1->pnum, vol_id, lnum, e2->pnum, err);
	else
		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
		       e1->pnum, e2->pnum, err);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_queue_add(ubi, e1);
	else if (erroneous) {
		wl_tree_add(e1, &ubi->erroneous);
		ubi->erroneous_peb_count += 1;
	} else if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi_assert(!ubi->move_to_put);
	ubi->move_from = ubi->move_to = NULL;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
	if (err)
		goto out_ro;

	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	if (vol_id != -1)
		ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	else
		ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	wl_entry_destroy(ubi, e1);
	wl_entry_destroy(ubi, e2);

out_ro:
	ubi_ro_mode(ubi);
	mutex_unlock(&ubi->move_mutex);
	ubi_assert(err != 0);
	return err < 0 ? err : -EIO;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}
/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set to non-zero if this function is called from UBI worker
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->anchor = 0;
	wrk->func = &wear_leveling_worker;
	if (nested)
		__schedule_ubi_work(ubi, wrk);
#ifndef __UBOOT__
	else
		schedule_ubi_work(ubi, wrk);
#else
	else {
		schedule_ubi_work(ubi, wrk);
		ubi_do_worker(ubi);
	}
#endif
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}
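/*
 * To illustrate the trigger condition above with made-up numbers: if the
 * least worn used PEB has EC 100, wear-leveling is scheduled only when the
 * free candidate returned by find_wl_entry() has EC 100 + UBI_WL_THRESHOLD
 * or more; otherwise the erase counters are considered balanced enough.
 */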
/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL sub-system is shutting down
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum;
	int vol_id = wl_wrk->vol_id;
	int lnum = wl_wrk->lnum;
	int err, available_consumed = 0;

	if (shutdown) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		wl_entry_destroy(ubi, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d LEB %d:%d",
	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we've erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care about
		 * protected physical eraseblocks.
		 */
		serve_prot_queue(ubi);

		/* And take care about wear-leveling */
		err = ensure_wear_leveling(ubi, 1);
		return err;
	}

	ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
	kfree(wl_wrk);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the LEB for erasure */
		err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
		if (err1) {
			err = err1;
			goto out_ro;
		}
		return err;
	}

	wl_entry_destroy(ubi, e);
	if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to R/O mode.
		 */
		goto out_ro;

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

	spin_lock(&ubi->volumes_lock);
	if (ubi->beb_rsvd_pebs == 0) {
		if (ubi->avail_pebs == 0) {
			spin_unlock(&ubi->volumes_lock);
			ubi_err(ubi, "no reserved/available physical eraseblocks");
			goto out_ro;
		}
		ubi->avail_pebs -= 1;
		available_consumed = 1;
	}
	spin_unlock(&ubi->volumes_lock);

	ubi_msg(ubi, "mark PEB %d as bad", pnum);
	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	if (ubi->beb_rsvd_pebs > 0) {
		if (available_consumed) {
			/*
			 * The amount of reserved PEBs increased since we last
			 * checked.
			 */
			ubi->avail_pebs += 1;
			available_consumed = 0;
		}
		ubi->beb_rsvd_pebs -= 1;
	}
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (available_consumed)
		ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
	else if (ubi->beb_rsvd_pebs)
		ubi_msg(ubi, "%d PEBs left in the reserve",
			ubi->beb_rsvd_pebs);
	else
		ubi_warn(ubi, "last PEB from the reserve was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	if (available_consumed) {
		spin_lock(&ubi->volumes_lock);
		ubi->avail_pebs += 1;
		spin_unlock(&ubi->volumes_lock);
	}
	ubi_ro_mode(ubi);
	return err;
}
/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred to this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
		   int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	down_read(&ubi->fm_protect);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected to
		 * be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait for the WL worker by taking the @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the EBA
		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
		 * but the WL sub-system has not put the PEB to the "used" tree
		 * yet, but it is about to do this. So we just set a flag which
		 * will tell the WL worker that the PEB is not needed anymore
		 * and should be scheduled for erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_protect);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			self_check_in_wl_tree(ubi, e, &ubi->used);
			rb_erase(&e->u.rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			self_check_in_wl_tree(ubi, e, &ubi->scrub);
			rb_erase(&e->u.rb, &ubi->scrub);
		} else if (in_wl_tree(e, &ubi->erroneous)) {
			self_check_in_wl_tree(ubi, e, &ubi->erroneous);
			rb_erase(&e->u.rb, &ubi->erroneous);
			ubi->erroneous_peb_count -= 1;
			ubi_assert(ubi->erroneous_peb_count >= 0);
			/* Erroneous PEBs should be tortured */
			torture = 1;
		} else {
			err = prot_queue_del(ubi, e->pnum);
			if (err) {
				ubi_err(ubi, "PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				up_read(&ubi->fm_protect);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, vol_id, lnum, torture);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	up_read(&ubi->fm_protect);
	return err;
}
/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing which is done in background. This function returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
				   in_wl_tree(e, &ubi->erroneous)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		self_check_in_wl_tree(ubi, e, &ubi->used);
		rb_erase(&e->u.rb, &ubi->used);
	} else {
		int err;

		err = prot_queue_del(ubi, e->pnum);
		if (err) {
			ubi_err(ubi, "PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi, 0);
}
/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 * @vol_id: the volume id to flush for
 * @lnum: the logical eraseblock number to flush for
 *
 * This function executes all pending works for a particular volume id /
 * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
 * acts as a wildcard for all of the corresponding volume numbers or logical
 * eraseblock numbers. It returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
{
	int err = 0;
	int found = 1;

	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
	       vol_id, lnum, ubi->works_count);

	while (found) {
		struct ubi_work *wrk, *tmp;
		found = 0;

		down_read(&ubi->work_sem);
		spin_lock(&ubi->wl_lock);
		list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
			if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
			    (lnum == UBI_ALL || wrk->lnum == lnum)) {
				list_del(&wrk->list);
				ubi->works_count -= 1;
				ubi_assert(ubi->works_count >= 0);
				spin_unlock(&ubi->wl_lock);

				err = wrk->func(ubi, wrk, 0);
				if (err) {
					up_read(&ubi->work_sem);
					return err;
				}

				spin_lock(&ubi->wl_lock);
				found = 1;
				break;
			}
		}
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
	}

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	return err;
}
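/*
 * Example calls matching the wildcard semantics described above:
 * ubi_wl_flush(ubi, UBI_ALL, UBI_ALL) drains every pending work, while
 * ubi_wl_flush(ubi, vol_id, UBI_ALL) only executes works queued for the
 * given volume.
 */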
/**
 * tree_destroy - destroy an RB-tree.
 * @ubi: UBI device description object
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, u.rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->u.rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			wl_entry_destroy(ubi, e);
		}
	}
}
/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg(ubi, "background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err(ubi, "%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg(ubi, "%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				ubi->thread_enabled = 0;
				continue;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}
/**
 * shutdown_work - shutdown all pending works.
 * @ubi: UBI device description object
 */
static void shutdown_work(struct ubi_device *ubi)
{
#ifdef CONFIG_MTD_UBI_FASTMAP
#ifndef __UBOOT__
	flush_work(&ubi->fm_work);
#else
	/* in U-Boot, we have all work done */
#endif
#endif
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}
/**
 * ubi_wl_init - initialize the WL sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int err, i, reserved_pebs, found_pebs = 0;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = ai->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
		INIT_LIST_HEAD(&ubi->pq[i]);
	ubi->pq_head = 0;

	ubi->free_count = 0;
	list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
			wl_entry_destroy(ubi, e);
			goto out_free;
		}

		found_pebs++;
	}

	list_for_each_entry(aeb, &ai->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
		ubi_assert(e->ec >= 0);

		wl_tree_add(e, &ubi->free);
		ubi->free_count++;

		ubi->lookuptbl[e->pnum] = e;

		found_pebs++;
	}

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = aeb->pnum;
			e->ec = aeb->ec;
			ubi->lookuptbl[e->pnum] = e;

			if (!aeb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}

			found_pebs++;
		}
	}

	dbg_wl("found %i PEBs", found_pebs);

	if (ubi->fm) {
		ubi_assert(ubi->good_peb_count ==
			   found_pebs + ubi->fm->used_blocks);

		for (i = 0; i < ubi->fm->used_blocks; i++) {
			e = ubi->fm->e[i];
			ubi->lookuptbl[e->pnum] = e;
		}
	} else
		ubi_assert(ubi->good_peb_count == found_pebs);

	reserved_pebs = WL_RESERVED_PEBS;
	ubi_fastmap_init(ubi, &reserved_pebs);

	if (ubi->avail_pebs < reserved_pebs) {
		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, reserved_pebs);
		if (ubi->corr_peb_count)
			ubi_err(ubi, "%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= reserved_pebs;
	ubi->rsvd_pebs += reserved_pebs;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi, 0);
	if (err)
		goto out_free;

	return 0;

out_free:
	shutdown_work(ubi);
	tree_destroy(ubi, &ubi->used);
	tree_destroy(ubi, &ubi->free);
	tree_destroy(ubi, &ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}
/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */
static void protection_queue_destroy(struct ubi_device *ubi)
{
	int i;
	struct ubi_wl_entry *e, *tmp;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
			list_del(&e->u.list);
			wl_entry_destroy(ubi, e);
		}
	}
}
/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the WL sub-system");
	ubi_fastmap_close(ubi);
	shutdown_work(ubi);
	protection_queue_destroy(ubi);
	tree_destroy(ubi, &ubi->used);
	tree_destroy(ubi, &ubi->erroneous);
	tree_destroy(ubi, &ubi->free);
	tree_destroy(ubi, &ubi->scrub);
	kfree(ubi->lookuptbl);
}
/**
 * self_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock @pnum
 * is equivalent to @ec, and a negative error code if not or if an error
 * occurred.
 */
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec && read_ec - ec > 1) {
		ubi_err(ubi, "self-check failed for PEB %d", pnum);
		ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
		dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}
/**
 * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
 * is not.
 */
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root)
{
	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	if (in_wl_tree(e, root))
		return 0;

	ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
		e->pnum, e->ec, root);
	dump_stack();
	return -EINVAL;
}
/**
 * self_check_in_pq - check if wear-leveling entry is in the protection
 *                    queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
 */
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e)
{
	struct ubi_wl_entry *p;
	int i;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
		list_for_each_entry(p, &ubi->pq[i], u.list)
			if (p == e)
				return 0;

	ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
		e->pnum, e->ec);
	dump_stack();
	return -EINVAL;
}
#ifndef CONFIG_MTD_UBI_FASTMAP
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
	self_check_in_wl_tree(ubi, e, &ubi->free);
	ubi->free_count--;
	ubi_assert(ubi->free_count >= 0);
	rb_erase(&e->u.rb, &ubi->free);

	return e;
}
/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		spin_lock(&ubi->wl_lock);
		if (err)
			return err;
	}

	return 0;
}
/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int err;
	struct ubi_wl_entry *e;

retry:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_err(ubi, "no free eraseblocks");
			ubi_assert(list_empty(&ubi->works));
			spin_unlock(&ubi->wl_lock);
			return -ENOSPC;
		}

		err = produce_free_peb(ubi);
		if (err < 0) {
			spin_unlock(&ubi->wl_lock);
			return err;
		}
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		goto retry;
	}
	e = wl_get_wle(ubi);
	prot_queue_add(ubi, e);
	spin_unlock(&ubi->wl_lock);

	err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
				    ubi->peb_size - ubi->vid_hdr_aloffset);
	if (err) {
		ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
		return err;
	}

	return e->pnum;
}
#else
#include "fastmap-wl.c"
#endif