--- /dev/null
+From 1dc3039bc87ae7d19a990c3ee71cfd8a9068f428 Mon Sep 17 00:00:00 2001
+From: Alan Jenkins <alan.christopher.jenkins@gmail.com>
+Date: Thu, 12 Apr 2018 19:11:58 +0100
+Subject: block: do not use interruptible wait anywhere
+
+From: Alan Jenkins <alan.christopher.jenkins@gmail.com>
+
+commit 1dc3039bc87ae7d19a990c3ee71cfd8a9068f428 upstream.
+
+When blk_queue_enter() waits for a queue to unfreeze, or unset the
+PREEMPT_ONLY flag, do not allow it to be interrupted by a signal.
+
+The PREEMPT_ONLY flag was introduced later in commit 3a0a529971ec
+("block, scsi: Make SCSI quiesce and resume work reliably"). Note the SCSI
+device is resumed asynchronously, i.e. after un-freezing userspace tasks.
+
+So that commit exposed the bug as a regression in v4.15. A mysterious
+SIGBUS (or -EIO) sometimes happened during the time the device was being
+resumed. Most frequently, there was no kernel log message, and we saw Xorg
+or Xwayland killed by SIGBUS.[1]
+
+[1] E.g. https://bugzilla.redhat.com/show_bug.cgi?id=1553979
+
+Without this fix, I get an IO error in this test:
+
+# dd if=/dev/sda of=/dev/null iflag=direct & \
+ while killall -SIGUSR1 dd; do sleep 0.1; done & \
+ echo mem > /sys/power/state ; \
+ sleep 5; killall dd # stop after 5 seconds
+
+The interruptible wait was added to blk_queue_enter in
+commit 3ef28e83ab15 ("block: generic request_queue reference counting").
+Before then, the interruptible wait was only in blk-mq, but I don't think
+it could ever have been correct.
+
+Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Alan Jenkins <alan.christopher.jenkins@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-core.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -651,21 +651,17 @@ EXPORT_SYMBOL(blk_alloc_queue);
+ int blk_queue_enter(struct request_queue *q, gfp_t gfp)
+ {
+ while (true) {
+- int ret;
+-
+ if (percpu_ref_tryget_live(&q->q_usage_counter))
+ return 0;
+
+ if (!gfpflags_allow_blocking(gfp))
+ return -EBUSY;
+
+- ret = wait_event_interruptible(q->mq_freeze_wq,
+- !atomic_read(&q->mq_freeze_depth) ||
+- blk_queue_dying(q));
++ wait_event(q->mq_freeze_wq,
++ !atomic_read(&q->mq_freeze_depth) ||
++ blk_queue_dying(q));
+ if (blk_queue_dying(q))
+ return -ENODEV;
+- if (ret)
+- return ret;
+ }
+ }
+
--- /dev/null
+From 797097301860c64b63346d068ba4fe4992bd5021 Mon Sep 17 00:00:00 2001
+From: Lucas Stach <dev@lynxeye.de>
+Date: Mon, 29 Feb 2016 21:46:07 +0100
+Subject: clk: tegra: Fix PLL_U post divider and initial rate on Tegra30
+
+From: Lucas Stach <dev@lynxeye.de>
+
+commit 797097301860c64b63346d068ba4fe4992bd5021 upstream.
+
+The post divider value in the frequency table is wrong as it would lead
+to the PLL producing an output rate of 960 MHz instead of the desired
+480 MHz. This wasn't a problem as nothing used the table to actually
+initialize the PLL rate, but the bootloader configuration was used
+unaltered.
+
+If the bootloader does not set up the PLL it will fail to come up when used
+under Linux. To fix this don't rely on the bootloader, but set the
+correct rate in the clock driver.
+
+Signed-off-by: Lucas Stach <dev@lynxeye.de>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+[jonathanh@nvidia.com: Back-ported to stable v4.4.y]
+Signed-off-by: Jon Hunter <jonathanh@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/tegra/clk-tegra30.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/clk/tegra/clk-tegra30.c
++++ b/drivers/clk/tegra/clk-tegra30.c
+@@ -333,11 +333,11 @@ static struct pdiv_map pllu_p[] = {
+ };
+
+ static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
+- { 12000000, 480000000, 960, 12, 0, 12},
+- { 13000000, 480000000, 960, 13, 0, 12},
+- { 16800000, 480000000, 400, 7, 0, 5},
+- { 19200000, 480000000, 200, 4, 0, 3},
+- { 26000000, 480000000, 960, 26, 0, 12},
++ { 12000000, 480000000, 960, 12, 2, 12 },
++ { 13000000, 480000000, 960, 13, 2, 12 },
++ { 16800000, 480000000, 400, 7, 2, 5 },
++ { 19200000, 480000000, 200, 4, 2, 3 },
++ { 26000000, 480000000, 960, 26, 2, 12 },
+ { 0, 0, 0, 0, 0, 0 },
+ };
+
+@@ -1372,6 +1372,7 @@ static struct tegra_clk_init_table init_
+ {TEGRA30_CLK_GR2D, TEGRA30_CLK_PLL_C, 300000000, 0},
+ {TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0},
+ {TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0},
++ { TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
+ {TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0}, /* This MUST be the last entry. */
+ };
+
x86-bugs-rename-ssbd_no-to-ssb_no.patch
x86-xen-add-call-of-speculative_store_bypass_ht_init-to-pv-paths.patch
x86-cpu-re-apply-forced-caps-every-time-cpu-caps-are-re-read.patch
+block-do-not-use-interruptible-wait-anywhere.patch
+clk-tegra-fix-pll_u-post-divider-and-initial-rate-on-tegra30.patch
+ubi-introduce-vol_ignored.patch
+ubi-rework-fastmap-attach-base-code.patch
+ubi-be-more-paranoid-while-seaching-for-the-most-recent-fastmap.patch
+ubi-fix-races-around-ubi_refill_pools.patch
+ubi-fix-fastmap-s-update_vol.patch
+ubi-fastmap-erase-outdated-anchor-pebs-during-attach.patch
--- /dev/null
+From 74f2c6e9a47cf4e508198c8594626cc82906a13d Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Tue, 14 Jun 2016 10:12:17 +0200
+Subject: ubi: Be more paranoid while searching for the most recent Fastmap
+
+From: Richard Weinberger <richard@nod.at>
+
+commit 74f2c6e9a47cf4e508198c8594626cc82906a13d upstream.
+
+Since PEB erasure is asynchronous it can happen that there is
+more than one Fastmap on the MTD. This is fine because the attach logic
+will pick the Fastmap data structure with the highest sequence number.
+
+On a not so well configured MTD stack spurious ECC errors are common.
+Causes can be different, bad hardware, wrong operating modes, etc...
+If the most current Fastmap renders bad due to ECC errors UBI might
+pick an older Fastmap to attach from.
+While this can only happen on an anyway broken setup it will show
+completely different symptoms and makes finding the root cause much
+more difficult.
+So, be debug friendly and fall back to scanning mode if we're facing
+an ECC error while scanning for Fastmap.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/attach.c | 28 ++++++++++++++++++++++++----
+ drivers/mtd/ubi/ubi.h | 3 +++
+ 2 files changed, 27 insertions(+), 4 deletions(-)
+
+--- a/drivers/mtd/ubi/attach.c
++++ b/drivers/mtd/ubi/attach.c
+@@ -856,13 +856,15 @@ static bool vol_ignored(int vol_id)
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: the physical eraseblock number
++ * @fast: true if we're scanning for a Fastmap
+ *
+ * This function reads UBI headers of PEB @pnum, checks them, and adds
+ * information about this PEB to the corresponding list or RB-tree in the
+ * "attaching info" structure. Returns zero if the physical eraseblock was
+ * successfully handled and a negative error code in case of failure.
+ */
+-static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum)
++static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
++ int pnum, bool fast)
+ {
+ long long ec;
+ int err, bitflips = 0, vol_id = -1, ec_err = 0;
+@@ -980,6 +982,20 @@ static int scan_peb(struct ubi_device *u
+ */
+ ai->maybe_bad_peb_count += 1;
+ case UBI_IO_BAD_HDR:
++ /*
++ * If we're facing a bad VID header we have to drop *all*
++ * Fastmap data structures we find. The most recent Fastmap
++ * could be bad and therefore there is a chance that we attach
++ * from an old one. On a fine MTD stack a PEB must not render
++ * bad all of a sudden, but the reality is different.
++ * So, let's be paranoid and help finding the root cause by
++ * falling back to scanning mode instead of attaching with a
++ * bad EBA table and cause data corruption which is hard to
++ * analyze.
++ */
++ if (fast)
++ ai->force_full_scan = 1;
++
+ if (ec_err)
+ /*
+ * Both headers are corrupted. There is a possibility
+@@ -1293,7 +1309,7 @@ static int scan_all(struct ubi_device *u
+ cond_resched();
+
+ dbg_gen("process PEB %d", pnum);
+- err = scan_peb(ubi, ai, pnum);
++ err = scan_peb(ubi, ai, pnum, false);
+ if (err < 0)
+ goto out_vidh;
+ }
+@@ -1407,7 +1423,7 @@ static int scan_fast(struct ubi_device *
+ cond_resched();
+
+ dbg_gen("process PEB %d", pnum);
+- err = scan_peb(ubi, scan_ai, pnum);
++ err = scan_peb(ubi, scan_ai, pnum, true);
+ if (err < 0)
+ goto out_vidh;
+ }
+@@ -1415,7 +1431,11 @@ static int scan_fast(struct ubi_device *
+ ubi_free_vid_hdr(ubi, vidh);
+ kfree(ech);
+
+- err = ubi_scan_fastmap(ubi, *ai, scan_ai);
++ if (scan_ai->force_full_scan)
++ err = UBI_NO_FASTMAP;
++ else
++ err = ubi_scan_fastmap(ubi, *ai, scan_ai);
++
+ if (err) {
+ /*
+ * Didn't attach via fastmap, do a full scan but reuse what
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -711,6 +711,8 @@ struct ubi_ainf_volume {
+ * @vols_found: number of volumes found
+ * @highest_vol_id: highest volume ID
+ * @is_empty: flag indicating whether the MTD device is empty or not
++ * @force_full_scan: flag indicating whether we need to do a full scan and drop
++ all existing Fastmap data structures
+ * @min_ec: lowest erase counter value
+ * @max_ec: highest erase counter value
+ * @max_sqnum: highest sequence number value
+@@ -738,6 +740,7 @@ struct ubi_attach_info {
+ int vols_found;
+ int highest_vol_id;
+ int is_empty;
++ int force_full_scan;
+ int min_ec;
+ int max_ec;
+ unsigned long long max_sqnum;
--- /dev/null
+From f78e5623f45bab2b726eec29dc5cefbbab2d0b1c Mon Sep 17 00:00:00 2001
+From: Sascha Hauer <s.hauer@pengutronix.de>
+Date: Tue, 5 Dec 2017 16:01:20 +0100
+Subject: ubi: fastmap: Erase outdated anchor PEBs during attach
+
+From: Sascha Hauer <s.hauer@pengutronix.de>
+
+commit f78e5623f45bab2b726eec29dc5cefbbab2d0b1c upstream.
+
+The fastmap update code might erase the current fastmap anchor PEB
+in case it doesn't find any new free PEB. When a power cut happens
+in this situation we must not have any outdated fastmap anchor PEB
+on the device, because that would be used to attach during next
+boot.
+The easiest way to make that sure is to erase all outdated fastmap
+anchor PEBs synchronously during attach.
+
+Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
+Reviewed-by: Richard Weinberger <richard@nod.at>
+Fixes: dbb7d2a88d2a ("UBI: Add fastmap core")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/wl.c | 77 +++++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 57 insertions(+), 20 deletions(-)
+
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -1509,6 +1509,46 @@ static void shutdown_work(struct ubi_dev
+ }
+
+ /**
++ * erase_aeb - erase a PEB given in UBI attach info PEB
++ * @ubi: UBI device description object
++ * @aeb: UBI attach info PEB
++ * @sync: If true, erase synchronously. Otherwise schedule for erasure
++ */
++static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
++{
++ struct ubi_wl_entry *e;
++ int err;
++
++ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
++ if (!e)
++ return -ENOMEM;
++
++ e->pnum = aeb->pnum;
++ e->ec = aeb->ec;
++ ubi->lookuptbl[e->pnum] = e;
++
++ if (sync) {
++ err = sync_erase(ubi, e, false);
++ if (err)
++ goto out_free;
++
++ wl_tree_add(e, &ubi->free);
++ ubi->free_count++;
++ } else {
++ err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
++ if (err)
++ goto out_free;
++ }
++
++ return 0;
++
++out_free:
++ wl_entry_destroy(ubi, e);
++
++ return err;
++}
++
++/**
+ * ubi_wl_init - initialize the WL sub-system using attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+@@ -1545,17 +1585,9 @@ int ubi_wl_init(struct ubi_device *ubi,
+ list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
+ cond_resched();
+
+- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+- if (!e)
+- goto out_free;
+-
+- e->pnum = aeb->pnum;
+- e->ec = aeb->ec;
+- ubi->lookuptbl[e->pnum] = e;
+- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
+- wl_entry_destroy(ubi, e);
++ err = erase_aeb(ubi, aeb, false);
++ if (err)
+ goto out_free;
+- }
+
+ found_pebs++;
+ }
+@@ -1615,6 +1647,8 @@ int ubi_wl_init(struct ubi_device *ubi,
+ ubi_assert(!ubi->lookuptbl[e->pnum]);
+ ubi->lookuptbl[e->pnum] = e;
+ } else {
++ bool sync = false;
++
+ /*
+ * Usually old Fastmap PEBs are scheduled for erasure
+ * and we don't have to care about them but if we face
+@@ -1624,18 +1658,21 @@ int ubi_wl_init(struct ubi_device *ubi,
+ if (ubi->lookuptbl[aeb->pnum])
+ continue;
+
+- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+- if (!e)
+- goto out_free;
++ /*
++ * The fastmap update code might not find a free PEB for
++ * writing the fastmap anchor to and then reuses the
++ * current fastmap anchor PEB. When this PEB gets erased
++ * and a power cut happens before it is written again we
++ * must make sure that the fastmap attach code doesn't
++ * find any outdated fastmap anchors, hence we erase the
++ * outdated fastmap anchor PEBs synchronously here.
++ */
++ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
++ sync = true;
+
+- e->pnum = aeb->pnum;
+- e->ec = aeb->ec;
+- ubi_assert(!ubi->lookuptbl[e->pnum]);
+- ubi->lookuptbl[e->pnum] = e;
+- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
+- wl_entry_destroy(ubi, e);
++ err = erase_aeb(ubi, aeb, sync);
++ if (err)
+ goto out_free;
+- }
+ }
+
+ found_pebs++;
--- /dev/null
+From f7d11b33d4e8cedf19367c09b891bbc705163976 Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Wed, 24 Aug 2016 14:36:15 +0200
+Subject: ubi: Fix Fastmap's update_vol()
+
+From: Richard Weinberger <richard@nod.at>
+
+commit f7d11b33d4e8cedf19367c09b891bbc705163976 upstream.
+
+Usually Fastmap is free to consider every PEB in one of the pools
+as newer than the existing PEB. Since PEBs in a pool are by definition
+newer than everything else.
+But update_vol() missed the case that a pool can contain more than
+one candidate.
+
+Cc: <stable@vger.kernel.org>
+Fixes: dbb7d2a88d ("UBI: Add fastmap core")
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Reviewed-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/fastmap.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -326,6 +326,7 @@ static int update_vol(struct ubi_device
+ aeb->pnum = new_aeb->pnum;
+ aeb->copy_flag = new_vh->copy_flag;
+ aeb->scrub = new_aeb->scrub;
++ aeb->sqnum = new_aeb->sqnum;
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ /* new_aeb is older */
--- /dev/null
+From 2e8f08deabbc7eefe4c5838aaa6aa9a23a8acf2e Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Wed, 24 Aug 2016 14:36:14 +0200
+Subject: ubi: Fix races around ubi_refill_pools()
+
+From: Richard Weinberger <richard@nod.at>
+
+commit 2e8f08deabbc7eefe4c5838aaa6aa9a23a8acf2e upstream.
+
+When writing a new Fastmap the first thing that happens
+is refilling the pools in memory.
+At this stage it is possible that new PEBs from the new pools
+get already claimed and written with data.
+If this happens before the new Fastmap data structure hits the
+flash and we face power cut the freshly written PEB will not
+scanned and unnoticed.
+
+Solve the issue by locking the pools until Fastmap is written.
+
+Cc: <stable@vger.kernel.org>
+Fixes: dbb7d2a88d ("UBI: Add fastmap core")
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/eba.c | 4 ++--
+ drivers/mtd/ubi/fastmap-wl.c | 6 ++++--
+ drivers/mtd/ubi/fastmap.c | 14 ++++++++++----
+ drivers/mtd/ubi/wl.c | 20 ++++++++++++++------
+ 4 files changed, 30 insertions(+), 14 deletions(-)
+
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -1178,6 +1178,8 @@ int ubi_eba_copy_leb(struct ubi_device *
+ struct ubi_volume *vol;
+ uint32_t crc;
+
++ ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
++
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+
+@@ -1346,9 +1348,7 @@ int ubi_eba_copy_leb(struct ubi_device *
+ }
+
+ ubi_assert(vol->eba_tbl[lnum] == from);
+- down_read(&ubi->fm_eba_sem);
+ vol->eba_tbl[lnum] = to;
+- up_read(&ubi->fm_eba_sem);
+
+ out_unlock_buf:
+ mutex_unlock(&ubi->buf_mutex);
+--- a/drivers/mtd/ubi/fastmap-wl.c
++++ b/drivers/mtd/ubi/fastmap-wl.c
+@@ -262,6 +262,8 @@ static struct ubi_wl_entry *get_peb_for_
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int pnum;
+
++ ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
++
+ if (pool->used == pool->size) {
+ /* We cannot update the fastmap here because this
+ * function is called in atomic context.
+@@ -303,7 +305,7 @@ int ubi_ensure_anchor_pebs(struct ubi_de
+
+ wrk->anchor = 1;
+ wrk->func = &wear_leveling_worker;
+- schedule_ubi_work(ubi, wrk);
++ __schedule_ubi_work(ubi, wrk);
+ return 0;
+ }
+
+@@ -344,7 +346,7 @@ int ubi_wl_put_fm_peb(struct ubi_device
+ spin_unlock(&ubi->wl_lock);
+
+ vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
+- return schedule_erase(ubi, e, vol_id, lnum, torture);
++ return schedule_erase(ubi, e, vol_id, lnum, torture, true);
+ }
+
+ /**
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -1514,22 +1514,30 @@ int ubi_update_fastmap(struct ubi_device
+ struct ubi_wl_entry *tmp_e;
+
+ down_write(&ubi->fm_protect);
++ down_write(&ubi->work_sem);
++ down_write(&ubi->fm_eba_sem);
+
+ ubi_refill_pools(ubi);
+
+ if (ubi->ro_mode || ubi->fm_disabled) {
++ up_write(&ubi->fm_eba_sem);
++ up_write(&ubi->work_sem);
+ up_write(&ubi->fm_protect);
+ return 0;
+ }
+
+ ret = ubi_ensure_anchor_pebs(ubi);
+ if (ret) {
++ up_write(&ubi->fm_eba_sem);
++ up_write(&ubi->work_sem);
+ up_write(&ubi->fm_protect);
+ return ret;
+ }
+
+ new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+ if (!new_fm) {
++ up_write(&ubi->fm_eba_sem);
++ up_write(&ubi->work_sem);
+ up_write(&ubi->fm_protect);
+ return -ENOMEM;
+ }
+@@ -1638,16 +1646,14 @@ int ubi_update_fastmap(struct ubi_device
+ new_fm->e[0] = tmp_e;
+ }
+
+- down_write(&ubi->work_sem);
+- down_write(&ubi->fm_eba_sem);
+ ret = ubi_write_fastmap(ubi, new_fm);
+- up_write(&ubi->fm_eba_sem);
+- up_write(&ubi->work_sem);
+
+ if (ret)
+ goto err;
+
+ out_unlock:
++ up_write(&ubi->fm_eba_sem);
++ up_write(&ubi->work_sem);
+ up_write(&ubi->fm_protect);
+ kfree(old_fm);
+ return ret;
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -580,7 +580,7 @@ static int erase_worker(struct ubi_devic
+ * failure.
+ */
+ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+- int vol_id, int lnum, int torture)
++ int vol_id, int lnum, int torture, bool nested)
+ {
+ struct ubi_work *wl_wrk;
+
+@@ -599,7 +599,10 @@ static int schedule_erase(struct ubi_dev
+ wl_wrk->lnum = lnum;
+ wl_wrk->torture = torture;
+
+- schedule_ubi_work(ubi, wl_wrk);
++ if (nested)
++ __schedule_ubi_work(ubi, wl_wrk);
++ else
++ schedule_ubi_work(ubi, wl_wrk);
+ return 0;
+ }
+
+@@ -658,6 +661,7 @@ static int wear_leveling_worker(struct u
+ if (!vid_hdr)
+ return -ENOMEM;
+
++ down_read(&ubi->fm_eba_sem);
+ mutex_lock(&ubi->move_mutex);
+ spin_lock(&ubi->wl_lock);
+ ubi_assert(!ubi->move_from && !ubi->move_to);
+@@ -884,6 +888,7 @@ static int wear_leveling_worker(struct u
+
+ dbg_wl("done");
+ mutex_unlock(&ubi->move_mutex);
++ up_read(&ubi->fm_eba_sem);
+ return 0;
+
+ /*
+@@ -925,6 +930,7 @@ out_not_moved:
+ }
+
+ mutex_unlock(&ubi->move_mutex);
++ up_read(&ubi->fm_eba_sem);
+ return 0;
+
+ out_error:
+@@ -946,6 +952,7 @@ out_error:
+ out_ro:
+ ubi_ro_mode(ubi);
+ mutex_unlock(&ubi->move_mutex);
++ up_read(&ubi->fm_eba_sem);
+ ubi_assert(err != 0);
+ return err < 0 ? err : -EIO;
+
+@@ -953,6 +960,7 @@ out_cancel:
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ mutex_unlock(&ubi->move_mutex);
++ up_read(&ubi->fm_eba_sem);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return 0;
+ }
+@@ -1075,7 +1083,7 @@ static int __erase_worker(struct ubi_dev
+ int err1;
+
+ /* Re-schedule the LEB for erasure */
+- err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
++ err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
+ if (err1) {
+ wl_entry_destroy(ubi, e);
+ err = err1;
+@@ -1256,7 +1264,7 @@ retry:
+ }
+ spin_unlock(&ubi->wl_lock);
+
+- err = schedule_erase(ubi, e, vol_id, lnum, torture);
++ err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
+ if (err) {
+ spin_lock(&ubi->wl_lock);
+ wl_tree_add(e, &ubi->used);
+@@ -1544,7 +1552,7 @@ int ubi_wl_init(struct ubi_device *ubi,
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
++ if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
+ wl_entry_destroy(ubi, e);
+ goto out_free;
+ }
+@@ -1624,7 +1632,7 @@ int ubi_wl_init(struct ubi_device *ubi,
+ e->ec = aeb->ec;
+ ubi_assert(!ubi->lookuptbl[e->pnum]);
+ ubi->lookuptbl[e->pnum] = e;
+- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
++ if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
+ wl_entry_destroy(ubi, e);
+ goto out_free;
+ }
--- /dev/null
+From 243a4f8126fcf7facb04b324dbb7c85d10b11ce9 Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Tue, 14 Jun 2016 10:12:13 +0200
+Subject: ubi: Introduce vol_ignored()
+
+From: Richard Weinberger <richard@nod.at>
+
+commit 243a4f8126fcf7facb04b324dbb7c85d10b11ce9 upstream.
+
+This makes the logic more easy to follow.
+
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/attach.c | 24 ++++++++++++++++++------
+ drivers/mtd/ubi/ubi.h | 15 +++++++++++++++
+ 2 files changed, 33 insertions(+), 6 deletions(-)
+
+--- a/drivers/mtd/ubi/attach.c
++++ b/drivers/mtd/ubi/attach.c
+@@ -803,6 +803,20 @@ out_unlock:
+ return err;
+ }
+
++static bool vol_ignored(int vol_id)
++{
++ switch (vol_id) {
++ case UBI_LAYOUT_VOLUME_ID:
++ return true;
++ }
++
++#ifdef CONFIG_MTD_UBI_FASTMAP
++ return ubi_is_fm_vol(vol_id);
++#else
++ return false;
++#endif
++}
++
+ /**
+ * scan_peb - scan and process UBI headers of a PEB.
+ * @ubi: UBI device description object
+@@ -995,17 +1009,15 @@ static int scan_peb(struct ubi_device *u
+ *vid = vol_id;
+ if (sqnum)
+ *sqnum = be64_to_cpu(vidh->sqnum);
+- if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
++ if (vol_id > UBI_MAX_VOLUMES && !vol_ignored(vol_id)) {
+ int lnum = be32_to_cpu(vidh->lnum);
+
+ /* Unsupported internal volume */
+ switch (vidh->compat) {
+ case UBI_COMPAT_DELETE:
+- if (vol_id != UBI_FM_SB_VOLUME_ID
+- && vol_id != UBI_FM_DATA_VOLUME_ID) {
+- ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
+- vol_id, lnum);
+- }
++ ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
++ vol_id, lnum);
++
+ err = add_to_list(ai, pnum, vol_id, lnum,
+ ec, 1, &ai->erase);
+ if (err)
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -1101,4 +1101,19 @@ static inline int idx2vol_id(const struc
+ return idx;
+ }
+
++/**
++ * ubi_is_fm_vol - check whether a volume ID is a Fastmap volume.
++ * @vol_id: volume ID
++ */
++static inline bool ubi_is_fm_vol(int vol_id)
++{
++ switch (vol_id) {
++ case UBI_FM_SB_VOLUME_ID:
++ case UBI_FM_DATA_VOLUME_ID:
++ return true;
++ }
++
++ return false;
++}
++
+ #endif /* !__UBI_UBI_H__ */
--- /dev/null
+From fdf10ed710c0aa177e8dfcd84e65e4e5e8e0956b Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Tue, 14 Jun 2016 10:12:15 +0200
+Subject: ubi: Rework Fastmap attach base code
+
+From: Richard Weinberger <richard@nod.at>
+
+commit fdf10ed710c0aa177e8dfcd84e65e4e5e8e0956b upstream.
+
+Introduce a new list to the UBI attach information
+object to be able to deal better with old and corrupted
+Fastmap eraseblocks.
+Also move more Fastmap specific code into fastmap.c.
+
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/attach.c | 99 ++++++++++++++++++++++++++++++++--------------
+ drivers/mtd/ubi/fastmap.c | 36 +++++++++++++++-
+ drivers/mtd/ubi/ubi.h | 28 ++++++++++++-
+ drivers/mtd/ubi/wl.c | 41 +++++++++++++++----
+ 4 files changed, 162 insertions(+), 42 deletions(-)
+
+--- a/drivers/mtd/ubi/attach.c
++++ b/drivers/mtd/ubi/attach.c
+@@ -175,6 +175,40 @@ static int add_corrupted(struct ubi_atta
+ }
+
+ /**
++ * add_fastmap - add a Fastmap related physical eraseblock.
++ * @ai: attaching information
++ * @pnum: physical eraseblock number the VID header came from
++ * @vid_hdr: the volume identifier header
++ * @ec: erase counter of the physical eraseblock
++ *
++ * This function allocates a 'struct ubi_ainf_peb' object for a Fastamp
++ * physical eraseblock @pnum and adds it to the 'fastmap' list.
++ * Such blocks can be Fastmap super and data blocks from both the most
++ * recent Fastmap we're attaching from or from old Fastmaps which will
++ * be erased.
++ */
++static int add_fastmap(struct ubi_attach_info *ai, int pnum,
++ struct ubi_vid_hdr *vid_hdr, int ec)
++{
++ struct ubi_ainf_peb *aeb;
++
++ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
++ if (!aeb)
++ return -ENOMEM;
++
++ aeb->pnum = pnum;
++ aeb->vol_id = be32_to_cpu(vidh->vol_id);
++ aeb->sqnum = be64_to_cpu(vidh->sqnum);
++ aeb->ec = ec;
++ list_add(&aeb->u.list, &ai->fastmap);
++
++ dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum,
++ aeb->vol_id, aeb->sqnum);
++
++ return 0;
++}
++
++/**
+ * validate_vid_hdr - check volume identifier header.
+ * @ubi: UBI device description object
+ * @vid_hdr: the volume identifier header to check
+@@ -822,18 +856,15 @@ static bool vol_ignored(int vol_id)
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: the physical eraseblock number
+- * @vid: The volume ID of the found volume will be stored in this pointer
+- * @sqnum: The sqnum of the found volume will be stored in this pointer
+ *
+ * This function reads UBI headers of PEB @pnum, checks them, and adds
+ * information about this PEB to the corresponding list or RB-tree in the
+ * "attaching info" structure. Returns zero if the physical eraseblock was
+ * successfully handled and a negative error code in case of failure.
+ */
+-static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+- int pnum, int *vid, unsigned long long *sqnum)
++static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum)
+ {
+- long long uninitialized_var(ec);
++ long long ec;
+ int err, bitflips = 0, vol_id = -1, ec_err = 0;
+
+ dbg_bld("scan PEB %d", pnum);
+@@ -1005,10 +1036,6 @@ static int scan_peb(struct ubi_device *u
+ }
+
+ vol_id = be32_to_cpu(vidh->vol_id);
+- if (vid)
+- *vid = vol_id;
+- if (sqnum)
+- *sqnum = be64_to_cpu(vidh->sqnum);
+ if (vol_id > UBI_MAX_VOLUMES && !vol_ignored(vol_id)) {
+ int lnum = be32_to_cpu(vidh->lnum);
+
+@@ -1049,7 +1076,12 @@ static int scan_peb(struct ubi_device *u
+ if (ec_err)
+ ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
+ pnum);
+- err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
++
++ if (ubi_is_fm_vol(vol_id))
++ err = add_fastmap(ai, pnum, vidh, ec);
++ else
++ err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
++
+ if (err)
+ return err;
+
+@@ -1198,6 +1230,10 @@ static void destroy_ai(struct ubi_attach
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
++ list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) {
++ list_del(&aeb->u.list);
++ kmem_cache_free(ai->aeb_slab_cache, aeb);
++ }
+
+ /* Destroy the volume RB-tree */
+ rb = ai->volumes.rb_node;
+@@ -1257,7 +1293,7 @@ static int scan_all(struct ubi_device *u
+ cond_resched();
+
+ dbg_gen("process PEB %d", pnum);
+- err = scan_peb(ubi, ai, pnum, NULL, NULL);
++ err = scan_peb(ubi, ai, pnum);
+ if (err < 0)
+ goto out_vidh;
+ }
+@@ -1323,6 +1359,7 @@ static struct ubi_attach_info *alloc_ai(
+ INIT_LIST_HEAD(&ai->free);
+ INIT_LIST_HEAD(&ai->erase);
+ INIT_LIST_HEAD(&ai->alien);
++ INIT_LIST_HEAD(&ai->fastmap);
+ ai->volumes = RB_ROOT;
+ ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
+ sizeof(struct ubi_ainf_peb),
+@@ -1349,52 +1386,54 @@ static struct ubi_attach_info *alloc_ai(
+ */
+ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
+ {
+- int err, pnum, fm_anchor = -1;
+- unsigned long long max_sqnum = 0;
++ int err, pnum;
++ struct ubi_attach_info *scan_ai;
+
+ err = -ENOMEM;
+
++ scan_ai = alloc_ai();
++ if (!scan_ai)
++ goto out;
++
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+- goto out;
++ goto out_ai;
+
+ vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vidh)
+ goto out_ech;
+
+ for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
+- int vol_id = -1;
+- unsigned long long sqnum = -1;
+ cond_resched();
+
+ dbg_gen("process PEB %d", pnum);
+- err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum);
++ err = scan_peb(ubi, scan_ai, pnum);
+ if (err < 0)
+ goto out_vidh;
+-
+- if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
+- max_sqnum = sqnum;
+- fm_anchor = pnum;
+- }
+ }
+
+ ubi_free_vid_hdr(ubi, vidh);
+ kfree(ech);
+
+- if (fm_anchor < 0)
+- return UBI_NO_FASTMAP;
+-
+- destroy_ai(*ai);
+- *ai = alloc_ai();
+- if (!*ai)
+- return -ENOMEM;
++ err = ubi_scan_fastmap(ubi, *ai, scan_ai);
++ if (err) {
++ /*
++ * Didn't attach via fastmap, do a full scan but reuse what
++ * we've aready scanned.
++ */
++ destroy_ai(*ai);
++ *ai = scan_ai;
++ } else
++ destroy_ai(scan_ai);
+
+- return ubi_scan_fastmap(ubi, *ai, fm_anchor);
++ return err;
+
+ out_vidh:
+ ubi_free_vid_hdr(ubi, vidh);
+ out_ech:
+ kfree(ech);
++out_ai:
++ destroy_ai(scan_ai);
+ out:
+ return err;
+ }
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -851,27 +851,57 @@ fail:
+ }
+
+ /**
++ * find_fm_anchor - find the most recent Fastmap superblock (anchor)
++ * @ai: UBI attach info to be filled
++ */
++static int find_fm_anchor(struct ubi_attach_info *ai)
++{
++ int ret = -1;
++ struct ubi_ainf_peb *aeb;
++ unsigned long long max_sqnum = 0;
++
++ list_for_each_entry(aeb, &ai->fastmap, u.list) {
++ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
++ max_sqnum = aeb->sqnum;
++ ret = aeb->pnum;
++ }
++ }
++
++ return ret;
++}
++
++/**
+ * ubi_scan_fastmap - scan the fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info to be filled
+- * @fm_anchor: The fastmap starts at this PEB
++ * @scan_ai: UBI attach info from the first 64 PEBs,
++ * used to find the most recent Fastmap data structure
+ *
+ * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
+ * UBI_BAD_FASTMAP if one was found but is not usable.
+ * < 0 indicates an internal error.
+ */
+ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+- int fm_anchor)
++ struct ubi_attach_info *scan_ai)
+ {
+ struct ubi_fm_sb *fmsb, *fmsb2;
+ struct ubi_vid_hdr *vh;
+ struct ubi_ec_hdr *ech;
+ struct ubi_fastmap_layout *fm;
+- int i, used_blocks, pnum, ret = 0;
++ struct ubi_ainf_peb *tmp_aeb, *aeb;
++ int i, used_blocks, pnum, fm_anchor, ret = 0;
+ size_t fm_size;
+ __be32 crc, tmp_crc;
+ unsigned long long sqnum = 0;
+
++ fm_anchor = find_fm_anchor(scan_ai);
++ if (fm_anchor < 0)
++ return UBI_NO_FASTMAP;
++
++ /* Move all (possible) fastmap blocks into our new attach structure. */
++ list_for_each_entry_safe(aeb, tmp_aeb, &scan_ai->fastmap, u.list)
++ list_move_tail(&aeb->u.list, &ai->fastmap);
++
+ down_write(&ubi->fm_protect);
+ memset(ubi->fm_buf, 0, ubi->fm_size);
+
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -699,6 +699,8 @@ struct ubi_ainf_volume {
+ * @erase: list of physical eraseblocks which have to be erased
+ * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
+ * those belonging to "preserve"-compatible internal volumes)
++ * @fastmap: list of physical eraseblocks which relate to fastmap (e.g.,
++ * eraseblocks of the current and not yet erased old fastmap blocks)
+ * @corr_peb_count: count of PEBs in the @corr list
+ * @empty_peb_count: count of PEBs which are presumably empty (contain only
+ * 0xFF bytes)
+@@ -727,6 +729,7 @@ struct ubi_attach_info {
+ struct list_head free;
+ struct list_head erase;
+ struct list_head alien;
++ struct list_head fastmap;
+ int corr_peb_count;
+ int empty_peb_count;
+ int alien_peb_count;
+@@ -907,7 +910,7 @@ int ubi_compare_lebs(struct ubi_device *
+ size_t ubi_calc_fm_size(struct ubi_device *ubi);
+ int ubi_update_fastmap(struct ubi_device *ubi);
+ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+- int fm_anchor);
++ struct ubi_attach_info *scan_ai);
+ #else
+ static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
+ #endif
+@@ -1116,4 +1119,27 @@ static inline bool ubi_is_fm_vol(int vol
+ return false;
+ }
+
++/**
++ * ubi_find_fm_block - check whether a PEB is part of the current Fastmap.
++ * @ubi: UBI device description object
++ * @pnum: physical eraseblock to look for
++ *
++ * This function returns a wear leveling object if @pnum relates to the current
++ * fastmap, @NULL otherwise.
++ */
++static inline struct ubi_wl_entry *ubi_find_fm_block(const struct ubi_device *ubi,
++ int pnum)
++{
++ int i;
++
++ if (ubi->fm) {
++ for (i = 0; i < ubi->fm->used_blocks; i++) {
++ if (ubi->fm->e[i]->pnum == pnum)
++ return ubi->fm->e[i];
++ }
++ }
++
++ return NULL;
++}
++
+ #endif /* !__UBI_UBI_H__ */
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -1598,19 +1598,44 @@ int ubi_wl_init(struct ubi_device *ubi,
+ }
+ }
+
+- dbg_wl("found %i PEBs", found_pebs);
++ list_for_each_entry(aeb, &ai->fastmap, u.list) {
++ cond_resched();
++
++ e = ubi_find_fm_block(ubi, aeb->pnum);
++
++ if (e) {
++ ubi_assert(!ubi->lookuptbl[e->pnum]);
++ ubi->lookuptbl[e->pnum] = e;
++ } else {
++ /*
++ * Usually old Fastmap PEBs are scheduled for erasure
++ * and we don't have to care about them but if we face
++ * an power cut before scheduling them we need to
++ * take care of them here.
++ */
++ if (ubi->lookuptbl[aeb->pnum])
++ continue;
+
+- if (ubi->fm) {
+- ubi_assert(ubi->good_peb_count ==
+- found_pebs + ubi->fm->used_blocks);
++ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
++ if (!e)
++ goto out_free;
+
+- for (i = 0; i < ubi->fm->used_blocks; i++) {
+- e = ubi->fm->e[i];
++ e->pnum = aeb->pnum;
++ e->ec = aeb->ec;
++ ubi_assert(!ubi->lookuptbl[e->pnum]);
+ ubi->lookuptbl[e->pnum] = e;
++ if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
++ wl_entry_destroy(ubi, e);
++ goto out_free;
++ }
+ }
++
++ found_pebs++;
+ }
+- else
+- ubi_assert(ubi->good_peb_count == found_pebs);
++
++ dbg_wl("found %i PEBs", found_pebs);
++
++ ubi_assert(ubi->good_peb_count == found_pebs);
+
+ reserved_pebs = WL_RESERVED_PEBS;
+ ubi_fastmap_init(ubi, &reserved_pebs);