--- /dev/null
+From 7bc40aedf24d31d8bea80e1161e996ef4299fb10 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Thu, 12 Nov 2020 11:22:04 +0100
+Subject: mac80211: free sta in sta_info_insert_finish() on errors
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit 7bc40aedf24d31d8bea80e1161e996ef4299fb10 upstream.
+
+If sta_info_insert_finish() fails, we currently keep the station
+around and free it only in the caller, but there's only one such
+caller and it always frees it immediately.
+
+As syzbot found, another consequence of this split is that anything
+that sleeps may only be put into __cleanup_single_sta() and not into
+sta_info_free(), even though this is now the only place that imposes
+that requirement on sta_info_free().
+
+Change this to free the station in sta_info_insert_finish(), in
+which case we can still sleep. This will also let us unify the
+cleanup code later.
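+
+Condensed from the hunks below, the error handling in
+sta_info_insert_rcu() roughly becomes (simplified sketch, not the
+verbatim code):
+
+ err = sta_info_insert_check(sta);
+ if (err) {
+         sta_info_free(local, sta);      /* freed right here */
+         mutex_unlock(&local->sta_mtx);
+         rcu_read_lock();
+         return err;
+ }
+ /* on failure, sta_info_insert_finish() now frees the STA itself */
+ return sta_info_insert_finish(sta);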
+
+Cc: stable@vger.kernel.org
+Fixes: dcd479e10a05 ("mac80211: always wind down STA state")
+Reported-by: syzbot+32c6c38c4812d22f2f0b@syzkaller.appspotmail.com
+Reported-by: syzbot+4c81fe92e372d26c4246@syzkaller.appspotmail.com
+Reported-by: syzbot+6a7fe9faf0d1d61bc24a@syzkaller.appspotmail.com
+Reported-by: syzbot+abed06851c5ffe010921@syzkaller.appspotmail.com
+Reported-by: syzbot+b7aeb9318541a1c709f1@syzkaller.appspotmail.com
+Reported-by: syzbot+d5a9416c6cafe53b5dd0@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/r/20201112112201.ee6b397b9453.I9c31d667a0ea2151441cc64ed6613d36c18a48e0@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/mac80211/sta_info.c | 14 ++++----------
+ 1 file changed, 4 insertions(+), 10 deletions(-)
+
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -607,7 +607,7 @@ static int sta_info_insert_finish(struct
+ out_drop_sta:
+ local->num_sta--;
+ synchronize_net();
+- __cleanup_single_sta(sta);
++ cleanup_single_sta(sta);
+ out_err:
+ mutex_unlock(&local->sta_mtx);
+ kfree(sinfo);
+@@ -626,19 +626,13 @@ int sta_info_insert_rcu(struct sta_info
+
+ err = sta_info_insert_check(sta);
+ if (err) {
++ sta_info_free(local, sta);
+ mutex_unlock(&local->sta_mtx);
+ rcu_read_lock();
+- goto out_free;
++ return err;
+ }
+
+- err = sta_info_insert_finish(sta);
+- if (err)
+- goto out_free;
+-
+- return 0;
+- out_free:
+- sta_info_free(local, sta);
+- return err;
++ return sta_info_insert_finish(sta);
+ }
+
+ int sta_info_insert(struct sta_info *sta)
--- /dev/null
+From b2911a84396f72149dce310a3b64d8948212c1b3 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@nbd.name>
+Date: Wed, 11 Nov 2020 19:33:59 +0100
+Subject: mac80211: minstrel: fix tx status processing corner case
+
+From: Felix Fietkau <nbd@nbd.name>
+
+commit b2911a84396f72149dce310a3b64d8948212c1b3 upstream.
+
+Some drivers fill the status rate list without terminating it, i.e.
+without setting the rate index after the final rate to -1. minstrel_ht
+already deals with this, but minstrel doesn't, which causes it to get
+stuck at the lowest rate on these drivers.
+
+Fix this by checking the count as well.
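+
+With the one-line change below, the status loop effectively reads
+(simplified sketch):
+
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+         /* idx < 0 is the usual terminator, but some drivers leave
+          * idx untouched and only report count == 0 */
+         if (ar[i].idx < 0 || !ar[i].count)
+                 break;
+         /* ... update the statistics for rate ar[i] ... */
+ }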
+
+Cc: stable@vger.kernel.org
+Fixes: cccf129f820e ("mac80211: add the 'minstrel' rate control algorithm")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Link: https://lore.kernel.org/r/20201111183359.43528-3-nbd@nbd.name
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/mac80211/rc80211_minstrel.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/mac80211/rc80211_minstrel.c
++++ b/net/mac80211/rc80211_minstrel.c
+@@ -276,7 +276,7 @@ minstrel_tx_status(void *priv, struct ie
+ success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+- if (ar[i].idx < 0)
++ if (ar[i].idx < 0 || !ar[i].count)
+ break;
+
+ ndx = rix_to_ndx(mi, ar[i].idx);
--- /dev/null
+From 4fe40b8e1566dad04c87fbf299049a1d0d4bd58d Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@nbd.name>
+Date: Wed, 11 Nov 2020 19:33:58 +0100
+Subject: mac80211: minstrel: remove deferred sampling code
+
+From: Felix Fietkau <nbd@nbd.name>
+
+commit 4fe40b8e1566dad04c87fbf299049a1d0d4bd58d upstream.
+
+Deferring sampling attempts to the second stage has some bad interactions
+with drivers that process the rate table in hardware and use the probe flag
+to indicate probing packets (e.g. most mt76 drivers). On affected drivers
+it can lead to probing not working at all.
+
+Doing a lot of sampling of lower rates is also not such a good idea
+when link conditions turn worse.
+
+Fix this by simply skipping the sample attempt instead of deferring it.
+Keep the checks that allow a rate to be sampled anyway if it was skipped
+too often, but only if it has less than a 95% success probability.
+
+Also ensure that IEEE80211_TX_CTL_RATE_CTRL_PROBE is set for all probing
+packets.
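+
+Condensed from the hunks below: the defer branch is gone, the direct
+sampling path is guarded by the inverted condition, and the chosen
+probe frame is always flagged (simplified sketch):
+
+ if (msr->perfect_tx_time < mr->perfect_tx_time ||
+     msr->stats.sample_skipped >= 20) {
+         if (!msr->sample_limit)
+                 return;
+         /* ... account for the direct sample attempt ... */
+ }
+ /* ... */
+ rate->idx = mi->r[ndx].rix;
+ rate->count = minstrel_get_retry_count(&mi->r[ndx], info);
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;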
+
+Cc: stable@vger.kernel.org
+Fixes: cccf129f820e ("mac80211: add the 'minstrel' rate control algorithm")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Link: https://lore.kernel.org/r/20201111183359.43528-2-nbd@nbd.name
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/mac80211/rc80211_minstrel.c | 25 ++++---------------------
+ net/mac80211/rc80211_minstrel.h | 1 -
+ 2 files changed, 4 insertions(+), 22 deletions(-)
+
+--- a/net/mac80211/rc80211_minstrel.c
++++ b/net/mac80211/rc80211_minstrel.c
+@@ -289,12 +289,6 @@ minstrel_tx_status(void *priv, struct ie
+ mi->r[ndx].stats.success += success;
+ }
+
+- if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && (i >= 0))
+- mi->sample_packets++;
+-
+- if (mi->sample_deferred > 0)
+- mi->sample_deferred--;
+-
+ if (time_after(jiffies, mi->last_stats_update +
+ (mp->update_interval * HZ) / 1000))
+ minstrel_update_stats(mp, mi);
+@@ -373,7 +367,7 @@ minstrel_get_rate(void *priv, struct iee
+ return;
+
+ delta = (mi->total_packets * sampling_ratio / 100) -
+- (mi->sample_packets + mi->sample_deferred / 2);
++ mi->sample_packets;
+
+ /* delta < 0: no sampling required */
+ prev_sample = mi->prev_sample;
+@@ -382,7 +376,6 @@ minstrel_get_rate(void *priv, struct iee
+ return;
+
+ if (mi->total_packets >= 10000) {
+- mi->sample_deferred = 0;
+ mi->sample_packets = 0;
+ mi->total_packets = 0;
+ } else if (delta > mi->n_rates * 2) {
+@@ -407,19 +400,8 @@ minstrel_get_rate(void *priv, struct iee
+ * rate sampling method should be used.
+ * Respect such rates that are not sampled for 20 interations.
+ */
+- if (mrr_capable &&
+- msr->perfect_tx_time > mr->perfect_tx_time &&
+- msr->stats.sample_skipped < 20) {
+- /* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark
+- * packets that have the sampling rate deferred to the
+- * second MRR stage. Increase the sample counter only
+- * if the deferred sample rate was actually used.
+- * Use the sample_deferred counter to make sure that
+- * the sampling is not done in large bursts */
+- info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+- rate++;
+- mi->sample_deferred++;
+- } else {
++ if (msr->perfect_tx_time < mr->perfect_tx_time ||
++ msr->stats.sample_skipped >= 20) {
+ if (!msr->sample_limit)
+ return;
+
+@@ -439,6 +421,7 @@ minstrel_get_rate(void *priv, struct iee
+
+ rate->idx = mi->r[ndx].rix;
+ rate->count = minstrel_get_retry_count(&mi->r[ndx], info);
++ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+ }
+
+
+--- a/net/mac80211/rc80211_minstrel.h
++++ b/net/mac80211/rc80211_minstrel.h
+@@ -98,7 +98,6 @@ struct minstrel_sta_info {
+ u8 max_prob_rate;
+ unsigned int total_packets;
+ unsigned int sample_packets;
+- int sample_deferred;
+
+ unsigned int sample_row;
+ unsigned int sample_column;
--- /dev/null
+From 78d732e1f326f74f240d416af9484928303d9951 Mon Sep 17 00:00:00 2001
+From: Thomas Richter <tmricht@linux.ibm.com>
+Date: Wed, 11 Nov 2020 16:26:25 +0100
+Subject: s390/cpum_sf.c: fix file permission for cpum_sfb_size
+
+From: Thomas Richter <tmricht@linux.ibm.com>
+
+commit 78d732e1f326f74f240d416af9484928303d9951 upstream.
+
+This file is installed by the s390 CPU Measurement sampling
+facility device driver to export the supported minimum and
+maximum sample buffer sizes.
+The file is read by the lscpumf tool to display the details
+of the device driver capabilities. The lscpumf tool might
+be invoked by a non-root user, in which case it does not
+print anything because the file contents cannot be read.
+
+Fix this by allowing read access for all users. Reading
+the file contents is fine; changing the file contents is
+still left to the root user only.
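+
+The change below only touches the file mode of the parameter; the
+octal mode maps to the usual owner/group/other permission bits:
+
+ /* 0640: root rw-, group r--, others ---  (lscpumf gets nothing) */
+ /* 0644: root rw-, group r--, others r--  (lscpumf can read)     */
+ core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0644);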
+
+For further reference and details see:
+ [1] https://github.com/ibm-s390-tools/s390-tools/issues/97
+
+Fixes: 69f239ed335a ("s390/cpum_sf: Dynamically extend the sampling buffer if overflows occur")
+Cc: <stable@vger.kernel.org> # 3.14
+Signed-off-by: Thomas Richter <tmricht@linux.ibm.com>
+Acked-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/perf_cpum_sf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/kernel/perf_cpum_sf.c
++++ b/arch/s390/kernel/perf_cpum_sf.c
+@@ -1662,4 +1662,4 @@ out:
+ return err;
+ }
+ arch_initcall(init_cpum_sampling_pmu);
+-core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640);
++core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0644);
--- /dev/null
+From 6f117cb854a44a79898d844e6ae3fd23bd94e786 Mon Sep 17 00:00:00 2001
+From: Stefan Haberland <sth@linux.ibm.com>
+Date: Mon, 16 Nov 2020 16:23:47 +0100
+Subject: s390/dasd: fix null pointer dereference for ERP requests
+
+From: Stefan Haberland <sth@linux.ibm.com>
+
+commit 6f117cb854a44a79898d844e6ae3fd23bd94e786 upstream.
+
+When requeueing all requests on the device request queue to the blocklayer
+we might get to an ERP (error recovery) request that is a copy of an
+original CQR.
+
+Those requests do not have blocklayer request information or a pointer to
+the dasd_queue set. Trying to access that data then leads to a NULL
+pointer dereference in dasd_requeue_all_requests().
+
+Fix this by checking whether the request is an ERP request, which can
+simply be ignored. The blocklayer request will be requeued by the
+original CQR, which is on the device queue right behind the ERP request.
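+
+Condensed from the hunk below, the added check is (simplified sketch;
+cqr->refers points back to the original CQR and is only set for ERP
+requests):
+
+ /* ERP requests carry no blocklayer data; the original CQR right
+  * behind them takes care of requeueing the request */
+ if (cqr->refers)
+         return 0;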
+
+Fixes: 9487cfd3430d ("s390/dasd: fix handling of internal requests")
+Cc: <stable@vger.kernel.org> #4.16
+Signed-off-by: Stefan Haberland <sth@linux.ibm.com>
+Reviewed-by: Jan Hoeppner <hoeppner@linux.ibm.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/block/dasd.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -2891,6 +2891,12 @@ static int _dasd_requeue_request(struct
+
+ if (!block)
+ return -EINVAL;
++ /*
++ * If the request is an ERP request there is nothing to requeue.
++ * This will be done with the remaining original request.
++ */
++ if (cqr->refers)
++ return 0;
+ spin_lock_irq(&cqr->dq->lock);
+ req = (struct request *) cqr->callback_data;
+ blk_mq_requeue_request(req, false);
regulator-fix-memory-leak-with-repeated-set_machine_constraints.patch
regulator-avoid-resolve_supply-infinite-recursion.patch
regulator-workaround-self-referent-regulators.patch
+xtensa-disable-preemption-around-cache-alias-management-calls.patch
+mac80211-minstrel-remove-deferred-sampling-code.patch
+mac80211-minstrel-fix-tx-status-processing-corner-case.patch
+mac80211-free-sta-in-sta_info_insert_finish-on-errors.patch
+s390-cpum_sf.c-fix-file-permission-for-cpum_sfb_size.patch
+s390-dasd-fix-null-pointer-dereference-for-erp-requests.patch
+x86-microcode-intel-check-patch-signature-before-saving-microcode-for-early-loading.patch
--- /dev/null
+From 1a371e67dc77125736cc56d3a0893f06b75855b6 Mon Sep 17 00:00:00 2001
+From: Chen Yu <yu.c.chen@intel.com>
+Date: Fri, 13 Nov 2020 09:59:23 +0800
+Subject: x86/microcode/intel: Check patch signature before saving microcode for early loading
+
+From: Chen Yu <yu.c.chen@intel.com>
+
+commit 1a371e67dc77125736cc56d3a0893f06b75855b6 upstream.
+
+Currently, scan_microcode() leverages microcode_matches() to check
+if the microcode matches the CPU by comparing the family and model.
+However, the processor stepping and flags of the microcode signature
+should also be considered when saving a microcode patch for early
+update.
+
+Use find_matching_signature() in scan_microcode() and get rid of the
+now-unused microcode_matches(), which is a good cleanup in itself.
+
+Complete the verification of the patch being saved for early loading in
+save_microcode_patch() directly. This needs to happen there as well,
+because save_mc_for_early() also calls save_microcode_patch().
+
+The second reason is that the loader still tries to support, at least
+hypothetically, mixed-stepping systems and thus adds to the cache all
+patches which belong to the same CPU model, albeit with different
+steppings.
+
+For example:
+
+ microcode: CPU: sig=0x906ec, pf=0x2, rev=0xd6
+ microcode: mc_saved[0]: sig=0x906e9, pf=0x2a, rev=0xd6, total size=0x19400, date = 2020-04-23
+ microcode: mc_saved[1]: sig=0x906ea, pf=0x22, rev=0xd6, total size=0x19000, date = 2020-04-27
+ microcode: mc_saved[2]: sig=0x906eb, pf=0x2, rev=0xd6, total size=0x19400, date = 2020-04-23
+ microcode: mc_saved[3]: sig=0x906ec, pf=0x22, rev=0xd6, total size=0x19000, date = 2020-04-27
+ microcode: mc_saved[4]: sig=0x906ed, pf=0x22, rev=0xd6, total size=0x19400, date = 2020-04-23
+
+The patch being saved for early loading, however, can only be the one
+that fits the CPU this runs on, so do the signature verification before
+saving.
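+
+Condensed from the hunks below, the new check in save_microcode_patch()
+is (simplified sketch):
+
+ /* only cache a patch whose signature and platform flags actually
+  * match the CPU this is running on */
+ if (!find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
+         return;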
+
+ [ bp: Do signature verification in save_microcode_patch()
+ and rewrite commit message. ]
+
+Fixes: ec400ddeff20 ("x86/microcode_intel_early.c: Early update ucode on Intel's CPU")
+Signed-off-by: Chen Yu <yu.c.chen@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: stable@vger.kernel.org
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=208535
+Link: https://lkml.kernel.org/r/20201113015923.13960-1-yu.c.chen@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/microcode/intel.c | 63 +++++-----------------------------
+ 1 file changed, 10 insertions(+), 53 deletions(-)
+
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -103,53 +103,6 @@ static int has_newer_microcode(void *mc,
+ return find_matching_signature(mc, csig, cpf);
+ }
+
+-/*
+- * Given CPU signature and a microcode patch, this function finds if the
+- * microcode patch has matching family and model with the CPU.
+- *
+- * %true - if there's a match
+- * %false - otherwise
+- */
+-static bool microcode_matches(struct microcode_header_intel *mc_header,
+- unsigned long sig)
+-{
+- unsigned long total_size = get_totalsize(mc_header);
+- unsigned long data_size = get_datasize(mc_header);
+- struct extended_sigtable *ext_header;
+- unsigned int fam_ucode, model_ucode;
+- struct extended_signature *ext_sig;
+- unsigned int fam, model;
+- int ext_sigcount, i;
+-
+- fam = x86_family(sig);
+- model = x86_model(sig);
+-
+- fam_ucode = x86_family(mc_header->sig);
+- model_ucode = x86_model(mc_header->sig);
+-
+- if (fam == fam_ucode && model == model_ucode)
+- return true;
+-
+- /* Look for ext. headers: */
+- if (total_size <= data_size + MC_HEADER_SIZE)
+- return false;
+-
+- ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE;
+- ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
+- ext_sigcount = ext_header->count;
+-
+- for (i = 0; i < ext_sigcount; i++) {
+- fam_ucode = x86_family(ext_sig->sig);
+- model_ucode = x86_model(ext_sig->sig);
+-
+- if (fam == fam_ucode && model == model_ucode)
+- return true;
+-
+- ext_sig++;
+- }
+- return false;
+-}
+-
+ static struct ucode_patch *memdup_patch(void *data, unsigned int size)
+ {
+ struct ucode_patch *p;
+@@ -167,7 +120,7 @@ static struct ucode_patch *memdup_patch(
+ return p;
+ }
+
+-static void save_microcode_patch(void *data, unsigned int size)
++static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
+ {
+ struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
+ struct ucode_patch *iter, *tmp, *p = NULL;
+@@ -213,6 +166,9 @@ static void save_microcode_patch(void *d
+ if (!p)
+ return;
+
++ if (!find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
++ return;
++
+ /*
+ * Save for early loading. On 32-bit, that needs to be a physical
+ * address as the APs are running from physical addresses, before
+@@ -347,13 +303,14 @@ scan_microcode(void *data, size_t size,
+
+ size -= mc_size;
+
+- if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
++ if (!find_matching_signature(data, uci->cpu_sig.sig,
++ uci->cpu_sig.pf)) {
+ data += mc_size;
+ continue;
+ }
+
+ if (save) {
+- save_microcode_patch(data, mc_size);
++ save_microcode_patch(uci, data, mc_size);
+ goto next;
+ }
+
+@@ -486,14 +443,14 @@ static void show_saved_mc(void)
+ * Save this microcode patch. It will be loaded early when a CPU is
+ * hot-added or resumes.
+ */
+-static void save_mc_for_early(u8 *mc, unsigned int size)
++static void save_mc_for_early(struct ucode_cpu_info *uci, u8 *mc, unsigned int size)
+ {
+ /* Synchronization during CPU hotplug. */
+ static DEFINE_MUTEX(x86_cpu_microcode_mutex);
+
+ mutex_lock(&x86_cpu_microcode_mutex);
+
+- save_microcode_patch(mc, size);
++ save_microcode_patch(uci, mc, size);
+ show_saved_mc();
+
+ mutex_unlock(&x86_cpu_microcode_mutex);
+@@ -937,7 +894,7 @@ static enum ucode_state generic_load_mic
+ * permanent memory. So it will be loaded early when a CPU is hot added
+ * or resumes.
+ */
+- save_mc_for_early(new_mc, new_mc_size);
++ save_mc_for_early(uci, new_mc, new_mc_size);
+
+ pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
+ cpu, new_rev, uci->cpu_sig.rev);
--- /dev/null
+From 3a860d165eb5f4d7cf0bf81ef6a5b5c5e1754422 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Mon, 16 Nov 2020 01:38:59 -0800
+Subject: xtensa: disable preemption around cache alias management calls
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit 3a860d165eb5f4d7cf0bf81ef6a5b5c5e1754422 upstream.
+
+Although cache alias management calls set up and tear down TLB entries,
+and fast_second_level_miss is able to restore a TLB entry should it be
+evicted, they absolutely cannot preempt each other because they use the
+same TLBTEMP area for different purposes.
+Disable preemption around all cache alias management calls to enforce
+that.
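+
+As in the hunks below, every user of the TLBTEMP-based alias calls is
+wrapped the same way (simplified sketch):
+
+ preempt_disable();
+ __flush_invalidate_dcache_page_alias(virt, phys);
+ __invalidate_icache_page_alias(virt, phys);
+ preempt_enable();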
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/mm/cache.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/arch/xtensa/mm/cache.c
++++ b/arch/xtensa/mm/cache.c
+@@ -74,8 +74,10 @@ static inline void kmap_invalidate_coher
+ kvaddr = TLBTEMP_BASE_1 +
+ (page_to_phys(page) & DCACHE_ALIAS_MASK);
+
++ preempt_disable();
+ __invalidate_dcache_page_alias(kvaddr,
+ page_to_phys(page));
++ preempt_enable();
+ }
+ }
+ }
+@@ -160,6 +162,7 @@ void flush_dcache_page(struct page *page
+ if (!alias && !mapping)
+ return;
+
++ preempt_disable();
+ virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(virt, phys);
+
+@@ -170,6 +173,7 @@ void flush_dcache_page(struct page *page
+
+ if (mapping)
+ __invalidate_icache_page_alias(virt, phys);
++ preempt_enable();
+ }
+
+ /* There shouldn't be an entry in the cache for this page anymore. */
+@@ -203,8 +207,10 @@ void local_flush_cache_page(struct vm_ar
+ unsigned long phys = page_to_phys(pfn_to_page(pfn));
+ unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
+
++ preempt_disable();
+ __flush_invalidate_dcache_page_alias(virt, phys);
+ __invalidate_icache_page_alias(virt, phys);
++ preempt_enable();
+ }
+ EXPORT_SYMBOL(local_flush_cache_page);
+
+@@ -231,11 +237,13 @@ update_mmu_cache(struct vm_area_struct *
+ unsigned long phys = page_to_phys(page);
+ unsigned long tmp;
+
++ preempt_disable();
+ tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(tmp, phys);
+ tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(tmp, phys);
+ __invalidate_icache_page_alias(tmp, phys);
++ preempt_enable();
+
+ clear_bit(PG_arch_1, &page->flags);
+ }
+@@ -269,7 +277,9 @@ void copy_to_user_page(struct vm_area_st
+
+ if (alias) {
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
++ preempt_disable();
+ __flush_invalidate_dcache_page_alias(t, phys);
++ preempt_enable();
+ }
+
+ /* Copy data */
+@@ -284,9 +294,11 @@ void copy_to_user_page(struct vm_area_st
+ if (alias) {
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+
++ preempt_disable();
+ __flush_invalidate_dcache_range((unsigned long) dst, len);
+ if ((vma->vm_flags & VM_EXEC) != 0)
+ __invalidate_icache_page_alias(t, phys);
++ preempt_enable();
+
+ } else if ((vma->vm_flags & VM_EXEC) != 0) {
+ __flush_dcache_range((unsigned long)dst,len);
+@@ -308,7 +320,9 @@ extern void copy_from_user_page(struct v
+
+ if (alias) {
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
++ preempt_disable();
+ __flush_invalidate_dcache_page_alias(t, phys);
++ preempt_enable();
+ }
+
+ memcpy(dst, src, len);