--- /dev/null
+From b1187ddff48458bbc879fb8a492d5f485f6f0582 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Aug 2020 22:24:25 +0800
+Subject: ACPI: Add out of bounds and numa_off protections to pxm_to_node()
+
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+
+[ Upstream commit 8a3decac087aa897df5af04358c2089e52e70ac4 ]
+
+The function should check the validity of the pxm value before using
+it to index the pxm_to_node_map[] array.
+
+Whilst hardening this code may be good in general, the main intent
+here is to enable the following patches, which use this function to
+replace acpi_map_pxm_to_node() for non-SRAT use cases, which should
+return NUMA_NO_NODE for PXM entries that do not match those in the SRAT.
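+
+As a minimal, self-contained sketch of the guarded lookup this enables
+(plain userspace C; the array size and the numa_off flag are stand-ins
+for the kernel's MAX_PXM_DOMAINS and numa_off, and NUMA_NO_NODE is -1 as
+in the kernel):
+
+	#define NUMA_NO_NODE	(-1)
+	#define MAX_PXM_DOMAINS	256	/* stand-in size */
+
+	static int pxm_to_node_map[MAX_PXM_DOMAINS];
+	static int numa_off;		/* non-zero when NUMA is disabled */
+
+	int pxm_to_node(int pxm)
+	{
+		if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
+			return NUMA_NO_NODE;
+		return pxm_to_node_map[pxm];
+	}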
+
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Reviewed-by: Barry Song <song.bao.hua@hisilicon.com>
+Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/numa/srat.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
+index 15bbaab8500b9..1fb486f46ee20 100644
+--- a/drivers/acpi/numa/srat.c
++++ b/drivers/acpi/numa/srat.c
+@@ -31,7 +31,7 @@ int acpi_numa __initdata;
+
+ int pxm_to_node(int pxm)
+ {
+- if (pxm < 0)
++ if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
+ return NUMA_NO_NODE;
+ return pxm_to_node_map[pxm];
+ }
+--
+2.27.0
+
--- /dev/null
+From cd5250cc229abc5eaa58cede03ab3e4697d5842c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Sep 2020 22:05:45 +0800
+Subject: ACPI: HMAT: Fix handling of changes from ACPI 6.2 to ACPI 6.3
+
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+
+[ Upstream commit 2c5b9bde95c96942f2873cea6ef383c02800e4a8 ]
+
+In ACPI 6.3, the Memory Proximity Domain Attributes Structure
+changed substantially. One of those changes was that the flag
+for "Memory Proximity Domain field is valid" was deprecated.
+
+This was because the field "Proximity Domain for the Memory"
+became a required field and hence having a validity flag makes
+no sense.
+
+So the correct logic is to always assume the field is there.
+The current code assumes it never is.
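+
+Expressed as a self-contained sketch (illustrative only; the flag bit
+value below is a placeholder for the real ACPI_HMAT_MEMORY_PD_VALID
+constant, and hmat_revision stands for the parsed table revision):
+
+	#include <stdbool.h>
+
+	#define MEMORY_PD_VALID	0x1	/* placeholder flag bit */
+
+	/*
+	 * Revision 1 (ACPI 6.2): honour the validity flag.
+	 * Revision 2+ (ACPI 6.3): the field is mandatory, so always use it.
+	 */
+	bool memory_pd_usable(int hmat_revision, unsigned int flags)
+	{
+		return (hmat_revision == 1 && (flags & MEMORY_PD_VALID)) ||
+		       hmat_revision > 1;
+	}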
+
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/numa/hmat.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
+index 2c32cfb723701..6a91a55229aee 100644
+--- a/drivers/acpi/numa/hmat.c
++++ b/drivers/acpi/numa/hmat.c
+@@ -424,7 +424,8 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
+ pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
+ p->flags, p->processor_PD, p->memory_PD);
+
+- if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
++ if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
++ hmat_revision > 1) {
+ target = find_mem_target(p->memory_PD);
+ if (!target) {
+ pr_debug("HMAT: Memory Domain missing from SRAT\n");
+--
+2.27.0
+
--- /dev/null
+From 1945578a43a3b46a3a5683e0143feaae9abd6ce0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Oct 2020 13:57:44 +0000
+Subject: afs: Alter dirty range encoding in page->private
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 65dd2d6072d393a3aa14ded8afa9a12f27d9c8ad ]
+
+Currently, page->private on an afs page is used to store the range of
+dirtied data within the page, where the range includes the lower bound, but
+excludes the upper bound (e.g. 0-1 is a range covering a single byte).
+
+This, however, requires a superfluous bit for the last-byte bound so that
+on a 4KiB page, it can say 0-4096 to indicate the whole page, the idea
+being that having both numbers the same would indicate an empty range.
+This is unnecessary as the PG_private bit is clear if it's an empty range
+(as is PG_dirty).
+
+Alter the way the dirty range is encoded in page->private such that the
+upper bound is reduced by 1 (e.g. 0-0 then specifies the same single-byte
+range mentioned above).
+
+Applying this to both bounds frees up two bits, one of which can be used in
+a future commit.
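+
+As an illustrative, self-contained sketch of the new encoding (plain
+userspace C mirroring the helpers, with the 16-bit-per-bound layout used
+on a 32-bit box; not the kernel code itself):
+
+	#include <assert.h>
+	#include <stdio.h>
+
+	#define PRIV_SHIFT	16
+	#define PRIV_MASK	0xffffUL
+
+	/* Store the inclusive upper bound, i.e. to - 1 */
+	unsigned long dirty_encode(size_t from, size_t to)
+	{
+		return ((unsigned long)(to - 1) << PRIV_SHIFT) | from;
+	}
+
+	size_t dirty_from(unsigned long priv)
+	{
+		return priv & PRIV_MASK;
+	}
+
+	size_t dirty_to(unsigned long priv)
+	{
+		return ((priv >> PRIV_SHIFT) & PRIV_MASK) + 1;
+	}
+
+	int main(void)
+	{
+		/* Whole 64K page: 65535 fits in 16 bits, 65536 would not */
+		unsigned long priv = dirty_encode(0, 65536);
+
+		assert(dirty_from(priv) == 0);
+		assert(dirty_to(priv) == 65536);
+		printf("dirty %zu-%zu\n", dirty_from(priv), dirty_to(priv));
+		return 0;
+	}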
+
+This allows the afs filesystem to be compiled on ppc32 with 64K pages;
+without this, the following warnings are seen:
+
+../fs/afs/internal.h: In function 'afs_page_dirty_to':
+../fs/afs/internal.h:881:15: warning: right shift count >= width of type [-Wshift-count-overflow]
+ 881 | return (priv >> __AFS_PAGE_PRIV_SHIFT) & __AFS_PAGE_PRIV_MASK;
+ | ^~
+../fs/afs/internal.h: In function 'afs_page_dirty':
+../fs/afs/internal.h:886:28: warning: left shift count >= width of type [-Wshift-count-overflow]
+ 886 | return ((unsigned long)to << __AFS_PAGE_PRIV_SHIFT) | from;
+ | ^~
+
+Fixes: 4343d00872e1 ("afs: Get rid of the afs_writeback record")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/internal.h | 6 +++---
+ fs/afs/write.c | 2 +-
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 523bf9698ecdc..fc96c090893f7 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -862,7 +862,7 @@ struct afs_vnode_cache_aux {
+ * splitting the field into two parts. However, we need to represent a range
+ * 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
+ */
+-#if PAGE_SIZE > 32768
++#ifdef CONFIG_64BIT
+ #define __AFS_PAGE_PRIV_MASK 0xffffffffUL
+ #define __AFS_PAGE_PRIV_SHIFT 32
+ #else
+@@ -877,12 +877,12 @@ static inline size_t afs_page_dirty_from(unsigned long priv)
+
+ static inline size_t afs_page_dirty_to(unsigned long priv)
+ {
+- return (priv >> __AFS_PAGE_PRIV_SHIFT) & __AFS_PAGE_PRIV_MASK;
++ return ((priv >> __AFS_PAGE_PRIV_SHIFT) & __AFS_PAGE_PRIV_MASK) + 1;
+ }
+
+ static inline unsigned long afs_page_dirty(size_t from, size_t to)
+ {
+- return ((unsigned long)to << __AFS_PAGE_PRIV_SHIFT) | from;
++ return ((unsigned long)(to - 1) << __AFS_PAGE_PRIV_SHIFT) | from;
+ }
+
+ #include <trace/events/afs.h>
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index ea1768b3c0b56..1a49f5c89342e 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -93,7 +93,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
+ /* We want to store information about how much of a page is altered in
+ * page->private.
+ */
+- BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);
++ BUILD_BUG_ON(PAGE_SIZE - 1 > __AFS_PAGE_PRIV_MASK && sizeof(page->private) < 8);
+
+ page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page)
+--
+2.27.0
+
--- /dev/null
+From 5d15dac0c5a777ba82d14edcd4f267bdffda97f3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Oct 2020 09:02:25 +0100
+Subject: afs: Don't assert on unpurgeable server records
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 7530d3eb3dcf1a30750e8e7f1f88b782b96b72b8 ]
+
+Don't give an assertion failure on unpurgeable afs_server records - which
+kills the thread - but rather emit a trace line when we are purging a
+record (which only happens during network namespace removal or rmmod) and
+print a notice of the problem.
+
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/server.c | 7 ++++++-
+ include/trace/events/afs.h | 2 ++
+ 2 files changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/fs/afs/server.c b/fs/afs/server.c
+index e82e452e26124..684a2b02b9ff7 100644
+--- a/fs/afs/server.c
++++ b/fs/afs/server.c
+@@ -550,7 +550,12 @@ void afs_manage_servers(struct work_struct *work)
+
+ _debug("manage %pU %u", &server->uuid, active);
+
+- ASSERTIFCMP(purging, active, ==, 0);
++ if (purging) {
++ trace_afs_server(server, atomic_read(&server->ref),
++ active, afs_server_trace_purging);
++ if (active != 0)
++ pr_notice("Can't purge s=%08x\n", server->debug_id);
++ }
+
+ if (active == 0) {
+ time64_t expire_at = server->unuse_time;
+diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
+index 13c05e28c0b6c..342b35fc33c59 100644
+--- a/include/trace/events/afs.h
++++ b/include/trace/events/afs.h
+@@ -40,6 +40,7 @@ enum afs_server_trace {
+ afs_server_trace_get_new_cbi,
+ afs_server_trace_get_probe,
+ afs_server_trace_give_up_cb,
++ afs_server_trace_purging,
+ afs_server_trace_put_call,
+ afs_server_trace_put_cbi,
+ afs_server_trace_put_find_rsq,
+@@ -270,6 +271,7 @@ enum afs_cb_break_reason {
+ EM(afs_server_trace_get_new_cbi, "GET cbi ") \
+ EM(afs_server_trace_get_probe, "GET probe") \
+ EM(afs_server_trace_give_up_cb, "giveup-cb") \
++ EM(afs_server_trace_purging, "PURGE ") \
+ EM(afs_server_trace_put_call, "PUT call ") \
+ EM(afs_server_trace_put_cbi, "PUT cbi ") \
+ EM(afs_server_trace_put_find_rsq, "PUT f-rsq") \
+--
+2.27.0
+
--- /dev/null
+From 83c75ee63f83d734f831c9c3dfa82d606d5df859 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Aug 2020 11:58:12 +0300
+Subject: afs: Fix a use after free in afs_xattr_get_acl()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit 248c944e2159de4868bef558feea40214aaf8464 ]
+
+The "op" pointer is freed earlier when we call afs_put_operation().
+
+Fixes: e49c7b2f6de7 ("afs: Build an abstraction around an "operation" concept")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/xattr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
+index 84f3c4f575318..38884d6c57cdc 100644
+--- a/fs/afs/xattr.c
++++ b/fs/afs/xattr.c
+@@ -85,7 +85,7 @@ static int afs_xattr_get_acl(const struct xattr_handler *handler,
+ if (acl->size <= size)
+ memcpy(buffer, acl->data, acl->size);
+ else
+- op->error = -ERANGE;
++ ret = -ERANGE;
+ }
+ }
+
+--
+2.27.0
+
--- /dev/null
+From 8f07a760aab11ef39d9e2e877095c94052333198 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Oct 2020 14:08:23 +0100
+Subject: afs: Fix afs_invalidatepage to adjust the dirty region
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit f86726a69dec5df6ba051baf9265584419478b64 ]
+
+Fix afs_invalidatepage() to adjust the dirty region recorded in
+page->private when truncating a page. If the dirty region is entirely
+removed, then the private data is cleared and the page dirty state is
+cleared.
+
+Without this, if the page is truncated and then expanded again by truncate,
+zeros from the expanded but no-longer-dirty region may get written back to
+the server if the page gets laundered due to a conflicting 3rd-party write.
+
+It mustn't, however, shorten the dirty region of the page if that page is
+still mmapped and has been marked dirty by afs_page_mkwrite(), so a flag is
+stored in page->private to record this.
+
+Fixes: 4343d00872e1 ("afs: Get rid of the afs_writeback record")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/file.c | 71 ++++++++++++++++++++++++++++++++------
+ fs/afs/internal.h | 16 +++++++--
+ fs/afs/write.c | 1 +
+ include/trace/events/afs.h | 5 +--
+ 4 files changed, 79 insertions(+), 14 deletions(-)
+
+diff --git a/fs/afs/file.c b/fs/afs/file.c
+index bdcf418e4a5c0..5015f2b107824 100644
+--- a/fs/afs/file.c
++++ b/fs/afs/file.c
+@@ -600,6 +600,63 @@ static int afs_readpages(struct file *file, struct address_space *mapping,
+ return ret;
+ }
+
++/*
++ * Adjust the dirty region of the page on truncation or full invalidation,
++ * getting rid of the markers altogether if the region is entirely invalidated.
++ */
++static void afs_invalidate_dirty(struct page *page, unsigned int offset,
++ unsigned int length)
++{
++ struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
++ unsigned long priv;
++ unsigned int f, t, end = offset + length;
++
++ priv = page_private(page);
++
++ /* we clean up only if the entire page is being invalidated */
++ if (offset == 0 && length == thp_size(page))
++ goto full_invalidate;
++
++ /* If the page was dirtied by page_mkwrite(), the PTE stays writable
++ * and we don't get another notification to tell us to expand it
++ * again.
++ */
++ if (afs_is_page_dirty_mmapped(priv))
++ return;
++
++ /* We may need to shorten the dirty region */
++ f = afs_page_dirty_from(priv);
++ t = afs_page_dirty_to(priv);
++
++ if (t <= offset || f >= end)
++ return; /* Doesn't overlap */
++
++ if (f < offset && t > end)
++ return; /* Splits the dirty region - just absorb it */
++
++ if (f >= offset && t <= end)
++ goto undirty;
++
++ if (f < offset)
++ t = offset;
++ else
++ f = end;
++ if (f == t)
++ goto undirty;
++
++ priv = afs_page_dirty(f, t);
++ set_page_private(page, priv);
++ trace_afs_page_dirty(vnode, tracepoint_string("trunc"), page->index, priv);
++ return;
++
++undirty:
++ trace_afs_page_dirty(vnode, tracepoint_string("undirty"), page->index, priv);
++ clear_page_dirty_for_io(page);
++full_invalidate:
++ priv = (unsigned long)detach_page_private(page);
++ trace_afs_page_dirty(vnode, tracepoint_string("inval"), page->index, priv);
++}
++
+ /*
+ * invalidate part or all of a page
+ * - release a page and clean up its private data if offset is 0 (indicating
+@@ -608,29 +665,23 @@ static int afs_readpages(struct file *file, struct address_space *mapping,
+ static void afs_invalidatepage(struct page *page, unsigned int offset,
+ unsigned int length)
+ {
+- struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+- unsigned long priv;
+-
+ _enter("{%lu},%u,%u", page->index, offset, length);
+
+ BUG_ON(!PageLocked(page));
+
++#ifdef CONFIG_AFS_FSCACHE
+ /* we clean up only if the entire page is being invalidated */
+ if (offset == 0 && length == PAGE_SIZE) {
+-#ifdef CONFIG_AFS_FSCACHE
+ if (PageFsCache(page)) {
+ struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+ fscache_wait_on_page_write(vnode->cache, page);
+ fscache_uncache_page(vnode->cache, page);
+ }
++ }
+ #endif
+
+- if (PagePrivate(page)) {
+- priv = (unsigned long)detach_page_private(page);
+- trace_afs_page_dirty(vnode, tracepoint_string("inval"),
+- page->index, priv);
+- }
+- }
++ if (PagePrivate(page))
++ afs_invalidate_dirty(page, offset, length);
+
+ _leave("");
+ }
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index fc96c090893f7..dbe4120e9a422 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -863,11 +863,13 @@ struct afs_vnode_cache_aux {
+ * 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
+ */
+ #ifdef CONFIG_64BIT
+-#define __AFS_PAGE_PRIV_MASK 0xffffffffUL
++#define __AFS_PAGE_PRIV_MASK 0x7fffffffUL
+ #define __AFS_PAGE_PRIV_SHIFT 32
++#define __AFS_PAGE_PRIV_MMAPPED 0x80000000UL
+ #else
+-#define __AFS_PAGE_PRIV_MASK 0xffffUL
++#define __AFS_PAGE_PRIV_MASK 0x7fffUL
+ #define __AFS_PAGE_PRIV_SHIFT 16
++#define __AFS_PAGE_PRIV_MMAPPED 0x8000UL
+ #endif
+
+ static inline size_t afs_page_dirty_from(unsigned long priv)
+@@ -885,6 +887,16 @@ static inline unsigned long afs_page_dirty(size_t from, size_t to)
+ return ((unsigned long)(to - 1) << __AFS_PAGE_PRIV_SHIFT) | from;
+ }
+
++static inline unsigned long afs_page_dirty_mmapped(unsigned long priv)
++{
++ return priv | __AFS_PAGE_PRIV_MMAPPED;
++}
++
++static inline bool afs_is_page_dirty_mmapped(unsigned long priv)
++{
++ return priv & __AFS_PAGE_PRIV_MMAPPED;
++}
++
+ #include <trace/events/afs.h>
+
+ /*****************************************************************************/
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index 1a49f5c89342e..a2511e3ad2cca 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -867,6 +867,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
+ wait_on_page_writeback(vmf->page);
+
+ priv = afs_page_dirty(0, PAGE_SIZE);
++ priv = afs_page_dirty_mmapped(priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
+ vmf->page->index, priv);
+ if (PagePrivate(vmf->page))
+diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
+index 05b5506cd24ca..13c05e28c0b6c 100644
+--- a/include/trace/events/afs.h
++++ b/include/trace/events/afs.h
+@@ -904,10 +904,11 @@ TRACE_EVENT(afs_page_dirty,
+ __entry->priv = priv;
+ ),
+
+- TP_printk("vn=%p %lx %s %zx-%zx",
++ TP_printk("vn=%p %lx %s %zx-%zx%s",
+ __entry->vnode, __entry->page, __entry->where,
+ afs_page_dirty_from(__entry->priv),
+- afs_page_dirty_to(__entry->priv))
++ afs_page_dirty_to(__entry->priv),
++ afs_is_page_dirty_mmapped(__entry->priv) ? " M" : "")
+ );
+
+ TRACE_EVENT(afs_call_state,
+--
+2.27.0
+
--- /dev/null
+From be3f06fb87edc51b3e0635c65e4ef8fdb51187ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Oct 2020 14:40:31 +0100
+Subject: afs: Fix afs_launder_page to not clear PG_writeback
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit d383e346f97d6bb0d654bb3d63c44ab106d92d29 ]
+
+Fix afs_launder_page() to not clear PG_writeback on the page it is
+laundering as the flag isn't set in this case.
+
+Fixes: 4343d00872e1 ("afs: Get rid of the afs_writeback record")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/internal.h | 1 +
+ fs/afs/write.c | 10 ++++++----
+ 2 files changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 06e617ee4cd1e..c8acb58ac5d8f 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -811,6 +811,7 @@ struct afs_operation {
+ pgoff_t last; /* last page in mapping to deal with */
+ unsigned first_offset; /* offset into mapping[first] */
+ unsigned last_to; /* amount of mapping[last] */
++ bool laundering; /* Laundering page, PG_writeback not set */
+ } store;
+ struct {
+ struct iattr *attr;
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index da12abd6db213..b937ec047ec98 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -396,7 +396,8 @@ static void afs_store_data_success(struct afs_operation *op)
+ op->ctime = op->file[0].scb.status.mtime_client;
+ afs_vnode_commit_status(op, &op->file[0]);
+ if (op->error == 0) {
+- afs_pages_written_back(vnode, op->store.first, op->store.last);
++ if (!op->store.laundering)
++ afs_pages_written_back(vnode, op->store.first, op->store.last);
+ afs_stat_v(vnode, n_stores);
+ atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) -
+ (op->store.first * PAGE_SIZE + op->store.first_offset),
+@@ -415,7 +416,7 @@ static const struct afs_operation_ops afs_store_data_operation = {
+ */
+ static int afs_store_data(struct address_space *mapping,
+ pgoff_t first, pgoff_t last,
+- unsigned offset, unsigned to)
++ unsigned offset, unsigned to, bool laundering)
+ {
+ struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+ struct afs_operation *op;
+@@ -448,6 +449,7 @@ static int afs_store_data(struct address_space *mapping,
+ op->store.last = last;
+ op->store.first_offset = offset;
+ op->store.last_to = to;
++ op->store.laundering = laundering;
+ op->mtime = vnode->vfs_inode.i_mtime;
+ op->flags |= AFS_OPERATION_UNINTR;
+ op->ops = &afs_store_data_operation;
+@@ -601,7 +603,7 @@ no_more:
+ if (end > i_size)
+ to = i_size & ~PAGE_MASK;
+
+- ret = afs_store_data(mapping, first, last, offset, to);
++ ret = afs_store_data(mapping, first, last, offset, to, false);
+ switch (ret) {
+ case 0:
+ ret = count;
+@@ -921,7 +923,7 @@ int afs_launder_page(struct page *page)
+
+ trace_afs_page_dirty(vnode, tracepoint_string("launder"),
+ page->index, priv);
+- ret = afs_store_data(mapping, page->index, page->index, t, f);
++ ret = afs_store_data(mapping, page->index, page->index, t, f, true);
+ }
+
+ trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
+--
+2.27.0
+
--- /dev/null
+From 67bab4b8f4924e5a391abfe04393d1c78811c9d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Oct 2020 12:08:39 +0000
+Subject: afs: Fix dirty-region encoding on ppc32 with 64K pages
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 2d9900f26ad61e63a34f239bc76c80d2f8a6ff41 ]
+
+The dirty region bounds stored in page->private on an afs page are 15 bits
+on a 32-bit box and can, at most, represent a range of up to 32K within a
+32K page with a resolution of 1 byte. This is a problem for powerpc32 with
+64K pages enabled.
+
+Further, transparent huge pages may get up to 2M, which will be a problem
+for the afs filesystem on all 32-bit arches in the future.
+
+Fix this by decreasing the resolution. For the moment, a 64K page will
+have a resolution determined from PAGE_SIZE. In the future, the page will
+need to be passed in to the helper functions so that the page size can be
+assessed and the resolution determined dynamically.
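+
+A rough, self-contained sketch of the reduced-resolution encoding (plain
+userspace C, assuming the 32-bit, 64K-page case where each bound gets a
+15-bit field; illustrative only, not the kernel helpers):
+
+	#include <stdio.h>
+
+	#define PRIV_SHIFT	16
+	#define PRIV_MASK	0x7fffUL
+	#define PAGE_SHIFT	16		/* 64K page */
+
+	/* Bits of byte resolution given up so a bound still fits its field */
+	unsigned int resolution(void)
+	{
+		int shift = PAGE_SHIFT - (PRIV_SHIFT - 1);
+
+		return shift > 0 ? shift : 0;
+	}
+
+	unsigned long dirty_encode(size_t from, size_t to)
+	{
+		unsigned int res = resolution();
+
+		from >>= res;		/* round the lower bound down */
+		to = (to - 1) >> res;	/* inclusive upper bound */
+		return (to << PRIV_SHIFT) | from;
+	}
+
+	int main(void)
+	{
+		/* At 2-byte resolution a 3..9 dirty range widens to 2..10 */
+		unsigned long priv = dirty_encode(3, 9);
+		size_t from = (priv & PRIV_MASK) << resolution();
+		size_t to = (((priv >> PRIV_SHIFT) & PRIV_MASK) + 1) << resolution();
+
+		printf("stored as %zu-%zu\n", from, to);
+		return 0;
+	}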
+
+Note that this might not be the ideal way to handle this, since it may
+allow some leakage of undirtied zero bytes to the server's copy in the case
+of a 3rd-party conflict. Fixing that would require a separately allocated
+record and is a more complicated fix.
+
+Fixes: 4343d00872e1 ("afs: Get rid of the afs_writeback record")
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/internal.h | 24 ++++++++++++++++++++----
+ fs/afs/write.c | 5 -----
+ 2 files changed, 20 insertions(+), 9 deletions(-)
+
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index dbe4120e9a422..17336cbb8419f 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -860,7 +860,8 @@ struct afs_vnode_cache_aux {
+ /*
+ * We use page->private to hold the amount of the page that we've written to,
+ * splitting the field into two parts. However, we need to represent a range
+- * 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
++ * 0...PAGE_SIZE, so we reduce the resolution if the size of the page
++ * exceeds what we can encode.
+ */
+ #ifdef CONFIG_64BIT
+ #define __AFS_PAGE_PRIV_MASK 0x7fffffffUL
+@@ -872,19 +873,34 @@ struct afs_vnode_cache_aux {
+ #define __AFS_PAGE_PRIV_MMAPPED 0x8000UL
+ #endif
+
++static inline unsigned int afs_page_dirty_resolution(void)
++{
++ int shift = PAGE_SHIFT - (__AFS_PAGE_PRIV_SHIFT - 1);
++ return (shift > 0) ? shift : 0;
++}
++
+ static inline size_t afs_page_dirty_from(unsigned long priv)
+ {
+- return priv & __AFS_PAGE_PRIV_MASK;
++ unsigned long x = priv & __AFS_PAGE_PRIV_MASK;
++
++ /* The lower bound is inclusive */
++ return x << afs_page_dirty_resolution();
+ }
+
+ static inline size_t afs_page_dirty_to(unsigned long priv)
+ {
+- return ((priv >> __AFS_PAGE_PRIV_SHIFT) & __AFS_PAGE_PRIV_MASK) + 1;
++ unsigned long x = (priv >> __AFS_PAGE_PRIV_SHIFT) & __AFS_PAGE_PRIV_MASK;
++
++ /* The upper bound is immediately beyond the region */
++ return (x + 1) << afs_page_dirty_resolution();
+ }
+
+ static inline unsigned long afs_page_dirty(size_t from, size_t to)
+ {
+- return ((unsigned long)(to - 1) << __AFS_PAGE_PRIV_SHIFT) | from;
++ unsigned int res = afs_page_dirty_resolution();
++ from >>= res;
++ to = (to - 1) >> res;
++ return (to << __AFS_PAGE_PRIV_SHIFT) | from;
+ }
+
+ static inline unsigned long afs_page_dirty_mmapped(unsigned long priv)
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index a2511e3ad2cca..50371207f3273 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -90,11 +90,6 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
+ _enter("{%llx:%llu},{%lx},%u,%u",
+ vnode->fid.vid, vnode->fid.vnode, index, from, to);
+
+- /* We want to store information about how much of a page is altered in
+- * page->private.
+- */
+- BUILD_BUG_ON(PAGE_SIZE - 1 > __AFS_PAGE_PRIV_MASK && sizeof(page->private) < 8);
+-
+ page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
+--
+2.27.0
+
--- /dev/null
+From 688878e2ddfa186f04a1aea90ae8db7e09d9c296 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Oct 2020 14:03:03 +0100
+Subject: afs: Fix page leak on afs_write_begin() failure
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 21db2cdc667f744691a407105b7712bc18d74023 ]
+
+Fix the leak of the target page in afs_write_begin() when it fails.
+
+Fixes: 15b4650e55e0 ("afs: convert to new aops")
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Nick Piggin <npiggin@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/write.c | 23 +++++++++++------------
+ 1 file changed, 11 insertions(+), 12 deletions(-)
+
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index 02facb19a0f1d..7fae9f8b38eb3 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -76,7 +76,7 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
+ */
+ int afs_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+- struct page **pagep, void **fsdata)
++ struct page **_page, void **fsdata)
+ {
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ struct page *page;
+@@ -110,9 +110,6 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
+ SetPageUptodate(page);
+ }
+
+- /* page won't leak in error case: it eventually gets cleaned off LRU */
+- *pagep = page;
+-
+ try_again:
+ /* See if this page is already partially written in a way that we can
+ * merge the new write with.
+@@ -155,6 +152,7 @@ try_again:
+ set_page_private(page, priv);
+ else
+ attach_page_private(page, (void *)priv);
++ *_page = page;
+ _leave(" = 0");
+ return 0;
+
+@@ -164,17 +162,18 @@ try_again:
+ flush_conflicting_write:
+ _debug("flush conflict");
+ ret = write_one_page(page);
+- if (ret < 0) {
+- _leave(" = %d", ret);
+- return ret;
+- }
++ if (ret < 0)
++ goto error;
+
+ ret = lock_page_killable(page);
+- if (ret < 0) {
+- _leave(" = %d", ret);
+- return ret;
+- }
++ if (ret < 0)
++ goto error;
+ goto try_again;
++
++error:
++ put_page(page);
++ _leave(" = %d", ret);
++ return ret;
+ }
+
+ /*
+--
+2.27.0
+
--- /dev/null
+From 02815f7a98d6d5793b8fd30897d4eb8b42eb815e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Oct 2020 13:22:19 +0100
+Subject: afs: Fix to take ref on page when PG_private is set
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit fa04a40b169fcee615afbae97f71a09332993f64 ]
+
+Fix afs to take a ref on a page when it sets PG_private on it and to drop
+the ref when removing the flag.
+
+Note that in afs_write_begin(), a lot of the time, PG_private is already
+set on a page to which we're going to add some data. In such a case, we
+leave the bit set and mustn't increment the page count.
+
+As suggested by Matthew Wilcox, use attach/detach_page_private() where
+possible.
+
+Fixes: 31143d5d515e ("AFS: implement basic file write support")
+Reported-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/dir.c | 12 ++++--------
+ fs/afs/dir_edit.c | 6 ++----
+ fs/afs/file.c | 8 ++------
+ fs/afs/write.c | 18 ++++++++++--------
+ 4 files changed, 18 insertions(+), 26 deletions(-)
+
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 1d2e61e0ab047..1bb5b9d7f0a2c 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -281,8 +281,7 @@ retry:
+ if (ret < 0)
+ goto error;
+
+- set_page_private(req->pages[i], 1);
+- SetPagePrivate(req->pages[i]);
++ attach_page_private(req->pages[i], (void *)1);
+ unlock_page(req->pages[i]);
+ i++;
+ } else {
+@@ -1975,8 +1974,7 @@ static int afs_dir_releasepage(struct page *page, gfp_t gfp_flags)
+
+ _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, page->index);
+
+- set_page_private(page, 0);
+- ClearPagePrivate(page);
++ detach_page_private(page);
+
+ /* The directory will need reloading. */
+ if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+@@ -2003,8 +2001,6 @@ static void afs_dir_invalidatepage(struct page *page, unsigned int offset,
+ afs_stat_v(dvnode, n_inval);
+
+ /* we clean up only if the entire page is being invalidated */
+- if (offset == 0 && length == PAGE_SIZE) {
+- set_page_private(page, 0);
+- ClearPagePrivate(page);
+- }
++ if (offset == 0 && length == PAGE_SIZE)
++ detach_page_private(page);
+ }
+diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
+index b108528bf010d..2ffe09abae7fc 100644
+--- a/fs/afs/dir_edit.c
++++ b/fs/afs/dir_edit.c
+@@ -243,10 +243,8 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
+ index, gfp);
+ if (!page)
+ goto error;
+- if (!PagePrivate(page)) {
+- set_page_private(page, 1);
+- SetPagePrivate(page);
+- }
++ if (!PagePrivate(page))
++ attach_page_private(page, (void *)1);
+ dir_page = kmap(page);
+ }
+
+diff --git a/fs/afs/file.c b/fs/afs/file.c
+index 371d1488cc549..bdcf418e4a5c0 100644
+--- a/fs/afs/file.c
++++ b/fs/afs/file.c
+@@ -626,11 +626,9 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
+ #endif
+
+ if (PagePrivate(page)) {
+- priv = page_private(page);
++ priv = (unsigned long)detach_page_private(page);
+ trace_afs_page_dirty(vnode, tracepoint_string("inval"),
+ page->index, priv);
+- set_page_private(page, 0);
+- ClearPagePrivate(page);
+ }
+ }
+
+@@ -660,11 +658,9 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
+ #endif
+
+ if (PagePrivate(page)) {
+- priv = page_private(page);
++ priv = (unsigned long)detach_page_private(page);
+ trace_afs_page_dirty(vnode, tracepoint_string("rel"),
+ page->index, priv);
+- set_page_private(page, 0);
+- ClearPagePrivate(page);
+ }
+
+ /* indicate that the page can be released */
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index b937ec047ec98..02facb19a0f1d 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -151,8 +151,10 @@ try_again:
+ priv |= f;
+ trace_afs_page_dirty(vnode, tracepoint_string("begin"),
+ page->index, priv);
+- SetPagePrivate(page);
+- set_page_private(page, priv);
++ if (PagePrivate(page))
++ set_page_private(page, priv);
++ else
++ attach_page_private(page, (void *)priv);
+ _leave(" = 0");
+ return 0;
+
+@@ -334,10 +336,9 @@ static void afs_pages_written_back(struct afs_vnode *vnode,
+ ASSERTCMP(pv.nr, ==, count);
+
+ for (loop = 0; loop < count; loop++) {
+- priv = page_private(pv.pages[loop]);
++ priv = (unsigned long)detach_page_private(pv.pages[loop]);
+ trace_afs_page_dirty(vnode, tracepoint_string("clear"),
+ pv.pages[loop]->index, priv);
+- set_page_private(pv.pages[loop], 0);
+ end_page_writeback(pv.pages[loop]);
+ }
+ first += count;
+@@ -863,8 +864,10 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
+ priv |= 0; /* From */
+ trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
+ vmf->page->index, priv);
+- SetPagePrivate(vmf->page);
+- set_page_private(vmf->page, priv);
++ if (PagePrivate(vmf->page))
++ set_page_private(vmf->page, priv);
++ else
++ attach_page_private(vmf->page, (void *)priv);
+ file_update_time(file);
+
+ sb_end_pagefault(inode->i_sb);
+@@ -926,10 +929,9 @@ int afs_launder_page(struct page *page)
+ ret = afs_store_data(mapping, page->index, page->index, t, f, true);
+ }
+
++ priv = (unsigned long)detach_page_private(page);
+ trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
+ page->index, priv);
+- set_page_private(page, 0);
+- ClearPagePrivate(page);
+
+ #ifdef CONFIG_AFS_FSCACHE
+ if (PageFsCache(page)) {
+--
+2.27.0
+
--- /dev/null
+From e383edb03ba16ca346eabbd4a20efa730f291530 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Oct 2020 14:05:33 +0000
+Subject: afs: Fix where page->private is set during write
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit f792e3ac82fe2c6c863e93187eb7ddfccab68fa7 ]
+
+In afs, page->private is set to indicate the dirty region of a page. This
+is done in afs_write_begin(), but that can't take account of whether the
+copy into the page actually worked.
+
+Fix this by moving the change of page->private into afs_write_end().
+
+Fixes: 4343d00872e1 ("afs: Get rid of the afs_writeback record")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/write.c | 41 ++++++++++++++++++++++++++---------------
+ 1 file changed, 26 insertions(+), 15 deletions(-)
+
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index 7fae9f8b38eb3..f28d85c38cd89 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -135,23 +135,8 @@ try_again:
+ if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
+ (to < f || from > t))
+ goto flush_conflicting_write;
+- if (from < f)
+- f = from;
+- if (to > t)
+- t = to;
+- } else {
+- f = from;
+- t = to;
+ }
+
+- priv = (unsigned long)t << AFS_PRIV_SHIFT;
+- priv |= f;
+- trace_afs_page_dirty(vnode, tracepoint_string("begin"),
+- page->index, priv);
+- if (PagePrivate(page))
+- set_page_private(page, priv);
+- else
+- attach_page_private(page, (void *)priv);
+ *_page = page;
+ _leave(" = 0");
+ return 0;
+@@ -185,6 +170,9 @@ int afs_write_end(struct file *file, struct address_space *mapping,
+ {
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ struct key *key = afs_file_key(file);
++ unsigned long priv;
++ unsigned int f, from = pos & (PAGE_SIZE - 1);
++ unsigned int t, to = from + copied;
+ loff_t i_size, maybe_i_size;
+ int ret;
+
+@@ -216,6 +204,29 @@ int afs_write_end(struct file *file, struct address_space *mapping,
+ SetPageUptodate(page);
+ }
+
++ if (PagePrivate(page)) {
++ priv = page_private(page);
++ f = priv & AFS_PRIV_MAX;
++ t = priv >> AFS_PRIV_SHIFT;
++ if (from < f)
++ f = from;
++ if (to > t)
++ t = to;
++ priv = (unsigned long)t << AFS_PRIV_SHIFT;
++ priv |= f;
++ set_page_private(page, priv);
++ trace_afs_page_dirty(vnode, tracepoint_string("dirty+"),
++ page->index, priv);
++ } else {
++ f = from;
++ t = to;
++ priv = (unsigned long)t << AFS_PRIV_SHIFT;
++ priv |= f;
++ attach_page_private(page, (void *)priv);
++ trace_afs_page_dirty(vnode, tracepoint_string("dirty"),
++ page->index, priv);
++ }
++
+ set_page_dirty(page);
+ if (PageDirty(page))
+ _debug("dirtied");
+--
+2.27.0
+
--- /dev/null
+From cad9e23e7d6750daa2666b6187abafb447c1c7bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Oct 2020 13:22:47 +0000
+Subject: afs: Wrap page->private manipulations in inline functions
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 185f0c7073bd5c78f86265f703f5daf1306ab5a7 ]
+
+The afs filesystem uses page->private to store the dirty range within a
+page such that in the event of a conflicting 3rd-party write to the server,
+we write back just the bits that got changed locally.
+
+However, there are a couple of problems with this:
+
+ (1) I need a bit to note if the page might be mapped so that partial
+ invalidation doesn't shrink the range.
+
+ (2) There aren't necessarily sufficient bits to store the entire range of
+ data altered (say it's a 32-bit system with 64KiB pages or transparent
+ huge pages are in use).
+
+So wrap the accesses in inline functions so that future commits can change
+how this works.
+
+Also move them out of the tracing header into the in-directory header.
+There's not really any need for them to be in the tracing header.
+
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/internal.h | 28 ++++++++++++++++++++++++++++
+ fs/afs/write.c | 31 +++++++++++++------------------
+ include/trace/events/afs.h | 19 +++----------------
+ 3 files changed, 44 insertions(+), 34 deletions(-)
+
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index c8acb58ac5d8f..523bf9698ecdc 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -857,6 +857,34 @@ struct afs_vnode_cache_aux {
+ u64 data_version;
+ } __packed;
+
++/*
++ * We use page->private to hold the amount of the page that we've written to,
++ * splitting the field into two parts. However, we need to represent a range
++ * 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
++ */
++#if PAGE_SIZE > 32768
++#define __AFS_PAGE_PRIV_MASK 0xffffffffUL
++#define __AFS_PAGE_PRIV_SHIFT 32
++#else
++#define __AFS_PAGE_PRIV_MASK 0xffffUL
++#define __AFS_PAGE_PRIV_SHIFT 16
++#endif
++
++static inline size_t afs_page_dirty_from(unsigned long priv)
++{
++ return priv & __AFS_PAGE_PRIV_MASK;
++}
++
++static inline size_t afs_page_dirty_to(unsigned long priv)
++{
++ return (priv >> __AFS_PAGE_PRIV_SHIFT) & __AFS_PAGE_PRIV_MASK;
++}
++
++static inline unsigned long afs_page_dirty(size_t from, size_t to)
++{
++ return ((unsigned long)to << __AFS_PAGE_PRIV_SHIFT) | from;
++}
++
+ #include <trace/events/afs.h>
+
+ /*****************************************************************************/
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index f28d85c38cd89..ea1768b3c0b56 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -117,8 +117,8 @@ try_again:
+ t = f = 0;
+ if (PagePrivate(page)) {
+ priv = page_private(page);
+- f = priv & AFS_PRIV_MAX;
+- t = priv >> AFS_PRIV_SHIFT;
++ f = afs_page_dirty_from(priv);
++ t = afs_page_dirty_to(priv);
+ ASSERTCMP(f, <=, t);
+ }
+
+@@ -206,22 +206,18 @@ int afs_write_end(struct file *file, struct address_space *mapping,
+
+ if (PagePrivate(page)) {
+ priv = page_private(page);
+- f = priv & AFS_PRIV_MAX;
+- t = priv >> AFS_PRIV_SHIFT;
++ f = afs_page_dirty_from(priv);
++ t = afs_page_dirty_to(priv);
+ if (from < f)
+ f = from;
+ if (to > t)
+ t = to;
+- priv = (unsigned long)t << AFS_PRIV_SHIFT;
+- priv |= f;
++ priv = afs_page_dirty(f, t);
+ set_page_private(page, priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("dirty+"),
+ page->index, priv);
+ } else {
+- f = from;
+- t = to;
+- priv = (unsigned long)t << AFS_PRIV_SHIFT;
+- priv |= f;
++ priv = afs_page_dirty(from, to);
+ attach_page_private(page, (void *)priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("dirty"),
+ page->index, priv);
+@@ -522,8 +518,8 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
+ */
+ start = primary_page->index;
+ priv = page_private(primary_page);
+- offset = priv & AFS_PRIV_MAX;
+- to = priv >> AFS_PRIV_SHIFT;
++ offset = afs_page_dirty_from(priv);
++ to = afs_page_dirty_to(priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("store"),
+ primary_page->index, priv);
+
+@@ -568,8 +564,8 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
+ }
+
+ priv = page_private(page);
+- f = priv & AFS_PRIV_MAX;
+- t = priv >> AFS_PRIV_SHIFT;
++ f = afs_page_dirty_from(priv);
++ t = afs_page_dirty_to(priv);
+ if (f != 0 &&
+ !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
+ unlock_page(page);
+@@ -870,8 +866,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
+ */
+ wait_on_page_writeback(vmf->page);
+
+- priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
+- priv |= 0; /* From */
++ priv = afs_page_dirty(0, PAGE_SIZE);
+ trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
+ vmf->page->index, priv);
+ if (PagePrivate(vmf->page))
+@@ -930,8 +925,8 @@ int afs_launder_page(struct page *page)
+ f = 0;
+ t = PAGE_SIZE;
+ if (PagePrivate(page)) {
+- f = priv & AFS_PRIV_MAX;
+- t = priv >> AFS_PRIV_SHIFT;
++ f = afs_page_dirty_from(priv);
++ t = afs_page_dirty_to(priv);
+ }
+
+ trace_afs_page_dirty(vnode, tracepoint_string("launder"),
+diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
+index 5f0c1cf1ea130..05b5506cd24ca 100644
+--- a/include/trace/events/afs.h
++++ b/include/trace/events/afs.h
+@@ -884,19 +884,6 @@ TRACE_EVENT(afs_dir_check_failed,
+ __entry->vnode, __entry->off, __entry->i_size)
+ );
+
+-/*
+- * We use page->private to hold the amount of the page that we've written to,
+- * splitting the field into two parts. However, we need to represent a range
+- * 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
+- */
+-#if PAGE_SIZE > 32768
+-#define AFS_PRIV_MAX 0xffffffff
+-#define AFS_PRIV_SHIFT 32
+-#else
+-#define AFS_PRIV_MAX 0xffff
+-#define AFS_PRIV_SHIFT 16
+-#endif
+-
+ TRACE_EVENT(afs_page_dirty,
+ TP_PROTO(struct afs_vnode *vnode, const char *where,
+ pgoff_t page, unsigned long priv),
+@@ -917,10 +904,10 @@ TRACE_EVENT(afs_page_dirty,
+ __entry->priv = priv;
+ ),
+
+- TP_printk("vn=%p %lx %s %lu-%lu",
++ TP_printk("vn=%p %lx %s %zx-%zx",
+ __entry->vnode, __entry->page, __entry->where,
+- __entry->priv & AFS_PRIV_MAX,
+- __entry->priv >> AFS_PRIV_SHIFT)
++ afs_page_dirty_from(__entry->priv),
++ afs_page_dirty_to(__entry->priv))
+ );
+
+ TRACE_EVENT(afs_call_state,
+--
+2.27.0
+
--- /dev/null
+From 8aab66178df302a5b7ac467b9674f8dbfb05a509 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Sep 2020 15:17:54 +0800
+Subject: ARC: [dts] fix the errors detected by dtbs_check
+
+From: Zhen Lei <thunder.leizhen@huawei.com>
+
+[ Upstream commit 05b1be68c4d6d76970025e6139bfd735c2256ee5 ]
+
+xxx/arc/boot/dts/axs101.dt.yaml: dw-apb-ictl@e0012000: $nodename:0: \
+'dw-apb-ictl@e0012000' does not match '^interrupt-controller(@[0-9a-f,]+)*$'
+ From schema: xxx/interrupt-controller/snps,dw-apb-ictl.yaml
+
+The node name of the interrupt controller must start with
+"interrupt-controller" instead of "dw-apb-ictl".
+
+Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arc/boot/dts/axc001.dtsi | 2 +-
+ arch/arc/boot/dts/axc003.dtsi | 2 +-
+ arch/arc/boot/dts/axc003_idu.dtsi | 2 +-
+ arch/arc/boot/dts/vdk_axc003.dtsi | 2 +-
+ arch/arc/boot/dts/vdk_axc003_idu.dtsi | 2 +-
+ 5 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
+index 79ec27c043c1d..2a151607b0805 100644
+--- a/arch/arc/boot/dts/axc001.dtsi
++++ b/arch/arc/boot/dts/axc001.dtsi
+@@ -91,7 +91,7 @@
+ * avoid duplicating the MB dtsi file given that IRQ from
+ * this intc to cpu intc are different for axs101 and axs103
+ */
+- mb_intc: dw-apb-ictl@e0012000 {
++ mb_intc: interrupt-controller@e0012000 {
+ #interrupt-cells = <1>;
+ compatible = "snps,dw-apb-ictl";
+ reg = < 0x0 0xe0012000 0x0 0x200 >;
+diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
+index ac8e1b463a709..cd1edcf4f95ef 100644
+--- a/arch/arc/boot/dts/axc003.dtsi
++++ b/arch/arc/boot/dts/axc003.dtsi
+@@ -129,7 +129,7 @@
+ * avoid duplicating the MB dtsi file given that IRQ from
+ * this intc to cpu intc are different for axs101 and axs103
+ */
+- mb_intc: dw-apb-ictl@e0012000 {
++ mb_intc: interrupt-controller@e0012000 {
+ #interrupt-cells = <1>;
+ compatible = "snps,dw-apb-ictl";
+ reg = < 0x0 0xe0012000 0x0 0x200 >;
+diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
+index 9da21e7fd246f..70779386ca796 100644
+--- a/arch/arc/boot/dts/axc003_idu.dtsi
++++ b/arch/arc/boot/dts/axc003_idu.dtsi
+@@ -135,7 +135,7 @@
+ * avoid duplicating the MB dtsi file given that IRQ from
+ * this intc to cpu intc are different for axs101 and axs103
+ */
+- mb_intc: dw-apb-ictl@e0012000 {
++ mb_intc: interrupt-controller@e0012000 {
+ #interrupt-cells = <1>;
+ compatible = "snps,dw-apb-ictl";
+ reg = < 0x0 0xe0012000 0x0 0x200 >;
+diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi
+index f8be7ba8dad49..c21d0eb07bf67 100644
+--- a/arch/arc/boot/dts/vdk_axc003.dtsi
++++ b/arch/arc/boot/dts/vdk_axc003.dtsi
+@@ -46,7 +46,7 @@
+
+ };
+
+- mb_intc: dw-apb-ictl@e0012000 {
++ mb_intc: interrupt-controller@e0012000 {
+ #interrupt-cells = <1>;
+ compatible = "snps,dw-apb-ictl";
+ reg = < 0xe0012000 0x200 >;
+diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
+index 0afa3e53a4e39..4d348853ac7c5 100644
+--- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi
++++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
+@@ -54,7 +54,7 @@
+
+ };
+
+- mb_intc: dw-apb-ictl@e0012000 {
++ mb_intc: interrupt-controller@e0012000 {
+ #interrupt-cells = <1>;
+ compatible = "snps,dw-apb-ictl";
+ reg = < 0xe0012000 0x200 >;
+--
+2.27.0
+
--- /dev/null
+From 3d185dc67c07b6fca82bb03b28b5c31740a34c15 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Aug 2020 23:24:35 +0100
+Subject: ARM: 8997/2: hw_breakpoint: Handle inexact watchpoint addresses
+
+From: Douglas Anderson <dianders@chromium.org>
+
+[ Upstream commit 22c9e58299e5f18274788ce54c03d4fb761e3c5d ]
+
+This is commit fdfeff0f9e3d ("arm64: hw_breakpoint: Handle inexact
+watchpoint addresses") but ported to arm32, which has the same
+problem.
+
+This problem was found by Android CTS tests, notably the
+"watchpoint_imprecise" test [1]. I tested locally against a copycat
+(simplified) version of the test though.
+
+[1] https://android.googlesource.com/platform/bionic/+/master/tests/sys_ptrace_test.cpp
+
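+The heuristic added below attributes a hit to the closest watchpoint by
+measuring how far the faulting address lies outside each watchpoint's
+watched bytes. A rough, self-contained userspace sketch of that
+arithmetic (GCC builtins stand in for the kernel's __ffs()/__fls(); val
+is the watchpoint base address and len_mask the decoded byte-address
+select, as in the kernel helper):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	/* Distance of addr from the watched bytes; 0 means an exact hit */
+	uint32_t distance_from_watchpoint(uint32_t addr, uint32_t val,
+					  uint32_t len_mask)
+	{
+		uint32_t wp_low = val + __builtin_ctz(len_mask);	/* __ffs */
+		uint32_t wp_high = val + 31 - __builtin_clz(len_mask);	/* __fls */
+
+		if (addr < wp_low)
+			return wp_low - addr;
+		if (addr > wp_high)
+			return addr - wp_high;
+		return 0;
+	}
+
+	int main(void)
+	{
+		/* 4-byte watchpoint at 0x1000 (byte-select 0xf), hit at 0x1006 */
+		printf("%u\n", (unsigned)distance_from_watchpoint(0x1006, 0x1000, 0xf));
+		return 0;
+	}
+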
+Link: https://lkml.kernel.org/r/20191019111216.1.I82eae759ca6dc28a245b043f485ca490e3015321@changeid
+
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: Matthias Kaehlcke <mka@chromium.org>
+Acked-by: Will Deacon <will@kernel.org>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/kernel/hw_breakpoint.c | 100 +++++++++++++++++++++++---------
+ 1 file changed, 72 insertions(+), 28 deletions(-)
+
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index 7a4853b1213a8..08660ae9dcbce 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -683,6 +683,40 @@ static void disable_single_step(struct perf_event *bp)
+ arch_install_hw_breakpoint(bp);
+ }
+
++/*
++ * Arm32 hardware does not always report a watchpoint hit address that matches
++ * one of the watchpoints set. It can also report an address "near" the
++ * watchpoint if a single instruction accesses both watched and unwatched
++ * addresses. There is no straight-forward way, short of disassembling the
++ * offending instruction, to map that address back to the watchpoint. This
++ * function computes the distance of the memory access from the watchpoint as a
++ * heuristic for the likelihood that a given access triggered the watchpoint.
++ *
++ * See this same function in the arm64 platform code, which has the same
++ * problem.
++ *
++ * The function returns the distance of the address from the bytes watched by
++ * the watchpoint. In case of an exact match, it returns 0.
++ */
++static u32 get_distance_from_watchpoint(unsigned long addr, u32 val,
++ struct arch_hw_breakpoint_ctrl *ctrl)
++{
++ u32 wp_low, wp_high;
++ u32 lens, lene;
++
++ lens = __ffs(ctrl->len);
++ lene = __fls(ctrl->len);
++
++ wp_low = val + lens;
++ wp_high = val + lene;
++ if (addr < wp_low)
++ return wp_low - addr;
++ else if (addr > wp_high)
++ return addr - wp_high;
++ else
++ return 0;
++}
++
+ static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
+ struct arch_hw_breakpoint *info)
+ {
+@@ -692,23 +726,25 @@ static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
+ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+ {
+- int i, access;
+- u32 val, ctrl_reg, alignment_mask;
++ int i, access, closest_match = 0;
++ u32 min_dist = -1, dist;
++ u32 val, ctrl_reg;
+ struct perf_event *wp, **slots;
+ struct arch_hw_breakpoint *info;
+ struct arch_hw_breakpoint_ctrl ctrl;
+
+ slots = this_cpu_ptr(wp_on_reg);
+
++ /*
++	 * Find all watchpoints that match the reported address. If no exact
++	 * match is found, attribute the hit to the closest watchpoint.
++ */
++ rcu_read_lock();
+ for (i = 0; i < core_num_wrps; ++i) {
+- rcu_read_lock();
+-
+ wp = slots[i];
+-
+ if (wp == NULL)
+- goto unlock;
++ continue;
+
+- info = counter_arch_bp(wp);
+ /*
+ * The DFAR is an unknown value on debug architectures prior
+ * to 7.1. Since we only allow a single watchpoint on these
+@@ -717,33 +753,31 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+ */
+ if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
+ BUG_ON(i > 0);
++ info = counter_arch_bp(wp);
+ info->trigger = wp->attr.bp_addr;
+ } else {
+- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+- alignment_mask = 0x7;
+- else
+- alignment_mask = 0x3;
+-
+- /* Check if the watchpoint value matches. */
+- val = read_wb_reg(ARM_BASE_WVR + i);
+- if (val != (addr & ~alignment_mask))
+- goto unlock;
+-
+- /* Possible match, check the byte address select. */
+- ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
+- decode_ctrl_reg(ctrl_reg, &ctrl);
+- if (!((1 << (addr & alignment_mask)) & ctrl.len))
+- goto unlock;
+-
+ /* Check that the access type matches. */
+ if (debug_exception_updates_fsr()) {
+ access = (fsr & ARM_FSR_ACCESS_MASK) ?
+ HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+ if (!(access & hw_breakpoint_type(wp)))
+- goto unlock;
++ continue;
+ }
+
++ val = read_wb_reg(ARM_BASE_WVR + i);
++ ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
++ decode_ctrl_reg(ctrl_reg, &ctrl);
++ dist = get_distance_from_watchpoint(addr, val, &ctrl);
++ if (dist < min_dist) {
++ min_dist = dist;
++ closest_match = i;
++ }
++ /* Is this an exact match? */
++ if (dist != 0)
++ continue;
++
+ /* We have a winner. */
++ info = counter_arch_bp(wp);
+ info->trigger = addr;
+ }
+
+@@ -765,13 +799,23 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+ * we can single-step over the watchpoint trigger.
+ */
+ if (!is_default_overflow_handler(wp))
+- goto unlock;
+-
++ continue;
+ step:
+ enable_single_step(wp, instruction_pointer(regs));
+-unlock:
+- rcu_read_unlock();
+ }
++
++ if (min_dist > 0 && min_dist != -1) {
++ /* No exact match found. */
++ wp = slots[closest_match];
++ info = counter_arch_bp(wp);
++ info->trigger = addr;
++ pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
++ perf_bp_event(wp, regs);
++ if (is_default_overflow_handler(wp))
++ enable_single_step(wp, instruction_pointer(regs));
++ }
++
++ rcu_read_unlock();
+ }
+
+ static void watchpoint_single_step_handler(unsigned long pc)
+--
+2.27.0
+
--- /dev/null
+From 96f5afa1159fa456b2ab497e300fce9ca96408a5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2020 14:02:48 -0700
+Subject: ARM: dts: omap4: Fix sgx clock rate for 4430
+
+From: Tony Lindgren <tony@atomide.com>
+
+[ Upstream commit 19d3e9a0bdd57b90175f30390edeb06851f5f9f3 ]
+
+We currently have a different clock rate for droid4 compared to the
+stock v3.0.8 based Android Linux kernel:
+
+# cat /sys/kernel/debug/clk/dpll_*_m7x2_ck/clk_rate
+266666667
+307200000
+# cat /sys/kernel/debug/clk/l3_gfx_cm:clk:0000:0/clk_rate
+307200000
+
+Let's fix this by configuring sgx to use 153.6 MHz instead of 307.2 MHz.
+It looks like at least duover also needs this change to avoid hangs, so
+let's apply it for all 4430.
+
+This helps a bit with thermal issues that seem to be related to memory
+corruption when using sgx. It seems that other driver-related issues
+still remain though.
+
+Cc: Arthur Demchenkov <spinal.by@gmail.com>
+Cc: Merlijn Wajer <merlijn@wizzup.org>
+Cc: Sebastian Reichel <sre@kernel.org>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/omap4.dtsi | 2 +-
+ arch/arm/boot/dts/omap443x.dtsi | 10 ++++++++++
+ 2 files changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
+index 0282b9de3384f..52e8298275050 100644
+--- a/arch/arm/boot/dts/omap4.dtsi
++++ b/arch/arm/boot/dts/omap4.dtsi
+@@ -410,7 +410,7 @@
+ status = "disabled";
+ };
+
+- target-module@56000000 {
++ sgx_module: target-module@56000000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x5600fe00 0x4>,
+ <0x5600fe10 0x4>;
+diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi
+index 8ed510ab00c52..cb309743de5da 100644
+--- a/arch/arm/boot/dts/omap443x.dtsi
++++ b/arch/arm/boot/dts/omap443x.dtsi
+@@ -74,3 +74,13 @@
+ };
+
+ /include/ "omap443x-clocks.dtsi"
++
++/*
++ * Use dpll_per for sgx at 153.6MHz like droid4 stock v3.0.8 Android kernel
++ */
++&sgx_module {
++ assigned-clocks = <&l3_gfx_clkctrl OMAP4_GPU_CLKCTRL 24>,
++ <&dpll_per_m7x2_ck>;
++ assigned-clock-rates = <0>, <153600000>;
++ assigned-clock-parents = <&dpll_per_m7x2_ck>;
++};
+--
+2.27.0
+
--- /dev/null
+From 78e085790394eba360888a3bd9939678038f3bbb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Sep 2020 18:11:27 +0200
+Subject: ARM: dts: s5pv210: add RTC 32 KHz clock in Aries family
+
+From: Krzysztof Kozlowski <krzk@kernel.org>
+
+[ Upstream commit 086c4498b0cc87fdb09188f3da7056e898814948 ]
+
+The S3C RTC requires a 32768 Hz clock as input, which is provided by the
+PMIC. However, there is no such clock provider, but rather a regulator
+driver which registers the clock as a regulator. This is an old driver
+which will not be updated, so add a workaround - a fixed-clock to fill
+the missing clock phandle reference in the S3C RTC.
+
+This fixes dtbs_check warnings:
+
+ rtc@e2800000: clocks: [[2, 145]] is too short
+ rtc@e2800000: clock-names: ['rtc'] is too short
+
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Tested-by: Jonathan Bakker <xc-racer2@live.ca>
+Link: https://lore.kernel.org/r/20200907161141.31034-12-krzk@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/s5pv210-aries.dtsi | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi
+index a3f83f668ce14..8a98b35b9b0de 100644
+--- a/arch/arm/boot/dts/s5pv210-aries.dtsi
++++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
+@@ -47,6 +47,13 @@
+ };
+ };
+
++ pmic_ap_clk: clock-0 {
++ /* Workaround for missing clock on PMIC */
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <32768>;
++ };
++
+ bt_codec: bt_sco {
+ compatible = "linux,bt-sco";
+ #sound-dai-cells = <0>;
+@@ -825,6 +832,11 @@
+ samsung,pwm-outputs = <1>;
+ };
+
++&rtc {
++ clocks = <&clocks CLK_RTC>, <&pmic_ap_clk>;
++ clock-names = "rtc", "rtc_src";
++};
++
+ &sdhci1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+--
+2.27.0
+
--- /dev/null
+From 8261744ef2ff9dca5f6188423d9de821f6f24ffd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Sep 2020 18:11:40 +0200
+Subject: ARM: dts: s5pv210: align SPI GPIO node name with dtschema in Aries
+
+From: Krzysztof Kozlowski <krzk@kernel.org>
+
+[ Upstream commit 1ed7f6d0bab2f1794f1eb4ed032e90575552fd21 ]
+
+The device tree schema expects SPI controller to be named "spi",
+otherwise dtbs_check complain with a warning like:
+
+ spi-gpio-0: $nodename:0: 'spi-gpio-0' does not match '^spi(@.*|-[0-9a-f])*$'
+
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Tested-by: Jonathan Bakker <xc-racer2@live.ca>
+Link: https://lore.kernel.org/r/20200907161141.31034-25-krzk@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/s5pv210-aries.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi
+index 8a98b35b9b0de..3762098233c0c 100644
+--- a/arch/arm/boot/dts/s5pv210-aries.dtsi
++++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
+@@ -545,7 +545,7 @@
+ value = <0x5200>;
+ };
+
+- spi_lcd: spi-gpio-0 {
++ spi_lcd: spi-2 {
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+--
+2.27.0
+
--- /dev/null
+From 1fc1e3b32227c10fcddcc7711c03ef3e0817196d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Sep 2020 17:38:58 -0700
+Subject: ARM: dts: s5pv210: Enable audio on Aries boards
+
+From: Jonathan Bakker <xc-racer2@live.ca>
+
+[ Upstream commit cd972fe90008adf49de0790250c1275480ac5cdc ]
+
+Both the Galaxy S and the Fascinate4G have a WM8994 codec, but they
+differ slightly in their jack detection and micbias configuration.
+
+Signed-off-by: Jonathan Bakker <xc-racer2@live.ca>
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/s5pv210-aries.dtsi | 10 +++
+ arch/arm/boot/dts/s5pv210-fascinate4g.dts | 98 +++++++++++++++++++++++
+ arch/arm/boot/dts/s5pv210-galaxys.dts | 85 ++++++++++++++++++++
+ 3 files changed, 193 insertions(+)
+
+diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi
+index 822207f63ee0a..a3f83f668ce14 100644
+--- a/arch/arm/boot/dts/s5pv210-aries.dtsi
++++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
+@@ -47,6 +47,11 @@
+ };
+ };
+
++ bt_codec: bt_sco {
++ compatible = "linux,bt-sco";
++ #sound-dai-cells = <0>;
++ };
++
+ vibrator_pwr: regulator-fixed-0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vibrator-en";
+@@ -624,6 +629,11 @@
+ };
+ };
+
++&i2s0 {
++ dmas = <&pdma0 9>, <&pdma0 10>, <&pdma0 11>;
++ status = "okay";
++};
++
+ &mfc {
+ memory-region = <&mfc_left>, <&mfc_right>;
+ };
+diff --git a/arch/arm/boot/dts/s5pv210-fascinate4g.dts b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
+index 65eed01cfced1..ca064359dd308 100644
+--- a/arch/arm/boot/dts/s5pv210-fascinate4g.dts
++++ b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
+@@ -35,6 +35,80 @@
+ linux,code = <KEY_VOLUMEUP>;
+ };
+ };
++
++ headset_micbias_reg: regulator-fixed-3 {
++ compatible = "regulator-fixed";
++ regulator-name = "Headset_Micbias";
++ gpio = <&gpj2 5 GPIO_ACTIVE_HIGH>;
++ enable-active-high;
++
++ pinctrl-names = "default";
++ pinctrl-0 = <&headset_micbias_ena>;
++ };
++
++ main_micbias_reg: regulator-fixed-4 {
++ compatible = "regulator-fixed";
++ regulator-name = "Main_Micbias";
++ gpio = <&gpj4 2 GPIO_ACTIVE_HIGH>;
++ enable-active-high;
++
++ pinctrl-names = "default";
++ pinctrl-0 = <&main_micbias_ena>;
++ };
++
++ sound {
++ compatible = "samsung,fascinate4g-wm8994";
++
++ model = "Fascinate4G";
++
++ extcon = <&fsa9480>;
++
++ main-micbias-supply = <&main_micbias_reg>;
++ headset-micbias-supply = <&headset_micbias_reg>;
++
++ earpath-sel-gpios = <&gpj2 6 GPIO_ACTIVE_HIGH>;
++
++ io-channels = <&adc 3>;
++ io-channel-names = "headset-detect";
++ headset-detect-gpios = <&gph0 6 GPIO_ACTIVE_HIGH>;
++ headset-key-gpios = <&gph3 6 GPIO_ACTIVE_HIGH>;
++
++ samsung,audio-routing =
++ "HP", "HPOUT1L",
++ "HP", "HPOUT1R",
++
++ "SPK", "SPKOUTLN",
++ "SPK", "SPKOUTLP",
++
++ "RCV", "HPOUT2N",
++ "RCV", "HPOUT2P",
++
++ "LINE", "LINEOUT2N",
++ "LINE", "LINEOUT2P",
++
++ "IN1LP", "Main Mic",
++ "IN1LN", "Main Mic",
++
++ "IN1RP", "Headset Mic",
++ "IN1RN", "Headset Mic",
++
++ "Modem Out", "Modem TX",
++ "Modem RX", "Modem In",
++
++ "Bluetooth SPK", "TX",
++ "RX", "Bluetooth Mic";
++
++ pinctrl-names = "default";
++ pinctrl-0 = <&headset_det &earpath_sel>;
++
++ cpu {
++ sound-dai = <&i2s0>, <&bt_codec>;
++ };
++
++ codec {
++ sound-dai = <&wm8994>;
++ };
++ };
+ };
+
+ &fg {
+@@ -51,6 +125,12 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&sleep_cfg>;
+
++ headset_det: headset-det {
++ samsung,pins = "gph0-6", "gph3-6";
++ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
++ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
++ };
++
+ fg_irq: fg-irq {
+ samsung,pins = "gph3-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+@@ -58,6 +138,24 @@
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
++ headset_micbias_ena: headset-micbias-ena {
++ samsung,pins = "gpj2-5";
++ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
++ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
++ };
++
++ earpath_sel: earpath-sel {
++ samsung,pins = "gpj2-6";
++ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
++ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
++ };
++
++ main_micbias_ena: main-micbias-ena {
++ samsung,pins = "gpj4-2";
++ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
++ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
++ };
++
+ /* Based on vendor kernel v2.6.35.7 */
+ sleep_cfg: sleep-cfg {
+ PIN_SLP(gpa0-0, PREV, NONE);
+diff --git a/arch/arm/boot/dts/s5pv210-galaxys.dts b/arch/arm/boot/dts/s5pv210-galaxys.dts
+index 5d10dd67eacc5..560f830b6f6be 100644
+--- a/arch/arm/boot/dts/s5pv210-galaxys.dts
++++ b/arch/arm/boot/dts/s5pv210-galaxys.dts
+@@ -72,6 +72,73 @@
+ pinctrl-0 = <&fm_irq &fm_rst>;
+ };
+ };
++
++ micbias_reg: regulator-fixed-3 {
++ compatible = "regulator-fixed";
++ regulator-name = "MICBIAS";
++ gpio = <&gpj4 2 GPIO_ACTIVE_HIGH>;
++ enable-active-high;
++
++ pinctrl-names = "default";
++ pinctrl-0 = <&micbias_reg_ena>;
++ };
++
++ sound {
++ compatible = "samsung,aries-wm8994";
++
++ model = "Aries";
++
++ extcon = <&fsa9480>;
++
++ main-micbias-supply = <&micbias_reg>;
++ headset-micbias-supply = <&micbias_reg>;
++
++ earpath-sel-gpios = <&gpj2 6 GPIO_ACTIVE_HIGH>;
++
++ io-channels = <&adc 3>;
++ io-channel-names = "headset-detect";
++ headset-detect-gpios = <&gph0 6 GPIO_ACTIVE_LOW>;
++ headset-key-gpios = <&gph3 6 GPIO_ACTIVE_HIGH>;
++
++ samsung,audio-routing =
++ "HP", "HPOUT1L",
++ "HP", "HPOUT1R",
++
++ "SPK", "SPKOUTLN",
++ "SPK", "SPKOUTLP",
++
++ "RCV", "HPOUT2N",
++ "RCV", "HPOUT2P",
++
++ "LINE", "LINEOUT2N",
++ "LINE", "LINEOUT2P",
++
++ "IN1LP", "Main Mic",
++ "IN1LN", "Main Mic",
++
++ "IN1RP", "Headset Mic",
++ "IN1RN", "Headset Mic",
++
++ "IN2LN", "FM In",
++ "IN2RN", "FM In",
++
++ "Modem Out", "Modem TX",
++ "Modem RX", "Modem In",
++
++ "Bluetooth SPK", "TX",
++ "RX", "Bluetooth Mic";
++
++ pinctrl-names = "default";
++ pinctrl-0 = <&headset_det &earpath_sel>;
++
++ cpu {
++ sound-dai = <&i2s0>, <&bt_codec>;
++ };
++
++ codec {
++ sound-dai = <&wm8994>;
++ };
++ };
+ };
+
+ &aliases {
+@@ -88,6 +155,12 @@
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
++ headset_det: headset-det {
++ samsung,pins = "gph0-6", "gph3-6";
++ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
++ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
++ };
++
+ fm_irq: fm-irq {
+ samsung,pins = "gpj2-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+@@ -102,6 +175,12 @@
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
++ earpath_sel: earpath-sel {
++ samsung,pins = "gpj2-6";
++ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
++ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
++ };
++
+ massmemory_en: massmemory-en {
+ samsung,pins = "gpj2-7";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+@@ -109,6 +188,12 @@
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
++ micbias_reg_ena: micbias-reg-ena {
++ samsung,pins = "gpj4-2";
++ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
++ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
++ };
++
+ /* Based on CyanogenMod 3.0.101 kernel */
+ sleep_cfg: sleep-cfg {
+ PIN_SLP(gpa0-0, PREV, NONE);
+--
+2.27.0
+
--- /dev/null
+From 77fd01a40af07379433b7c5e292dd56cc729e57a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Sep 2020 18:11:22 +0200
+Subject: ARM: dts: s5pv210: move fixed clocks under root node
+
+From: Krzysztof Kozlowski <krzk@kernel.org>
+
+[ Upstream commit d38cae370e5f2094cbc38db3082b8e9509ae52ce ]
+
+The fixed clocks are kept under a dedicated 'external-clocks' node, so a
+fake 'reg' was added. This is not correct with dtschema, as the fixed-clock
+binding does not have a 'reg' property. Moving the fixed clocks out of
+'soc' to the root node fixes multiple dtbs_check warnings:
+
+ external-clocks: $nodename:0: 'external-clocks' does not match '^([a-z][a-z0-9\\-]+-bus|bus|soc|axi|ahb|apb)(@[0-9a-f]+)?$'
+ external-clocks: #size-cells:0:0: 0 is not one of [1, 2]
+ external-clocks: oscillator@0:reg:0: [0] is too short
+ external-clocks: oscillator@1:reg:0: [1] is too short
+ external-clocks: 'ranges' is a required property
+ oscillator@0: 'reg' does not match any of the regexes: 'pinctrl-[0-9]+'
+
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Tested-by: Jonathan Bakker <xc-racer2@live.ca>
+Link: https://lore.kernel.org/r/20200907161141.31034-7-krzk@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/s5pv210.dtsi | 36 +++++++++++++---------------------
+ 1 file changed, 14 insertions(+), 22 deletions(-)
+
+diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
+index 84e4447931de5..5c760a6d79557 100644
+--- a/arch/arm/boot/dts/s5pv210.dtsi
++++ b/arch/arm/boot/dts/s5pv210.dtsi
+@@ -52,34 +52,26 @@
+ };
+ };
+
++ xxti: oscillator-0 {
++ compatible = "fixed-clock";
++ clock-frequency = <0>;
++ clock-output-names = "xxti";
++ #clock-cells = <0>;
++ };
++
++ xusbxti: oscillator-1 {
++ compatible = "fixed-clock";
++ clock-frequency = <0>;
++ clock-output-names = "xusbxti";
++ #clock-cells = <0>;
++ };
++
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+- external-clocks {
+- compatible = "simple-bus";
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+- xxti: oscillator@0 {
+- compatible = "fixed-clock";
+- reg = <0>;
+- clock-frequency = <0>;
+- clock-output-names = "xxti";
+- #clock-cells = <0>;
+- };
+-
+- xusbxti: oscillator@1 {
+- compatible = "fixed-clock";
+- reg = <1>;
+- clock-frequency = <0>;
+- clock-output-names = "xusbxti";
+- #clock-cells = <0>;
+- };
+- };
+-
+ onenand: onenand@b0600000 {
+ compatible = "samsung,s5pv210-onenand";
+ reg = <0xb0600000 0x2000>,
+--
+2.27.0
+
--- /dev/null
+From def4434eacf19df7446f15f47dd36895400de71e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Sep 2020 18:11:23 +0200
+Subject: ARM: dts: s5pv210: move PMU node out of clock controller
+
+From: Krzysztof Kozlowski <krzk@kernel.org>
+
+[ Upstream commit bb98fff84ad1ea321823759edaba573a16fa02bd ]
+
+The Power Management Unit (PMU) is a separate device which has little in
+common with the clock controller. Moving it one level up (from clock
+controller child to SoC) allows removing the fake simple-bus compatible and
+fixes dtbs_check warnings like:
+
+ clock-controller@e0100000: $nodename:0:
+ 'clock-controller@e0100000' does not match '^([a-z][a-z0-9\\-]+-bus|bus|soc|axi|ahb|apb)(@[0-9a-f]+)?$'
+
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Tested-by: Jonathan Bakker <xc-racer2@live.ca>
+Link: https://lore.kernel.org/r/20200907161141.31034-8-krzk@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/s5pv210.dtsi | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
+index 5c760a6d79557..46221a5c8ce59 100644
+--- a/arch/arm/boot/dts/s5pv210.dtsi
++++ b/arch/arm/boot/dts/s5pv210.dtsi
+@@ -92,19 +92,16 @@
+ };
+
+ clocks: clock-controller@e0100000 {
+- compatible = "samsung,s5pv210-clock", "simple-bus";
++ compatible = "samsung,s5pv210-clock";
+ reg = <0xe0100000 0x10000>;
+ clock-names = "xxti", "xusbxti";
+ clocks = <&xxti>, <&xusbxti>;
+ #clock-cells = <1>;
+- #address-cells = <1>;
+- #size-cells = <1>;
+- ranges;
++ };
+
+- pmu_syscon: syscon@e0108000 {
+- compatible = "samsung-s5pv210-pmu", "syscon";
+- reg = <0xe0108000 0x8000>;
+- };
++ pmu_syscon: syscon@e0108000 {
++ compatible = "samsung-s5pv210-pmu", "syscon";
++ reg = <0xe0108000 0x8000>;
+ };
+
+ pinctrl0: pinctrl@e0200000 {
+--
+2.27.0
+
--- /dev/null
+From 4d70f915fdbffca158e7d56868fb2b720764b72f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Sep 2020 18:11:24 +0200
+Subject: ARM: dts: s5pv210: remove dedicated 'audio-subsystem' node
+
+From: Krzysztof Kozlowski <krzk@kernel.org>
+
+[ Upstream commit 6c17a2974abf68a58517f75741b15c4aba42b4b8 ]
+
+The 'audio-subsystem' node is an artificial creation, not representing
+real hardware. The hardware is described by its child nodes - the AUDSS
+clock controller and I2S0.
+
+Remove the 'audio-subsystem' node along with its undocumented compatible
+to fix dtbs_check warnings like:
+
+ audio-subsystem: $nodename:0: 'audio-subsystem' does not match '^([a-z][a-z0-9\\-]+-bus|bus|soc|axi|ahb|apb)(@[0-9a-f]+)?$'
+
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Tested-by: Jonathan Bakker <xc-racer2@live.ca>
+Link: https://lore.kernel.org/r/20200907161141.31034-9-krzk@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/s5pv210.dtsi | 65 +++++++++++++++-------------------
+ 1 file changed, 29 insertions(+), 36 deletions(-)
+
+diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
+index 46221a5c8ce59..2871351ab9074 100644
+--- a/arch/arm/boot/dts/s5pv210.dtsi
++++ b/arch/arm/boot/dts/s5pv210.dtsi
+@@ -223,43 +223,36 @@
+ status = "disabled";
+ };
+
+- audio-subsystem {
+- compatible = "samsung,s5pv210-audss", "simple-bus";
+- #address-cells = <1>;
+- #size-cells = <1>;
+- ranges;
+-
+- clk_audss: clock-controller@eee10000 {
+- compatible = "samsung,s5pv210-audss-clock";
+- reg = <0xeee10000 0x1000>;
+- clock-names = "hclk", "xxti",
+- "fout_epll",
+- "sclk_audio0";
+- clocks = <&clocks DOUT_HCLKP>, <&xxti>,
+- <&clocks FOUT_EPLL>,
+- <&clocks SCLK_AUDIO0>;
+- #clock-cells = <1>;
+- };
++ clk_audss: clock-controller@eee10000 {
++ compatible = "samsung,s5pv210-audss-clock";
++ reg = <0xeee10000 0x1000>;
++ clock-names = "hclk", "xxti",
++ "fout_epll",
++ "sclk_audio0";
++ clocks = <&clocks DOUT_HCLKP>, <&xxti>,
++ <&clocks FOUT_EPLL>,
++ <&clocks SCLK_AUDIO0>;
++ #clock-cells = <1>;
++ };
+
+- i2s0: i2s@eee30000 {
+- compatible = "samsung,s5pv210-i2s";
+- reg = <0xeee30000 0x1000>;
+- interrupt-parent = <&vic2>;
+- interrupts = <16>;
+- dma-names = "rx", "tx", "tx-sec";
+- dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>;
+- clock-names = "iis",
+- "i2s_opclk0",
+- "i2s_opclk1";
+- clocks = <&clk_audss CLK_I2S>,
+- <&clk_audss CLK_I2S>,
+- <&clk_audss CLK_DOUT_AUD_BUS>;
+- samsung,idma-addr = <0xc0010000>;
+- pinctrl-names = "default";
+- pinctrl-0 = <&i2s0_bus>;
+- #sound-dai-cells = <0>;
+- status = "disabled";
+- };
++ i2s0: i2s@eee30000 {
++ compatible = "samsung,s5pv210-i2s";
++ reg = <0xeee30000 0x1000>;
++ interrupt-parent = <&vic2>;
++ interrupts = <16>;
++ dma-names = "rx", "tx", "tx-sec";
++ dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>;
++ clock-names = "iis",
++ "i2s_opclk0",
++ "i2s_opclk1";
++ clocks = <&clk_audss CLK_I2S>,
++ <&clk_audss CLK_I2S>,
++ <&clk_audss CLK_DOUT_AUD_BUS>;
++ samsung,idma-addr = <0xc0010000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&i2s0_bus>;
++ #sound-dai-cells = <0>;
++ status = "disabled";
+ };
+
+ i2s1: i2s@e2100000 {
+--
+2.27.0
+
--- /dev/null
+From d9507530c4dfdd79464df97c43176e6a491f6746 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Sep 2020 18:11:21 +0200
+Subject: ARM: dts: s5pv210: remove DMA controller bus node name to fix
+ dtschema warnings
+
+From: Krzysztof Kozlowski <krzk@kernel.org>
+
+[ Upstream commit ea4e792f3c8931fffec4d700cf6197d84e9f35a6 ]
+
+There is no need to keep the DMA controller nodes under an AMBA bus node.
+Remove the "amba" node to fix dtschema warnings like:
+
+ amba: $nodename:0: 'amba' does not match '^([a-z][a-z0-9\\-]+-bus|bus|soc|axi|ahb|apb)(@[0-9a-f]+)?$'
+
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Tested-by: Jonathan Bakker <xc-racer2@live.ca>
+Link: https://lore.kernel.org/r/20200907161141.31034-6-krzk@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/s5pv210.dtsi | 49 +++++++++++++++-------------------
+ 1 file changed, 21 insertions(+), 28 deletions(-)
+
+diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
+index 1b0ee884e91db..84e4447931de5 100644
+--- a/arch/arm/boot/dts/s5pv210.dtsi
++++ b/arch/arm/boot/dts/s5pv210.dtsi
+@@ -128,35 +128,28 @@
+ };
+ };
+
+- amba {
+- #address-cells = <1>;
+- #size-cells = <1>;
+- compatible = "simple-bus";
+- ranges;
+-
+- pdma0: dma@e0900000 {
+- compatible = "arm,pl330", "arm,primecell";
+- reg = <0xe0900000 0x1000>;
+- interrupt-parent = <&vic0>;
+- interrupts = <19>;
+- clocks = <&clocks CLK_PDMA0>;
+- clock-names = "apb_pclk";
+- #dma-cells = <1>;
+- #dma-channels = <8>;
+- #dma-requests = <32>;
+- };
++ pdma0: dma@e0900000 {
++ compatible = "arm,pl330", "arm,primecell";
++ reg = <0xe0900000 0x1000>;
++ interrupt-parent = <&vic0>;
++ interrupts = <19>;
++ clocks = <&clocks CLK_PDMA0>;
++ clock-names = "apb_pclk";
++ #dma-cells = <1>;
++ #dma-channels = <8>;
++ #dma-requests = <32>;
++ };
+
+- pdma1: dma@e0a00000 {
+- compatible = "arm,pl330", "arm,primecell";
+- reg = <0xe0a00000 0x1000>;
+- interrupt-parent = <&vic0>;
+- interrupts = <20>;
+- clocks = <&clocks CLK_PDMA1>;
+- clock-names = "apb_pclk";
+- #dma-cells = <1>;
+- #dma-channels = <8>;
+- #dma-requests = <32>;
+- };
++ pdma1: dma@e0a00000 {
++ compatible = "arm,pl330", "arm,primecell";
++ reg = <0xe0a00000 0x1000>;
++ interrupt-parent = <&vic0>;
++ interrupts = <20>;
++ clocks = <&clocks CLK_PDMA1>;
++ clock-names = "apb_pclk";
++ #dma-cells = <1>;
++ #dma-channels = <8>;
++ #dma-requests = <32>;
+ };
+
+ adc: adc@e1700000 {
+--
+2.27.0
+
--- /dev/null
+From b8dd870db79a4e148ba7974a75b20b1e5b35ea37 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Aug 2020 17:47:49 +0200
+Subject: arm64: dts: qcom: kitakami: Temporarily disable SDHCI1
+
+From: Konrad Dybcio <konradybcio@gmail.com>
+
+[ Upstream commit e884fb6cc89dce1debeae33704edd7735a3d6d9c ]
+
+There is an issue with Kitakami eMMCs dying when a quirk
+isn't addressed. Until the quirk is handled, disable the controller.
+
+Signed-off-by: Konrad Dybcio <konradybcio@gmail.com>
+Link: https://lore.kernel.org/r/20200814154749.257837-1-konradybcio@gmail.com
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
+index 4032b7478f044..791f254ac3f87 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
+@@ -221,7 +221,12 @@
+ };
+
+ &sdhc1 {
+- status = "okay";
++ /* There is an issue with the eMMC causing permanent
++ * damage to the card if a quirk isn't addressed.
++ * Until it's fixed, disable the MMC so as not to brick
++ * devices.
++ */
++ status = "disabled";
+
+ /* Downstream pushes 2.95V to the sdhci device,
+ * but upstream driver REALLY wants to make vmmc 1.8v
+--
+2.27.0
+
--- /dev/null
+From 86a521610e0ecf5d4ad56ea18a74b7911209de45 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jul 2020 21:33:21 +0900
+Subject: arm64: dts: renesas: ulcb: add full-pwr-cycle-in-suspend into eMMC
+ nodes
+
+From: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+[ Upstream commit 992d7a8b88c83c05664b649fc54501ce58e19132 ]
+
+Add the full-pwr-cycle-in-suspend property to do a graceful shutdown of
+the eMMC device during system suspend.
+
+Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Link: https://lore.kernel.org/r/1594989201-24228-1-git-send-email-yoshihiro.shimoda.uh@renesas.com
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/renesas/ulcb.dtsi | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
+index ff88af8e39d3f..a2e085db87c53 100644
+--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
++++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
+@@ -469,6 +469,7 @@
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ non-removable;
++ full-pwr-cycle-in-suspend;
+ status = "okay";
+ };
+
+--
+2.27.0
+
--- /dev/null
+From 9f929198112ea6b0e34f08d6f4419b55cd286b2b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Oct 2020 08:32:06 +0100
+Subject: arm64: efi: increase EFI PE/COFF header padding to 64 KB
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Upstream commit a2d50c1c77aa879af24f9f67b33186737b3d4885 ]
+
+Commit 76085aff29f5 ("efi/libstub/arm64: align PE/COFF sections to segment
+alignment") increased the PE/COFF section alignment to match the minimum
+segment alignment of the kernel image, which ensures that the kernel does
+not need to be moved around in memory by the EFI stub if it was built as
+relocatable.
+
+However, the first PE/COFF section starts at _stext, which is only 4 KB
+aligned, and so the section layout is inconsistent. Existing EFI loaders
+seem to care little about this, but it is better to clean this up.
+
+So let's pad the header to 64 KB to match the PE/COFF section alignment.
+
+Fixes: 76085aff29f5 ("efi/libstub/arm64: align PE/COFF sections to segment alignment")
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Link: https://lore.kernel.org/r/20201027073209.2897-2-ardb@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kernel/efi-header.S | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm64/kernel/efi-header.S b/arch/arm64/kernel/efi-header.S
+index df67c0f2a077e..a71844fb923ee 100644
+--- a/arch/arm64/kernel/efi-header.S
++++ b/arch/arm64/kernel/efi-header.S
+@@ -147,6 +147,6 @@ efi_debug_entry:
+ * correctly at this alignment, we must ensure that .text is
+ * placed at a 4k boundary in the Image to begin with.
+ */
+- .align 12
++ .balign SEGMENT_ALIGN
+ efi_header_end:
+ .endm
+--
+2.27.0
+
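As a rough illustration of what the .balign change does: the header end is
now rounded up to SEGMENT_ALIGN (64 KB, per the commit message) instead of
4 KB. A minimal userspace C sketch of that rounding, with a made-up header
size:

  #include <stdint.h>
  #include <stdio.h>

  /* Round off up to the next multiple of align (a power of two); this is
   * what .align/.balign ask the assembler to do with the location counter. */
  static uint64_t align_up(uint64_t off, uint64_t align)
  {
          return (off + align - 1) & ~(align - 1);
  }

  int main(void)
  {
          uint64_t header_end = 0x1234;   /* hypothetical header size */

          printf("4 KB  boundary: 0x%llx\n",
                 (unsigned long long)align_up(header_end, 0x1000));  /* old .align 12 */
          printf("64 KB boundary: 0x%llx\n",
                 (unsigned long long)align_up(header_end, 0x10000)); /* SEGMENT_ALIGN */
          return 0;
  }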
--- /dev/null
+From 5e85d2382df7708d08e338a710d3255c414d7fe6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Sep 2020 10:39:36 +0800
+Subject: arm64/mm: return cpu_all_mask when node is NUMA_NO_NODE
+
+From: Zhengyuan Liu <liuzhengyuan@tj.kylinos.cn>
+
+[ Upstream commit a194c5f2d2b3a05428805146afcabe5140b5d378 ]
+
+The @node passed to cpumask_of_node() can be NUMA_NO_NODE; in that
+case it triggers the WARN_ON(node >= nr_node_ids) check because of the
+mismatched data types of @node (signed) and nr_node_ids (unsigned).
+Instead, return cpu_all_mask, just like most other architectures do
+when passed NUMA_NO_NODE.
+
+Also add a similar check to the inline cpumask_of_node() in numa.h.
+
+Signed-off-by: Zhengyuan Liu <liuzhengyuan@tj.kylinos.cn>
+Reviewed-by: Gavin Shan <gshan@redhat.com>
+Link: https://lore.kernel.org/r/20200921023936.21846-1-liuzhengyuan@tj.kylinos.cn
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/numa.h | 3 +++
+ arch/arm64/mm/numa.c | 6 +++++-
+ 2 files changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
+index 626ad01e83bf0..dd870390d639f 100644
+--- a/arch/arm64/include/asm/numa.h
++++ b/arch/arm64/include/asm/numa.h
+@@ -25,6 +25,9 @@ const struct cpumask *cpumask_of_node(int node);
+ /* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+ static inline const struct cpumask *cpumask_of_node(int node)
+ {
++ if (node == NUMA_NO_NODE)
++ return cpu_all_mask;
++
+ return node_to_cpumask_map[node];
+ }
+ #endif
+diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
+index 73f8b49d485c2..88e51aade0da0 100644
+--- a/arch/arm64/mm/numa.c
++++ b/arch/arm64/mm/numa.c
+@@ -46,7 +46,11 @@ EXPORT_SYMBOL(node_to_cpumask_map);
+ */
+ const struct cpumask *cpumask_of_node(int node)
+ {
+- if (WARN_ON(node >= nr_node_ids))
++
++ if (node == NUMA_NO_NODE)
++ return cpu_all_mask;
++
++ if (WARN_ON(node < 0 || node >= nr_node_ids))
+ return cpu_none_mask;
+
+ if (WARN_ON(node_to_cpumask_map[node] == NULL))
+--
+2.27.0
+
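The WARN fires because a signed sentinel is compared against an unsigned
count. A minimal userspace sketch (not kernel code) of both the type
promotion and the guard clause the fix adds:

  #include <stdio.h>

  #define NUMA_NO_NODE (-1)

  int main(void)
  {
          int node = NUMA_NO_NODE;
          unsigned int nr_node_ids = 4;

          /* -1 is converted to unsigned for the comparison, so the bounds
           * check fires even though the caller just meant "no node". */
          if (node >= nr_node_ids)
                  printf("WARN: node %d treated as out of range\n", node);

          /* The fix handles the sentinel first, before any array access. */
          if (node == NUMA_NO_NODE)
                  printf("return cpu_all_mask\n");

          return 0;
  }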
--- /dev/null
+From b3fd11391e8bc10b2065daf7ceb7046d58e55a4e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 29 Aug 2020 14:00:16 +0100
+Subject: arm64: topology: Stop using MPIDR for topology information
+
+From: Valentin Schneider <valentin.schneider@arm.com>
+
+[ Upstream commit 3102bc0e6ac752cc5df896acb557d779af4d82a1 ]
+
+In the absence of ACPI or DT topology data, we fall back to haphazardly
+decoding *something* out of MPIDR. Sadly, the contents of that register are
+mostly unusable due to the implementation leniency and things like Aff0
+having to be capped to 15 (despite being encoded on 8 bits).
+
+Consider a simple system with a single package of 32 cores, all under the
+same LLC. We ought to be shoving them in the same core_sibling mask, but
+MPIDR is going to look like:
+
+ | CPU | 0 | ... | 15 | 16 | ... | 31 |
+ |------+---+-----+----+----+-----+----+
+ | Aff0 | 0 | ... | 15 | 0 | ... | 15 |
+ | Aff1 | 0 | ... | 0 | 1 | ... | 1 |
+ | Aff2 | 0 | ... | 0 | 0 | ... | 0 |
+
+Which will eventually yield
+
+ core_sibling(0-15) == 0-15
+ core_sibling(16-31) == 16-31
+
+NUMA woes
+=========
+
+If we try to play games with this and set up NUMA boundaries within those
+groups of 16 cores via e.g. QEMU:
+
+ # Node0: 0-9; Node1: 10-19
+ $ qemu-system-aarch64 <blah> \
+ -smp 20 -numa node,cpus=0-9,nodeid=0 -numa node,cpus=10-19,nodeid=1
+
+The scheduler's MC domain (all CPUs with same LLC) is going to be built via
+
+ arch_topology.c::cpu_coregroup_mask()
+
+In there we try to figure out a sensible mask out of the topology
+information we have. In short, here we'll pick the smallest of NUMA or
+core sibling mask.
+
+ node_mask(CPU9) == 0-9
+ core_sibling(CPU9) == 0-15
+
+MC mask for CPU9 will thus be 0-9, not a problem.
+
+ node_mask(CPU10) == 10-19
+ core_sibling(CPU10) == 0-15
+
+MC mask for CPU10 will thus be 10-19, not a problem.
+
+ node_mask(CPU16) == 10-19
+ core_sibling(CPU16) == 16-19
+
+MC mask for CPU16 will thus be 16-19... Uh oh. CPUs 16-19 are in two
+different unique MC spans, and the scheduler has no idea what to make of
+that. That triggers the WARN_ON() added by commit
+
+ ccf74128d66c ("sched/topology: Assert non-NUMA topology masks don't (partially) overlap")
+
+Fixing MPIDR-derived topology
+=============================
+
+We could try to come up with some cleverer scheme to figure out which of
+the available masks to pick, but really if one of those masks resulted from
+MPIDR then it should be discarded because it's bound to be bogus.
+
+I was hoping to give MPIDR a chance for SMT, to figure out which threads are
+in the same core using Aff1-3 as core ID, but Sudeep and Robin pointed out
+to me that there are systems out there where *all* cores have non-zero
+values in their higher affinity fields (e.g. RK3288 has "5" in all of its
+cores' MPIDR.Aff1), which would expose a bogus core ID to userspace.
+
+Stop using MPIDR for topology information. When no other source of topology
+information is available, mark each CPU as its own core and its NUMA node
+as its LLC domain.
+
+Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
+Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
+Link: https://lore.kernel.org/r/20200829130016.26106-1-valentin.schneider@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kernel/topology.c | 32 +++++++++++++++++---------------
+ 1 file changed, 17 insertions(+), 15 deletions(-)
+
+diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
+index 0801a0f3c156a..ff1dd1dbfe641 100644
+--- a/arch/arm64/kernel/topology.c
++++ b/arch/arm64/kernel/topology.c
+@@ -36,21 +36,23 @@ void store_cpu_topology(unsigned int cpuid)
+ if (mpidr & MPIDR_UP_BITMASK)
+ return;
+
+- /* Create cpu topology mapping based on MPIDR. */
+- if (mpidr & MPIDR_MT_BITMASK) {
+- /* Multiprocessor system : Multi-threads per core */
+- cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+- cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+- cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
+- MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
+- } else {
+- /* Multiprocessor system : Single-thread per core */
+- cpuid_topo->thread_id = -1;
+- cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+- cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
+- MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
+- MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
+- }
++ /*
++ * This would be the place to create cpu topology based on MPIDR.
++ *
++ * However, it cannot be trusted to depict the actual topology; some
++ * pieces of the architecture enforce an artificial cap on Aff0 values
++ * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
++ * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
++ * having absolutely no relationship to the actual underlying system
++ * topology, and cannot be reasonably used as core / package ID.
++ *
++ * If the MT bit is set, Aff0 *could* be used to define a thread ID, but
++ * we still wouldn't be able to obtain a sane core ID. This means we
++ * need to entirely ignore MPIDR for any topology deduction.
++ */
++ cpuid_topo->thread_id = -1;
++ cpuid_topo->core_id = cpuid;
++ cpuid_topo->package_id = cpu_to_node(cpuid);
+
+ pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
+ cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
+--
+2.27.0
+
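For reference, the affinity fields the old fallback decoded sit at fixed
bit positions in MPIDR_EL1 (Aff0 bits 7:0, Aff1 15:8, Aff2 23:16, Aff3
39:32). A small userspace sketch, using a made-up MPIDR value for the
wrap-around case described above (CPU 20 of a 32-core package where Aff0
is capped at 15):

  #include <stdint.h>
  #include <stdio.h>

  #define MPIDR_AFF(mpidr, level) \
          ((unsigned int)(((mpidr) >> ((level) == 3 ? 32 : 8 * (level))) & 0xff))

  int main(void)
  {
          uint64_t mpidr = (1ULL << 8) | 4;       /* Aff1 = 1, Aff0 = 4 */

          printf("Aff0=%u Aff1=%u Aff2=%u Aff3=%u\n",
                 MPIDR_AFF(mpidr, 0), MPIDR_AFF(mpidr, 1),
                 MPIDR_AFF(mpidr, 2), MPIDR_AFF(mpidr, 3));

          /* The old fallback folded Aff1..Aff3 into the package ID, so CPUs
           * 0-15 and 16-31 of one package end up in different "packages". */
          printf("old package_id guess: %u\n", MPIDR_AFF(mpidr, 1));
          return 0;
  }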
--- /dev/null
+From dc1008eb36c121cec7621c9bd4e984e9ac3e6115 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Aug 2020 00:24:20 +0530
+Subject: ASoC: AMD: Clean kernel log from deferred probe error messages
+
+From: Akshu Agrawal <akshu.agrawal@amd.com>
+
+[ Upstream commit f7660445c8e7fda91e8b944128554249d886b1d4 ]
+
+While the driver waits for the DAIs to be probed and retries probing,
+log the messages at debug level instead of error level.
+
+Signed-off-by: Akshu Agrawal <akshu.agrawal@amd.com>
+Link: https://lore.kernel.org/r/20200826185454.5545-1-akshu.agrawal@amd.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/amd/acp3x-rt5682-max9836.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/sound/soc/amd/acp3x-rt5682-max9836.c b/sound/soc/amd/acp3x-rt5682-max9836.c
+index 406526e79af34..1a4e8ca0f99c2 100644
+--- a/sound/soc/amd/acp3x-rt5682-max9836.c
++++ b/sound/soc/amd/acp3x-rt5682-max9836.c
+@@ -472,12 +472,17 @@ static int acp3x_probe(struct platform_device *pdev)
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret) {
+- dev_err(&pdev->dev,
++ if (ret != -EPROBE_DEFER)
++ dev_err(&pdev->dev,
+ "devm_snd_soc_register_card(%s) failed: %d\n",
+ card->name, ret);
+- return ret;
++ else
++ dev_dbg(&pdev->dev,
++ "devm_snd_soc_register_card(%s) probe deferred: %d\n",
++ card->name, ret);
+ }
+- return 0;
++
++ return ret;
+ }
+
+ static const struct acpi_device_id acp3x_audio_acpi_match[] = {
+--
+2.27.0
+
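On kernels that have it (added around v5.9), the same demotion can be
written more compactly with the dev_err_probe() helper; this is only an
alternative sketch of the pattern, not part of the patch:

  ret = devm_snd_soc_register_card(&pdev->dev, card);
  if (ret)
          /* logs at debug level for -EPROBE_DEFER and at error level
           * otherwise, and records the deferral reason for debugfs */
          return dev_err_probe(&pdev->dev, ret,
                               "devm_snd_soc_register_card(%s) failed\n",
                               card->name);

  return 0;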
--- /dev/null
+From 7d489482ee2b04ce8ed2192f66389024fd520bbe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Aug 2020 16:50:35 -0700
+Subject: ASoC: SOF: fix a runtime pm issue in SOF when HDMI codec doesn't work
+
+From: Rander Wang <rander.wang@intel.com>
+
+[ Upstream commit 6c63c954e1c52f1262f986f36d95f557c6f8fa94 ]
+
+When hda_codec_probe() doesn't initialize the audio component, we disable
+the codec and keep going. However, the resources are not released. The
+child_count of the SOF device is increased in snd_hdac_ext_bus_device_init
+but is not decreased in the error case, so SOF can't get suspended.
+
+snd_hdac_ext_bus_device_exit is invoked by the HDA framework when it
+gets an error. Now copy this behavior to release the resources and decrease
+the SOF device child_count, so the SOF device can be released.
+
+Signed-off-by: Rander Wang <rander.wang@intel.com>
+Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Reviewed-by: Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
+Signed-off-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Link: https://lore.kernel.org/r/20200825235040.1586478-3-ranjani.sridharan@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/sof/intel/hda-codec.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
+index 2c5c451fa19d7..c475955c6eeba 100644
+--- a/sound/soc/sof/intel/hda-codec.c
++++ b/sound/soc/sof/intel/hda-codec.c
+@@ -151,7 +151,7 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
+ if (!hdev->bus->audio_component) {
+ dev_dbg(sdev->dev,
+ "iDisp hw present but no driver\n");
+- return -ENOENT;
++ goto error;
+ }
+ hda_priv->need_display_power = true;
+ }
+@@ -174,7 +174,7 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
+ * other return codes without modification
+ */
+ if (ret == 0)
+- ret = -ENOENT;
++ goto error;
+ }
+
+ return ret;
+--
+2.27.0
+
--- /dev/null
+From cd0bd5a919352f6bb7d997e50b1cf77f3afc7006 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2020 07:28:19 +0200
+Subject: ata: sata_nv: Fix retrieving of active qcs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sascha Hauer <s.hauer@pengutronix.de>
+
+[ Upstream commit 8e4c309f9f33b76c09daa02b796ef87918eee494 ]
+
+ata_qc_complete_multiple() has to be called with the tags physically
+active, that is, with the hw tag of the internal command at bit 0.
+ap->qc_active tracks the same tag at bit ATA_TAG_INTERNAL instead, so
+call ata_qc_get_active() to fix that up. This is done in the vein of
+8385d756e114 ("libata: Fix retrieving of active qcs").
+
+Fixes: 28361c403683 ("libata: add extra internal command")
+Tested-by: Pali Rohár <pali@kernel.org>
+Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/sata_nv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
+index eb9dc14e5147a..20190f66ced98 100644
+--- a/drivers/ata/sata_nv.c
++++ b/drivers/ata/sata_nv.c
+@@ -2100,7 +2100,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
+ pp->dhfis_bits &= ~done_mask;
+ pp->dmafis_bits &= ~done_mask;
+ pp->sdbfis_bits |= done_mask;
+- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
++ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
+
+ if (!ap->qc_active) {
+ DPRINTK("over\n");
+--
+2.27.0
+
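A standalone sketch of the remapping ata_qc_get_active() performs,
assuming ATA_TAG_INTERNAL is bit 32 as introduced by the Fixes commit:
qc_active tracks the internal command at that bit, while the hardware
reports it at tag 0, so XOR-ing done_mask against the raw value leaves a
stale bit behind:

  #include <stdint.h>
  #include <stdio.h>

  #define ATA_TAG_INTERNAL 32     /* assumed value, see note above */

  static uint64_t qc_get_active(uint64_t qc_active)
  {
          /* move the internal-command tag down to bit 0, hw numbering */
          if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
                  qc_active |= 1ULL << 0;
                  qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
          }
          return qc_active;
  }

  int main(void)
  {
          uint64_t qc_active = 1ULL << ATA_TAG_INTERNAL; /* internal cmd pending */
          uint64_t done_mask = 1ULL << 0;                /* hw completed tag 0 */

          printf("raw:      %#llx\n", (unsigned long long)(qc_active ^ done_mask));
          printf("remapped: %#llx\n", (unsigned long long)(qc_get_active(qc_active) ^ done_mask));
          return 0;
  }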
--- /dev/null
+From 2d6a14062f1591dea9eeabb5cf52d26532badbf7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jun 2020 20:29:01 +0300
+Subject: ath10k: fix retry packets update in station dump
+
+From: Venkateswara Naralasetty <vnaralas@codeaurora.org>
+
+[ Upstream commit 67b927f9820847d30e97510b2f00cd142b9559b6 ]
+
+When tx status is enabled, the retry count is updated from the tx
+completion status. This does not work as expected due to a firmware
+limitation: the firmware cannot provide per-MSDU rate statistics from
+the tx completion status. Due to this, the tx retry count is always 0
+in the station dump.
+
+Fix this issue by updating the retry packet count from the per-peer
+statistics. This patch will not break SDIO devices since, for them, this
+retry count is already updated from the peer statistics.
+
+Tested-on: QCA9984 PCI 10.4-3.6-00104
+Tested-on: QCA9882 PCI 10.2.4-1.0-00047
+
+Signed-off-by: Venkateswara Naralasetty <vnaralas@codeaurora.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Link: https://lore.kernel.org/r/1591856446-26977-1-git-send-email-vnaralas@codeaurora.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath10k/htt_rx.c | 8 +++++---
+ drivers/net/wireless/ath/ath10k/mac.c | 5 +++--
+ 2 files changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
+index 215ade6faf328..69ad4ca1a87c1 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
+@@ -3583,12 +3583,14 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
+ }
+
+ if (ar->htt.disable_tx_comp) {
+- arsta->tx_retries += peer_stats->retry_pkts;
+ arsta->tx_failed += peer_stats->failed_pkts;
+- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d tx failed %d\n",
+- arsta->tx_retries, arsta->tx_failed);
++ ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
++ arsta->tx_failed);
+ }
+
++ arsta->tx_retries += peer_stats->retry_pkts;
++ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries);
++
+ if (ath10k_debug_is_extd_tx_stats_enabled(ar))
+ ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
+ rate_idx);
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 2177e9d92bdff..03c7edf05a1d1 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -8542,12 +8542,13 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+ if (ar->htt.disable_tx_comp) {
+- sinfo->tx_retries = arsta->tx_retries;
+- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+ sinfo->tx_failed = arsta->tx_failed;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+ }
+
++ sinfo->tx_retries = arsta->tx_retries;
++ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
++
+ ath10k_mac_sta_get_peer_stats_info(ar, sta, sinfo);
+ }
+
+--
+2.27.0
+
--- /dev/null
+From 9a380c968dabb038034cc411734254de13f53436 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Aug 2020 13:46:11 +0530
+Subject: ath10k: fix VHT NSS calculation when STBC is enabled
+
+From: Sathishkumar Muruganandam <murugana@codeaurora.org>
+
+[ Upstream commit 99f41b8e43b8b4b31262adb8ac3e69088fff1289 ]
+
+When STBC is enabled, the NSTS_SU value needs to be accounted for in the
+VHT NSS calculation for the SU case.
+
+Without this fix, the 1SS + STBC case was wrongly reported as 2SS in the
+radiotap header on monitor mode captures.
+
+Tested-on: QCA9984 10.4-3.10-00047
+
+Signed-off-by: Sathishkumar Muruganandam <murugana@codeaurora.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Link: https://lore.kernel.org/r/1597392971-3897-1-git-send-email-murugana@codeaurora.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath10k/htt_rx.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
+index 69ad4ca1a87c1..a00498338b1cc 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
+@@ -949,6 +949,7 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
+ u8 preamble = 0;
+ u8 group_id;
+ u32 info1, info2, info3;
++ u32 stbc, nsts_su;
+
+ info1 = __le32_to_cpu(rxd->ppdu_start.info1);
+ info2 = __le32_to_cpu(rxd->ppdu_start.info2);
+@@ -993,11 +994,16 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
+ */
+ bw = info2 & 3;
+ sgi = info3 & 1;
++ stbc = (info2 >> 3) & 1;
+ group_id = (info2 >> 4) & 0x3F;
+
+ if (GROUP_ID_IS_SU_MIMO(group_id)) {
+ mcs = (info3 >> 4) & 0x0F;
+- nss = ((info2 >> 10) & 0x07) + 1;
++ nsts_su = ((info2 >> 10) & 0x07);
++ if (stbc)
++ nss = (nsts_su >> 2) + 1;
++ else
++ nss = (nsts_su + 1);
+ } else {
+ /* Hardware doesn't decode VHT-SIG-B into Rx descriptor
+ * so it's impossible to decode MCS. Also since
+--
+2.27.0
+
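A userspace sketch of the bit handling in the patch above for the
1SS + STBC case from the commit message (field offsets follow the diff,
not a spec quotation):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint32_t info2 = (1u << 10) | (1u << 3); /* nsts_su = 1, stbc = 1 */
          uint32_t stbc = (info2 >> 3) & 1;
          uint32_t nsts_su = (info2 >> 10) & 0x07;

          uint32_t nss_old = nsts_su + 1;                       /* old code  */
          uint32_t nss_new = stbc ? (nsts_su >> 2) + 1 : nsts_su + 1;

          printf("old nss=%u, new nss=%u\n", nss_old, nss_new); /* 2 vs 1 */
          return 0;
  }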
--- /dev/null
+From 84c6ebc6e4ef7d4033a816d8368786c884d253d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Aug 2020 18:17:08 +0300
+Subject: ath10k: start recovery process when payload length exceeds max htc
+ length for sdio
+
+From: Wen Gong <wgong@codeaurora.org>
+
+[ Upstream commit 2fd3c8f34d08af0a6236085f9961866ad92ef9ec ]
+
+When random transfer failures are simulated for sdio writes and reads,
+"payload length exceeds max htc length" errors happen, and sometimes
+recovery only starts much later.
+
+Test steps:
+1. Add config and update kernel:
+CONFIG_FAIL_MMC_REQUEST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+
+2. Run simulate fail:
+cd /sys/kernel/debug/mmc1/fail_mmc_request
+echo 10 > probability
+echo 10 > times # repeat until hitting issues
+
+3. "payload length exceeds max htc length" errors show up:
+[ 199.935506] ath10k_sdio mmc1:0001:1: payload length 57005 exceeds max htc length: 4088
+....
+[ 264.990191] ath10k_sdio mmc1:0001:1: payload length 57005 exceeds max htc length: 4088
+
+4. After some time, such as 60 seconds, recovery starts, triggered by a
+wmi command timeout for the periodic scan:
+[ 269.229232] ieee80211 phy0: Hardware restart was requested
+[ 269.734693] ath10k_sdio mmc1:0001:1: device successfully recovered
+
+The simulated sdio failure is not a real sdio transfer failure; it only
+sets an error status in mmc_should_fail_request after the transfer ends,
+while the transfer itself actually succeeds. sdio_io_rw_ext_helper then
+returns the error status and stops transferring the remaining data. For
+example, if the real RX length is 286 bytes, it is split into 2 chunks in
+sdio_io_rw_ext_helper, one of 256 bytes and one of the remaining 30 bytes.
+If the first 256 bytes get an error status from mmc_should_fail_request,
+the remaining 30 bytes are not read in this RX operation. When the next
+RX arrives, those stale 30 bytes are treated as the header of the new
+read, and their first 4 bytes are interpreted as the lookahead. Since
+they are not a real lookahead, the length derived from them is wrong and
+sometimes exceeds the max htc length of 4088. Once that happens, the
+buffer chain no longer matches between the firmware and ath10k, so
+recovery needs to start as soon as possible. Currently recovery is only
+started by a wmi command timeout, but that comes much later, for example
+60+ seconds later via the periodic scan, and even longer if there is no
+periodic scan.
+
+Starting recovery as soon as "payload length exceeds max htc length"
+happens is therefore reasonable.
+
+This patch only affects sdio chips.
+
+Tested with QCA6174 SDIO with firmware WLAN.RMH.4.4.1-00029.
+
+Signed-off-by: Wen Gong <wgong@codeaurora.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Link: https://lore.kernel.org/r/20200108031957.22308-3-wgong@codeaurora.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath10k/sdio.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
+index 63f882c690bff..0841e69b10b1a 100644
+--- a/drivers/net/wireless/ath/ath10k/sdio.c
++++ b/drivers/net/wireless/ath/ath10k/sdio.c
+@@ -557,6 +557,10 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
+ le16_to_cpu(htc_hdr->len),
+ ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
+ ret = -ENOMEM;
++
++ queue_work(ar->workqueue, &ar->restart_work);
++ ath10k_warn(ar, "exceeds length, start recovery\n");
++
+ goto err;
+ }
+
+--
+2.27.0
+
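The arithmetic behind the 286-byte example above, as a tiny sketch:

  #include <stdio.h>

  int main(void)
  {
          int rx_len = 286, block_size = 256;

          printf("full blocks: %d, leftover bytes: %d\n",
                 rx_len / block_size, rx_len % block_size);
          /* If the 256-byte block is flagged as failed, the 30 leftover
           * bytes stay unread; on the next RX their first 4 bytes get
           * parsed as a bogus lookahead, hence the oversized length. */
          return 0;
  }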
--- /dev/null
+From fc59a7c19797eb397cc1174afbfe119cec5558eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Sep 2020 20:15:34 +0300
+Subject: ath11k: change to disable softirqs for ath11k_regd_update to solve
+ deadlock
+
+From: Wen Gong <wgong@codeaurora.org>
+
+[ Upstream commit df648808c6b9989555e247530d8ca0ad0094b361 ]
+
+While base_lock is held by ath11k_regd_update(), the softirq handling
+WMI_REG_CHAN_LIST_CC_EVENTID may arrive and also needs to acquire the same
+spin lock, so a deadlock can happen. Change the code to disable softirqs
+while taking the lock to solve it.
+
+[ 235.576990] ================================
+[ 235.576991] WARNING: inconsistent lock state
+[ 235.576993] 5.9.0-rc5-wt-ath+ #196 Not tainted
+[ 235.576994] --------------------------------
+[ 235.576995] inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage.
+[ 235.576997] kworker/u16:1/98 [HC0[0]:SC0[0]:HE1:SE1] takes:
+[ 235.576998] ffff9655f75cad98 (&ab->base_lock){+.?.}-{2:2}, at: ath11k_regd_update+0x28/0x1d0 [ath11k]
+[ 235.577009] {IN-SOFTIRQ-W} state was registered at:
+[ 235.577013] __lock_acquire+0x219/0x6e0
+[ 235.577015] lock_acquire+0xb6/0x270
+[ 235.577018] _raw_spin_lock+0x2c/0x70
+[ 235.577023] ath11k_reg_chan_list_event.isra.0+0x10d/0x1e0 [ath11k]
+[ 235.577028] ath11k_wmi_tlv_op_rx+0x3c3/0x560 [ath11k]
+[ 235.577033] ath11k_htc_rx_completion_handler+0x207/0x370 [ath11k]
+[ 235.577039] ath11k_ce_recv_process_cb+0x15e/0x1e0 [ath11k]
+[ 235.577041] ath11k_pci_ce_tasklet+0x10/0x30 [ath11k_pci]
+[ 235.577043] tasklet_action_common.constprop.0+0xd4/0xf0
+[ 235.577045] __do_softirq+0xc9/0x482
+[ 235.577046] asm_call_on_stack+0x12/0x20
+[ 235.577048] do_softirq_own_stack+0x49/0x60
+[ 235.577049] irq_exit_rcu+0x9a/0xd0
+[ 235.577050] common_interrupt+0xa1/0x190
+[ 235.577052] asm_common_interrupt+0x1e/0x40
+[ 235.577053] cpu_idle_poll.isra.0+0x2e/0x60
+[ 235.577055] do_idle+0x5f/0xe0
+[ 235.577056] cpu_startup_entry+0x14/0x20
+[ 235.577058] start_kernel+0x443/0x464
+[ 235.577060] secondary_startup_64+0xa4/0xb0
+[ 235.577061] irq event stamp: 432035
+[ 235.577063] hardirqs last enabled at (432035): [<ffffffff968d12b4>] _raw_spin_unlock_irqrestore+0x34/0x40
+[ 235.577064] hardirqs last disabled at (432034): [<ffffffff968d10d3>] _raw_spin_lock_irqsave+0x63/0x80
+[ 235.577066] softirqs last enabled at (431998): [<ffffffff967115c1>] inet6_fill_ifla6_attrs+0x3f1/0x430
+[ 235.577067] softirqs last disabled at (431996): [<ffffffff9671159f>] inet6_fill_ifla6_attrs+0x3cf/0x430
+[ 235.577068]
+[ 235.577068] other info that might help us debug this:
+[ 235.577069] Possible unsafe locking scenario:
+[ 235.577069]
+[ 235.577070] CPU0
+[ 235.577070] ----
+[ 235.577071] lock(&ab->base_lock);
+[ 235.577072] <Interrupt>
+[ 235.577073] lock(&ab->base_lock);
+[ 235.577074]
+[ 235.577074] *** DEADLOCK ***
+[ 235.577074]
+[ 235.577075] 3 locks held by kworker/u16:1/98:
+[ 235.577076] #0: ffff9655f75b1d48 ((wq_completion)ath11k_qmi_driver_event){+.+.}-{0:0}, at: process_one_work+0x1d3/0x5d0
+[ 235.577079] #1: ffffa33cc02f3e70 ((work_completion)(&ab->qmi.event_work)){+.+.}-{0:0}, at: process_one_work+0x1d3/0x5d0
+[ 235.577081] #2: ffff9655f75cad50 (&ab->core_lock){+.+.}-{3:3}, at: ath11k_core_qmi_firmware_ready.part.0+0x4e/0x160 [ath11k]
+[ 235.577087]
+[ 235.577087] stack backtrace:
+[ 235.577088] CPU: 3 PID: 98 Comm: kworker/u16:1 Not tainted 5.9.0-rc5-wt-ath+ #196
+[ 235.577089] Hardware name: Intel(R) Client Systems NUC8i7HVK/NUC8i7HVB, BIOS HNKBLi70.86A.0049.2018.0801.1601 08/01/2018
+[ 235.577095] Workqueue: ath11k_qmi_driver_event ath11k_qmi_driver_event_work [ath11k]
+[ 235.577096] Call Trace:
+[ 235.577100] dump_stack+0x77/0xa0
+[ 235.577102] mark_lock_irq.cold+0x15/0x3c
+[ 235.577104] mark_lock+0x1d7/0x540
+[ 235.577105] mark_usage+0xc7/0x140
+[ 235.577107] __lock_acquire+0x219/0x6e0
+[ 235.577108] ? sched_clock_cpu+0xc/0xb0
+[ 235.577110] lock_acquire+0xb6/0x270
+[ 235.577116] ? ath11k_regd_update+0x28/0x1d0 [ath11k]
+[ 235.577118] ? atomic_notifier_chain_register+0x2d/0x40
+[ 235.577120] _raw_spin_lock+0x2c/0x70
+[ 235.577125] ? ath11k_regd_update+0x28/0x1d0 [ath11k]
+[ 235.577130] ath11k_regd_update+0x28/0x1d0 [ath11k]
+[ 235.577136] __ath11k_mac_register+0x3fb/0x480 [ath11k]
+[ 235.577141] ath11k_mac_register+0x119/0x180 [ath11k]
+[ 235.577146] ath11k_core_pdev_create+0x17/0xe0 [ath11k]
+[ 235.577150] ath11k_core_qmi_firmware_ready.part.0+0x65/0x160 [ath11k]
+[ 235.577155] ath11k_qmi_driver_event_work+0x1c5/0x230 [ath11k]
+[ 235.577158] process_one_work+0x265/0x5d0
+[ 235.577160] worker_thread+0x49/0x300
+[ 235.577161] ? process_one_work+0x5d0/0x5d0
+[ 235.577163] kthread+0x135/0x150
+[ 235.577164] ? kthread_create_worker_on_cpu+0x60/0x60
+[ 235.577166] ret_from_fork+0x22/0x30
+
+Tested-on: QCA6390 hw2.0 PCI WLAN.HST.1.0.1-01740-QCAHSTSWPLZ_V2_TO_X86-1
+
+Signed-off-by: Wen Gong <wgong@codeaurora.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Link: https://lore.kernel.org/r/1601399736-3210-7-git-send-email-kvalo@codeaurora.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath11k/reg.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
+index 7c9dc91cc48a9..c79a7c7eb56ee 100644
+--- a/drivers/net/wireless/ath/ath11k/reg.c
++++ b/drivers/net/wireless/ath/ath11k/reg.c
+@@ -206,7 +206,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
+ ab = ar->ab;
+ pdev_id = ar->pdev_idx;
+
+- spin_lock(&ab->base_lock);
++ spin_lock_bh(&ab->base_lock);
+
+ if (init) {
+ /* Apply the regd received during init through
+@@ -227,7 +227,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
+
+ if (!regd) {
+ ret = -EINVAL;
+- spin_unlock(&ab->base_lock);
++ spin_unlock_bh(&ab->base_lock);
+ goto err;
+ }
+
+@@ -238,7 +238,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
+ if (regd_copy)
+ ath11k_copy_regd(regd, regd_copy);
+
+- spin_unlock(&ab->base_lock);
++ spin_unlock_bh(&ab->base_lock);
+
+ if (!regd_copy) {
+ ret = -ENOMEM;
+--
+2.27.0
+
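A minimal illustrative fragment (not the actual ath11k code) of the
locking rule the fix follows: when a spinlock is also taken from softirq
context, process-context users must take it with the _bh variant so the
softirq cannot run on the same CPU while the lock is held:

  #include <linux/spinlock.h>

  static DEFINE_SPINLOCK(demo_base_lock);
  static int demo_shared_state;

  /* Called from softirq context, e.g. while processing a WMI event. */
  static void demo_wmi_event_handler(void)
  {
          spin_lock(&demo_base_lock);     /* softirq context: plain lock is fine */
          demo_shared_state++;
          spin_unlock(&demo_base_lock);
  }

  /* Called from process context, e.g. during a regulatory domain update. */
  static void demo_regd_update(void)
  {
          spin_lock_bh(&demo_base_lock);  /* disables softirqs locally; a plain
                                           * spin_lock() here could be interrupted
                                           * by the handler above and deadlock */
          demo_shared_state++;
          spin_unlock_bh(&demo_base_lock);
  }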
--- /dev/null
+From f5bc0c6909d081519f48eaaa3b91a36727136d5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Sep 2020 13:51:12 +0300
+Subject: ath11k: fix warning caused by lockdep_assert_held
+
+From: Carl Huang <cjhuang@codeaurora.org>
+
+[ Upstream commit 2f588660e34a982377109872757f1b99d7748d21 ]
+
+Fix a warning caused by lockdep_assert_held() when CONFIG_LOCKDEP is enabled.
+
+[ 271.940647] WARNING: CPU: 6 PID: 0 at drivers/net/wireless/ath/ath11k/hal.c:818 ath11k_hal_srng_access_begin+0x31/0x40 [ath11k]
+[ 271.940655] Modules linked in: qrtr_mhi qrtr ns ath11k_pci mhi ath11k qmi_helpers nvme nvme_core
+[ 271.940675] CPU: 6 PID: 0 Comm: swapper/6 Kdump: loaded Tainted: G W 5.9.0-rc5-kalle-bringup-wt-ath+ #4
+[ 271.940682] Hardware name: Dell Inc. Inspiron 7590/08717F, BIOS 1.3.0 07/22/2019
+[ 271.940698] RIP: 0010:ath11k_hal_srng_access_begin+0x31/0x40 [ath11k]
+[ 271.940708] Code: 48 89 f3 85 c0 75 11 48 8b 83 a8 00 00 00 8b 00 89 83 b0 00 00 00 5b c3 48 8d 7e 58 be ff ff ff ff e8 53 24 ec fa 85 c0 75 dd <0f> 0b eb d9 90 66 2e 0f 1f 84 00 00 00 00 00 55 53 48 89 f3 8b 35
+[ 271.940718] RSP: 0018:ffffbdf0c0230df8 EFLAGS: 00010246
+[ 271.940727] RAX: 0000000000000000 RBX: ffffa12b34e67680 RCX: ffffa12b57a0d800
+[ 271.940735] RDX: 0000000000000000 RSI: 00000000ffffffff RDI: ffffa12b34e676d8
+[ 271.940742] RBP: ffffa12b34e60000 R08: 0000000000000001 R09: 0000000000000001
+[ 271.940753] R10: 0000000000000001 R11: 0000000000000046 R12: 0000000000000000
+[ 271.940763] R13: ffffa12b34e60000 R14: ffffa12b34e60000 R15: 0000000000000000
+[ 271.940774] FS: 0000000000000000(0000) GS:ffffa12b5a400000(0000) knlGS:0000000000000000
+[ 271.940788] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 271.940798] CR2: 00007f8bef282008 CR3: 00000001f4224004 CR4: 00000000003706e0
+[ 271.940805] Call Trace:
+[ 271.940813] <IRQ>
+[ 271.940835] ath11k_dp_tx_completion_handler+0x9e/0x950 [ath11k]
+[ 271.940847] ? lock_acquire+0xba/0x3b0
+[ 271.940876] ath11k_dp_service_srng+0x5a/0x2e0 [ath11k]
+[ 271.940893] ath11k_pci_ext_grp_napi_poll+0x1e/0x80 [ath11k_pci]
+[ 271.940908] net_rx_action+0x283/0x4f0
+[ 271.940931] __do_softirq+0xcb/0x499
+[ 271.940950] asm_call_on_stack+0x12/0x20
+[ 271.940963] </IRQ>
+[ 271.940979] do_softirq_own_stack+0x4d/0x60
+[ 271.940991] irq_exit_rcu+0xb0/0xc0
+[ 271.941001] common_interrupt+0xce/0x190
+[ 271.941014] asm_common_interrupt+0x1e/0x40
+[ 271.941026] RIP: 0010:cpuidle_enter_state+0x115/0x500
+
+Tested-on: QCA6390 hw2.0 PCI WLAN.HST.1.0.1-01740-QCAHSTSWPLZ_V2_TO_X86-1
+
+Signed-off-by: Carl Huang <cjhuang@codeaurora.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Link: https://lore.kernel.org/r/1601463073-12106-5-git-send-email-kvalo@codeaurora.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath11k/dp_tx.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
+index 1af76775b1a87..99cff8fb39773 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
+@@ -514,6 +514,8 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
+ u32 msdu_id;
+ u8 mac_id;
+
++ spin_lock_bh(&status_ring->lock);
++
+ ath11k_hal_srng_access_begin(ab, status_ring);
+
+ while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
+@@ -533,6 +535,8 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
+
+ ath11k_hal_srng_access_end(ab, status_ring);
+
++ spin_unlock_bh(&status_ring->lock);
++
+ while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
+ struct hal_wbm_release_ring *tx_status;
+ u32 desc_id;
+--
+2.27.0
+
--- /dev/null
+From 0e63accdacf869cb692e2451633c6f8ae4ce81fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Sep 2020 20:15:35 +0300
+Subject: ath11k: Use GFP_ATOMIC instead of GFP_KERNEL in
+ ath11k_dp_htt_get_ppdu_desc
+
+From: Wen Gong <wgong@codeaurora.org>
+
+[ Upstream commit 6a8be1baa9116a038cb4f6158cc10134387ca0d0 ]
+
+With SLUB debug config enabled, the crash below is seen because a
+sleeping kmem_cache_alloc (GFP_KERNEL) is being called from atomic
+context.
+
+To fix this issue, use GFP_ATOMIC instead of GFP_KERNEL for the kzalloc.
+
+[ 357.217088] BUG: sleeping function called from invalid context at mm/slab.h:498
+[ 357.217091] in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 0, name: swapper/0
+[ 357.217092] INFO: lockdep is turned off.
+[ 357.217095] CPU: 0 PID: 0 Comm: swapper/0 Tainted: G W 5.9.0-rc5-wt-ath+ #196
+[ 357.217096] Hardware name: Intel(R) Client Systems NUC8i7HVK/NUC8i7HVB, BIOS HNKBLi70.86A.0049.2018.0801.1601 08/01/2018
+[ 357.217097] Call Trace:
+[ 357.217098] <IRQ>
+[ 357.217107] ? ath11k_dp_htt_get_ppdu_desc+0xa9/0x170 [ath11k]
+[ 357.217110] dump_stack+0x77/0xa0
+[ 357.217113] ___might_sleep.cold+0xa6/0xb6
+[ 357.217116] kmem_cache_alloc_trace+0x1f2/0x270
+[ 357.217122] ath11k_dp_htt_get_ppdu_desc+0xa9/0x170 [ath11k]
+[ 357.217129] ath11k_htt_pull_ppdu_stats.isra.0+0x96/0x270 [ath11k]
+[ 357.217135] ath11k_dp_htt_htc_t2h_msg_handler+0xe7/0x1d0 [ath11k]
+[ 357.217137] ? trace_hardirqs_on+0x1c/0x100
+[ 357.217143] ath11k_htc_rx_completion_handler+0x207/0x370 [ath11k]
+[ 357.217149] ath11k_ce_recv_process_cb+0x15e/0x1e0 [ath11k]
+[ 357.217151] ? handle_irq_event+0x70/0xa8
+[ 357.217154] ath11k_pci_ce_tasklet+0x10/0x30 [ath11k_pci]
+[ 357.217157] tasklet_action_common.constprop.0+0xd4/0xf0
+[ 357.217160] __do_softirq+0xc9/0x482
+[ 357.217162] asm_call_on_stack+0x12/0x20
+[ 357.217163] </IRQ>
+[ 357.217166] do_softirq_own_stack+0x49/0x60
+[ 357.217167] irq_exit_rcu+0x9a/0xd0
+[ 357.217169] common_interrupt+0xa1/0x190
+[ 357.217171] asm_common_interrupt+0x1e/0x40
+[ 357.217173] RIP: 0010:cpu_idle_poll.isra.0+0x2e/0x60
+[ 357.217175] Code: 8b 35 26 27 74 69 e8 11 c8 3d ff e8 bc fa 42 ff e8 e7 9f 4a ff fb 65 48 8b 1c 25 80 90 01 00 48 8b 03 a8 08 74 0b eb 1c f3 90 <48> 8b 03 a8 08 75 13 8b 0
+[ 357.217177] RSP: 0018:ffffffff97403ee0 EFLAGS: 00000202
+[ 357.217178] RAX: 0000000000000001 RBX: ffffffff9742b8c0 RCX: 0000000000b890ca
+[ 357.217180] RDX: 0000000000b890ca RSI: 0000000000000001 RDI: ffffffff968d0c49
+[ 357.217181] RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000001
+[ 357.217182] R10: ffffffff9742b8c0 R11: 0000000000000046 R12: 0000000000000000
+[ 357.217183] R13: 0000000000000000 R14: 0000000000000000 R15: 0000000066fdf520
+[ 357.217186] ? cpu_idle_poll.isra.0+0x19/0x60
+[ 357.217189] do_idle+0x5f/0xe0
+[ 357.217191] cpu_startup_entry+0x14/0x20
+[ 357.217193] start_kernel+0x443/0x464
+[ 357.217196] secondary_startup_64+0xa4/0xb0
+
+Tested-on: QCA6390 hw2.0 PCI WLAN.HST.1.0.1-01740-QCAHSTSWPLZ_V2_TO_X86-1
+
+Signed-off-by: Wen Gong <wgong@codeaurora.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Link: https://lore.kernel.org/r/1601399736-3210-8-git-send-email-kvalo@codeaurora.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath11k/dp_rx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index 791d971784ce0..055c3bb61e4c5 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -1421,7 +1421,7 @@ struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+- ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
++ ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
+ if (!ppdu_info)
+ return NULL;
+
+--
+2.27.0
+
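A short illustrative fragment (again not the driver code) of the
allocation rule involved: code running in softirq context, or with a
spinlock held, must not use a sleeping GFP_KERNEL allocation:

  #include <linux/slab.h>

  struct demo_ppdu_info {
          int id;
  };

  /* Called from softirq context (e.g. an HTT event handler): GFP_KERNEL
   * may sleep waiting for memory, which triggers "BUG: sleeping function
   * called from invalid context"; GFP_ATOMIC never sleeps. */
  static struct demo_ppdu_info *demo_alloc_in_softirq(void)
  {
          return kzalloc(sizeof(struct demo_ppdu_info), GFP_ATOMIC);
  }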
--- /dev/null
+From 5a8566ce9609c30cbf147786160e9e67643816a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Sep 2020 21:30:23 -0700
+Subject: bindings: soc: ti: soc: ringacc: remove ti,dma-ring-reset-quirk
+
+From: Grygorii Strashko <grygorii.strashko@ti.com>
+
+[ Upstream commit aee123f48f387ea62002cddb46c7cb04c96628df ]
+
+Remove "ti,dma-ring-reset-quirk" DT property as proper w/a handling is
+implemented now in Ringacc driver using SoC info.
+
+Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
+Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/devicetree/bindings/soc/ti/k3-ringacc.yaml | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/Documentation/devicetree/bindings/soc/ti/k3-ringacc.yaml b/Documentation/devicetree/bindings/soc/ti/k3-ringacc.yaml
+index ae33fc957141f..c3c595e235a86 100644
+--- a/Documentation/devicetree/bindings/soc/ti/k3-ringacc.yaml
++++ b/Documentation/devicetree/bindings/soc/ti/k3-ringacc.yaml
+@@ -62,11 +62,6 @@ properties:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: TI-SCI device id of the ring accelerator
+
+- ti,dma-ring-reset-quirk:
+- $ref: /schemas/types.yaml#definitions/flag
+- description: |
+- enable ringacc/udma ring state interoperability issue software w/a
+-
+ required:
+ - compatible
+ - reg
+@@ -94,7 +89,6 @@ examples:
+ reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
+ ti,num-rings = <818>;
+ ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+- ti,dma-ring-reset-quirk;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <187>;
+ msi-parent = <&inta_main_udmass>;
+--
+2.27.0
+
--- /dev/null
+From f2b9f3995bbb9441092d1e577766bb8f36f9357f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Oct 2020 15:41:25 -0400
+Subject: block: Consider only dispatched requests for inflight statistic
+
+From: Gabriel Krisman Bertazi <krisman@collabora.com>
+
+[ Upstream commit a926c7afffcc0f2e35e6acbccb16921bacf34617 ]
+
+According to Documentation/block/stat.rst, inflight should not include
+I/O requests that are in the queue but not yet dispatched to the device,
+but blk-mq identifies as inflight any request that has a tag allocated,
+which, for queues without an elevator, happens at request allocation time
+and before it is queued in the ctx (default case in blk_mq_submit_bio).
+
+In addition, the current behavior differs between queues with an elevator and
+queues without one, since for the former the driver tag is allocated at
+dispatch time. A more precise approach would be to only consider
+requests with state MQ_RQ_IN_FLIGHT.
+
+This effectively reverts commit 6131837b1de6 ("blk-mq: count allocated
+but not started requests in iostats inflight") to consolidate blk-mq
+behavior with itself (elevator case) and with original documentation,
+but it differs from the behavior used by the legacy path.
+
+This version differs from v1 by using blk_mq_rq_state to access the
+state attribute. Avoid using blk_mq_request_started, which was
+suggested, since we don't want to include MQ_RQ_COMPLETE.
+
+Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com>
+Cc: Omar Sandoval <osandov@fb.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 94a53d779c12b..ca2fdb58e7af5 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -105,7 +105,7 @@ static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
+ {
+ struct mq_inflight *mi = priv;
+
+- if (rq->part == mi->part)
++ if (rq->part == mi->part && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
+ mi->inflight[rq_data_dir(rq)]++;
+
+ return true;
+--
+2.27.0
+
--- /dev/null
+From 4fb51538df618e94f0647598387cdddfda76c271 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Oct 2020 05:10:51 -0400
+Subject: bnxt_en: Log unknown link speed appropriately.
+
+From: Michael Chan <michael.chan@broadcom.com>
+
+[ Upstream commit 8eddb3e7ce124dd6375d3664f1aae13873318b0f ]
+
+If the VF virtual link is set to always enabled, the speed may be
+unknown when the physical link is down. The driver currently logs
+the link speed as 4294967295 Mbps, which is SPEED_UNKNOWN. Modify
+the link-up log message to say "speed unknown", which makes more sense.
+
+Reviewed-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Reviewed-by: Edwin Peer <edwin.peer@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Link: https://lore.kernel.org/r/1602493854-29283-7-git-send-email-michael.chan@broadcom.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 7b5d521924872..b8d534b719d4f 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -8735,6 +8735,11 @@ static void bnxt_report_link(struct bnxt *bp)
+ u16 fec;
+
+ netif_carrier_on(bp->dev);
++ speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
++ if (speed == SPEED_UNKNOWN) {
++ netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
++ return;
++ }
+ if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
+ duplex = "full";
+ else
+@@ -8747,7 +8752,6 @@ static void bnxt_report_link(struct bnxt *bp)
+ flow_ctrl = "ON - receive";
+ else
+ flow_ctrl = "none";
+- speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+ netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
+ speed, duplex, flow_ctrl);
+ if (bp->flags & BNXT_FLAG_EEE_CAP)
+--
+2.27.0
+
--- /dev/null
+From d84552ed132315a2a09c28447f152e8239758b1f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Sep 2020 10:57:02 -0700
+Subject: bpf: Permit map_ptr arithmetic with opcode add and offset 0
+
+From: Yonghong Song <yhs@fb.com>
+
+[ Upstream commit 7c6967326267bd5c0dded0a99541357d70dd11ac ]
+
+Commit 41c48f3a98231 ("bpf: Support access
+to bpf map fields") added support to access map fields
+with CORE support. For example,
+
+ struct bpf_map {
+ __u32 max_entries;
+ } __attribute__((preserve_access_index));
+
+ struct bpf_array {
+ struct bpf_map map;
+ __u32 elem_size;
+ } __attribute__((preserve_access_index));
+
+ struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 4);
+ __type(key, __u32);
+ __type(value, __u32);
+ } m_array SEC(".maps");
+
+ SEC("cgroup_skb/egress")
+ int cg_skb(void *ctx)
+ {
+ struct bpf_array *array = (struct bpf_array *)&m_array;
+
+ /* .. array->map.max_entries .. */
+ }
+
+In the kernel, bpf_htab has a similar structure,
+
+ struct bpf_htab {
+ struct bpf_map map;
+ ...
+ }
+
+In the above cg_skb(), to access array->map.max_entries with CORE, clang will
+generate two builtins.
+ base = &m_array;
+ /* access array.map */
+ map_addr = __builtin_preserve_struct_access_info(base, 0, 0);
+ /* access array.map.max_entries */
+ max_entries_addr = __builtin_preserve_struct_access_info(map_addr, 0, 0);
+ max_entries = *max_entries_addr;
+
+In the current llvm, if the two builtins are in the same function or end up
+in the same function after inlining, the compiler is smart enough to chain
+them together and generates code like below:
+ base = &m_array;
+ max_entries = *(base + reloc_offset); /* reloc_offset = 0 in this case */
+and we are fine.
+
+But if we force no inlining for one of the functions in the test_map_ptr() selftest, e.g.,
+check_default(), the above two __builtin_preserve_* will be in two different
+functions. In this case, we will have code like:
+ func check_hash():
+ reloc_offset_map = 0;
+ base = &m_array;
+ map_base = base + reloc_offset_map;
+ check_default(map_base, ...)
+ func check_default(map_base, ...):
+ max_entries = *(map_base + reloc_offset_max_entries);
+
+In kernel, map_ptr (CONST_PTR_TO_MAP) does not allow any arithmetic.
+The above "map_base = base + reloc_offset_map" will trigger a verifier failure.
+ ; VERIFY(check_default(&hash->map, map));
+ 0: (18) r7 = 0xffffb4fe8018a004
+ 2: (b4) w1 = 110
+ 3: (63) *(u32 *)(r7 +0) = r1
+ R1_w=invP110 R7_w=map_value(id=0,off=4,ks=4,vs=8,imm=0) R10=fp0
+ ; VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
+ 4: (18) r1 = 0xffffb4fe8018a000
+ 6: (b4) w2 = 1
+ 7: (63) *(u32 *)(r1 +0) = r2
+ R1_w=map_value(id=0,off=0,ks=4,vs=8,imm=0) R2_w=invP1 R7_w=map_value(id=0,off=4,ks=4,vs=8,imm=0) R10=fp0
+ 8: (b7) r2 = 0
+ 9: (18) r8 = 0xffff90bcb500c000
+ 11: (18) r1 = 0xffff90bcb500c000
+ 13: (0f) r1 += r2
+ R1 pointer arithmetic on map_ptr prohibited
+
+To fix the issue, let us permit map_ptr + 0 arithmetic which will
+result in exactly the same map_ptr.
+
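+As a rough illustration (the function names below are hypothetical, not the
+actual selftest code), the non-inlined access pattern that used to trip the
+verifier looks roughly like this:
+
+ /* hypothetical sketch of the split CO-RE access, not the real selftest */
+ static __attribute__((noinline)) __u32 get_max_entries(struct bpf_map *map)
+ {
+         /* the second CO-RE relocation dereferences map + 0 here */
+         return map->max_entries;
+ }
+
+ SEC("cgroup_skb/egress")
+ int cg_skb2(void *ctx)
+ {
+         struct bpf_array *array = (struct bpf_array *)&m_array;
+
+         /* the first CO-RE relocation yields map_ptr + 0, passed across the call */
+         return get_max_entries(&array->map) ? 1 : 0;
+ }
+
+With this change, passing such a map_ptr + 0 value across the call boundary
+is accepted, since the result is exactly the same map pointer.
+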
+Signed-off-by: Yonghong Song <yhs@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Link: https://lore.kernel.org/bpf/20200908175702.2463625-1-yhs@fb.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 43cd175c66a55..718bbdc8b3c66 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5246,6 +5246,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ dst, reg_type_str[ptr_reg->type]);
+ return -EACCES;
+ case CONST_PTR_TO_MAP:
++ /* smin_val represents the known value */
++ if (known && smin_val == 0 && opcode == BPF_ADD)
++ break;
++ /* fall-through */
+ case PTR_TO_PACKET_END:
+ case PTR_TO_SOCKET:
+ case PTR_TO_SOCKET_OR_NULL:
+--
+2.27.0
+
--- /dev/null
+From bce2bff21083e8e8a478ff89617a051f4dd0a8be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Sep 2020 00:49:22 -0500
+Subject: brcmfmac: Fix warning message after dongle setup failed
+
+From: Wright Feng <wright.feng@cypress.com>
+
+[ Upstream commit 6aa5a83a7ed8036c1388a811eb8bdfa77b21f19c ]
+
+brcmfmac showed a warning message in fweh.c when checking the size of an
+event queue that had not been initialized. Therefore, cancel the worker and
+reset the event handler only when the queue has been initialized.
+
+[ 145.505899] brcmfmac 0000:02:00.0: brcmf_pcie_setup: Dongle setup
+[ 145.929970] ------------[ cut here ]------------
+[ 145.929994] WARNING: CPU: 0 PID: 288 at drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c:312
+brcmf_fweh_detach+0xbc/0xd0 [brcmfmac]
+...
+[ 145.930029] Call Trace:
+[ 145.930036] brcmf_detach+0x77/0x100 [brcmfmac]
+[ 145.930043] brcmf_pcie_remove+0x79/0x130 [brcmfmac]
+[ 145.930046] pci_device_remove+0x39/0xc0
+[ 145.930048] device_release_driver_internal+0x141/0x200
+[ 145.930049] device_release_driver+0x12/0x20
+[ 145.930054] brcmf_pcie_setup+0x101/0x3c0 [brcmfmac]
+[ 145.930060] brcmf_fw_request_done+0x11d/0x1f0 [brcmfmac]
+[ 145.930062] ? lock_timer_base+0x7d/0xa0
+[ 145.930063] ? internal_add_timer+0x1f/0xa0
+[ 145.930064] ? add_timer+0x11a/0x1d0
+[ 145.930066] ? __kmalloc_track_caller+0x18c/0x230
+[ 145.930068] ? kstrdup_const+0x23/0x30
+[ 145.930069] ? add_dr+0x46/0x80
+[ 145.930070] ? devres_add+0x3f/0x50
+[ 145.930072] ? usermodehelper_read_unlock+0x15/0x20
+[ 145.930073] ? _request_firmware+0x288/0xa20
+[ 145.930075] request_firmware_work_func+0x36/0x60
+[ 145.930077] process_one_work+0x144/0x360
+[ 145.930078] worker_thread+0x4d/0x3c0
+[ 145.930079] kthread+0x112/0x150
+[ 145.930080] ? rescuer_thread+0x340/0x340
+[ 145.930081] ? kthread_park+0x60/0x60
+[ 145.930083] ret_from_fork+0x25/0x30
+
+Signed-off-by: Wright Feng <wright.feng@cypress.com>
+Signed-off-by: Chi-hsien Lin <chi-hsien.lin@cypress.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Link: https://lore.kernel.org/r/20200928054922.44580-3-wright.feng@cypress.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/wireless/broadcom/brcm80211/brcmfmac/fweh.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+index a5cced2c89ac6..921b94c4f5f9a 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+@@ -304,10 +304,12 @@ void brcmf_fweh_detach(struct brcmf_pub *drvr)
+ {
+ struct brcmf_fweh_info *fweh = &drvr->fweh;
+
+- /* cancel the worker */
+- cancel_work_sync(&fweh->event_work);
+- WARN_ON(!list_empty(&fweh->event_q));
+- memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
++ /* cancel the worker if initialized */
++ if (fweh->event_work.func) {
++ cancel_work_sync(&fweh->event_work);
++ WARN_ON(!list_empty(&fweh->event_q));
++ memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
++ }
+ }
+
+ /**
+--
+2.27.0
+
--- /dev/null
+From 9508cdaf0a8bcfa1d512098bf0ba050b1f43ec62 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 30 Aug 2020 22:14:37 +0300
+Subject: brcmfmac: increase F2 watermark for BCM4329
+
+From: Dmitry Osipenko <digetx@gmail.com>
+
+[ Upstream commit 317da69d10b0247c4042354eb90c75b81620ce9d ]
+
+This patch fixes SDHCI CRC errors during RX throughput testing on the
+BCM4329 chip when the SDIO bus is clocked above 25MHz. In particular the
+checksum problem is observed on NVIDIA Tegra20 SoCs. The good watermark
+value is borrowed from the downstream BCMDHD driver and matches the
+value already used for the BCM4339 chip, hence let's re-use it
+for BCM4329.
+
+Reviewed-by: Arend van Spriel <arend.vanspriel@broadcom.com>
+Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Link: https://lore.kernel.org/r/20200830191439.10017-2-digetx@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index 3c07d1bbe1c6e..ac3ee93a23780 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -4278,6 +4278,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
+ brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
+ CY_43012_MESBUSYCTRL, &err);
+ break;
++ case SDIO_DEVICE_ID_BROADCOM_4329:
+ case SDIO_DEVICE_ID_BROADCOM_4339:
+ brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes for 4339\n",
+ CY_4339_F2_WATERMARK);
+--
+2.27.0
+
--- /dev/null
+From 3faa387bbd934b22b5a9428f86828339e2dc3986 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 5 Sep 2020 01:34:22 +0800
+Subject: btrfs: fix replace of seed device
+
+From: Anand Jain <anand.jain@oracle.com>
+
+[ Upstream commit c6a5d954950c5031444173ad2195efc163afcac9 ]
+
+If you replace a seed device in a sprouted fs, it appears to have
+successfully replaced the seed device, but if you look closely, it
+didn't. Here is an example.
+
+ $ mkfs.btrfs /dev/sda
+ $ btrfstune -S1 /dev/sda
+ $ mount /dev/sda /btrfs
+ $ btrfs device add /dev/sdb /btrfs
+ $ umount /btrfs
+ $ btrfs device scan --forget
+ $ mount -o device=/dev/sda /dev/sdb /btrfs
+ $ btrfs replace start -f /dev/sda /dev/sdc /btrfs
+ $ echo $?
+ 0
+
+ BTRFS info (device sdb): dev_replace from /dev/sda (devid 1) to /dev/sdc started
+ BTRFS info (device sdb): dev_replace from /dev/sda (devid 1) to /dev/sdc finished
+
+ $ btrfs fi show
+ Label: none uuid: ab2c88b7-be81-4a7e-9849-c3666e7f9f4f
+ Total devices 2 FS bytes used 256.00KiB
+ devid 1 size 3.00GiB used 520.00MiB path /dev/sdc
+ devid 2 size 3.00GiB used 896.00MiB path /dev/sdb
+
+ Label: none uuid: 10bd3202-0415-43af-96a8-d5409f310a7e
+ Total devices 1 FS bytes used 128.00KiB
+ devid 1 size 3.00GiB used 536.00MiB path /dev/sda
+
+So as per the replace start command and kernel log replace was successful.
+Now let's try to clean mount.
+
+ $ umount /btrfs
+ $ btrfs device scan --forget
+
+ $ mount -o device=/dev/sdc /dev/sdb /btrfs
+ mount: /btrfs: wrong fs type, bad option, bad superblock on /dev/sdb, missing codepage or helper program, or other error.
+
+ [ 636.157517] BTRFS error (device sdc): failed to read chunk tree: -2
+ [ 636.180177] BTRFS error (device sdc): open_ctree failed
+
+That's because, per the dev items, it is still looking for the original seed
+device.
+
+ $ btrfs inspect-internal dump-tree -d /dev/sdb
+
+ item 0 key (DEV_ITEMS DEV_ITEM 1) itemoff 16185 itemsize 98
+ devid 1 total_bytes 3221225472 bytes_used 545259520
+ io_align 4096 io_width 4096 sector_size 4096 type 0
+ generation 6 start_offset 0 dev_group 0
+ seek_speed 0 bandwidth 0
+ uuid 59368f50-9af2-4b17-91da-8a783cc418d4 <--- seed uuid
+ fsid 10bd3202-0415-43af-96a8-d5409f310a7e <--- seed fsid
+ item 1 key (DEV_ITEMS DEV_ITEM 2) itemoff 16087 itemsize 98
+ devid 2 total_bytes 3221225472 bytes_used 939524096
+ io_align 4096 io_width 4096 sector_size 4096 type 0
+ generation 0 start_offset 0 dev_group 0
+ seek_speed 0 bandwidth 0
+ uuid 56a0a6bc-4630-4998-8daf-3c3030c4256a <- sprout uuid
+ fsid ab2c88b7-be81-4a7e-9849-c3666e7f9f4f <- sprout fsid
+
+But the replaced target has the following uuid+fsid in its superblock
+which doesn't match with the expected uuid+fsid in its devitem.
+
+ $ btrfs in dump-super /dev/sdc | egrep '^generation|dev_item.uuid|dev_item.fsid|devid'
+ generation 20
+ dev_item.uuid 59368f50-9af2-4b17-91da-8a783cc418d4
+ dev_item.fsid ab2c88b7-be81-4a7e-9849-c3666e7f9f4f [match]
+ dev_item.devid 1
+
+So if you provide the original seed device, the mount will be
+successful, which is what has been happening so far in the test case btrfs/163.
+
+ $ btrfs device scan --forget
+ $ mount -o device=/dev/sda /dev/sdb /btrfs
+
+Fix in this patch:
+If a seed device has not been sprouted, then there is nothing to replace,
+because the filesystem is read-only on a read-only device. Similarly, in
+the case of a sprouted filesystem, the seed device is still read-only. So
+make it explicit that a seed device cannot be replaced; you can only add a
+new device and then delete the seed device. If a replace is attempted,
+return -EINVAL.
+
+Signed-off-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/dev-replace.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index e4a1c6afe35dc..0cb36746060da 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -230,7 +230,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
+ int ret = 0;
+
+ *device_out = NULL;
+- if (fs_info->fs_devices->seeding) {
++ if (srcdev->fs_devices->seeding) {
+ btrfs_err(fs_info, "the filesystem is a seed filesystem!");
+ return -EINVAL;
+ }
+--
+2.27.0
+
--- /dev/null
+From 21b81dbd8e4c235223aab809abb32aeca165e4f3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Sep 2020 11:54:38 +0300
+Subject: bus/fsl_mc: Do not rely on caller to provide non NULL mc_io
+
+From: Diana Craciun <diana.craciun@oss.nxp.com>
+
+[ Upstream commit 5026cf605143e764e1785bbf9158559d17f8d260 ]
+
+Before destroying the mc_io, check first that it was
+allocated.
+
+Reviewed-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
+Acked-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
+Signed-off-by: Diana Craciun <diana.craciun@oss.nxp.com>
+Link: https://lore.kernel.org/r/20200929085441.17448-11-diana.craciun@oss.nxp.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/fsl-mc/mc-io.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
+index a30b53f1d87d8..305015486b91c 100644
+--- a/drivers/bus/fsl-mc/mc-io.c
++++ b/drivers/bus/fsl-mc/mc-io.c
+@@ -129,7 +129,12 @@ error_destroy_mc_io:
+ */
+ void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
+ {
+- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
++ struct fsl_mc_device *dpmcp_dev;
++
++ if (!mc_io)
++ return;
++
++ dpmcp_dev = mc_io->dpmcp_dev;
+
+ if (dpmcp_dev)
+ fsl_mc_io_unset_dpmcp(mc_io);
+--
+2.27.0
+
--- /dev/null
+From cbf2cc1400ab755bc946cab4c738b99653f8a329 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Sep 2020 23:22:02 +0530
+Subject: bus: mhi: core: Abort suspends due to outgoing pending packets
+
+From: Bhaumik Bhatt <bbhatt@codeaurora.org>
+
+[ Upstream commit 515847c557dd33167be86cb429fc0674a331bc88 ]
+
+Add the missing check to abort suspends if a client driver has pending
+outgoing packets to send to the device. This allows better utilization
+of the MHI bus, as clients on the host are not left waiting for lengthy
+suspend or resume cycles to finish before their data transfers can proceed.
+
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Bhaumik Bhatt <bbhatt@codeaurora.org>
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Link: https://lore.kernel.org/r/20200929175218.8178-4-manivannan.sadhasivam@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/mhi/core/pm.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
+index 7960980780832..661d704c8093d 100644
+--- a/drivers/bus/mhi/core/pm.c
++++ b/drivers/bus/mhi/core/pm.c
+@@ -686,7 +686,8 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+ return -EIO;
+
+ /* Return busy if there are any pending resources */
+- if (atomic_read(&mhi_cntrl->dev_wake))
++ if (atomic_read(&mhi_cntrl->dev_wake) ||
++ atomic_read(&mhi_cntrl->pending_pkts))
+ return -EBUSY;
+
+ /* Take MHI out of M2 state */
+@@ -712,7 +713,8 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+
+- if (atomic_read(&mhi_cntrl->dev_wake)) {
++ if (atomic_read(&mhi_cntrl->dev_wake) ||
++ atomic_read(&mhi_cntrl->pending_pkts)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ return -EBUSY;
+ }
+--
+2.27.0
+
--- /dev/null
+From 8a738e108f860d593cf14464710ee969ba078001 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Dec 2019 09:00:13 +0000
+Subject: can: flexcan: disable clocks during stop mode
+
+From: Joakim Zhang <qiangqing.zhang@nxp.com>
+
+[ Upstream commit 02f71c6605e1f8259c07f16178330db766189a74 ]
+
+Disable clocks while CAN core is in stop mode.
+
+Signed-off-by: Joakim Zhang <qiangqing.zhang@nxp.com>
+Tested-by: Sean Nyekjaer <sean@geanix.com>
+Link: https://lore.kernel.org/r/20191210085721.9853-2-qiangqing.zhang@nxp.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/flexcan.c | 30 ++++++++++++++++++++----------
+ 1 file changed, 20 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index 2ac7a667bde35..bc21a82cf3a76 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -1722,8 +1722,6 @@ static int __maybe_unused flexcan_suspend(struct device *device)
+ err = flexcan_chip_disable(priv);
+ if (err)
+ return err;
+-
+- err = pm_runtime_force_suspend(device);
+ }
+ netif_stop_queue(dev);
+ netif_device_detach(dev);
+@@ -1749,10 +1747,6 @@ static int __maybe_unused flexcan_resume(struct device *device)
+ if (err)
+ return err;
+ } else {
+- err = pm_runtime_force_resume(device);
+- if (err)
+- return err;
+-
+ err = flexcan_chip_enable(priv);
+ }
+ }
+@@ -1783,8 +1777,16 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device)
+ struct net_device *dev = dev_get_drvdata(device);
+ struct flexcan_priv *priv = netdev_priv(dev);
+
+- if (netif_running(dev) && device_may_wakeup(device))
+- flexcan_enable_wakeup_irq(priv, true);
++ if (netif_running(dev)) {
++ int err;
++
++ if (device_may_wakeup(device))
++ flexcan_enable_wakeup_irq(priv, true);
++
++ err = pm_runtime_force_suspend(device);
++ if (err)
++ return err;
++ }
+
+ return 0;
+ }
+@@ -1794,8 +1796,16 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
+ struct net_device *dev = dev_get_drvdata(device);
+ struct flexcan_priv *priv = netdev_priv(dev);
+
+- if (netif_running(dev) && device_may_wakeup(device))
+- flexcan_enable_wakeup_irq(priv, false);
++ if (netif_running(dev)) {
++ int err;
++
++ err = pm_runtime_force_resume(device);
++ if (err)
++ return err;
++
++ if (device_may_wakeup(device))
++ flexcan_enable_wakeup_irq(priv, false);
++ }
+
+ return 0;
+ }
+--
+2.27.0
+
--- /dev/null
+From af257051b6a083e105678d518d49dab834c7dcbc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Aug 2020 15:23:03 +0800
+Subject: ceph: encode inodes' parent/d_name in cap reconnect message
+
+From: Yan, Zheng <zyan@redhat.com>
+
+[ Upstream commit a33f6432b3a63a4909dbbb0967f7c9df8ff2de91 ]
+
+Since nautilus, the MDS tracks dirfrags whose child inodes have caps in the
+open file table. When the MDS recovers, it prefetches all of these dirfrags.
+This avoids using backtraces to load inodes. But the dirfrag prefetch may load
+lots of useless inodes into the cache and make the MDS run out of memory.
+
+Recent MDS versions add an option that disables the dirfrag prefetch. When the
+dirfrag prefetch is disabled, the recovering MDS only prefetches the
+corresponding dir inodes. Including the inodes' parent/d_name in the cap
+reconnect message can help the MDS load those inodes into its cache.
+
+Signed-off-by: "Yan, Zheng" <zyan@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/mds_client.c | 89 ++++++++++++++++++++++++++++++--------------
+ 1 file changed, 61 insertions(+), 28 deletions(-)
+
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 4a26862d7667e..76d8d9495d1d4 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -3612,6 +3612,39 @@ fail_msg:
+ return err;
+ }
+
++static struct dentry* d_find_primary(struct inode *inode)
++{
++ struct dentry *alias, *dn = NULL;
++
++ if (hlist_empty(&inode->i_dentry))
++ return NULL;
++
++ spin_lock(&inode->i_lock);
++ if (hlist_empty(&inode->i_dentry))
++ goto out_unlock;
++
++ if (S_ISDIR(inode->i_mode)) {
++ alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
++ if (!IS_ROOT(alias))
++ dn = dget(alias);
++ goto out_unlock;
++ }
++
++ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
++ spin_lock(&alias->d_lock);
++ if (!d_unhashed(alias) &&
++ (ceph_dentry(alias)->flags & CEPH_DENTRY_PRIMARY_LINK)) {
++ dn = dget_dlock(alias);
++ }
++ spin_unlock(&alias->d_lock);
++ if (dn)
++ break;
++ }
++out_unlock:
++ spin_unlock(&inode->i_lock);
++ return dn;
++}
++
+ /*
+ * Encode information about a cap for a reconnect with the MDS.
+ */
+@@ -3625,13 +3658,32 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
+ struct ceph_inode_info *ci = cap->ci;
+ struct ceph_reconnect_state *recon_state = arg;
+ struct ceph_pagelist *pagelist = recon_state->pagelist;
+- int err;
++ struct dentry *dentry;
++ char *path;
++ int pathlen, err;
++ u64 pathbase;
+ u64 snap_follows;
+
+ dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
+ inode, ceph_vinop(inode), cap, cap->cap_id,
+ ceph_cap_string(cap->issued));
+
++ dentry = d_find_primary(inode);
++ if (dentry) {
++ /* set pathbase to parent dir when msg_version >= 2 */
++ path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase,
++ recon_state->msg_version >= 2);
++ dput(dentry);
++ if (IS_ERR(path)) {
++ err = PTR_ERR(path);
++ goto out_err;
++ }
++ } else {
++ path = NULL;
++ pathlen = 0;
++ pathbase = 0;
++ }
++
+ spin_lock(&ci->i_ceph_lock);
+ cap->seq = 0; /* reset cap seq */
+ cap->issue_seq = 0; /* and issue_seq */
+@@ -3652,7 +3704,7 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
+ rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
+ rec.v2.issued = cpu_to_le32(cap->issued);
+ rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
+- rec.v2.pathbase = 0;
++ rec.v2.pathbase = cpu_to_le64(pathbase);
+ rec.v2.flock_len = (__force __le32)
+ ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
+ } else {
+@@ -3663,7 +3715,7 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
+ ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
+ ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
+ rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
+- rec.v1.pathbase = 0;
++ rec.v1.pathbase = cpu_to_le64(pathbase);
+ }
+
+ if (list_empty(&ci->i_cap_snaps)) {
+@@ -3725,7 +3777,7 @@ encode_again:
+ sizeof(struct ceph_filelock);
+ rec.v2.flock_len = cpu_to_le32(struct_len);
+
+- struct_len += sizeof(u32) + sizeof(rec.v2);
++ struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
+
+ if (struct_v >= 2)
+ struct_len += sizeof(u64); /* snap_follows */
+@@ -3749,7 +3801,7 @@ encode_again:
+ ceph_pagelist_encode_8(pagelist, 1);
+ ceph_pagelist_encode_32(pagelist, struct_len);
+ }
+- ceph_pagelist_encode_string(pagelist, NULL, 0);
++ ceph_pagelist_encode_string(pagelist, path, pathlen);
+ ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
+ ceph_locks_to_pagelist(flocks, pagelist,
+ num_fcntl_locks, num_flock_locks);
+@@ -3758,39 +3810,20 @@ encode_again:
+ out_freeflocks:
+ kfree(flocks);
+ } else {
+- u64 pathbase = 0;
+- int pathlen = 0;
+- char *path = NULL;
+- struct dentry *dentry;
+-
+- dentry = d_find_alias(inode);
+- if (dentry) {
+- path = ceph_mdsc_build_path(dentry,
+- &pathlen, &pathbase, 0);
+- dput(dentry);
+- if (IS_ERR(path)) {
+- err = PTR_ERR(path);
+- goto out_err;
+- }
+- rec.v1.pathbase = cpu_to_le64(pathbase);
+- }
+-
+ err = ceph_pagelist_reserve(pagelist,
+ sizeof(u64) + sizeof(u32) +
+ pathlen + sizeof(rec.v1));
+- if (err) {
+- goto out_freepath;
+- }
++ if (err)
++ goto out_err;
+
+ ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
+ ceph_pagelist_encode_string(pagelist, path, pathlen);
+ ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
+-out_freepath:
+- ceph_mdsc_free_path(path, pathlen);
+ }
+
+ out_err:
+- if (err >= 0)
++ ceph_mdsc_free_path(path, pathlen);
++ if (!err)
+ recon_state->nr_caps++;
+ return err;
+ }
+--
+2.27.0
+
--- /dev/null
+From 11f07b9db0facc3b6fcced9e8e645eefb02cf71b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Oct 2020 09:32:56 +1000
+Subject: cifs: handle -EINTR in cifs_setattr
+
+From: Ronnie Sahlberg <lsahlber@redhat.com>
+
+[ Upstream commit c6cc4c5a72505a0ecefc9b413f16bec512f38078 ]
+
+RHBZ: 1848178
+
+Some calls that set attributes, like utimensat(), are not supposed to return
+-EINTR and thus do not have handlers for it in glibc, which causes us
+to leak -EINTR to applications that are also unprepared to handle it.
+
+For example, tar will break if utimensat() returns -EINTR and abort unpacking
+the archive. Other applications may break too.
+
+To handle this we add checks for -EINTR, and retry, in cifs_setattr().
+
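+To illustrate the userspace side of the problem (a hypothetical sketch, not
+part of this patch), a typical caller treats any error from utimensat() as
+fatal and does not retry on EINTR:
+
+ /* hypothetical userspace sketch */
+ #include <err.h>
+ #include <fcntl.h>
+ #include <sys/stat.h>
+
+ static void touch_now(const char *path)
+ {
+         struct timespec ts[2] = {
+                 { .tv_nsec = UTIME_NOW },
+                 { .tv_nsec = UTIME_NOW },
+         };
+
+         if (utimensat(AT_FDCWD, path, ts, 0) < 0)
+                 err(1, "utimensat");    /* tar-like tools abort here on EINTR */
+ }
+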
+Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/inode.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 1f75b25e559a7..daec31be85718 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -2883,13 +2883,18 @@ cifs_setattr(struct dentry *direntry, struct iattr *attrs)
+ {
+ struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+ struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);
++ int rc, retries = 0;
+
+- if (pTcon->unix_ext)
+- return cifs_setattr_unix(direntry, attrs);
+-
+- return cifs_setattr_nounix(direntry, attrs);
++ do {
++ if (pTcon->unix_ext)
++ rc = cifs_setattr_unix(direntry, attrs);
++ else
++ rc = cifs_setattr_nounix(direntry, attrs);
++ retries++;
++ } while (is_retryable_error(rc) && retries < 2);
+
+ /* BB: add cifs_setattr_legacy for really old servers */
++ return rc;
+ }
+
+ #if 0
+--
+2.27.0
+
--- /dev/null
+From fc2844a1ca251cd1eb0838bcfb36e0b3e177d9aa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Sep 2020 11:25:59 +0300
+Subject: clk: ti: clockdomain: fix static checker warning
+
+From: Tero Kristo <t-kristo@ti.com>
+
+[ Upstream commit b7a7943fe291b983b104bcbd2f16e8e896f56590 ]
+
+Fix a memory leak induced by not calling clk_put after doing of_clk_get.
+
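+The general pairing that the fix restores is sketched below (illustrative
+only, not the driver's exact code): every reference taken with of_clk_get()
+must be dropped with clk_put() on every path, including the early "skip this
+clock" one:
+
+ /* inside the loop iterating over the node's clocks */
+ struct clk *clk = of_clk_get(node, i);
+
+ if (IS_ERR(clk))
+         break;
+ if (!clk_is_usable(clk)) {      /* clk_is_usable() is a placeholder check */
+         clk_put(clk);           /* don't leak the reference when skipping */
+         continue;
+ }
+ /* ... use the clock ... */
+ clk_put(clk);                   /* drop the reference taken by of_clk_get() */
+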
+Reported-by: Dan Murphy <dmurphy@ti.com>
+Signed-off-by: Tero Kristo <t-kristo@ti.com>
+Link: https://lore.kernel.org/r/20200907082600.454-3-t-kristo@ti.com
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/ti/clockdomain.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
+index ee56306f79d5f..700b7f44f6716 100644
+--- a/drivers/clk/ti/clockdomain.c
++++ b/drivers/clk/ti/clockdomain.c
+@@ -148,10 +148,12 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
+ if (!omap2_clk_is_hw_omap(clk_hw)) {
+ pr_warn("can't setup clkdm for basic clk %s\n",
+ __clk_get_name(clk));
++ clk_put(clk);
+ continue;
+ }
+ to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name;
+ omap2_init_clk_clkdm(clk_hw);
++ clk_put(clk);
+ }
+ }
+
+--
+2.27.0
+
--- /dev/null
+From 1edcdf2f87e3daebe7299f5d403dca6ad621aaa9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Sep 2020 13:17:35 -0600
+Subject: coresight: Make sysfs functional on topologies with per core sink
+
+From: Linu Cherian <lcherian@marvell.com>
+
+[ Upstream commit 6d578258b955fc8888e1bbd9a8fefe7b10065a84 ]
+
+The coresight driver assumes the sink is common across all the ETMs,
+and tries to build a path between an ETM and the first enabled
+sink found using a bus-based search. This breaks sysFS usage
+on implementations that have multiple per-core sinks in the
+enabled state.
+
+To fix this, the coresight_get_enabled_sink API is updated to
+do a connection-based search starting from the given source,
+instead of a bus-based search.
+With sink selection via sysfs deprecated for the perf interface,
+the provision for reset is removed from this API as well.
+
+Signed-off-by: Linu Cherian <lcherian@marvell.com>
+[Fixed indentation problem and removed obsolete comment]
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Link: https://lore.kernel.org/r/20200916191737.4001561-15-mathieu.poirier@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwtracing/coresight/coresight-priv.h | 3 +-
+ drivers/hwtracing/coresight/coresight.c | 62 +++++++++-----------
+ 2 files changed, 29 insertions(+), 36 deletions(-)
+
+diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
+index f2dc625ea5856..5fe773c4d6cc5 100644
+--- a/drivers/hwtracing/coresight/coresight-priv.h
++++ b/drivers/hwtracing/coresight/coresight-priv.h
+@@ -148,7 +148,8 @@ static inline void coresight_write_reg_pair(void __iomem *addr, u64 val,
+ void coresight_disable_path(struct list_head *path);
+ int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
+ struct coresight_device *coresight_get_sink(struct list_head *path);
+-struct coresight_device *coresight_get_enabled_sink(bool reset);
++struct coresight_device *
++coresight_get_enabled_sink(struct coresight_device *source);
+ struct coresight_device *coresight_get_sink_by_id(u32 id);
+ struct coresight_device *
+ coresight_find_default_sink(struct coresight_device *csdev);
+diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
+index cdcb1917216fd..fd46216669449 100644
+--- a/drivers/hwtracing/coresight/coresight.c
++++ b/drivers/hwtracing/coresight/coresight.c
+@@ -540,50 +540,46 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
+ return csdev;
+ }
+
+-static int coresight_enabled_sink(struct device *dev, const void *data)
++static struct coresight_device *
++coresight_find_enabled_sink(struct coresight_device *csdev)
+ {
+- const bool *reset = data;
+- struct coresight_device *csdev = to_coresight_device(dev);
++ int i;
++ struct coresight_device *sink;
+
+ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
+- csdev->activated) {
+- /*
+- * Now that we have a handle on the sink for this session,
+- * disable the sysFS "enable_sink" flag so that possible
+- * concurrent perf session that wish to use another sink don't
+- * trip on it. Doing so has no ramification for the current
+- * session.
+- */
+- if (*reset)
+- csdev->activated = false;
++ csdev->activated)
++ return csdev;
+
+- return 1;
++ /*
++ * Recursively explore each port found on this element.
++ */
++ for (i = 0; i < csdev->pdata->nr_outport; i++) {
++ struct coresight_device *child_dev;
++
++ child_dev = csdev->pdata->conns[i].child_dev;
++ if (child_dev)
++ sink = coresight_find_enabled_sink(child_dev);
++ if (sink)
++ return sink;
+ }
+
+- return 0;
++ return NULL;
+ }
+
+ /**
+- * coresight_get_enabled_sink - returns the first enabled sink found on the bus
+- * @deactivate: Whether the 'enable_sink' flag should be reset
+- *
+- * When operated from perf the deactivate parameter should be set to 'true'.
+- * That way the "enabled_sink" flag of the sink that was selected can be reset,
+- * allowing for other concurrent perf sessions to choose a different sink.
++ * coresight_get_enabled_sink - returns the first enabled sink using
++ * connection based search starting from the source reference
+ *
+- * When operated from sysFS users have full control and as such the deactivate
+- * parameter should be set to 'false', hence mandating users to explicitly
+- * clear the flag.
++ * @source: Coresight source device reference
+ */
+-struct coresight_device *coresight_get_enabled_sink(bool deactivate)
++struct coresight_device *
++coresight_get_enabled_sink(struct coresight_device *source)
+ {
+- struct device *dev = NULL;
+-
+- dev = bus_find_device(&coresight_bustype, NULL, &deactivate,
+- coresight_enabled_sink);
++ if (!source)
++ return NULL;
+
+- return dev ? to_coresight_device(dev) : NULL;
++ return coresight_find_enabled_sink(source);
+ }
+
+ static int coresight_sink_by_id(struct device *dev, const void *data)
+@@ -988,11 +984,7 @@ int coresight_enable(struct coresight_device *csdev)
+ goto out;
+ }
+
+- /*
+- * Search for a valid sink for this session but don't reset the
+- * "enable_sink" flag in sysFS. Users get to do that explicitly.
+- */
+- sink = coresight_get_enabled_sink(false);
++ sink = coresight_get_enabled_sink(csdev);
+ if (!sink) {
+ ret = -EINVAL;
+ goto out;
+--
+2.27.0
+
--- /dev/null
+From 6513a462c5981b6afccd973462ab4e3540b7f134 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 31 Aug 2020 08:10:11 +0200
+Subject: cpufreq: sti-cpufreq: add stih418 support
+
+From: Alain Volmat <avolmat@me.com>
+
+[ Upstream commit 01a163c52039e9426c7d3d3ab16ca261ad622597 ]
+
+The STiH418 can be controlled the same way as STiH407 &
+STiH410 regarding cpufreq.
+
+Signed-off-by: Alain Volmat <avolmat@me.com>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/sti-cpufreq.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
+index a5ad96d29adca..4ac6fb23792a0 100644
+--- a/drivers/cpufreq/sti-cpufreq.c
++++ b/drivers/cpufreq/sti-cpufreq.c
+@@ -141,7 +141,8 @@ static const struct reg_field sti_stih407_dvfs_regfields[DVFS_MAX_REGFIELDS] = {
+ static const struct reg_field *sti_cpufreq_match(void)
+ {
+ if (of_machine_is_compatible("st,stih407") ||
+- of_machine_is_compatible("st,stih410"))
++ of_machine_is_compatible("st,stih410") ||
++ of_machine_is_compatible("st,stih418"))
+ return sti_stih407_dvfs_regfields;
+
+ return NULL;
+@@ -258,7 +259,8 @@ static int sti_cpufreq_init(void)
+ int ret;
+
+ if ((!of_machine_is_compatible("st,stih407")) &&
+- (!of_machine_is_compatible("st,stih410")))
++ (!of_machine_is_compatible("st,stih410")) &&
++ (!of_machine_is_compatible("st,stih418")))
+ return -ENODEV;
+
+ ddata.cpu = get_cpu_device(0);
+--
+2.27.0
+
--- /dev/null
+From 493efea73dfc5e4272483fc6b7ac174d440ab4ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Jul 2020 20:35:32 +0300
+Subject: cpuidle: tegra: Correctly handle result of arm_cpuidle_simple_enter()
+
+From: Dmitry Osipenko <digetx@gmail.com>
+
+[ Upstream commit 1170433e6611402b869c583fa1fbfd85106ff066 ]
+
+The enter() callback of CPUIDLE drivers returns the index of the entered idle
+state on success or a negative value on failure. The negative value could be
+any negative value, i.e. it doesn't necessarily need to be an error code.
+That's because the CPUIDLE core only cares about the fact of the failure and
+not about the reason for the enter() failure.
+
+Like every other enter() callback, arm_cpuidle_simple_enter() returns
+the entered idle-index on success. Unlike some other drivers, it never
+fails. It happens that TEGRA_C1=index=err=0 in the code of the cpuidle-tegra
+driver, and thus there is no actual problem caused by the typo in the code
+which assumes that arm_cpuidle_simple_enter() returns an error code.
+
+arm_cpuidle_simple_enter() may also return a -ENODEV error if CPU_IDLE
+is disabled in the kernel's config, but all CPUIDLE drivers are disabled if
+CPU_IDLE is disabled, including the cpuidle-tegra driver. So we can't ever
+see the error code from arm_cpuidle_simple_enter() today.
+
+Of course the code may get some changes in the future and then the
+typo may turn into a real bug, so let's correct the typo! The
+tegra_cpuidle_state_enter() function is now changed to return the entered
+idle-index on success and a negative error code on failure, which puts it on
+par with arm_cpuidle_simple_enter(), making the code consistent with
+regard to error handling.
+
+This patch fixes a minor typo in the code, it doesn't fix any bugs.
+
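+For reference, a minimal enter() callback following this contract could look
+like the sketch below (example_do_idle() is a hypothetical helper, not a real
+API):
+
+ static int example_enter(struct cpuidle_device *dev,
+                          struct cpuidle_driver *drv, int index)
+ {
+         int err = example_do_idle(index);       /* hypothetical helper */
+
+         /* on success report the state actually entered,
+          * on failure any negative value will do
+          */
+         return err ? -1 : index;
+ }
+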
+Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
+Reviewed-by: Jon Hunter <jonathanh@nvidia.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpuidle/cpuidle-tegra.c | 34 +++++++++++++++++++--------------
+ 1 file changed, 20 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
+index a12fb141875a7..e8956706a2917 100644
+--- a/drivers/cpuidle/cpuidle-tegra.c
++++ b/drivers/cpuidle/cpuidle-tegra.c
+@@ -172,7 +172,7 @@ static int tegra_cpuidle_coupled_barrier(struct cpuidle_device *dev)
+ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+ int index, unsigned int cpu)
+ {
+- int ret;
++ int err;
+
+ /*
+ * CC6 state is the "CPU cluster power-off" state. In order to
+@@ -183,9 +183,9 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+ * CPU cores, GIC and L2 cache).
+ */
+ if (index == TEGRA_CC6) {
+- ret = tegra_cpuidle_coupled_barrier(dev);
+- if (ret)
+- return ret;
++ err = tegra_cpuidle_coupled_barrier(dev);
++ if (err)
++ return err;
+ }
+
+ local_fiq_disable();
+@@ -194,15 +194,15 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+
+ switch (index) {
+ case TEGRA_C7:
+- ret = tegra_cpuidle_c7_enter();
++ err = tegra_cpuidle_c7_enter();
+ break;
+
+ case TEGRA_CC6:
+- ret = tegra_cpuidle_cc6_enter(cpu);
++ err = tegra_cpuidle_cc6_enter(cpu);
+ break;
+
+ default:
+- ret = -EINVAL;
++ err = -EINVAL;
+ break;
+ }
+
+@@ -210,7 +210,7 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+ tegra_pm_clear_cpu_in_lp2();
+ local_fiq_enable();
+
+- return ret;
++ return err ?: index;
+ }
+
+ static int tegra_cpuidle_adjust_state_index(int index, unsigned int cpu)
+@@ -236,21 +236,27 @@ static int tegra_cpuidle_enter(struct cpuidle_device *dev,
+ int index)
+ {
+ unsigned int cpu = cpu_logical_map(dev->cpu);
+- int err;
++ int ret;
+
+ index = tegra_cpuidle_adjust_state_index(index, cpu);
+ if (dev->states_usage[index].disable)
+ return -1;
+
+ if (index == TEGRA_C1)
+- err = arm_cpuidle_simple_enter(dev, drv, index);
++ ret = arm_cpuidle_simple_enter(dev, drv, index);
+ else
+- err = tegra_cpuidle_state_enter(dev, index, cpu);
++ ret = tegra_cpuidle_state_enter(dev, index, cpu);
+
+- if (err && (err != -EINTR || index != TEGRA_CC6))
+- pr_err_once("failed to enter state %d err: %d\n", index, err);
++ if (ret < 0) {
++ if (ret != -EINTR || index != TEGRA_CC6)
++ pr_err_once("failed to enter state %d err: %d\n",
++ index, ret);
++ index = -1;
++ } else {
++ index = ret;
++ }
+
+- return err ? -1 : index;
++ return index;
+ }
+
+ static int tegra114_enter_s2idle(struct cpuidle_device *dev,
+--
+2.27.0
+
--- /dev/null
+From 4f7a91ca56457bd3b42563447766a8311033147f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Sep 2020 05:56:43 -0700
+Subject: drivers/net/wan/hdlc_fr: Correctly handle special skb->protocol
+ values
+
+From: Xie He <xie.he.0141@gmail.com>
+
+[ Upstream commit 8306266c1d51aac9aa7aa907fe99032a58c6382c ]
+
+The fr_hard_header function is used to prepend the header to skbs before
+transmission. It is used in 3 situations:
+1) When a control packet is generated internally in this driver;
+2) When a user sends an skb on an Ethernet-emulating PVC device;
+3) When a user sends an skb on a normal PVC device.
+
+These 3 situations need to be handled differently by fr_hard_header.
+Different headers should be prepended to the skb in different situations.
+
+Currently fr_hard_header distinguishes these 3 situations using
+skb->protocol. For situations 1 and 2, a special skb->protocol value
+will be assigned before calling fr_hard_header, so that it can recognize
+these 2 situations. All skb->protocol values other than these special ones
+are treated by fr_hard_header as situation 3.
+
+However, it is possible that in situation 3, the user sends an skb with
+one of the special skb->protocol values. In this case, fr_hard_header
+would incorrectly treat it as situation 1 or 2.
+
+This patch tries to solve this issue by using skb->dev instead of
+skb->protocol to distinguish between these 3 situations. For situation
+1, skb->dev would be NULL; for situation 2, skb->dev->type would be
+ARPHRD_ETHER; and for situation 3, skb->dev->type would be ARPHRD_DLCI.
+
+This way fr_hard_header would be able to distinguish these 3 situations
+correctly regardless of what skb->protocol value the user tries to use in
+situation 3.
+
+Cc: Krzysztof Halasa <khc@pm.waw.pl>
+Signed-off-by: Xie He <xie.he.0141@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wan/hdlc_fr.c | 98 ++++++++++++++++++++-------------------
+ 1 file changed, 51 insertions(+), 47 deletions(-)
+
+diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
+index d6cfd51613ed8..3a44dad87602d 100644
+--- a/drivers/net/wan/hdlc_fr.c
++++ b/drivers/net/wan/hdlc_fr.c
+@@ -273,63 +273,69 @@ static inline struct net_device **get_dev_p(struct pvc_device *pvc,
+
+ static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
+ {
+- u16 head_len;
+ struct sk_buff *skb = *skb_p;
+
+- switch (skb->protocol) {
+- case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
+- head_len = 4;
+- skb_push(skb, head_len);
+- skb->data[3] = NLPID_CCITT_ANSI_LMI;
+- break;
+-
+- case cpu_to_be16(NLPID_CISCO_LMI):
+- head_len = 4;
+- skb_push(skb, head_len);
+- skb->data[3] = NLPID_CISCO_LMI;
+- break;
+-
+- case cpu_to_be16(ETH_P_IP):
+- head_len = 4;
+- skb_push(skb, head_len);
+- skb->data[3] = NLPID_IP;
+- break;
+-
+- case cpu_to_be16(ETH_P_IPV6):
+- head_len = 4;
+- skb_push(skb, head_len);
+- skb->data[3] = NLPID_IPV6;
+- break;
+-
+- case cpu_to_be16(ETH_P_802_3):
+- head_len = 10;
+- if (skb_headroom(skb) < head_len) {
+- struct sk_buff *skb2 = skb_realloc_headroom(skb,
+- head_len);
++ if (!skb->dev) { /* Control packets */
++ switch (dlci) {
++ case LMI_CCITT_ANSI_DLCI:
++ skb_push(skb, 4);
++ skb->data[3] = NLPID_CCITT_ANSI_LMI;
++ break;
++
++ case LMI_CISCO_DLCI:
++ skb_push(skb, 4);
++ skb->data[3] = NLPID_CISCO_LMI;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ } else if (skb->dev->type == ARPHRD_DLCI) {
++ switch (skb->protocol) {
++ case htons(ETH_P_IP):
++ skb_push(skb, 4);
++ skb->data[3] = NLPID_IP;
++ break;
++
++ case htons(ETH_P_IPV6):
++ skb_push(skb, 4);
++ skb->data[3] = NLPID_IPV6;
++ break;
++
++ default:
++ skb_push(skb, 10);
++ skb->data[3] = FR_PAD;
++ skb->data[4] = NLPID_SNAP;
++ /* OUI 00-00-00 indicates an Ethertype follows */
++ skb->data[5] = 0x00;
++ skb->data[6] = 0x00;
++ skb->data[7] = 0x00;
++ /* This should be an Ethertype: */
++ *(__be16 *)(skb->data + 8) = skb->protocol;
++ }
++
++ } else if (skb->dev->type == ARPHRD_ETHER) {
++ if (skb_headroom(skb) < 10) {
++ struct sk_buff *skb2 = skb_realloc_headroom(skb, 10);
+ if (!skb2)
+ return -ENOBUFS;
+ dev_kfree_skb(skb);
+ skb = *skb_p = skb2;
+ }
+- skb_push(skb, head_len);
++ skb_push(skb, 10);
+ skb->data[3] = FR_PAD;
+ skb->data[4] = NLPID_SNAP;
+- skb->data[5] = FR_PAD;
++ /* OUI 00-80-C2 stands for the 802.1 organization */
++ skb->data[5] = 0x00;
+ skb->data[6] = 0x80;
+ skb->data[7] = 0xC2;
++ /* PID 00-07 stands for Ethernet frames without FCS */
+ skb->data[8] = 0x00;
+- skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
+- break;
++ skb->data[9] = 0x07;
+
+- default:
+- head_len = 10;
+- skb_push(skb, head_len);
+- skb->data[3] = FR_PAD;
+- skb->data[4] = NLPID_SNAP;
+- skb->data[5] = FR_PAD;
+- skb->data[6] = FR_PAD;
+- skb->data[7] = FR_PAD;
+- *(__be16*)(skb->data + 8) = skb->protocol;
++ } else {
++ return -EINVAL;
+ }
+
+ dlci_to_q922(skb->data, dlci);
+@@ -425,8 +431,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
+ skb_put(skb, pad);
+ memset(skb->data + len, 0, pad);
+ }
+- skb->protocol = cpu_to_be16(ETH_P_802_3);
+ }
++ skb->dev = dev;
+ if (!fr_hard_header(&skb, pvc->dlci)) {
+ dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+@@ -494,10 +500,8 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
+ memset(skb->data, 0, len);
+ skb_reserve(skb, 4);
+ if (lmi == LMI_CISCO) {
+- skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
+ fr_hard_header(&skb, LMI_CISCO_DLCI);
+ } else {
+- skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
+ fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
+ }
+ data = skb_tail_pointer(skb);
+--
+2.27.0
+
--- /dev/null
+From 20c3f7c8274f034581a4e2a935a49febcc61d3e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Aug 2020 16:59:02 +0530
+Subject: drivers: watchdog: rdc321x_wdt: Fix race condition bugs
+
+From: Madhuparna Bhowmik <madhuparnabhowmik10@gmail.com>
+
+[ Upstream commit 4b2e7f99cdd314263c9d172bc17193b8b6bba463 ]
+
+In rdc321x_wdt_probe(), rdc321x_wdt_device.queue is initialized
+after misc_register(). Hence, if an ioctl that ends up calling the
+rdc321x_wdt_start() function is issued before that initialization,
+it will see an uninitialized value of rdc321x_wdt_device.queue;
+therefore initialize it before misc_register().
+Also, rdc321x_wdt_device.default_ticks is accessed in the reset()
+function called from the write callback, so initialize it before
+misc_register() as well.
+
+Found by Linux Driver Verification project (linuxtesting.org).
+
+Signed-off-by: Madhuparna Bhowmik <madhuparnabhowmik10@gmail.com>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Link: https://lore.kernel.org/r/20200807112902.28764-1-madhuparnabhowmik10@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Wim Van Sebroeck <wim@linux-watchdog.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/watchdog/rdc321x_wdt.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
+index 57187efeb86f1..f0c94ea51c3e4 100644
+--- a/drivers/watchdog/rdc321x_wdt.c
++++ b/drivers/watchdog/rdc321x_wdt.c
+@@ -231,6 +231,8 @@ static int rdc321x_wdt_probe(struct platform_device *pdev)
+
+ rdc321x_wdt_device.sb_pdev = pdata->sb_pdev;
+ rdc321x_wdt_device.base_reg = r->start;
++ rdc321x_wdt_device.queue = 0;
++ rdc321x_wdt_device.default_ticks = ticks;
+
+ err = misc_register(&rdc321x_wdt_misc);
+ if (err < 0) {
+@@ -245,14 +247,11 @@ static int rdc321x_wdt_probe(struct platform_device *pdev)
+ rdc321x_wdt_device.base_reg, RDC_WDT_RST);
+
+ init_completion(&rdc321x_wdt_device.stop);
+- rdc321x_wdt_device.queue = 0;
+
+ clear_bit(0, &rdc321x_wdt_device.inuse);
+
+ timer_setup(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
+
+- rdc321x_wdt_device.default_ticks = ticks;
+-
+ dev_info(&pdev->dev, "watchdog init success\n");
+
+ return 0;
+--
+2.27.0
+
--- /dev/null
+From 27d9f264e5ee66bb728a41f13582a6cc59ba2536 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Sep 2020 13:57:54 -0400
+Subject: drm/amd/display: Avoid set zero in the requested clk
+
+From: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+
+[ Upstream commit 2f8be0e516803cc3fd87c1671247896571a5a8fb ]
+
+[Why]
+Sometimes CRTCs can be disabled due to a display being unplugged or a
+temporary transition in userspace; in these circumstances, DCE tries to set
+the minimum clock threshold. When this happens, the function
+bw_calcs is invoked with number_of_displays set to zero, making DCE set
+dispclk_khz and sclk_khz to zero. For these reasons, we have seen some
+ATOM bios errors that look like:
+
+[drm:atom_op_jump [amdgpu]] *ERROR* atombios stuck in loop for more than
+5secs aborting
+[drm:amdgpu_atom_execute_table_locked [amdgpu]] *ERROR* atombios stuck
+executing EA8A (len 761, WS 0, PS 0) @ 0xEABA
+
+[How]
+This error happens due to an attempt to optimize the bandwidth with the
+sclk and dispclk clocks set to zero. Technically we handle this in
+the function dce112_set_clock, but we do not consider the case where
+this value is set to zero. This commit fixes this issue by ensuring that
+we never set a minimum value below the minimum clock threshold.
+
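+As an illustrative example (the VCO frequency below is assumed, not taken
+from this patch): with dentist_vco_freq_khz = 600000, the enforced floor is
+600000 / 62 ~= 9677 kHz, so a requested clock of 0 kHz is now raised to
+roughly 9.7 MHz instead of being programmed as zero.
+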
+Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Acked-by: Eryk Brol <eryk.brol@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+index d031bd3d30724..807dca8f7d7aa 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+@@ -79,8 +79,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
+ memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+
+ /* Make sure requested clock isn't lower than minimum threshold*/
+- if (requested_clk_khz > 0)
+- requested_clk_khz = max(requested_clk_khz,
++ requested_clk_khz = max(requested_clk_khz,
+ clk_mgr_dce->base.dentist_vco_freq_khz / 62);
+
+ dce_clk_params.target_clock_frequency = requested_clk_khz;
+--
+2.27.0
+
--- /dev/null
+From ea66d3f1d7467b31a4d9f51fd46c545c12b3210b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Aug 2020 14:26:07 -0400
+Subject: drm/amd/display: Check clock table return
+
+From: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+
+[ Upstream commit 4b4f21ff7f5d11bb77e169b306dcbc5b216f5db5 ]
+
+During the load process for Renoir, our display code needs to retrieve
+the SMU clock and voltage table; however, this operation can fail, which
+means that we have to check for this scenario. Currently, we are not
+handling this case properly and, as a result, we have seen the following
+dmesg log during boot:
+
+RIP: 0010:rn_clk_mgr_construct+0x129/0x3d0 [amdgpu]
+...
+Call Trace:
+ dc_clk_mgr_create+0x16a/0x1b0 [amdgpu]
+ dc_create+0x231/0x760 [amdgpu]
+
+This commit fixes this issue by checking the return status retrieved
+from the clock table before trying to populate any bandwidth information.
+
+Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Acked-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+index 21a3073c8929e..2f8fee05547ac 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+@@ -761,6 +761,7 @@ void rn_clk_mgr_construct(
+ {
+ struct dc_debug_options *debug = &ctx->dc->debug;
+ struct dpm_clocks clock_table = { 0 };
++ enum pp_smu_status status = 0;
+
+ clk_mgr->base.ctx = ctx;
+ clk_mgr->base.funcs = &dcn21_funcs;
+@@ -817,8 +818,10 @@ void rn_clk_mgr_construct(
+ clk_mgr->base.bw_params = &rn_bw_params;
+
+ if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
+- pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
+- if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
++ status = pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
++
++ if (status == PP_SMU_RESULT_OK &&
++ ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
+ }
+ }
+--
+2.27.0
+
--- /dev/null
+From 1e69476b6e7320d81c8b5dd6bf8fb01bb4c6c004 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Sep 2020 17:52:43 -0400
+Subject: drm/amd/display: HDMI remote sink need mode validation for Linux
+
+From: Fangzhi Zuo <Jerry.Zuo@amd.com>
+
+[ Upstream commit 95d620adb48f7728e67d82f56f756e8d451cf8d2 ]
+
+[Why]
+Currently mode validation is bypassed if a remote sink exists. That
+leads to mode set issues when a BW bottleneck exists in the link path,
+e.g., a DP-to-HDMI converter that only supports HDMI 1.4.
+
+Any invalid mode passed to Linux user space will cause a modeset
+failure due to limitations of the Linux user space implementation.
+
+[How]
+Skip mode validation only for EDID override. For a real remote sink,
+a clock limit check should still be done for an HDMI remote sink.
+
+Have HDMI-related remote sinks go through mode validation to
+eliminate modes whose pixel clock exceeds the BW limitation.
+
+Signed-off-by: Fangzhi Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
+Acked-by: Eryk Brol <eryk.brol@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 437d1a7a16fe7..b0f8bfd48d102 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2441,7 +2441,7 @@ enum dc_status dc_link_validate_mode_timing(
+ /* A hack to avoid failing any modes for EDID override feature on
+ * topology change such as lower quality cable for DP or different dongle
+ */
+- if (link->remote_sinks[0])
++ if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL)
+ return DC_OK;
+
+ /* Passive Dongle */
+--
+2.27.0
+
--- /dev/null
+From 695d0eb277f53695fdb68df99f9f47a8e98bfb20 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Sep 2020 13:03:50 -0400
+Subject: drm/amdgpu: No sysfs, not an error condition
+
+From: Luben Tuikov <luben.tuikov@amd.com>
+
+[ Upstream commit 5aea5327ea2ddf544cbeff096f45fc2319b0714e ]
+
+Not being able to create amdgpu sysfs attributes
+is not a fatal error that warrants giving up on
+bringing up the display. Thus, if we get an error
+trying to create amdgpu sysfs attrs, report it and
+continue on to try to bring up a display.
+
+Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
+Acked-by: Slava Abramov <slava.abramov@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index d0b8d0d341af5..2576c299958c5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3316,10 +3316,8 @@ fence_driver_init:
+ flush_delayed_work(&adev->delayed_init_work);
+
+ r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
+- if (r) {
++ if (r)
+ dev_err(adev->dev, "Could not create amdgpu device attr\n");
+- return r;
+- }
+
+ if (IS_ENABLED(CONFIG_PERF_EVENTS))
+ r = amdgpu_pmu_init(adev);
+--
+2.27.0
+
--- /dev/null
+From 1e82ee1d5be22d9124dff6186378bfaca9ee5def Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jul 2020 10:37:01 +0800
+Subject: drm/amdgpu: restore ras flags when user resets eeprom(v2)
+
+From: Guchun Chen <guchun.chen@amd.com>
+
+[ Upstream commit bf0b91b78f002faa1be1902a75eeb0797f9fbcf3 ]
+
+RAS flags need to be reset to their defaults as well when the
+user requests a clean eeprom.
+
+v2: RAS flags shall be restored after eeprom reset succeeds.
+
+Signed-off-by: Guchun Chen <guchun.chen@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index 1bedb416eebd0..b4fb5a473df5a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -367,12 +367,19 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
+ static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+ {
+- struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
++ struct amdgpu_device *adev =
++ (struct amdgpu_device *)file_inode(f)->i_private;
+ int ret;
+
+- ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control);
++ ret = amdgpu_ras_eeprom_reset_table(
++ &(amdgpu_ras_get_context(adev)->eeprom_control));
+
+- return ret == 1 ? size : -EIO;
++ if (ret == 1) {
++ amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
++ return size;
++ } else {
++ return -EIO;
++ }
+ }
+
+ static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
+--
+2.27.0
+
--- /dev/null
+From dec9693c84a7ef19a19c5c305b25355470a18b9c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Jul 2020 15:51:59 +0200
+Subject: drm/ast: Separate DRM driver from PCI code
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit d50ace1e72f05708cc5dbc89b9bbb9873f150092 ]
+
+Putting the DRM driver at the top of the file and the PCI code at the
+bottom makes ast_drv.c more readable. While at it, the patch prefixes
+file-scope variables with ast_.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Acked-by: Sam Ravnborg <sam@ravnborg.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200730135206.30239-3-tzimmermann@suse.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/ast/ast_drv.c | 59 ++++++++++++++++++-----------------
+ 1 file changed, 31 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
+index 0b58f7aee6b01..9d04f2b5225cf 100644
+--- a/drivers/gpu/drm/ast/ast_drv.c
++++ b/drivers/gpu/drm/ast/ast_drv.c
+@@ -43,9 +43,33 @@ int ast_modeset = -1;
+ MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+ module_param_named(modeset, ast_modeset, int, 0400);
+
+-#define PCI_VENDOR_ASPEED 0x1a03
++/*
++ * DRM driver
++ */
++
++DEFINE_DRM_GEM_FOPS(ast_fops);
++
++static struct drm_driver ast_driver = {
++ .driver_features = DRIVER_ATOMIC |
++ DRIVER_GEM |
++ DRIVER_MODESET,
++
++ .fops = &ast_fops,
++ .name = DRIVER_NAME,
++ .desc = DRIVER_DESC,
++ .date = DRIVER_DATE,
++ .major = DRIVER_MAJOR,
++ .minor = DRIVER_MINOR,
++ .patchlevel = DRIVER_PATCHLEVEL,
+
+-static struct drm_driver driver;
++ DRM_GEM_VRAM_DRIVER
++};
++
++/*
++ * PCI driver
++ */
++
++#define PCI_VENDOR_ASPEED 0x1a03
+
+ #define AST_VGA_DEVICE(id, info) { \
+ .class = PCI_BASE_CLASS_DISPLAY << 16, \
+@@ -56,13 +80,13 @@ static struct drm_driver driver;
+ .subdevice = PCI_ANY_ID, \
+ .driver_data = (unsigned long) info }
+
+-static const struct pci_device_id pciidlist[] = {
++static const struct pci_device_id ast_pciidlist[] = {
+ AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL),
+ AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL),
+ {0, 0, 0},
+ };
+
+-MODULE_DEVICE_TABLE(pci, pciidlist);
++MODULE_DEVICE_TABLE(pci, ast_pciidlist);
+
+ static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
+ {
+@@ -94,7 +118,7 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ if (ret)
+ return ret;
+
+- dev = drm_dev_alloc(&driver, &pdev->dev);
++ dev = drm_dev_alloc(&ast_driver, &pdev->dev);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+@@ -118,11 +142,9 @@ err_ast_driver_unload:
+ err_drm_dev_put:
+ drm_dev_put(dev);
+ return ret;
+-
+ }
+
+-static void
+-ast_pci_remove(struct pci_dev *pdev)
++static void ast_pci_remove(struct pci_dev *pdev)
+ {
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+@@ -217,30 +239,12 @@ static const struct dev_pm_ops ast_pm_ops = {
+
+ static struct pci_driver ast_pci_driver = {
+ .name = DRIVER_NAME,
+- .id_table = pciidlist,
++ .id_table = ast_pciidlist,
+ .probe = ast_pci_probe,
+ .remove = ast_pci_remove,
+ .driver.pm = &ast_pm_ops,
+ };
+
+-DEFINE_DRM_GEM_FOPS(ast_fops);
+-
+-static struct drm_driver driver = {
+- .driver_features = DRIVER_ATOMIC |
+- DRIVER_GEM |
+- DRIVER_MODESET,
+-
+- .fops = &ast_fops,
+- .name = DRIVER_NAME,
+- .desc = DRIVER_DESC,
+- .date = DRIVER_DATE,
+- .major = DRIVER_MAJOR,
+- .minor = DRIVER_MINOR,
+- .patchlevel = DRIVER_PATCHLEVEL,
+-
+- DRM_GEM_VRAM_DRIVER
+-};
+-
+ static int __init ast_init(void)
+ {
+ if (vgacon_text_force() && ast_modeset == -1)
+@@ -261,4 +265,3 @@ module_exit(ast_exit);
+ MODULE_AUTHOR(DRIVER_AUTHOR);
+ MODULE_DESCRIPTION(DRIVER_DESC);
+ MODULE_LICENSE("GPL and additional rights");
+-
+--
+2.27.0
+
--- /dev/null
+From de5e20121d4dc8e3fcd3c892d2c8b3fc38687102 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Jul 2020 21:42:34 +0200
+Subject: drm/bridge/synopsys: dsi: add support for non-continuous HS clock
+
+From: Antonio Borneo <antonio.borneo@st.com>
+
+[ Upstream commit c6d94e37bdbb6dfe7e581e937a915ab58399b8a5 ]
+
+Current code enables the HS clock when video mode is started or to
+send out a HS command, and disables the HS clock to send out a LP
+command. This is not what DSI spec specify.
+
+Enable HS clock either in command and in video mode.
+Set automatic HS clock management for panels and devices that
+support non-continuous HS clock.
+
+Signed-off-by: Antonio Borneo <antonio.borneo@st.com>
+Tested-by: Philippe Cornu <philippe.cornu@st.com>
+Reviewed-by: Philippe Cornu <philippe.cornu@st.com>
+Acked-by: Neil Armstrong <narmstrong@baylibre.com>
+Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200701194234.18123-1-yannick.fertre@st.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+index d580b2aa4ce98..979acaa90d002 100644
+--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
++++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+@@ -365,7 +365,6 @@ static void dw_mipi_message_config(struct dw_mipi_dsi *dsi,
+ if (lpm)
+ val |= CMD_MODE_ALL_LP;
+
+- dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 0 : PHY_TXREQUESTCLKHS);
+ dsi_write(dsi, DSI_CMD_MODE_CFG, val);
+ }
+
+@@ -541,16 +540,22 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
+ static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi,
+ unsigned long mode_flags)
+ {
++ u32 val;
++
+ dsi_write(dsi, DSI_PWR_UP, RESET);
+
+ if (mode_flags & MIPI_DSI_MODE_VIDEO) {
+ dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE);
+ dw_mipi_dsi_video_mode_config(dsi);
+- dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS);
+ } else {
+ dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
+ }
+
++ val = PHY_TXREQUESTCLKHS;
++ if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
++ val |= AUTO_CLKLANE_CTRL;
++ dsi_write(dsi, DSI_LPCLK_CTRL, val);
++
+ dsi_write(dsi, DSI_PWR_UP, POWERUP);
+ }
+
+--
+2.27.0
+
--- /dev/null
+From 16ecc37d3a91a24974a9da7069fb97f57a3215fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Aug 2020 10:15:22 +0200
+Subject: drm/bridge_connector: Set default status connected for eDP connectors
+
+From: Enric Balletbo i Serra <enric.balletbo@collabora.com>
+
+[ Upstream commit c5589b39549d1875bb506da473bf4580c959db8c ]
+
+In an eDP application, HPD is not required and is useless on most
+bridge chips. If HPD is not used, we need to set the initial status to
+connected; otherwise the connector created by the drm_bridge_connector
+API remains in an unknown state.
+
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Acked-by: Sam Ravnborg <sam@ravnborg.org>
+Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
+Reviewed-by: Bilal Wasim <bwasim.lkml@gmail.com>
+Tested-by: Bilal Wasim <bwasim.lkml@gmail.com>
+Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200826081526.674866-2-enric.balletbo@collabora.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_bridge_connector.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
+index c6994fe673f31..a58cbde59c34a 100644
+--- a/drivers/gpu/drm/drm_bridge_connector.c
++++ b/drivers/gpu/drm/drm_bridge_connector.c
+@@ -187,6 +187,7 @@ drm_bridge_connector_detect(struct drm_connector *connector, bool force)
+ case DRM_MODE_CONNECTOR_DPI:
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_DSI:
++ case DRM_MODE_CONNECTOR_eDP:
+ status = connector_status_connected;
+ break;
+ default:
+--
+2.27.0
+
--- /dev/null
+From decc88df84b45d7bf7cfd116d63b92fbac862002 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Aug 2020 17:37:56 +0300
+Subject: drm/brige/megachips: Add checking if ge_b850v3_lvds_init() is working
+ correctly
+
+From: Nadezda Lutovinova <lutovinova@ispras.ru>
+
+[ Upstream commit f688a345f0d7a6df4dd2aeca8e4f3c05e123a0ee ]
+
+If ge_b850v3_lvds_init() does not allocate memory for ge_b850v3_lvds_ptr,
+then a null pointer is dereferenced.
+
+The patch adds a check of the return value of ge_b850v3_lvds_init().
+
+Found by Linux Driver Verification project (linuxtesting.org).
+
+Signed-off-by: Nadezda Lutovinova <lutovinova@ispras.ru>
+Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200819143756.30626-1-lutovinova@ispras.ru
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+index 6200f12a37e69..ab8174831cf40 100644
+--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
++++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+@@ -302,8 +302,12 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
+ const struct i2c_device_id *id)
+ {
+ struct device *dev = &stdp4028_i2c->dev;
++ int ret;
++
++ ret = ge_b850v3_lvds_init(dev);
+
+- ge_b850v3_lvds_init(dev);
++ if (ret)
++ return ret;
+
+ ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c;
+ i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr);
+@@ -361,8 +365,12 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c,
+ const struct i2c_device_id *id)
+ {
+ struct device *dev = &stdp2690_i2c->dev;
++ int ret;
++
++ ret = ge_b850v3_lvds_init(dev);
+
+- ge_b850v3_lvds_init(dev);
++ if (ret)
++ return ret;
+
+ ge_b850v3_lvds_ptr->stdp2690_i2c = stdp2690_i2c;
+ i2c_set_clientdata(stdp2690_i2c, ge_b850v3_lvds_ptr);
+--
+2.27.0
+
--- /dev/null
+From cbf09081da232a9d06bf0f27dcdf9cd73465fb91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Apr 2020 13:08:41 +0200
+Subject: drm: exynos: fix common struct sg_table related issues
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+[ Upstream commit 84404614167b829f7b58189cd24b6c0c74897171 ]
+
+The Documentation/DMA-API-HOWTO.txt states that the dma_map_sg() function
+returns the number of the created entries in the DMA address space.
+However, the subsequent calls to dma_sync_sg_for_{device,cpu}() and
+dma_unmap_sg() must be made with the original number of entries
+passed to dma_map_sg().
+
+struct sg_table is a common structure used for describing a non-contiguous
+memory buffer, used commonly in the DRM and graphics subsystems. It
+consists of a scatterlist with memory pages and DMA addresses (sgl entry),
+as well as the number of scatterlist entries: CPU pages (orig_nents entry)
+and DMA mapped pages (nents entry).
+
+It turned out that it was a common mistake to misuse nents and orig_nents
+entries, calling DMA-mapping functions with a wrong number of entries or
+ignoring the number of mapped entries returned by the dma_map_sg()
+function.
+
+To avoid such issues, let's use the common dma-mapping wrappers operating
+directly on the struct sg_table objects and use scatterlist page
+iterators where possible. This, almost always, hides references to the
+nents and orig_nents entries, making the code robust, easier to follow
+and copy/paste safe.
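+
+A minimal sketch of the converted pattern (illustrative only; the real
+call sites are in the diff below, and the helper name here is made up):
+
+	static int example_map(struct device *dev, struct sg_table *sgt)
+	{
+		int ret;
+
+		/* Returns 0 on success or a negative errno; the number of
+		 * DMA-mapped entries is stored in the sg_table itself. */
+		ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
+		if (ret)
+			return ret;
+
+		/* ... walk DMA addresses with for_each_sgtable_dma_sg()
+		 * or for_each_sgtable_dma_page() ... */
+
+		/* The unmap helper takes its counts from the sg_table, so
+		 * callers can no longer pass a wrong number of entries. */
+		dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
+		return 0;
+	}
+
+This is why the conversions below can drop the manual nents bookkeeping.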
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
+Acked-by: Inki Dae <inki.dae@samsung.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/exynos/exynos_drm_g2d.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+index 03be314271811..967a5cdc120e3 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+@@ -395,8 +395,8 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
+ return;
+
+ out:
+- dma_unmap_sg(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt->sgl,
+- g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL);
++ dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt,
++ DMA_BIDIRECTIONAL, 0);
+
+ pages = frame_vector_pages(g2d_userptr->vec);
+ if (!IS_ERR(pages)) {
+@@ -511,10 +511,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
+
+ g2d_userptr->sgt = sgt;
+
+- if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents,
+- DMA_BIDIRECTIONAL)) {
++ ret = dma_map_sgtable(to_dma_dev(g2d->drm_dev), sgt,
++ DMA_BIDIRECTIONAL, 0);
++ if (ret) {
+ DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n");
+- ret = -ENOMEM;
+ goto err_sg_free_table;
+ }
+
+--
+2.27.0
+
--- /dev/null
+From 61f616a34c2625b0f41dcb6cb4c77b795edef039 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Apr 2020 13:09:11 +0200
+Subject: drm: lima: fix common struct sg_table related issues
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+[ Upstream commit c3d9c17f486d5c54940487dc31a54ebfdeeb371a ]
+
+The Documentation/DMA-API-HOWTO.txt states that the dma_map_sg() function
+returns the number of the created entries in the DMA address space.
+However, the subsequent calls to dma_sync_sg_for_{device,cpu}() and
+dma_unmap_sg() must be made with the original number of entries
+passed to dma_map_sg().
+
+struct sg_table is a common structure used for describing a non-contiguous
+memory buffer, used commonly in the DRM and graphics subsystems. It
+consists of a scatterlist with memory pages and DMA addresses (sgl entry),
+as well as the number of scatterlist entries: CPU pages (orig_nents entry)
+and DMA mapped pages (nents entry).
+
+It turned out that it was a common mistake to misuse nents and orig_nents
+entries, calling DMA-mapping functions with a wrong number of entries or
+ignoring the number of mapped entries returned by the dma_map_sg()
+function.
+
+To avoid such issues, let's use the common dma-mapping wrappers operating
+directly on the struct sg_table objects and use scatterlist page
+iterators where possible. This, almost always, hides references to the
+nents and orig_nents entries, making the code robust, easier to follow
+and copy/paste safe.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: Qiang Yu <yuq825@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/lima/lima_gem.c | 11 ++++++++---
+ drivers/gpu/drm/lima/lima_vm.c | 5 ++---
+ 2 files changed, 10 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
+index 155f2b4b4030a..11223fe348dfe 100644
+--- a/drivers/gpu/drm/lima/lima_gem.c
++++ b/drivers/gpu/drm/lima/lima_gem.c
+@@ -69,8 +69,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
+ return ret;
+
+ if (bo->base.sgt) {
+- dma_unmap_sg(dev, bo->base.sgt->sgl,
+- bo->base.sgt->nents, DMA_BIDIRECTIONAL);
++ dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(bo->base.sgt);
+ } else {
+ bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
+@@ -80,7 +79,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
+ }
+ }
+
+- dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL);
++ ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
++ if (ret) {
++ sg_free_table(&sgt);
++ kfree(bo->base.sgt);
++ bo->base.sgt = NULL;
++ return ret;
++ }
+
+ *bo->base.sgt = sgt;
+
+diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
+index 5b92fb82674a9..2b2739adc7f53 100644
+--- a/drivers/gpu/drm/lima/lima_vm.c
++++ b/drivers/gpu/drm/lima/lima_vm.c
+@@ -124,7 +124,7 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
+ if (err)
+ goto err_out1;
+
+- for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) {
++ for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, 0) {
+ err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
+ bo_va->node.start + offset);
+ if (err)
+@@ -298,8 +298,7 @@ int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff)
+ mutex_lock(&vm->lock);
+
+ base = bo_va->node.start + (pageoff << PAGE_SHIFT);
+- for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter,
+- bo->base.sgt->nents, pageoff) {
++ for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, pageoff) {
+ err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
+ base + offset);
+ if (err)
+--
+2.27.0
+
--- /dev/null
+From c21d6d333424f049606dcc2eb6ed9c03cc77a1d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Apr 2020 13:09:35 +0200
+Subject: drm: panfrost: fix common struct sg_table related issues
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+[ Upstream commit 34a4e66faf8b22c8409cbd46839ba5e488b1e6a9 ]
+
+The Documentation/DMA-API-HOWTO.txt states that the dma_map_sg() function
+returns the number of the created entries in the DMA address space.
+However, the subsequent calls to dma_sync_sg_for_{device,cpu}() and
+dma_unmap_sg() must be made with the original number of entries
+passed to dma_map_sg().
+
+struct sg_table is a common structure used for describing a non-contiguous
+memory buffer, used commonly in the DRM and graphics subsystems. It
+consists of a scatterlist with memory pages and DMA addresses (sgl entry),
+as well as the number of scatterlist entries: CPU pages (orig_nents entry)
+and DMA mapped pages (nents entry).
+
+It turned out that it was a common mistake to misuse nents and orig_nents
+entries, calling DMA-mapping functions with a wrong number of entries or
+ignoring the number of mapped entries returned by the dma_map_sg()
+function.
+
+To avoid such issues, let's use the common dma-mapping wrappers operating
+directly on the struct sg_table objects and use scatterlist page
+iterators where possible. This, almost always, hides references to the
+nents and orig_nents entries, making the code robust, easier to follow
+and copy/paste safe.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: Steven Price <steven.price@arm.com>
+Reviewed-by: Rob Herring <robh@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panfrost/panfrost_gem.c | 4 ++--
+ drivers/gpu/drm/panfrost/panfrost_mmu.c | 7 +++----
+ 2 files changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
+index 33355dd302f11..1a6cea0e0bd74 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
+@@ -41,8 +41,8 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
+
+ for (i = 0; i < n_sgt; i++) {
+ if (bo->sgts[i].sgl) {
+- dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl,
+- bo->sgts[i].nents, DMA_BIDIRECTIONAL);
++ dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
++ DMA_BIDIRECTIONAL, 0);
+ sg_free_table(&bo->sgts[i]);
+ }
+ }
+diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+index e8f7b11352d27..776448c527ea9 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -253,7 +253,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
+ struct io_pgtable_ops *ops = mmu->pgtbl_ops;
+ u64 start_iova = iova;
+
+- for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
++ for_each_sgtable_dma_sg(sgt, sgl, count) {
+ unsigned long paddr = sg_dma_address(sgl);
+ size_t len = sg_dma_len(sgl);
+
+@@ -517,10 +517,9 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ if (ret)
+ goto err_pages;
+
+- if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
+- ret = -EINVAL;
++ ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
++ if (ret)
+ goto err_map;
+- }
+
+ mmu_map_sg(pfdev, bomapping->mmu, addr,
+ IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+--
+2.27.0
+
--- /dev/null
+From 6f74672e57a0ff9cddbce39de1110b0412849caa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Aug 2020 19:59:58 -0400
+Subject: drm/scheduler: Scheduler priority fixes (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Luben Tuikov <luben.tuikov@amd.com>
+
+[ Upstream commit e2d732fdb7a9e421720a644580cd6a9400f97f60 ]
+
+Remove DRM_SCHED_PRIORITY_LOW, as it was used
+in only one place.
+
+Rename DRM_SCHED_PRIORITY_MAX to
+DRM_SCHED_PRIORITY_COUNT and separate it by a
+blank line, as it represents a (total) count of
+said priorities and is used as such in loops
+throughout the code. (With 0-based indexing, the
+value after the last priority is the count.)
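+
+A generic C illustration (not from the scheduler code; the names are
+made up for this sketch) of why the trailing enumerator doubles as the
+element count when values start at 0:
+
+	enum example_prio { EX_MIN, EX_NORMAL, EX_HIGH, EX_COUNT };
+	/* EX_MIN = 0, EX_NORMAL = 1, EX_HIGH = 2, EX_COUNT = 3 */
+
+	int jobs_per_prio[EX_COUNT];
+
+	for (int i = EX_MIN; i < EX_COUNT; i++)
+		jobs_per_prio[i] = 0;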
+
+Remove the redundant word HIGH from the priority
+names, and rename *KERNEL* to *HIGH*, as it really
+means just that: high.
+
+v2: Add back KERNEL and remove SW and HW,
+ in lieu of a single HIGH between NORMAL and KERNEL.
+
+Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 6 +++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +-
+ drivers/gpu/drm/scheduler/sched_main.c | 4 ++--
+ include/drm/gpu_scheduler.h | 12 +++++++-----
+ 8 files changed, 18 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 8842c55d4490b..fc695126b6e75 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -46,7 +46,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
+ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+ enum drm_sched_priority priority)
+ {
+- if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
++ if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
+ return -EINVAL;
+
+ /* NORMAL and below are accessible by everyone */
+@@ -65,7 +65,7 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+ static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+ {
+ switch (prio) {
+- case DRM_SCHED_PRIORITY_HIGH_HW:
++ case DRM_SCHED_PRIORITY_HIGH:
+ case DRM_SCHED_PRIORITY_KERNEL:
+ return AMDGPU_GFX_PIPE_PRIO_HIGH;
+ default:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 937029ad5271a..dcfe8a3b03ffb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -251,7 +251,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
+ int i;
+
+ /* Signal all jobs not yet scheduled */
+- for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
++ for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+ if (!rq)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index 13ea8ebc421c6..6d4fc79bf84aa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -267,7 +267,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+ &ring->sched;
+ }
+
+- for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
++ for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i)
+ atomic_set(&ring->num_jobs[i], 0);
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index da871d84b7424..7112137689db0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -243,7 +243,7 @@ struct amdgpu_ring {
+ bool has_compute_vm_bug;
+ bool no_scheduler;
+
+- atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
++ atomic_t num_jobs[DRM_SCHED_PRIORITY_COUNT];
+ struct mutex priority_mutex;
+ /* protected by priority_mutex */
+ int priority;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+index c799691dfa848..17661ede94885 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+@@ -36,14 +36,14 @@ enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
+ {
+ switch (amdgpu_priority) {
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+- return DRM_SCHED_PRIORITY_HIGH_HW;
++ return DRM_SCHED_PRIORITY_HIGH;
+ case AMDGPU_CTX_PRIORITY_HIGH:
+- return DRM_SCHED_PRIORITY_HIGH_SW;
++ return DRM_SCHED_PRIORITY_HIGH;
+ case AMDGPU_CTX_PRIORITY_NORMAL:
+ return DRM_SCHED_PRIORITY_NORMAL;
+ case AMDGPU_CTX_PRIORITY_LOW:
+ case AMDGPU_CTX_PRIORITY_VERY_LOW:
+- return DRM_SCHED_PRIORITY_LOW;
++ return DRM_SCHED_PRIORITY_MIN;
+ case AMDGPU_CTX_PRIORITY_UNSET:
+ return DRM_SCHED_PRIORITY_UNSET;
+ default:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 978bae7313980..b7fd0cdffce0e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2101,7 +2101,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
+ ring = adev->mman.buffer_funcs_ring;
+ sched = &ring->sched;
+ r = drm_sched_entity_init(&adev->mman.entity,
+- DRM_SCHED_PRIORITY_KERNEL, &sched,
++ DRM_SCHED_PRIORITY_KERNEL, &sched,
+ 1, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index 96f763d888af5..9a0d77a680180 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -625,7 +625,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
+ return NULL;
+
+ /* Kernel run queue has higher priority than normal run queue*/
+- for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
++ for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
+ if (entity)
+ break;
+@@ -852,7 +852,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
+ sched->name = name;
+ sched->timeout = timeout;
+ sched->hang_limit = hang_limit;
+- for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
++ for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
+ drm_sched_rq_init(sched, &sched->sched_rq[i]);
+
+ init_waitqueue_head(&sched->wake_up_worker);
+diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
+index b9780ae9dd26c..72dc3a95fbaad 100644
+--- a/include/drm/gpu_scheduler.h
++++ b/include/drm/gpu_scheduler.h
+@@ -33,14 +33,16 @@
+ struct drm_gpu_scheduler;
+ struct drm_sched_rq;
+
++/* These are often used as an (initial) index
++ * to an array, and as such should start at 0.
++ */
+ enum drm_sched_priority {
+ DRM_SCHED_PRIORITY_MIN,
+- DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
+ DRM_SCHED_PRIORITY_NORMAL,
+- DRM_SCHED_PRIORITY_HIGH_SW,
+- DRM_SCHED_PRIORITY_HIGH_HW,
++ DRM_SCHED_PRIORITY_HIGH,
+ DRM_SCHED_PRIORITY_KERNEL,
+- DRM_SCHED_PRIORITY_MAX,
++
++ DRM_SCHED_PRIORITY_COUNT,
+ DRM_SCHED_PRIORITY_INVALID = -1,
+ DRM_SCHED_PRIORITY_UNSET = -2
+ };
+@@ -274,7 +276,7 @@ struct drm_gpu_scheduler {
+ uint32_t hw_submission_limit;
+ long timeout;
+ const char *name;
+- struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
++ struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT];
+ wait_queue_head_t wake_up_worker;
+ wait_queue_head_t job_scheduled;
+ atomic_t hw_rq_count;
+--
+2.27.0
+
--- /dev/null
+From bdd20bef7639c0699686eaae3ed3da782e401cbd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Aug 2020 12:45:53 +0000
+Subject: drm/vkms: avoid warning in vkms_get_vblank_timestamp
+
+From: Sidong Yang <realwakka@gmail.com>
+
+[ Upstream commit 05ca530268a9d0ab3547e7b288635e35990a77c4 ]
+
+This patch avoids the warning in vkms_get_vblank_timestamp when vblanks
+aren't enabled. When running the igt test kms_cursor_crc just after
+loading the vkms module, the warning below is raised. The initial value
+of the vblank time is zero, and hrtimer.node.expires is also zero if
+vblanks weren't enabled before. The vkms module isn't real hardware but
+just a virtual hardware module, so vkms can't generate a reasonable
+timestamp when the hrtimer is off; it's best to grab the current time.
+
+[106444.464503] [IGT] kms_cursor_crc: starting subtest pipe-A-cursor-size-change
+[106444.471475] WARNING: CPU: 0 PID: 10109 at
+vkms_get_vblank_timestamp+0x42/0x50 [vkms]
+[106444.471511] CPU: 0 PID: 10109 Comm: kms_cursor_crc Tainted: G W OE
+5.9.0-rc1+ #6
+[106444.471514] RIP: 0010:vkms_get_vblank_timestamp+0x42/0x50 [vkms]
+[106444.471528] Call Trace:
+[106444.471551] drm_get_last_vbltimestamp+0xb9/0xd0 [drm]
+[106444.471566] drm_reset_vblank_timestamp+0x63/0xe0 [drm]
+[106444.471579] drm_crtc_vblank_on+0x85/0x150 [drm]
+[106444.471582] vkms_crtc_atomic_enable+0xe/0x10 [vkms]
+[106444.471592] drm_atomic_helper_commit_modeset_enables+0x1db/0x230
+[drm_kms_helper]
+[106444.471594] vkms_atomic_commit_tail+0x38/0xc0 [vkms]
+[106444.471601] commit_tail+0x97/0x130 [drm_kms_helper]
+[106444.471608] drm_atomic_helper_commit+0x117/0x140 [drm_kms_helper]
+[106444.471622] drm_atomic_commit+0x4a/0x50 [drm]
+[106444.471629] drm_atomic_helper_set_config+0x63/0xb0 [drm_kms_helper]
+[106444.471642] drm_mode_setcrtc+0x1d9/0x7b0 [drm]
+[106444.471654] ? drm_mode_getcrtc+0x1a0/0x1a0 [drm]
+[106444.471666] drm_ioctl_kernel+0xb6/0x100 [drm]
+[106444.471677] drm_ioctl+0x3ad/0x470 [drm]
+[106444.471688] ? drm_mode_getcrtc+0x1a0/0x1a0 [drm]
+[106444.471692] ? tomoyo_file_ioctl+0x19/0x20
+[106444.471694] __x64_sys_ioctl+0x96/0xd0
+[106444.471697] do_syscall_64+0x37/0x80
+[106444.471699] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
+Cc: Haneen Mohammed <hamohammed.sa@gmail.com>
+Cc: Melissa Wen <melissa.srw@gmail.com>
+
+Signed-off-by: Sidong Yang <realwakka@gmail.com>
+Reviewed-by: Melissa Wen <melissa.srw@gmail.com>
+Signed-off-by: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200828124553.2178-1-realwakka@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vkms/vkms_crtc.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
+index ac85e17428f88..09c012d54d58f 100644
+--- a/drivers/gpu/drm/vkms/vkms_crtc.c
++++ b/drivers/gpu/drm/vkms/vkms_crtc.c
+@@ -86,6 +86,11 @@ static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+ struct vkms_output *output = &vkmsdev->output;
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
++ if (!READ_ONCE(vblank->enabled)) {
++ *vblank_time = ktime_get();
++ return true;
++ }
++
+ *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
+
+ if (WARN_ON(*vblank_time == vblank->time))
+--
+2.27.0
+
--- /dev/null
+From 887c5d2da4fb85f5dda5ba46b0246f5c3f65af43 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Oct 2020 13:03:30 +0200
+Subject: ext4: Detect already used quota file early
+
+From: Jan Kara <jack@suse.cz>
+
+[ Upstream commit e0770e91424f694b461141cbc99adf6b23006b60 ]
+
+When we try to use a file already used as a quota file again (for the same
+or a different quota type), strange things can happen. At the very least
+lockdep annotations may be wrong, but inode flags may also be wrongly set
+/ reset. When the file is used for two quota types at once we can even
+corrupt the file and likely crash the kernel. Catch all these cases by
+checking whether the passed file is already used as a quota file and bail
+out early in that case.
+
+This fixes occasional generic/219 failure due to lockdep complaint.
+
+Reviewed-by: Andreas Dilger <adilger@dilger.ca>
+Reported-by: Ritesh Harjani <riteshh@linux.ibm.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20201015110330.28716-1-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/super.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index ea425b49b3456..d31ae5a878594 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -6042,6 +6042,11 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
+ /* Quotafile not on the same filesystem? */
+ if (path->dentry->d_sb != sb)
+ return -EXDEV;
++
++ /* Quota already enabled for this file? */
++ if (IS_NOQUOTA(d_inode(path->dentry)))
++ return -EBUSY;
++
+ /* Journaling quota? */
+ if (EXT4_SB(sb)->s_qf_names[type]) {
+ /* Quotafile not in fs root? */
+--
+2.27.0
+
--- /dev/null
+From fb247d4a035cd31075abfc612683b745fe3966f7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Sep 2020 20:45:44 +0800
+Subject: f2fs: add trace exit in exception path
+
+From: Zhang Qilong <zhangqilong3@huawei.com>
+
+[ Upstream commit 9b66482282888d02832b7d90239e1cdb18e4b431 ]
+
+The trace exit was missing in the exception path of
+f2fs_sync_dirty_inodes(); add it.
+
+Signed-off-by: Zhang Qilong <zhangqilong3@huawei.com>
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/checkpoint.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index ff807e14c8911..bf190a718ca6c 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -1047,8 +1047,12 @@ int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
+ get_pages(sbi, is_dir ?
+ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
+ retry:
+- if (unlikely(f2fs_cp_error(sbi)))
++ if (unlikely(f2fs_cp_error(sbi))) {
++ trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
++ get_pages(sbi, is_dir ?
++ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
+ return -EIO;
++ }
+
+ spin_lock(&sbi->inode_lock[type]);
+
+--
+2.27.0
+
--- /dev/null
+From 5a4a1b995ec6a7131aa929efce28ed9ba8a36061 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Sep 2020 15:01:52 +0800
+Subject: f2fs: allocate proper size memory for zstd decompress
+
+From: Chao Yu <yuchao0@huawei.com>
+
+[ Upstream commit 0e2b7385cb59e566520cfd0a04b4b53bc9461e98 ]
+
+As 5kft <5kft@5kft.org> reported:
+
+ kworker/u9:3: page allocation failure: order:9, mode:0x40c40(GFP_NOFS|__GFP_COMP), nodemask=(null),cpuset=/,mems_allowed=0
+ CPU: 3 PID: 8168 Comm: kworker/u9:3 Tainted: G C 5.8.3-sunxi #trunk
+ Hardware name: Allwinner sun8i Family
+ Workqueue: f2fs_post_read_wq f2fs_post_read_work
+ [<c010d6d5>] (unwind_backtrace) from [<c0109a55>] (show_stack+0x11/0x14)
+ [<c0109a55>] (show_stack) from [<c056d489>] (dump_stack+0x75/0x84)
+ [<c056d489>] (dump_stack) from [<c0243b53>] (warn_alloc+0xa3/0x104)
+ [<c0243b53>] (warn_alloc) from [<c024473b>] (__alloc_pages_nodemask+0xb87/0xc40)
+ [<c024473b>] (__alloc_pages_nodemask) from [<c02267c5>] (kmalloc_order+0x19/0x38)
+ [<c02267c5>] (kmalloc_order) from [<c02267fd>] (kmalloc_order_trace+0x19/0x90)
+ [<c02267fd>] (kmalloc_order_trace) from [<c047c665>] (zstd_init_decompress_ctx+0x21/0x88)
+ [<c047c665>] (zstd_init_decompress_ctx) from [<c047e9cf>] (f2fs_decompress_pages+0x97/0x228)
+ [<c047e9cf>] (f2fs_decompress_pages) from [<c045d0ab>] (__read_end_io+0xfb/0x130)
+ [<c045d0ab>] (__read_end_io) from [<c045d141>] (f2fs_post_read_work+0x61/0x84)
+ [<c045d141>] (f2fs_post_read_work) from [<c0130b2f>] (process_one_work+0x15f/0x3b0)
+ [<c0130b2f>] (process_one_work) from [<c0130e7b>] (worker_thread+0xfb/0x3e0)
+ [<c0130e7b>] (worker_thread) from [<c0135c3b>] (kthread+0xeb/0x10c)
+ [<c0135c3b>] (kthread) from [<c0100159>]
+
+zstd may allocate a large amount of memory for {,de}compression, which may
+cause file copy failures on low-end devices that have very little memory.
+
+For decompression, let's just allocate memory of the proper size based on
+the current file's cluster size instead of the max cluster size.
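+
+As a rough illustration (assuming a 4 KiB PAGE_SIZE, which is an
+assumption and not universal), the per-file window bound becomes:
+
+	MAX_COMPRESS_WINDOW_SIZE(2) == 4096 << 2 == 16 KiB  /* smallest cluster */
+	MAX_COMPRESS_WINDOW_SIZE(8) == 4096 << 8 == 1 MiB   /* old fixed maximum */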
+
+Reported-by: 5kft <5kft@5kft.org>
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/compress.c | 7 ++++---
+ fs/f2fs/f2fs.h | 2 +-
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 1dfb126a0cb20..1cd4b3f9c9f8c 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -382,16 +382,17 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
+ ZSTD_DStream *stream;
+ void *workspace;
+ unsigned int workspace_size;
++ unsigned int max_window_size =
++ MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
+
+- workspace_size = ZSTD_DStreamWorkspaceBound(MAX_COMPRESS_WINDOW_SIZE);
++ workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
+
+ workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
+ workspace_size, GFP_NOFS);
+ if (!workspace)
+ return -ENOMEM;
+
+- stream = ZSTD_initDStream(MAX_COMPRESS_WINDOW_SIZE,
+- workspace, workspace_size);
++ stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
+ if (!stream) {
+ printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
+ KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index d9e52a7f3702f..98c4b166f192b 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -1394,7 +1394,7 @@ struct decompress_io_ctx {
+ #define NULL_CLUSTER ((unsigned int)(~0))
+ #define MIN_COMPRESS_LOG_SIZE 2
+ #define MAX_COMPRESS_LOG_SIZE 8
+-#define MAX_COMPRESS_WINDOW_SIZE ((PAGE_SIZE) << MAX_COMPRESS_LOG_SIZE)
++#define MAX_COMPRESS_WINDOW_SIZE(log_size) ((PAGE_SIZE) << (log_size))
+
+ struct f2fs_sb_info {
+ struct super_block *sb; /* pointer to VFS super block */
+--
+2.27.0
+
--- /dev/null
+From 2b7feb84b0ebbf4b7b45f93be6903f3df4ea43da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Sep 2020 11:03:49 +0800
+Subject: f2fs: compress: fix to disallow enabling compress on non-empty file
+
+From: Chao Yu <yuchao0@huawei.com>
+
+[ Upstream commit 519a5a2f37b850f4eb86674a10d143088670a390 ]
+
+Compressed inodes and normal inodes have different layouts, so we should
+disallow enabling compression on a non-empty file to avoid a race condition
+during inode .i_addr array parsing and updating.
+
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+[Jaegeuk Kim: Fix missing condition]
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/file.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 8a422400e824d..4ec10256dc67f 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1836,6 +1836,8 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
+ if (iflags & F2FS_COMPR_FL) {
+ if (!f2fs_may_compress(inode))
+ return -EINVAL;
++ if (S_ISREG(inode->i_mode) && inode->i_size)
++ return -EINVAL;
+
+ set_compress_context(inode);
+ }
+--
+2.27.0
+
--- /dev/null
+From 0ecef49d576463df188050198f453ccb0ff385d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Sep 2020 20:53:13 +0800
+Subject: f2fs: do sanity check on zoned block device path
+
+From: Chao Yu <yuchao0@huawei.com>
+
+[ Upstream commit 07eb1d699452de04e9d389ff17fb8fc9e975d7bf ]
+
+sbi->devs is initialized only if the image enables the multiple device
+feature or the blkzoned feature. If the blkzoned feature flag was set by
+fuzzing on a non-blkzoned device, we will hit the panic below:
+
+get_zone_idx fs/f2fs/segment.c:4892 [inline]
+f2fs_usable_zone_blks_in_seg fs/f2fs/segment.c:4943 [inline]
+f2fs_usable_blks_in_seg+0x39b/0xa00 fs/f2fs/segment.c:4999
+Call Trace:
+ check_block_count+0x69/0x4e0 fs/f2fs/segment.h:704
+ build_sit_entries fs/f2fs/segment.c:4403 [inline]
+ f2fs_build_segment_manager+0x51da/0xa370 fs/f2fs/segment.c:5100
+ f2fs_fill_super+0x3880/0x6ff0 fs/f2fs/super.c:3684
+ mount_bdev+0x32e/0x3f0 fs/super.c:1417
+ legacy_get_tree+0x105/0x220 fs/fs_context.c:592
+ vfs_get_tree+0x89/0x2f0 fs/super.c:1547
+ do_new_mount fs/namespace.c:2896 [inline]
+ path_mount+0x12ae/0x1e70 fs/namespace.c:3216
+ do_mount fs/namespace.c:3229 [inline]
+ __do_sys_mount fs/namespace.c:3437 [inline]
+ __se_sys_mount fs/namespace.c:3414 [inline]
+ __x64_sys_mount+0x27f/0x300 fs/namespace.c:3414
+ do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
+
+Add a sanity check for inconsistencies among these factors: the blkzoned
+flag, the device path and the device characteristics, to avoid the above
+panic.
+
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/super.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index dfa072fa80815..be5050292caa5 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -2832,6 +2832,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
+ segment_count, dev_seg_count);
+ return -EFSCORRUPTED;
+ }
++ } else {
++ if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
++ !bdev_is_zoned(sbi->sb->s_bdev)) {
++ f2fs_info(sbi, "Zoned block device path is missing");
++ return -EFSCORRUPTED;
++ }
+ }
+
+ if (secs_per_zone > total_sections || !secs_per_zone) {
+--
+2.27.0
+
--- /dev/null
+From 1cb1b96a378e58668d3f32a89e243a358d8eefbe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Sep 2020 09:23:12 +0800
+Subject: f2fs: fix to check segment boundary during SIT page readahead
+
+From: Chao Yu <yuchao0@huawei.com>
+
+[ Upstream commit 6a257471fa42c8c9c04a875cd3a2a22db148e0f0 ]
+
+As syzbot reported:
+
+kernel BUG at fs/f2fs/segment.h:657!
+invalid opcode: 0000 [#1] PREEMPT SMP KASAN
+CPU: 1 PID: 16220 Comm: syz-executor.0 Not tainted 5.9.0-rc5-syzkaller #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+RIP: 0010:f2fs_ra_meta_pages+0xa51/0xdc0 fs/f2fs/segment.h:657
+Call Trace:
+ build_sit_entries fs/f2fs/segment.c:4195 [inline]
+ f2fs_build_segment_manager+0x4b8a/0xa3c0 fs/f2fs/segment.c:4779
+ f2fs_fill_super+0x377d/0x6b80 fs/f2fs/super.c:3633
+ mount_bdev+0x32e/0x3f0 fs/super.c:1417
+ legacy_get_tree+0x105/0x220 fs/fs_context.c:592
+ vfs_get_tree+0x89/0x2f0 fs/super.c:1547
+ do_new_mount fs/namespace.c:2875 [inline]
+ path_mount+0x1387/0x2070 fs/namespace.c:3192
+ do_mount fs/namespace.c:3205 [inline]
+ __do_sys_mount fs/namespace.c:3413 [inline]
+ __se_sys_mount fs/namespace.c:3390 [inline]
+ __x64_sys_mount+0x27f/0x300 fs/namespace.c:3390
+ do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+@blkno in f2fs_ra_meta_pages() could exceed the max segment count, causing a
+panic in the following sanity check in current_sit_addr(); add a check
+condition to avoid this issue.
+
+Reported-by: syzbot+3698081bcf0bb2d12174@syzkaller.appspotmail.com
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/checkpoint.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index bf190a718ca6c..0b7aec059f112 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -243,6 +243,8 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
+ blkno * NAT_ENTRY_PER_BLOCK);
+ break;
+ case META_SIT:
++ if (unlikely(blkno >= TOTAL_SEGS(sbi)))
++ goto out;
+ /* get sit block addr */
+ fio.new_blkaddr = current_sit_addr(sbi,
+ blkno * SIT_ENTRY_PER_BLOCK);
+--
+2.27.0
+
--- /dev/null
+From 3a22012def3e2faef6cc0b80fd1f0a46a0eca91b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Oct 2020 10:40:48 +0800
+Subject: f2fs: fix to set SBI_NEED_FSCK flag for inconsistent inode
+
+From: Chao Yu <yuchao0@huawei.com>
+
+[ Upstream commit d662fad143c0470ad7f46ea7b02da539f613d7d7 ]
+
+If a compressed inode has inconsistent i_compress_algorithm,
+i_compr_blocks or i_log_cluster_size fields, we missed setting
+SBI_NEED_FSCK to tell fsck to repair the inode; fix it.
+
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/inode.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 5195e083fc1e6..12c7fa1631935 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -299,6 +299,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
+ i_log_cluster_size)) {
+ if (ri->i_compress_algorithm >= COMPRESS_MAX) {
++ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
+ "compress algorithm: %u, run fsck to fix",
+ __func__, inode->i_ino,
+@@ -307,6 +308,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ }
+ if (le64_to_cpu(ri->i_compr_blocks) >
+ SECTOR_TO_BLOCK(inode->i_blocks)) {
++ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
+ "i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
+ __func__, inode->i_ino,
+@@ -316,6 +318,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ }
+ if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
+ ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
++ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
+ "log cluster size: %u, run fsck to fix",
+ __func__, inode->i_ino,
+--
+2.27.0
+
--- /dev/null
+From c7dbda0ad137f297d9928b3290bddd9d45ad0d01 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Sep 2020 09:22:50 +0800
+Subject: f2fs: fix uninit-value in f2fs_lookup
+
+From: Chao Yu <yuchao0@huawei.com>
+
+[ Upstream commit 6d7ab88a98c1b7a47c228f8ffb4f44d631eaf284 ]
+
+As syzbot reported:
+
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x21c/0x280 lib/dump_stack.c:118
+ kmsan_report+0xf7/0x1e0 mm/kmsan/kmsan_report.c:122
+ __msan_warning+0x58/0xa0 mm/kmsan/kmsan_instr.c:219
+ f2fs_lookup+0xe05/0x1a80 fs/f2fs/namei.c:503
+ lookup_open fs/namei.c:3082 [inline]
+ open_last_lookups fs/namei.c:3177 [inline]
+ path_openat+0x2729/0x6a90 fs/namei.c:3365
+ do_filp_open+0x2b8/0x710 fs/namei.c:3395
+ do_sys_openat2+0xa88/0x1140 fs/open.c:1168
+ do_sys_open fs/open.c:1184 [inline]
+ __do_compat_sys_openat fs/open.c:1242 [inline]
+ __se_compat_sys_openat+0x2a4/0x310 fs/open.c:1240
+ __ia32_compat_sys_openat+0x56/0x70 fs/open.c:1240
+ do_syscall_32_irqs_on arch/x86/entry/common.c:80 [inline]
+ __do_fast_syscall_32+0x129/0x180 arch/x86/entry/common.c:139
+ do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:162
+ do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:205
+ entry_SYSENTER_compat_after_hwframe+0x4d/0x5c
+
+In f2fs_lookup(), @res_page could be used before being initialized,
+because in __f2fs_find_entry(), once F2FS_I(dir)->i_current_depth has
+been fuzzed to zero, @res_page will never be initialized, causing
+this kmsan warning. Relocate the @res_page initialization to fix
+this bug.
+
+Reported-by: syzbot+0eac6f0bbd558fd866d7@syzkaller.appspotmail.com
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/dir.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
+index 069f498af1e38..ceb4431b56690 100644
+--- a/fs/f2fs/dir.c
++++ b/fs/f2fs/dir.c
+@@ -357,16 +357,15 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+ unsigned int max_depth;
+ unsigned int level;
+
++ *res_page = NULL;
++
+ if (f2fs_has_inline_dentry(dir)) {
+- *res_page = NULL;
+ de = f2fs_find_in_inline_dir(dir, fname, res_page);
+ goto out;
+ }
+
+- if (npages == 0) {
+- *res_page = NULL;
++ if (npages == 0)
+ goto out;
+- }
+
+ max_depth = F2FS_I(dir)->i_current_depth;
+ if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) {
+@@ -377,7 +376,6 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+ }
+
+ for (level = 0; level < max_depth; level++) {
+- *res_page = NULL;
+ de = find_in_level(dir, level, fname, res_page);
+ if (de || IS_ERR(*res_page))
+ break;
+--
+2.27.0
+
--- /dev/null
+From ae2c5076a9c9cd4c6794a957452442b92e97410c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Oct 2020 14:17:35 -0700
+Subject: f2fs: handle errors of f2fs_get_meta_page_nofail
+
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+
+[ Upstream commit 86f33603f8c51537265ff7ac0320638fd2cbdb1b ]
+
+The first problem is that we hit the BUG_ON() in f2fs_get_sum_page() given
+an EIO from f2fs_get_meta_page_nofail().
+
+The quick fix was not to return any error by looping indefinitely, but
+syzbot caught a case where it enters that loop from a fuzzed image. It
+turned out we abused f2fs_get_meta_page_nofail() as in the below call stack.
+
+- f2fs_fill_super
+ - f2fs_build_segment_manager
+ - build_sit_entries
+ - get_current_sit_page
+
+INFO: task syz-executor178:6870 can't die for more than 143 seconds.
+task:syz-executor178 state:R
+ stack:26960 pid: 6870 ppid: 6869 flags:0x00004006
+Call Trace:
+
+Showing all locks held in the system:
+1 lock held by khungtaskd/1179:
+ #0: ffffffff8a554da0 (rcu_read_lock){....}-{1:2}, at: debug_show_all_locks+0x53/0x260 kernel/locking/lockdep.c:6242
+1 lock held by systemd-journal/3920:
+1 lock held by in:imklog/6769:
+ #0: ffff88809eebc130 (&f->f_pos_lock){+.+.}-{3:3}, at: __fdget_pos+0xe9/0x100 fs/file.c:930
+1 lock held by syz-executor178/6870:
+ #0: ffff8880925120e0 (&type->s_umount_key#47/1){+.+.}-{3:3}, at: alloc_super+0x201/0xaf0 fs/super.c:229
+
+Actually, we didn't have to use _nofail in this case, since we could already
+return an error to mount(2) with the error handler.
+
+As a result, this patch tries to 1) remove _nofail callers as much as possible,
+and 2) deal with the error case in the last remaining caller, f2fs_get_sum_page().
+
+Reported-by: syzbot+ee250ac8137be41d7b13@syzkaller.appspotmail.com
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/checkpoint.c | 2 +-
+ fs/f2fs/f2fs.h | 2 +-
+ fs/f2fs/node.c | 2 +-
+ fs/f2fs/segment.c | 12 +++++++++---
+ 4 files changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index 0b7aec059f112..4a97fe4ddf789 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -107,7 +107,7 @@ struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
+ return __get_meta_page(sbi, index, true);
+ }
+
+-struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index)
++struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index)
+ {
+ struct page *page;
+ int count = 0;
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 98c4b166f192b..d44c6c36de678 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3385,7 +3385,7 @@ enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
+ void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
+ struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
+ struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
+-struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index);
++struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
+ struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
+ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type);
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index cb1b5b61a1dab..cc4700f6240db 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -109,7 +109,7 @@ static void clear_node_page_dirty(struct page *page)
+
+ static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
+ {
+- return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid));
++ return f2fs_get_meta_page(sbi, current_nat_addr(sbi, nid));
+ }
+
+ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index e247a5ef3713f..2628406f43f64 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -2344,7 +2344,9 @@ int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
+ */
+ struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
+ {
+- return f2fs_get_meta_page_nofail(sbi, GET_SUM_BLOCK(sbi, segno));
++ if (unlikely(f2fs_cp_error(sbi)))
++ return ERR_PTR(-EIO);
++ return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
+ }
+
+ void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
+@@ -2616,7 +2618,11 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type)
+ __next_free_blkoff(sbi, curseg, 0);
+
+ sum_page = f2fs_get_sum_page(sbi, new_segno);
+- f2fs_bug_on(sbi, IS_ERR(sum_page));
++ if (IS_ERR(sum_page)) {
++ /* GC won't be able to use stale summary pages by cp_error */
++ memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
++ return;
++ }
+ sum_node = (struct f2fs_summary_block *)page_address(sum_page);
+ memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
+ f2fs_put_page(sum_page, 1);
+@@ -3781,7 +3787,7 @@ int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
+ static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+ {
+- return f2fs_get_meta_page_nofail(sbi, current_sit_addr(sbi, segno));
++ return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
+ }
+
+ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
+--
+2.27.0
+
--- /dev/null
+From 5b3cdc4c28e487aa0db0d0d08000c49704f52e20 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Oct 2020 14:26:24 +0100
+Subject: firmware: arm_scmi: Add missing Rx size re-initialisation
+
+From: Sudeep Holla <sudeep.holla@arm.com>
+
+[ Upstream commit 9724722fde8f9bbd2b87340f00b9300c9284001e ]
+
+A few commands provide the list of descriptors only partially and need to
+be called repeatedly until all the descriptors have been fetched. In such
+cases, we don't release the buffers and instead reuse them for the
+consecutive transmits.
+
+However, currently we don't reset the Rx size, which remains set according
+to the response to the previous transmit. This may result in the response
+size being interpreted incorrectly, as the firmware may respond with a size
+greater than the one set, while we read only up to the size set by the
+previous response.
+
+Let us reset the receive buffer size to the maximum possible in such cases,
+as we don't know the exact size of the response.
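+
+A rough sketch of the partial-fetch pattern this addresses (hypothetical
+helper names, not the actual driver code):
+
+  do {
+          ret = do_transfer(handle, t);   /* response shrinks t->rx.len */
+          if (ret)
+                  break;
+          copy_out_descriptors(t, &num_fetched);
+          reset_rx_to_maxsz(handle, t);   /* restore full Rx size before reuse */
+  } while (descriptors_remaining(t, num_fetched));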
+
+Link: https://lore.kernel.org/r/20201012141746.32575-1-sudeep.holla@arm.com
+Fixes: b6f20ff8bd94 ("firmware: arm_scmi: add common infrastructure and support for base protocol")
+Reported-by: Etienne Carriere <etienne.carriere@linaro.org>
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/arm_scmi/base.c | 2 ++
+ drivers/firmware/arm_scmi/clock.c | 2 ++
+ drivers/firmware/arm_scmi/common.h | 2 ++
+ drivers/firmware/arm_scmi/driver.c | 8 ++++++++
+ drivers/firmware/arm_scmi/perf.c | 2 ++
+ drivers/firmware/arm_scmi/sensors.c | 2 ++
+ 6 files changed, 18 insertions(+)
+
+diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
+index 9853bd3c4d456..017e5d8bd869a 100644
+--- a/drivers/firmware/arm_scmi/base.c
++++ b/drivers/firmware/arm_scmi/base.c
+@@ -197,6 +197,8 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
+ protocols_imp[tot_num_ret + loop] = *(list + loop);
+
+ tot_num_ret += loop_num_ret;
++
++ scmi_reset_rx_to_maxsz(handle, t);
+ } while (loop_num_ret);
+
+ scmi_xfer_put(handle, t);
+diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
+index 75e39882746e1..fa3ad3a150c36 100644
+--- a/drivers/firmware/arm_scmi/clock.c
++++ b/drivers/firmware/arm_scmi/clock.c
+@@ -192,6 +192,8 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
+ }
+
+ tot_rate_cnt += num_returned;
++
++ scmi_reset_rx_to_maxsz(handle, t);
+ /*
+ * check for both returned and remaining to avoid infinite
+ * loop due to buggy firmware
+diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
+index c113e578cc6ce..6db59a7ac8531 100644
+--- a/drivers/firmware/arm_scmi/common.h
++++ b/drivers/firmware/arm_scmi/common.h
+@@ -147,6 +147,8 @@ int scmi_do_xfer_with_response(const struct scmi_handle *h,
+ struct scmi_xfer *xfer);
+ int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
+ size_t tx_size, size_t rx_size, struct scmi_xfer **p);
++void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
++ struct scmi_xfer *xfer);
+ int scmi_handle_put(const struct scmi_handle *handle);
+ struct scmi_handle *scmi_handle_get(struct device *dev);
+ void scmi_set_handle(struct scmi_device *scmi_dev);
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index 03ec74242c141..28a3e4902ea4e 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -402,6 +402,14 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+ return ret;
+ }
+
++void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
++ struct scmi_xfer *xfer)
++{
++ struct scmi_info *info = handle_to_scmi_info(handle);
++
++ xfer->rx.len = info->desc->max_msg_size;
++}
++
+ #define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
+
+ /**
+diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
+index 3e1e87012c95b..3e8b548a12b62 100644
+--- a/drivers/firmware/arm_scmi/perf.c
++++ b/drivers/firmware/arm_scmi/perf.c
+@@ -304,6 +304,8 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
+ }
+
+ tot_opp_cnt += num_returned;
++
++ scmi_reset_rx_to_maxsz(handle, t);
+ /*
+ * check for both returned and remaining to avoid infinite
+ * loop due to buggy firmware
+diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
+index 1af0ad362e823..4beee439b84ba 100644
+--- a/drivers/firmware/arm_scmi/sensors.c
++++ b/drivers/firmware/arm_scmi/sensors.c
+@@ -166,6 +166,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
+ }
+
+ desc_index += num_returned;
++
++ scmi_reset_rx_to_maxsz(handle, t);
+ /*
+ * check for both returned and remaining to avoid infinite
+ * loop due to buggy firmware
+--
+2.27.0
+
--- /dev/null
+From 5065d170e4a0db29721991158fb171dd30163451 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Oct 2020 16:37:21 +0200
+Subject: firmware: arm_scmi: Expand SMC/HVC message pool to more than one
+
+From: Etienne Carriere <etienne.carriere@linaro.org>
+
+[ Upstream commit 7adb2c8aaaa6a387af7140e57004beba2c04a4c6 ]
+
+SMC/HVC can transmit only one message at a time, as the shared memory
+needs to be protected and the calls are synchronous.
+
+However, in order to allow multiple threads to send SCMI messages
+simultaneously, we need a larger pool of messages.
+
+Let us just use a value of 20 to keep it in sync with the mailbox transport
+implementation. Any other value should work just as well.
+
+Link: https://lore.kernel.org/r/20201008143722.21888-4-etienne.carriere@linaro.org
+Fixes: 1dc6558062da ("firmware: arm_scmi: Add smc/hvc transport")
+Cc: Peng Fan <peng.fan@nxp.com>
+Signed-off-by: Etienne Carriere <etienne.carriere@linaro.org>
+[sudeep.holla: reworded the commit message to indicate the practicality]
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/arm_scmi/smc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
+index a1537d123e385..22f83af6853a1 100644
+--- a/drivers/firmware/arm_scmi/smc.c
++++ b/drivers/firmware/arm_scmi/smc.c
+@@ -149,6 +149,6 @@ static struct scmi_transport_ops scmi_smc_ops = {
+ const struct scmi_desc scmi_smc_desc = {
+ .ops = &scmi_smc_ops,
+ .max_rx_timeout_ms = 30,
+- .max_msg = 1,
++ .max_msg = 20,
+ .max_msg_size = 128,
+ };
+--
+2.27.0
+
--- /dev/null
+From 3efc0836cbeb59d8254ce7edc5fcf4248ddc2ffb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Oct 2020 16:37:22 +0200
+Subject: firmware: arm_scmi: Fix ARCH_COLD_RESET
+
+From: Etienne Carriere <etienne.carriere@linaro.org>
+
+[ Upstream commit 45b9e04d5ba0b043783dfe2b19bb728e712cb32e ]
+
+The definition of ARCH_COLD_RESET is wrong. Let us fix it according to
+the SCMI specification.
+
+Link: https://lore.kernel.org/r/20201008143722.21888-5-etienne.carriere@linaro.org
+Fixes: 95a15d80aa0d ("firmware: arm_scmi: Add RESET protocol in SCMI v2.0")
+Signed-off-by: Etienne Carriere <etienne.carriere@linaro.org>
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/arm_scmi/reset.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
+index 3691bafca0574..86bda46de8eb8 100644
+--- a/drivers/firmware/arm_scmi/reset.c
++++ b/drivers/firmware/arm_scmi/reset.c
+@@ -36,9 +36,7 @@ struct scmi_msg_reset_domain_reset {
+ #define EXPLICIT_RESET_ASSERT BIT(1)
+ #define ASYNCHRONOUS_RESET BIT(2)
+ __le32 reset_state;
+-#define ARCH_RESET_TYPE BIT(31)
+-#define COLD_RESET_STATE BIT(0)
+-#define ARCH_COLD_RESET (ARCH_RESET_TYPE | COLD_RESET_STATE)
++#define ARCH_COLD_RESET 0
+ };
+
+ struct scmi_msg_reset_notify {
+--
+2.27.0
+
--- /dev/null
+From fc09a2cdda481caa5a027a4e2314596d4e708bc4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Oct 2020 19:17:37 -0700
+Subject: firmware: arm_scmi: Fix duplicate workqueue name
+
+From: Florian Fainelli <f.fainelli@gmail.com>
+
+[ Upstream commit b9ceca6be43233845be70792be9b5ab315d2e010 ]
+
+When more than a single SCMI device are present in the system, the
+creation of the notification workqueue with the WQ_SYSFS flag will lead
+to the following sysfs duplicate node warning:
+
+ sysfs: cannot create duplicate filename '/devices/virtual/workqueue/scmi_notify'
+ CPU: 0 PID: 20 Comm: kworker/0:1 Not tainted 5.9.0-gdf4dd84a3f7d #29
+ Hardware name: Broadcom STB (Flattened Device Tree)
+ Workqueue: events deferred_probe_work_func
+ Backtrace:
+ show_stack + 0x20/0x24
+ dump_stack + 0xbc/0xe0
+ sysfs_warn_dup + 0x70/0x80
+ sysfs_create_dir_ns + 0x15c/0x1a4
+ kobject_add_internal + 0x140/0x4d0
+ kobject_add + 0xc8/0x138
+ device_add + 0x1dc/0xc20
+ device_register + 0x24/0x28
+ workqueue_sysfs_register + 0xe4/0x1f0
+ alloc_workqueue + 0x448/0x6ac
+ scmi_notification_init + 0x78/0x1dc
+ scmi_probe + 0x268/0x4fc
+ platform_drv_probe + 0x70/0xc8
+ really_probe + 0x184/0x728
+ driver_probe_device + 0xa4/0x278
+ __device_attach_driver + 0xe8/0x148
+ bus_for_each_drv + 0x108/0x158
+ __device_attach + 0x190/0x234
+ device_initial_probe + 0x1c/0x20
+ bus_probe_device + 0xdc/0xec
+ deferred_probe_work_func + 0xd4/0x11c
+ process_one_work + 0x420/0x8f0
+ worker_thread + 0x4fc/0x91c
+ kthread + 0x21c/0x22c
+ ret_from_fork + 0x14/0x20
+ kobject_add_internal failed for scmi_notify with -EEXIST, don't try to
+ register things with the same name in the same directory.
+ arm-scmi brcm_scmi@1: SCMI Notifications - Initialization Failed.
+ arm-scmi brcm_scmi@1: SCMI Notifications NOT available.
+ arm-scmi brcm_scmi@1: SCMI Protocol v1.0 'brcm-scmi:' Firmware version 0x1
+
+Fix this by using dev_name(handle->dev), which guarantees that the name is
+unique; this also helps correlate which notification workqueue corresponds
+to which SCMI device instance.
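+
+An illustrative sketch of deriving a unique, per-device workqueue name with
+alloc_workqueue() (slightly different from the exact call in the diff below,
+which passes dev_name() as the name directly):
+
+  struct workqueue_struct *wq;
+
+  wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS, 0,
+                       dev_name(handle->dev));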
+
+Link: https://lore.kernel.org/r/20201014021737.287340-1-f.fainelli@gmail.com
+Fixes: bd31b249692e ("firmware: arm_scmi: Add notification dispatch and delivery")
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+[sudeep.holla: trimmed backtrace to remove all unwanted hexcodes and timestamps]
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/arm_scmi/notify.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
+index 4d9f6de3a7fae..51c5a376fb472 100644
+--- a/drivers/firmware/arm_scmi/notify.c
++++ b/drivers/firmware/arm_scmi/notify.c
+@@ -1474,7 +1474,7 @@ int scmi_notification_init(struct scmi_handle *handle)
+ ni->gid = gid;
+ ni->handle = handle;
+
+- ni->notify_wq = alloc_workqueue("scmi_notify",
++ ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
+ WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
+ 0);
+ if (!ni->notify_wq)
+--
+2.27.0
+
--- /dev/null
+From 278eb30b7c81d43fcff98dc2bc969ca2c14e1fe2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Oct 2020 14:31:09 +0100
+Subject: firmware: arm_scmi: Fix locking in notifications
+
+From: Cristian Marussi <cristian.marussi@arm.com>
+
+[ Upstream commit c7821c2d9c0dda0adf2bcf88e79b02a19a430be4 ]
+
+When a protocol registers its events, the notification core takes care
+to rescan the hashtable of pending event handlers and activate all the
+possibly existent handlers referring to any of the events that are just
+registered by the new protocol. When a pending handler becomes active
+the core requests and enables the corresponding events in the SCMI
+firmware.
+
+If, for whatever reason, the enable fails, such invalid event handler
+must be finally removed and freed. Let us ensure to use the
+scmi_put_active_handler() helper which handles properly the needed
+additional locking.
+
+Failing to properly acquire all the needed mutexes exposes a race that
+leads to the following splat being observed:
+
+ WARNING: CPU: 0 PID: 388 at lib/refcount.c:28 refcount_warn_saturate+0xf8/0x148
+ Hardware name: ARM LTD ARM Juno Development Platform/ARM Juno Development
+ Platform, BIOS EDK II Jun 30 2020
+ pstate: 40000005 (nZcv daif -PAN -UAO BTYPE=--)
+ pc : refcount_warn_saturate+0xf8/0x148
+ lr : refcount_warn_saturate+0xf8/0x148
+ Call trace:
+ refcount_warn_saturate+0xf8/0x148
+ scmi_put_handler_unlocked.isra.10+0x204/0x208
+ scmi_put_handler+0x50/0xa0
+ scmi_unregister_notifier+0x1bc/0x240
+ scmi_notify_tester_remove+0x4c/0x68 [dummy_scmi_consumer]
+ scmi_dev_remove+0x54/0x68
+ device_release_driver_internal+0x114/0x1e8
+ driver_detach+0x58/0xe8
+ bus_remove_driver+0x88/0xe0
+ driver_unregister+0x38/0x68
+ scmi_driver_unregister+0x1c/0x28
+ scmi_drv_exit+0x1c/0xae0 [dummy_scmi_consumer]
+ __arm64_sys_delete_module+0x1a4/0x268
+ el0_svc_common.constprop.3+0x94/0x178
+ do_el0_svc+0x2c/0x98
+ el0_sync_handler+0x148/0x1a8
+ el0_sync+0x158/0x180
+
+Link: https://lore.kernel.org/r/20201013133109.49821-1-cristian.marussi@arm.com
+Fixes: e7c215f358a35 ("firmware: arm_scmi: Add notification callbacks-registration")
+Signed-off-by: Cristian Marussi <cristian.marussi@arm.com>
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/arm_scmi/notify.c | 20 +++++++++++++-------
+ 1 file changed, 13 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
+index 4731daaacd19e..4d9f6de3a7fae 100644
+--- a/drivers/firmware/arm_scmi/notify.c
++++ b/drivers/firmware/arm_scmi/notify.c
+@@ -1403,15 +1403,21 @@ static void scmi_protocols_late_init(struct work_struct *work)
+ "finalized PENDING handler - key:%X\n",
+ hndl->key);
+ ret = scmi_event_handler_enable_events(hndl);
++ if (ret) {
++ dev_dbg(ni->handle->dev,
++ "purging INVALID handler - key:%X\n",
++ hndl->key);
++ scmi_put_active_handler(ni, hndl);
++ }
+ } else {
+ ret = scmi_valid_pending_handler(ni, hndl);
+- }
+- if (ret) {
+- dev_dbg(ni->handle->dev,
+- "purging PENDING handler - key:%X\n",
+- hndl->key);
+- /* this hndl can be only a pending one */
+- scmi_put_handler_unlocked(ni, hndl);
++ if (ret) {
++ dev_dbg(ni->handle->dev,
++ "purging PENDING handler - key:%X\n",
++ hndl->key);
++ /* this hndl can be only a pending one */
++ scmi_put_handler_unlocked(ni, hndl);
++ }
+ }
+ }
+ mutex_unlock(&ni->pending_mtx);
+--
+2.27.0
+
--- /dev/null
+From 8e1a20f1635df5d5514d73d71ed76da5528e032c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Sep 2020 12:00:04 +0100
+Subject: firmware: arm_scmi: Move scmi bus init and exit calls into the driver
+
+From: Sudeep Holla <sudeep.holla@arm.com>
+
+[ Upstream commit 5a2f0a0bdf201e2183904b6217f9c74774c961a8 ]
+
+In preparation for enabling scmi to be built as a single module, let us
+move the scmi bus {de-,}initialisation calls into the driver.
+
+The main reason for this is to keep things simple instead of maintaining
+separate modules and dealing with all the possible initcall races and
+deferred probe handling. We can split it into separate modules if needed
+in the future.
+
+Link: https://lore.kernel.org/r/20200907195046.56615-3-sudeep.holla@arm.com
+Tested-by: Cristian Marussi <cristian.marussi@arm.com>
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/arm_scmi/bus.c | 6 ++----
+ drivers/firmware/arm_scmi/common.h | 3 +++
+ drivers/firmware/arm_scmi/driver.c | 16 +++++++++++++++-
+ 3 files changed, 20 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
+index db55c43a2cbda..1377ec76a45db 100644
+--- a/drivers/firmware/arm_scmi/bus.c
++++ b/drivers/firmware/arm_scmi/bus.c
+@@ -230,7 +230,7 @@ static void scmi_devices_unregister(void)
+ bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister);
+ }
+
+-static int __init scmi_bus_init(void)
++int __init scmi_bus_init(void)
+ {
+ int retval;
+
+@@ -240,12 +240,10 @@ static int __init scmi_bus_init(void)
+
+ return retval;
+ }
+-subsys_initcall(scmi_bus_init);
+
+-static void __exit scmi_bus_exit(void)
++void __exit scmi_bus_exit(void)
+ {
+ scmi_devices_unregister();
+ bus_unregister(&scmi_bus_type);
+ ida_destroy(&scmi_bus_id);
+ }
+-module_exit(scmi_bus_exit);
+diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
+index 6db59a7ac8531..124080955c4a0 100644
+--- a/drivers/firmware/arm_scmi/common.h
++++ b/drivers/firmware/arm_scmi/common.h
+@@ -158,6 +158,9 @@ void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
+
+ int scmi_base_protocol_init(struct scmi_handle *h);
+
++int __init scmi_bus_init(void);
++void __exit scmi_bus_exit(void);
++
+ /* SCMI Transport */
+ /**
+ * struct scmi_chan_info - Structure representing a SCMI channel information
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index 28a3e4902ea4e..5c2f4fab40994 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -936,7 +936,21 @@ static struct platform_driver scmi_driver = {
+ .remove = scmi_remove,
+ };
+
+-module_platform_driver(scmi_driver);
++static int __init scmi_driver_init(void)
++{
++ scmi_bus_init();
++
++ return platform_driver_register(&scmi_driver);
++}
++module_init(scmi_driver_init);
++
++static void __exit scmi_driver_exit(void)
++{
++ scmi_bus_exit();
++
++ platform_driver_unregister(&scmi_driver);
++}
++module_exit(scmi_driver_exit);
+
+ MODULE_ALIAS("platform: arm-scmi");
+ MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+--
+2.27.0
+
--- /dev/null
+From bfc3326dea642b2183c4f0bb6b9e209ad83e4745 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 27 Sep 2020 02:08:58 +0200
+Subject: futex: Fix incorrect should_fail_futex() handling
+
+From: Mateusz Nosek <mateusznosek0@gmail.com>
+
+[ Upstream commit 921c7ebd1337d1a46783d7e15a850e12aed2eaa0 ]
+
+If should_fail_futex() returns true in wake_futex_pi(), then the 'ret'
+variable is set to -EFAULT and then immediately overwritten. So the failure
+injection is non-functional.
+
+Fix it by actually leaving the function and returning -EFAULT.
+
+The Fixes tag is kinda blurry because the initial commit which introduced
+failure injection was already sloppy, but the commit mentioned below broke
+it completely.
+
+[ tglx: Massaged changelog ]
+
+Fixes: 6b4f4bc9cb22 ("locking/futex: Allow low-level atomic operations to return -EAGAIN")
+Signed-off-by: Mateusz Nosek <mateusznosek0@gmail.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20200927000858.24219-1-mateusznosek0@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/futex.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index a5876694a60eb..39681bf8b06ca 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1502,8 +1502,10 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
+ */
+ newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+
+- if (unlikely(should_fail_futex(true)))
++ if (unlikely(should_fail_futex(true))) {
+ ret = -EFAULT;
++ goto out_unlock;
++ }
+
+ ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
+ if (!ret && (curval != uval)) {
+--
+2.27.0
+
--- /dev/null
+From f165bd0d32998a59aab2ad1b71f75ab4cb18c288 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Oct 2020 22:01:09 +0530
+Subject: gfs2: add validation checks for size of superblock
+
+From: Anant Thazhemadam <anant.thazhemadam@gmail.com>
+
+[ Upstream commit 0ddc5154b24c96f20e94d653b0a814438de6032b ]
+
+In gfs2_check_sb(), no validation checks are performed with regard to
+the size of the superblock.
+syzkaller detected a slab-out-of-bounds bug that was primarily caused
+by the block size of a superblock being set to zero.
+A valid size for a superblock is a power of 2 between 512 and PAGE_SIZE.
+Performing validation checks and ensuring that the size of the superblock
+is valid fixes this bug.
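+
+As a small illustration (generic variable names, not the exact gfs2 code),
+such a block-size sanity check is just a range test plus a power-of-two
+test:
+
+  if (bsize < 512 || bsize > PAGE_SIZE || (bsize & (bsize - 1)))
+          return -EINVAL;   /* not a power of 2 in [512, PAGE_SIZE] */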
+
+Reported-by: syzbot+af90d47a37376844e731@syzkaller.appspotmail.com
+Tested-by: syzbot+af90d47a37376844e731@syzkaller.appspotmail.com
+Suggested-by: Andrew Price <anprice@redhat.com>
+Signed-off-by: Anant Thazhemadam <anant.thazhemadam@gmail.com>
+[Minor code reordering.]
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/gfs2/ops_fstype.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index 5bd602a290f72..03c33fc03c055 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -169,15 +169,19 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
+ return -EINVAL;
+ }
+
+- /* If format numbers match exactly, we're done. */
+-
+- if (sb->sb_fs_format == GFS2_FORMAT_FS &&
+- sb->sb_multihost_format == GFS2_FORMAT_MULTI)
+- return 0;
++ if (sb->sb_fs_format != GFS2_FORMAT_FS ||
++ sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
++ fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
++ return -EINVAL;
++ }
+
+- fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
++ if (sb->sb_bsize < 512 || sb->sb_bsize > PAGE_SIZE ||
++ (sb->sb_bsize & (sb->sb_bsize - 1))) {
++ pr_warn("Invalid superblock size\n");
++ return -EINVAL;
++ }
+
+- return -EINVAL;
++ return 0;
+ }
+
+ static void end_bio_io_page(struct bio *bio)
+--
+2.27.0
+
--- /dev/null
+From eb53eda883e77363d0af22801d68e0cacf993ef9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Sep 2020 11:06:23 -0500
+Subject: gfs2: call truncate_inode_pages_final for address space glocks
+
+From: Bob Peterson <rpeterso@redhat.com>
+
+[ Upstream commit ee1e2c773e4f4ce2213f9d77cc703b669ca6fa3f ]
+
+Before this patch, we were not calling truncate_inode_pages_final for the
+address space of glocks, which left the possibility of a leak. We now
+take care of the problem instead of complaining, and we do it during
+glock tear-down.
+
+Signed-off-by: Bob Peterson <rpeterso@redhat.com>
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/gfs2/glock.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index f13b136654cae..3554e71be06ec 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -270,7 +270,12 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
+ gfs2_glock_remove_from_lru(gl);
+ spin_unlock(&gl->gl_lockref.lock);
+ GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
+- GLOCK_BUG_ON(gl, mapping && mapping->nrpages && !gfs2_withdrawn(sdp));
++ if (mapping) {
++ truncate_inode_pages_final(mapping);
++ if (!gfs2_withdrawn(sdp))
++ GLOCK_BUG_ON(gl, mapping->nrpages ||
++ mapping->nrexceptional);
++ }
+ trace_gfs2_glock_put(gl);
+ sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
+ }
+--
+2.27.0
+
--- /dev/null
+From eb31e5d060cbd93b36f6998f8f3b7b5ad75599de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Oct 2020 12:30:58 +0100
+Subject: gfs2: Fix NULL pointer dereference in gfs2_rgrp_dump
+
+From: Andrew Price <anprice@redhat.com>
+
+[ Upstream commit 0e539ca1bbbe85a86549c97a30a765ada4a09df9 ]
+
+When an rindex entry is found to be corrupt, compute_bitstructs() calls
+gfs2_consist_rgrpd() which calls gfs2_rgrp_dump() like this:
+
+ gfs2_rgrp_dump(NULL, rgd->rd_gl, fs_id_buf);
+
+gfs2_rgrp_dump() then dereferences the glock without checking it, and we get
+
+ BUG: KASAN: null-ptr-deref in gfs2_rgrp_dump+0x28/0x280
+
+because there's no rgrp glock involved while reading the rindex on mount.
+
+Fix this by changing gfs2_rgrp_dump to take an rgrp argument.
+
+Reported-by: syzbot+43fa87986bdd31df9de6@syzkaller.appspotmail.com
+Signed-off-by: Andrew Price <anprice@redhat.com>
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/gfs2/glops.c | 11 ++++++++++-
+ fs/gfs2/rgrp.c | 9 +++------
+ fs/gfs2/rgrp.h | 2 +-
+ fs/gfs2/util.c | 2 +-
+ 4 files changed, 15 insertions(+), 9 deletions(-)
+
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index de1d5f1d9ff85..c2c90747d79b5 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -227,6 +227,15 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
+ rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
+ }
+
++static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
++ const char *fs_id_buf)
++{
++ struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
++
++ if (rgd)
++ gfs2_rgrp_dump(seq, rgd, fs_id_buf);
++}
++
+ static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
+ {
+ struct gfs2_inode *ip;
+@@ -712,7 +721,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
+ .go_sync = rgrp_go_sync,
+ .go_inval = rgrp_go_inval,
+ .go_lock = gfs2_rgrp_go_lock,
+- .go_dump = gfs2_rgrp_dump,
++ .go_dump = gfs2_rgrp_go_dump,
+ .go_type = LM_TYPE_RGRP,
+ .go_flags = GLOF_LVB,
+ };
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index 074f228ea8390..1bba5a9d45fa3 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -2209,20 +2209,17 @@ static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd,
+ /**
+ * gfs2_rgrp_dump - print out an rgrp
+ * @seq: The iterator
+- * @gl: The glock in question
++ * @rgd: The rgrp in question
+ * @fs_id_buf: pointer to file system id (if requested)
+ *
+ */
+
+-void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_glock *gl,
++void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
+ const char *fs_id_buf)
+ {
+- struct gfs2_rgrpd *rgd = gl->gl_object;
+ struct gfs2_blkreserv *trs;
+ const struct rb_node *n;
+
+- if (rgd == NULL)
+- return;
+ gfs2_print_dbg(seq, "%s R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
+ fs_id_buf,
+ (unsigned long long)rgd->rd_addr, rgd->rd_flags,
+@@ -2253,7 +2250,7 @@ static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
+ (unsigned long long)rgd->rd_addr);
+ fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
+ sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
+- gfs2_rgrp_dump(NULL, rgd->rd_gl, fs_id_buf);
++ gfs2_rgrp_dump(NULL, rgd, fs_id_buf);
+ rgd->rd_flags |= GFS2_RDF_ERROR;
+ }
+
+diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
+index a1d7e14fc55b9..9a587ada51eda 100644
+--- a/fs/gfs2/rgrp.h
++++ b/fs/gfs2/rgrp.h
+@@ -67,7 +67,7 @@ extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
+ extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist);
+ extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
+ extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
+-extern void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_glock *gl,
++extern void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
+ const char *fs_id_buf);
+ extern int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
+ struct buffer_head *bh,
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index 1cd0328cae20a..0fba3bf641890 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -419,7 +419,7 @@ void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd,
+ char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
+
+ sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
+- gfs2_rgrp_dump(NULL, rgd->rd_gl, fs_id_buf);
++ gfs2_rgrp_dump(NULL, rgd, fs_id_buf);
+ gfs2_lm(sdp,
+ "fatal: filesystem consistency error\n"
+ " RG = %llu\n"
+--
+2.27.0
+
--- /dev/null
+From 3be4c9e95a1cbdb2ce57cbd2e3575d0bd255e3a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Oct 2020 14:13:09 +0100
+Subject: gfs2: use-after-free in sysfs deregistration
+
+From: Jamie Iles <jamie@nuviainc.com>
+
+[ Upstream commit c2a04b02c060c4858762edce4674d5cba3e5a96f ]
+
+syzkaller found the following splat with CONFIG_DEBUG_KOBJECT_RELEASE=y:
+
+ Read of size 1 at addr ffff000028e896b8 by task kworker/1:2/228
+
+ CPU: 1 PID: 228 Comm: kworker/1:2 Tainted: G S 5.9.0-rc8+ #101
+ Hardware name: linux,dummy-virt (DT)
+ Workqueue: events kobject_delayed_cleanup
+ Call trace:
+ dump_backtrace+0x0/0x4d8
+ show_stack+0x34/0x48
+ dump_stack+0x174/0x1f8
+ print_address_description.constprop.0+0x5c/0x550
+ kasan_report+0x13c/0x1c0
+ __asan_report_load1_noabort+0x34/0x60
+ memcmp+0xd0/0xd8
+ gfs2_uevent+0xc4/0x188
+ kobject_uevent_env+0x54c/0x1240
+ kobject_uevent+0x2c/0x40
+ __kobject_del+0x190/0x1d8
+ kobject_delayed_cleanup+0x2bc/0x3b8
+ process_one_work+0x96c/0x18c0
+ worker_thread+0x3f0/0xc30
+ kthread+0x390/0x498
+ ret_from_fork+0x10/0x18
+
+ Allocated by task 1110:
+ kasan_save_stack+0x28/0x58
+ __kasan_kmalloc.isra.0+0xc8/0xe8
+ kasan_kmalloc+0x10/0x20
+ kmem_cache_alloc_trace+0x1d8/0x2f0
+ alloc_super+0x64/0x8c0
+ sget_fc+0x110/0x620
+ get_tree_bdev+0x190/0x648
+ gfs2_get_tree+0x50/0x228
+ vfs_get_tree+0x84/0x2e8
+ path_mount+0x1134/0x1da8
+ do_mount+0x124/0x138
+ __arm64_sys_mount+0x164/0x238
+ el0_svc_common.constprop.0+0x15c/0x598
+ do_el0_svc+0x60/0x150
+ el0_svc+0x34/0xb0
+ el0_sync_handler+0xc8/0x5b4
+ el0_sync+0x15c/0x180
+
+ Freed by task 228:
+ kasan_save_stack+0x28/0x58
+ kasan_set_track+0x28/0x40
+ kasan_set_free_info+0x24/0x48
+ __kasan_slab_free+0x118/0x190
+ kasan_slab_free+0x14/0x20
+ slab_free_freelist_hook+0x6c/0x210
+ kfree+0x13c/0x460
+
+Use the same pattern as f2fs and ext4, where the kobject destruction must
+complete before allowing the FS itself to be freed. This means that we
+need an explicit free_sbd in the callers.
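+
+For reference, a minimal sketch of that completion-based teardown pattern
+(abbreviated, not the exact gfs2 code):
+
+  /* kobject release callback: only signal, never free here */
+  static void sbd_release(struct kobject *kobj)
+  {
+          struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
+
+          complete(&sdp->sd_kobj_unregister);
+  }
+
+  /* teardown path */
+  kobject_put(&sdp->sd_kobj);
+  wait_for_completion(&sdp->sd_kobj_unregister);  /* kobject fully released */
+  free_sbd(sdp);                                  /* now safe to free */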
+
+Cc: Bob Peterson <rpeterso@redhat.com>
+Cc: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Jamie Iles <jamie@nuviainc.com>
+[Also go to fail_free when init_names fails.]
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/gfs2/incore.h | 1 +
+ fs/gfs2/ops_fstype.c | 22 +++++-----------------
+ fs/gfs2/super.c | 1 +
+ fs/gfs2/sys.c | 5 ++++-
+ 4 files changed, 11 insertions(+), 18 deletions(-)
+
+diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
+index ca2ec02436ec7..387e99d6eda9e 100644
+--- a/fs/gfs2/incore.h
++++ b/fs/gfs2/incore.h
+@@ -705,6 +705,7 @@ struct gfs2_sbd {
+ struct super_block *sd_vfs;
+ struct gfs2_pcpu_lkstats __percpu *sd_lkstats;
+ struct kobject sd_kobj;
++ struct completion sd_kobj_unregister;
+ unsigned long sd_flags; /* SDF_... */
+ struct gfs2_sb_host sd_sb;
+
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index 6d18d2c91add2..5bd602a290f72 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -1062,26 +1062,14 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+ }
+
+ error = init_names(sdp, silent);
+- if (error) {
+- /* In this case, we haven't initialized sysfs, so we have to
+- manually free the sdp. */
+- free_sbd(sdp);
+- sb->s_fs_info = NULL;
+- return error;
+- }
++ if (error)
++ goto fail_free;
+
+ snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name);
+
+ error = gfs2_sys_fs_add(sdp);
+- /*
+- * If we hit an error here, gfs2_sys_fs_add will have called function
+- * kobject_put which causes the sysfs usage count to go to zero, which
+- * causes sysfs to call function gfs2_sbd_release, which frees sdp.
+- * Subsequent error paths here will call gfs2_sys_fs_del, which also
+- * kobject_put to free sdp.
+- */
+ if (error)
+- return error;
++ goto fail_free;
+
+ gfs2_create_debugfs_file(sdp);
+
+@@ -1179,9 +1167,9 @@ fail_lm:
+ gfs2_lm_unmount(sdp);
+ fail_debug:
+ gfs2_delete_debugfs_file(sdp);
+- /* gfs2_sys_fs_del must be the last thing we do, since it causes
+- * sysfs to call function gfs2_sbd_release, which frees sdp. */
+ gfs2_sys_fs_del(sdp);
++fail_free:
++ free_sbd(sdp);
+ sb->s_fs_info = NULL;
+ return error;
+ }
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 9f4d9e7be8397..a28cf447b6b12 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -736,6 +736,7 @@ restart:
+
+ /* At this point, we're through participating in the lockspace */
+ gfs2_sys_fs_del(sdp);
++ free_sbd(sdp);
+ }
+
+ /**
+diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
+index d28c41bd69b05..c3e72dba7418a 100644
+--- a/fs/gfs2/sys.c
++++ b/fs/gfs2/sys.c
+@@ -303,7 +303,7 @@ static void gfs2_sbd_release(struct kobject *kobj)
+ {
+ struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
+
+- free_sbd(sdp);
++ complete(&sdp->sd_kobj_unregister);
+ }
+
+ static struct kobj_type gfs2_ktype = {
+@@ -655,6 +655,7 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
+ sprintf(ro, "RDONLY=%d", sb_rdonly(sb));
+ sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
+
++ init_completion(&sdp->sd_kobj_unregister);
+ sdp->sd_kobj.kset = gfs2_kset;
+ error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL,
+ "%s", sdp->sd_table_name);
+@@ -685,6 +686,7 @@ fail_tune:
+ fail_reg:
+ fs_err(sdp, "error %d adding sysfs files\n", error);
+ kobject_put(&sdp->sd_kobj);
++ wait_for_completion(&sdp->sd_kobj_unregister);
+ sb->s_fs_info = NULL;
+ return error;
+ }
+@@ -695,6 +697,7 @@ void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
+ sysfs_remove_group(&sdp->sd_kobj, &tune_group);
+ sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
+ kobject_put(&sdp->sd_kobj);
++ wait_for_completion(&sdp->sd_kobj_unregister);
+ }
+
+ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
+--
+2.27.0
+
--- /dev/null
+From ae2c61a7076b8c6fa0d8dd9ab34141f1b91e0a4c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Aug 2020 09:41:32 +0300
+Subject: habanalabs: remove security from ARB_MST_QUIET register
+
+From: farah kassabri <fkassabri@habana.ai>
+
+[ Upstream commit acd330c141b4c49f468f00719ebc944656061eac ]
+
+Allow the user application to write to this register in order
+to be able to configure the quiet period of the QMAN between grants.
+
+Signed-off-by: farah kassabri <fkassabri@habana.ai>
+Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../misc/habanalabs/gaudi/gaudi_security.c | 55 +++++++------------
+ 1 file changed, 19 insertions(+), 36 deletions(-)
+
+diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
+index 8d5d6ddee6eda..615b547ad2b7d 100644
+--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
++++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
+@@ -831,8 +831,7 @@ static void gaudi_init_mme_protection_bits(struct hl_device *hdev)
+ PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_23 &
+ PROT_BITS_OFFS) >> 7) << 2;
+- mask = 1 << ((mmMME0_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmMME0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmMME0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -1311,8 +1310,7 @@ static void gaudi_init_mme_protection_bits(struct hl_device *hdev)
+ PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_23 &
+ PROT_BITS_OFFS) >> 7) << 2;
+- mask = 1 << ((mmMME2_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmMME2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmMME2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -1790,8 +1788,7 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ word_offset =
+ ((mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+- mask = 1 << ((mmDMA0_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmDMA0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmDMA0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -2186,8 +2183,7 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ word_offset =
+ ((mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+- mask = 1 << ((mmDMA1_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmDMA1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmDMA1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -2582,8 +2578,7 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ word_offset =
+ ((mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+- mask = 1 << ((mmDMA2_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmDMA2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmDMA2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -2978,8 +2973,7 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ word_offset =
+ ((mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+- mask = 1 << ((mmDMA3_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmDMA3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmDMA3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -3374,8 +3368,7 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ word_offset =
+ ((mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+- mask = 1 << ((mmDMA4_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmDMA4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmDMA4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -3770,8 +3763,7 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ word_offset =
+ ((mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+- mask = 1 << ((mmDMA5_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmDMA5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmDMA5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -4166,8 +4158,8 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ word_offset =
+ ((mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+- mask = 1 << ((mmDMA6_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmDMA6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++
++ mask = 1 << ((mmDMA6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -4562,8 +4554,7 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ word_offset =
+ ((mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+- mask = 1 << ((mmDMA7_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmDMA7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmDMA7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -5491,8 +5482,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+
+ word_offset = ((mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+- mask = 1 << ((mmTPC0_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmTPC0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmTPC0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -5947,8 +5937,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+
+ word_offset = ((mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+- mask = 1 << ((mmTPC1_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmTPC1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmTPC1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -6402,8 +6391,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+- mask = 1 << ((mmTPC2_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmTPC2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmTPC2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -6857,8 +6845,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+- mask = 1 << ((mmTPC3_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmTPC3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmTPC3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -7312,8 +7299,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+- mask = 1 << ((mmTPC4_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmTPC4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmTPC4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -7767,8 +7753,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+- mask = 1 << ((mmTPC5_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmTPC5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmTPC5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -8223,8 +8208,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+
+ word_offset = ((mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+- mask = 1 << ((mmTPC6_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmTPC6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmTPC6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -8681,8 +8665,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+- mask = 1 << ((mmTPC7_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+- mask |= 1 << ((mmTPC7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++ mask = 1 << ((mmTPC7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+--
+2.27.0
+
--- /dev/null
+From c10cc8cea2527346c3b3019c506124477b751c1a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Sep 2020 05:37:28 +0000
+Subject: Handle STATUS_IO_TIMEOUT gracefully
+
+From: Rohith Surabattula <rohiths@microsoft.com>
+
+[ Upstream commit 8e670f77c4a55013db6d23b962f9bf6673a5e7b6 ]
+
+Currently STATUS_IO_TIMEOUT is not treated as a retriable error.
+It is currently mapped to ETIMEDOUT and returned to userspace
+for most system calls. STATUS_IO_TIMEOUT is returned by the server
+in case of unavailability or throttling errors.
+
+This patch maps STATUS_IO_TIMEOUT to EAGAIN so that it can be
+retried. It also adds a check to drop the connection so as not to
+overload the server in case of ongoing unavailability.
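+
+An illustrative sketch of that counting pattern (hypothetical helper
+names, not the exact cifs code):
+
+  #define MAX_CONSECUTIVE_IO_TIMEOUTS 5
+
+  if (is_status_io_timeout(buf)) {
+          num_io_timeout++;
+          if (num_io_timeout > MAX_CONSECUTIVE_IO_TIMEOUTS) {
+                  reconnect(server);      /* tear down and re-establish */
+                  num_io_timeout = 0;
+                  continue;               /* skip this response */
+          }
+  }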
+
+Signed-off-by: Rohith Surabattula <rohiths@microsoft.com>
+Reviewed-by: Aurelien Aptel <aaptel@suse.com>
+Reviewed-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/cifsglob.h | 2 ++
+ fs/cifs/connect.c | 15 ++++++++++++++-
+ fs/cifs/smb2maperror.c | 2 +-
+ fs/cifs/smb2ops.c | 15 +++++++++++++++
+ 4 files changed, 32 insertions(+), 2 deletions(-)
+
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index b565d83ba89ed..5a491afafacc7 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -510,6 +510,8 @@ struct smb_version_operations {
+ struct fiemap_extent_info *, u64, u64);
+ /* version specific llseek implementation */
+ loff_t (*llseek)(struct file *, struct cifs_tcon *, loff_t, int);
++ /* Check for STATUS_IO_TIMEOUT */
++ bool (*is_status_io_timeout)(char *buf);
+ };
+
+ struct smb_version_values {
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 9817a31a39db6..b8780a79a42a2 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -69,6 +69,9 @@ extern bool disable_legacy_dialects;
+ #define TLINK_ERROR_EXPIRE (1 * HZ)
+ #define TLINK_IDLE_EXPIRE (600 * HZ)
+
++/* Drop the connection to not overload the server */
++#define NUM_STATUS_IO_TIMEOUT 5
++
+ enum {
+ /* Mount options that take no arguments */
+ Opt_user_xattr, Opt_nouser_xattr,
+@@ -1117,7 +1120,7 @@ cifs_demultiplex_thread(void *p)
+ struct task_struct *task_to_wake = NULL;
+ struct mid_q_entry *mids[MAX_COMPOUND];
+ char *bufs[MAX_COMPOUND];
+- unsigned int noreclaim_flag;
++ unsigned int noreclaim_flag, num_io_timeout = 0;
+
+ noreclaim_flag = memalloc_noreclaim_save();
+ cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));
+@@ -1213,6 +1216,16 @@ next_pdu:
+ continue;
+ }
+
++ if (server->ops->is_status_io_timeout &&
++ server->ops->is_status_io_timeout(buf)) {
++ num_io_timeout++;
++ if (num_io_timeout > NUM_STATUS_IO_TIMEOUT) {
++ cifs_reconnect(server);
++ num_io_timeout = 0;
++ continue;
++ }
++ }
++
+ server->lstrp = jiffies;
+
+ for (i = 0; i < num_mids; i++) {
+diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
+index 7fde3775cb574..b004cf87692a7 100644
+--- a/fs/cifs/smb2maperror.c
++++ b/fs/cifs/smb2maperror.c
+@@ -488,7 +488,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
+ {STATUS_PIPE_CONNECTED, -EIO, "STATUS_PIPE_CONNECTED"},
+ {STATUS_PIPE_LISTENING, -EIO, "STATUS_PIPE_LISTENING"},
+ {STATUS_INVALID_READ_MODE, -EIO, "STATUS_INVALID_READ_MODE"},
+- {STATUS_IO_TIMEOUT, -ETIMEDOUT, "STATUS_IO_TIMEOUT"},
++ {STATUS_IO_TIMEOUT, -EAGAIN, "STATUS_IO_TIMEOUT"},
+ {STATUS_FILE_FORCED_CLOSED, -EIO, "STATUS_FILE_FORCED_CLOSED"},
+ {STATUS_PROFILING_NOT_STARTED, -EIO, "STATUS_PROFILING_NOT_STARTED"},
+ {STATUS_PROFILING_NOT_STOPPED, -EIO, "STATUS_PROFILING_NOT_STOPPED"},
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 09e1cd320ee56..e2e53652193e6 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -2346,6 +2346,17 @@ smb2_is_session_expired(char *buf)
+ return true;
+ }
+
++static bool
++smb2_is_status_io_timeout(char *buf)
++{
++ struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
++
++ if (shdr->Status == STATUS_IO_TIMEOUT)
++ return true;
++ else
++ return false;
++}
++
+ static int
+ smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
+ struct cifsInodeInfo *cinode)
+@@ -4816,6 +4827,7 @@ struct smb_version_operations smb20_operations = {
+ .make_node = smb2_make_node,
+ .fiemap = smb3_fiemap,
+ .llseek = smb3_llseek,
++ .is_status_io_timeout = smb2_is_status_io_timeout,
+ };
+
+ struct smb_version_operations smb21_operations = {
+@@ -4916,6 +4928,7 @@ struct smb_version_operations smb21_operations = {
+ .make_node = smb2_make_node,
+ .fiemap = smb3_fiemap,
+ .llseek = smb3_llseek,
++ .is_status_io_timeout = smb2_is_status_io_timeout,
+ };
+
+ struct smb_version_operations smb30_operations = {
+@@ -5026,6 +5039,7 @@ struct smb_version_operations smb30_operations = {
+ .make_node = smb2_make_node,
+ .fiemap = smb3_fiemap,
+ .llseek = smb3_llseek,
++ .is_status_io_timeout = smb2_is_status_io_timeout,
+ };
+
+ struct smb_version_operations smb311_operations = {
+@@ -5137,6 +5151,7 @@ struct smb_version_operations smb311_operations = {
+ .make_node = smb2_make_node,
+ .fiemap = smb3_fiemap,
+ .llseek = smb3_llseek,
++ .is_status_io_timeout = smb2_is_status_io_timeout,
+ };
+
+ struct smb_version_values smb20_values = {
+--
+2.27.0
+
--- /dev/null
+From ae35d66c5000bcbccb7c1c7451156ff5e2339fb7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 29 Aug 2020 22:01:09 +0900
+Subject: ia64: kprobes: Use generic kretprobe trampoline handler
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+[ Upstream commit e792ff804f49720ce003b3e4c618b5d996256a18 ]
+
+Use the generic kretprobe trampoline handler. Don't use
+framepointer verification.
+
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lore.kernel.org/r/159870606883.1229682.12331813108378725668.stgit@devnote2
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/ia64/kernel/kprobes.c | 77 +-------------------------------------
+ 1 file changed, 2 insertions(+), 75 deletions(-)
+
+diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
+index 7a7df944d7986..fc1ff8a4d7de6 100644
+--- a/arch/ia64/kernel/kprobes.c
++++ b/arch/ia64/kernel/kprobes.c
+@@ -396,83 +396,9 @@ static void kretprobe_trampoline(void)
+ {
+ }
+
+-/*
+- * At this point the target function has been tricked into
+- * returning into our trampoline. Lookup the associated instance
+- * and then:
+- * - call the handler function
+- * - cleanup by marking the instance as unused
+- * - long jump back to the original return address
+- */
+ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+ {
+- struct kretprobe_instance *ri = NULL;
+- struct hlist_head *head, empty_rp;
+- struct hlist_node *tmp;
+- unsigned long flags, orig_ret_address = 0;
+- unsigned long trampoline_address =
+- ((struct fnptr *)kretprobe_trampoline)->ip;
+-
+- INIT_HLIST_HEAD(&empty_rp);
+- kretprobe_hash_lock(current, &head, &flags);
+-
+- /*
+- * It is possible to have multiple instances associated with a given
+- * task either because an multiple functions in the call path
+- * have a return probe installed on them, and/or more than one return
+- * return probe was registered for a target function.
+- *
+- * We can handle this because:
+- * - instances are always inserted at the head of the list
+- * - when multiple return probes are registered for the same
+- * function, the first instance's ret_addr will point to the
+- * real return address, and all the rest will point to
+- * kretprobe_trampoline
+- */
+- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+- if (ri->task != current)
+- /* another task is sharing our hash bucket */
+- continue;
+-
+- orig_ret_address = (unsigned long)ri->ret_addr;
+- if (orig_ret_address != trampoline_address)
+- /*
+- * This is the real return address. Any other
+- * instances associated with this task are for
+- * other calls deeper on the call stack
+- */
+- break;
+- }
+-
+- regs->cr_iip = orig_ret_address;
+-
+- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+- if (ri->task != current)
+- /* another task is sharing our hash bucket */
+- continue;
+-
+- if (ri->rp && ri->rp->handler)
+- ri->rp->handler(ri, regs);
+-
+- orig_ret_address = (unsigned long)ri->ret_addr;
+- recycle_rp_inst(ri, &empty_rp);
+-
+- if (orig_ret_address != trampoline_address)
+- /*
+- * This is the real return address. Any other
+- * instances associated with this task are for
+- * other calls deeper on the call stack
+- */
+- break;
+- }
+- kretprobe_assert(ri, orig_ret_address, trampoline_address);
+-
+- kretprobe_hash_unlock(current, &flags);
+-
+- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+- hlist_del(&ri->hlist);
+- kfree(ri);
+- }
++ regs->cr_iip = __kretprobe_trampoline_handler(regs, kretprobe_trampoline, NULL);
+ /*
+ * By returning a non-zero value, we are telling
+ * kprobe_handler() that we don't want the post_handler
+@@ -485,6 +411,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+ ri->ret_addr = (kprobe_opcode_t *)regs->b0;
++ ri->fp = NULL;
+
+ /* Replace the return addr with trampoline addr */
+ regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
+--
+2.27.0
+
--- /dev/null
+From 7c9934fdf2d67fbdab74c73235753376fdffd9da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Oct 2020 22:40:34 +0300
+Subject: interconnect: qcom: sdm845: Enable keepalive for the MM1 BCM
+
+From: Georgi Djakov <georgi.djakov@linaro.org>
+
+[ Upstream commit 5be1805dc3961ce0465bcb0beab85fe8580af08d ]
+
+After enabling interconnect scaling for display on the db845c board,
+in certain configurations the board hangs, while the following errors
+are observed on the console:
+
+ Error sending AMC RPMH requests (-110)
+ qcom_rpmh TCS Busy, retrying RPMH message send: addr=0x50000
+ qcom_rpmh TCS Busy, retrying RPMH message send: addr=0x50000
+ qcom_rpmh TCS Busy, retrying RPMH message send: addr=0x50000
+ ...
+
+In this specific case, the above is related to one of the sequencers
+being stuck, while client drivers are returning from probe and trying
+to disable the currently unused clock and interconnect resources.
+Generally we want to keep the multimedia NoC enabled like the rest of
+the NoCs, so let's set the keepalive flag on it too.
+
+Fixes: aae57773fbe0 ("interconnect: qcom: sdm845: Split qnodes into their respective NoCs")
+Reported-by: Amit Pundir <amit.pundir@linaro.org>
+Reviewed-by: Mike Tipton <mdtipton@codeaurora.org>
+Tested-by: John Stultz <john.stultz@linaro.org>
+Link: https://lore.kernel.org/r/20201012194034.26944-1-georgi.djakov@linaro.org
+Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/interconnect/qcom/sdm845.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
+index f6c7b969520d0..86f08c0f4c41b 100644
+--- a/drivers/interconnect/qcom/sdm845.c
++++ b/drivers/interconnect/qcom/sdm845.c
+@@ -151,7 +151,7 @@ DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+ DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+ DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
+ DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_apps_io);
+-DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
++DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+ DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_memnoc_snoc);
+ DEFINE_QBCM(bcm_mm2, "MM2", false, &qns2_mem_noc);
+ DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_tcu);
+--
+2.27.0
+
--- /dev/null
+From 6ad6d8e2917f309e12ba92f001536d2d05409d01 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Oct 2020 09:43:56 +0100
+Subject: io_uring: don't set COMP_LOCKED if won't put
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 368c5481ae7c6a9719c40984faea35480d9f4872 ]
+
+__io_kill_linked_timeout() sets REQ_F_COMP_LOCKED for a linked timeout
+even if it can't cancel it, e.g. when it's already running. This not only
+races with io_link_timeout_fn() on the ->flags field, but also leaves the
+flag set, so io_link_timeout_fn() may find it and wrongly decide that it
+holds the lock. The second problem is, hopefully, only theoretical so far.
+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 59ab8c5c2aaaa..50a7a99dad4ca 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1650,6 +1650,7 @@ static bool io_link_cancel_timeout(struct io_kiocb *req)
+
+ ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
+ if (ret != -1) {
++ req->flags |= REQ_F_COMP_LOCKED;
+ io_cqring_fill_event(req, -ECANCELED);
+ io_commit_cqring(ctx);
+ req->flags &= ~REQ_F_LINK_HEAD;
+@@ -1672,7 +1673,6 @@ static bool __io_kill_linked_timeout(struct io_kiocb *req)
+ return false;
+
+ list_del_init(&link->link_list);
+- link->flags |= REQ_F_COMP_LOCKED;
+ wake_ev = io_link_cancel_timeout(link);
+ req->flags &= ~REQ_F_LINK_TIMEOUT;
+ return wake_ev;
+--
+2.27.0
+
--- /dev/null
+From 056b679b4fe88041cbe0e83a3898eee772822837 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Nov 2020 12:30:42 -0500
+Subject: ionic: no rx flush in deinit
+
+[ Upstream commit 43ecf7b46f2688fd37909801aee264f288b3917b ]
+
+Kmemleak pointed out to us that ionic_rx_flush() is sending
+skbs into napi_gro_XXX with a disabled napi context, and these
+end up getting lost and leaked. We can safely remove the flush.
+
+Fixes: 0f3154e6bcb3 ("ionic: Add Tx and Rx handling")
+Signed-off-by: Shannon Nelson <snelson@pensando.io>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/pensando/ionic/ionic_lif.c | 1 -
+ drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 13 -------------
+ drivers/net/ethernet/pensando/ionic/ionic_txrx.h | 1 -
+ 3 files changed, 15 deletions(-)
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 26988ad7ec979..8867d4ac871c1 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -1512,7 +1512,6 @@ static void ionic_txrx_deinit(struct ionic_lif *lif)
+ if (lif->rxqcqs) {
+ for (i = 0; i < lif->nxqs; i++) {
+ ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
+- ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
+ ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
+ }
+ }
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+index def65fee27b5a..39e85870c15e9 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+@@ -253,19 +253,6 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
+ return true;
+ }
+
+-void ionic_rx_flush(struct ionic_cq *cq)
+-{
+- struct ionic_dev *idev = &cq->lif->ionic->idev;
+- u32 work_done;
+-
+- work_done = ionic_cq_service(cq, cq->num_descs,
+- ionic_rx_service, NULL, NULL);
+-
+- if (work_done)
+- ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
+- work_done, IONIC_INTR_CRED_RESET_COALESCE);
+-}
+-
+ static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
+ dma_addr_t *dma_addr)
+ {
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+index a5883be0413f6..7667b72232b8a 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
++++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+@@ -4,7 +4,6 @@
+ #ifndef _IONIC_TXRX_H_
+ #define _IONIC_TXRX_H_
+
+-void ionic_rx_flush(struct ionic_cq *cq);
+ void ionic_tx_flush(struct ionic_cq *cq);
+
+ void ionic_rx_fill(struct ionic_queue *q);
+--
+2.27.0
+
--- /dev/null
+From b6a7906f6ac7312f84ba2a06938ef4e8443239a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Oct 2020 18:49:00 +0200
+Subject: jbd2: avoid transaction reuse after reformatting
+
+From: changfengnan <fengnanchang@foxmail.com>
+
+[ Upstream commit fc750a3b44bdccb9fb96d6abbc48a9b8e480ce7b ]
+
+When ext4 is formatted with lazy_journal_init=1 and transactions from
+the previous filesystem are still on disk, it is possible that they are
+considered during a recovery after a crash. Because the checksum seed
+has changed, the CRC check will fail, and the journal recovery fails
+with checksum error although the journal is otherwise perfectly valid.
+Fix the problem by checking commit block time stamps to determine
+whether the data in the journal block is just stale or whether it is
+indeed corrupt.
+
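+A minimal user-space sketch of that decision (illustrative only, much
+simplified from the real PASS_SCAN logic): after a descriptor/revoke
+checksum failure, a commit time that keeps increasing means the blocks
+belong to the current journal (real corruption), while a jump backwards
+means a stale transaction left over from the previous filesystem:
+
+  #include <stdbool.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /* Returns true if recovery should stop cleanly (stale data), false if
+   * the checksum failure has to be treated as real corruption. */
+  static bool stale_not_corrupt(bool csum_failed, uint64_t commit_time,
+                                uint64_t last_trans_commit_time)
+  {
+          if (!csum_failed)
+                  return false;   /* nothing suspicious in this block */
+          return commit_time < last_trans_commit_time; /* went backwards */
+  }
+
+  int main(void)
+  {
+          /* Stale block from an old journal: commit time jumps backwards. */
+          printf("%d\n", stale_not_corrupt(true, 100, 500)); /* 1: end recovery */
+          /* Same journal, increasing commit time: genuine corruption. */
+          printf("%d\n", stale_not_corrupt(true, 600, 500)); /* 0: -EFSBADCRC */
+          return 0;
+  }
+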
+Reported-by: kernel test robot <lkp@intel.com>
+Reviewed-by: Andreas Dilger <adilger@dilger.ca>
+Signed-off-by: Fengnan Chang <changfengnan@hikvision.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20201012164900.20197-1-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/jbd2/recovery.c | 78 +++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 66 insertions(+), 12 deletions(-)
+
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index faa97d748474d..fb134c7a12c89 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -428,6 +428,8 @@ static int do_one_pass(journal_t *journal,
+ __u32 crc32_sum = ~0; /* Transactional Checksums */
+ int descr_csum_size = 0;
+ int block_error = 0;
++ bool need_check_commit_time = false;
++ __u64 last_trans_commit_time = 0, commit_time;
+
+ /*
+ * First thing is to establish what we expect to find in the log
+@@ -520,12 +522,21 @@ static int do_one_pass(journal_t *journal,
+ if (descr_csum_size > 0 &&
+ !jbd2_descriptor_block_csum_verify(journal,
+ bh->b_data)) {
+- printk(KERN_ERR "JBD2: Invalid checksum "
+- "recovering block %lu in log\n",
+- next_log_block);
+- err = -EFSBADCRC;
+- brelse(bh);
+- goto failed;
++ /*
++ * PASS_SCAN can see stale blocks due to lazy
++ * journal init. Don't error out on those yet.
++ */
++ if (pass != PASS_SCAN) {
++ pr_err("JBD2: Invalid checksum recovering block %lu in log\n",
++ next_log_block);
++ err = -EFSBADCRC;
++ brelse(bh);
++ goto failed;
++ }
++ need_check_commit_time = true;
++ jbd_debug(1,
++ "invalid descriptor block found in %lu\n",
++ next_log_block);
+ }
+
+ /* If it is a valid descriptor block, replay it
+@@ -535,6 +546,7 @@ static int do_one_pass(journal_t *journal,
+ if (pass != PASS_REPLAY) {
+ if (pass == PASS_SCAN &&
+ jbd2_has_feature_checksum(journal) &&
++ !need_check_commit_time &&
+ !info->end_transaction) {
+ if (calc_chksums(journal, bh,
+ &next_log_block,
+@@ -683,11 +695,41 @@ static int do_one_pass(journal_t *journal,
+ * mentioned conditions. Hence assume
+ * "Interrupted Commit".)
+ */
++ commit_time = be64_to_cpu(
++ ((struct commit_header *)bh->b_data)->h_commit_sec);
++ /*
++ * If need_check_commit_time is set, it means we are in
++ * PASS_SCAN and csum verify failed before. If
++ * commit_time is increasing, it's the same journal,
++ * otherwise it is stale journal block, just end this
++ * recovery.
++ */
++ if (need_check_commit_time) {
++ if (commit_time >= last_trans_commit_time) {
++ pr_err("JBD2: Invalid checksum found in transaction %u\n",
++ next_commit_ID);
++ err = -EFSBADCRC;
++ brelse(bh);
++ goto failed;
++ }
++ ignore_crc_mismatch:
++ /*
++ * It likely does not belong to same journal,
++ * just end this recovery with success.
++ */
++ jbd_debug(1, "JBD2: Invalid checksum ignored in transaction %u, likely stale data\n",
++ next_commit_ID);
++ err = 0;
++ brelse(bh);
++ goto done;
++ }
+
+- /* Found an expected commit block: if checksums
+- * are present verify them in PASS_SCAN; else not
++ /*
++ * Found an expected commit block: if checksums
++ * are present, verify them in PASS_SCAN; else not
+ * much to do other than move on to the next sequence
+- * number. */
++ * number.
++ */
+ if (pass == PASS_SCAN &&
+ jbd2_has_feature_checksum(journal)) {
+ struct commit_header *cbh =
+@@ -719,6 +761,8 @@ static int do_one_pass(journal_t *journal,
+ !jbd2_commit_block_csum_verify(journal,
+ bh->b_data)) {
+ chksum_error:
++ if (commit_time < last_trans_commit_time)
++ goto ignore_crc_mismatch;
+ info->end_transaction = next_commit_ID;
+
+ if (!jbd2_has_feature_async_commit(journal)) {
+@@ -728,11 +772,24 @@ static int do_one_pass(journal_t *journal,
+ break;
+ }
+ }
++ if (pass == PASS_SCAN)
++ last_trans_commit_time = commit_time;
+ brelse(bh);
+ next_commit_ID++;
+ continue;
+
+ case JBD2_REVOKE_BLOCK:
++ /*
++ * Check revoke block crc in pass_scan, if csum verify
++ * failed, check commit block time later.
++ */
++ if (pass == PASS_SCAN &&
++ !jbd2_descriptor_block_csum_verify(journal,
++ bh->b_data)) {
++ jbd_debug(1, "JBD2: invalid revoke block found in %lu\n",
++ next_log_block);
++ need_check_commit_time = true;
++ }
+ /* If we aren't in the REVOKE pass, then we can
+ * just skip over this block. */
+ if (pass != PASS_REVOKE) {
+@@ -800,9 +857,6 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
+ offset = sizeof(jbd2_journal_revoke_header_t);
+ rcount = be32_to_cpu(header->r_count);
+
+- if (!jbd2_descriptor_block_csum_verify(journal, header))
+- return -EFSBADCRC;
+-
+ if (jbd2_journal_has_csum_v2or3(journal))
+ csum_size = sizeof(struct jbd2_journal_block_tail);
+ if (rcount > journal->j_blocksize - csum_size)
+--
+2.27.0
+
--- /dev/null
+From 29ee4d426eb57040461725ca55617f2869224af2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jun 2020 15:14:38 -0700
+Subject: kgdb: Make "kgdbcon" work properly with "kgdb_earlycon"
+
+From: Douglas Anderson <dianders@chromium.org>
+
+[ Upstream commit b18b099e04f450cdc77bec72acefcde7042bd1f3 ]
+
+On my system the kernel processes the "kgdb_earlycon" parameter before
+the "kgdbcon" parameter. When we setup "kgdb_earlycon" we'll end up
+in kgdb_register_callbacks() and "kgdb_use_con" won't have been set
+yet so we'll never get around to starting "kgdbcon". Let's remedy
+this by detecting that the IO module was already registered when
+setting "kgdb_use_con" and registering the console then.
+
+As part of this, to avoid pre-declaring things, move the handling of
+the "kgdbcon" further down in the file.
+
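+A user-space sketch of the same idea (hypothetical names, not the kernel
+code): a setter that also performs the deferred action when the
+prerequisite has already been registered, so the two parameters can be
+handled in either order:
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  /* Stand-ins for kgdb_io_module_registered / kgdb_con_registered. */
+  static bool io_registered, con_registered, use_con;
+
+  static void register_con(void)
+  {
+          con_registered = true;
+          puts("console registered");
+  }
+
+  static void handle_kgdbcon(void)        /* "kgdbcon" */
+  {
+          use_con = true;
+          if (io_registered && !con_registered)
+                  register_con();         /* prerequisite already met */
+  }
+
+  static void handle_earlycon(void)       /* "kgdb_earlycon" */
+  {
+          io_registered = true;
+          if (use_con && !con_registered)
+                  register_con();         /* flag was set first */
+  }
+
+  int main(void)
+  {
+          handle_earlycon();      /* order seen on the reporter's system */
+          handle_kgdbcon();       /* console still gets registered */
+          return 0;
+  }
+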
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://lore.kernel.org/r/20200630151422.1.I4aa062751ff5e281f5116655c976dff545c09a46@changeid
+Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/debug/debug_core.c | 22 ++++++++++++++--------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index b16dbc1bf0567..404d6d47a11da 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -94,14 +94,6 @@ int dbg_switch_cpu;
+ /* Use kdb or gdbserver mode */
+ int dbg_kdb_mode = 1;
+
+-static int __init opt_kgdb_con(char *str)
+-{
+- kgdb_use_con = 1;
+- return 0;
+-}
+-
+-early_param("kgdbcon", opt_kgdb_con);
+-
+ module_param(kgdb_use_con, int, 0644);
+ module_param(kgdbreboot, int, 0644);
+
+@@ -920,6 +912,20 @@ static struct console kgdbcons = {
+ .index = -1,
+ };
+
++static int __init opt_kgdb_con(char *str)
++{
++ kgdb_use_con = 1;
++
++ if (kgdb_io_module_registered && !kgdb_con_registered) {
++ register_console(&kgdbcons);
++ kgdb_con_registered = 1;
++ }
++
++ return 0;
++}
++
++early_param("kgdbcon", opt_kgdb_con);
++
+ #ifdef CONFIG_MAGIC_SYSRQ
+ static void sysrq_handle_dbg(int key)
+ {
+--
+2.27.0
+
--- /dev/null
+From 23ee4f53fe8dfa3204a9e305287c3457130e14cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Sep 2020 01:16:07 -0300
+Subject: KVM: PPC: Book3S HV: Do not allocate HPT for a nested guest
+
+From: Fabiano Rosas <farosas@linux.ibm.com>
+
+[ Upstream commit 05e6295dc7de859c9d56334805485c4d20bebf25 ]
+
+The current nested KVM code does not support HPT guests. This is
+informed/enforced in some ways:
+
+- Hosts < P9 will not be able to enable the nested HV feature;
+
+- The nested hypervisor MMU capabilities will not contain
+ KVM_CAP_PPC_MMU_HASH_V3;
+
+- QEMU reflects the MMU capabilities in the
+ 'ibm,arch-vec-5-platform-support' device-tree property;
+
+- The nested guest, at 'prom_parse_mmu_model' ignores the
+ 'disable_radix' kernel command line option if HPT is not supported;
+
+- The KVM_PPC_CONFIGURE_V3_MMU ioctl will fail if trying to use HPT.
+
+There is, however, still a way to start an HPT guest by using
+max-cpu-compat=power8 in the QEMU machine options. This leads to the
+guest being set to use hash after QEMU calls the KVM_PPC_ALLOCATE_HTAB
+ioctl.
+
+With the guest set to hash, the nested hypervisor goes through the
+entry path that has no knowledge of nesting (kvmppc_run_vcpu) and
+crashes when it tries to execute an hypervisor-privileged (mtspr
+HDEC) instruction at __kvmppc_vcore_entry:
+
+root@L1:~ $ qemu-system-ppc64 -machine pseries,max-cpu-compat=power8 ...
+
+<snip>
+[ 538.543303] CPU: 83 PID: 25185 Comm: CPU 0/KVM Not tainted 5.9.0-rc4 #1
+[ 538.543355] NIP: c00800000753f388 LR: c00800000753f368 CTR: c0000000001e5ec0
+[ 538.543417] REGS: c0000013e91e33b0 TRAP: 0700 Not tainted (5.9.0-rc4)
+[ 538.543470] MSR: 8000000002843033 <SF,VEC,VSX,FP,ME,IR,DR,RI,LE> CR: 22422882 XER: 20040000
+[ 538.543546] CFAR: c00800000753f4b0 IRQMASK: 3
+ GPR00: c0080000075397a0 c0000013e91e3640 c00800000755e600 0000000080000000
+ GPR04: 0000000000000000 c0000013eab19800 c000001394de0000 00000043a054db72
+ GPR08: 00000000003b1652 0000000000000000 0000000000000000 c0080000075502e0
+ GPR12: c0000000001e5ec0 c0000007ffa74200 c0000013eab19800 0000000000000008
+ GPR16: 0000000000000000 c00000139676c6c0 c000000001d23948 c0000013e91e38b8
+ GPR20: 0000000000000053 0000000000000000 0000000000000001 0000000000000000
+ GPR24: 0000000000000001 0000000000000001 0000000000000000 0000000000000001
+ GPR28: 0000000000000001 0000000000000053 c0000013eab19800 0000000000000001
+[ 538.544067] NIP [c00800000753f388] __kvmppc_vcore_entry+0x90/0x104 [kvm_hv]
+[ 538.544121] LR [c00800000753f368] __kvmppc_vcore_entry+0x70/0x104 [kvm_hv]
+[ 538.544173] Call Trace:
+[ 538.544196] [c0000013e91e3640] [c0000013e91e3680] 0xc0000013e91e3680 (unreliable)
+[ 538.544260] [c0000013e91e3820] [c0080000075397a0] kvmppc_run_core+0xbc8/0x19d0 [kvm_hv]
+[ 538.544325] [c0000013e91e39e0] [c00800000753d99c] kvmppc_vcpu_run_hv+0x404/0xc00 [kvm_hv]
+[ 538.544394] [c0000013e91e3ad0] [c0080000072da4fc] kvmppc_vcpu_run+0x34/0x48 [kvm]
+[ 538.544472] [c0000013e91e3af0] [c0080000072d61b8] kvm_arch_vcpu_ioctl_run+0x310/0x420 [kvm]
+[ 538.544539] [c0000013e91e3b80] [c0080000072c7450] kvm_vcpu_ioctl+0x298/0x778 [kvm]
+[ 538.544605] [c0000013e91e3ce0] [c0000000004b8c2c] sys_ioctl+0x1dc/0xc90
+[ 538.544662] [c0000013e91e3dc0] [c00000000002f9a4] system_call_exception+0xe4/0x1c0
+[ 538.544726] [c0000013e91e3e20] [c00000000000d140] system_call_common+0xf0/0x27c
+[ 538.544787] Instruction dump:
+[ 538.544821] f86d1098 60000000 60000000 48000099 e8ad0fe8 e8c500a0 e9264140 75290002
+[ 538.544886] 7d1602a6 7cec42a6 40820008 7d0807b4 <7d164ba6> 7d083a14 f90d10a0 480104fd
+[ 538.544953] ---[ end trace 74423e2b948c2e0c ]---
+
+This patch makes the KVM_PPC_ALLOCATE_HTAB ioctl fail when running in
+the nested hypervisor, causing QEMU to abort.
+
+Reported-by: Satheesh Rajendran <sathnaga@linux.vnet.ibm.com>
+Signed-off-by: Fabiano Rosas <farosas@linux.ibm.com>
+Reviewed-by: Greg Kurz <groug@kaod.org>
+Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kvm/book3s_hv.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 3bd3118c76330..e2b476d76506a 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -5257,6 +5257,12 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
+ case KVM_PPC_ALLOCATE_HTAB: {
+ u32 htab_order;
+
++ /* If we're a nested hypervisor, we currently only support radix */
++ if (kvmhv_on_pseries()) {
++ r = -EOPNOTSUPP;
++ break;
++ }
++
+ r = -EFAULT;
+ if (get_user(htab_order, (u32 __user *)argp))
+ break;
+--
+2.27.0
+
--- /dev/null
+From 3caf638ca9f4c8e5ca1dfb7f96090c5adb42a82e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Oct 2020 12:23:02 +0200
+Subject: lockdep: Fix preemption WARN for spurious IRQ-enable
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit f8e48a3dca060e80f672d398d181db1298fbc86c ]
+
+It is valid (albeit uncommon) to call local_irq_enable() without first
+having called local_irq_disable(). In this case we enter
+lockdep_hardirqs_on*() with IRQs enabled and trip a preemption warning
+for using __this_cpu_read().
+
+Use this_cpu_read() instead to avoid the warning.
+
+Fixes: 4d004099a6 ("lockdep: Fix lockdep recursion")
+Reported-by: syzbot+53f8ce8bbc07924b6417@syzkaller.appspotmail.com
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/locking/lockdep.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 85d15f0362dc5..3eb35ad1b5241 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3681,7 +3681,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
+ if (unlikely(in_nmi()))
+ return;
+
+- if (unlikely(__this_cpu_read(lockdep_recursion)))
++ if (unlikely(this_cpu_read(lockdep_recursion)))
+ return;
+
+ if (unlikely(lockdep_hardirqs_enabled())) {
+@@ -3750,7 +3750,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
+ goto skip_checks;
+ }
+
+- if (unlikely(__this_cpu_read(lockdep_recursion)))
++ if (unlikely(this_cpu_read(lockdep_recursion)))
+ return;
+
+ if (lockdep_hardirqs_enabled()) {
+--
+2.27.0
+
--- /dev/null
+From 695cbee6e0191a2620ae9fae18aa706d2e953427 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Sep 2020 14:36:49 +0200
+Subject: mac80211: add missing queue/hash initialization to 802.3 xmit
+
+From: Felix Fietkau <nbd@nbd.name>
+
+[ Upstream commit 5f8d69eaab1915df97f4f2aca89ea16abdd092d5 ]
+
+Fixes AQL for encap-offloaded tx
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Link: https://lore.kernel.org/r/20200908123702.88454-2-nbd@nbd.name
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/tx.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index dca01d7e6e3e0..282b0bc201eeb 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -4209,6 +4209,12 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
+ if (is_zero_ether_addr(ra))
+ goto out_free;
+
++ if (local->ops->wake_tx_queue) {
++ u16 queue = __ieee80211_select_queue(sdata, sta, skb);
++ skb_set_queue_mapping(skb, queue);
++ skb_get_hash(skb);
++ }
++
+ multicast = is_multicast_ether_addr(ra);
+
+ if (sta)
+--
+2.27.0
+
--- /dev/null
+From 5313a08de0f0de388b29e59a9172925c5fbc017d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Oct 2020 00:00:24 +0800
+Subject: md/bitmap: md_bitmap_get_counter returns wrong blocks
+
+From: Zhao Heming <heming.zhao@suse.com>
+
+[ Upstream commit d837f7277f56e70d82b3a4a037d744854e62f387 ]
+
+md_bitmap_get_counter() has code:
+
+```
+ if (bitmap->bp[page].hijacked ||
+ bitmap->bp[page].map == NULL)
+ csize = ((sector_t)1) << (bitmap->chunkshift +
+ PAGE_COUNTER_SHIFT - 1);
+```
+
+The minus 1 is wrong: this branch should report 2048 bits of space, but
+with the "-1" it only reports 1024 bits.
+
+The buggy code returns wrong blocks, but this doesn't influence the
+bitmap logic:
+1. Most callers only use the function's return value (the counter of the
+   offset), not the blocks parameter.
+2. The bug is only triggered when hijacked is true or map is NULL.
+   The hijacked condition is very rare, and "map == NULL" is only true
+   while the array is being created or resized.
+3. Even if a caller gets wrong blocks, the current code just makes the
+   caller call md_bitmap_get_counter() one more time.
+
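+The arithmetic, as a tiny stand-alone demonstration (the chunkshift and
+PAGE_COUNTER_SHIFT values below are picked purely for illustration):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define PAGE_COUNTER_SHIFT 11           /* illustrative value */
+
+  int main(void)
+  {
+          unsigned int chunkshift = 0;    /* illustrative value */
+          uint64_t old_csize = (uint64_t)1 << (chunkshift + PAGE_COUNTER_SHIFT - 1);
+          uint64_t new_csize = (uint64_t)1 << (chunkshift + PAGE_COUNTER_SHIFT);
+
+          /* The "-1" variant reports only half of the space it should. */
+          printf("old: %llu bits, new: %llu bits\n",
+                 (unsigned long long)old_csize, (unsigned long long)new_csize);
+          return 0;
+  }
+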
+Signed-off-by: Zhao Heming <heming.zhao@suse.com>
+Signed-off-by: Song Liu <songliubraving@fb.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/md-bitmap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index c61ab86a28b52..d910833feeb4d 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -1367,7 +1367,7 @@ __acquires(bitmap->lock)
+ if (bitmap->bp[page].hijacked ||
+ bitmap->bp[page].map == NULL)
+ csize = ((sector_t)1) << (bitmap->chunkshift +
+- PAGE_COUNTER_SHIFT - 1);
++ PAGE_COUNTER_SHIFT);
+ else
+ csize = ((sector_t)1) << bitmap->chunkshift;
+ *blocks = csize - (offset & (csize - 1));
+--
+2.27.0
+
--- /dev/null
+From 95d98caeb88660721a9a22c6c7c93060d4e54087 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Jul 2020 11:20:32 +0200
+Subject: media: imx274: fix frame interval handling
+
+From: Hans Verkuil <hverkuil@xs4all.nl>
+
+[ Upstream commit 49b20d981d723fae5a93843c617af2b2c23611ec ]
+
+1) the numerator and/or denominator might be 0, in that case
+ fall back to the default frame interval. This is per the spec
+ and this caused a v4l2-compliance failure.
+
+2) the updated frame interval wasn't returned in the s_frame_interval
+ subdev op.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Reviewed-by: Luca Ceresoli <luca@lucaceresoli.net>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/i2c/imx274.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
+index 6011cec5e351d..e6aa9f32b6a83 100644
+--- a/drivers/media/i2c/imx274.c
++++ b/drivers/media/i2c/imx274.c
+@@ -1235,6 +1235,8 @@ static int imx274_s_frame_interval(struct v4l2_subdev *sd,
+ ret = imx274_set_frame_interval(imx274, fi->interval);
+
+ if (!ret) {
++ fi->interval = imx274->frame_interval;
++
+ /*
+ * exposure time range is decided by frame interval
+ * need to update it after frame interval changes
+@@ -1730,9 +1732,9 @@ static int imx274_set_frame_interval(struct stimx274 *priv,
+ __func__, frame_interval.numerator,
+ frame_interval.denominator);
+
+- if (frame_interval.numerator == 0) {
+- err = -EINVAL;
+- goto fail;
++ if (frame_interval.numerator == 0 || frame_interval.denominator == 0) {
++ frame_interval.denominator = IMX274_DEF_FRAME_RATE;
++ frame_interval.numerator = 1;
+ }
+
+ req_frame_rate = (u32)(frame_interval.denominator
+--
+2.27.0
+
--- /dev/null
+From 44f6b23abc9c13206748ac23fb20589829e56e63 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Aug 2020 09:11:35 +0200
+Subject: media: platform: Improve queue set up flow for bug fixing
+
+From: Xia Jiang <xia.jiang@mediatek.com>
+
+[ Upstream commit 5095a6413a0cf896ab468009b6142cb0fe617e66 ]
+
+Add a check of the created buffer sizes in mtk_jpeg_queue_setup().
+
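+A rough sketch of what the check amounts to (hypothetical stand-alone
+code, not the driver): when the caller passes pre-set buffer sizes
+(*num_planes already non-zero), validate them against the required
+per-plane image size instead of overwriting them:
+
+  #include <stdio.h>
+
+  static int check_preset_sizes(unsigned int num_planes,
+                                const unsigned int sizes[],
+                                const unsigned int sizeimage[])
+  {
+          unsigned int i;
+
+          for (i = 0; i < num_planes; i++)
+                  if (sizes[i] < sizeimage[i])
+                          return -1;      /* too small for this plane: -EINVAL */
+          return 0;
+  }
+
+  int main(void)
+  {
+          unsigned int need[3] = { 4096, 2048, 2048 };
+          unsigned int got[3]  = { 4096, 1024, 2048 };
+
+          printf("%d\n", check_preset_sizes(3, got, need)); /* -1: reject */
+          return 0;
+  }
+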
+Reviewed-by: Tomasz Figa <tfiga@chromium.org>
+Signed-off-by: Xia Jiang <xia.jiang@mediatek.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+index 61fed1e35a005..b1ca4e3adae32 100644
+--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
++++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+@@ -571,6 +571,13 @@ static int mtk_jpeg_queue_setup(struct vb2_queue *q,
+ if (!q_data)
+ return -EINVAL;
+
++ if (*num_planes) {
++ for (i = 0; i < *num_planes; i++)
++ if (sizes[i] < q_data->sizeimage[i])
++ return -EINVAL;
++ return 0;
++ }
++
+ *num_planes = q_data->fmt->colplanes;
+ for (i = 0; i < q_data->fmt->colplanes; i++) {
+ sizes[i] = q_data->sizeimage[i];
+--
+2.27.0
+
--- /dev/null
+From bd9eebea1a261a3d1952030c1c1e470f68ff1020 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Aug 2020 21:25:18 +0200
+Subject: media: tw5864: check status of tw5864_frameinterval_get
+
+From: Tom Rix <trix@redhat.com>
+
+[ Upstream commit 780d815dcc9b34d93ae69385a8465c38d423ff0f ]
+
+clang static analysis reports this problem
+
+tw5864-video.c:773:32: warning: The left expression of the compound
+ assignment is an uninitialized value.
+ The computed value will also be garbage
+ fintv->stepwise.max.numerator *= std_max_fps;
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ^
+
+stepwise.max is set with frameinterval, which comes from
+
+ ret = tw5864_frameinterval_get(input, &frameinterval);
+ fintv->stepwise.step = frameinterval;
+ fintv->stepwise.min = frameinterval;
+ fintv->stepwise.max = frameinterval;
+ fintv->stepwise.max.numerator *= std_max_fps;
+
+When tw5864_frameinterval_get() fails, frameinterval is not
+set. So check the status and fix another similar problem.
+
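+The pattern being enforced, as a small hedged user-space sketch (names
+are illustrative): bail out when the helper that fills the output
+structure fails, instead of reading the still-uninitialized value:
+
+  #include <errno.h>
+  #include <stdio.h>
+
+  struct fract { unsigned int numerator, denominator; };
+
+  /* Illustrative stand-in for tw5864_frameinterval_get(). */
+  static int get_frameinterval(struct fract *f, int fail)
+  {
+          if (fail)
+                  return -EINVAL; /* output left untouched on error */
+          f->numerator = 1;
+          f->denominator = 25;
+          return 0;
+  }
+
+  int main(void)
+  {
+          struct fract fi;
+
+          if (get_frameinterval(&fi, 1))
+                  return 1;       /* do not touch fi: it is uninitialized */
+
+          fi.numerator *= 30;     /* only safe after the check above */
+          printf("%u/%u\n", fi.numerator, fi.denominator);
+          return 0;
+  }
+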
+Signed-off-by: Tom Rix <trix@redhat.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/pci/tw5864/tw5864-video.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
+index ec1e06da7e4fb..a65114e7ca346 100644
+--- a/drivers/media/pci/tw5864/tw5864-video.c
++++ b/drivers/media/pci/tw5864/tw5864-video.c
+@@ -767,6 +767,9 @@ static int tw5864_enum_frameintervals(struct file *file, void *priv,
+ fintv->type = V4L2_FRMIVAL_TYPE_STEPWISE;
+
+ ret = tw5864_frameinterval_get(input, &frameinterval);
++ if (ret)
++ return ret;
++
+ fintv->stepwise.step = frameinterval;
+ fintv->stepwise.min = frameinterval;
+ fintv->stepwise.max = frameinterval;
+@@ -785,6 +788,9 @@ static int tw5864_g_parm(struct file *file, void *priv,
+ cp->capability = V4L2_CAP_TIMEPERFRAME;
+
+ ret = tw5864_frameinterval_get(input, &cp->timeperframe);
++ if (ret)
++ return ret;
++
+ cp->timeperframe.numerator *= input->frame_interval;
+ cp->capturemode = 0;
+ cp->readbuffers = 2;
+--
+2.27.0
+
--- /dev/null
+From de1e473ab4ac0f16f0f684c6e581f5f2ca44f06a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Aug 2020 10:35:30 +0200
+Subject: media: uvcvideo: Fix dereference of out-of-bound list iterator
+
+From: Daniel W. S. Almeida <dwlsalmeida@gmail.com>
+
+[ Upstream commit f875bcc375c738bf2f599ff2e1c5b918dbd07c45 ]
+
+Fixes the following coccinelle report:
+
+drivers/media/usb/uvc/uvc_ctrl.c:1860:5-11:
+ERROR: invalid reference to the index variable of the iterator on line 1854
+
+by adding a boolean variable to check whether the loop has found a match.
+
+Found using Coccinelle (http://coccinelle.lip6.fr)
+
+[Replace cursor variable with bool found]
+
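+A user-space sketch of the general pattern (not the uvcvideo code): when
+a search loop may run to completion without a match, record the hit in a
+separate boolean instead of testing the cursor afterwards, because after
+the loop the cursor no longer points at a valid element:
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  struct entity { int id; };
+
+  int main(void)
+  {
+          struct entity entities[] = { { 1 }, { 2 }, { 3 } };
+          const int wanted = 5;           /* not present in the array */
+          struct entity *e = NULL;
+          bool found = false;
+          size_t i;
+
+          for (i = 0; i < sizeof(entities) / sizeof(entities[0]); i++) {
+                  if (entities[i].id == wanted) {
+                          e = &entities[i];
+                          found = true;   /* remember the hit explicitly */
+                          break;
+                  }
+          }
+
+          if (!found) {                   /* do not test e->id here */
+                  puts("not found");
+                  return 1;
+          }
+          printf("found id %d\n", e->id);
+          return 0;
+  }
+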
+Signed-off-by: Daniel W. S. Almeida <dwlsalmeida@gmail.com>
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/usb/uvc/uvc_ctrl.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index a30a8a731eda8..c13ed95cb06fe 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1848,30 +1848,35 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
+ {
+ struct uvc_entity *entity;
+ struct uvc_control *ctrl;
+- unsigned int i, found = 0;
++ unsigned int i;
++ bool found;
+ u32 reqflags;
+ u16 size;
+ u8 *data = NULL;
+ int ret;
+
+ /* Find the extension unit. */
++ found = false;
+ list_for_each_entry(entity, &chain->entities, chain) {
+ if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT &&
+- entity->id == xqry->unit)
++ entity->id == xqry->unit) {
++ found = true;
+ break;
++ }
+ }
+
+- if (entity->id != xqry->unit) {
++ if (!found) {
+ uvc_trace(UVC_TRACE_CONTROL, "Extension unit %u not found.\n",
+ xqry->unit);
+ return -ENOENT;
+ }
+
+ /* Find the control and perform delayed initialization if needed. */
++ found = false;
+ for (i = 0; i < entity->ncontrols; ++i) {
+ ctrl = &entity->controls[i];
+ if (ctrl->index == xqry->selector - 1) {
+- found = 1;
++ found = true;
+ break;
+ }
+ }
+--
+2.27.0
+
--- /dev/null
+From 4a05eb6b54bec5b9dface4faae19f46b9544c7ed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Aug 2020 12:47:16 +0200
+Subject: media: videodev2.h: RGB BT2020 and HSV are always full range
+
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+
+[ Upstream commit b305dfe2e93434b12d438434461b709641f62af4 ]
+
+The default RGB quantization range for BT.2020 is full range (just as for
+all the other RGB pixel encodings), not limited range.
+
+Update the V4L2_MAP_QUANTIZATION_DEFAULT macro and documentation
+accordingly.
+
+Also mention that HSV is always full range and cannot be limited range.
+
+When RGB BT2020 was introduced in V4L2 it was not clear whether it should
+be limited or full range, but full range is the right (and consistent)
+choice.
+
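+A small user-space rendition of the updated mapping (the constants are
+re-declared here with illustrative values; the real definitions live in
+videodev2.h): full range for all R'G'B'/HSV and for JPEG Y'CbCr, limited
+range otherwise, with the BT.2020 special case gone:
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  enum { QUANT_FULL = 1, QUANT_LIM = 2 };       /* illustrative values */
+  enum { COLSP_JPEG = 7, COLSP_BT2020 = 10 };   /* illustrative values */
+
+  static int map_quant_default(bool is_rgb_or_hsv, int colorspace)
+  {
+          return (is_rgb_or_hsv || colorspace == COLSP_JPEG) ?
+                 QUANT_FULL : QUANT_LIM;
+  }
+
+  int main(void)
+  {
+          /* RGB BT.2020 now maps to full range (used to be limited). */
+          printf("%d\n", map_quant_default(true, COLSP_BT2020) == QUANT_FULL);
+          /* Y'CbCr BT.2020 stays limited range. */
+          printf("%d\n", map_quant_default(false, COLSP_BT2020) == QUANT_LIM);
+          return 0;
+  }
+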
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../media/v4l/colorspaces-defs.rst | 9 ++++-----
+ .../media/v4l/colorspaces-details.rst | 5 ++---
+ include/uapi/linux/videodev2.h | 17 ++++++++---------
+ 3 files changed, 14 insertions(+), 17 deletions(-)
+
+diff --git a/Documentation/userspace-api/media/v4l/colorspaces-defs.rst b/Documentation/userspace-api/media/v4l/colorspaces-defs.rst
+index 01404e1f609a7..4089f426258d6 100644
+--- a/Documentation/userspace-api/media/v4l/colorspaces-defs.rst
++++ b/Documentation/userspace-api/media/v4l/colorspaces-defs.rst
+@@ -36,8 +36,7 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
+ :c:type:`v4l2_hsv_encoding` specifies which encoding is used.
+
+ .. note:: The default R'G'B' quantization is full range for all
+- colorspaces except for BT.2020 which uses limited range R'G'B'
+- quantization.
++ colorspaces. HSV formats are always full range.
+
+ .. tabularcolumns:: |p{6.7cm}|p{10.8cm}|
+
+@@ -169,8 +168,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
+ - Details
+ * - ``V4L2_QUANTIZATION_DEFAULT``
+ - Use the default quantization encoding as defined by the
+- colorspace. This is always full range for R'G'B' (except for the
+- BT.2020 colorspace) and HSV. It is usually limited range for Y'CbCr.
++ colorspace. This is always full range for R'G'B' and HSV.
++ It is usually limited range for Y'CbCr.
+ * - ``V4L2_QUANTIZATION_FULL_RANGE``
+ - Use the full range quantization encoding. I.e. the range [0…1] is
+ mapped to [0…255] (with possible clipping to [1…254] to avoid the
+@@ -180,4 +179,4 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
+ * - ``V4L2_QUANTIZATION_LIM_RANGE``
+ - Use the limited range quantization encoding. I.e. the range [0…1]
+ is mapped to [16…235]. Cb and Cr are mapped from [-0.5…0.5] to
+- [16…240].
++ [16…240]. Limited Range cannot be used with HSV.
+diff --git a/Documentation/userspace-api/media/v4l/colorspaces-details.rst b/Documentation/userspace-api/media/v4l/colorspaces-details.rst
+index 300c5d2e7d0f0..cf1b825ec34a7 100644
+--- a/Documentation/userspace-api/media/v4l/colorspaces-details.rst
++++ b/Documentation/userspace-api/media/v4l/colorspaces-details.rst
+@@ -377,9 +377,8 @@ Colorspace BT.2020 (V4L2_COLORSPACE_BT2020)
+ The :ref:`itu2020` standard defines the colorspace used by Ultra-high
+ definition television (UHDTV). The default transfer function is
+ ``V4L2_XFER_FUNC_709``. The default Y'CbCr encoding is
+-``V4L2_YCBCR_ENC_BT2020``. The default R'G'B' quantization is limited
+-range (!), and so is the default Y'CbCr quantization. The chromaticities
+-of the primary colors and the white reference are:
++``V4L2_YCBCR_ENC_BT2020``. The default Y'CbCr quantization is limited range.
++The chromaticities of the primary colors and the white reference are:
+
+
+
+diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
+index 235db7754606d..f717826d5d7c0 100644
+--- a/include/uapi/linux/videodev2.h
++++ b/include/uapi/linux/videodev2.h
+@@ -373,9 +373,9 @@ enum v4l2_hsv_encoding {
+
+ enum v4l2_quantization {
+ /*
+- * The default for R'G'B' quantization is always full range, except
+- * for the BT2020 colorspace. For Y'CbCr the quantization is always
+- * limited range, except for COLORSPACE_JPEG: this is full range.
++ * The default for R'G'B' quantization is always full range.
++ * For Y'CbCr the quantization is always limited range, except
++ * for COLORSPACE_JPEG: this is full range.
+ */
+ V4L2_QUANTIZATION_DEFAULT = 0,
+ V4L2_QUANTIZATION_FULL_RANGE = 1,
+@@ -384,14 +384,13 @@ enum v4l2_quantization {
+
+ /*
+ * Determine how QUANTIZATION_DEFAULT should map to a proper quantization.
+- * This depends on whether the image is RGB or not, the colorspace and the
+- * Y'CbCr encoding.
++ * This depends on whether the image is RGB or not, the colorspace.
++ * The Y'CbCr encoding is not used anymore, but is still there for backwards
++ * compatibility.
+ */
+ #define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb_or_hsv, colsp, ycbcr_enc) \
+- (((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
+- V4L2_QUANTIZATION_LIM_RANGE : \
+- (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
+- V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
++ (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
++ V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE)
+
+ /*
+ * Deprecated names for opRGB colorspace (IEC 61966-2-5)
+--
+2.27.0
+
--- /dev/null
+From ff6510e618d9480a10fd51992cc174ba794ba181 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Aug 2020 14:37:59 +0300
+Subject: memory: emif: Remove bogus debugfs error handling
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit fd22781648080cc400772b3c68aa6b059d2d5420 ]
+
+Callers are generally not supposed to check the return values from
+debugfs functions. Debugfs functions never return NULL so this error
+handling will never trigger. (Historically debugfs functions used to
+return a mix of NULL and error pointers but it was eventually deemed too
+complicated for something which wasn't intended to be used in normal
+situations).
+
+Delete all the error handling.
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Acked-by: Santosh Shilimkar <ssantosh@kernel.org>
+Link: https://lore.kernel.org/r/20200826113759.GF393664@mwanda
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/emif.c | 33 +++++----------------------------
+ 1 file changed, 5 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
+index bb6a71d267988..5c4d8319c9cfb 100644
+--- a/drivers/memory/emif.c
++++ b/drivers/memory/emif.c
+@@ -163,35 +163,12 @@ static const struct file_operations emif_mr4_fops = {
+
+ static int __init_or_module emif_debugfs_init(struct emif_data *emif)
+ {
+- struct dentry *dentry;
+- int ret;
+-
+- dentry = debugfs_create_dir(dev_name(emif->dev), NULL);
+- if (!dentry) {
+- ret = -ENOMEM;
+- goto err0;
+- }
+- emif->debugfs_root = dentry;
+-
+- dentry = debugfs_create_file("regcache_dump", S_IRUGO,
+- emif->debugfs_root, emif, &emif_regdump_fops);
+- if (!dentry) {
+- ret = -ENOMEM;
+- goto err1;
+- }
+-
+- dentry = debugfs_create_file("mr4", S_IRUGO,
+- emif->debugfs_root, emif, &emif_mr4_fops);
+- if (!dentry) {
+- ret = -ENOMEM;
+- goto err1;
+- }
+-
++ emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL);
++ debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif,
++ &emif_regdump_fops);
++ debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif,
++ &emif_mr4_fops);
+ return 0;
+-err1:
+- debugfs_remove_recursive(emif->debugfs_root);
+-err0:
+- return ret;
+ }
+
+ static void __exit emif_debugfs_exit(struct emif_data *emif)
+--
+2.27.0
+
--- /dev/null
+From 426dc0b53ee45b3baaf1fc0795a8e3582c8b2f8a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Aug 2020 08:33:12 +0200
+Subject: misc: fastrpc: fix common struct sg_table related issues
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+[ Upstream commit 7cd7edb89437457ec36ffdbb970cc314d00c4aba ]
+
+The Documentation/DMA-API-HOWTO.txt states that the dma_map_sg() function
+returns the number of the created entries in the DMA address space.
+However the subsequent calls to the dma_sync_sg_for_{device,cpu}() and
+dma_unmap_sg must be called with the original number of the entries
+passed to the dma_map_sg().
+
+struct sg_table is a common structure used for describing a non-contiguous
+memory buffer, used commonly in the DRM and graphics subsystems. It
+consists of a scatterlist with memory pages and DMA addresses (sgl entry),
+as well as the number of scatterlist entries: CPU pages (orig_nents entry)
+and DMA mapped pages (nents entry).
+
+It turned out that it was a common mistake to misuse nents and orig_nents
+entries, calling DMA-mapping functions with a wrong number of entries or
+ignoring the number of mapped entries returned by the dma_map_sg()
+function.
+
+To avoid such issues, let's use the common dma-mapping wrappers operating
+directly on the struct sg_table objects and use scatterlist page
+iterators where possible. This, almost always, hides references to the
+nents and orig_nents entries, making the code robust, easier to follow
+and copy/paste safe.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Link: https://lore.kernel.org/r/20200826063316.23486-29-m.szyprowski@samsung.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/misc/fastrpc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 7939c55daceb2..9d68677493163 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -518,7 +518,7 @@ fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
+
+ table = &a->sgt;
+
+- if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir))
++ if (!dma_map_sgtable(attachment->dev, table, dir, 0))
+ return ERR_PTR(-ENOMEM);
+
+ return table;
+@@ -528,7 +528,7 @@ static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *table,
+ enum dma_data_direction dir)
+ {
+- dma_unmap_sg(attach->dev, table->sgl, table->nents, dir);
++ dma_unmap_sgtable(attach->dev, table, dir, 0);
+ }
+
+ static void fastrpc_release(struct dma_buf *dmabuf)
+--
+2.27.0
+
--- /dev/null
+From 9e44ec940fa93b53819da22c0872a29740882ef7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 24 Oct 2020 16:37:33 +0300
+Subject: mlxsw: core: Fix use-after-free in mlxsw_emad_trans_finish()
+
+From: Amit Cohen <amcohen@nvidia.com>
+
+[ Upstream commit 0daf2bf5a2dcf33d446b76360908f109816e2e21 ]
+
+Each EMAD transaction stores the skb used to issue the EMAD request
+('trans->tx_skb') so that the request could be retried in case of a
+timeout. The skb can be freed when a corresponding response is received
+or as part of the retry logic (e.g., failed retransmit, exceeded maximum
+number of retries).
+
+The two tasks (i.e., response processing and retransmits) are
+synchronized by the atomic 'trans->active' field which ensures that
+responses to inactive transactions are ignored.
+
+In case of a failed retransmit the transaction is finished and all of
+its resources are freed. However, the current code does not mark it as
+inactive. Syzkaller was able to hit a race condition in which a
+concurrent response is processed while the transaction's resources are
+being freed, resulting in a use-after-free [1].
+
+Fix the issue by making sure to mark the transaction as inactive after a
+failed retransmit and free its resources only if a concurrent task did
+not already do that.
+
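+The essence of the fix, as a hedged stand-alone sketch using C11 atomics
+(the driver itself uses the kernel's atomic_dec_and_test() on
+trans->active): both the response path and the failed-retransmit path
+drop the "active" reference, and only the path that sees the count reach
+zero may free the resources:
+
+  #include <stdatomic.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct trans {
+          atomic_int active;      /* 1 while the transaction is live */
+          char *skb;              /* stand-in for the request skb */
+  };
+
+  /* Whoever drops the last "active" reference frees the skb. */
+  static void finish(struct trans *t, const char *who)
+  {
+          if (atomic_fetch_sub(&t->active, 1) != 1)
+                  return;         /* someone else already finished it */
+          printf("%s frees the skb\n", who);
+          free(t->skb);
+          t->skb = NULL;
+  }
+
+  int main(void)
+  {
+          struct trans t;
+
+          t.skb = malloc(16);
+          atomic_init(&t.active, 1);
+
+          finish(&t, "failed retransmit");  /* marks inactive and frees */
+          finish(&t, "late response");      /* sees it inactive, does nothing */
+          return 0;
+  }
+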
+[1]
+BUG: KASAN: use-after-free in consume_skb+0x30/0x370
+net/core/skbuff.c:833
+Read of size 4 at addr ffff88804f570494 by task syz-executor.0/1004
+
+CPU: 0 PID: 1004 Comm: syz-executor.0 Not tainted 5.8.0-rc7+ #68
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
+rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.org 04/01/2014
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0xf6/0x16e lib/dump_stack.c:118
+ print_address_description.constprop.0+0x1c/0x250
+mm/kasan/report.c:383
+ __kasan_report mm/kasan/report.c:513 [inline]
+ kasan_report.cold+0x1f/0x37 mm/kasan/report.c:530
+ check_memory_region_inline mm/kasan/generic.c:186 [inline]
+ check_memory_region+0x14e/0x1b0 mm/kasan/generic.c:192
+ instrument_atomic_read include/linux/instrumented.h:56 [inline]
+ atomic_read include/asm-generic/atomic-instrumented.h:27 [inline]
+ refcount_read include/linux/refcount.h:147 [inline]
+ skb_unref include/linux/skbuff.h:1044 [inline]
+ consume_skb+0x30/0x370 net/core/skbuff.c:833
+ mlxsw_emad_trans_finish+0x64/0x1c0 drivers/net/ethernet/mellanox/mlxsw/core.c:592
+ mlxsw_emad_process_response drivers/net/ethernet/mellanox/mlxsw/core.c:651 [inline]
+ mlxsw_emad_rx_listener_func+0x5c9/0xac0 drivers/net/ethernet/mellanox/mlxsw/core.c:672
+ mlxsw_core_skb_receive+0x4df/0x770 drivers/net/ethernet/mellanox/mlxsw/core.c:2063
+ mlxsw_pci_cqe_rdq_handle drivers/net/ethernet/mellanox/mlxsw/pci.c:595 [inline]
+ mlxsw_pci_cq_tasklet+0x12a6/0x2520 drivers/net/ethernet/mellanox/mlxsw/pci.c:651
+ tasklet_action_common.isra.0+0x13f/0x3e0 kernel/softirq.c:550
+ __do_softirq+0x223/0x964 kernel/softirq.c:292
+ asm_call_on_stack+0x12/0x20 arch/x86/entry/entry_64.S:711
+
+Allocated by task 1006:
+ save_stack+0x1b/0x40 mm/kasan/common.c:48
+ set_track mm/kasan/common.c:56 [inline]
+ __kasan_kmalloc mm/kasan/common.c:494 [inline]
+ __kasan_kmalloc.constprop.0+0xc2/0xd0 mm/kasan/common.c:467
+ slab_post_alloc_hook mm/slab.h:586 [inline]
+ slab_alloc_node mm/slub.c:2824 [inline]
+ slab_alloc mm/slub.c:2832 [inline]
+ kmem_cache_alloc+0xcd/0x2e0 mm/slub.c:2837
+ __build_skb+0x21/0x60 net/core/skbuff.c:311
+ __netdev_alloc_skb+0x1e2/0x360 net/core/skbuff.c:464
+ netdev_alloc_skb include/linux/skbuff.h:2810 [inline]
+ mlxsw_emad_alloc drivers/net/ethernet/mellanox/mlxsw/core.c:756 [inline]
+ mlxsw_emad_reg_access drivers/net/ethernet/mellanox/mlxsw/core.c:787 [inline]
+ mlxsw_core_reg_access_emad+0x1ab/0x1420 drivers/net/ethernet/mellanox/mlxsw/core.c:1817
+ mlxsw_reg_trans_query+0x39/0x50 drivers/net/ethernet/mellanox/mlxsw/core.c:1831
+ mlxsw_sp_sb_pm_occ_clear drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c:260 [inline]
+ mlxsw_sp_sb_occ_max_clear+0xbff/0x10a0 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c:1365
+ mlxsw_devlink_sb_occ_max_clear+0x76/0xb0 drivers/net/ethernet/mellanox/mlxsw/core.c:1037
+ devlink_nl_cmd_sb_occ_max_clear_doit+0x1ec/0x280 net/core/devlink.c:1765
+ genl_family_rcv_msg_doit net/netlink/genetlink.c:669 [inline]
+ genl_family_rcv_msg net/netlink/genetlink.c:714 [inline]
+ genl_rcv_msg+0x617/0x980 net/netlink/genetlink.c:731
+ netlink_rcv_skb+0x152/0x440 net/netlink/af_netlink.c:2470
+ genl_rcv+0x24/0x40 net/netlink/genetlink.c:742
+ netlink_unicast_kernel net/netlink/af_netlink.c:1304 [inline]
+ netlink_unicast+0x53a/0x750 net/netlink/af_netlink.c:1330
+ netlink_sendmsg+0x850/0xd90 net/netlink/af_netlink.c:1919
+ sock_sendmsg_nosec net/socket.c:651 [inline]
+ sock_sendmsg+0x150/0x190 net/socket.c:671
+ ____sys_sendmsg+0x6d8/0x840 net/socket.c:2359
+ ___sys_sendmsg+0xff/0x170 net/socket.c:2413
+ __sys_sendmsg+0xe5/0x1b0 net/socket.c:2446
+ do_syscall_64+0x56/0xa0 arch/x86/entry/common.c:384
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Freed by task 73:
+ save_stack+0x1b/0x40 mm/kasan/common.c:48
+ set_track mm/kasan/common.c:56 [inline]
+ kasan_set_free_info mm/kasan/common.c:316 [inline]
+ __kasan_slab_free+0x12c/0x170 mm/kasan/common.c:455
+ slab_free_hook mm/slub.c:1474 [inline]
+ slab_free_freelist_hook mm/slub.c:1507 [inline]
+ slab_free mm/slub.c:3072 [inline]
+ kmem_cache_free+0xbe/0x380 mm/slub.c:3088
+ kfree_skbmem net/core/skbuff.c:622 [inline]
+ kfree_skbmem+0xef/0x1b0 net/core/skbuff.c:616
+ __kfree_skb net/core/skbuff.c:679 [inline]
+ consume_skb net/core/skbuff.c:837 [inline]
+ consume_skb+0xe1/0x370 net/core/skbuff.c:831
+ mlxsw_emad_trans_finish+0x64/0x1c0 drivers/net/ethernet/mellanox/mlxsw/core.c:592
+ mlxsw_emad_transmit_retry.isra.0+0x9d/0xc0 drivers/net/ethernet/mellanox/mlxsw/core.c:613
+ mlxsw_emad_trans_timeout_work+0x43/0x50 drivers/net/ethernet/mellanox/mlxsw/core.c:625
+ process_one_work+0xa3e/0x17a0 kernel/workqueue.c:2269
+ worker_thread+0x9e/0x1050 kernel/workqueue.c:2415
+ kthread+0x355/0x470 kernel/kthread.c:291
+ ret_from_fork+0x22/0x30 arch/x86/entry/entry_64.S:293
+
+The buggy address belongs to the object at ffff88804f5703c0
+ which belongs to the cache skbuff_head_cache of size 224
+The buggy address is located 212 bytes inside of
+ 224-byte region [ffff88804f5703c0, ffff88804f5704a0)
+The buggy address belongs to the page:
+page:ffffea00013d5c00 refcount:1 mapcount:0 mapping:0000000000000000
+index:0x0
+flags: 0x100000000000200(slab)
+raw: 0100000000000200 dead000000000100 dead000000000122 ffff88806c625400
+raw: 0000000000000000 00000000000c000c 00000001ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ ffff88804f570380: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb
+ ffff88804f570400: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+>ffff88804f570480: fb fb fb fb fc fc fc fc fc fc fc fc fc fc fc fc
+ ^
+ ffff88804f570500: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ ffff88804f570580: 00 00 00 00 00 00 00 00 00 00 00 00 fc fc fc fc
+
+Fixes: caf7297e7ab5f ("mlxsw: core: Introduce support for asynchronous EMAD register access")
+Signed-off-by: Amit Cohen <amcohen@nvidia.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/core.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
+index f6aa80fe343f5..05e90ef15871c 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
+@@ -607,6 +607,9 @@ static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
+ err = mlxsw_emad_transmit(trans->core, trans);
+ if (err == 0)
+ return;
++
++ if (!atomic_dec_and_test(&trans->active))
++ return;
+ } else {
+ err = -EIO;
+ }
+--
+2.27.0
+
--- /dev/null
+From 8469091364ccc3e9cba51aeee3bff547a03b4c86 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Sep 2020 14:52:16 +1000
+Subject: mm: fix exec activate_mm vs TLB shootdown and lazy tlb switching race
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+[ Upstream commit d53c3dfb23c45f7d4f910c3a3ca84bf0a99c6143 ]
+
+Reading and modifying current->mm and current->active_mm and switching
+mm should be done with irqs off, to prevent races seeing an intermediate
+state.
+
+This is similar to commit 38cf307c1f20 ("mm: fix kthread_use_mm() vs TLB
+invalidate"). At exec-time when the new mm is activated, the old one
+should usually be single-threaded and no longer used, unless something
+else is holding an mm_users reference (which may be possible).
+
+Absent other mm_users, there is also a race with preemption and lazy tlb
+switching. Consider the kernel_execve case where the current thread is
+using a lazy tlb active mm:
+
+ call_usermodehelper()
+ kernel_execve()
+ old_mm = current->mm;
+ active_mm = current->active_mm;
+ *** preempt *** --------------------> schedule()
+ prev->active_mm = NULL;
+ mmdrop(prev active_mm);
+ ...
+ <-------------------- schedule()
+ current->mm = mm;
+ current->active_mm = mm;
+ if (!old_mm)
+ mmdrop(active_mm);
+
+If we switch back to the kernel thread from a different mm, there is a
+double free of the old active_mm, and a missing free of the new one.
+
+Closing this race only requires interrupts to be disabled while ->mm
+and ->active_mm are being switched, but the TLB problem requires also
+holding interrupts off over activate_mm. Unfortunately not all archs
+can do that yet, e.g., arm defers the switch if irqs are disabled and
+expects finish_arch_post_lock_switch() to be called to complete the
+flush; um takes a blocking lock in activate_mm().
+
+So as a first step, disable interrupts across the mm/active_mm updates
+to close the lazy tlb preempt race, and provide an arch option to
+extend that to activate_mm which allows architectures doing IPI based
+TLB shootdowns to close the second race.
+
+This is a bit ugly, but in the interest of fixing the bug and backporting
+before all architectures are converted this is a compromise.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200914045219.3736466-2-npiggin@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/Kconfig | 7 +++++++
+ fs/exec.c | 17 +++++++++++++++--
+ 2 files changed, 22 insertions(+), 2 deletions(-)
+
+diff --git a/arch/Kconfig b/arch/Kconfig
+index af14a567b493f..94821e3f94d16 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -414,6 +414,13 @@ config MMU_GATHER_NO_GATHER
+ bool
+ depends on MMU_GATHER_TABLE_FREE
+
++config ARCH_WANT_IRQS_OFF_ACTIVATE_MM
++ bool
++ help
++ Temporary select until all architectures can be converted to have
++ irqs disabled over activate_mm. Architectures that do IPI based TLB
++ shootdowns should enable this.
++
+ config ARCH_HAVE_NMI_SAFE_CMPXCHG
+ bool
+
+diff --git a/fs/exec.c b/fs/exec.c
+index 07910f5032e74..3622681489864 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1131,11 +1131,24 @@ static int exec_mmap(struct mm_struct *mm)
+ }
+
+ task_lock(tsk);
+- active_mm = tsk->active_mm;
+ membarrier_exec_mmap(mm);
+- tsk->mm = mm;
++
++ local_irq_disable();
++ active_mm = tsk->active_mm;
+ tsk->active_mm = mm;
++ tsk->mm = mm;
++ /*
++ * This prevents preemption while active_mm is being loaded and
++ * it and mm are being updated, which could cause problems for
++ * lazy tlb mm refcounting when these are updated by context
++ * switches. Not all architectures can handle irqs off over
++ * activate_mm yet.
++ */
++ if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
++ local_irq_enable();
+ activate_mm(active_mm, mm);
++ if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
++ local_irq_enable();
+ tsk->mm->vmacache_seqnum = 0;
+ vmacache_flush(tsk);
+ task_unlock(tsk);
+--
+2.27.0
+
--- /dev/null
+From 572eaabac066d032fd59a16a980b2c86cc7c643c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 22 Aug 2020 11:45:28 +0530
+Subject: mmc: via-sdmmc: Fix data race bug
+
+From: Madhuparna Bhowmik <madhuparnabhowmik10@gmail.com>
+
+[ Upstream commit 87d7ad089b318b4f319bf57f1daa64eb6d1d10ad ]
+
+via_save_pcictrlreg() should be called with host->lock held
+as it writes to pm_pcictrl_reg, otherwise there can be a race
+condition between via_sd_suspend() and via_sdc_card_detect().
+The same pattern is used in the function via_reset_pcictrl()
+as well, where via_save_pcictrlreg() is called with host->lock
+held.
+
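+A minimal pthread analogy of the rule (purely illustrative, not the
+driver code): the register-save helper assumes the lock is held, so the
+suspend-like path must take the lock around it, just as the reset path
+already does:
+
+  #include <pthread.h>
+  #include <stdio.h>
+
+  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+  static unsigned int pm_reg_cache;     /* shared, written by several paths */
+  static unsigned int hw_reg = 0xa5;
+
+  /* Must be called with "lock" held. */
+  static void save_regs(void)
+  {
+          pm_reg_cache = hw_reg;
+  }
+
+  static void suspend_path(void)
+  {
+          pthread_mutex_lock(&lock);    /* the piece missing in the bug */
+          save_regs();
+          pthread_mutex_unlock(&lock);
+  }
+
+  int main(void)
+  {
+          suspend_path();
+          printf("saved 0x%x\n", pm_reg_cache);
+          return 0;
+  }
+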
+Found by Linux Driver Verification project (linuxtesting.org).
+
+Signed-off-by: Madhuparna Bhowmik <madhuparnabhowmik10@gmail.com>
+Link: https://lore.kernel.org/r/20200822061528.7035-1-madhuparnabhowmik10@gmail.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/via-sdmmc.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
+index 49dab9f42b6d6..9b755ea0fa03c 100644
+--- a/drivers/mmc/host/via-sdmmc.c
++++ b/drivers/mmc/host/via-sdmmc.c
+@@ -1257,11 +1257,14 @@ static void __maybe_unused via_init_sdc_pm(struct via_crdr_mmc_host *host)
+ static int __maybe_unused via_sd_suspend(struct device *dev)
+ {
+ struct via_crdr_mmc_host *host;
++ unsigned long flags;
+
+ host = dev_get_drvdata(dev);
+
++ spin_lock_irqsave(&host->lock, flags);
+ via_save_pcictrlreg(host);
+ via_save_sdcreg(host);
++ spin_unlock_irqrestore(&host->lock, flags);
+
+ device_wakeup_enable(dev);
+
+--
+2.27.0
+
--- /dev/null
+From 3b22be95b17740068729260c0c7a0598eabbfcd1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Oct 2020 22:45:14 -0400
+Subject: nbd: make the config put is called before the notifying the waiter
+
+From: Xiubo Li <xiubli@redhat.com>
+
+[ Upstream commit 87aac3a80af5cbad93e63250e8a1e19095ba0d30 ]
+
+There is a race case with ceph's rbd-nbd tool: a mapping attempt may
+fail with EBUSY from ioctl(nbd, NBD_DO_IT) even though the nbd device
+has actually already been unmapped.
+
+This happens when recv_work() is scheduled out just after the wake_up()
+and defers calling nbd_config_put(): the map process has already exited,
+but "nbd->recv_task" has not been cleared yet.
+
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/nbd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index edf8b632e3d27..f46e26c9d9b3c 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -801,9 +801,9 @@ static void recv_work(struct work_struct *work)
+ if (likely(!blk_should_fake_timeout(rq->q)))
+ blk_mq_complete_request(rq);
+ }
++ nbd_config_put(nbd);
+ atomic_dec(&config->recv_threads);
+ wake_up(&config->recv_wq);
+- nbd_config_put(nbd);
+ kfree(args);
+ }
+
+--
+2.27.0
+
--- /dev/null
+From b9628f2520a744ecd6b2c2ee996f9515ce6ec03c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Oct 2020 09:54:04 +0530
+Subject: net: 9p: initialize sun_server.sun_path to have addr's value only
+ when addr is valid
+
+From: Anant Thazhemadam <anant.thazhemadam@gmail.com>
+
+[ Upstream commit 7ca1db21ef8e0e6725b4d25deed1ca196f7efb28 ]
+
+In p9_fd_create_unix, a check is performed to see whether the addr
+(passed as an argument) is NULL or not.
+However, no check is performed to see whether addr is a valid address,
+i.e. whether it does not consist entirely of 0's.
+Initializing sun_server.sun_path to this faulty addr value leads to an
+uninitialized variable, as detected by KMSAN.
+Checking for such a faulty addr and returning a negative error number
+appropriately resolves this issue.
+
+Link: http://lkml.kernel.org/r/20201012042404.2508-1-anant.thazhemadam@gmail.com
+Reported-by: syzbot+75d51fe5bf4ebe988518@syzkaller.appspotmail.com
+Tested-by: syzbot+75d51fe5bf4ebe988518@syzkaller.appspotmail.com
+Signed-off-by: Anant Thazhemadam <anant.thazhemadam@gmail.com>
+Signed-off-by: Dominique Martinet <asmadeus@codewreck.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/9p/trans_fd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index c0762a302162c..8f528e783a6c5 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -1023,7 +1023,7 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
+
+ csocket = NULL;
+
+- if (addr == NULL)
++ if (!addr || !strlen(addr))
+ return -EINVAL;
+
+ if (strlen(addr) >= UNIX_PATH_MAX) {
+--
+2.27.0
+
--- /dev/null
+From 23ca1d55353b2be67394f0632e23f576e5e42e61 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Sep 2020 18:12:16 +0200
+Subject: nfc: s3fwrn5: Add missing CRYPTO_HASH dependency
+
+From: Krzysztof Kozlowski <krzk@kernel.org>
+
+[ Upstream commit 4aa62c62d4c41d71b2bda5ed01b78961829ee93c ]
+
+The driver uses crypto hash functions so it needs to select CRYPTO_HASH.
+This fixes build errors:
+
+ arc-linux-ld: drivers/nfc/s3fwrn5/firmware.o: in function `s3fwrn5_fw_download':
+ firmware.c:(.text+0x152): undefined reference to `crypto_alloc_shash'
+
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nfc/s3fwrn5/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/nfc/s3fwrn5/Kconfig b/drivers/nfc/s3fwrn5/Kconfig
+index af9d18690afeb..3f8b6da582803 100644
+--- a/drivers/nfc/s3fwrn5/Kconfig
++++ b/drivers/nfc/s3fwrn5/Kconfig
+@@ -2,6 +2,7 @@
+ config NFC_S3FWRN5
+ tristate
+ select CRYPTO
++ select CRYPTO_HASH
+ help
+ Core driver for Samsung S3FWRN5 NFC chip. Contains core utilities
+ of chip. It's intended to be used by PHYs to avoid duplicating lots
+--
+2.27.0
+
--- /dev/null
+From 279b911057bb72dc05e85faf4cafd211ec86b13c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Aug 2020 12:11:47 -0400
+Subject: NFS4: Fix oops when copy_file_range is attempted with NFS4.0 source
+
+From: Dave Wysochanski <dwysocha@redhat.com>
+
+[ Upstream commit d8a6ad913c286d4763ae20b14c02fe6f39d7cd9f ]
+
+The following oops is seen during xfstest/565 when the 'test'
+(source of the copy) is NFS4.0 and 'scratch' (destination) is NFS4.2
+[ 59.692458] run fstests generic/565 at 2020-08-01 05:50:35
+[ 60.613588] BUG: kernel NULL pointer dereference, address: 0000000000000008
+[ 60.624970] #PF: supervisor read access in kernel mode
+[ 60.627671] #PF: error_code(0x0000) - not-present page
+[ 60.630347] PGD 0 P4D 0
+[ 60.631853] Oops: 0000 [#1] SMP PTI
+[ 60.634086] CPU: 6 PID: 2828 Comm: xfs_io Kdump: loaded Not tainted 5.8.0-rc3 #1
+[ 60.637676] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
+[ 60.639901] RIP: 0010:nfs4_check_serverowner_major_id+0x5/0x30 [nfsv4]
+[ 60.642719] Code: 89 ff e8 3e b3 b8 e1 e9 71 fe ff ff 41 bc da d8 ff ff e9 c3 fe ff ff e8 e9 9d 08 e2 66 0f 1f 84 00 00 00 00 00 66 66 66 66 90 <8b> 57 08 31 c0 3b 56 08 75 12 48 83 c6 0c 48 83 c7 0c e8 c4 97 bb
+[ 60.652629] RSP: 0018:ffffc265417f7e10 EFLAGS: 00010287
+[ 60.655379] RAX: ffffa0664b066400 RBX: 0000000000000000 RCX: 0000000000000001
+[ 60.658754] RDX: ffffa066725fb000 RSI: ffffa066725fd000 RDI: 0000000000000000
+[ 60.662292] RBP: 0000000000020000 R08: 0000000000020000 R09: 0000000000000000
+[ 60.666189] R10: 0000000000000003 R11: 0000000000000000 R12: ffffa06648258d00
+[ 60.669914] R13: 0000000000000000 R14: 0000000000000000 R15: ffffa06648258100
+[ 60.673645] FS: 00007faa9fb35800(0000) GS:ffffa06677d80000(0000) knlGS:0000000000000000
+[ 60.677698] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 60.680773] CR2: 0000000000000008 CR3: 0000000203f14000 CR4: 00000000000406e0
+[ 60.684476] Call Trace:
+[ 60.685809] nfs4_copy_file_range+0xfc/0x230 [nfsv4]
+[ 60.688704] vfs_copy_file_range+0x2ee/0x310
+[ 60.691104] __x64_sys_copy_file_range+0xd6/0x210
+[ 60.693527] do_syscall_64+0x4d/0x90
+[ 60.695512] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+[ 60.698006] RIP: 0033:0x7faa9febc1bd
+
+Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4file.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index 984938024011b..9d354de613dae 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -146,7 +146,8 @@ static ssize_t __nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
+ /* Only offload copy if superblock is the same */
+ if (file_in->f_op != &nfs4_file_operations)
+ return -EXDEV;
+- if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY))
++ if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY) ||
++ !nfs_server_capable(file_inode(file_in), NFS_CAP_COPY))
+ return -EOPNOTSUPP;
+ if (file_inode(file_in) == file_inode(file_out))
+ return -EOPNOTSUPP;
+--
+2.27.0
+
--- /dev/null
+From 86b0f6e00ba01ace261b1278545e3a187f91c6a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Aug 2020 15:02:55 +0800
+Subject: nfsd: rename delegation related tracepoints to make them less
+ confusing
+
+From: Hou Tao <houtao1@huawei.com>
+
+[ Upstream commit 3caf91757ced158e6c4a44d8b105bd7b3e1767d8 ]
+
+Now, when a read delegation is given, two delegation-related traces
+will be printed:
+
+ nfsd_deleg_open: client 5f45b854:e6058001 stateid 00000030:00000001
+ nfsd_deleg_none: client 5f45b854:e6058001 stateid 0000002f:00000001
+
+Although the intention is to let developers know that two stateids are
+returned, the traces are confusing about whether or not a read delegation
+is handed out. So rename trace_nfsd_deleg_none() to trace_nfsd_open()
+and trace_nfsd_deleg_open() to trace_nfsd_deleg_read() to make
+the intention clearer.
+
+The patched traces will be:
+
+ nfsd_deleg_read: client 5f48a967:b55b21cd stateid 00000003:00000001
+ nfsd_open: client 5f48a967:b55b21cd stateid 00000002:00000001
+
+Suggested-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Hou Tao <houtao1@huawei.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/nfs4state.c | 4 ++--
+ fs/nfsd/trace.h | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index c09a2a4281ec9..0525acfe31314 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -5126,7 +5126,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
+
+ memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
+
+- trace_nfsd_deleg_open(&dp->dl_stid.sc_stateid);
++ trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
+ open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
+ nfs4_put_stid(&dp->dl_stid);
+ return;
+@@ -5243,7 +5243,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
+ nfs4_open_delegation(current_fh, open, stp);
+ nodeleg:
+ status = nfs_ok;
+- trace_nfsd_deleg_none(&stp->st_stid.sc_stateid);
++ trace_nfsd_open(&stp->st_stid.sc_stateid);
+ out:
+ /* 4.1 client trying to upgrade/downgrade delegation? */
+ if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
+diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
+index 1861db1bdc670..99bf07800cd09 100644
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -289,8 +289,8 @@ DEFINE_STATEID_EVENT(layout_recall_done);
+ DEFINE_STATEID_EVENT(layout_recall_fail);
+ DEFINE_STATEID_EVENT(layout_recall_release);
+
+-DEFINE_STATEID_EVENT(deleg_open);
+-DEFINE_STATEID_EVENT(deleg_none);
++DEFINE_STATEID_EVENT(open);
++DEFINE_STATEID_EVENT(deleg_read);
+ DEFINE_STATEID_EVENT(deleg_break);
+ DEFINE_STATEID_EVENT(deleg_recall);
+
+--
+2.27.0
+
--- /dev/null
+From b860e2a3a353b7ff780c85150f0ce6ac9a934941 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Sep 2020 10:09:58 -0400
+Subject: nfsd4: remove check_conflicting_opens warning
+
+From: J. Bruce Fields <bfields@redhat.com>
+
+[ Upstream commit 50747dd5e47bde3b7d7f839c84d0d3b554090497 ]
+
+There are actually rare races where this is possible (e.g. if a new open
+intervenes between the read of i_writecount and the fi_fds).
+
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/nfs4state.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 0525acfe31314..1f646a27481fb 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -4954,7 +4954,6 @@ static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
+ writes--;
+ if (fp->fi_fds[O_RDWR])
+ writes--;
+- WARN_ON_ONCE(writes < 0);
+ if (writes > 0)
+ return -EAGAIN;
+ spin_lock(&fp->fi_lock);
+--
+2.27.0
+
--- /dev/null
+From df615a1d939aa3820332e6a8b2ab76f806e104a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Oct 2020 16:10:40 +0800
+Subject: nvme-rdma: fix crash when connect rejected
+
+From: Chao Leng <lengchao@huawei.com>
+
+[ Upstream commit 43efdb8e870ee0f58633fd579aa5b5185bf5d39e ]
+
+A crash can happen when a connect is rejected. The host establishes
+the connection after receiving the ConnectReply, and then continues to
+send the fabrics Connect command. If the controller does not receive
+the ReadyToUse capsule, the host may receive a ConnectReject reply.
+
+nvme_rdma_destroy_queue_ib is called after the host receives the
+RDMA_CM_EVENT_REJECTED event. Then, when the fabrics Connect command
+times out, nvme_rdma_timeout calls nvme_rdma_complete_rq to fail the
+request. A crash happens due to a use after free in
+nvme_rdma_complete_rq.
+
+Calling nvme_rdma_destroy_queue_ib when handling the
+RDMA_CM_EVENT_REJECTED event is redundant, as nvme_rdma_destroy_queue_ib
+is already called in the connection failure handler.
+
+Signed-off-by: Chao Leng <lengchao@huawei.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/rdma.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 9e378d0a0c01c..116902b1b2c34 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -1926,7 +1926,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ complete(&queue->cm_done);
+ return 0;
+ case RDMA_CM_EVENT_REJECTED:
+- nvme_rdma_destroy_queue_ib(queue);
+ cm_error = nvme_rdma_conn_rejected(queue, ev);
+ break;
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+--
+2.27.0
+
--- /dev/null
+From ffbf3e63486b989d931b537169387f590d18e863 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Sep 2020 11:28:14 +0200
+Subject: octeontx2-af: fix LD CUSTOM LTYPE aliasing
+
+From: Stanislaw Kardach <skardach@marvell.com>
+
+[ Upstream commit 450f0b978870c384dd81d1176088536555f3170e ]
+
+Since LD contains LTYPE definitions tweaked toward efficient
+NIX_AF_RX_FLOW_KEY_ALG(0..31)_FIELD(0..4) usage, the original location
+of NPC_LT_LD_CUSTOM0/1 was aliased with MPLS_IN_* definitions.
+Moving the custom frames to values 6 and 7 removes the aliasing at the
+cost of custom frames also being considered when the TCP/UDP RSS algo is
+configured.
+
+However, since the goal of CUSTOM frames is to classify them into a
+separate set of RQs, this cost is acceptable.
+
+Signed-off-by: Stanislaw Kardach <skardach@marvell.com>
+Acked-by: Sunil Goutham <sgoutham@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/af/npc.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+index 3803af9231c68..c0ff5f70aa431 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+@@ -77,6 +77,8 @@ enum npc_kpu_ld_ltype {
+ NPC_LT_LD_ICMP,
+ NPC_LT_LD_SCTP,
+ NPC_LT_LD_ICMP6,
++ NPC_LT_LD_CUSTOM0,
++ NPC_LT_LD_CUSTOM1,
+ NPC_LT_LD_IGMP = 8,
+ NPC_LT_LD_ESP,
+ NPC_LT_LD_AH,
+@@ -85,8 +87,6 @@ enum npc_kpu_ld_ltype {
+ NPC_LT_LD_NSH,
+ NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_LT_LD_TU_MPLS_IN_IP,
+- NPC_LT_LD_CUSTOM0 = 0xE,
+- NPC_LT_LD_CUSTOM1 = 0xF,
+ };
+
+ enum npc_kpu_le_ltype {
+--
+2.27.0
+
--- /dev/null
+From 1f53251e8f005d3902d370831a9e4a011dfaca91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Aug 2020 14:57:34 -0700
+Subject: PCI/ACPI: Add Ampere Altra SOC MCFG quirk
+
+From: Tuan Phan <tuanphan@os.amperecomputing.com>
+
+[ Upstream commit 877c1a5f79c6984bbe3f2924234c08e2f4f1acd5 ]
+
+Ampere Altra SOC supports only 32-bit ECAM reads. Add an MCFG quirk for
+the platform.
+
+Link: https://lore.kernel.org/r/1596751055-12316-1-git-send-email-tuanphan@os.amperecomputing.com
+Signed-off-by: Tuan Phan <tuanphan@os.amperecomputing.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/pci_mcfg.c | 20 ++++++++++++++++++++
+ drivers/pci/ecam.c | 10 ++++++++++
+ include/linux/pci-ecam.h | 1 +
+ 3 files changed, 31 insertions(+)
+
+diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
+index 54b36b7ad47d9..e526571e0ebdb 100644
+--- a/drivers/acpi/pci_mcfg.c
++++ b/drivers/acpi/pci_mcfg.c
+@@ -142,6 +142,26 @@ static struct mcfg_fixup mcfg_quirks[] = {
+ XGENE_V2_ECAM_MCFG(4, 0),
+ XGENE_V2_ECAM_MCFG(4, 1),
+ XGENE_V2_ECAM_MCFG(4, 2),
++
++#define ALTRA_ECAM_QUIRK(rev, seg) \
++ { "Ampere", "Altra ", rev, seg, MCFG_BUS_ANY, &pci_32b_read_ops }
++
++ ALTRA_ECAM_QUIRK(1, 0),
++ ALTRA_ECAM_QUIRK(1, 1),
++ ALTRA_ECAM_QUIRK(1, 2),
++ ALTRA_ECAM_QUIRK(1, 3),
++ ALTRA_ECAM_QUIRK(1, 4),
++ ALTRA_ECAM_QUIRK(1, 5),
++ ALTRA_ECAM_QUIRK(1, 6),
++ ALTRA_ECAM_QUIRK(1, 7),
++ ALTRA_ECAM_QUIRK(1, 8),
++ ALTRA_ECAM_QUIRK(1, 9),
++ ALTRA_ECAM_QUIRK(1, 10),
++ ALTRA_ECAM_QUIRK(1, 11),
++ ALTRA_ECAM_QUIRK(1, 12),
++ ALTRA_ECAM_QUIRK(1, 13),
++ ALTRA_ECAM_QUIRK(1, 14),
++ ALTRA_ECAM_QUIRK(1, 15),
+ };
+
+ static char mcfg_oem_id[ACPI_OEM_ID_SIZE];
+diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
+index 8f065a42fc1a2..b54d32a316693 100644
+--- a/drivers/pci/ecam.c
++++ b/drivers/pci/ecam.c
+@@ -168,4 +168,14 @@ const struct pci_ecam_ops pci_32b_ops = {
+ .write = pci_generic_config_write32,
+ }
+ };
++
++/* ECAM ops for 32-bit read only (non-compliant) */
++const struct pci_ecam_ops pci_32b_read_ops = {
++ .bus_shift = 20,
++ .pci_ops = {
++ .map_bus = pci_ecam_map_bus,
++ .read = pci_generic_config_read32,
++ .write = pci_generic_config_write,
++ }
++};
+ #endif
+diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
+index 1af5cb02ef7f9..033ce74f02e81 100644
+--- a/include/linux/pci-ecam.h
++++ b/include/linux/pci-ecam.h
+@@ -51,6 +51,7 @@ extern const struct pci_ecam_ops pci_generic_ecam_ops;
+
+ #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+ extern const struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */
++extern const struct pci_ecam_ops pci_32b_read_ops; /* 32-bit read only */
+ extern const struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */
+ extern const struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */
+ extern const struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
+--
+2.27.0
+
--- /dev/null
+From e54e268c56c207108396b34c4304650d68612ea3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 19 Sep 2020 16:04:14 +0200
+Subject: power: supply: bq27xxx: report "not charging" on all types
+
+From: Krzysztof Kozlowski <krzk@kernel.org>
+
+[ Upstream commit 7bf738ba110722b63e9dc8af760d3fb2aef25593 ]
+
+Commit 6f24ff97e323 ("power: supply: bq27xxx_battery: Add the
+BQ27Z561 Battery monitor") and commit d74534c27775 ("power:
+bq27xxx_battery: Add support for additional bq27xxx family devices")
+added support for new device types by copying most of the code and
+adding necessary quirks.
+
+However they did not copy the code in bq27xxx_battery_status()
+responsible for returning POWER_SUPPLY_STATUS_NOT_CHARGING.
+
+Unify bq27xxx_battery_status() so that, for all types, it returns the
+"not charging" status when a charger is supplied.
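+
+Distilled into a hypothetical helper (the driver applies the same check
+inline, as the hunk below shows; "supplied" stands for
+power_supply_am_i_supplied(di->bat) > 0):
+
+  static int downgrade_if_supplied(int status, bool supplied)
+  {
+          /* a supplied but not charging pack reports "not charging" */
+          if (status == POWER_SUPPLY_STATUS_DISCHARGING && supplied)
+                  return POWER_SUPPLY_STATUS_NOT_CHARGING;
+          return status;
+  }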
+
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/power/supply/bq27xxx_battery.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
+index a123f6e21f08a..08b9d025a3e81 100644
+--- a/drivers/power/supply/bq27xxx_battery.c
++++ b/drivers/power/supply/bq27xxx_battery.c
+@@ -1772,8 +1772,6 @@ static int bq27xxx_battery_status(struct bq27xxx_device_info *di,
+ status = POWER_SUPPLY_STATUS_FULL;
+ else if (di->cache.flags & BQ27000_FLAG_CHGS)
+ status = POWER_SUPPLY_STATUS_CHARGING;
+- else if (power_supply_am_i_supplied(di->bat) > 0)
+- status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ } else if (di->opts & BQ27Z561_O_BITS) {
+@@ -1792,6 +1790,10 @@ static int bq27xxx_battery_status(struct bq27xxx_device_info *di,
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ }
+
++ if ((status == POWER_SUPPLY_STATUS_DISCHARGING) &&
++ (power_supply_am_i_supplied(di->bat) > 0))
++ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
++
+ val->intval = status;
+
+ return 0;
+--
+2.27.0
+
--- /dev/null
+From 62f1ebdde2f690326d28b8d65a204ae881cba52a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Sep 2020 14:09:58 +0800
+Subject: power: supply: test_power: add missing newlines when printing
+ parameters by sysfs
+
+From: Xiongfeng Wang <wangxiongfeng2@huawei.com>
+
+[ Upstream commit c07fa6c1631333f02750cf59f22b615d768b4d8f ]
+
+When I cat some module parameters via sysfs, they display as follows.
+It's better to add a newline for easier reading.
+
+root@syzkaller:~# cd /sys/module/test_power/parameters/
+root@syzkaller:/sys/module/test_power/parameters# cat ac_online
+onroot@syzkaller:/sys/module/test_power/parameters# cat battery_present
+trueroot@syzkaller:/sys/module/test_power/parameters# cat battery_health
+goodroot@syzkaller:/sys/module/test_power/parameters# cat battery_status
+dischargingroot@syzkaller:/sys/module/test_power/parameters# cat battery_technology
+LIONroot@syzkaller:/sys/module/test_power/parameters# cat usb_online
+onroot@syzkaller:/sys/module/test_power/parameters#
+
+Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/power/supply/test_power.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
+index 04acd76bbaa12..4895ee5e63a9a 100644
+--- a/drivers/power/supply/test_power.c
++++ b/drivers/power/supply/test_power.c
+@@ -353,6 +353,7 @@ static int param_set_ac_online(const char *key, const struct kernel_param *kp)
+ static int param_get_ac_online(char *buffer, const struct kernel_param *kp)
+ {
+ strcpy(buffer, map_get_key(map_ac_online, ac_online, "unknown"));
++ strcat(buffer, "\n");
+ return strlen(buffer);
+ }
+
+@@ -366,6 +367,7 @@ static int param_set_usb_online(const char *key, const struct kernel_param *kp)
+ static int param_get_usb_online(char *buffer, const struct kernel_param *kp)
+ {
+ strcpy(buffer, map_get_key(map_ac_online, usb_online, "unknown"));
++ strcat(buffer, "\n");
+ return strlen(buffer);
+ }
+
+@@ -380,6 +382,7 @@ static int param_set_battery_status(const char *key,
+ static int param_get_battery_status(char *buffer, const struct kernel_param *kp)
+ {
+ strcpy(buffer, map_get_key(map_status, battery_status, "unknown"));
++ strcat(buffer, "\n");
+ return strlen(buffer);
+ }
+
+@@ -394,6 +397,7 @@ static int param_set_battery_health(const char *key,
+ static int param_get_battery_health(char *buffer, const struct kernel_param *kp)
+ {
+ strcpy(buffer, map_get_key(map_health, battery_health, "unknown"));
++ strcat(buffer, "\n");
+ return strlen(buffer);
+ }
+
+@@ -409,6 +413,7 @@ static int param_get_battery_present(char *buffer,
+ const struct kernel_param *kp)
+ {
+ strcpy(buffer, map_get_key(map_present, battery_present, "unknown"));
++ strcat(buffer, "\n");
+ return strlen(buffer);
+ }
+
+@@ -426,6 +431,7 @@ static int param_get_battery_technology(char *buffer,
+ {
+ strcpy(buffer,
+ map_get_key(map_technology, battery_technology, "unknown"));
++ strcat(buffer, "\n");
+ return strlen(buffer);
+ }
+
+--
+2.27.0
+
--- /dev/null
+From 5ef799fd79133916f1701e91b048b0a618d1defc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Aug 2020 17:55:35 +1000
+Subject: powerpc/64s: handle ISA v3.1 local copy-paste context switches
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+[ Upstream commit dc462267d2d7aacffc3c1d99b02d7a7c59db7c66 ]
+
+In ISA v3.1 the copy-paste facility has new memory move functionality
+which allows the copy buffer to be pasted to domestic memory (RAM) as
+opposed to foreign memory (accelerator).
+
+This means the POWER9 trick of avoiding the cp_abort on context switch if
+the process had not mapped foreign memory does not work on POWER10. Do the
+cp_abort unconditionally there.
+
+KVM must also cp_abort on guest exit to prevent copy buffer state leaking
+between contexts.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Acked-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200825075535.224536-1-npiggin@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/process.c | 16 +++++++++-------
+ arch/powerpc/kvm/book3s_hv.c | 7 +++++++
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 8 ++++++++
+ 3 files changed, 24 insertions(+), 7 deletions(-)
+
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 73a57043ee662..3f2dc0675ea7a 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1256,15 +1256,17 @@ struct task_struct *__switch_to(struct task_struct *prev,
+ restore_math(current->thread.regs);
+
+ /*
+- * The copy-paste buffer can only store into foreign real
+- * addresses, so unprivileged processes can not see the
+- * data or use it in any way unless they have foreign real
+- * mappings. If the new process has the foreign real address
+- * mappings, we must issue a cp_abort to clear any state and
+- * prevent snooping, corruption or a covert channel.
++ * On POWER9 the copy-paste buffer can only paste into
++ * foreign real addresses, so unprivileged processes can not
++ * see the data or use it in any way unless they have
++ * foreign real mappings. If the new process has the foreign
++ * real address mappings, we must issue a cp_abort to clear
++ * any state and prevent snooping, corruption or a covert
++ * channel. ISA v3.1 supports paste into local memory.
+ */
+ if (current->mm &&
+- atomic_read(&current->mm->context.vas_windows))
++ (cpu_has_feature(CPU_FTR_ARCH_31) ||
++ atomic_read(&current->mm->context.vas_windows)))
+ asm volatile(PPC_CP_ABORT);
+ }
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 4ba06a2a306cf..3bd3118c76330 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -3530,6 +3530,13 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
+ */
+ asm volatile("eieio; tlbsync; ptesync");
+
++ /*
++ * cp_abort is required if the processor supports local copy-paste
++ * to clear the copy buffer that was under control of the guest.
++ */
++ if (cpu_has_feature(CPU_FTR_ARCH_31))
++ asm volatile(PPC_CP_ABORT);
++
+ mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */
+ isync();
+
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 799d6d0f4eade..cd9995ee84419 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -1830,6 +1830,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_RADIX_PREFETCH_BUG)
+ 2:
+ #endif /* CONFIG_PPC_RADIX_MMU */
+
++ /*
++ * cp_abort is required if the processor supports local copy-paste
++ * to clear the copy buffer that was under control of the guest.
++ */
++BEGIN_FTR_SECTION
++ PPC_CP_ABORT
++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
++
+ /*
+ * POWER7/POWER8 guest -> host partition switch code.
+ * We don't have to lock against tlbies but we do
+--
+2.27.0
+
--- /dev/null
+From 5f6049cbf724134b95be4896e419812409f3b8bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Aug 2020 10:54:05 +1000
+Subject: powerpc/powernv/smp: Fix spurious DBG() warning
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Oliver O'Halloran <oohall@gmail.com>
+
+[ Upstream commit f6bac19cf65c5be21d14a0c9684c8f560f2096dd ]
+
+When building with W=1 we get the following warning:
+
+ arch/powerpc/platforms/powernv/smp.c: In function ‘pnv_smp_cpu_kill_self’:
+ arch/powerpc/platforms/powernv/smp.c:276:16: error: suggest braces around
+ empty body in an ‘if’ statement [-Werror=empty-body]
+ 276 | cpu, srr1);
+ | ^
+ cc1: all warnings being treated as errors
+
+The full context is this block:
+
+ if (srr1 && !generic_check_cpu_restart(cpu))
+ DBG("CPU%d Unexpected exit while offline srr1=%lx!\n",
+ cpu, srr1);
+
+When building with DEBUG undefined, DBG() expands to nothing and GCC
+emits the warning because the if statement is left with an empty body.
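+
+A minimal illustration (hypothetical DBG_EMPTY/DBG_SAFE macros, not taken
+from the kernel tree) of why the do { } while (0) form avoids the warning:
+
+  #define DBG_EMPTY(fmt...)
+  #define DBG_SAFE(fmt...)  do { } while (0)
+
+  static void example(int cond)
+  {
+          if (cond)
+                  DBG_EMPTY("offline\n");  /* body expands to `;`, -Wempty-body fires */
+          if (cond)
+                  DBG_SAFE("offline\n");   /* real (empty) statement, no warning */
+  }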
+
+Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
+Reviewed-by: Joel Stanley <joel@jms.id.au>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200804005410.146094-2-oohall@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/platforms/powernv/smp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
+index b2ba3e95bda73..bbf361f23ae86 100644
+--- a/arch/powerpc/platforms/powernv/smp.c
++++ b/arch/powerpc/platforms/powernv/smp.c
+@@ -43,7 +43,7 @@
+ #include <asm/udbg.h>
+ #define DBG(fmt...) udbg_printf(fmt)
+ #else
+-#define DBG(fmt...)
++#define DBG(fmt...) do { } while (0)
+ #endif
+
+ static void pnv_smp_setup_cpu(int cpu)
+--
+2.27.0
+
--- /dev/null
+From c8c405cdb8388a19ea45e7d1de8020e074432737 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Sep 2020 14:52:17 +1000
+Subject: powerpc: select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+[ Upstream commit 66acd46080bd9e5ad2be4b0eb1d498d5145d058e ]
+
+powerpc uses IPIs in some situations to switch a kernel thread away
+from a lazy tlb mm, which is subject to the TLB flushing race
+described in the changelog introducing ARCH_WANT_IRQS_OFF_ACTIVATE_MM.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200914045219.3736466-3-npiggin@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/Kconfig | 1 +
+ arch/powerpc/include/asm/mmu_context.h | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 2b15b4870565d..857b258de8aa5 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -148,6 +148,7 @@ config PPC
+ select ARCH_USE_QUEUED_RWLOCKS if PPC_QUEUED_SPINLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS
+ select ARCH_WANT_IPC_PARSE_VERSION
++ select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
+ select ARCH_WEAK_RELEASE_ACQUIRE
+ select BINFMT_ELF
+ select BUILDTIME_TABLE_SORT
+diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
+index 7f3658a973846..e02aa793420b8 100644
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -244,7 +244,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ */
+ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+ {
+- switch_mm(prev, next, current);
++ switch_mm_irqs_off(prev, next, current);
+ }
+
+ /* We don't currently use enter_lazy_tlb() for anything */
+--
+2.27.0
+
--- /dev/null
+From 70a14b3849b86d26de9539d690dc195228eed346 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 31 Jul 2020 17:04:59 +0530
+Subject: powerpc/vmemmap: Fix memory leak with vmemmap list allocation
+ failures.
+
+From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+
+[ Upstream commit ccaea15296f9773abd43aaa17ee4b88848e4a505 ]
+
+If we fail to allocate a vmemmap list entry, we don't keep track of the
+allocated vmemmap block buffer. Hence, on section deactivate, we skip
+freeing the vmemmap block buffer. This results in a memory leak.
+
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200731113500.248306-1-aneesh.kumar@linux.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/mm/init_64.c | 35 ++++++++++++++++++++++++++++-------
+ 1 file changed, 28 insertions(+), 7 deletions(-)
+
+diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
+index 8459056cce671..2ae42c2a5cf04 100644
+--- a/arch/powerpc/mm/init_64.c
++++ b/arch/powerpc/mm/init_64.c
+@@ -162,16 +162,16 @@ static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
+ return next++;
+ }
+
+-static __meminit void vmemmap_list_populate(unsigned long phys,
+- unsigned long start,
+- int node)
++static __meminit int vmemmap_list_populate(unsigned long phys,
++ unsigned long start,
++ int node)
+ {
+ struct vmemmap_backing *vmem_back;
+
+ vmem_back = vmemmap_list_alloc(node);
+ if (unlikely(!vmem_back)) {
+- WARN_ON(1);
+- return;
++ pr_debug("vmemap list allocation failed\n");
++ return -ENOMEM;
+ }
+
+ vmem_back->phys = phys;
+@@ -179,6 +179,7 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
+ vmem_back->list = vmemmap_list;
+
+ vmemmap_list = vmem_back;
++ return 0;
+ }
+
+ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
+@@ -199,6 +200,7 @@ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long star
+ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+ struct vmem_altmap *altmap)
+ {
++ bool altmap_alloc;
+ unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
+
+ /* Align to the page size of the linear mapping. */
+@@ -228,13 +230,32 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+ p = vmemmap_alloc_block_buf(page_size, node, altmap);
+ if (!p)
+ pr_debug("altmap block allocation failed, falling back to system memory");
++ else
++ altmap_alloc = true;
+ }
+- if (!p)
++ if (!p) {
+ p = vmemmap_alloc_block_buf(page_size, node, NULL);
++ altmap_alloc = false;
++ }
+ if (!p)
+ return -ENOMEM;
+
+- vmemmap_list_populate(__pa(p), start, node);
++ if (vmemmap_list_populate(__pa(p), start, node)) {
++ /*
++ * If we don't populate vmemap list, we don't have
++ * the ability to free the allocated vmemmap
++ * pages in section_deactivate. Hence free them
++ * here.
++ */
++ int nr_pfns = page_size >> PAGE_SHIFT;
++ unsigned long page_order = get_order(page_size);
++
++ if (altmap_alloc)
++ vmem_altmap_free(altmap, nr_pfns);
++ else
++ free_pages((unsigned long)p, page_order);
++ return -ENOMEM;
++ }
+
+ pr_debug(" * %016lx..%016lx allocated at %p\n",
+ start, start + page_size, p);
+--
+2.27.0
+
--- /dev/null
+From aabd62eeadeb16f096eff9f11f67db15e25be67a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Sep 2020 09:59:40 +0530
+Subject: powerpc/watchpoint/ptrace: Fix SETHWDEBUG when
+ CONFIG_HAVE_HW_BREAKPOINT=N
+
+From: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
+
+[ Upstream commit 9b6b7c680cc20971444d9f836e49fc98848bcd0a ]
+
+When the kernel is compiled with CONFIG_HAVE_HW_BREAKPOINT=N, a user can
+still create a watchpoint using PPC_PTRACE_SETHWDEBUG, with limited
+functionality. But such watchpoints never fire because of the missing
+privilege settings. Fix that.
+
+It's safe to set HW_BRK_TYPE_PRIV_ALL because we don't really leak
+any kernel address in signal info. Setting HW_BRK_TYPE_PRIV_ALL will
+also help to find scenarios when kernel accesses user memory.
+
+Reported-by: Pedro Miraglia Franco de Carvalho <pedromfc@linux.ibm.com>
+Suggested-by: Pedro Miraglia Franco de Carvalho <pedromfc@linux.ibm.com>
+Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200902042945.129369-4-ravi.bangoria@linux.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/ptrace/ptrace-noadv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
+index 8bd8d8de5c40b..a570782e954be 100644
+--- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c
++++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
+@@ -217,7 +217,7 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf
+ return -EIO;
+
+ brk.address = ALIGN_DOWN(bp_info->addr, HW_BREAKPOINT_SIZE);
+- brk.type = HW_BRK_TYPE_TRANSLATE;
++ brk.type = HW_BRK_TYPE_TRANSLATE | HW_BRK_TYPE_PRIV_ALL;
+ brk.len = DABR_MAX_LEN;
+ brk.hw_len = DABR_MAX_LEN;
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+--
+2.27.0
+
--- /dev/null
+From eacf30a5df9503bd30206eb5a5c210ed962df86e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Aug 2020 09:37:22 +0206
+Subject: printk: reduce LOG_BUF_SHIFT range for H8300
+
+From: John Ogness <john.ogness@linutronix.de>
+
+[ Upstream commit 550c10d28d21bd82a8bb48debbb27e6ed53262f6 ]
+
+The .bss section for the h8300 is relatively small. A value of
+CONFIG_LOG_BUF_SHIFT that is larger than 19 will create a static
+printk ringbuffer that is too large. Limit the range appropriately
+for the H8300.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200812073122.25412-1-john.ogness@linutronix.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ init/Kconfig | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index d6a0b31b13dc9..2a5df1cf838c6 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -682,7 +682,8 @@ config IKHEADERS
+
+ config LOG_BUF_SHIFT
+ int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
+- range 12 25
++ range 12 25 if !H8300
++ range 12 19 if H8300
+ default 17
+ depends on PRINTK
+ help
+--
+2.27.0
+
--- /dev/null
+From 13a925b4bacccb83cbd915d5ae396575cd7a3dc1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Sep 2020 11:17:08 +0300
+Subject: RDMA/core: Change how failing destroy is handled during uobj abort
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+[ Upstream commit f553246f7f794675da1794ae7ee07d1f35e561ae ]
+
+Currently it triggers a WARN_ON and then goes ahead and destroys the
+uobject anyhow, leaking any driver memory.
+
+The only place that leaks driver memory should be during FD close() in
+uverbs_destroy_ufile_hw().
+
+Drivers are only allowed to fail destroying uobjects if they guarantee
+that destroy will eventually succeed. uverbs_destroy_ufile_hw() provides
+the loop to give the driver that chance.
+
+Link: https://lore.kernel.org/r/20200902081708.746631-1-leon@kernel.org
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/rdma_core.c | 30 ++++++++++++++---------------
+ include/rdma/ib_verbs.h | 5 -----
+ 2 files changed, 15 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
+index 6d3ed7c6e19eb..3962da54ffbf4 100644
+--- a/drivers/infiniband/core/rdma_core.c
++++ b/drivers/infiniband/core/rdma_core.c
+@@ -130,17 +130,6 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
+ lockdep_assert_held(&ufile->hw_destroy_rwsem);
+ assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
+
+- if (reason == RDMA_REMOVE_ABORT_HWOBJ) {
+- reason = RDMA_REMOVE_ABORT;
+- ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
+- attrs);
+- /*
+- * Drivers are not permitted to ignore RDMA_REMOVE_ABORT, see
+- * ib_is_destroy_retryable, cleanup_retryable == false here.
+- */
+- WARN_ON(ret);
+- }
+-
+ if (reason == RDMA_REMOVE_ABORT) {
+ WARN_ON(!list_empty(&uobj->list));
+ WARN_ON(!uobj->context);
+@@ -674,11 +663,22 @@ void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
+ bool hw_obj_valid)
+ {
+ struct ib_uverbs_file *ufile = uobj->ufile;
++ int ret;
++
++ if (hw_obj_valid) {
++ ret = uobj->uapi_object->type_class->destroy_hw(
++ uobj, RDMA_REMOVE_ABORT, attrs);
++ /*
++ * If the driver couldn't destroy the object then go ahead and
++ * commit it. Leaking objects that can't be destroyed is only
++ * done during FD close after the driver has a few more tries to
++ * destroy it.
++ */
++ if (WARN_ON(ret))
++ return rdma_alloc_commit_uobject(uobj, attrs);
++ }
+
+- uverbs_destroy_uobject(uobj,
+- hw_obj_valid ? RDMA_REMOVE_ABORT_HWOBJ :
+- RDMA_REMOVE_ABORT,
+- attrs);
++ uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);
+
+ /* Matches the down_read in rdma_alloc_begin_uobject */
+ up_read(&ufile->hw_destroy_rwsem);
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index 5b4f0efc4241f..ef7b786b8675c 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -1463,11 +1463,6 @@ enum rdma_remove_reason {
+ RDMA_REMOVE_DRIVER_REMOVE,
+ /* uobj is being cleaned-up before being committed */
+ RDMA_REMOVE_ABORT,
+- /*
+- * uobj has been fully created, with the uobj->object set, but is being
+- * cleaned up before being comitted
+- */
+- RDMA_REMOVE_ABORT_HWOBJ,
+ };
+
+ struct ib_rdmacg_object {
+--
+2.27.0
+
--- /dev/null
+From 7c0a613635e732b1f7f1f6a6d5885b98ba0152db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Oct 2020 15:43:59 +0200
+Subject: RDMA/mlx5: Fix devlink deadlock on net namespace deletion
+
+From: Parav Pandit <parav@nvidia.com>
+
+[ Upstream commit fbdd0049d98d44914fc57d4b91f867f4996c787b ]
+
+When an mlx5 core devlink instance is reloaded in a different net namespace,
+its associated IB device is deleted and recreated.
+
+Example sequence is:
+$ ip netns add foo
+$ devlink dev reload pci/0000:00:08.0 netns foo
+$ ip netns del foo
+
+The mlx5 IB device needs to attach and detach the netdevice to and from
+it through the netdev notifier chain during the load and unload sequence.
+Below is a call graph of the unload flow.
+
+cleanup_net()
+ down_read(&pernet_ops_rwsem); <- first sem acquired
+ ops_pre_exit_list()
+ pre_exit()
+ devlink_pernet_pre_exit()
+ devlink_reload()
+ mlx5_devlink_reload_down()
+ mlx5_unload_one()
+ [...]
+ mlx5_ib_remove()
+ mlx5_ib_unbind_slave_port()
+ mlx5_remove_netdev_notifier()
+ unregister_netdevice_notifier()
+ down_write(&pernet_ops_rwsem); <- recursive lock
+
+Hence, when the net namespace is deleted, the mlx5 reload results in a
+deadlock.
+
+When the deadlock occurs, the devlink mutex is also held. This not only
+deadlocks the mlx5 device under reload, but also deadlocks all processes
+which attempt to access unrelated devlink devices.
+
+Hence, fix this by making the mlx5 ib driver register a per-net netdev
+notifier instead of the global one; the per-net notifier operates on the
+net namespace without holding the pernet_ops_rwsem.
+
+Fixes: 4383cfcc65e7 ("net/mlx5: Add devlink reload")
+Link: https://lore.kernel.org/r/20201026134359.23150-1-parav@nvidia.com
+Signed-off-by: Parav Pandit <parav@nvidia.com>
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/mlx5/main.c | 6 ++++--
+ .../net/ethernet/mellanox/mlx5/core/lib/mlx5.h | 5 -----
+ include/linux/mlx5/driver.h | 18 ++++++++++++++++++
+ 3 files changed, 22 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index b805cc8124657..2a7b5ffb2a2ef 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -3318,7 +3318,8 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
+ int err;
+
+ dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
+- err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
++ err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
++ &dev->port[port_num].roce.nb);
+ if (err) {
+ dev->port[port_num].roce.nb.notifier_call = NULL;
+ return err;
+@@ -3330,7 +3331,8 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
+ static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
+ {
+ if (dev->port[port_num].roce.nb.notifier_call) {
+- unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
++ unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
++ &dev->port[port_num].roce.nb);
+ dev->port[port_num].roce.nb.notifier_call = NULL;
+ }
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+index d046db7bb047d..3a9fa629503f0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+@@ -90,9 +90,4 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
+ u32 key_type, u32 *p_key_id);
+ void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
+
+-static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+-{
+- return devlink_net(priv_to_devlink(dev));
+-}
+-
+ #endif
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 372100c755e7f..e30be3dd5be0e 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -1212,4 +1212,22 @@ static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
+ return val.vbool;
+ }
+
++/**
++ * mlx5_core_net - Provide net namespace of the mlx5_core_dev
++ * @dev: mlx5 core device
++ *
++ * mlx5_core_net() returns the net namespace of mlx5 core device.
++ * This can be called only in below described limited context.
++ * (a) When a devlink instance for mlx5_core is registered and
++ * when devlink reload operation is disabled.
++ * or
++ * (b) during devlink reload reload_down() and reload_up callbacks
++ * where it is ensured that devlink instance's net namespace is
++ * stable.
++ */
++static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
++{
++ return devlink_net(priv_to_devlink(dev));
++}
++
+ #endif /* MLX5_DRIVER_H */
+--
+2.27.0
+
--- /dev/null
+From cf81a6ddbd9c676332cb2498c4f4e0d720a9789f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Oct 2020 11:50:08 +0000
+Subject: RDMA/qedr: Fix memory leak in iWARP CM
+
+From: Alok Prasad <palok@marvell.com>
+
+[ Upstream commit a2267f8a52eea9096861affd463f691be0f0e8c9 ]
+
+Fixes a memory leak in the iWARP CM.
+
+Fixes: e411e0587e0d ("RDMA/qedr: Add iWARP connection management functions")
+Link: https://lore.kernel.org/r/20201021115008.28138-1-palok@marvell.com
+Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: Alok Prasad <palok@marvell.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/qedr/qedr_iw_cm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+index c7169d2c69e5b..c4bc58736e489 100644
+--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+@@ -727,6 +727,7 @@ int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
+ listener->qed_handle);
+
+ cm_id->rem_ref(cm_id);
++ kfree(listener);
+ return rc;
+ }
+
+--
+2.27.0
+
--- /dev/null
+From d50281e03365e4164294208396c5caaac53aa410 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 31 Aug 2020 15:33:49 +0800
+Subject: riscv: Define AT_VECTOR_SIZE_ARCH for ARCH_DLINFO
+
+From: Zong Li <zong.li@sifive.com>
+
+[ Upstream commit b5fca7c55f9fbab5ad732c3bce00f31af6ba5cfa ]
+
+AT_VECTOR_SIZE_ARCH should be defined with the maximum number of
+NEW_AUX_ENT entries that ARCH_DLINFO can contain, but it wasn't defined
+for RISC-V at all even though ARCH_DLINFO will contain one NEW_AUX_ENT
+for the VDSO address.
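+
+For reference, that single entry comes from the RISC-V ARCH_DLINFO
+definition, which (paraphrased here, not part of this patch) looks
+roughly like:
+
+  #define ARCH_DLINFO                                             \
+  do {                                                            \
+          NEW_AUX_ENT(AT_SYSINFO_EHDR,                            \
+                      (elf_addr_t)current->mm->context.vdso);     \
+  } while (0)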
+
+Signed-off-by: Zong Li <zong.li@sifive.com>
+Reviewed-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Reviewed-by: Pekka Enberg <penberg@kernel.org>
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/include/uapi/asm/auxvec.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h
+index d86cb17bbabe6..22e0ae8884061 100644
+--- a/arch/riscv/include/uapi/asm/auxvec.h
++++ b/arch/riscv/include/uapi/asm/auxvec.h
+@@ -10,4 +10,7 @@
+ /* vDSO location */
+ #define AT_SYSINFO_EHDR 33
+
++/* entries in ARCH_DLINFO */
++#define AT_VECTOR_SIZE_ARCH 1
++
+ #endif /* _UAPI_ASM_RISCV_AUXVEC_H */
+--
+2.27.0
+
--- /dev/null
+From 37520e9c5072924aa51e144c493019c3d81c9fd3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jun 2020 22:15:18 +0530
+Subject: rpmsg: glink: Use complete_all for open states
+
+From: Chris Lew <clew@codeaurora.org>
+
+[ Upstream commit 4fcdaf6e28d11e2f3820d54dd23cd12a47ddd44e ]
+
+The open_req and open_ack completion variables are the state variables
+that represent a remote channel as open. Use complete_all() so there are
+no races between waiters and users of completion_done().
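+
+A minimal sketch (hypothetical function, not driver code) of the
+semantics this relies on: complete() wakes one waiter and its "done"
+count can be consumed again, so a later completion_done() check may miss
+it, whereas complete_all() marks the completion done permanently:
+
+  #include <linux/bug.h>
+  #include <linux/completion.h>
+
+  static void open_ack_example(void)
+  {
+          struct completion open_ack;
+
+          init_completion(&open_ack);
+          complete_all(&open_ack);               /* stays "done" for everyone */
+          WARN_ON(!completion_done(&open_ack));  /* always true from now on */
+          wait_for_completion(&open_ack);        /* returns immediately */
+  }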
+
+Signed-off-by: Chris Lew <clew@codeaurora.org>
+Signed-off-by: Arun Kumar Neelakantam <aneela@codeaurora.org>
+Signed-off-by: Deepak Kumar Singh <deesin@codeaurora.org>
+Link: https://lore.kernel.org/r/1593017121-7953-2-git-send-email-deesin@codeaurora.org
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/rpmsg/qcom_glink_native.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
+index f40312b16da06..b5570c83a28c6 100644
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -970,7 +970,7 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
+ return -EINVAL;
+ }
+
+- complete(&channel->open_ack);
++ complete_all(&channel->open_ack);
+
+ return 0;
+ }
+@@ -1178,7 +1178,7 @@ static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
+ __be32 *val = defaults;
+ int size;
+
+- if (glink->intentless)
++ if (glink->intentless || !completion_done(&channel->open_ack))
+ return 0;
+
+ prop = of_find_property(np, "qcom,intents", NULL);
+@@ -1413,7 +1413,7 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
+ channel->rcid = ret;
+ spin_unlock_irqrestore(&glink->idr_lock, flags);
+
+- complete(&channel->open_req);
++ complete_all(&channel->open_req);
+
+ if (create_device) {
+ rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
+--
+2.27.0
+
--- /dev/null
+From ba54cc8a1d4d76a4388d7a3ddf98f5597d0a46b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Aug 2020 09:27:47 +0200
+Subject: s390/ap/zcrypt: revisit ap and zcrypt error handling
+
+From: Harald Freudenberger <freude@linux.ibm.com>
+
+[ Upstream commit e0332629e33d1926c93348d918aaaf451ef9a16b ]
+
+Revisit the ap queue error handling: based on discussions and
+evaluations with the firmware folk, here is a rework of the response
+code handling for all the AP instructions. The idea is to distinguish
+between failures because of some kind of invalid request where a retry
+does not make any sense and a failure where another attempt to send
+the very same request may succeed. The first case is handled by
+returning EINVAL to the userspace application. The second case results
+in retries within the zcrypt API controlled by a per message retry
+counter.
+
+Revisit the zcrypt error handling: similarly, based on discussions
+with the firmware people, here is a rework of the handling of all
+the reply codes. The main point here is that there are only very few
+cases left where a zcrypt device queue is switched to offline. It
+should never be the case that an AP reply message is 'unknown' to the
+device driver, as that would indicate a total mismatch between device
+driver and crypto card firmware. In all other cases, the code
+distinguishes between failures because of an invalid message (see
+above - EINVAL) and failures of the infrastructure (see above - EAGAIN).
+
+Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/s390/crypto/ap_bus.h | 1 +
+ drivers/s390/crypto/ap_queue.c | 8 +--
+ drivers/s390/crypto/zcrypt_debug.h | 8 +++
+ drivers/s390/crypto/zcrypt_error.h | 88 +++++++++---------------
+ drivers/s390/crypto/zcrypt_msgtype50.c | 50 +++++++-------
+ drivers/s390/crypto/zcrypt_msgtype6.c | 92 +++++++++++++-------------
+ 6 files changed, 116 insertions(+), 131 deletions(-)
+
+diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
+index 1ea046324e8f6..c4afca0d773c6 100644
+--- a/drivers/s390/crypto/ap_bus.h
++++ b/drivers/s390/crypto/ap_bus.h
+@@ -50,6 +50,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
+ #define AP_RESPONSE_NO_FIRST_PART 0x13
+ #define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
+ #define AP_RESPONSE_REQ_FAC_NOT_INST 0x16
++#define AP_RESPONSE_INVALID_DOMAIN 0x42
+
+ /*
+ * Known device types
+diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
+index 688ebebbf98cb..99f73bbb1c751 100644
+--- a/drivers/s390/crypto/ap_queue.c
++++ b/drivers/s390/crypto/ap_queue.c
+@@ -237,6 +237,9 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ aq->sm_state = AP_SM_STATE_RESET_WAIT;
+ return AP_SM_WAIT_TIMEOUT;
++ case AP_RESPONSE_INVALID_DOMAIN:
++ AP_DBF(DBF_WARN, "AP_RESPONSE_INVALID_DOMAIN on NQAP\n");
++ fallthrough;
+ case AP_RESPONSE_MESSAGE_TOO_BIG:
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
+ list_del_init(&ap_msg->list);
+@@ -278,11 +281,6 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
+ aq->sm_state = AP_SM_STATE_RESET_WAIT;
+ aq->interrupt = AP_INTR_DISABLED;
+ return AP_SM_WAIT_TIMEOUT;
+- case AP_RESPONSE_BUSY:
+- return AP_SM_WAIT_TIMEOUT;
+- case AP_RESPONSE_Q_NOT_AVAIL:
+- case AP_RESPONSE_DECONFIGURED:
+- case AP_RESPONSE_CHECKSTOPPED:
+ default:
+ aq->sm_state = AP_SM_STATE_BORKED;
+ return AP_SM_WAIT_NONE;
+diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
+index 241dbb5f75bf3..3225489a1c411 100644
+--- a/drivers/s390/crypto/zcrypt_debug.h
++++ b/drivers/s390/crypto/zcrypt_debug.h
+@@ -21,6 +21,14 @@
+
+ #define ZCRYPT_DBF(...) \
+ debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
++#define ZCRYPT_DBF_ERR(...) \
++ debug_sprintf_event(zcrypt_dbf_info, DBF_ERR, ##__VA_ARGS__)
++#define ZCRYPT_DBF_WARN(...) \
++ debug_sprintf_event(zcrypt_dbf_info, DBF_WARN, ##__VA_ARGS__)
++#define ZCRYPT_DBF_INFO(...) \
++ debug_sprintf_event(zcrypt_dbf_info, DBF_INFO, ##__VA_ARGS__)
++#define ZCRYPT_DBF_DBG(...) \
++ debug_sprintf_event(zcrypt_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
+
+ extern debug_info_t *zcrypt_dbf_info;
+
+diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
+index 54a04f8c38ef9..39e626e3a3794 100644
+--- a/drivers/s390/crypto/zcrypt_error.h
++++ b/drivers/s390/crypto/zcrypt_error.h
+@@ -52,7 +52,6 @@ struct error_hdr {
+ #define REP82_ERROR_INVALID_COMMAND 0x30
+ #define REP82_ERROR_MALFORMED_MSG 0x40
+ #define REP82_ERROR_INVALID_SPECIAL_CMD 0x41
+-#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
+ #define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
+ #define REP82_ERROR_WORD_ALIGNMENT 0x60
+ #define REP82_ERROR_MESSAGE_LENGTH 0x80
+@@ -67,7 +66,6 @@ struct error_hdr {
+ #define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
+
+ #define REP88_ERROR_MODULE_FAILURE 0x10
+-
+ #define REP88_ERROR_MESSAGE_TYPE 0x20
+ #define REP88_ERROR_MESSAGE_MALFORMD 0x22
+ #define REP88_ERROR_MESSAGE_LENGTH 0x23
+@@ -85,78 +83,56 @@ static inline int convert_error(struct zcrypt_queue *zq,
+ int queue = AP_QID_QUEUE(zq->queue->qid);
+
+ switch (ehdr->reply_code) {
+- case REP82_ERROR_OPERAND_INVALID:
+- case REP82_ERROR_OPERAND_SIZE:
+- case REP82_ERROR_EVEN_MOD_IN_OPND:
+- case REP88_ERROR_MESSAGE_MALFORMD:
+- case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
+- case REP82_ERROR_INVALID_DOMAIN_PENDING:
+- case REP82_ERROR_INVALID_SPECIAL_CMD:
+- case REP82_ERROR_FILTERED_BY_HYPERVISOR:
+- // REP88_ERROR_INVALID_KEY // '82' CEX2A
+- // REP88_ERROR_OPERAND // '84' CEX2A
+- // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
+- /* Invalid input data. */
++ case REP82_ERROR_INVALID_MSG_LEN: /* 0x23 */
++ case REP82_ERROR_RESERVD_FIELD: /* 0x24 */
++ case REP82_ERROR_FORMAT_FIELD: /* 0x29 */
++ case REP82_ERROR_MALFORMED_MSG: /* 0x40 */
++ case REP82_ERROR_INVALID_SPECIAL_CMD: /* 0x41 */
++ case REP82_ERROR_MESSAGE_LENGTH: /* 0x80 */
++ case REP82_ERROR_OPERAND_INVALID: /* 0x82 */
++ case REP82_ERROR_OPERAND_SIZE: /* 0x84 */
++ case REP82_ERROR_EVEN_MOD_IN_OPND: /* 0x85 */
++ case REP82_ERROR_INVALID_DOMAIN_PENDING: /* 0x8A */
++ case REP82_ERROR_FILTERED_BY_HYPERVISOR: /* 0x8B */
++ case REP82_ERROR_PACKET_TRUNCATED: /* 0xA0 */
++ case REP88_ERROR_MESSAGE_MALFORMD: /* 0x22 */
++ case REP88_ERROR_KEY_TYPE: /* 0x34 */
++ /* RY indicates malformed request */
+ ZCRYPT_DBF(DBF_WARN,
+- "device=%02x.%04x reply=0x%02x => rc=EINVAL\n",
++ "dev=%02x.%04x RY=0x%02x => rc=EINVAL\n",
+ card, queue, ehdr->reply_code);
+ return -EINVAL;
+- case REP82_ERROR_MESSAGE_TYPE:
+- // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
++ case REP82_ERROR_MACHINE_FAILURE: /* 0x10 */
++ case REP82_ERROR_MESSAGE_TYPE: /* 0x20 */
++ case REP82_ERROR_TRANSPORT_FAIL: /* 0x90 */
+ /*
+- * To sent a message of the wrong type is a bug in the
+- * device driver. Send error msg, disable the device
+- * and then repeat the request.
++ * Msg to wrong type or card/infrastructure failure.
++ * Trigger rescan of the ap bus, trigger retry request.
+ */
+ atomic_set(&zcrypt_rescan_req, 1);
+- zq->online = 0;
+- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+- card, queue);
+- ZCRYPT_DBF(DBF_ERR,
+- "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+- card, queue, ehdr->reply_code);
+- return -EAGAIN;
+- case REP82_ERROR_TRANSPORT_FAIL:
+- /* Card or infrastructure failure, disable card */
+- atomic_set(&zcrypt_rescan_req, 1);
+- zq->online = 0;
+- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+- card, queue);
+ /* For type 86 response show the apfs value (failure reason) */
+- if (ehdr->type == TYPE86_RSP_CODE) {
++ if (ehdr->reply_code == REP82_ERROR_TRANSPORT_FAIL &&
++ ehdr->type == TYPE86_RSP_CODE) {
+ struct {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+ } __packed * head = reply->msg;
+ unsigned int apfs = *((u32 *)head->fmt2.apfs);
+
+- ZCRYPT_DBF(DBF_ERR,
+- "device=%02x.%04x reply=0x%02x apfs=0x%x => online=0 rc=EAGAIN\n",
+- card, queue, apfs, ehdr->reply_code);
++ ZCRYPT_DBF(DBF_WARN,
++ "dev=%02x.%04x RY=0x%02x apfs=0x%x => bus rescan, rc=EAGAIN\n",
++ card, queue, ehdr->reply_code, apfs);
+ } else
+- ZCRYPT_DBF(DBF_ERR,
+- "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
++ ZCRYPT_DBF(DBF_WARN,
++ "dev=%02x.%04x RY=0x%02x => bus rescan, rc=EAGAIN\n",
+ card, queue, ehdr->reply_code);
+ return -EAGAIN;
+- case REP82_ERROR_MACHINE_FAILURE:
+- // REP88_ERROR_MODULE_FAILURE // '10' CEX2A
+- /* If a card fails disable it and repeat the request. */
+- atomic_set(&zcrypt_rescan_req, 1);
+- zq->online = 0;
+- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+- card, queue);
+- ZCRYPT_DBF(DBF_ERR,
+- "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+- card, queue, ehdr->reply_code);
+- return -EAGAIN;
+ default:
+- zq->online = 0;
+- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+- card, queue);
+- ZCRYPT_DBF(DBF_ERR,
+- "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
++ /* Assume request is valid and a retry will be worth it */
++ ZCRYPT_DBF(DBF_WARN,
++ "dev=%02x.%04x RY=0x%02x => rc=EAGAIN\n",
+ card, queue, ehdr->reply_code);
+- return -EAGAIN; /* repeat the request on a different device. */
++ return -EAGAIN;
+ }
+ }
+
+diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
+index 7aedc338b4459..88916addd513e 100644
+--- a/drivers/s390/crypto/zcrypt_msgtype50.c
++++ b/drivers/s390/crypto/zcrypt_msgtype50.c
+@@ -356,15 +356,15 @@ static int convert_type80(struct zcrypt_queue *zq,
+ if (t80h->len < sizeof(*t80h) + outputdatalength) {
+ /* The result is too short, the CEXxA card may not do that.. */
+ zq->online = 0;
+- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
++ pr_err("Crypto dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+- AP_QID_QUEUE(zq->queue->qid));
+- ZCRYPT_DBF(DBF_ERR,
+- "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+- AP_QID_CARD(zq->queue->qid),
+- AP_QID_QUEUE(zq->queue->qid),
+- t80h->code);
+- return -EAGAIN; /* repeat the request on a different device. */
++ AP_QID_QUEUE(zq->queue->qid),
++ t80h->code);
++ ZCRYPT_DBF_ERR("dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
++ AP_QID_CARD(zq->queue->qid),
++ AP_QID_QUEUE(zq->queue->qid),
++ t80h->code);
++ return -EAGAIN;
+ }
+ if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
+ BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
+@@ -376,10 +376,10 @@ static int convert_type80(struct zcrypt_queue *zq,
+ return 0;
+ }
+
+-static int convert_response(struct zcrypt_queue *zq,
+- struct ap_message *reply,
+- char __user *outputdata,
+- unsigned int outputdatalength)
++static int convert_response_cex2a(struct zcrypt_queue *zq,
++ struct ap_message *reply,
++ char __user *outputdata,
++ unsigned int outputdatalength)
+ {
+ /* Response type byte is the second byte in the response. */
+ unsigned char rtype = ((unsigned char *) reply->msg)[1];
+@@ -393,15 +393,15 @@ static int convert_response(struct zcrypt_queue *zq,
+ outputdata, outputdatalength);
+ default: /* Unknown response type, this should NEVER EVER happen */
+ zq->online = 0;
+- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
++ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+- AP_QID_QUEUE(zq->queue->qid));
+- ZCRYPT_DBF(DBF_ERR,
+- "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+- AP_QID_CARD(zq->queue->qid),
+- AP_QID_QUEUE(zq->queue->qid),
+- (unsigned int) rtype);
+- return -EAGAIN; /* repeat the request on a different device. */
++ AP_QID_QUEUE(zq->queue->qid),
++ (int) rtype);
++ ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
++ AP_QID_CARD(zq->queue->qid),
++ AP_QID_QUEUE(zq->queue->qid),
++ (int) rtype);
++ return -EAGAIN;
+ }
+ }
+
+@@ -476,8 +476,9 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
+ if (rc == 0) {
+ rc = ap_msg.rc;
+ if (rc == 0)
+- rc = convert_response(zq, &ap_msg, mex->outputdata,
+- mex->outputdatalength);
++ rc = convert_response_cex2a(zq, &ap_msg,
++ mex->outputdata,
++ mex->outputdatalength);
+ } else
+ /* Signal pending. */
+ ap_cancel_message(zq->queue, &ap_msg);
+@@ -520,8 +521,9 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
+ if (rc == 0) {
+ rc = ap_msg.rc;
+ if (rc == 0)
+- rc = convert_response(zq, &ap_msg, crt->outputdata,
+- crt->outputdatalength);
++ rc = convert_response_cex2a(zq, &ap_msg,
++ crt->outputdata,
++ crt->outputdatalength);
+ } else
+ /* Signal pending. */
+ ap_cancel_message(zq->queue, &ap_msg);
+diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
+index d77991c74c252..21ea3b73c8674 100644
+--- a/drivers/s390/crypto/zcrypt_msgtype6.c
++++ b/drivers/s390/crypto/zcrypt_msgtype6.c
+@@ -650,23 +650,22 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
+ (service_rc == 8 && service_rs == 72) ||
+ (service_rc == 8 && service_rs == 770) ||
+ (service_rc == 12 && service_rs == 769)) {
+- ZCRYPT_DBF(DBF_DEBUG,
+- "device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
+- AP_QID_CARD(zq->queue->qid),
+- AP_QID_QUEUE(zq->queue->qid),
+- (int) service_rc, (int) service_rs);
++ ZCRYPT_DBF_WARN("dev=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
++ AP_QID_CARD(zq->queue->qid),
++ AP_QID_QUEUE(zq->queue->qid),
++ (int) service_rc, (int) service_rs);
+ return -EINVAL;
+ }
+ zq->online = 0;
+- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
++ pr_err("Crypto dev=%02x.%04x rc/rs=%d/%d online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+- AP_QID_QUEUE(zq->queue->qid));
+- ZCRYPT_DBF(DBF_ERR,
+- "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
+- AP_QID_CARD(zq->queue->qid),
+- AP_QID_QUEUE(zq->queue->qid),
+- (int) service_rc, (int) service_rs);
+- return -EAGAIN; /* repeat the request on a different device. */
++ AP_QID_QUEUE(zq->queue->qid),
++ (int) service_rc, (int) service_rs);
++ ZCRYPT_DBF_ERR("dev=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
++ AP_QID_CARD(zq->queue->qid),
++ AP_QID_QUEUE(zq->queue->qid),
++ (int) service_rc, (int) service_rs);
++ return -EAGAIN;
+ }
+ data = msg->text;
+ reply_len = msg->length - 2;
+@@ -800,17 +799,18 @@ static int convert_response_ica(struct zcrypt_queue *zq,
+ return convert_type86_ica(zq, reply,
+ outputdata, outputdatalength);
+ fallthrough; /* wrong cprb version is an unknown response */
+- default: /* Unknown response type, this should NEVER EVER happen */
++ default:
++ /* Unknown response type, this should NEVER EVER happen */
+ zq->online = 0;
+- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
++ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+- AP_QID_QUEUE(zq->queue->qid));
+- ZCRYPT_DBF(DBF_ERR,
+- "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+- AP_QID_CARD(zq->queue->qid),
+- AP_QID_QUEUE(zq->queue->qid),
+- (int) msg->hdr.type);
+- return -EAGAIN; /* repeat the request on a different device. */
++ AP_QID_QUEUE(zq->queue->qid),
++ (int) msg->hdr.type);
++ ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
++ AP_QID_CARD(zq->queue->qid),
++ AP_QID_QUEUE(zq->queue->qid),
++ (int) msg->hdr.type);
++ return -EAGAIN;
+ }
+ }
+
+@@ -836,15 +836,15 @@ static int convert_response_xcrb(struct zcrypt_queue *zq,
+ default: /* Unknown response type, this should NEVER EVER happen */
+ xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+ zq->online = 0;
+- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
++ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+- AP_QID_QUEUE(zq->queue->qid));
+- ZCRYPT_DBF(DBF_ERR,
+- "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+- AP_QID_CARD(zq->queue->qid),
+- AP_QID_QUEUE(zq->queue->qid),
+- (int) msg->hdr.type);
+- return -EAGAIN; /* repeat the request on a different device. */
++ AP_QID_QUEUE(zq->queue->qid),
++ (int) msg->hdr.type);
++ ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
++ AP_QID_CARD(zq->queue->qid),
++ AP_QID_QUEUE(zq->queue->qid),
++ (int) msg->hdr.type);
++ return -EAGAIN;
+ }
+ }
+
+@@ -865,15 +865,15 @@ static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
+ fallthrough; /* wrong cprb version is an unknown resp */
+ default: /* Unknown response type, this should NEVER EVER happen */
+ zq->online = 0;
+- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
++ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+- AP_QID_QUEUE(zq->queue->qid));
+- ZCRYPT_DBF(DBF_ERR,
+- "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+- AP_QID_CARD(zq->queue->qid),
+- AP_QID_QUEUE(zq->queue->qid),
+- (int) msg->hdr.type);
+- return -EAGAIN; /* repeat the request on a different device. */
++ AP_QID_QUEUE(zq->queue->qid),
++ (int) msg->hdr.type);
++ ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
++ AP_QID_CARD(zq->queue->qid),
++ AP_QID_QUEUE(zq->queue->qid),
++ (int) msg->hdr.type);
++ return -EAGAIN;
+ }
+ }
+
+@@ -895,15 +895,15 @@ static int convert_response_rng(struct zcrypt_queue *zq,
+ fallthrough; /* wrong cprb version is an unknown response */
+ default: /* Unknown response type, this should NEVER EVER happen */
+ zq->online = 0;
+- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
++ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+- AP_QID_QUEUE(zq->queue->qid));
+- ZCRYPT_DBF(DBF_ERR,
+- "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+- AP_QID_CARD(zq->queue->qid),
+- AP_QID_QUEUE(zq->queue->qid),
+- (int) msg->hdr.type);
+- return -EAGAIN; /* repeat the request on a different device. */
++ AP_QID_QUEUE(zq->queue->qid),
++ (int) msg->hdr.type);
++ ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
++ AP_QID_CARD(zq->queue->qid),
++ AP_QID_QUEUE(zq->queue->qid),
++ (int) msg->hdr.type);
++ return -EAGAIN;
+ }
+ }
+
+--
+2.27.0
+
--- /dev/null
+From 267879df0627b9af225069376b4d0ba0712e1b4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Sep 2020 19:07:04 +0200
+Subject: s390/startup: avoid save_area_sync overflow
+
+From: Vasily Gorbik <gor@linux.ibm.com>
+
+[ Upstream commit 2835c2ea95d50625108e47a459e1a47f6be836ce ]
+
+Currently we overflow save_area_sync and write over
+save_area_async. Although this is not a real problem, make
+startup_pgm_check_handler consistent with the late pgm check handler and
+store [%r0,%r7] directly into gpregs_save_area.
+
+Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/boot/head.S | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
+index dae10961d0724..1a2c2b1ed9649 100644
+--- a/arch/s390/boot/head.S
++++ b/arch/s390/boot/head.S
+@@ -360,22 +360,23 @@ ENTRY(startup_kdump)
+ # the save area and does disabled wait with a faulty address.
+ #
+ ENTRY(startup_pgm_check_handler)
+- stmg %r0,%r15,__LC_SAVE_AREA_SYNC
+- la %r1,4095
+- stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r1)
+- mvc __LC_GPREGS_SAVE_AREA-4095(128,%r1),__LC_SAVE_AREA_SYNC
+- mvc __LC_PSW_SAVE_AREA-4095(16,%r1),__LC_PGM_OLD_PSW
++ stmg %r8,%r15,__LC_SAVE_AREA_SYNC
++ la %r8,4095
++ stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r8)
++ stmg %r0,%r7,__LC_GPREGS_SAVE_AREA-4095(%r8)
++ mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA_SYNC
++ mvc __LC_PSW_SAVE_AREA-4095(16,%r8),__LC_PGM_OLD_PSW
+ mvc __LC_RETURN_PSW(16),__LC_PGM_OLD_PSW
+ ni __LC_RETURN_PSW,0xfc # remove IO and EX bits
+ ni __LC_RETURN_PSW+1,0xfb # remove MCHK bit
+ oi __LC_RETURN_PSW+1,0x2 # set wait state bit
+- larl %r2,.Lold_psw_disabled_wait
+- stg %r2,__LC_PGM_NEW_PSW+8
+- l %r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r2)
++ larl %r9,.Lold_psw_disabled_wait
++ stg %r9,__LC_PGM_NEW_PSW+8
++ l %r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r9)
+ brasl %r14,print_pgm_check_info
+ .Lold_psw_disabled_wait:
+- la %r1,4095
+- lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
++ la %r8,4095
++ lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8)
+ lpswe __LC_RETURN_PSW # disabled wait
+ .Ldump_info_stack:
+ .long 0x5000 + PAGE_SIZE - STACK_FRAME_OVERHEAD
+--
+2.27.0
+
--- /dev/null
+From ee24c74471e1a8555489da14c7290b845b79e1a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Sep 2020 10:31:05 +0200
+Subject: samples/bpf: Fix possible deadlock in xdpsock
+
+From: Magnus Karlsson <magnus.karlsson@intel.com>
+
+[ Upstream commit 5a2a0dd88f0f267ac5953acd81050ae43a82201f ]
+
+Fix a possible deadlock in the l2fwd application in xdpsock that can
+occur when there is no space in the Tx ring. There are two ways to get
+the kernel to consume entries in the Tx ring: calling sendto() to make
+it send packets and freeing entries from the completion ring, as the
+kernel will not send a packet if there is no space for it to add a
+completion entry in the completion ring. The Tx loop in l2fwd only
+used to call sendto(). This patch adds cleaning of the completion ring
+to that loop.
+
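+As a rough sketch of the resulting pattern (not the sample code verbatim;
+drain_completions() is a stand-in for the sample's completion-ring
+handling, the other names follow the l2fwd loop loosely):
+
+    /* keep retrying the Tx reservation, but make forward progress by
+     * freeing completion entries and kicking the kernel on each pass
+     */
+    while (ret != rcvd) {
+            if (ret < 0)
+                    exit_with_error(-ret);
+            drain_completions(xsk, fds);
+            if (xsk_ring_prod__needs_wakeup(&xsk->tx))
+                    sendto(xsk_socket__fd(xsk->xsk), NULL, 0,
+                           MSG_DONTWAIT, NULL, 0);
+            ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
+    }
+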
+Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/1599726666-8431-3-git-send-email-magnus.karlsson@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ samples/bpf/xdpsock_user.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
+index c821e98671393..63a9a2a39da7b 100644
+--- a/samples/bpf/xdpsock_user.c
++++ b/samples/bpf/xdpsock_user.c
+@@ -1111,6 +1111,7 @@ static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
+ while (ret != rcvd) {
+ if (ret < 0)
+ exit_with_error(-ret);
++ complete_tx_l2fwd(xsk, fds);
+ if (xsk_ring_prod__needs_wakeup(&xsk->tx))
+ kick_tx(xsk);
+ ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
+--
+2.27.0
+
--- /dev/null
+From 2bbcc1ec16c4df8adb8d09c74a4e52844988614a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Oct 2020 10:41:28 +0200
+Subject: scsi: core: Clean up allocation and freeing of sgtables
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 7007e9dd56767a95de0947b3f7599bcc2f21687f ]
+
+Rename scsi_init_io() to scsi_alloc_sgtables(), and ensure callers call
+scsi_free_sgtables() to clean up failures close to scsi_init_io() instead of
+leaking it down the generic I/O submission path.
+
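+As a hedged sketch of the intended calling convention in a driver's
+command setup path (prepare_cmnd() is a placeholder for driver-specific
+work, not a real helper):
+
+    static blk_status_t setup_cmnd(struct scsi_cmnd *cmd)
+    {
+            blk_status_t ret;
+
+            ret = scsi_alloc_sgtables(cmd);
+            if (ret != BLK_STS_OK)
+                    return ret;
+
+            ret = prepare_cmnd(cmd);
+            if (ret != BLK_STS_OK)
+                    goto fail;
+
+            return BLK_STS_OK;
+    fail:
+            /* free the tables here rather than leaking them further
+             * down the generic I/O submission path
+             */
+            scsi_free_sgtables(cmd);
+            return ret;
+    }
+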
+Link: https://lore.kernel.org/r/20201005084130.143273-9-hch@lst.de
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/scsi_lib.c | 22 ++++++++--------------
+ drivers/scsi/sd.c | 27 +++++++++++++++------------
+ drivers/scsi/sr.c | 16 ++++++----------
+ include/scsi/scsi_cmnd.h | 3 ++-
+ 4 files changed, 31 insertions(+), 37 deletions(-)
+
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 7affaaf8b98e0..198130b6a9963 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -530,7 +530,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
+ }
+ }
+
+-static void scsi_free_sgtables(struct scsi_cmnd *cmd)
++void scsi_free_sgtables(struct scsi_cmnd *cmd)
+ {
+ if (cmd->sdb.table.nents)
+ sg_free_table_chained(&cmd->sdb.table,
+@@ -539,6 +539,7 @@ static void scsi_free_sgtables(struct scsi_cmnd *cmd)
+ sg_free_table_chained(&cmd->prot_sdb->table,
+ SCSI_INLINE_PROT_SG_CNT);
+ }
++EXPORT_SYMBOL_GPL(scsi_free_sgtables);
+
+ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
+ {
+@@ -966,7 +967,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
+ }
+
+ /**
+- * scsi_init_io - SCSI I/O initialization function.
++ * scsi_alloc_sgtables - allocate S/G tables for a command
+ * @cmd: command descriptor we wish to initialize
+ *
+ * Returns:
+@@ -974,7 +975,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
+ * * BLK_STS_RESOURCE - if the failure is retryable
+ * * BLK_STS_IOERR - if the failure is fatal
+ */
+-blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
++blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
+ {
+ struct scsi_device *sdev = cmd->device;
+ struct request *rq = cmd->request;
+@@ -1066,7 +1067,7 @@ out_free_sgtables:
+ scsi_free_sgtables(cmd);
+ return ret;
+ }
+-EXPORT_SYMBOL(scsi_init_io);
++EXPORT_SYMBOL(scsi_alloc_sgtables);
+
+ /**
+ * scsi_initialize_rq - initialize struct scsi_cmnd partially
+@@ -1154,7 +1155,7 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
+ * submit a request without an attached bio.
+ */
+ if (req->bio) {
+- blk_status_t ret = scsi_init_io(cmd);
++ blk_status_t ret = scsi_alloc_sgtables(cmd);
+ if (unlikely(ret != BLK_STS_OK))
+ return ret;
+ } else {
+@@ -1194,7 +1195,6 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
+ struct request *req)
+ {
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+- blk_status_t ret;
+
+ if (!blk_rq_bytes(req))
+ cmd->sc_data_direction = DMA_NONE;
+@@ -1204,14 +1204,8 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
+ cmd->sc_data_direction = DMA_FROM_DEVICE;
+
+ if (blk_rq_is_scsi(req))
+- ret = scsi_setup_scsi_cmnd(sdev, req);
+- else
+- ret = scsi_setup_fs_cmnd(sdev, req);
+-
+- if (ret != BLK_STS_OK)
+- scsi_free_sgtables(cmd);
+-
+- return ret;
++ return scsi_setup_scsi_cmnd(sdev, req);
++ return scsi_setup_fs_cmnd(sdev, req);
+ }
+
+ static blk_status_t
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 16503e22691ed..e93a9a874004f 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -866,7 +866,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
+ cmd->transfersize = data_len;
+ rq->timeout = SD_TIMEOUT;
+
+- return scsi_init_io(cmd);
++ return scsi_alloc_sgtables(cmd);
+ }
+
+ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
+@@ -897,7 +897,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
+ cmd->transfersize = data_len;
+ rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
+
+- return scsi_init_io(cmd);
++ return scsi_alloc_sgtables(cmd);
+ }
+
+ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
+@@ -928,7 +928,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
+ cmd->transfersize = data_len;
+ rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
+
+- return scsi_init_io(cmd);
++ return scsi_alloc_sgtables(cmd);
+ }
+
+ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
+@@ -1069,7 +1069,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
+ * knows how much to actually write.
+ */
+ rq->__data_len = sdp->sector_size;
+- ret = scsi_init_io(cmd);
++ ret = scsi_alloc_sgtables(cmd);
+ rq->__data_len = blk_rq_bytes(rq);
+
+ return ret;
+@@ -1187,23 +1187,24 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
+ unsigned int dif;
+ bool dix;
+
+- ret = scsi_init_io(cmd);
++ ret = scsi_alloc_sgtables(cmd);
+ if (ret != BLK_STS_OK)
+ return ret;
+
++ ret = BLK_STS_IOERR;
+ if (!scsi_device_online(sdp) || sdp->changed) {
+ scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
+- return BLK_STS_IOERR;
++ goto fail;
+ }
+
+ if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
+ scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
+- return BLK_STS_IOERR;
++ goto fail;
+ }
+
+ if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
+ scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
+- return BLK_STS_IOERR;
++ goto fail;
+ }
+
+ /*
+@@ -1225,7 +1226,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
+ if (req_op(rq) == REQ_OP_ZONE_APPEND) {
+ ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
+ if (ret)
+- return ret;
++ goto fail;
+ }
+
+ fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
+@@ -1253,7 +1254,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
+ }
+
+ if (unlikely(ret != BLK_STS_OK))
+- return ret;
++ goto fail;
+
+ /*
+ * We shouldn't disconnect in the middle of a sector, so with a dumb
+@@ -1277,10 +1278,12 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
+ blk_rq_sectors(rq)));
+
+ /*
+- * This indicates that the command is ready from our end to be
+- * queued.
++ * This indicates that the command is ready from our end to be queued.
+ */
+ return BLK_STS_OK;
++fail:
++ scsi_free_sgtables(cmd);
++ return ret;
+ }
+
+ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index 3b3a53c6a0de5..7e8fe55f3b339 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -392,15 +392,11 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
+ struct request *rq = SCpnt->request;
+ blk_status_t ret;
+
+- ret = scsi_init_io(SCpnt);
++ ret = scsi_alloc_sgtables(SCpnt);
+ if (ret != BLK_STS_OK)
+- goto out;
++ return ret;
+ cd = scsi_cd(rq->rq_disk);
+
+- /* from here on until we're complete, any goto out
+- * is used for a killable error condition */
+- ret = BLK_STS_IOERR;
+-
+ SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
+ "Doing sr request, block = %d\n", block));
+
+@@ -509,12 +505,12 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
+ SCpnt->allowed = MAX_RETRIES;
+
+ /*
+- * This indicates that the command is ready from our end to be
+- * queued.
++ * This indicates that the command is ready from our end to be queued.
+ */
+- ret = BLK_STS_OK;
++ return BLK_STS_OK;
+ out:
+- return ret;
++ scsi_free_sgtables(SCpnt);
++ return BLK_STS_IOERR;
+ }
+
+ static int sr_block_open(struct block_device *bdev, fmode_t mode)
+diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
+index e76bac4d14c51..69ade4fb71aab 100644
+--- a/include/scsi/scsi_cmnd.h
++++ b/include/scsi/scsi_cmnd.h
+@@ -165,7 +165,8 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
+ size_t *offset, size_t *len);
+ extern void scsi_kunmap_atomic_sg(void *virt);
+
+-extern blk_status_t scsi_init_io(struct scsi_cmnd *cmd);
++blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd);
++void scsi_free_sgtables(struct scsi_cmnd *cmd);
+
+ #ifdef CONFIG_SCSI_DMA
+ extern int scsi_dma_map(struct scsi_cmnd *cmd);
+--
+2.27.0
+
--- /dev/null
+From 7a8d5d373a15da2f2dc92e51538fe69d2978ff5d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Sep 2020 13:27:18 -0700
+Subject: selftests/bpf: Define string const as global for test_sysctl_prog.c
+
+From: Yonghong Song <yhs@fb.com>
+
+[ Upstream commit 6e057fc15a2da4ee03eb1fa6889cf687e690106e ]
+
+When tweaking llvm optimizations, I found that the selftest build failed
+with the following error:
+ libbpf: elf: skipping unrecognized data section(6) .rodata.str1.1
+ libbpf: prog 'sysctl_tcp_mem': bad map relo against '.L__const.is_tcp_mem.tcp_mem_name'
+ in section '.rodata.str1.1'
+ Error: failed to open BPF object file: Relocation failed
+ make: *** [/work/net-next/tools/testing/selftests/bpf/test_sysctl_prog.skel.h] Error 255
+ make: *** Deleting file `/work/net-next/tools/testing/selftests/bpf/test_sysctl_prog.skel.h'
+
+The local string constant "tcp_mem_name" is put into the '.rodata.str1.1'
+section, which libbpf cannot handle. With untweaked upstream llvm, "tcp_mem_name"
+is completely inlined after loop unrolling.
+
+Commit 7fb5eefd7639 ("selftests/bpf: Fix test_sysctl_loop{1, 2}
+failure due to clang change") solved a similar problem by defining
+the string const as a global. Let us do the same here
+for test_sysctl_prog.c so it can weather future potential llvm changes.
+
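+A minimal sketch of the pattern (close to, but not exactly, the test's
+code):
+
+    /* a global const ends up in .rodata, which libbpf can handle */
+    const char tcp_mem_name[] = "net/ipv4/tcp_mem";
+
+    static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
+    {
+            char name[sizeof(tcp_mem_name)];
+            unsigned char i;
+
+            if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
+                    return 0;
+            for (i = 0; i < sizeof(tcp_mem_name) - 1; i++)
+                    if (name[i] != tcp_mem_name[i])
+                            return 0;
+            return 1;
+    }
+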
+Signed-off-by: Yonghong Song <yhs@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Andrii Nakryiko <andriin@fb.com>
+Link: https://lore.kernel.org/bpf/20200910202718.956042-1-yhs@fb.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/bpf/progs/test_sysctl_prog.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
+index 50525235380e8..5489823c83fc2 100644
+--- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
++++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
+@@ -19,11 +19,11 @@
+ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+ #endif
+
++const char tcp_mem_name[] = "net/ipv4/tcp_mem";
+ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
+ {
+- char tcp_mem_name[] = "net/ipv4/tcp_mem";
+ unsigned char i;
+- char name[64];
++ char name[sizeof(tcp_mem_name)];
+ int ret;
+
+ memset(name, 0, sizeof(name));
+--
+2.27.0
+
--- /dev/null
+From 77ee8b3a971505d4266484e97b6a54db623a0446 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Aug 2020 11:57:19 +1000
+Subject: selftests/powerpc: Make using_hash_mmu() work on Cell & PowerMac
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+[ Upstream commit 34c103342be3f9397e656da7c5cc86e97b91f514 ]
+
+These platforms don't show the MMU in /proc/cpuinfo, but they always
+use the hash MMU, so teach using_hash_mmu() about them.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200819015727.1977134-1-mpe@ellerman.id.au
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/powerpc/utils.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c
+index 18b6a773d5c73..638ffacc90aa1 100644
+--- a/tools/testing/selftests/powerpc/utils.c
++++ b/tools/testing/selftests/powerpc/utils.c
+@@ -318,7 +318,9 @@ int using_hash_mmu(bool *using_hash)
+
+ rc = 0;
+ while (fgets(line, sizeof(line), f) != NULL) {
+- if (strcmp(line, "MMU : Hash\n") == 0) {
++ if (!strcmp(line, "MMU : Hash\n") ||
++ !strcmp(line, "platform : Cell\n") ||
++ !strcmp(line, "platform : PowerMac\n")) {
+ *using_hash = true;
+ goto out;
+ }
+--
+2.27.0
+
--- /dev/null
+From 05c8a72c1f479365c98080f1d199ebaa8c61a075 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Aug 2020 10:00:45 -0700
+Subject: selftests/x86/fsgsbase: Reap a forgotten child
+
+From: Andy Lutomirski <luto@kernel.org>
+
+[ Upstream commit ab2dd173330a3f07142e68cd65682205036cd00f ]
+
+The ptrace() test forgot to reap its child. Reap it.
+
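+For reference, the usual reap pattern looks roughly like this (a sketch,
+not the test's exact code):
+
+    int status;
+
+    ptrace(PTRACE_CONT, child, NULL, NULL);
+    if (waitpid(child, &status, 0) < 0)
+            err(1, "waitpid");
+    if (!WIFEXITED(status))
+            printf("[WARN]\tChild didn't exit cleanly.\n");
+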
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lore.kernel.org/r/e7700a503f30e79ab35a63103938a19893dbeff2.1598461151.git.luto@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/x86/fsgsbase.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
+index 9983195535237..0056e2597f53a 100644
+--- a/tools/testing/selftests/x86/fsgsbase.c
++++ b/tools/testing/selftests/x86/fsgsbase.c
+@@ -517,6 +517,9 @@ static void test_ptrace_write_gsbase(void)
+
+ END:
+ ptrace(PTRACE_CONT, child, NULL, NULL);
++ wait(&status);
++ if (!WIFEXITED(status))
++ printf("[WARN]\tChild didn't exit cleanly.\n");
+ }
+
+ int main()
+--
+2.27.0
+
--- /dev/null
+From 58be58c2aefa794692c3a73312a68e175dd374f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Sep 2020 10:28:05 -0400
+Subject: selinux: access policycaps with READ_ONCE/WRITE_ONCE
+
+From: Stephen Smalley <stephen.smalley.work@gmail.com>
+
+[ Upstream commit e8ba53d0023a76ba0f50e6ee3e6288c5442f9d33 ]
+
+Use READ_ONCE/WRITE_ONCE for all accesses to the
+selinux_state.policycaps booleans to prevent compiler
+mischief.
+
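+The resulting access pattern, sketched (reader and writer sides
+abbreviated):
+
+    /* reader: a capability check may race with a policy (re)load,
+     * so load the flag with a single, non-tearable access
+     */
+    static inline bool selinux_policycap_openperm(void)
+    {
+            struct selinux_state *state = &selinux_state;
+
+            return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_OPENPERM]);
+    }
+
+    /* writer: publish each capability bit with a single store */
+    WRITE_ONCE(state->policycap[i], ebitmap_get_bit(&p->policycaps, i));
+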
+Signed-off-by: Stephen Smalley <stephen.smalley.work@gmail.com>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ security/selinux/include/security.h | 14 +++++++-------
+ security/selinux/ss/services.c | 3 ++-
+ 2 files changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
+index b0e02cfe3ce14..8a432f646967e 100644
+--- a/security/selinux/include/security.h
++++ b/security/selinux/include/security.h
+@@ -177,49 +177,49 @@ static inline bool selinux_policycap_netpeer(void)
+ {
+ struct selinux_state *state = &selinux_state;
+
+- return state->policycap[POLICYDB_CAPABILITY_NETPEER];
++ return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_NETPEER]);
+ }
+
+ static inline bool selinux_policycap_openperm(void)
+ {
+ struct selinux_state *state = &selinux_state;
+
+- return state->policycap[POLICYDB_CAPABILITY_OPENPERM];
++ return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_OPENPERM]);
+ }
+
+ static inline bool selinux_policycap_extsockclass(void)
+ {
+ struct selinux_state *state = &selinux_state;
+
+- return state->policycap[POLICYDB_CAPABILITY_EXTSOCKCLASS];
++ return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_EXTSOCKCLASS]);
+ }
+
+ static inline bool selinux_policycap_alwaysnetwork(void)
+ {
+ struct selinux_state *state = &selinux_state;
+
+- return state->policycap[POLICYDB_CAPABILITY_ALWAYSNETWORK];
++ return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_ALWAYSNETWORK]);
+ }
+
+ static inline bool selinux_policycap_cgroupseclabel(void)
+ {
+ struct selinux_state *state = &selinux_state;
+
+- return state->policycap[POLICYDB_CAPABILITY_CGROUPSECLABEL];
++ return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_CGROUPSECLABEL]);
+ }
+
+ static inline bool selinux_policycap_nnp_nosuid_transition(void)
+ {
+ struct selinux_state *state = &selinux_state;
+
+- return state->policycap[POLICYDB_CAPABILITY_NNP_NOSUID_TRANSITION];
++ return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_NNP_NOSUID_TRANSITION]);
+ }
+
+ static inline bool selinux_policycap_genfs_seclabel_symlinks(void)
+ {
+ struct selinux_state *state = &selinux_state;
+
+- return state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS];
++ return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS]);
+ }
+
+ int security_mls_enabled(struct selinux_state *state);
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index 1caf4e6033096..c55b3063753ab 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -2103,7 +2103,8 @@ static void security_load_policycaps(struct selinux_state *state)
+ struct ebitmap_node *node;
+
+ for (i = 0; i < ARRAY_SIZE(state->policycap); i++)
+- state->policycap[i] = ebitmap_get_bit(&p->policycaps, i);
++ WRITE_ONCE(state->policycap[i],
++ ebitmap_get_bit(&p->policycaps, i));
+
+ for (i = 0; i < ARRAY_SIZE(selinux_policycap_names); i++)
+ pr_info("SELinux: policy capability %s=%d\n",
+--
+2.27.0
+
--- /dev/null
+firmware-arm_scmi-fix-arch_cold_reset.patch
+firmware-arm_scmi-expand-smc-hvc-message-pool-to-mor.patch
+tee-client-uuid-skip-ree-kernel-login-method-as-well.patch
+firmware-arm_scmi-add-missing-rx-size-re-initialisat.patch
+x86-unwind-orc-fix-inactive-tasks-with-stack-pointer.patch
+firmware-arm_scmi-fix-locking-in-notifications.patch
+firmware-arm_scmi-fix-duplicate-workqueue-name.patch
+x86-alternative-don-t-call-text_poke-in-lazy-tlb-mod.patch
+ionic-no-rx-flush-in-deinit.patch
+rdma-mlx5-fix-devlink-deadlock-on-net-namespace-dele.patch
+mlxsw-core-fix-use-after-free-in-mlxsw_emad_trans_fi.patch
+tracing-synthetic-events-replace-buggy-strcat-with-s.patch
+afs-fix-a-use-after-free-in-afs_xattr_get_acl.patch
+afs-fix-afs_launder_page-to-not-clear-pg_writeback.patch
+rdma-qedr-fix-memory-leak-in-iwarp-cm.patch
+ata-sata_nv-fix-retrieving-of-active-qcs.patch
+arm64-efi-increase-efi-pe-coff-header-padding-to-64-.patch
+afs-fix-to-take-ref-on-page-when-pg_private-is-set.patch
+afs-fix-page-leak-on-afs_write_begin-failure.patch
+afs-fix-where-page-private-is-set-during-write.patch
+afs-wrap-page-private-manipulations-in-inline-functi.patch
+afs-alter-dirty-range-encoding-in-page-private.patch
+afs-fix-afs_invalidatepage-to-adjust-the-dirty-regio.patch
+afs-fix-dirty-region-encoding-on-ppc32-with-64k-page.patch
+vdpasim-fix-mac-address-configuration.patch
+interconnect-qcom-sdm845-enable-keepalive-for-the-mm.patch
+lockdep-fix-preemption-warn-for-spurious-irq-enable.patch
+usb-host-ehci-tegra-fix-error-handling-in-tegra_ehci.patch
+futex-fix-incorrect-should_fail_futex-handling.patch
+powerpc-vmemmap-fix-memory-leak-with-vmemmap-list-al.patch
+powerpc-powernv-smp-fix-spurious-dbg-warning.patch
+rdma-core-change-how-failing-destroy-is-handled-duri.patch
+f2fs-allocate-proper-size-memory-for-zstd-decompress.patch
+powerpc-watchpoint-ptrace-fix-sethwdebug-when-config.patch
+mm-fix-exec-activate_mm-vs-tlb-shootdown-and-lazy-tl.patch
+powerpc-select-arch_want_irqs_off_activate_mm.patch
+sparc64-remove-mm_cpumask-clearing-to-fix-kthread_us.patch
+f2fs-add-trace-exit-in-exception-path.patch
+f2fs-do-sanity-check-on-zoned-block-device-path.patch
+f2fs-fix-uninit-value-in-f2fs_lookup.patch
+f2fs-fix-to-check-segment-boundary-during-sit-page-r.patch
+s390-startup-avoid-save_area_sync-overflow.patch
+f2fs-compress-fix-to-disallow-enabling-compress-on-n.patch
+s390-ap-zcrypt-revisit-ap-and-zcrypt-error-handling.patch
+um-change-sigio_spinlock-to-a-mutex.patch
+f2fs-handle-errors-of-f2fs_get_meta_page_nofail.patch
+afs-don-t-assert-on-unpurgeable-server-records.patch
+powerpc-64s-handle-isa-v3.1-local-copy-paste-context.patch
+arm-8997-2-hw_breakpoint-handle-inexact-watchpoint-a.patch
+nfs4-fix-oops-when-copy_file_range-is-attempted-with.patch
+xfs-set-xfs_buf-type-flag-when-growing-summary-bitma.patch
+xfs-set-xfs_buf-s-b_ops-member-when-zeroing-bitmap-s.patch
+xfs-log-new-intent-items-created-as-part-of-finishin.patch
+power-supply-bq27xxx-report-not-charging-on-all-type.patch
+xfs-change-the-order-in-which-child-and-parent-defer.patch
+xfs-fix-realtime-bitmap-summary-file-truncation-when.patch
+io_uring-don-t-set-comp_locked-if-won-t-put.patch
+ath10k-fix-retry-packets-update-in-station-dump.patch
+x86-kaslr-initialize-mem_limit-to-the-real-maximum-a.patch
+drm-ast-separate-drm-driver-from-pci-code.patch
+drm-amdgpu-restore-ras-flags-when-user-resets-eeprom.patch
+video-fbdev-pvr2fb-initialize-variables.patch
+ath10k-start-recovery-process-when-payload-length-ex.patch
+ath10k-fix-vht-nss-calculation-when-stbc-is-enabled.patch
+drm-scheduler-scheduler-priority-fixes-v2.patch
+drm-brige-megachips-add-checking-if-ge_b850v3_lvds_i.patch
+asoc-sof-fix-a-runtime-pm-issue-in-sof-when-hdmi-cod.patch
+selftests-x86-fsgsbase-reap-a-forgotten-child.patch
+drm-bridge_connector-set-default-status-connected-fo.patch
+media-videodev2.h-rgb-bt2020-and-hsv-are-always-full.patch
+asoc-amd-clean-kernel-log-from-deferred-probe-error-.patch
+misc-fastrpc-fix-common-struct-sg_table-related-issu.patch
+staging-wfx-fix-potential-use-before-init.patch
+media-platform-improve-queue-set-up-flow-for-bug-fix.patch
+usb-typec-tcpm-during-pr_swap-source-caps-should-be-.patch
+media-tw5864-check-status-of-tw5864_frameinterval_ge.patch
+drm-vkms-avoid-warning-in-vkms_get_vblank_timestamp.patch
+media-imx274-fix-frame-interval-handling.patch
+mmc-via-sdmmc-fix-data-race-bug.patch
+drm-bridge-synopsys-dsi-add-support-for-non-continuo.patch
+brcmfmac-increase-f2-watermark-for-bcm4329.patch
+arm64-topology-stop-using-mpidr-for-topology-informa.patch
+printk-reduce-log_buf_shift-range-for-h8300.patch
+ia64-kprobes-use-generic-kretprobe-trampoline-handle.patch
+selftests-powerpc-make-using_hash_mmu-work-on-cell-p.patch
+kgdb-make-kgdbcon-work-properly-with-kgdb_earlycon.patch
+bpf-permit-map_ptr-arithmetic-with-opcode-add-and-of.patch
+drm-exynos-fix-common-struct-sg_table-related-issues.patch
+xen-gntdev-fix-common-struct-sg_table-related-issues.patch
+drm-lima-fix-common-struct-sg_table-related-issues.patch
+drm-panfrost-fix-common-struct-sg_table-related-issu.patch
+media-uvcvideo-fix-dereference-of-out-of-bound-list-.patch
+nfc-s3fwrn5-add-missing-crypto_hash-dependency.patch
+selftests-bpf-define-string-const-as-global-for-test.patch
+selinux-access-policycaps-with-read_once-write_once.patch
+samples-bpf-fix-possible-deadlock-in-xdpsock.patch
+drm-amd-display-check-clock-table-return.patch
+riscv-define-at_vector_size_arch-for-arch_dlinfo.patch
+cpufreq-sti-cpufreq-add-stih418-support.patch
+usb-adutux-fix-debugging.patch
+uio-free-uio-id-after-uio-file-node-is-freed.patch
+coresight-make-sysfs-functional-on-topologies-with-p.patch
+drm-amdgpu-no-sysfs-not-an-error-condition.patch
+mac80211-add-missing-queue-hash-initialization-to-80.patch
+usb-xhci-omit-duplicate-actions-when-suspending-a-ru.patch
+sunrpc-mitigate-cond_resched-in-xprt_transmit.patch
+cpuidle-tegra-correctly-handle-result-of-arm_cpuidle.patch
+arm64-mm-return-cpu_all_mask-when-node-is-numa_no_no.patch
+can-flexcan-disable-clocks-during-stop-mode.patch
+habanalabs-remove-security-from-arb_mst_quiet-regist.patch
+xfs-don-t-free-rt-blocks-when-we-re-doing-a-remap-bu.patch
+xfs-avoid-lr-buffer-overrun-due-to-crafted-h_len.patch
+acpi-add-out-of-bounds-and-numa_off-protections-to-p.patch
+octeontx2-af-fix-ld-custom-ltype-aliasing.patch
+brcmfmac-fix-warning-message-after-dongle-setup-fail.patch
+ath11k-use-gfp_atomic-instead-of-gfp_kernel-in-ath11.patch
+ath11k-fix-warning-caused-by-lockdep_assert_held.patch
+ath11k-change-to-disable-softirqs-for-ath11k_regd_up.patch
+drivers-net-wan-hdlc_fr-correctly-handle-special-skb.patch
+usb-dwc3-core-do-not-queue-work-if-dr_mode-is-not-us.patch
+bus-mhi-core-abort-suspends-due-to-outgoing-pending-.patch
+bus-fsl_mc-do-not-rely-on-caller-to-provide-non-null.patch
+acpi-hmat-fix-handling-of-changes-from-acpi-6.2-to-a.patch
+power-supply-test_power-add-missing-newlines-when-pr.patch
+drm-amd-display-hdmi-remote-sink-need-mode-validatio.patch
+drm-amd-display-avoid-set-zero-in-the-requested-clk.patch
+arc-dts-fix-the-errors-detected-by-dtbs_check.patch
+block-consider-only-dispatched-requests-for-inflight.patch
+btrfs-fix-replace-of-seed-device.patch
+md-bitmap-md_bitmap_get_counter-returns-wrong-blocks.patch
+f2fs-fix-to-set-sbi_need_fsck-flag-for-inconsistent-.patch
+bnxt_en-log-unknown-link-speed-appropriately.patch
+rpmsg-glink-use-complete_all-for-open-states.patch
+pci-acpi-add-ampere-altra-soc-mcfg-quirk.patch
+clk-ti-clockdomain-fix-static-checker-warning.patch
+nfsd-rename-delegation-related-tracepoints-to-make-t.patch
+nfsd4-remove-check_conflicting_opens-warning.patch
+net-9p-initialize-sun_server.sun_path-to-have-addr-s.patch
+ceph-encode-inodes-parent-d_name-in-cap-reconnect-me.patch
+drivers-watchdog-rdc321x_wdt-fix-race-condition-bugs.patch
+jbd2-avoid-transaction-reuse-after-reformatting.patch
+ext4-detect-already-used-quota-file-early.patch
+kvm-ppc-book3s-hv-do-not-allocate-hpt-for-a-nested-g.patch
+scsi-core-clean-up-allocation-and-freeing-of-sgtable.patch
+gfs2-call-truncate_inode_pages_final-for-address-spa.patch
+gfs2-fix-null-pointer-dereference-in-gfs2_rgrp_dump.patch
+gfs2-use-after-free-in-sysfs-deregistration.patch
+gfs2-add-validation-checks-for-size-of-superblock.patch
+handle-status_io_timeout-gracefully.patch
+cifs-handle-eintr-in-cifs_setattr.patch
+arm64-dts-renesas-ulcb-add-full-pwr-cycle-in-suspend.patch
+arm-dts-omap4-fix-sgx-clock-rate-for-4430.patch
+memory-emif-remove-bogus-debugfs-error-handling.patch
+arm-dts-s5pv210-enable-audio-on-aries-boards.patch
+arm-dts-s5pv210-remove-dma-controller-bus-node-name-.patch
+arm-dts-s5pv210-move-fixed-clocks-under-root-node.patch
+arm-dts-s5pv210-move-pmu-node-out-of-clock-controlle.patch
+arm-dts-s5pv210-remove-dedicated-audio-subsystem-nod.patch
+arm-dts-s5pv210-add-rtc-32-khz-clock-in-aries-family.patch
+arm-dts-s5pv210-align-spi-gpio-node-name-with-dtsche.patch
+soc-qcom-rpmh-rsc-sleep-waiting-for-tcs-slots-to-be-.patch
+soc-ti-k3-ringacc-add-am65x-sr2.0-support.patch
+bindings-soc-ti-soc-ringacc-remove-ti-dma-ring-reset.patch
+firmware-arm_scmi-move-scmi-bus-init-and-exit-calls-.patch
+arm64-dts-qcom-kitakami-temporarily-disable-sdhci1.patch
+nbd-make-the-config-put-is-called-before-the-notifyi.patch
+sgl_alloc_order-fix-memory-leak.patch
+nvme-rdma-fix-crash-when-connect-rejected.patch
--- /dev/null
+From e5f1e1a343c5605385f67ddbb41633057aa3a4d4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Oct 2020 14:57:35 -0400
+Subject: sgl_alloc_order: fix memory leak
+
+From: Douglas Gilbert <dgilbert@interlog.com>
+
+[ Upstream commit b2a182a40278bc5849730e66bca01a762188ed86 ]
+
+sgl_alloc_order() can fail when 'length' is large on a memory
+constrained system. When order > 0 it will potentially be
+making several multi-page allocations with the later ones more
+likely to fail than the earlier one. So it is important that
+sgl_alloc_order() frees up any pages it has obtained before
+returning NULL. In the case where order > 0 it calls the wrong
+page-freeing function and leaks. In testing, the leak was
+sufficient to bring down my 8 GiB laptop with OOM.
+
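+Roughly, the failure path needs the order-aware free (a sketch, not the
+exact library code):
+
+    page = alloc_pages(gfp, order);
+    if (!page) {
+            /* earlier chained entries may each span PAGE_SIZE << order
+             * bytes, so plain sgl_free() (which assumes order 0) would
+             * leak most of those pages
+             */
+            sgl_free_order(sgl, order);
+            return NULL;
+    }
+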
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Douglas Gilbert <dgilbert@interlog.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/scatterlist.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/scatterlist.c b/lib/scatterlist.c
+index 5d63a8857f361..c448642e0f786 100644
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -514,7 +514,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length,
+ elem_len = min_t(u64, length, PAGE_SIZE << order);
+ page = alloc_pages(gfp, order);
+ if (!page) {
+- sgl_free(sgl);
++ sgl_free_order(sgl, order);
+ return NULL;
+ }
+
+--
+2.27.0
+
--- /dev/null
+From 4a61607838fe62e63b81e25a14b6c1859328bc6a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Jul 2020 14:17:11 -0700
+Subject: soc: qcom: rpmh-rsc: Sleep waiting for tcs slots to be free
+
+From: Stephen Boyd <swboyd@chromium.org>
+
+[ Upstream commit 2bc20f3c8487bd5bc4dd9ad2c06d2ba05fd4e838 ]
+
+The busy loop in rpmh_rsc_send_data() is written with the assumption
+that the udelay will be preempted by the tcs_tx_done() irq handler when
+the TCS slots are all full. This doesn't hold true when the calling
+thread is an irqthread and the tcs_tx_done() irq is also an irqthread.
+That's because kernel irqthreads are SCHED_FIFO and thus need to
+voluntarily give up priority by calling into the scheduler so that other
+threads can run.
+
+I see RCU stalls when I boot with irqthreads on the kernel commandline
+because the modem remoteproc driver is trying to send an rpmh async
+message from an irqthread that needs to give up the CPU for the rpmh
+irqthread to run and clear out tcs slots.
+
+ rcu: INFO: rcu_preempt self-detected stall on CPU
+ rcu: 0-....: (1 GPs behind) idle=402/1/0x4000000000000002 softirq=2108/2109 fqs=4920
+ (t=21016 jiffies g=2933 q=590)
+ Task dump for CPU 0:
+ irq/11-smp2p R running task 0 148 2 0x00000028
+ Call trace:
+ dump_backtrace+0x0/0x154
+ show_stack+0x20/0x2c
+ sched_show_task+0xfc/0x108
+ dump_cpu_task+0x44/0x50
+ rcu_dump_cpu_stacks+0xa4/0xf8
+ rcu_sched_clock_irq+0x7dc/0xaa8
+ update_process_times+0x30/0x54
+ tick_sched_handle+0x50/0x64
+ tick_sched_timer+0x4c/0x8c
+ __hrtimer_run_queues+0x21c/0x36c
+ hrtimer_interrupt+0xf0/0x22c
+ arch_timer_handler_phys+0x40/0x50
+ handle_percpu_devid_irq+0x114/0x25c
+ __handle_domain_irq+0x84/0xc4
+ gic_handle_irq+0xd0/0x178
+ el1_irq+0xbc/0x180
+ save_return_addr+0x18/0x28
+ return_address+0x54/0x88
+ preempt_count_sub+0x40/0x88
+ _raw_spin_unlock_irqrestore+0x4c/0x6c
+ ___ratelimit+0xd0/0x128
+ rpmh_rsc_send_data+0x24c/0x378
+ __rpmh_write+0x1b0/0x208
+ rpmh_write_async+0x90/0xbc
+ rpmhpd_send_corner+0x60/0x8c
+ rpmhpd_aggregate_corner+0x8c/0x124
+ rpmhpd_set_performance_state+0x8c/0xbc
+ _genpd_set_performance_state+0xdc/0x1b8
+ dev_pm_genpd_set_performance_state+0xb8/0xf8
+ q6v5_pds_disable+0x34/0x60 [qcom_q6v5_mss]
+ qcom_msa_handover+0x38/0x44 [qcom_q6v5_mss]
+ q6v5_handover_interrupt+0x24/0x3c [qcom_q6v5]
+ handle_nested_irq+0xd0/0x138
+ qcom_smp2p_intr+0x188/0x200
+ irq_thread_fn+0x2c/0x70
+ irq_thread+0xfc/0x14c
+ kthread+0x11c/0x12c
+ ret_from_fork+0x10/0x18
+
+This busy loop naturally lends itself to using a wait queue so that each
+thread that tries to send a message will sleep waiting on the waitqueue
+and only be woken up when a free slot is available. This should make
+things more predictable too because the scheduler will be able to put
+tasks that are waiting on a free tcs to sleep, instead of the busy loop
+we currently have today.
+
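+The shape of the change, as a simplified sketch (claim_slot() stands in
+for the driver's claim logic; it returns a slot id or -EBUSY):
+
+    /* before: busy-wait, which can starve the SCHED_FIFO irqthread
+     * that would free a slot
+     */
+    do {
+            ret = claim_slot(drv, msg);
+            if (ret == -EBUSY)
+                    udelay(10);
+    } while (ret == -EBUSY);
+
+    /* after: sleep on a waitqueue; tcs_tx_done() calls wake_up() once
+     * a slot is released
+     */
+    spin_lock_irqsave(&drv->lock, flags);
+    wait_event_lock_irq(drv->tcs_wait,
+                        (slot = claim_slot(drv, msg)) >= 0,
+                        drv->lock);
+    /* program and trigger the claimed slot, then unlock */
+    spin_unlock_irqrestore(&drv->lock, flags);
+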
+Reviewed-by: Maulik Shah <mkshah@codeaurora.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Tested-by: Stanimir Varbanov <stanimir.varbanov@linaro.org>
+Cc: Douglas Anderson <dianders@chromium.org>
+Cc: Maulik Shah <mkshah@codeaurora.org>
+Cc: Lina Iyer <ilina@codeaurora.org>
+Signed-off-by: Stephen Boyd <swboyd@chromium.org>
+Link: https://lore.kernel.org/r/20200724211711.810009-1-sboyd@kernel.org
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/soc/qcom/rpmh-internal.h | 4 ++
+ drivers/soc/qcom/rpmh-rsc.c | 115 +++++++++++++++----------------
+ 2 files changed, 58 insertions(+), 61 deletions(-)
+
+diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
+index ef60e790a750a..344ba687c13be 100644
+--- a/drivers/soc/qcom/rpmh-internal.h
++++ b/drivers/soc/qcom/rpmh-internal.h
+@@ -8,6 +8,7 @@
+ #define __RPM_INTERNAL_H__
+
+ #include <linux/bitmap.h>
++#include <linux/wait.h>
+ #include <soc/qcom/tcs.h>
+
+ #define TCS_TYPE_NR 4
+@@ -106,6 +107,8 @@ struct rpmh_ctrlr {
+ * @lock: Synchronize state of the controller. If RPMH's cache
+ * lock will also be held, the order is: drv->lock then
+ * cache_lock.
++ * @tcs_wait: Wait queue used to wait for @tcs_in_use to free up a
++ * slot
+ * @client: Handle to the DRV's client.
+ */
+ struct rsc_drv {
+@@ -118,6 +121,7 @@ struct rsc_drv {
+ struct tcs_group tcs[TCS_TYPE_NR];
+ DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
+ spinlock_t lock;
++ wait_queue_head_t tcs_wait;
+ struct rpmh_ctrlr client;
+ };
+
+diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
+index ae66757825813..a297911afe571 100644
+--- a/drivers/soc/qcom/rpmh-rsc.c
++++ b/drivers/soc/qcom/rpmh-rsc.c
+@@ -19,6 +19,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
++#include <linux/wait.h>
+
+ #include <soc/qcom/cmd-db.h>
+ #include <soc/qcom/tcs.h>
+@@ -453,6 +454,7 @@ skip:
+ if (!drv->tcs[ACTIVE_TCS].num_tcs)
+ enable_tcs_irq(drv, i, false);
+ spin_unlock(&drv->lock);
++ wake_up(&drv->tcs_wait);
+ if (req)
+ rpmh_tx_done(req, err);
+ }
+@@ -571,73 +573,34 @@ static int find_free_tcs(struct tcs_group *tcs)
+ }
+
+ /**
+- * tcs_write() - Store messages into a TCS right now, or return -EBUSY.
++ * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
+ * @drv: The controller.
++ * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
+ * @msg: The data to be sent.
+ *
+- * Grabs a TCS for ACTIVE_ONLY transfers and writes the messages to it.
++ * Claims a tcs in the given tcs_group while making sure that no existing cmd
++ * is in flight that would conflict with the one in @msg.
+ *
+- * If there are no free TCSes for ACTIVE_ONLY transfers or if a command for
+- * the same address is already transferring returns -EBUSY which means the
+- * client should retry shortly.
++ * Context: Must be called with the drv->lock held since that protects
++ * tcs_in_use.
+ *
+- * Return: 0 on success, -EBUSY if client should retry, or an error.
+- * Client should have interrupts enabled for a bit before retrying.
++ * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
++ * or the tcs_group is full.
+ */
+-static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
++static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
++ const struct tcs_request *msg)
+ {
+- struct tcs_group *tcs;
+- int tcs_id;
+- unsigned long flags;
+ int ret;
+
+- tcs = get_tcs_for_msg(drv, msg);
+- if (IS_ERR(tcs))
+- return PTR_ERR(tcs);
+-
+- spin_lock_irqsave(&drv->lock, flags);
+ /*
+ * The h/w does not like if we send a request to the same address,
+ * when one is already in-flight or being processed.
+ */
+ ret = check_for_req_inflight(drv, tcs, msg);
+ if (ret)
+- goto unlock;
+-
+- ret = find_free_tcs(tcs);
+- if (ret < 0)
+- goto unlock;
+- tcs_id = ret;
+-
+- tcs->req[tcs_id - tcs->offset] = msg;
+- set_bit(tcs_id, drv->tcs_in_use);
+- if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
+- /*
+- * Clear previously programmed WAKE commands in selected
+- * repurposed TCS to avoid triggering them. tcs->slots will be
+- * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
+- */
+- write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+- write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+- enable_tcs_irq(drv, tcs_id, true);
+- }
+- spin_unlock_irqrestore(&drv->lock, flags);
+-
+- /*
+- * These two can be done after the lock is released because:
+- * - We marked "tcs_in_use" under lock.
+- * - Once "tcs_in_use" has been marked nobody else could be writing
+- * to these registers until the interrupt goes off.
+- * - The interrupt can't go off until we trigger w/ the last line
+- * of __tcs_set_trigger() below.
+- */
+- __tcs_buffer_write(drv, tcs_id, 0, msg);
+- __tcs_set_trigger(drv, tcs_id, true);
++ return ret;
+
+- return 0;
+-unlock:
+- spin_unlock_irqrestore(&drv->lock, flags);
+- return ret;
++ return find_free_tcs(tcs);
+ }
+
+ /**
+@@ -664,18 +627,47 @@ unlock:
+ */
+ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
+ {
+- int ret;
++ struct tcs_group *tcs;
++ int tcs_id;
++ unsigned long flags;
+
+- do {
+- ret = tcs_write(drv, msg);
+- if (ret == -EBUSY) {
+- pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
+- msg->cmds[0].addr);
+- udelay(10);
+- }
+- } while (ret == -EBUSY);
++ tcs = get_tcs_for_msg(drv, msg);
++ if (IS_ERR(tcs))
++ return PTR_ERR(tcs);
+
+- return ret;
++ spin_lock_irqsave(&drv->lock, flags);
++
++ /* Wait forever for a free tcs. It better be there eventually! */
++ wait_event_lock_irq(drv->tcs_wait,
++ (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
++ drv->lock);
++
++ tcs->req[tcs_id - tcs->offset] = msg;
++ set_bit(tcs_id, drv->tcs_in_use);
++ if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
++ /*
++ * Clear previously programmed WAKE commands in selected
++ * repurposed TCS to avoid triggering them. tcs->slots will be
++ * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
++ */
++ write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
++ write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
++ enable_tcs_irq(drv, tcs_id, true);
++ }
++ spin_unlock_irqrestore(&drv->lock, flags);
++
++ /*
++ * These two can be done after the lock is released because:
++ * - We marked "tcs_in_use" under lock.
++ * - Once "tcs_in_use" has been marked nobody else could be writing
++ * to these registers until the interrupt goes off.
++ * - The interrupt can't go off until we trigger w/ the last line
++ * of __tcs_set_trigger() below.
++ */
++ __tcs_buffer_write(drv, tcs_id, 0, msg);
++ __tcs_set_trigger(drv, tcs_id, true);
++
++ return 0;
+ }
+
+ /**
+@@ -983,6 +975,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
+ return ret;
+
+ spin_lock_init(&drv->lock);
++ init_waitqueue_head(&drv->tcs_wait);
+ bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
+
+ irq = platform_get_irq(pdev, drv->id);
+--
+2.27.0
+
--- /dev/null
+From 3644180bf2cca0a9d1bf37325eef7a63f4e623e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Sep 2020 21:29:56 -0700
+Subject: soc: ti: k3: ringacc: add am65x sr2.0 support
+
+From: Grygorii Strashko <grygorii.strashko@ti.com>
+
+[ Upstream commit 95e7be062aea6d2e09116cd4d28957d310c04781 ]
+
+The AM65x SR2.0 Ringacc has erratum i2023 "RINGACC, UDMA: RINGACC and
+UDMA Ring State Interoperability Issue after Channel Teardown" fixed. The
+erratum is also fixed in the J721E SoC.
+
+Use SOC bus data for K3 SoC identification and enable the i2023 erratum
+workaround only for AM65x SR1.0. This also makes the
+"ti,dma-ring-reset-quirk" DT property obsolete.
+
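+The revision check follows the usual soc_device_match() pattern; roughly
+(a sketch of the flow, with the lookup table trimmed):
+
+    static const struct soc_device_attribute k3_ringacc_socinfo[] = {
+            { .family = "AM65X", .revision = "SR1.0",
+              .data = &k3_ringacc_soc_data_sr1 },
+            { /* sentinel */ }
+    };
+
+    soc = soc_device_match(k3_ringacc_socinfo);
+    if (soc && soc->data) {
+            const struct k3_ringacc_soc_data *soc_data = soc->data;
+
+            ringacc->dma_ring_reset_quirk = soc_data->dma_ring_reset_quirk;
+    }
+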
+Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
+Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/soc/ti/k3-ringacc.c | 33 ++++++++++++++++++++++++++++++---
+ 1 file changed, 30 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
+index 6dcc21dde0cb7..1147dc4c1d596 100644
+--- a/drivers/soc/ti/k3-ringacc.c
++++ b/drivers/soc/ti/k3-ringacc.c
+@@ -10,6 +10,7 @@
+ #include <linux/init.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
++#include <linux/sys_soc.h>
+ #include <linux/soc/ti/k3-ringacc.h>
+ #include <linux/soc/ti/ti_sci_protocol.h>
+ #include <linux/soc/ti/ti_sci_inta_msi.h>
+@@ -208,6 +209,15 @@ struct k3_ringacc {
+ const struct k3_ringacc_ops *ops;
+ };
+
++/**
++ * struct k3_ringacc_soc_data - Rings accelerator SoC data
++ *
++ * @dma_ring_reset_quirk: DMA reset w/a enable
++ */
++struct k3_ringacc_soc_data {
++ unsigned dma_ring_reset_quirk:1;
++};
++
+ static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
+ {
+ return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
+@@ -1051,9 +1061,6 @@ static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
+ return ret;
+ }
+
+- ringacc->dma_ring_reset_quirk =
+- of_property_read_bool(node, "ti,dma-ring-reset-quirk");
+-
+ ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
+ if (IS_ERR(ringacc->tisci)) {
+ ret = PTR_ERR(ringacc->tisci);
+@@ -1084,9 +1091,22 @@ static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
+ ringacc->rm_gp_range);
+ }
+
++static const struct k3_ringacc_soc_data k3_ringacc_soc_data_sr1 = {
++ .dma_ring_reset_quirk = 1,
++};
++
++static const struct soc_device_attribute k3_ringacc_socinfo[] = {
++ { .family = "AM65X",
++ .revision = "SR1.0",
++ .data = &k3_ringacc_soc_data_sr1
++ },
++ {/* sentinel */}
++};
++
+ static int k3_ringacc_init(struct platform_device *pdev,
+ struct k3_ringacc *ringacc)
+ {
++ const struct soc_device_attribute *soc;
+ void __iomem *base_fifo, *base_rt;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+@@ -1103,6 +1123,13 @@ static int k3_ringacc_init(struct platform_device *pdev,
+ if (ret)
+ return ret;
+
++ soc = soc_device_match(k3_ringacc_socinfo);
++ if (soc && soc->data) {
++ const struct k3_ringacc_soc_data *soc_data = soc->data;
++
++ ringacc->dma_ring_reset_quirk = soc_data->dma_ring_reset_quirk;
++ }
++
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
+ base_rt = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_rt))
+--
+2.27.0
+
--- /dev/null
+From dea7ea49daa0a9026ff4ac2e052c75118f44046d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Sep 2020 14:52:18 +1000
+Subject: sparc64: remove mm_cpumask clearing to fix kthread_use_mm race
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+[ Upstream commit bafb056ce27940c9994ea905336aa8f27b4f7275 ]
+
+The de facto (and apparently uncommented) standard for using an mm had,
+thanks to this code in sparc if nothing else, been that you must have a
+reference on mm_users *and that reference must have been obtained with
+mmget()*, i.e., from a thread with a reference to mm_users that had used
+the mm.
+
+The introduction of mmget_not_zero() in commit d2005e3f41d4
+("userfaultfd: don't pin the user memory in userfaultfd_file_create()")
+allowed mm_count holders to operate on user mappings asynchronously
+from the actual threads using the mm, but they were not to load those
+mappings into their TLB (i.e., walking vmas and page tables is okay,
+kthread_use_mm() is not).
+
+io_uring 2b188cc1bb857 ("Add io_uring IO interface") added code which
+does a kthread_use_mm() from a mmget_not_zero() refcount.
+
+The problem with this is that code which previously assumed mm == current->mm
+and mm->mm_users == 1 implies the mm will remain single-threaded at
+least until this thread creates another mm_users reference, has now
+broken.
+
+arch/sparc/kernel/smp_64.c:
+
+ if (atomic_read(&mm->mm_users) == 1) {
+ cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+ goto local_flush_and_out;
+ }
+
+vs fs/io_uring.c
+
+ if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL) ||
+ !mmget_not_zero(ctx->sqo_mm)))
+ return -EFAULT;
+ kthread_use_mm(ctx->sqo_mm);
+
+mmget_not_zero() could come in right after the mm_users == 1 test, then
+kthread_use_mm() which sets its CPU in the mm_cpumask. That update could
+be lost if cpumask_copy() occurs afterward.
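+
+Laid out as an interleaving (an illustrative sketch built only from the
+two excerpts above; CPU0/CPU1 are arbitrary labels):
+
+  CPU0: smp_flush_tlb_mm()             CPU1: io_uring SQPOLL thread
+  --------------------------------     --------------------------------
+  atomic_read(&mm->mm_users) == 1
+                                        mmget_not_zero(ctx->sqo_mm)
+                                        kthread_use_mm(ctx->sqo_mm)
+                                          /* sets CPU1's bit in
+                                             mm_cpumask(mm) */
+  cpumask_copy(mm_cpumask(mm),
+               cpumask_of(cpu0))
+  /* CPU1's bit is lost, so later
+     flushes never reach CPU1's
+     stale TLB entries */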
+
+I propose we fix this by allowing mmget_not_zero() to be a first-class
+reference, and not have this obscure undocumented and unchecked
+restriction.
+
+The basic fix for sparc64 is to remove its mm_cpumask clearing code. The
+optimisation could be effectively restored by sending IPIs to mm_cpumask
+members and having them remove themselves from mm_cpumask. This is more
+tricky so I leave it as an exercise for someone with a sparc64 SMP.
+powerpc has a (currently similarly broken) example.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Acked-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200914045219.3736466-4-npiggin@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/sparc/kernel/smp_64.c | 65 ++++++++------------------------------
+ 1 file changed, 14 insertions(+), 51 deletions(-)
+
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index e286e2badc8a4..e38d8bf454e86 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -1039,38 +1039,9 @@ void smp_fetch_global_pmu(void)
+ * are flush_tlb_*() routines, and these run after flush_cache_*()
+ * which performs the flushw.
+ *
+- * The SMP TLB coherency scheme we use works as follows:
+- *
+- * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
+- * space has (potentially) executed on, this is the heuristic
+- * we use to avoid doing cross calls.
+- *
+- * Also, for flushing from kswapd and also for clones, we
+- * use cpu_vm_mask as the list of cpus to make run the TLB.
+- *
+- * 2) TLB context numbers are shared globally across all processors
+- * in the system, this allows us to play several games to avoid
+- * cross calls.
+- *
+- * One invariant is that when a cpu switches to a process, and
+- * that processes tsk->active_mm->cpu_vm_mask does not have the
+- * current cpu's bit set, that tlb context is flushed locally.
+- *
+- * If the address space is non-shared (ie. mm->count == 1) we avoid
+- * cross calls when we want to flush the currently running process's
+- * tlb state. This is done by clearing all cpu bits except the current
+- * processor's in current->mm->cpu_vm_mask and performing the
+- * flush locally only. This will force any subsequent cpus which run
+- * this task to flush the context from the local tlb if the process
+- * migrates to another cpu (again).
+- *
+- * 3) For shared address spaces (threads) and swapping we bite the
+- * bullet for most cases and perform the cross call (but only to
+- * the cpus listed in cpu_vm_mask).
+- *
+- * The performance gain from "optimizing" away the cross call for threads is
+- * questionable (in theory the big win for threads is the massive sharing of
+- * address space state across processors).
++ * mm->cpu_vm_mask is a bit mask of which cpus an address
++ * space has (potentially) executed on, this is the heuristic
++ * we use to limit cross calls.
+ */
+
+ /* This currently is only used by the hugetlb arch pre-fault
+@@ -1080,18 +1051,13 @@ void smp_fetch_global_pmu(void)
+ void smp_flush_tlb_mm(struct mm_struct *mm)
+ {
+ u32 ctx = CTX_HWBITS(mm->context);
+- int cpu = get_cpu();
+
+- if (atomic_read(&mm->mm_users) == 1) {
+- cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+- goto local_flush_and_out;
+- }
++ get_cpu();
+
+ smp_cross_call_masked(&xcall_flush_tlb_mm,
+ ctx, 0, 0,
+ mm_cpumask(mm));
+
+-local_flush_and_out:
+ __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
+
+ put_cpu();
+@@ -1114,17 +1080,15 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
+ {
+ u32 ctx = CTX_HWBITS(mm->context);
+ struct tlb_pending_info info;
+- int cpu = get_cpu();
++
++ get_cpu();
+
+ info.ctx = ctx;
+ info.nr = nr;
+ info.vaddrs = vaddrs;
+
+- if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+- cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+- else
+- smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
+- &info, 1);
++ smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
++ &info, 1);
+
+ __flush_tlb_pending(ctx, nr, vaddrs);
+
+@@ -1134,14 +1098,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
+ void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+ {
+ unsigned long context = CTX_HWBITS(mm->context);
+- int cpu = get_cpu();
+
+- if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+- cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+- else
+- smp_cross_call_masked(&xcall_flush_tlb_page,
+- context, vaddr, 0,
+- mm_cpumask(mm));
++ get_cpu();
++
++ smp_cross_call_masked(&xcall_flush_tlb_page,
++ context, vaddr, 0,
++ mm_cpumask(mm));
++
+ __flush_tlb_page(context, vaddr);
+
+ put_cpu();
+--
+2.27.0
+
--- /dev/null
+From 233c97539b8ab5d7ace7497a2bd1aa1211f22f8e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Aug 2020 10:58:24 +0200
+Subject: staging: wfx: fix potential use before init
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jérôme Pouiller <jerome.pouiller@silabs.com>
+
+[ Upstream commit ce3653a8d3db096aa163fc80239d8ec1305c81fa ]
+
+The trace below can appear:
+
+ [83613.832200] INFO: trying to register non-static key.
+ [83613.837248] the code is fine but needs lockdep annotation.
+ [83613.842808] turning off the locking correctness validator.
+ [83613.848375] CPU: 3 PID: 141 Comm: kworker/3:2H Tainted: G O 5.6.13-silabs15 #2
+ [83613.857019] Hardware name: BCM2835
+ [83613.860605] Workqueue: events_highpri bh_work [wfx]
+ [83613.865552] Backtrace:
+ [83613.868041] [<c010f2cc>] (dump_backtrace) from [<c010f7b8>] (show_stack+0x20/0x24)
+ [83613.881463] [<c010f798>] (show_stack) from [<c0d82138>] (dump_stack+0xe8/0x114)
+ [83613.888882] [<c0d82050>] (dump_stack) from [<c01a02ec>] (register_lock_class+0x748/0x768)
+ [83613.905035] [<c019fba4>] (register_lock_class) from [<c019da04>] (__lock_acquire+0x88/0x13dc)
+ [83613.924192] [<c019d97c>] (__lock_acquire) from [<c019f6a4>] (lock_acquire+0xe8/0x274)
+ [83613.942644] [<c019f5bc>] (lock_acquire) from [<c0daa5dc>] (_raw_spin_lock_irqsave+0x58/0x6c)
+ [83613.961714] [<c0daa584>] (_raw_spin_lock_irqsave) from [<c0ab3248>] (skb_dequeue+0x24/0x78)
+ [83613.974967] [<c0ab3224>] (skb_dequeue) from [<bf330db0>] (wfx_tx_queues_get+0x96c/0x1294 [wfx])
+ [83613.989728] [<bf330444>] (wfx_tx_queues_get [wfx]) from [<bf320454>] (bh_work+0x454/0x26d8 [wfx])
+ [83614.009337] [<bf320000>] (bh_work [wfx]) from [<c014c920>] (process_one_work+0x23c/0x7ec)
+ [83614.028141] [<c014c6e4>] (process_one_work) from [<c014cf1c>] (worker_thread+0x4c/0x55c)
+ [83614.046861] [<c014ced0>] (worker_thread) from [<c0154c04>] (kthread+0x138/0x168)
+ [83614.064876] [<c0154acc>] (kthread) from [<c01010b4>] (ret_from_fork+0x14/0x20)
+ [83614.072200] Exception stack(0xecad3fb0 to 0xecad3ff8)
+ [83614.077323] 3fa0: 00000000 00000000 00000000 00000000
+ [83614.085620] 3fc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+ [83614.093914] 3fe0: 00000000 00000000 00000000 00000000 00000013 00000000
+
+Indeed, the code of wfx_add_interface() shows that the interface is
+enabled too early. So, the spinlock associated with some skb_queue may
+not yet be initialized when wfx_tx_queues_get() is called.
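+
+For reference, the spinlock inside an skb queue only becomes a valid
+lock once skb_queue_head_init() has run; a generic sketch (not the
+driver's actual structures or init path):
+
+    struct sk_buff_head q;
+
+    skb_queue_head_init(&q);    /* initializes q.lock */
+    skb_dequeue(&q);            /* only safe after the init above */
+
+Hence wfx_tx_queues_init() has to run before the vif becomes visible to
+the bottom-half worker, which is what the reordering below does.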
+
+Signed-off-by: Jérôme Pouiller <jerome.pouiller@silabs.com>
+Link: https://lore.kernel.org/r/20200825085828.399505-8-Jerome.Pouiller@silabs.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/wfx/sta.c | 28 ++++++++++++++--------------
+ 1 file changed, 14 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
+index 7dace7c17bf5c..536c62001c709 100644
+--- a/drivers/staging/wfx/sta.c
++++ b/drivers/staging/wfx/sta.c
+@@ -761,17 +761,6 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ return -EOPNOTSUPP;
+ }
+
+- for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+- if (!wdev->vif[i]) {
+- wdev->vif[i] = vif;
+- wvif->id = i;
+- break;
+- }
+- }
+- if (i == ARRAY_SIZE(wdev->vif)) {
+- mutex_unlock(&wdev->conf_mutex);
+- return -EOPNOTSUPP;
+- }
+ // FIXME: prefer use of container_of() to get vif
+ wvif->vif = vif;
+ wvif->wdev = wdev;
+@@ -788,12 +777,22 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ init_completion(&wvif->scan_complete);
+ INIT_WORK(&wvif->scan_work, wfx_hw_scan_work);
+
+- mutex_unlock(&wdev->conf_mutex);
++ wfx_tx_queues_init(wvif);
++ wfx_tx_policy_init(wvif);
++
++ for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
++ if (!wdev->vif[i]) {
++ wdev->vif[i] = vif;
++ wvif->id = i;
++ break;
++ }
++ }
++ WARN(i == ARRAY_SIZE(wdev->vif), "try to instantiate more vif than supported");
+
+ hif_set_macaddr(wvif, vif->addr);
+
+- wfx_tx_queues_init(wvif);
+- wfx_tx_policy_init(wvif);
++ mutex_unlock(&wdev->conf_mutex);
++
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ // Combo mode does not support Block Acks. We can re-enable them
+@@ -825,6 +824,7 @@ void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ wvif->vif = NULL;
+
+ mutex_unlock(&wdev->conf_mutex);
++
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ // Combo mode does not support Block Acks. We can re-enable them
+--
+2.27.0
+
--- /dev/null
+From cf002bbdab73a1193931266927c9d0f42c4a1086 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jul 2020 16:09:53 -0400
+Subject: SUNRPC: Mitigate cond_resched() in xprt_transmit()
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 6f9f17287e78e5049931af2037b15b26d134a32a ]
+
+The original purpose of this expensive call is to prevent a long
+queue of requests from blocking other work.
+
+The cond_resched() call is unnecessary after just a single send
+operation.
+
+For longer queues, instead of invoking the kernel scheduler, simply
+release the transport send lock and return to the RPC scheduler.
+
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/xprt.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 5a8e47bbfb9f4..13fbc2dd4196a 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1520,10 +1520,13 @@ xprt_transmit(struct rpc_task *task)
+ {
+ struct rpc_rqst *next, *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+- int status;
++ int counter, status;
+
+ spin_lock(&xprt->queue_lock);
++ counter = 0;
+ while (!list_empty(&xprt->xmit_queue)) {
++ if (++counter == 20)
++ break;
+ next = list_first_entry(&xprt->xmit_queue,
+ struct rpc_rqst, rq_xmit);
+ xprt_pin_rqst(next);
+@@ -1531,7 +1534,6 @@ xprt_transmit(struct rpc_task *task)
+ status = xprt_request_transmit(next, task);
+ if (status == -EBADMSG && next != req)
+ status = 0;
+- cond_resched();
+ spin_lock(&xprt->queue_lock);
+ xprt_unpin_rqst(next);
+ if (status == 0) {
+--
+2.27.0
+
--- /dev/null
+From 4d7655f5b4c5d6846b795960c70e84e7a4d3aeb0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Sep 2020 19:10:22 +0530
+Subject: tee: client UUID: Skip REE kernel login method as well
+
+From: Sumit Garg <sumit.garg@linaro.org>
+
+[ Upstream commit 722939528a37aa0cb22d441e2045c0cf53e78fb0 ]
+
+Since the addition of session's client UUID generation via commit [1],
+login via the REE kernel method was disallowed. So fix that by passing
+the nil UUID in case of the TEE_IOCTL_LOGIN_REE_KERNEL method as well.
+
+Fixes: e33bcbab16d1 ("tee: add support for session's client UUID generation") [1]
+Signed-off-by: Sumit Garg <sumit.garg@linaro.org>
+Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tee/tee_core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
+index 64637e09a0953..2f6199ebf7698 100644
+--- a/drivers/tee/tee_core.c
++++ b/drivers/tee/tee_core.c
+@@ -200,7 +200,8 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
+ int name_len;
+ int rc;
+
+- if (connection_method == TEE_IOCTL_LOGIN_PUBLIC) {
++ if (connection_method == TEE_IOCTL_LOGIN_PUBLIC ||
++ connection_method == TEE_IOCTL_LOGIN_REE_KERNEL) {
+ /* Nil UUID to be passed to TEE environment */
+ uuid_copy(uuid, &uuid_null);
+ return 0;
+--
+2.27.0
+
--- /dev/null
+From 1e5dd94addbfef346ce10ee9a919376b11b58ada Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Nov 2020 12:35:33 -0500
+Subject: tracing, synthetic events: Replace buggy strcat() with seq_buf
+ operations
+
+[ Upstream commit 761a8c58db6bc884994b28cd6d9707b467d680c1 ]
+
+There was a memory corruption bug happening while running the synthetic
+event selftests:
+
+ kmemleak: Cannot insert 0xffff8c196fa2afe5 into the object search tree (overlaps existing)
+ CPU: 5 PID: 6866 Comm: ftracetest Tainted: G W 5.9.0-rc5-test+ #577
+ Hardware name: Hewlett-Packard HP Compaq Pro 6300 SFF/339A, BIOS K01 v03.03 07/14/2016
+ Call Trace:
+ dump_stack+0x8d/0xc0
+ create_object.cold+0x3b/0x60
+ slab_post_alloc_hook+0x57/0x510
+ ? tracing_map_init+0x178/0x340
+ __kmalloc+0x1b1/0x390
+ tracing_map_init+0x178/0x340
+ event_hist_trigger_func+0x523/0xa40
+ trigger_process_regex+0xc5/0x110
+ event_trigger_write+0x71/0xd0
+ vfs_write+0xca/0x210
+ ksys_write+0x70/0xf0
+ do_syscall_64+0x33/0x40
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ RIP: 0033:0x7fef0a63a487
+ Code: 64 89 02 48 c7 c0 ff ff ff ff eb bb 0f 1f 80 00 00 00 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 48 89 54 24 18 48 89 74 24
+ RSP: 002b:00007fff76f18398 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+ RAX: ffffffffffffffda RBX: 0000000000000039 RCX: 00007fef0a63a487
+ RDX: 0000000000000039 RSI: 000055eb3b26d690 RDI: 0000000000000001
+ RBP: 000055eb3b26d690 R08: 000000000000000a R09: 0000000000000038
+ R10: 000055eb3b2cdb80 R11: 0000000000000246 R12: 0000000000000039
+ R13: 00007fef0a70b500 R14: 0000000000000039 R15: 00007fef0a70b700
+ kmemleak: Kernel memory leak detector disabled
+ kmemleak: Object 0xffff8c196fa2afe0 (size 8):
+ kmemleak: comm "ftracetest", pid 6866, jiffies 4295082531
+ kmemleak: min_count = 1
+ kmemleak: count = 0
+ kmemleak: flags = 0x1
+ kmemleak: checksum = 0
+ kmemleak: backtrace:
+ __kmalloc+0x1b1/0x390
+ tracing_map_init+0x1be/0x340
+ event_hist_trigger_func+0x523/0xa40
+ trigger_process_regex+0xc5/0x110
+ event_trigger_write+0x71/0xd0
+ vfs_write+0xca/0x210
+ ksys_write+0x70/0xf0
+ do_syscall_64+0x33/0x40
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+The cause came down to a use of strcat() that was appending a string that
+had been shortened, but the strcat() did not take that into account.
+
+strcat() is extremely dangerous as it does not care how big the buffer is.
+Replace it with seq_buf operations that prevent the buffer from being
+overwritten if what is being written is bigger than the buffer.
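+
+For reference, the seq_buf pattern the fix below uses, reduced to a
+minimal sketch (made-up buffer and strings, not the actual
+parse_synth_field() code):
+
+    char buf[32];
+    struct seq_buf s;
+
+    seq_buf_init(&s, buf, sizeof(buf));
+    seq_buf_puts(&s, "unsigned ");      /* never writes past buf;   */
+    seq_buf_puts(&s, "long[8]");        /* overflow is only flagged */
+    if (!seq_buf_buffer_left(&s))
+        return -E2BIG;                  /* bail out instead of corrupting memory */
+    buf[s.len] = '\0';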
+
+Fixes: 10819e25799a ("tracing: Handle synthetic event array field type checking correctly")
+Reviewed-by: Tom Zanussi <zanussi@kernel.org>
+Tested-by: Tom Zanussi <zanussi@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_events_synth.c | 23 ++++++++++++-----------
+ 1 file changed, 12 insertions(+), 11 deletions(-)
+
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index c8892156db341..65e8c27141c02 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -465,6 +465,7 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
+ struct synth_field *field;
+ const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
+ int len, ret = 0;
++ struct seq_buf s;
+ ssize_t size;
+
+ if (field_type[0] == ';')
+@@ -503,13 +504,9 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
+ field_type++;
+ len = strlen(field_type) + 1;
+
+- if (array) {
+- int l = strlen(array);
++ if (array)
++ len += strlen(array);
+
+- if (l && array[l - 1] == ';')
+- l--;
+- len += l;
+- }
+ if (prefix)
+ len += strlen(prefix);
+
+@@ -518,14 +515,18 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
+ ret = -ENOMEM;
+ goto free;
+ }
++ seq_buf_init(&s, field->type, len);
+ if (prefix)
+- strcat(field->type, prefix);
+- strcat(field->type, field_type);
++ seq_buf_puts(&s, prefix);
++ seq_buf_puts(&s, field_type);
+ if (array) {
+- strcat(field->type, array);
+- if (field->type[len - 1] == ';')
+- field->type[len - 1] = '\0';
++ seq_buf_puts(&s, array);
++ if (s.buffer[s.len - 1] == ';')
++ s.len--;
+ }
++ if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
++ goto free;
++ s.buffer[s.len] = '\0';
+
+ size = synth_field_size(field->type);
+ if (size <= 0) {
+--
+2.27.0
+
--- /dev/null
+From b4f37168f2d6523b0465f0f943ebb307b3ea64d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Sep 2020 11:26:41 +0800
+Subject: uio: free uio id after uio file node is freed
+
+From: Lang Dai <lang.dai@intel.com>
+
+[ Upstream commit 8fd0e2a6df262539eaa28b0a2364cca10d1dc662 ]
+
+uio_register_device() does two things.
+1) get an uio id from a global pool, e.g. the id is <A>
+2) create file nodes like /sys/class/uio/uio<A>
+
+uio_unregister_device() does two things.
+1) free the uio id <A> and return it to the global pool
+2) free the file node /sys/class/uio/uio<A>
+
+There is a situation where one worker is calling uio_unregister_device()
+while another worker is calling uio_register_device().
+If the two workers are X and Y, they can interleave as follows:
+1) X frees the uio id <AAA>
+2) Y gets the uio id <AAA>
+3) Y creates the file node /sys/class/uio/uio<AAA>
+4) X frees the file node /sys/class/uio/uio<AAA>
+Then it fails at the 3rd step and causes the phenomenon we saw, as it
+is creating a duplicated file node.
+
+Failure reports as follows:
+sysfs: cannot create duplicate filename '/class/uio/uio10'
+Call Trace:
+ sysfs_do_create_link_sd.isra.2+0x9e/0xb0
+ sysfs_create_link+0x25/0x40
+ device_add+0x2c4/0x640
+ __uio_register_device+0x1c5/0x576 [uio]
+ adf_uio_init_bundle_dev+0x231/0x280 [intel_qat]
+ adf_uio_register+0x1c0/0x340 [intel_qat]
+ adf_dev_start+0x202/0x370 [intel_qat]
+ adf_dev_start_async+0x40/0xa0 [intel_qat]
+ process_one_work+0x14d/0x410
+ worker_thread+0x4b/0x460
+ kthread+0x105/0x140
+ ? process_one_work+0x410/0x410
+ ? kthread_bind+0x40/0x40
+ ret_from_fork+0x1f/0x40
+ Code: 85 c0 48 89 c3 74 12 b9 00 10 00 00 48 89 c2 31 f6 4c 89 ef
+ e8 ec c4 ff ff 4c 89 e2 48 89 de 48 c7 c7 e8 b4 ee b4 e8 6a d4 d7
+ ff <0f> 0b 48 89 df e8 20 fa f3 ff 5b 41 5c 41 5d 5d c3 66 0f 1f 84
+---[ end trace a7531c1ed5269e84 ]---
+ c6xxvf b002:00:00.0: Failed to register UIO devices
+ c6xxvf b002:00:00.0: Failed to register UIO devices
+
+Signed-off-by: Lang Dai <lang.dai@intel.com>
+
+Link: https://lore.kernel.org/r/1600054002-17722-1-git-send-email-lang.dai@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/uio/uio.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index 73efb80815db8..6dca744e39e95 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -1048,8 +1048,6 @@ void uio_unregister_device(struct uio_info *info)
+
+ idev = info->uio_dev;
+
+- uio_free_minor(idev);
+-
+ mutex_lock(&idev->info_lock);
+ uio_dev_del_attributes(idev);
+
+@@ -1064,6 +1062,8 @@ void uio_unregister_device(struct uio_info *info)
+
+ device_unregister(&idev->dev);
+
++ uio_free_minor(idev);
++
+ return;
+ }
+ EXPORT_SYMBOL_GPL(uio_unregister_device);
+--
+2.27.0
+
--- /dev/null
+From acba3ca0949e9d5841542363b96bca728efafe9e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Jun 2020 13:23:17 +0200
+Subject: um: change sigio_spinlock to a mutex
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit f2d05059e15af3f70502074f4e3a504530af504a ]
+
+Lockdep complains at boot:
+
+=============================
+[ BUG: Invalid wait context ]
+5.7.0-05093-g46d91ecd597b #98 Not tainted
+-----------------------------
+swapper/1 is trying to lock:
+0000000060931b98 (&desc[i].request_mutex){+.+.}-{3:3}, at: __setup_irq+0x11d/0x623
+other info that might help us debug this:
+context-{4:4}
+1 lock held by swapper/1:
+ #0: 000000006074fed8 (sigio_spinlock){+.+.}-{2:2}, at: sigio_lock+0x1a/0x1c
+stack backtrace:
+CPU: 0 PID: 1 Comm: swapper Not tainted 5.7.0-05093-g46d91ecd597b #98
+Stack:
+ 7fa4fab0 6028dfd1 0000002a 6008bea5
+ 7fa50700 7fa50040 7fa4fac0 6028e016
+ 7fa4fb50 6007f6da 60959c18 00000000
+Call Trace:
+ [<60023a0e>] show_stack+0x13b/0x155
+ [<6028e016>] dump_stack+0x2a/0x2c
+ [<6007f6da>] __lock_acquire+0x515/0x15f2
+ [<6007eb50>] lock_acquire+0x245/0x273
+ [<6050d9f1>] __mutex_lock+0xbd/0x325
+ [<6050dc76>] mutex_lock_nested+0x1d/0x1f
+ [<6008e27e>] __setup_irq+0x11d/0x623
+ [<6008e8ed>] request_threaded_irq+0x169/0x1a6
+ [<60021eb0>] um_request_irq+0x1ee/0x24b
+ [<600234ee>] write_sigio_irq+0x3b/0x76
+ [<600383ca>] sigio_broken+0x146/0x2e4
+ [<60020bd8>] do_one_initcall+0xde/0x281
+
+This happens because we hold sigio_spinlock and then get into
+requesting an interrupt, which takes a mutex.
+
+Change the spinlock to a mutex to avoid that.
+
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/um/kernel/sigio.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/um/kernel/sigio.c b/arch/um/kernel/sigio.c
+index 10c99e058fcae..d1cffc2a7f212 100644
+--- a/arch/um/kernel/sigio.c
++++ b/arch/um/kernel/sigio.c
+@@ -35,14 +35,14 @@ int write_sigio_irq(int fd)
+ }
+
+ /* These are called from os-Linux/sigio.c to protect its pollfds arrays. */
+-static DEFINE_SPINLOCK(sigio_spinlock);
++static DEFINE_MUTEX(sigio_mutex);
+
+ void sigio_lock(void)
+ {
+- spin_lock(&sigio_spinlock);
++ mutex_lock(&sigio_mutex);
+ }
+
+ void sigio_unlock(void)
+ {
+- spin_unlock(&sigio_spinlock);
++ mutex_unlock(&sigio_mutex);
+ }
+--
+2.27.0
+
--- /dev/null
+From 09157fde8694480dfd1dada8c4b77b50eacbee56 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Sep 2020 13:26:00 +0200
+Subject: USB: adutux: fix debugging
+
+From: Oliver Neukum <oneukum@suse.com>
+
+[ Upstream commit c56150c1bc8da5524831b1dac2eec3c67b89f587 ]
+
+Handling for removal of the controller was missing at one place.
+Add it.
+
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+Link: https://lore.kernel.org/r/20200917112600.26508-1-oneukum@suse.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/misc/adutux.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
+index a7eefe11f31aa..45a3879799352 100644
+--- a/drivers/usb/misc/adutux.c
++++ b/drivers/usb/misc/adutux.c
+@@ -209,6 +209,7 @@ static void adu_interrupt_out_callback(struct urb *urb)
+
+ if (status != 0) {
+ if ((status != -ENOENT) &&
++ (status != -ESHUTDOWN) &&
+ (status != -ECONNRESET)) {
+ dev_dbg(&dev->udev->dev,
+ "%s :nonzero status received: %d\n", __func__,
+--
+2.27.0
+
--- /dev/null
+From beb64fd53f1b2af1e561865d7b6cf97a0d78231c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Jul 2020 11:17:39 +0800
+Subject: usb: dwc3: core: do not queue work if dr_mode is not USB_DR_MODE_OTG
+
+From: Li Jun <jun.li@nxp.com>
+
+[ Upstream commit dc336b19e82d0454ea60270cd18fbb4749e162f6 ]
+
+Do not try to queue the drd work if dr_mode is not USB_DR_MODE_OTG,
+because the work is not initialized. This may be triggered by a user
+trying to change the mode file in debugfs on a single-role port, which
+will cause the kernel dump below:
+[ 60.115529] ------------[ cut here ]------------
+[ 60.120166] WARNING: CPU: 1 PID: 627 at kernel/workqueue.c:1473
+__queue_work+0x46c/0x520
+[ 60.128254] Modules linked in:
+[ 60.131313] CPU: 1 PID: 627 Comm: sh Not tainted
+5.7.0-rc4-00022-g914a586-dirty #135
+[ 60.139054] Hardware name: NXP i.MX8MQ EVK (DT)
+[ 60.143585] pstate: a0000085 (NzCv daIf -PAN -UAO)
+[ 60.148376] pc : __queue_work+0x46c/0x520
+[ 60.152385] lr : __queue_work+0x314/0x520
+[ 60.156393] sp : ffff8000124ebc40
+[ 60.159705] x29: ffff8000124ebc40 x28: ffff800011808018
+[ 60.165018] x27: ffff800011819ef8 x26: ffff800011d39980
+[ 60.170331] x25: ffff800011808018 x24: 0000000000000100
+[ 60.175643] x23: 0000000000000013 x22: 0000000000000001
+[ 60.180955] x21: ffff0000b7c08e00 x20: ffff0000b6c31080
+[ 60.186267] x19: ffff0000bb99bc00 x18: 0000000000000000
+[ 60.191579] x17: 0000000000000000 x16: 0000000000000000
+[ 60.196891] x15: 0000000000000000 x14: 0000000000000000
+[ 60.202202] x13: 0000000000000000 x12: 0000000000000000
+[ 60.207515] x11: 0000000000000000 x10: 0000000000000040
+[ 60.212827] x9 : ffff800011d55460 x8 : ffff800011d55458
+[ 60.218138] x7 : ffff0000b7800028 x6 : 0000000000000000
+[ 60.223450] x5 : ffff0000b7800000 x4 : 0000000000000000
+[ 60.228762] x3 : ffff0000bb997cc0 x2 : 0000000000000001
+[ 60.234074] x1 : 0000000000000000 x0 : ffff0000b6c31088
+[ 60.239386] Call trace:
+[ 60.241834] __queue_work+0x46c/0x520
+[ 60.245496] queue_work_on+0x6c/0x90
+[ 60.249075] dwc3_set_mode+0x48/0x58
+[ 60.252651] dwc3_mode_write+0xf8/0x150
+[ 60.256489] full_proxy_write+0x5c/0xa8
+[ 60.260327] __vfs_write+0x18/0x40
+[ 60.263729] vfs_write+0xdc/0x1c8
+[ 60.267045] ksys_write+0x68/0xf0
+[ 60.270360] __arm64_sys_write+0x18/0x20
+[ 60.274286] el0_svc_common.constprop.0+0x68/0x160
+[ 60.279077] do_el0_svc+0x20/0x80
+[ 60.282394] el0_sync_handler+0x10c/0x178
+[ 60.286403] el0_sync+0x140/0x180
+[ 60.289716] ---[ end trace 70b155582e2b7988 ]---
+
+Signed-off-by: Li Jun <jun.li@nxp.com>
+Signed-off-by: Felipe Balbi <balbi@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/dwc3/core.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 2f9f4ad562d4e..6dd02a8802f4b 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -121,9 +121,6 @@ static void __dwc3_set_mode(struct work_struct *work)
+ int ret;
+ u32 reg;
+
+- if (dwc->dr_mode != USB_DR_MODE_OTG)
+- return;
+-
+ pm_runtime_get_sync(dwc->dev);
+
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
+@@ -209,6 +206,9 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
+ {
+ unsigned long flags;
+
++ if (dwc->dr_mode != USB_DR_MODE_OTG)
++ return;
++
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->desired_dr_role = mode;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+--
+2.27.0
+
--- /dev/null
+From 0d3072eaf8fb326cc34eddd8f9ae73fcadb846c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Oct 2020 17:06:57 +0800
+Subject: usb: host: ehci-tegra: Fix error handling in tegra_ehci_probe()
+
+From: Tang Bin <tangbin@cmss.chinamobile.com>
+
+[ Upstream commit 32d174d2d5eb318c34ff36771adefabdf227c186 ]
+
+If the function platform_get_irq() fails, the negative value
+returned will not be detected here. So fix error handling in
+tegra_ehci_probe().
+
+Fixes: 79ad3b5add4a ("usb: host: Add EHCI driver for NVIDIA Tegra SoCs")
+Acked-by: Alan Stern <stern@rowland.harvard.edu>
+Acked-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Tang Bin <tangbin@cmss.chinamobile.com>
+Link: https://lore.kernel.org/r/20201026090657.49988-1-tangbin@cmss.chinamobile.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/host/ehci-tegra.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
+index e077b2ca53c51..869d9c4de5fcd 100644
+--- a/drivers/usb/host/ehci-tegra.c
++++ b/drivers/usb/host/ehci-tegra.c
+@@ -479,8 +479,8 @@ static int tegra_ehci_probe(struct platform_device *pdev)
+ u_phy->otg->host = hcd_to_bus(hcd);
+
+ irq = platform_get_irq(pdev, 0);
+- if (!irq) {
+- err = -ENODEV;
++ if (irq < 0) {
++ err = irq;
+ goto cleanup_phy;
+ }
+
+--
+2.27.0
+
--- /dev/null
+From 5fa8a5c48a28ce39d7b4fc635aece49616444e12 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Aug 2020 11:38:27 -0700
+Subject: usb: typec: tcpm: During PR_SWAP, source caps should be sent only
+ after tSwapSourceStart
+
+From: Badhri Jagan Sridharan <badhri@google.com>
+
+[ Upstream commit 6bbe2a90a0bb4af8dd99c3565e907fe9b5e7fd88 ]
+
+The patch addresses the compliance test failures while running
+TD.PD.CP.E3, TD.PD.CP.E4, TD.PD.CP.E5 of the "Deterministic PD
+Compliance MOI" test plan published in https://www.usb.org/usbc.
+For a product to be Type-C compliant, it's expected that these tests
+are run on usb.org certified Type-C compliance tester as mentioned in
+https://www.usb.org/usbc.
+
+The purpose of the tests TD.PD.CP.E3, TD.PD.CP.E4, TD.PD.CP.E5 is to
+verify the PR_SWAP response of the device. While doing so, the test
+asserts that the Source Capabilities message is NOT received from the test
+device within tSwapSourceStart min (20 ms) from the time the last bit
+of the GoodCRC corresponding to the PS_RDY message sent by the UUT was
+sent. If it is, the test fails.
+
+This is in line with the requirements from the USB Power Delivery
+Specification Revision 3.0, Version 1.2:
+"6.6.8.1 SwapSourceStartTimer
+The SwapSourceStartTimer Shall be used by the new Source, after a
+Power Role Swap or Fast Role Swap, to ensure that it does not send
+Source_Capabilities Message before the new Sink is ready to receive
+the
+Source_Capabilities Message. The new Source Shall Not send the
+Source_Capabilities Message earlier than tSwapSourceStart after the
+last bit of the EOP of GoodCRC Message sent in response to the PS_RDY
+Message sent by the new Source indicating that its power supply is
+ready."
+
+The patch makes sure that TCPM does not send the Source_Capabilities
+Message within tSwapSourceStart(20ms) by transitioning into
+SRC_STARTUP only after tSwapSourceStart(20ms).
+
+Signed-off-by: Badhri Jagan Sridharan <badhri@google.com>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Link: https://lore.kernel.org/r/20200817183828.1895015-1-badhri@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/typec/tcpm/tcpm.c | 2 +-
+ include/linux/usb/pd.h | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index a48e3f90d1961..1e676ee44c937 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -3573,7 +3573,7 @@ static void run_state_machine(struct tcpm_port *port)
+ */
+ tcpm_set_pwr_role(port, TYPEC_SOURCE);
+ tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
+- tcpm_set_state(port, SRC_STARTUP, 0);
++ tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
+ break;
+
+ case VCONN_SWAP_ACCEPT:
+diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
+index b6c233e79bd45..1df895e4680b2 100644
+--- a/include/linux/usb/pd.h
++++ b/include/linux/usb/pd.h
+@@ -473,6 +473,7 @@ static inline unsigned int rdo_max_power(u32 rdo)
+ #define PD_T_ERROR_RECOVERY 100 /* minimum 25 is insufficient */
+ #define PD_T_SRCSWAPSTDBY 625 /* Maximum of 650ms */
+ #define PD_T_NEWSRC 250 /* Maximum of 275ms */
++#define PD_T_SWAP_SRC_START 20 /* Minimum of 20ms */
+
+ #define PD_T_DRP_TRY 100 /* 75 - 150 ms */
+ #define PD_T_DRP_TRYWAIT 600 /* 400 - 800 ms */
+--
+2.27.0
+
--- /dev/null
+From 82467739e6ba263655c90983dca0a2859a6676d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Sep 2020 16:17:49 +0300
+Subject: usb: xhci: omit duplicate actions when suspending a runtime suspended
+ host.
+
+From: Peter Chen <peter.chen@nxp.com>
+
+[ Upstream commit 18a367e8947d72dd91b6fc401e88a2952c6363f7 ]
+
+If the xhci-plat.c is the platform driver, after the runtime pm is
+enabled, the xhci_suspend is called if nothing is connected on
+the port. When the system goes to suspend, it will call xhci_suspend again
+if USB wakeup is enabled.
+
+Since the runtime suspend wakeup setting is not always the same as
+the system suspend wakeup setting (e.g. at runtime suspend we always
+need wakeup if the controller is in a low power mode, but at system
+suspend we may not need wakeup), move the judgement to after the wakeup
+setting has been changed.
+
+[commit message rewording -Mathias]
+
+Reviewed-by: Jun Li <jun.li@nxp.com>
+Signed-off-by: Peter Chen <peter.chen@nxp.com>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Link: https://lore.kernel.org/r/20200918131752.16488-8-mathias.nyman@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/host/xhci.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index e534f524b7f87..e88f4f9539955 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -982,12 +982,15 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
+ xhci->shared_hcd->state != HC_STATE_SUSPENDED)
+ return -EINVAL;
+
+- xhci_dbc_suspend(xhci);
+-
+ /* Clear root port wake on bits if wakeup not allowed. */
+ if (!do_wakeup)
+ xhci_disable_port_wake_on_bits(xhci);
+
++ if (!HCD_HW_ACCESSIBLE(hcd))
++ return 0;
++
++ xhci_dbc_suspend(xhci);
++
+ /* Don't poll the roothubs on bus suspend. */
+ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+--
+2.27.0
+
--- /dev/null
+From beb087ac5ecb37cb08a459d13a7250fa18c69a53 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Oct 2020 13:20:49 +0100
+Subject: vdpasim: fix MAC address configuration
+
+From: Laurent Vivier <lvivier@redhat.com>
+
+[ Upstream commit 4a6a42db53aae049a8a64d4b273761bc80c46ebf ]
+
+vdpa_sim generates a random MAC address but it is never used by upper
+layers because the VIRTIO_NET_F_MAC bit is not set in the features list.
+
+Because of that, virtio-net always regenerates a random MAC address each
+time it is loaded whereas the address should only change on vdpa_sim
+load/unload.
+
+Fix that by adding VIRTIO_NET_F_MAC in the features list of vdpa_sim.
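+
+The feature bit matters because the virtio-net driver only reads the
+MAC from config space when the device offers VIRTIO_NET_F_MAC, and
+otherwise generates its own. Roughly (a simplified sketch of that
+probe-time logic, not the exact virtio-net code):
+
+    if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
+        virtio_cread_bytes(vdev, offsetof(struct virtio_net_config, mac),
+                           dev->dev_addr, dev->addr_len); /* keep device MAC */
+    else
+        eth_hw_addr_random(dev);        /* new random MAC on every probe */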
+
+Fixes: 2c53d0f64c06 ("vdpasim: vDPA device simulator")
+Cc: jasowang@redhat.com
+Signed-off-by: Laurent Vivier <lvivier@redhat.com>
+Link: https://lore.kernel.org/r/20201029122050.776445-2-lvivier@redhat.com
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vdpa/vdpa_sim/vdpa_sim.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+index 62d6403271450..01ef22d03a8c1 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+@@ -60,7 +60,8 @@ struct vdpasim_virtqueue {
+
+ static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
+ (1ULL << VIRTIO_F_VERSION_1) |
+- (1ULL << VIRTIO_F_ACCESS_PLATFORM);
++ (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
++ (1ULL << VIRTIO_NET_F_MAC);
+
+ /* State of each vdpasim device */
+ struct vdpasim {
+--
+2.27.0
+
--- /dev/null
+From ea9edc65049907f2b2c9e140938bd09e6b6511b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jul 2020 12:18:45 -0700
+Subject: video: fbdev: pvr2fb: initialize variables
+
+From: Tom Rix <trix@redhat.com>
+
+[ Upstream commit 8e1ba47c60bcd325fdd097cd76054639155e5d2e ]
+
+clang static analysis reports this representative error
+
+pvr2fb.c:1049:2: warning: 1st function call argument
+ is an uninitialized value [core.CallAndMessage]
+ if (*cable_arg)
+ ^~~~~~~~~~~~~~~
+
+The problem is that cable_arg depends on the input loop to
+set cable_arg[0]. If it does not, then some random
+value from the stack is used.
+
+A similar problem exists for output_arg.
+
+So initialize cable_arg and output_arg.
+
+Signed-off-by: Tom Rix <trix@redhat.com>
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200720191845.20115-1-trix@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/pvr2fb.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
+index 2d9f69b93392a..f4add36cb5f4d 100644
+--- a/drivers/video/fbdev/pvr2fb.c
++++ b/drivers/video/fbdev/pvr2fb.c
+@@ -1028,6 +1028,8 @@ static int __init pvr2fb_setup(char *options)
+ if (!options || !*options)
+ return 0;
+
++ cable_arg[0] = output_arg[0] = 0;
++
+ while ((this_opt = strsep(&options, ","))) {
+ if (!*this_opt)
+ continue;
+--
+2.27.0
+
--- /dev/null
+From a8d1179e0149f29467a483a0cf5be2b8d655edc2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Oct 2020 16:42:25 +0200
+Subject: x86/alternative: Don't call text_poke() in lazy TLB mode
+
+From: Juergen Gross <jgross@suse.com>
+
+[ Upstream commit abee7c494d8c41bb388839bccc47e06247f0d7de ]
+
+When running in lazy TLB mode the currently active page tables might
+be the ones of a previous process, e.g. when running a kernel thread.
+
+This can be problematic in case kernel code is being modified via
+text_poke() in a kernel thread, and on another processor exit_mmap()
+is active for the process which was running on the first cpu before
+the kernel thread.
+
+As text_poke() is using a temporary address space and the former
+address space (obtained via cpu_tlbstate.loaded_mm) is restored
+afterwards, there is a race possible in case the cpu on which
+exit_mmap() is running wants to make sure there are no stale
+references to that address space on any cpu active (this e.g. is
+required when running as a Xen PV guest, where this problem has been
+observed and analyzed).
+
+In order to avoid that, drop off TLB lazy mode before switching to the
+temporary address space.
+
+Fixes: cefa929c034eb5d ("x86/mm: Introduce temporary mm structs")
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20201009144225.12019-1-jgross@suse.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/alternative.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index cdaab30880b91..cd6be6f143e85 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -807,6 +807,15 @@ static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
+ temp_mm_state_t temp_state;
+
+ lockdep_assert_irqs_disabled();
++
++ /*
++ * Make sure not to be in TLB lazy mode, as otherwise we'll end up
++ * with a stale address space WITHOUT being in lazy mode after
++ * restoring the previous mm.
++ */
++ if (this_cpu_read(cpu_tlbstate.is_lazy))
++ leave_mm(smp_processor_id());
++
+ temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+ switch_mm_irqs_off(NULL, mm, current);
+
+--
+2.27.0
+
--- /dev/null
+From 5881e73040272a7423987a98c6c5b34e2095e364 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Jul 2020 19:07:57 -0400
+Subject: x86/kaslr: Initialize mem_limit to the real maximum address
+
+From: Arvind Sankar <nivedita@alum.mit.edu>
+
+[ Upstream commit 451286940d95778e83fa7f97006316d995b4c4a8 ]
+
+On 64-bit, the kernel must be placed below MAXMEM (64TiB with 4-level
+paging or 4PiB with 5-level paging). This is currently not enforced by
+KASLR, which thus implicitly relies on physical memory being limited to
+less than 64TiB.
+
+On 32-bit, the limit is KERNEL_IMAGE_SIZE (512MiB). This is enforced by
+special checks in __process_mem_region().
+
+Initialize mem_limit to the maximum (depending on architecture), instead
+of ULLONG_MAX, and make sure the command-line arguments can only
+decrease it. This makes the enforcement explicit on 64-bit, and
+eliminates the 32-bit specific checks to keep the kernel below 512M.
+
+Check upfront to make sure the minimum address is below the limit before
+doing any work.
+
+Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Kees Cook <keescook@chromium.org>
+Link: https://lore.kernel.org/r/20200727230801.3468620-5-nivedita@alum.mit.edu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/boot/compressed/kaslr.c | 41 +++++++++++++++++---------------
+ 1 file changed, 22 insertions(+), 19 deletions(-)
+
+diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
+index dde7cb3724df3..9bd966ef7d19e 100644
+--- a/arch/x86/boot/compressed/kaslr.c
++++ b/arch/x86/boot/compressed/kaslr.c
+@@ -87,8 +87,11 @@ static unsigned long get_boot_seed(void)
+ static bool memmap_too_large;
+
+
+-/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
+-static unsigned long long mem_limit = ULLONG_MAX;
++/*
++ * Store memory limit: MAXMEM on 64-bit and KERNEL_IMAGE_SIZE on 32-bit.
++ * It may be reduced by "mem=nn[KMG]" or "memmap=nn[KMG]" command line options.
++ */
++static unsigned long long mem_limit;
+
+ /* Number of immovable memory regions */
+ static int num_immovable_mem;
+@@ -214,7 +217,7 @@ static void mem_avoid_memmap(enum parse_mode mode, char *str)
+
+ if (start == 0) {
+ /* Store the specified memory limit if size > 0 */
+- if (size > 0)
++ if (size > 0 && size < mem_limit)
+ mem_limit = size;
+
+ continue;
+@@ -302,7 +305,8 @@ static void handle_mem_options(void)
+ if (mem_size == 0)
+ goto out;
+
+- mem_limit = mem_size;
++ if (mem_size < mem_limit)
++ mem_limit = mem_size;
+ } else if (!strcmp(param, "efi_fake_mem")) {
+ mem_avoid_memmap(PARSE_EFI, val);
+ }
+@@ -314,7 +318,9 @@ out:
+ }
+
+ /*
+- * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
++ * In theory, KASLR can put the kernel anywhere in the range of [16M, MAXMEM)
++ * on 64-bit, and [16M, KERNEL_IMAGE_SIZE) on 32-bit.
++ *
+ * The mem_avoid array is used to store the ranges that need to be avoided
+ * when KASLR searches for an appropriate random address. We must avoid any
+ * regions that are unsafe to overlap with during decompression, and other
+@@ -614,10 +620,6 @@ static void __process_mem_region(struct mem_vector *entry,
+ unsigned long start_orig, end;
+ struct mem_vector cur_entry;
+
+- /* On 32-bit, ignore entries entirely above our maximum. */
+- if (IS_ENABLED(CONFIG_X86_32) && entry->start >= KERNEL_IMAGE_SIZE)
+- return;
+-
+ /* Ignore entries entirely below our minimum. */
+ if (entry->start + entry->size < minimum)
+ return;
+@@ -650,11 +652,6 @@ static void __process_mem_region(struct mem_vector *entry,
+ /* Reduce size by any delta from the original address. */
+ region.size -= region.start - start_orig;
+
+- /* On 32-bit, reduce region size to fit within max size. */
+- if (IS_ENABLED(CONFIG_X86_32) &&
+- region.start + region.size > KERNEL_IMAGE_SIZE)
+- region.size = KERNEL_IMAGE_SIZE - region.start;
+-
+ /* Return if region can't contain decompressed kernel */
+ if (region.size < image_size)
+ return;
+@@ -839,15 +836,16 @@ static void process_e820_entries(unsigned long minimum,
+ static unsigned long find_random_phys_addr(unsigned long minimum,
+ unsigned long image_size)
+ {
++ /* Bail out early if it's impossible to succeed. */
++ if (minimum + image_size > mem_limit)
++ return 0;
++
+ /* Check if we had too many memmaps. */
+ if (memmap_too_large) {
+ debug_putstr("Aborted memory entries scan (more than 4 memmap= args)!\n");
+ return 0;
+ }
+
+- /* Make sure minimum is aligned. */
+- minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
+-
+ if (process_efi_entries(minimum, image_size))
+ return slots_fetch_random();
+
+@@ -860,8 +858,6 @@ static unsigned long find_random_virt_addr(unsigned long minimum,
+ {
+ unsigned long slots, random_addr;
+
+- /* Make sure minimum is aligned. */
+- minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
+ /* Align image_size for easy slot calculations. */
+ image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);
+
+@@ -908,6 +904,11 @@ void choose_random_location(unsigned long input,
+ /* Prepare to add new identity pagetables on demand. */
+ initialize_identity_maps();
+
++ if (IS_ENABLED(CONFIG_X86_32))
++ mem_limit = KERNEL_IMAGE_SIZE;
++ else
++ mem_limit = MAXMEM;
++
+ /* Record the various known unsafe memory ranges. */
+ mem_avoid_init(input, input_size, *output);
+
+@@ -917,6 +918,8 @@ void choose_random_location(unsigned long input,
+ * location:
+ */
+ min_addr = min(*output, 512UL << 20);
++ /* Make sure minimum is aligned. */
++ min_addr = ALIGN(min_addr, CONFIG_PHYSICAL_ALIGN);
+
+ /* Walk available memory entries to find a random address. */
+ random_addr = find_random_phys_addr(min_addr, output_size);
+--
+2.27.0
+
--- /dev/null
+From 133a968abb025292915304c3af5cf5151fba17ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Oct 2020 07:30:51 +0200
+Subject: x86/unwind/orc: Fix inactive tasks with stack pointer in %sp on GCC
+ 10 compiled kernels
+
+From: Jiri Slaby <jslaby@suse.cz>
+
+[ Upstream commit f2ac57a4c49d40409c21c82d23b5706df9b438af ]
+
+GCC 10 optimizes the scheduler code differently than its predecessors.
+
+When CONFIG_DEBUG_SECTION_MISMATCH=y, the Makefile forces GCC not
+to inline some functions (-fno-inline-functions-called-once). Before GCC
+10, "no-inlined" __schedule() starts with the usual prologue:
+
+ push %bp
+ mov %sp, %bp
+
+So the ORC unwinder simply picks stack pointer from %bp and
+unwinds from __schedule() just perfectly:
+
+ $ cat /proc/1/stack
+ [<0>] ep_poll+0x3e9/0x450
+ [<0>] do_epoll_wait+0xaa/0xc0
+ [<0>] __x64_sys_epoll_wait+0x1a/0x20
+ [<0>] do_syscall_64+0x33/0x40
+ [<0>] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+But now, with GCC 10, there is no %bp prologue in __schedule():
+
+ $ cat /proc/1/stack
+ <nothing>
+
+The ORC entry of the point in __schedule() is:
+
+ sp:sp+88 bp:last_sp-48 type:call end:0
+
+In this case, nobody subtracts sizeof "struct inactive_task_frame" in
+__unwind_start(). The struct is put on the stack by __switch_to_asm() and
+only then __switch_to_asm() stores %sp to task->thread.sp. But we start
+unwinding from a point in __schedule() (stored in frame->ret_addr by
+'call') and not in __switch_to_asm().
+
+So for these example values in __unwind_start():
+
+ sp=ffff94b50001fdc8 bp=ffff8e1f41d29340 ip=__schedule+0x1f0
+
+The stack is:
+
+ ffff94b50001fdc8: ffff8e1f41578000 # struct inactive_task_frame
+ ffff94b50001fdd0: 0000000000000000
+ ffff94b50001fdd8: ffff8e1f41d29340
+ ffff94b50001fde0: ffff8e1f41611d40 # ...
+ ffff94b50001fde8: ffffffff93c41920 # bx
+ ffff94b50001fdf0: ffff8e1f41d29340 # bp
+ ffff94b50001fdf8: ffffffff9376cad0 # ret_addr (and end of the struct)
+
+0xffffffff9376cad0 is __schedule+0x1f0 (after the call to
+__switch_to_asm). Now follow those 88 bytes from the ORC entry (sp+88).
+The entry is correct, __schedule() really pushes 48 bytes (8*7) + 32 bytes
+via subq to store some local values (like 4U below). So to unwind, look
+at the offset 88-sizeof(long) = 0x50 from here:
+
+ ffff94b50001fe00: ffff8e1f41578618
+ ffff94b50001fe08: 00000cc000000255
+ ffff94b50001fe10: 0000000500000004
+ ffff94b50001fe18: 7793fab6956b2d00 # NOTE (see below)
+ ffff94b50001fe20: ffff8e1f41578000
+ ffff94b50001fe28: ffff8e1f41578000
+ ffff94b50001fe30: ffff8e1f41578000
+ ffff94b50001fe38: ffff8e1f41578000
+ ffff94b50001fe40: ffff94b50001fed8
+ ffff94b50001fe48: ffff8e1f41577ff0
+ ffff94b50001fe50: ffffffff9376cf12
+
+Here ^^^^^^^^^^^^^^^^ is the correct ret addr from
+__schedule(). It translates to schedule+0x42 (insn after a call to
+__schedule()).
+
+BUT, unwind_next_frame() tries to take the address starting from
+0xffff94b50001fdc8. That is exactly from thread.sp+88-sizeof(long) =
+0xffff94b50001fdc8+88-8 = 0xffff94b50001fe18, which is garbage marked as
+NOTE above. So this quits the unwinding as 7793fab6956b2d00 is obviously
+not a kernel address.
+
+There was a fix to skip 'struct inactive_task_frame' in
+unwind_get_return_address_ptr in the following commit:
+
+ 187b96db5ca7 ("x86/unwind/orc: Fix unwind_get_return_address_ptr() for inactive tasks")
+
+But we need to skip the struct already in the unwinder proper. So
+subtract the size (increase the stack pointer) of the structure in
+__unwind_start() directly. This allows for removal of the code added by
+commit 187b96db5ca7 completely, as the address is now at
+'(unsigned long *)state->sp - 1', the same as in the generic case.
+
+[ mingo: Cleaned up the changelog a bit, for better readability. ]
+
+Fixes: ee9f8fce9964 ("x86/unwind: Add the ORC unwinder")
+Bug: https://bugzilla.suse.com/show_bug.cgi?id=1176907
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lore.kernel.org/r/20201014053051.24199-1-jslaby@suse.cz
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/unwind_orc.c | 9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
+index ec88bbe08a328..4a96aa3de7d8a 100644
+--- a/arch/x86/kernel/unwind_orc.c
++++ b/arch/x86/kernel/unwind_orc.c
+@@ -320,19 +320,12 @@ EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
+ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
+ {
+- struct task_struct *task = state->task;
+-
+ if (unwind_done(state))
+ return NULL;
+
+ if (state->regs)
+ return &state->regs->ip;
+
+- if (task != current && state->sp == task->thread.sp) {
+- struct inactive_task_frame *frame = (void *)task->thread.sp;
+- return &frame->ret_addr;
+- }
+-
+ if (state->sp)
+ return (unsigned long *)state->sp - 1;
+
+@@ -662,7 +655,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
+ } else {
+ struct inactive_task_frame *frame = (void *)task->thread.sp;
+
+- state->sp = task->thread.sp;
++ state->sp = task->thread.sp + sizeof(*frame);
+ state->bp = READ_ONCE_NOCHECK(frame->bp);
+ state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
+ state->signal = (void *)state->ip == ret_from_fork;
+--
+2.27.0
+
--- /dev/null
+From 7a639046ccbb1b6bbf84318acadfc9abf18e630d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2020 16:07:13 +0200
+Subject: xen: gntdev: fix common struct sg_table related issues
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+[ Upstream commit d1749eb1ab85e04e58c29e58900e3abebbdd6e82 ]
+
+The Documentation/DMA-API-HOWTO.txt states that the dma_map_sg() function
+returns the number of the created entries in the DMA address space.
+However, the subsequent dma_sync_sg_for_{device,cpu}() and
+dma_unmap_sg() calls must be made with the original number of entries
+passed to dma_map_sg().
+
+struct sg_table is a common structure used for describing a non-contiguous
+memory buffer, used commonly in the DRM and graphics subsystems. It
+consists of a scatterlist with memory pages and DMA addresses (sgl entry),
+as well as the number of scatterlist entries: CPU pages (orig_nents entry)
+and DMA mapped pages (nents entry).
+
+It turned out that it was a common mistake to misuse nents and orig_nents
+entries, calling DMA-mapping functions with a wrong number of entries or
+ignoring the number of mapped entries returned by the dma_map_sg()
+function.
+
+To avoid such issues, lets use a common dma-mapping wrappers operating
+directly on the struct sg_table objects and use scatterlist page
+iterators where possible. This, almost always, hides references to the
+nents and orig_nents entries, making the code robust, easier to follow
+and copy/paste safe.
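+
+For reference, the wrapper pattern the conversion below relies on, as a
+minimal sketch ("dev", "sgt", "piter" and handle_page() are
+placeholders; dma_map_sgtable() returns 0 on success, so the mapped
+entry count no longer has to be carried around by the caller):
+
+    if (dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0))
+        return -ENOMEM;                         /* non-zero means failure */
+
+    for_each_sgtable_page(sgt, &piter, 0)       /* walks CPU pages */
+        handle_page(sg_page_iter_page(&piter));
+
+    dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);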
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/gntdev-dmabuf.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
+index b1b6eebafd5de..4c13cbc99896a 100644
+--- a/drivers/xen/gntdev-dmabuf.c
++++ b/drivers/xen/gntdev-dmabuf.c
+@@ -247,10 +247,9 @@ static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
+
+ if (sgt) {
+ if (gntdev_dmabuf_attach->dir != DMA_NONE)
+- dma_unmap_sg_attrs(attach->dev, sgt->sgl,
+- sgt->nents,
+- gntdev_dmabuf_attach->dir,
+- DMA_ATTR_SKIP_CPU_SYNC);
++ dma_unmap_sgtable(attach->dev, sgt,
++ gntdev_dmabuf_attach->dir,
++ DMA_ATTR_SKIP_CPU_SYNC);
+ sg_free_table(sgt);
+ }
+
+@@ -288,8 +287,8 @@ dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
+ sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
+ gntdev_dmabuf->nr_pages);
+ if (!IS_ERR(sgt)) {
+- if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+- DMA_ATTR_SKIP_CPU_SYNC)) {
++ if (dma_map_sgtable(attach->dev, sgt, dir,
++ DMA_ATTR_SKIP_CPU_SYNC)) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = ERR_PTR(-ENOMEM);
+@@ -633,7 +632,7 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
+
+ /* Now convert sgt to array of pages and check for page validity. */
+ i = 0;
+- for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
++ for_each_sgtable_page(sgt, &sg_iter, 0) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+ /*
+ * Check if page is valid: this can happen if we are given
+--
+2.27.0
+
--- /dev/null
+From b0d1f5d4ecb322ad07487ff945912fe3140ba93f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Sep 2020 09:41:06 -0700
+Subject: xfs: avoid LR buffer overrun due to crafted h_len
+
+From: Gao Xiang <hsiangkao@redhat.com>
+
+[ Upstream commit f692d09e9c8fd0f5557c2e87f796a16dd95222b8 ]
+
+Currently, crafted h_len has been blocked for the log
+header of the tail block in commit a70f9fe52daa ("xfs:
+detect and handle invalid iclog size set by mkfs").
+
+However, each log record could still have crafted h_len
+and cause log record buffer overrun. So let's check
+h_len vs buffer size for each log record as well.
+
+Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Brian Foster <bfoster@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/xfs_log_recover.c | 39 +++++++++++++++++++--------------------
+ 1 file changed, 19 insertions(+), 20 deletions(-)
+
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index e2ec91b2d0f46..9ceb67d0f2565 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -2904,7 +2904,8 @@ STATIC int
+ xlog_valid_rec_header(
+ struct xlog *log,
+ struct xlog_rec_header *rhead,
+- xfs_daddr_t blkno)
++ xfs_daddr_t blkno,
++ int bufsize)
+ {
+ int hlen;
+
+@@ -2920,10 +2921,14 @@ xlog_valid_rec_header(
+ return -EFSCORRUPTED;
+ }
+
+- /* LR body must have data or it wouldn't have been written */
++ /*
++ * LR body must have data (or it wouldn't have been written)
++ * and h_len must not be greater than LR buffer size.
++ */
+ hlen = be32_to_cpu(rhead->h_len);
+- if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > INT_MAX))
++ if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize))
+ return -EFSCORRUPTED;
++
+ if (XFS_IS_CORRUPT(log->l_mp,
+ blkno > log->l_logBBsize || blkno > INT_MAX))
+ return -EFSCORRUPTED;
+@@ -2984,9 +2989,6 @@ xlog_do_recovery_pass(
+ goto bread_err1;
+
+ rhead = (xlog_rec_header_t *)offset;
+- error = xlog_valid_rec_header(log, rhead, tail_blk);
+- if (error)
+- goto bread_err1;
+
+ /*
+ * xfsprogs has a bug where record length is based on lsunit but
+@@ -3001,21 +3003,18 @@ xlog_do_recovery_pass(
+ */
+ h_size = be32_to_cpu(rhead->h_size);
+ h_len = be32_to_cpu(rhead->h_len);
+- if (h_len > h_size) {
+- if (h_len <= log->l_mp->m_logbsize &&
+- be32_to_cpu(rhead->h_num_logops) == 1) {
+- xfs_warn(log->l_mp,
++ if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
++ rhead->h_num_logops == cpu_to_be32(1)) {
++ xfs_warn(log->l_mp,
+ "invalid iclog size (%d bytes), using lsunit (%d bytes)",
+- h_size, log->l_mp->m_logbsize);
+- h_size = log->l_mp->m_logbsize;
+- } else {
+- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
+- log->l_mp);
+- error = -EFSCORRUPTED;
+- goto bread_err1;
+- }
++ h_size, log->l_mp->m_logbsize);
++ h_size = log->l_mp->m_logbsize;
+ }
+
++ error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
++ if (error)
++ goto bread_err1;
++
+ if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
+ (h_size > XLOG_HEADER_CYCLE_SIZE)) {
+ hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
+@@ -3096,7 +3095,7 @@ xlog_do_recovery_pass(
+ }
+ rhead = (xlog_rec_header_t *)offset;
+ error = xlog_valid_rec_header(log, rhead,
+- split_hblks ? blk_no : 0);
++ split_hblks ? blk_no : 0, h_size);
+ if (error)
+ goto bread_err2;
+
+@@ -3177,7 +3176,7 @@ xlog_do_recovery_pass(
+ goto bread_err2;
+
+ rhead = (xlog_rec_header_t *)offset;
+- error = xlog_valid_rec_header(log, rhead, blk_no);
++ error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
+ if (error)
+ goto bread_err2;
+
+--
+2.27.0
+
--- /dev/null
+From c3e274af6835f3dfea379f89e3c0f46cc4c7a29b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Sep 2020 17:39:51 -0700
+Subject: xfs: change the order in which child and parent defer ops are
+ finished
+
+From: Darrick J. Wong <darrick.wong@oracle.com>
+
+[ Upstream commit 27dada070d59c28a441f1907d2cec891b17dcb26 ]
+
+The defer ops code has been finishing items in the wrong order -- if a
+top level defer op creates items A and B, and finishing item A creates
+more defer ops A1 and A2, we'll put the new items on the end of the
+chain and process them in the order A B A1 A2. This is kind of weird,
+since it's convenient for programmers to be able to think of A and B as
+an ordered sequence where all the sub-tasks for A must finish before we
+move on to B, e.g. A A1 A2 B.
+
+Right now, our log intent items are not so complex that this matters,
+but this will become important for the atomic extent swapping patchset.
+In order to maintain correct reference counting of extents, we have to
+unmap and remap extents in that order, and we want to complete that work
+before moving on to the next range that the user wants to swap. This
+patch fixes defer ops to satisfy that requirement.
+
+The primary symptom of the incorrect order was noticed in an early
+performance analysis of the atomic extent swap code. An astonishingly
+large number of deferred work items accumulated when userspace requested
+an atomic update of two very fragmented files. The cause of this was
+traced to the same ordering bug in the inner loop of
+xfs_defer_finish_noroll.
+
+If the ->finish_item method of a deferred operation queues new deferred
+operations, those new deferred ops are appended to the tail of the
+pending work list. To illustrate, say that a caller creates a
+transaction t0 with four deferred operations D0-D3. The first thing
+defer ops does is roll the transaction to t1, leaving us with:
+
+t1: D0(t0), D1(t0), D2(t0), D3(t0)
+
+Let's say that finishing each of D0-D3 will create two new deferred ops.
+After finish D0 and roll, we'll have the following chain:
+
+t2: D1(t0), D2(t0), D3(t0), d4(t1), d5(t1)
+
+d4 and d5 were logged to t1. Notice that while we're about to start
+work on D1, we haven't actually completed all the work implied by D0
+being finished. So far we've been careful (or lucky) to structure the
+dfops callers such that D1 doesn't depend on d4 or d5 being finished,
+but this is a potential logic bomb.
+
+There's a second problem lurking. Let's see what happens as we finish
+D1-D3:
+
+t3: D2(t0), D3(t0), d4(t1), d5(t1), d6(t2), d7(t2)
+t4: D3(t0), d4(t1), d5(t1), d6(t2), d7(t2), d8(t3), d9(t3)
+t5: d4(t1), d5(t1), d6(t2), d7(t2), d8(t3), d9(t3), d10(t4), d11(t4)
+
+Let's say that d4-d11 are simple work items that don't queue any other
+operations, which means that we can complete each of them in a single
+transaction.  Finishing d4 and rolling leaves us with:
+
+t6: d5(t1), d6(t2), d7(t2), d8(t3), d9(t3), d10(t4), d11(t4)
+t7: d6(t2), d7(t2), d8(t3), d9(t3), d10(t4), d11(t4)
+...
+t11: d10(t4), d11(t4)
+t12: d11(t4)
+<done>
+
+When we try to roll to transaction #12, we're holding defer op d11,
+which we logged way back in t4. This means that the tail of the log is
+pinned at t4. If the log is very small or there are a lot of other
+threads updating metadata, this means that we might have wrapped the log
+and cannot roll to t12 because there isn't enough space left before
+we'd run into t4.
+
+Let's shift back to the original failure. I mentioned before that I
+discovered this flaw while developing the atomic file update code. In
+that scenario, we have a defer op (D0) that finds a range of file blocks
+to remap, creates a handful of new defer ops to do that, and then asks
+to be continued with however much work remains.
+
+So, D0 is the original swapext deferred op. The first thing defer ops
+does is rolls to t1:
+
+t1: D0(t0)
+
+We try to finish D0, logging d1 and d2 in the process, but can't get all
+the work done. We log a done item and a new intent item for the work
+that D0 still has to do, and roll to t2:
+
+t2: D0'(t1), d1(t1), d2(t1)
+
+We roll and try to finish D0', but still can't get all the work done, so
+we log a done item and a new intent item for it, requeue D0 a second
+time, and roll to t3:
+
+t3: D0''(t2), d1(t1), d2(t1), d3(t2), d4(t2)
+
+If it takes 48 more rolls to complete D0, then we'll finally dispense
+with D0 in t50:
+
+t50: D<fifty primes>(t49), d1(t1), ..., d102(t50)
+
+We then try to roll again to get a chain like this:
+
+t51: d1(t1), d2(t1), ..., d101(t50), d102(t50)
+...
+t152: d102(t50)
+<done>
+
+Notice that in rolling to transaction #51, we're holding on to a log
+intent item for d1 that was logged in transaction #1. This means that
+the tail of the log is pinned at t1. If the log is very small or there
+are a lot of other threads updating metadata, this means that we might
+have wrapped the log and cannot roll to t51 because there isn't enough
+space left before we'd run into t1. This is of course problem #2 again.
+
+But notice the third problem with this scenario: we have 102 defer ops
+tied to this transaction! Each of these items is backed by pinned
+kernel memory, which means that we risk OOM if the chains get too long.
+
+Yikes. Problem #1 is a subtle logic bomb that could hit someone in the
+future; problem #2 applies (rarely) to the current upstream, and problem
+#3 applies to work under development.
+
+This is not how incremental deferred operations were supposed to work.
+The dfops design of logging in the same transaction an intent-done item
+and a new intent item for the work remaining was to make it so that we
+only have to juggle enough deferred work items to finish that one small
+piece of work. Deferred log item recovery will find that first
+unfinished work item and restart it, no matter how many other intent
+items might follow it in the log. Therefore, it's ok to put the new
+intents at the start of the dfops chain.
+
+For the first example, the chains look like this:
+
+t2: d4(t1), d5(t1), D1(t0), D2(t0), D3(t0)
+t3: d5(t1), D1(t0), D2(t0), D3(t0)
+...
+t9: d9(t7), D3(t0)
+t10: D3(t0)
+t11: d10(t10), d11(t10)
+t12: d11(t10)
+
+For the second example, the chains look like this:
+
+t1: D0(t0)
+t2: d1(t1), d2(t1), D0'(t1)
+t3: d2(t1), D0'(t1)
+t4: D0'(t1)
+t5: d1(t4), d2(t4), D0''(t4)
+...
+t148: D0<50 primes>(t147)
+t149: d101(t148), d102(t148)
+t150: d102(t148)
+<done>
+
+This actually sucks more for pinning the log tail (we try to roll to t10
+while holding an intent item that was logged in t1) but we've solved
+problem #1. We've also reduced the maximum chain length from:
+
+ sum(all the new items) + nr_original_items
+
+to:
+
+ max(new items that each original item creates) + nr_original_items
+
+This solves problem #3 by sharply reducing the number of defer ops that
+can be attached to a transaction at any given time. The change makes
+the problem of log tail pinning worse, but it is an improvement we need
+in order to solve problem #2.  Actually solving #2, however, is left to
+the next patch.
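+
+For readers who want to play with the ordering itself, here is a small
+userspace toy model (an editor's sketch, not kernel code; the list
+handling is simplified and all names are invented).  It compares
+appending child work items at the tail of the pending list with
+splicing them in at the head, and prints the resulting processing
+order:
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct item {
+          char name[8];
+          int children;           /* new items created by finishing this one */
+          struct item *next;
+  };
+
+  static struct item *new_item(const char *name, int children)
+  {
+          struct item *it = calloc(1, sizeof(*it));
+
+          snprintf(it->name, sizeof(it->name), "%s", name);
+          it->children = children;
+          return it;
+  }
+
+  static void run(int splice_head)
+  {
+          struct item *head = NULL, **tail = &head;
+          int seq = 0;
+
+          /* Four top-level items D0-D3, each creating two children. */
+          for (int i = 0; i < 4; i++) {
+                  char buf[8];
+
+                  snprintf(buf, sizeof(buf), "D%d", i);
+                  *tail = new_item(buf, 2);
+                  tail = &(*tail)->next;
+          }
+
+          printf("%s:", splice_head ? "head-splice" : "tail-splice");
+          while (head) {
+                  struct item *it = head;
+                  struct item *kids = NULL, **ktail = &kids;
+
+                  head = it->next;
+                  if (!head)
+                          tail = &head;
+                  printf(" %s", it->name);
+
+                  /* Finishing 'it' queues its children, in order. */
+                  for (int c = 0; c < it->children; c++) {
+                          char buf[8];
+
+                          snprintf(buf, sizeof(buf), "d%d", seq++);
+                          *ktail = new_item(buf, 0);
+                          ktail = &(*ktail)->next;
+                  }
+
+                  if (kids) {
+                          if (splice_head) {      /* new: children run next */
+                                  *ktail = head;
+                                  head = kids;
+                                  if (!*ktail)
+                                          tail = ktail;
+                          } else {                /* old: children run last */
+                                  *tail = kids;
+                                  tail = ktail;
+                          }
+                  }
+                  free(it);
+          }
+          printf("\n");
+  }
+
+  int main(void)
+  {
+          run(0);
+          run(1);
+          return 0;
+  }
+
+With tail splicing every original item runs before any child; with head
+splicing the children of each item run immediately after it, which is
+the ordering this patch establishes.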
+
+Note that a subsequent analysis of some hard-to-trigger reflink and COW
+livelocks on extremely fragmented filesystems (or systems running a lot
+of IO threads) showed the same symptoms -- uncomfortably large numbers
+of incore deferred work items and occasional stalls in the transaction
+grant code while waiting for log reservations. I think this patch and
+the next one will also solve these problems.
+
+As originally written, the code used list_splice_tail_init instead of
+list_splice_init, so change that, and leave a short comment explaining
+our actions.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Reviewed-by: Brian Foster <bfoster@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/libxfs/xfs_defer.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
+index 29e9762f3b77c..4959d8a32b606 100644
+--- a/fs/xfs/libxfs/xfs_defer.c
++++ b/fs/xfs/libxfs/xfs_defer.c
+@@ -430,8 +430,17 @@ xfs_defer_finish_noroll(
+
+ /* Until we run out of pending work to finish... */
+ while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
++ /*
++ * Deferred items that are created in the process of finishing
++ * other deferred work items should be queued at the head of
++ * the pending list, which puts them ahead of the deferred work
++ * that was created by the caller. This keeps the number of
++ * pending work items to a minimum, which decreases the amount
++ * of time that any one intent item can stick around in memory,
++ * pinning the log tail.
++ */
+ xfs_defer_create_intents(*tp);
+- list_splice_tail_init(&(*tp)->t_dfops, &dop_pending);
++ list_splice_init(&(*tp)->t_dfops, &dop_pending);
+
+ error = xfs_defer_trans_roll(tp);
+ if (error)
+--
+2.27.0
+
--- /dev/null
+From 16b172d053828ced1ac37b32dcb94f25ab9c1d5b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Sep 2020 09:15:08 -0700
+Subject: xfs: don't free rt blocks when we're doing a REMAP bunmapi call
+
+From: Darrick J. Wong <darrick.wong@oracle.com>
+
+[ Upstream commit 8df0fa39bdd86ca81a8d706a6ed9d33cc65ca625 ]
+
+When callers pass XFS_BMAPI_REMAP into xfs_bunmapi, they want the extent
+to be unmapped from the given file fork without the extent being freed.
+We do this for non-rt files, but we forgot to do this for realtime
+files. So far this isn't a big deal since nobody makes a bunmapi call
+to a rt file with the REMAP flag set, but don't leave a logic bomb.
+
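+Stated as a short standalone C sketch (an editor's illustration with
+invented names, not the XFS code): a remap-style unmap drops the
+mapping but must leave the underlying blocks allocated.
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  #define UNMAP_REMAP (1u << 0)   /* stands in for XFS_BMAPI_REMAP */
+
+  static bool block_allocated = true;  /* stand-in for rt allocator state */
+  static bool block_mapped = true;     /* stand-in for the file's extent map */
+
+  /* Drop a mapping; free the underlying block only for a real unmap. */
+  static void unmap_block(unsigned int flags)
+  {
+          if (!(flags & UNMAP_REMAP))
+                  block_allocated = false;
+          block_mapped = false;
+  }
+
+  int main(void)
+  {
+          unmap_block(UNMAP_REMAP);
+          printf("after remap unmap: mapped=%d allocated=%d\n",
+                 block_mapped, block_allocated);
+          return 0;
+  }
+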
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/libxfs/xfs_bmap.c | 19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index 1b0a01b06a05d..d9a692484eaed 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -5046,20 +5046,25 @@ xfs_bmap_del_extent_real(
+
+ flags = XFS_ILOG_CORE;
+ if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
+- xfs_fsblock_t bno;
+ xfs_filblks_t len;
+ xfs_extlen_t mod;
+
+- bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize,
+- &mod);
+- ASSERT(mod == 0);
+ len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
+ &mod);
+ ASSERT(mod == 0);
+
+- error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
+- if (error)
+- goto done;
++ if (!(bflags & XFS_BMAPI_REMAP)) {
++ xfs_fsblock_t bno;
++
++ bno = div_u64_rem(del->br_startblock,
++ mp->m_sb.sb_rextsize, &mod);
++ ASSERT(mod == 0);
++
++ error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
++ if (error)
++ goto done;
++ }
++
+ do_fx = 0;
+ nblks = len * mp->m_sb.sb_rextsize;
+ qfield = XFS_TRANS_DQ_RTBCOUNT;
+--
+2.27.0
+
--- /dev/null
+From c95b9d4dbefd14220985ed47955ba0bc70d7b9d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Oct 2020 13:55:16 -0700
+Subject: xfs: fix realtime bitmap/summary file truncation when growing rt
+ volume
+
+From: Darrick J. Wong <darrick.wong@oracle.com>
+
+[ Upstream commit f4c32e87de7d66074d5612567c5eac7325024428 ]
+
+The realtime bitmap and summary files are regular files that are hidden
+away from the directory tree. Since they're regular files, inode
+inactivation will try to purge what it thinks are speculative
+preallocations beyond the incore size of the file. Unfortunately,
+xfs_growfs_rt forgets to update the incore size when it resizes the
+inodes, with the result that inactivating the rt inodes at unmount time
+will cause their contents to be truncated.
+
+Fix this by updating the incore size when we change the ondisk size as
+part of updating the superblock. Note that we don't do this when we're
+allocating blocks to the rt inodes because we actually want those blocks
+to get purged if the growfs fails.
+
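+The failure mode is easy to model in userspace.  The sketch below (an
+editor's toy model with invented names, not XFS code) trims blocks
+beyond the incore size, the way inode inactivation trims "posteof"
+blocks, and shows why growing only the ondisk size loses the newly
+added blocks:
+
+  #include <stdio.h>
+
+  struct toy_inode {
+          long long ondisk_size;   /* size recorded in the ondisk inode */
+          long long incore_size;   /* size the VFS (and inactivation) sees */
+          long long nblocks;       /* allocated 512-byte blocks */
+  };
+
+  /* Inactivation punches everything beyond the incore size. */
+  static void inactivate(struct toy_inode *ip)
+  {
+          long long keep = (ip->incore_size + 511) / 512;
+
+          if (ip->nblocks > keep)
+                  ip->nblocks = keep;
+  }
+
+  int main(void)
+  {
+          struct toy_inode rbm = { 4096, 4096, 8 };
+
+          /* Grow the file but forget the incore size: blocks get punched. */
+          rbm.nblocks = 16;
+          rbm.ondisk_size = 8192;
+          inactivate(&rbm);
+          printf("ondisk-only update: %lld blocks survive\n", rbm.nblocks);
+
+          /* Update both sizes, as this patch does: blocks survive. */
+          rbm.nblocks = 16;
+          rbm.incore_size = 8192;
+          inactivate(&rbm);
+          printf("both sizes updated: %lld blocks survive\n", rbm.nblocks);
+          return 0;
+  }
+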
+This fixes corruption complaints from the online rtsummary checker when
+running xfs/233. Since that test requires rmap, one can also trigger
+this by growing an rt volume, cycling the mount, and creating rt files.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Chandan Babu R <chandanrlinux@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/xfs_rtalloc.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
+index 0753b1dfd0750..be01bfbc3ad93 100644
+--- a/fs/xfs/xfs_rtalloc.c
++++ b/fs/xfs/xfs_rtalloc.c
+@@ -1027,10 +1027,13 @@ xfs_growfs_rt(
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+ /*
+- * Update the bitmap inode's size.
++ * Update the bitmap inode's size ondisk and incore. We need
++ * to update the incore size so that inode inactivation won't
++ * punch what it thinks are "posteof" blocks.
+ */
+ mp->m_rbmip->i_d.di_size =
+ nsbp->sb_rbmblocks * nsbp->sb_blocksize;
++ i_size_write(VFS_I(mp->m_rbmip), mp->m_rbmip->i_d.di_size);
+ xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
+ /*
+ * Get the summary inode into the transaction.
+@@ -1038,9 +1041,12 @@ xfs_growfs_rt(
+ xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
+ /*
+- * Update the summary inode's size.
++ * Update the summary inode's size. We need to update the
++ * incore size so that inode inactivation won't punch what it
++ * thinks are "posteof" blocks.
+ */
+ mp->m_rsumip->i_d.di_size = nmp->m_rsumsize;
++ i_size_write(VFS_I(mp->m_rsumip), mp->m_rsumip->i_d.di_size);
+ xfs_trans_log_inode(tp, mp->m_rsumip, XFS_ILOG_CORE);
+ /*
+ * Copy summary data from old to new sizes.
+--
+2.27.0
+
--- /dev/null
+From 10005ab8ba5a080100e7f0a347d1d10114903946 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Sep 2020 09:15:09 -0700
+Subject: xfs: log new intent items created as part of finishing recovered
+ intent items
+
+From: Darrick J. Wong <darrick.wong@oracle.com>
+
+[ Upstream commit 93293bcbde93567efaf4e6bcd58cad270e1fcbf5 ]
+
+During a code inspection, I found a serious bug in the log intent item
+recovery code when an intent item cannot complete all the work and
+decides to requeue itself to get that done. When this happens, the
+item recovery creates a new incore deferred op representing the
+remaining work and attaches it to the transaction that it allocated. At
+the end of _item_recover, it moves the entire chain of deferred ops to
+the dummy parent_tp that xlog_recover_process_intents passed to it, but
+fails to log a new intent item for the remaining work before committing
+the transaction for the single unit of work.
+
+xlog_finish_defer_ops logs those new intent items once recovery has
+finished dealing with the intent items that it recovered, but this isn't
+sufficient. If the log is forced to disk after a recovered log item
+decides to requeue itself and the system goes down before we call
+xlog_finish_defer_ops, the second log recovery will never see the new
+intent item and therefore has no idea that there was more work to do.
+It will finish recovery leaving the filesystem in a corrupted state.
+
+The same logic applies to /any/ deferred ops added during intent item
+recovery, not just the one handling the remaining work.
+
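+To make the crash-consistency argument concrete, here is a userspace
+toy model (an editor's sketch, not XFS code; the "log" is just an array
+of records).  A second recovery only sees work described by an intent
+record that has no matching done record, so requeued work that is never
+re-logged as a new intent simply disappears:
+
+  #include <stdio.h>
+
+  enum rec_type { INTENT, DONE };
+
+  struct record {
+          enum rec_type type;
+          int id;
+  };
+
+  /* Count intents in the log that have no matching done record. */
+  static int pending_after_crash(const struct record *log, int n)
+  {
+          int pending = 0;
+
+          for (int i = 0; i < n; i++) {
+                  int finished = 0;
+
+                  if (log[i].type != INTENT)
+                          continue;
+                  for (int j = 0; j < n; j++)
+                          if (log[j].type == DONE && log[j].id == log[i].id)
+                                  finished = 1;
+                  if (!finished)
+                          pending++;
+          }
+          return pending;
+  }
+
+  int main(void)
+  {
+          /* Recovered intent 1 requeues its leftover work as intent 2. */
+          struct record buggy[] = { { INTENT, 1 }, { DONE, 1 } };
+          struct record fixed[] = { { INTENT, 1 }, { DONE, 1 }, { INTENT, 2 } };
+
+          printf("work a second recovery would find: buggy=%d fixed=%d\n",
+                 pending_after_crash(buggy, 2), pending_after_crash(fixed, 3));
+          return 0;
+  }
+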
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/libxfs/xfs_defer.c | 26 ++++++++++++++++++++++++--
+ fs/xfs/libxfs/xfs_defer.h | 6 ++++++
+ fs/xfs/xfs_bmap_item.c | 2 +-
+ fs/xfs/xfs_refcount_item.c | 2 +-
+ 4 files changed, 32 insertions(+), 4 deletions(-)
+
+diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
+index d8f586256add7..29e9762f3b77c 100644
+--- a/fs/xfs/libxfs/xfs_defer.c
++++ b/fs/xfs/libxfs/xfs_defer.c
+@@ -186,8 +186,9 @@ xfs_defer_create_intent(
+ {
+ const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type];
+
+- dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work,
+- dfp->dfp_count, sort);
++ if (!dfp->dfp_intent)
++ dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work,
++ dfp->dfp_count, sort);
+ }
+
+ /*
+@@ -390,6 +391,7 @@ xfs_defer_finish_one(
+ list_add(li, &dfp->dfp_work);
+ dfp->dfp_count++;
+ dfp->dfp_done = NULL;
++ dfp->dfp_intent = NULL;
+ xfs_defer_create_intent(tp, dfp, false);
+ }
+
+@@ -552,3 +554,23 @@ xfs_defer_move(
+
+ xfs_defer_reset(stp);
+ }
++
++/*
++ * Prepare a chain of fresh deferred ops work items to be completed later. Log
++ * recovery requires the ability to put off until later the actual finishing
++ * work so that it can process unfinished items recovered from the log in
++ * correct order.
++ *
++ * Create and log intent items for all the work that we're capturing so that we
++ * can be assured that the items will get replayed if the system goes down
++ * before log recovery gets a chance to finish the work it put off. Then we
++ * move the chain from stp to dtp.
++ */
++void
++xfs_defer_capture(
++ struct xfs_trans *dtp,
++ struct xfs_trans *stp)
++{
++ xfs_defer_create_intents(stp);
++ xfs_defer_move(dtp, stp);
++}
+diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
+index 6b2ca580f2b06..3164199162b61 100644
+--- a/fs/xfs/libxfs/xfs_defer.h
++++ b/fs/xfs/libxfs/xfs_defer.h
+@@ -63,4 +63,10 @@ extern const struct xfs_defer_op_type xfs_rmap_update_defer_type;
+ extern const struct xfs_defer_op_type xfs_extent_free_defer_type;
+ extern const struct xfs_defer_op_type xfs_agfl_free_defer_type;
+
++/*
++ * Functions to capture a chain of deferred operations and continue them later.
++ * This doesn't normally happen except log recovery.
++ */
++void xfs_defer_capture(struct xfs_trans *dtp, struct xfs_trans *stp);
++
+ #endif /* __XFS_DEFER_H__ */
+diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
+index ec3691372e7c0..815a0563288f4 100644
+--- a/fs/xfs/xfs_bmap_item.c
++++ b/fs/xfs/xfs_bmap_item.c
+@@ -534,7 +534,7 @@ xfs_bui_item_recover(
+ xfs_bmap_unmap_extent(tp, ip, &irec);
+ }
+
+- xfs_defer_move(parent_tp, tp);
++ xfs_defer_capture(parent_tp, tp);
+ error = xfs_trans_commit(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ xfs_irele(ip);
+diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
+index ca93b64883774..492d80a0b4060 100644
+--- a/fs/xfs/xfs_refcount_item.c
++++ b/fs/xfs/xfs_refcount_item.c
+@@ -555,7 +555,7 @@ xfs_cui_item_recover(
+ }
+
+ xfs_refcount_finish_one_cleanup(tp, rcur, error);
+- xfs_defer_move(parent_tp, tp);
++ xfs_defer_capture(parent_tp, tp);
+ error = xfs_trans_commit(tp);
+ return error;
+
+--
+2.27.0
+
--- /dev/null
+From ef891334906f0f7fc86ae6853614c24beafc803c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Sep 2020 11:12:08 -0700
+Subject: xfs: Set xfs_buf's b_ops member when zeroing bitmap/summary files
+
+From: Chandan Babu R <chandanrlinux@gmail.com>
+
+[ Upstream commit c54e14d155f5fdbac73a8cd4bd2678cb252149dc ]
+
+In xfs_growfs_rt(), we enlarge bitmap and summary files by allocating
+new blocks for both files. For each of the new blocks allocated, we
+allocate an xfs_buf, zero the payload, log the contents and commit the
+transaction. Hence these buffers will eventually find themselves
+appended to the list at xfs_ail->ail_buf_list.
+
+Later, xfs_growfs_rt() loops across all of the new blocks belonging to
+the bitmap inode to set the bitmap values to 1. In doing so, it
+allocates a new transaction and invokes the following sequence of
+functions,
+ - xfs_rtfree_range()
+ - xfs_rtmodify_range()
+ - xfs_rtbuf_get()
+ We pass '&xfs_rtbuf_ops' as the ops pointer to xfs_trans_read_buf().
+ - xfs_trans_read_buf()
+ We find the xfs_buf of interest in the per-ag hash table, invoke
+ xfs_buf_reverify() which ends up assigning '&xfs_rtbuf_ops' to
+ xfs_buf->b_ops.
+
+On the other hand, if xfs_growfs_rt_alloc() had allocated a few blocks
+for the bitmap inode and returned with an error, all the xfs_bufs
+corresponding to the new bitmap blocks that have been allocated would
+continue to be on xfs_ail->ail_buf_list list without ever having a
+non-NULL value assigned to their b_ops members. An AIL flush operation
+would then trigger the following warning message to be printed on the
+console,
+
+ XFS (loop0): _xfs_buf_ioapply: no buf ops on daddr 0x58 len 8
+ 00000000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 00000010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 00000020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 00000030: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 00000040: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 00000050: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 00000060: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 00000070: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ CPU: 3 PID: 449 Comm: xfsaild/loop0 Not tainted 5.8.0-rc4-chandan-00038-g4d8c2b9de9ab-dirty #37
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
+ Call Trace:
+ dump_stack+0x57/0x70
+ _xfs_buf_ioapply+0x37c/0x3b0
+ ? xfs_rw_bdev+0x1e0/0x1e0
+ ? xfs_buf_delwri_submit_buffers+0xd4/0x210
+ __xfs_buf_submit+0x6d/0x1f0
+ xfs_buf_delwri_submit_buffers+0xd4/0x210
+ xfsaild+0x2c8/0x9e0
+ ? __switch_to_asm+0x42/0x70
+ ? xfs_trans_ail_cursor_first+0x80/0x80
+ kthread+0xfe/0x140
+ ? kthread_park+0x90/0x90
+ ret_from_fork+0x22/0x30
+
+This message indicates that the xfs_buf had its b_ops member set to
+NULL.
+
+This commit fixes the issue by assigning "&xfs_rtbuf_ops" to b_ops
+member of each of the xfs_bufs logged by xfs_growfs_rt_alloc().
+
+Signed-off-by: Chandan Babu R <chandanrlinux@gmail.com>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/xfs_rtalloc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
+index 912f96a248f25..0753b1dfd0750 100644
+--- a/fs/xfs/xfs_rtalloc.c
++++ b/fs/xfs/xfs_rtalloc.c
+@@ -849,6 +849,7 @@ xfs_growfs_rt_alloc(
+ goto out_trans_cancel;
+
+ xfs_trans_buf_set_type(tp, bp, buf_type);
++ bp->b_ops = &xfs_rtbuf_ops;
+ memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
+ xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
+ /*
+--
+2.27.0
+
--- /dev/null
+From 86a941bd2debe05ac7c0efb0208f76e5472c7d9b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Sep 2020 20:50:42 -0700
+Subject: xfs: Set xfs_buf type flag when growing summary/bitmap files
+
+From: Chandan Babu R <chandanrlinux@gmail.com>
+
+[ Upstream commit 72cc95132a93293dcd0b6f68353f4741591c9aeb ]
+
+The following sequence of commands,
+
+ mkfs.xfs -f -m reflink=0 -r rtdev=/dev/loop1,size=10M /dev/loop0
+ mount -o rtdev=/dev/loop1 /dev/loop0 /mnt
+ xfs_growfs /mnt
+
+... causes the following call trace to be printed on the console,
+
+XFS: Assertion failed: (bip->bli_flags & XFS_BLI_STALE) || (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF), file: fs/xfs/xfs_buf_item.c, line: 331
+Call Trace:
+ xfs_buf_item_format+0x632/0x680
+ ? kmem_alloc_large+0x29/0x90
+ ? kmem_alloc+0x70/0x120
+ ? xfs_log_commit_cil+0x132/0x940
+ xfs_log_commit_cil+0x26f/0x940
+ ? xfs_buf_item_init+0x1ad/0x240
+ ? xfs_growfs_rt_alloc+0x1fc/0x280
+ __xfs_trans_commit+0xac/0x370
+ xfs_growfs_rt_alloc+0x1fc/0x280
+ xfs_growfs_rt+0x1a0/0x5e0
+ xfs_file_ioctl+0x3fd/0xc70
+ ? selinux_file_ioctl+0x174/0x220
+ ksys_ioctl+0x87/0xc0
+ __x64_sys_ioctl+0x16/0x20
+ do_syscall_64+0x3e/0x70
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+This occurs because the buffer being formatted has the value of
+XFS_BLFT_UNKNOWN_BUF assigned to the 'type' subfield of
+bip->bli_formats->blf_flags.
+
+This commit fixes the issue by assigning one of XFS_BLFT_RTSUMMARY_BUF
+and XFS_BLFT_RTBITMAP_BUF to the 'type' subfield of
+bip->bli_formats->blf_flags before committing the corresponding
+transaction.
+
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Chandan Babu R <chandanrlinux@gmail.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/xfs_rtalloc.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
+index 86994d7f7cba3..912f96a248f25 100644
+--- a/fs/xfs/xfs_rtalloc.c
++++ b/fs/xfs/xfs_rtalloc.c
+@@ -778,8 +778,14 @@ xfs_growfs_rt_alloc(
+ struct xfs_bmbt_irec map; /* block map output */
+ int nmap; /* number of block maps */
+ int resblks; /* space reservation */
++ enum xfs_blft buf_type;
+ struct xfs_trans *tp;
+
++ if (ip == mp->m_rsumip)
++ buf_type = XFS_BLFT_RTSUMMARY_BUF;
++ else
++ buf_type = XFS_BLFT_RTBITMAP_BUF;
++
+ /*
+ * Allocate space to the file, as necessary.
+ */
+@@ -841,6 +847,8 @@ xfs_growfs_rt_alloc(
+ mp->m_bsize, 0, &bp);
+ if (error)
+ goto out_trans_cancel;
++
++ xfs_trans_buf_set_type(tp, bp, buf_type);
+ memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
+ xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
+ /*
+--
+2.27.0
+