git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.15
author Sasha Levin <sashal@kernel.org>
    Thu, 9 Feb 2023 18:18:40 +0000 (13:18 -0500)
committer Sasha Levin <sashal@kernel.org>
    Thu, 9 Feb 2023 18:18:40 +0000 (13:18 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.15/migrate-hugetlb-check-for-hugetlb-shared-pmd-in-node.patch [new file with mode: 0644]
queue-5.15/mm-migration-return-errno-when-isolate_huge_page-fai.patch [new file with mode: 0644]
queue-5.15/nvmem-core-add-error-handling-for-dev_set_name.patch [new file with mode: 0644]
queue-5.15/nvmem-core-fix-cleanup-after-dev_set_name.patch [new file with mode: 0644]
queue-5.15/nvmem-core-fix-registration-vs-use-race.patch [new file with mode: 0644]
queue-5.15/series [new file with mode: 0644]

diff --git a/queue-5.15/migrate-hugetlb-check-for-hugetlb-shared-pmd-in-node.patch b/queue-5.15/migrate-hugetlb-check-for-hugetlb-shared-pmd-in-node.patch
new file mode 100644 (file)
index 0000000..01c01a2
--- /dev/null
@@ -0,0 +1,55 @@
+From efb8c3cd7e494a0a9965f14dfe174c8af788c7ea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Jan 2023 14:27:21 -0800
+Subject: migrate: hugetlb: check for hugetlb shared PMD in node migration
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+[ Upstream commit 73bdf65ea74857d7fb2ec3067a3cec0e261b1462 ]
+
+migrate_pages/mempolicy semantics state that CAP_SYS_NICE is required to
+move pages shared with another process to a different node.  page_mapcount
+> 1 is being used to determine if a hugetlb page is shared.  However, a
+hugetlb page will have a mapcount of 1 if mapped by multiple processes via
+a shared PMD.  As a result, hugetlb pages shared by multiple processes and
+mapped with a shared PMD can be moved by a process without CAP_SYS_NICE.
+
+To fix, check for a shared PMD if mapcount is 1.  If a shared PMD is
+found, consider the page shared.
+
+Link: https://lkml.kernel.org/r/20230126222721.222195-3-mike.kravetz@oracle.com
+Fixes: e2d8cf405525 ("migrate: add hugepage migration code to migrate_pages()")
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Acked-by: Peter Xu <peterx@redhat.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: James Houghton <jthoughton@google.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Muchun Song <songmuchun@bytedance.com>
+Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
+Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/mempolicy.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 8687781b41c96..818753635e427 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -603,7 +603,8 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
+       /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
+       if (flags & (MPOL_MF_MOVE_ALL) ||
+-          (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
++          (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
++           !hugetlb_pmd_shared(pte))) {
+               if (isolate_hugetlb(page, qp->pagelist) &&
+                       (flags & MPOL_MF_STRICT))
+                       /*
+-- 
+2.39.0
+
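For context: the new check relies on a helper, hugetlb_pmd_shared(), which
the same upstream commit introduces. The underlying observation is that a
PMD page table page mapped into more than one process carries an elevated
reference count. A minimal sketch of such a helper (illustrative only; the
exact upstream definition lives in mm/hugetlb.c and is not part of the
backported hunk above):

    /*
     * Sketch: a huge PMD is shared when the page table page backing it
     * is referenced by more than one mapping. Assumes pte points into a
     * PMD page table page, as in queue_pages_hugetlb() above.
     */
    static bool hugetlb_pmd_shared_sketch(pte_t *pte)
    {
    	return page_count(virt_to_page(pte)) > 1;
    }

With this in place, a mapcount of 1 is no longer taken as proof that only
one process maps the page.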
diff --git a/queue-5.15/mm-migration-return-errno-when-isolate_huge_page-fai.patch b/queue-5.15/mm-migration-return-errno-when-isolate_huge_page-fai.patch
new file mode 100644 (file)
index 0000000..90411b5
--- /dev/null
@@ -0,0 +1,191 @@
+From 5b6a914c4cafcacdfa0f9269a7f481a9f8284859 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 May 2022 19:30:15 +0800
+Subject: mm/migration: return errno when isolate_huge_page failed
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+[ Upstream commit 7ce82f4c3f3ead13a9d9498768e3b1a79975c4d8 ]
+
+We might fail to isolate a huge page because, e.g., the page is under
+migration, which cleared HPageMigratable.  We should return an errno in
+this case rather than always returning 1, which could confuse the user,
+i.e. the caller might think all of the memory was migrated while the
+hugetlb page was left behind.  We make the prototype of
+isolate_huge_page consistent with isolate_lru_page as suggested by
+Huang Ying, and rename isolate_huge_page to isolate_hugetlb as
+suggested by Muchun to improve readability.
+
+Link: https://lkml.kernel.org/r/20220530113016.16663-4-linmiaohe@huawei.com
+Fixes: e8db67eb0ded ("mm: migrate: move_pages() supports thp migration")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Suggested-by: Huang Ying <ying.huang@intel.com>
+Reported-by: kernel test robot <lkp@intel.com> (build error)
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: David Howells <dhowells@redhat.com>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Muchun Song <songmuchun@bytedance.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Peter Xu <peterx@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 73bdf65ea748 ("migrate: hugetlb: check for hugetlb shared PMD in node migration")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/hugetlb.h |  6 +++---
+ mm/gup.c                |  2 +-
+ mm/hugetlb.c            | 11 +++++------
+ mm/memory-failure.c     |  2 +-
+ mm/memory_hotplug.c     |  2 +-
+ mm/mempolicy.c          |  2 +-
+ mm/migrate.c            |  7 ++++---
+ 7 files changed, 16 insertions(+), 16 deletions(-)
+
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index cac25ad9d643f..f98d747f983b9 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -166,7 +166,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
+                                               vm_flags_t vm_flags);
+ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
+                                               long freed);
+-bool isolate_huge_page(struct page *page, struct list_head *list);
++int isolate_hugetlb(struct page *page, struct list_head *list);
+ int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
+ int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
+ void putback_active_hugepage(struct page *page);
+@@ -354,9 +354,9 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+       return NULL;
+ }
+-static inline bool isolate_huge_page(struct page *page, struct list_head *list)
++static inline int isolate_hugetlb(struct page *page, struct list_head *list)
+ {
+-      return false;
++      return -EBUSY;
+ }
+ static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
+diff --git a/mm/gup.c b/mm/gup.c
+index 2370565a81dc3..0a1839b325747 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1877,7 +1877,7 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
+                */
+               if (!is_pinnable_page(head)) {
+                       if (PageHuge(head)) {
+-                              if (!isolate_huge_page(head, &movable_page_list))
++                              if (isolate_hugetlb(head, &movable_page_list))
+                                       isolation_error_count++;
+                       } else {
+                               if (!PageLRU(head) && drain_allow) {
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 8599f16d4aa4f..2f5c1b2456ef2 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2656,8 +2656,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
+                * Fail with -EBUSY if not possible.
+                */
+               spin_unlock_irq(&hugetlb_lock);
+-              if (!isolate_huge_page(old_page, list))
+-                      ret = -EBUSY;
++              ret = isolate_hugetlb(old_page, list);
+               spin_lock_irq(&hugetlb_lock);
+               goto free_new;
+       } else if (!HPageFreed(old_page)) {
+@@ -2733,7 +2732,7 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
+       if (hstate_is_gigantic(h))
+               return -ENOMEM;
+-      if (page_count(head) && isolate_huge_page(head, list))
++      if (page_count(head) && !isolate_hugetlb(head, list))
+               ret = 0;
+       else if (!page_count(head))
+               ret = alloc_and_dissolve_huge_page(h, head, list);
+@@ -6277,15 +6276,15 @@ follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int fla
+       return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
+ }
+-bool isolate_huge_page(struct page *page, struct list_head *list)
++int isolate_hugetlb(struct page *page, struct list_head *list)
+ {
+-      bool ret = true;
++      int ret = 0;
+       spin_lock_irq(&hugetlb_lock);
+       if (!PageHeadHuge(page) ||
+           !HPageMigratable(page) ||
+           !get_page_unless_zero(page)) {
+-              ret = false;
++              ret = -EBUSY;
+               goto unlock;
+       }
+       ClearHPageMigratable(page);
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 2ad0f45800916..9f9dd968fbe3c 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -2106,7 +2106,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
+       bool lru = PageLRU(page);
+       if (PageHuge(page)) {
+-              isolated = isolate_huge_page(page, pagelist);
++              isolated = !isolate_hugetlb(page, pagelist);
+       } else {
+               if (lru)
+                       isolated = !isolate_lru_page(page);
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 9fd0be32a281e..81f2a97c886c9 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1704,7 +1704,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
+               if (PageHuge(page)) {
+                       pfn = page_to_pfn(head) + compound_nr(head) - 1;
+-                      isolate_huge_page(head, &source);
++                      isolate_hugetlb(head, &source);
+                       continue;
+               } else if (PageTransHuge(page))
+                       pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 4472be6f123db..8687781b41c96 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -604,7 +604,7 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
+       /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
+       if (flags & (MPOL_MF_MOVE_ALL) ||
+           (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
+-              if (!isolate_huge_page(page, qp->pagelist) &&
++              if (isolate_hugetlb(page, qp->pagelist) &&
+                       (flags & MPOL_MF_STRICT))
+                       /*
+                        * Failed to isolate page but allow migrating pages
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 7da052c6cf1ea..dd50b1cc089e0 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -134,7 +134,7 @@ static void putback_movable_page(struct page *page)
+  *
+  * This function shall be used whenever the isolated pageset has been
+  * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
+- * and isolate_huge_page().
++ * and isolate_hugetlb().
+  */
+ void putback_movable_pages(struct list_head *l)
+ {
+@@ -1722,8 +1722,9 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
+       if (PageHuge(page)) {
+               if (PageHead(page)) {
+-                      isolate_huge_page(page, pagelist);
+-                      err = 1;
++                      err = isolate_hugetlb(page, pagelist);
++                      if (!err)
++                              err = 1;
+               }
+       } else {
+               struct page *head;
+-- 
+2.39.0
+
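For context: callers that previously tested a boolean return now treat zero
as success and a negative errno as failure, matching isolate_lru_page(). A
sketch of the new calling convention (hypothetical caller, not taken from
the patch):

    LIST_HEAD(pagelist);
    int err;

    /*
     * Returns 0 on success, or -EBUSY if the page could not be
     * isolated, e.g. because a concurrent migration has already
     * cleared HPageMigratable.
     */
    err = isolate_hugetlb(page, &pagelist);
    if (err)
    	return err;	/* propagate the errno instead of a bare 1 */

Note the inverted tests in the hunks above: the failure check
"if (!isolate_huge_page(...))" becomes "if (isolate_hugetlb(...))", and
success checks gain a "!".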
diff --git a/queue-5.15/nvmem-core-add-error-handling-for-dev_set_name.patch b/queue-5.15/nvmem-core-add-error-handling-for-dev_set_name.patch
new file mode 100644 (file)
index 0000000..c002f12
--- /dev/null
@@ -0,0 +1,58 @@
+From cd542b485d6ea5ab35230c905660d2f1afbaa8ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Sep 2022 13:20:50 +0100
+Subject: nvmem: core: add error handling for dev_set_name
+
+From: Gaosheng Cui <cuigaosheng1@huawei.com>
+
+[ Upstream commit 5544e90c81261e82e02bbf7c6015a4b9c8c825ef ]
+
+dev_set_name() returns an int and can fail, so add error handling
+for it: reclaim the memory of the nvmem resource and return early
+when an error occurs.
+
+Signed-off-by: Gaosheng Cui <cuigaosheng1@huawei.com>
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20220916122100.170016-4-srinivas.kandagatla@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: ab3428cfd9aa ("nvmem: core: fix registration vs use race")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvmem/core.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index ee86022c4f2b8..51bec9f8a3bf9 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -804,18 +804,24 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+       switch (config->id) {
+       case NVMEM_DEVID_NONE:
+-              dev_set_name(&nvmem->dev, "%s", config->name);
++              rval = dev_set_name(&nvmem->dev, "%s", config->name);
+               break;
+       case NVMEM_DEVID_AUTO:
+-              dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
++              rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
+               break;
+       default:
+-              dev_set_name(&nvmem->dev, "%s%d",
++              rval = dev_set_name(&nvmem->dev, "%s%d",
+                            config->name ? : "nvmem",
+                            config->name ? config->id : nvmem->id);
+               break;
+       }
++      if (rval) {
++              ida_free(&nvmem_ida, nvmem->id);
++              kfree(nvmem);
++              return ERR_PTR(rval);
++      }
++
+       nvmem->read_only = device_property_present(config->dev, "read-only") ||
+                          config->read_only || !nvmem->reg_write;
+-- 
+2.39.0
+
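For context: dev_set_name() builds the device name with a kvasprintf()-style
allocation, so it can fail with -ENOMEM; that is why the result must be
checked and the partially constructed nvmem device torn down. A sketch of
the general pattern for any driver (hypothetical "foo" device; all names
other than dev_set_name() and ERR_PTR() are placeholders):

    rval = dev_set_name(&foo->dev, "foo%d", foo->id);	/* may fail with -ENOMEM */
    if (rval) {
    	/* undo everything allocated before this point */
    	ida_free(&foo_ida, foo->id);
    	kfree(foo);
    	return ERR_PTR(rval);
    }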
diff --git a/queue-5.15/nvmem-core-fix-cleanup-after-dev_set_name.patch b/queue-5.15/nvmem-core-fix-cleanup-after-dev_set_name.patch
new file mode 100644 (file)
index 0000000..88457cc
--- /dev/null
@@ -0,0 +1,97 @@
+From 5b4256ec1a5fc3ad89289112f84fc17b3810ce8f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Jan 2023 10:40:10 +0000
+Subject: nvmem: core: fix cleanup after dev_set_name()
+
+From: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+
+[ Upstream commit 560181d3ace61825f4ca9dd3481d6c0ee6709fa8 ]
+
+If dev_set_name() fails, we leak nvmem->wp_gpio as the cleanup does not
+put it.  While a minimal fix would be to add the missing gpiod_put()
+call, we can do better: split device_register() into device_initialize()
+and device_add(), initialise the device early, and reuse the tested
+nvmem_release() cleanup code by putting the device on every error path.
+
+This results in a slightly larger fix, but clearer code.
+
+Note: this patch depends on "nvmem: core: initialise nvmem->id early"
+and "nvmem: core: remove nvmem_config wp_gpio".
+
+Fixes: 5544e90c8126 ("nvmem: core: add error handling for dev_set_name")
+Cc: stable@vger.kernel.org
+Reported-by: kernel test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <error27@gmail.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+[Srini: Fixed subject line and error code handling with wp_gpio while applying.]
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20230127104015.23839-6-srinivas.kandagatla@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: ab3428cfd9aa ("nvmem: core: fix registration vs use race")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvmem/core.c | 22 ++++++++++------------
+ 1 file changed, 10 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index 51bec9f8a3bf9..f06b65f0d410b 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -768,14 +768,18 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+       nvmem->id = rval;
++      nvmem->dev.type = &nvmem_provider_type;
++      nvmem->dev.bus = &nvmem_bus_type;
++      nvmem->dev.parent = config->dev;
++
++      device_initialize(&nvmem->dev);
++
+       if (!config->ignore_wp)
+               nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
+                                                   GPIOD_OUT_HIGH);
+       if (IS_ERR(nvmem->wp_gpio)) {
+-              ida_free(&nvmem_ida, nvmem->id);
+               rval = PTR_ERR(nvmem->wp_gpio);
+-              kfree(nvmem);
+-              return ERR_PTR(rval);
++              goto err_put_device;
+       }
+       kref_init(&nvmem->refcnt);
+@@ -787,9 +791,6 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+       nvmem->stride = config->stride ?: 1;
+       nvmem->word_size = config->word_size ?: 1;
+       nvmem->size = config->size;
+-      nvmem->dev.type = &nvmem_provider_type;
+-      nvmem->dev.bus = &nvmem_bus_type;
+-      nvmem->dev.parent = config->dev;
+       nvmem->root_only = config->root_only;
+       nvmem->priv = config->priv;
+       nvmem->type = config->type;
+@@ -816,11 +817,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+               break;
+       }
+-      if (rval) {
+-              ida_free(&nvmem_ida, nvmem->id);
+-              kfree(nvmem);
+-              return ERR_PTR(rval);
+-      }
++      if (rval)
++              goto err_put_device;
+       nvmem->read_only = device_property_present(config->dev, "read-only") ||
+                          config->read_only || !nvmem->reg_write;
+@@ -831,7 +829,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+       dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
+-      rval = device_register(&nvmem->dev);
++      rval = device_add(&nvmem->dev);
+       if (rval)
+               goto err_put_device;
+-- 
+2.39.0
+
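For context: the split applied here is the standard driver-core idiom.
Once device_initialize() has run, a single put_device() releases the
device through its release() callback, so every failure path can share
one label. A sketch of the pattern in isolation (simplified from the
patch above; surrounding setup elided):

    device_initialize(&nvmem->dev);	/* refcount is live from here on */

    /* ... further setup, any step of which may fail ... */
    rval = dev_set_name(&nvmem->dev, "%s%d", "nvmem", nvmem->id);
    if (rval)
    	goto err_put_device;

    rval = device_add(&nvmem->dev);	/* publish the device */
    if (rval)
    	goto err_put_device;

    return nvmem;

    err_put_device:
    	put_device(&nvmem->dev);	/* invokes release(), freeing nvmem */
    	return ERR_PTR(rval);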
diff --git a/queue-5.15/nvmem-core-fix-registration-vs-use-race.patch b/queue-5.15/nvmem-core-fix-registration-vs-use-race.patch
new file mode 100644 (file)
index 0000000..979658d
--- /dev/null
@@ -0,0 +1,87 @@
+From 3b587e409762ba4c04d052f1ee74e5d64dfaab72 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Jan 2023 10:40:11 +0000
+Subject: nvmem: core: fix registration vs use race
+
+From: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+
+[ Upstream commit ab3428cfd9aa2f3463ee4b2909b5bb2193bd0c4a ]
+
+The i.MX6 CPU frequency driver sometimes fails to register at boot time
+due to nvmem_cell_read_u32() sporadically returning -ENOENT.
+
+This happens because there is a window where __nvmem_device_get() in
+of_nvmem_cell_get() is able to return the nvmem device, but as the
+cells have not yet been set up, nvmem_find_cell_entry_by_node() returns
+NULL.
+
+This occurs because the nvmem core registration code violates one of the
+fundamental principles of kernel programming: do not publish data
+structures before their setup is complete.
+
+Fix this by making nvmem core code conform with this principle.
+
+Fixes: eace75cfdcf7 ("nvmem: Add a simple NVMEM framework for nvmem providers")
+Cc: stable@vger.kernel.org
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20230127104015.23839-7-srinivas.kandagatla@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvmem/core.c | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index f06b65f0d410b..6a74e38746057 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -827,22 +827,16 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+       nvmem->dev.groups = nvmem_dev_groups;
+ #endif
+-      dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
+-
+-      rval = device_add(&nvmem->dev);
+-      if (rval)
+-              goto err_put_device;
+-
+       if (nvmem->nkeepout) {
+               rval = nvmem_validate_keepouts(nvmem);
+               if (rval)
+-                      goto err_device_del;
++                      goto err_put_device;
+       }
+       if (config->compat) {
+               rval = nvmem_sysfs_setup_compat(nvmem, config);
+               if (rval)
+-                      goto err_device_del;
++                      goto err_put_device;
+       }
+       if (config->cells) {
+@@ -859,6 +853,12 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+       if (rval)
+               goto err_remove_cells;
++      dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
++
++      rval = device_add(&nvmem->dev);
++      if (rval)
++              goto err_remove_cells;
++
+       blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
+       return nvmem;
+@@ -867,8 +867,6 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+       nvmem_device_remove_all_cells(nvmem);
+       if (config->compat)
+               nvmem_sysfs_remove_compat(nvmem, config);
+-err_device_del:
+-      device_del(&nvmem->dev);
+ err_put_device:
+       put_device(&nvmem->dev);
+-- 
+2.39.0
+
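For context: the fix moves device_add() — the point at which the device
becomes findable by of_nvmem_cell_get() — after cell setup, closing the
window described above. A pseudocode-level summary of the reordering
(cell setup shown schematically; see the hunks above for the real calls):

    /* Before (racy): */
    device_add(&nvmem->dev);	/* device already visible to consumers */
    nvmem_add_cells(...);	/* window: cell lookup can still miss   */

    /* After (fixed): */
    nvmem_add_cells(...);	/* finish setup first */
    device_add(&nvmem->dev);	/* then publish       */
    blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);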
diff --git a/queue-5.15/series b/queue-5.15/series
new file mode 100644 (file)
index 0000000..cebf2a2
--- /dev/null
@@ -0,0 +1,5 @@
+nvmem-core-add-error-handling-for-dev_set_name.patch
+nvmem-core-fix-cleanup-after-dev_set_name.patch
+nvmem-core-fix-registration-vs-use-race.patch
+mm-migration-return-errno-when-isolate_huge_page-fai.patch
+migrate-hugetlb-check-for-hugetlb-shared-pmd-in-node.patch