git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.9-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 10 Dec 2020 12:54:09 +0000 (13:54 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 10 Dec 2020 12:54:09 +0000 (13:54 +0100)
added patches:
iommu-amd-set-dte-to-represent-512-irtes.patch
mm-memcg-slab-fix-obj_cgroup_charge-return-value-handling.patch

queue-5.9/iommu-amd-set-dte-to-represent-512-irtes.patch [new file with mode: 0644]
queue-5.9/mm-memcg-slab-fix-obj_cgroup_charge-return-value-handling.patch [new file with mode: 0644]
queue-5.9/series

diff --git a/queue-5.9/iommu-amd-set-dte-to-represent-512-irtes.patch b/queue-5.9/iommu-amd-set-dte-to-represent-512-irtes.patch
new file mode 100644 (file)
index 0000000..e767c3e
--- /dev/null
@@ -0,0 +1,37 @@
+From 4165bf015ba9454f45beaad621d16c516d5c5afe Mon Sep 17 00:00:00 2001
+From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Date: Mon, 7 Dec 2020 03:19:20 -0600
+Subject: iommu/amd: Set DTE[IntTabLen] to represent 512 IRTEs
+
+From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+
+commit 4165bf015ba9454f45beaad621d16c516d5c5afe upstream.
+
+According to the AMD IOMMU spec, the commit 73db2fc595f3
+("iommu/amd: Increase interrupt remapping table limit to 512 entries")
+also requires the interrupt table length (IntTabLen) to be set to 9
+(power of 2) in the device table mapping entry (DTE).
+
+Fixes: 73db2fc595f3 ("iommu/amd: Increase interrupt remapping table limit to 512 entries")
+Reported-by: Jerry Snitselaar <jsnitsel@redhat.com>
+Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
+Link: https://lore.kernel.org/r/20201207091920.3052-1-suravee.suthikulpanit@amd.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/amd/amd_iommu_types.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iommu/amd/amd_iommu_types.h
++++ b/drivers/iommu/amd/amd_iommu_types.h
+@@ -254,7 +254,7 @@
+ #define DTE_IRQ_REMAP_INTCTL_MASK     (0x3ULL << 60)
+ #define DTE_IRQ_TABLE_LEN_MASK        (0xfULL << 1)
+ #define DTE_IRQ_REMAP_INTCTL    (2ULL << 60)
+-#define DTE_IRQ_TABLE_LEN       (8ULL << 1)
++#define DTE_IRQ_TABLE_LEN       (9ULL << 1)
+ #define DTE_IRQ_REMAP_ENABLE    1ULL
+ #define PAGE_MODE_NONE    0x00
diff --git a/queue-5.9/mm-memcg-slab-fix-obj_cgroup_charge-return-value-handling.patch b/queue-5.9/mm-memcg-slab-fix-obj_cgroup_charge-return-value-handling.patch
new file mode 100644 (file)
index 0000000..aa3ef29
--- /dev/null
@@ -0,0 +1,125 @@
+From becaba65f62f88e553ec92ed98370e9d2b18e629 Mon Sep 17 00:00:00 2001
+From: Roman Gushchin <guro@fb.com>
+Date: Sat, 5 Dec 2020 22:14:45 -0800
+Subject: mm: memcg/slab: fix obj_cgroup_charge() return value handling
+
+From: Roman Gushchin <guro@fb.com>
+
+commit becaba65f62f88e553ec92ed98370e9d2b18e629 upstream.
+
+Commit 10befea91b61 ("mm: memcg/slab: use a single set of kmem_caches
+for all allocations") introduced a regression into the handling of the
+obj_cgroup_charge() return value.  If a non-zero value is returned
+(indicating of exceeding one of memory.max limits), the allocation
+should fail, instead of falling back to non-accounted mode.
+
+To make the code more readable, move memcg_slab_pre_alloc_hook() and
+memcg_slab_post_alloc_hook() calling conditions into bodies of these
+hooks.
+
+Fixes: 10befea91b61 ("mm: memcg/slab: use a single set of kmem_caches for all allocations")
+Signed-off-by: Roman Gushchin <guro@fb.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Shakeel Butt <shakeelb@google.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20201127161828.GD840171@carbon.dhcp.thefacebook.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slab.h |   42 +++++++++++++++++++++++++-----------------
+ 1 file changed, 25 insertions(+), 17 deletions(-)
+
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -275,25 +275,35 @@ static inline size_t obj_full_size(struc
+       return s->size + sizeof(struct obj_cgroup *);
+ }
+-static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+-                                                         size_t objects,
+-                                                         gfp_t flags)
++/*
++ * Returns false if the allocation should fail.
++ */
++static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
++                                           struct obj_cgroup **objcgp,
++                                           size_t objects, gfp_t flags)
+ {
+       struct obj_cgroup *objcg;
++      if (!memcg_kmem_enabled())
++              return true;
++
++      if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
++              return true;
++
+       if (memcg_kmem_bypass())
+-              return NULL;
++              return true;
+       objcg = get_obj_cgroup_from_current();
+       if (!objcg)
+-              return NULL;
++              return true;
+       if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
+               obj_cgroup_put(objcg);
+-              return NULL;
++              return false;
+       }
+-      return objcg;
++      *objcgp = objcg;
++      return true;
+ }
+ static inline void mod_objcg_state(struct obj_cgroup *objcg,
+@@ -319,7 +329,7 @@ static inline void memcg_slab_post_alloc
+       unsigned long off;
+       size_t i;
+-      if (!objcg)
++      if (!memcg_kmem_enabled() || !objcg)
+               return;
+       flags &= ~__GFP_ACCOUNT;
+@@ -404,11 +414,11 @@ static inline void memcg_free_page_obj_c
+ {
+ }
+-static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+-                                                         size_t objects,
+-                                                         gfp_t flags)
++static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
++                                           struct obj_cgroup **objcgp,
++                                           size_t objects, gfp_t flags)
+ {
+-      return NULL;
++      return true;
+ }
+ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
+@@ -512,9 +522,8 @@ static inline struct kmem_cache *slab_pr
+       if (should_failslab(s, flags))
+               return NULL;
+-      if (memcg_kmem_enabled() &&
+-          ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
+-              *objcgp = memcg_slab_pre_alloc_hook(s, size, flags);
++      if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
++              return NULL;
+       return s;
+ }
+@@ -533,8 +542,7 @@ static inline void slab_post_alloc_hook(
+                                        s->flags, flags);
+       }
+-      if (memcg_kmem_enabled())
+-              memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
++      memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
+ }
+ #ifndef CONFIG_SLOB
index a274c0432767518a6cba8487f9ddcfd1521d61d3..db3b484fd8334b327de9f553ab1d00feb71898ee 100644 (file)
@@ -54,3 +54,5 @@ mm-list_lru-set-shrinker-map-bit-when-child-nr_items-is-not-zero.patch
 mm-swapfile-do-not-sleep-with-a-spin-lock-held.patch
 hugetlb_cgroup-fix-offline-of-hugetlb-cgroup-with-reservations.patch
 revert-amd-amdgpu-disable-vcn-dpg-mode-for-picasso.patch
+iommu-amd-set-dte-to-represent-512-irtes.patch
+mm-memcg-slab-fix-obj_cgroup_charge-return-value-handling.patch