--- /dev/null
+From d1c362e1dd68a421cf9033404cf141a4ab734a5d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
+Date: Fri, 9 Oct 2020 20:42:34 +0200
+Subject: bpf: Always return target ifindex in bpf_fib_lookup
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@redhat.com>
+
+commit d1c362e1dd68a421cf9033404cf141a4ab734a5d upstream.
+
+The bpf_fib_lookup() helper performs a neighbour lookup for the destination
+IP and returns BPF_FIB_LKUP_RET_NO_NEIGH if this fails, with the expectation
+that the BPF program will pass the packet up the stack in this case.
+However, with the addition of bpf_redirect_neigh(), that helper can now be
+used instead to perform the neighbour lookup, at the cost of a bit of
+duplicated work.
+
+For that we still need the target ifindex, and since bpf_fib_lookup()
+already has that at the time it performs the neighbour lookup, there is
+really no reason why it can't just return it in any case. So let's just
+always return the ifindex if the FIB lookup itself succeeds.
+
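+For context (and not part of the patch itself), a tc BPF forwarding program
+might combine the two helpers roughly as follows. This is only a sketch:
+packet parsing is elided, AF_INET is defined locally as the selftests do,
+and the current four-argument bpf_redirect_neigh() signature is assumed.
+
+  #include <linux/bpf.h>
+  #include <linux/pkt_cls.h>
+  #include <bpf/bpf_helpers.h>
+
+  #define AF_INET 2
+
+  SEC("tc")
+  int fwd(struct __sk_buff *skb)
+  {
+          struct bpf_fib_lookup fib_params = {};
+          long ret;
+
+          fib_params.family  = AF_INET;
+          fib_params.ifindex = skb->ingress_ifindex;
+          /* ... fill in saddr/daddr/ports from the packet here ... */
+
+          ret = bpf_fib_lookup(skb, &fib_params, sizeof(fib_params), 0);
+          if (ret == BPF_FIB_LKUP_RET_SUCCESS)
+                  /* Neighbour resolved: smac/dmac are filled in
+                   * (rewriting the packet's MAC header is elided here).
+                   */
+                  return bpf_redirect(fib_params.ifindex, 0);
+          if (ret == BPF_FIB_LKUP_RET_NO_NEIGH)
+                  /* FIB lookup succeeded but the neighbour is unresolved;
+                   * with this change fib_params.ifindex is still valid,
+                   * so hand resolution off to bpf_redirect_neigh().
+                   */
+                  return bpf_redirect_neigh(fib_params.ifindex, NULL, 0, 0);
+
+          return TC_ACT_OK; /* otherwise pass the packet up the stack */
+  }
+
+  char _license[] SEC("license") = "GPL";
+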
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Cc: David Ahern <dsahern@gmail.com>
+Link: https://lore.kernel.org/bpf/20201009184234.134214-1-toke@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/filter.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4235,7 +4235,6 @@ static int bpf_fib_set_fwd_params(struct
+ memcpy(params->smac, dev->dev_addr, ETH_ALEN);
+ params->h_vlan_TCI = 0;
+ params->h_vlan_proto = 0;
+- params->ifindex = dev->ifindex;
+
+ return 0;
+ }
+@@ -4333,6 +4332,7 @@ static int bpf_ipv4_fib_lookup(struct ne
+ params->ipv4_dst = nh->nh_gw;
+
+ params->rt_metric = res.fi->fib_priority;
++ params->ifindex = dev->ifindex;
+
+ /* xdp and cls_bpf programs are run in RCU-bh so
+ * rcu_read_lock_bh is not needed here
+@@ -4447,6 +4447,7 @@ static int bpf_ipv6_fib_lookup(struct ne
+
+ dev = f6i->fib6_nh.nh_dev;
+ params->rt_metric = f6i->fib6_metric;
++ params->ifindex = dev->ifindex;
+
+ /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
+ * not needed here. Can not use __ipv6_neigh_lookup_noref here
--- /dev/null
+From 73bdf65ea74857d7fb2ec3067a3cec0e261b1462 Mon Sep 17 00:00:00 2001
+From: Mike Kravetz <mike.kravetz@oracle.com>
+Date: Thu, 26 Jan 2023 14:27:21 -0800
+Subject: migrate: hugetlb: check for hugetlb shared PMD in node migration
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+commit 73bdf65ea74857d7fb2ec3067a3cec0e261b1462 upstream.
+
+migrate_pages/mempolicy semantics state that CAP_SYS_NICE is required to
+move pages shared with another process to a different node.
+page_mapcount > 1 is being used to determine if a hugetlb page is shared.
+However, a hugetlb page will have a mapcount of 1 if mapped by multiple
+processes via a shared PMD. As a result, hugetlb pages shared by multiple
+processes and mapped with a shared PMD can be moved by a process without
+CAP_SYS_NICE.
+
+To fix, check for a shared PMD if mapcount is 1. If a shared PMD is found,
+consider the page shared.
+
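+The hugetlb_pmd_shared() check used below is introduced by the first patch
+in this series (not included here). As a rough sketch of the idea, and not
+necessarily the exact upstream code: processes sharing a hugetlb PMD each
+hold a reference on the page-table page that contains it, so the PMD can be
+treated as shared whenever that page's refcount is above one.
+
+  /* Illustrative sketch only, not the code added by the companion
+   * patch: a refcount above one on the page-table page holding the
+   * PMD means another process is sharing it.
+   */
+  static inline bool hugetlb_pmd_shared(pte_t *pte)
+  {
+          return page_count(virt_to_page(pte)) > 1;
+  }
+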
+Link: https://lkml.kernel.org/r/20230126222721.222195-3-mike.kravetz@oracle.com
+Fixes: e2d8cf405525 ("migrate: add hugepage migration code to migrate_pages()")
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Acked-by: Peter Xu <peterx@redhat.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: James Houghton <jthoughton@google.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Muchun Song <songmuchun@bytedance.com>
+Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
+Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mempolicy.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -571,7 +571,8 @@ static int queue_pages_hugetlb(pte_t *pt
+ goto unlock;
+ /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
+ if (flags & (MPOL_MF_MOVE_ALL) ||
+- (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
++ (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
++ !hugetlb_pmd_shared(pte)))
+ isolate_huge_page(page, qp->pagelist);
+ unlock:
+ spin_unlock(ptl);