--- /dev/null
+From a3286f05bc5a5bc7fc73a9783ec89de78fcd07f8 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Thu, 22 Feb 2018 15:27:22 +0100
+Subject: powerpc/mm/slice: create header files dedicated to slices
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit a3286f05bc5a5bc7fc73a9783ec89de78fcd07f8 upstream.
+
+In preparation for the following patch which will enhance 'slices'
+for supporting PPC32 in order to fix an issue on hugepages on 8xx,
+this patch takes out of page*.h all bits related to 'slices' and put
+them into newly created slice.h header files.
+While common parts go into asm/slice.h, subarch specific
+parts go into respective book3s/64/slice.h and nohash/64/slice.h
+headers.
+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/book3s/64/slice.h | 27 +++++++++++++
+ arch/powerpc/include/asm/nohash/64/slice.h | 12 +++++
+ arch/powerpc/include/asm/page.h | 1
+ arch/powerpc/include/asm/page_64.h | 59 -----------------------------
+ arch/powerpc/include/asm/slice.h | 40 +++++++++++++++++++
+ 5 files changed, 80 insertions(+), 59 deletions(-)
+
+--- /dev/null
++++ b/arch/powerpc/include/asm/book3s/64/slice.h
+@@ -0,0 +1,27 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H
++#define _ASM_POWERPC_BOOK3S_64_SLICE_H
++
++#ifdef CONFIG_PPC_MM_SLICES
++
++#define SLICE_LOW_SHIFT 28
++#define SLICE_LOW_TOP (0x100000000ul)
++#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
++#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
++
++#define SLICE_HIGH_SHIFT 40
++#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
++#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
++
++#else /* CONFIG_PPC_MM_SLICES */
++
++#define get_slice_psize(mm, addr) ((mm)->context.user_psize)
++#define slice_set_user_psize(mm, psize) \
++do { \
++ (mm)->context.user_psize = (psize); \
++ (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
++} while (0)
++
++#endif /* CONFIG_PPC_MM_SLICES */
++
++#endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */
+--- /dev/null
++++ b/arch/powerpc/include/asm/nohash/64/slice.h
+@@ -0,0 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_POWERPC_NOHASH_64_SLICE_H
++#define _ASM_POWERPC_NOHASH_64_SLICE_H
++
++#ifdef CONFIG_PPC_64K_PAGES
++#define get_slice_psize(mm, addr) MMU_PAGE_64K
++#else /* CONFIG_PPC_64K_PAGES */
++#define get_slice_psize(mm, addr) MMU_PAGE_4K
++#endif /* !CONFIG_PPC_64K_PAGES */
++#define slice_set_user_psize(mm, psize) do { BUG(); } while (0)
++
++#endif /* _ASM_POWERPC_NOHASH_64_SLICE_H */
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -344,5 +344,6 @@ typedef struct page *pgtable_t;
+
+ #include <asm-generic/memory_model.h>
+ #endif /* __ASSEMBLY__ */
++#include <asm/slice.h>
+
+ #endif /* _ASM_POWERPC_PAGE_H */
+--- a/arch/powerpc/include/asm/page_64.h
++++ b/arch/powerpc/include/asm/page_64.h
+@@ -86,65 +86,6 @@ extern u64 ppc64_pft_size;
+
+ #endif /* __ASSEMBLY__ */
+
+-#ifdef CONFIG_PPC_MM_SLICES
+-
+-#define SLICE_LOW_SHIFT 28
+-#define SLICE_HIGH_SHIFT 40
+-
+-#define SLICE_LOW_TOP (0x100000000ul)
+-#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
+-#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
+-
+-#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
+-#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
+-
+-#ifndef __ASSEMBLY__
+-struct mm_struct;
+-
+-extern unsigned long slice_get_unmapped_area(unsigned long addr,
+- unsigned long len,
+- unsigned long flags,
+- unsigned int psize,
+- int topdown);
+-
+-extern unsigned int get_slice_psize(struct mm_struct *mm,
+- unsigned long addr);
+-
+-extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
+-extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
+- unsigned long len, unsigned int psize);
+-
+-#endif /* __ASSEMBLY__ */
+-#else
+-#define slice_init()
+-#ifdef CONFIG_PPC_BOOK3S_64
+-#define get_slice_psize(mm, addr) ((mm)->context.user_psize)
+-#define slice_set_user_psize(mm, psize) \
+-do { \
+- (mm)->context.user_psize = (psize); \
+- (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
+-} while (0)
+-#else /* !CONFIG_PPC_BOOK3S_64 */
+-#ifdef CONFIG_PPC_64K_PAGES
+-#define get_slice_psize(mm, addr) MMU_PAGE_64K
+-#else /* CONFIG_PPC_64K_PAGES */
+-#define get_slice_psize(mm, addr) MMU_PAGE_4K
+-#endif /* !CONFIG_PPC_64K_PAGES */
+-#define slice_set_user_psize(mm, psize) do { BUG(); } while(0)
+-#endif /* CONFIG_PPC_BOOK3S_64 */
+-
+-#define slice_set_range_psize(mm, start, len, psize) \
+- slice_set_user_psize((mm), (psize))
+-#endif /* CONFIG_PPC_MM_SLICES */
+-
+-#ifdef CONFIG_HUGETLB_PAGE
+-
+-#ifdef CONFIG_PPC_MM_SLICES
+-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+-#endif
+-
+-#endif /* !CONFIG_HUGETLB_PAGE */
+-
+ #define VM_DATA_DEFAULT_FLAGS \
+ (is_32bit_task() ? \
+ VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
+--- /dev/null
++++ b/arch/powerpc/include/asm/slice.h
+@@ -0,0 +1,40 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_POWERPC_SLICE_H
++#define _ASM_POWERPC_SLICE_H
++
++#ifdef CONFIG_PPC_BOOK3S_64
++#include <asm/book3s/64/slice.h>
++#else
++#include <asm/nohash/64/slice.h>
++#endif
++
++#ifdef CONFIG_PPC_MM_SLICES
++
++#ifdef CONFIG_HUGETLB_PAGE
++#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
++#endif
++#define HAVE_ARCH_UNMAPPED_AREA
++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
++
++#ifndef __ASSEMBLY__
++
++struct mm_struct;
++
++unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
++ unsigned long flags, unsigned int psize,
++ int topdown);
++
++unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
++
++void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
++void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
++ unsigned long len, unsigned int psize);
++#endif /* __ASSEMBLY__ */
++
++#else /* CONFIG_PPC_MM_SLICES */
++
++#define slice_set_range_psize(mm, start, len, psize) \
++ slice_set_user_psize((mm), (psize))
++#endif /* CONFIG_PPC_MM_SLICES */
++
++#endif /* _ASM_POWERPC_SLICE_H */
--- /dev/null
+From db3a528db41caaa6dfd4c64e9f5efb1c81a80467 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Thu, 22 Feb 2018 15:27:24 +0100
+Subject: powerpc/mm/slice: Enhance for supporting PPC32
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit db3a528db41caaa6dfd4c64e9f5efb1c81a80467 upstream.
+
+In preparation for the following patch which will fix an issue on
+the 8xx by re-using the 'slices', this patch enhances the
+'slices' implementation to support 32 bits CPUs.
+
+On PPC32, the address space is limited to 4Gbytes, hence only the low
+slices will be used.
+
+The high slices use bitmaps. As bitmap functions are not prepared to
+handle bitmaps of size 0, this patch ensures that bitmap functions
+are called only when SLICE_NUM_HIGH is not nul.
+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/nohash/32/slice.h | 18 ++++++++++++++
+ arch/powerpc/include/asm/slice.h | 4 ++-
+ arch/powerpc/mm/slice.c | 37 ++++++++++++++++++++++-------
+ 3 files changed, 50 insertions(+), 9 deletions(-)
+
+--- /dev/null
++++ b/arch/powerpc/include/asm/nohash/32/slice.h
+@@ -0,0 +1,18 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H
++#define _ASM_POWERPC_NOHASH_32_SLICE_H
++
++#ifdef CONFIG_PPC_MM_SLICES
++
++#define SLICE_LOW_SHIFT 28
++#define SLICE_LOW_TOP (0x100000000ull)
++#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
++#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
++
++#define SLICE_HIGH_SHIFT 0
++#define SLICE_NUM_HIGH 0ul
++#define GET_HIGH_SLICE_INDEX(addr) (addr & 0)
++
++#endif /* CONFIG_PPC_MM_SLICES */
++
++#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */
+--- a/arch/powerpc/include/asm/slice.h
++++ b/arch/powerpc/include/asm/slice.h
+@@ -4,8 +4,10 @@
+
+ #ifdef CONFIG_PPC_BOOK3S_64
+ #include <asm/book3s/64/slice.h>
+-#else
++#elif defined(CONFIG_PPC64)
+ #include <asm/nohash/64/slice.h>
++#elif defined(CONFIG_PPC_MMU_NOHASH)
++#include <asm/nohash/32/slice.h>
+ #endif
+
+ #ifdef CONFIG_PPC_MM_SLICES
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -73,10 +73,12 @@ static void slice_range_to_mask(unsigned
+ unsigned long end = start + len - 1;
+
+ ret->low_slices = 0;
+- bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
++ if (SLICE_NUM_HIGH)
++ bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
+
+ if (start < SLICE_LOW_TOP) {
+- unsigned long mend = min(end, (SLICE_LOW_TOP - 1));
++ unsigned long mend = min(end,
++ (unsigned long)(SLICE_LOW_TOP - 1));
+
+ ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
+ - (1u << GET_LOW_SLICE_INDEX(start));
+@@ -113,11 +115,13 @@ static int slice_high_has_vma(struct mm_
+ unsigned long start = slice << SLICE_HIGH_SHIFT;
+ unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
+
++#ifdef CONFIG_PPC64
+ /* Hack, so that each addresses is controlled by exactly one
+ * of the high or low area bitmaps, the first high area starts
+ * at 4GB, not 0 */
+ if (start == 0)
+ start = SLICE_LOW_TOP;
++#endif
+
+ return !slice_area_is_free(mm, start, end - start);
+ }
+@@ -128,7 +132,8 @@ static void slice_mask_for_free(struct m
+ unsigned long i;
+
+ ret->low_slices = 0;
+- bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
++ if (SLICE_NUM_HIGH)
++ bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
+
+ for (i = 0; i < SLICE_NUM_LOW; i++)
+ if (!slice_low_has_vma(mm, i))
+@@ -151,7 +156,8 @@ static void slice_mask_for_size(struct m
+ u64 lpsizes;
+
+ ret->low_slices = 0;
+- bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
++ if (SLICE_NUM_HIGH)
++ bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
+
+ lpsizes = mm->context.low_slices_psize;
+ for (i = 0; i < SLICE_NUM_LOW; i++)
+@@ -180,6 +186,10 @@ static int slice_check_fit(struct mm_str
+ */
+ unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);
+
++ if (!SLICE_NUM_HIGH)
++ return (mask.low_slices & available.low_slices) ==
++ mask.low_slices;
++
+ bitmap_and(result, mask.high_slices,
+ available.high_slices, slice_count);
+
+@@ -189,6 +199,7 @@ static int slice_check_fit(struct mm_str
+
+ static void slice_flush_segments(void *parm)
+ {
++#ifdef CONFIG_PPC64
+ struct mm_struct *mm = parm;
+ unsigned long flags;
+
+@@ -200,6 +211,7 @@ static void slice_flush_segments(void *p
+ local_irq_save(flags);
+ slb_flush_and_rebolt();
+ local_irq_restore(flags);
++#endif
+ }
+
+ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
+@@ -389,6 +401,8 @@ static unsigned long slice_find_area(str
+ static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
+ {
+ dst->low_slices |= src->low_slices;
++ if (!SLICE_NUM_HIGH)
++ return;
+ bitmap_or(dst->high_slices, dst->high_slices, src->high_slices,
+ SLICE_NUM_HIGH);
+ }
+@@ -397,6 +411,8 @@ static inline void slice_andnot_mask(str
+ {
+ dst->low_slices &= ~src->low_slices;
+
++ if (!SLICE_NUM_HIGH)
++ return;
+ bitmap_andnot(dst->high_slices, dst->high_slices, src->high_slices,
+ SLICE_NUM_HIGH);
+ }
+@@ -446,14 +462,17 @@ unsigned long slice_get_unmapped_area(un
+ * init different masks
+ */
+ mask.low_slices = 0;
+- bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);
+
+ /* silence stupid warning */;
+ potential_mask.low_slices = 0;
+- bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);
+
+ compat_mask.low_slices = 0;
+- bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
++
++ if (SLICE_NUM_HIGH) {
++ bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);
++ bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);
++ bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
++ }
+
+ /* Sanity checks */
+ BUG_ON(mm->task_size == 0);
+@@ -591,7 +610,9 @@ unsigned long slice_get_unmapped_area(un
+ convert:
+ slice_andnot_mask(&mask, &good_mask);
+ slice_andnot_mask(&mask, &compat_mask);
+- if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) {
++ if (mask.low_slices ||
++ (SLICE_NUM_HIGH &&
++ !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH))) {
+ slice_convert(mm, mask, psize);
+ if (psize > MMU_PAGE_BASE)
+ on_each_cpu(slice_flush_segments, mm, 1);
--- /dev/null
+From 326691ad4f179e6edc7eb1271e618dd673e4736d Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Thu, 22 Feb 2018 15:27:20 +0100
+Subject: powerpc/mm/slice: Remove intermediate bitmap copy
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit 326691ad4f179e6edc7eb1271e618dd673e4736d upstream.
+
+bitmap_or() and bitmap_andnot() can work properly with dst identical
+to src1 or src2. There is no need of an intermediate result bitmap
+that is copied back to dst in a second step.
+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/slice.c | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -388,21 +388,17 @@ static unsigned long slice_find_area(str
+
+ static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
+ {
+- DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+-
+ dst->low_slices |= src->low_slices;
+- bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
+- bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
++ bitmap_or(dst->high_slices, dst->high_slices, src->high_slices,
++ SLICE_NUM_HIGH);
+ }
+
+ static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
+ {
+- DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+-
+ dst->low_slices &= ~src->low_slices;
+
+- bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
+- bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
++ bitmap_andnot(dst->high_slices, dst->high_slices, src->high_slices,
++ SLICE_NUM_HIGH);
+ }
+
+ #ifdef CONFIG_PPC_64K_PAGES
block-null_blk-fix-invalid-parameters-when-loading-module.patch
dmaengine-pl330-fix-a-race-condition-in-case-of-threaded-irqs.patch
arm-dts-keystone-k2e-clocks-fix-missing-unit-address-separator.patch
+powerpc-mm-slice-remove-intermediate-bitmap-copy.patch
+powerpc-mm-slice-create-header-files-dedicated-to-slices.patch
+powerpc-mm-slice-enhance-for-supporting-ppc32.patch
powerpc-mm-slice-fix-hugepage-allocation-at-hint-address-on-8xx.patch
i2c-core-report-of-style-module-alias-for-devices-registered-via-of.patch
dmaengine-rcar-dmac-check-the-done-lists-in-rcar_dmac_chan_get_residue.patch