--- /dev/null
+From 32bec2afa525149288e6696079bc85f747fa2138 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Tue, 31 Oct 2017 21:12:35 -0400
+Subject: drm/amdgpu: allow harvesting check for Polaris VCE
+
+From: Leo Liu <leo.liu@amd.com>
+
+commit 32bec2afa525149288e6696079bc85f747fa2138 upstream.
+
+Fixes init failures on Polaris cards with harvested
+VCE blocks.
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+@@ -365,15 +365,10 @@ static unsigned vce_v3_0_get_harvest_con
+ {
+ u32 tmp;
+
+- /* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
+ if ((adev->asic_type == CHIP_FIJI) ||
+- (adev->asic_type == CHIP_STONEY) ||
+- (adev->asic_type == CHIP_POLARIS10) ||
+- (adev->asic_type == CHIP_POLARIS11) ||
+- (adev->asic_type == CHIP_POLARIS12))
++ (adev->asic_type == CHIP_STONEY))
+ return AMDGPU_VCE_HARVEST_VCE1;
+
+- /* Tonga and CZ are dual or single pipe */
+ if (adev->flags & AMD_IS_APU)
+ tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
+ VCE_HARVEST_FUSE_MACRO__MASK) >>
+@@ -391,6 +386,11 @@ static unsigned vce_v3_0_get_harvest_con
+ case 3:
+ return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
+ default:
++ if ((adev->asic_type == CHIP_POLARIS10) ||
++ (adev->asic_type == CHIP_POLARIS11) ||
++ (adev->asic_type == CHIP_POLARIS12))
++ return AMDGPU_VCE_HARVEST_VCE1;
++
+ return 0;
+ }
+ }
--- /dev/null
+From cb4b02d7cac56a69d8137d8d843507cca9182aed Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Tue, 31 Oct 2017 21:03:39 -0400
+Subject: drm/amdgpu: return -ENOENT from uvd 6.0 early init for harvesting
+
+From: Leo Liu <leo.liu@amd.com>
+
+commit cb4b02d7cac56a69d8137d8d843507cca9182aed upstream.
+
+Fixes init failures on Polaris cards with harvested UVD.
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -93,6 +93,10 @@ static int uvd_v6_0_early_init(void *han
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ if (!(adev->flags & AMD_IS_APU) &&
++ (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
++ return -ENOENT;
++
+ uvd_v6_0_set_ring_funcs(adev);
+ uvd_v6_0_set_irq_funcs(adev);
+
--- /dev/null
+From 8777b927b92cf5b6c29f9f9d3c737addea9ac8a7 Mon Sep 17 00:00:00 2001
+From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Date: Thu, 19 Oct 2017 17:13:40 +0200
+Subject: drm/i915: Do not rely on wm preservation for ILK watermarks
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+
+commit 8777b927b92cf5b6c29f9f9d3c737addea9ac8a7 upstream.
+
+The original intent was to preserve watermarks as much as possible
+in intel_pipe_wm.raw_wm, and put the validated ones in intel_pipe_wm.wm.
+
+It seems this approach is insufficient and we don't always preserve
+the raw watermarks, so just use the atomic iterator we're already using
+to get a const pointer to all bound planes on the crtc.
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=102373
+Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Acked-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20171019151341.4579-1-maarten.lankhorst@linux.intel.com
+(cherry picked from commit 28283f4f359cd7cfa9e65457bb98c507a2cd0cd0)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_drv.h | 1
+ drivers/gpu/drm/i915/intel_pm.c | 51 ++++++++++++++++-----------------------
+ 2 files changed, 21 insertions(+), 31 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -495,7 +495,6 @@ struct intel_crtc_scaler_state {
+
+ struct intel_pipe_wm {
+ struct intel_wm_level wm[5];
+- struct intel_wm_level raw_wm[5];
+ uint32_t linetime;
+ bool fbc_wm_enabled;
+ bool pipe_enabled;
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -2696,9 +2696,9 @@ static void ilk_compute_wm_level(const s
+ const struct intel_crtc *intel_crtc,
+ int level,
+ struct intel_crtc_state *cstate,
+- struct intel_plane_state *pristate,
+- struct intel_plane_state *sprstate,
+- struct intel_plane_state *curstate,
++ const struct intel_plane_state *pristate,
++ const struct intel_plane_state *sprstate,
++ const struct intel_plane_state *curstate,
+ struct intel_wm_level *result)
+ {
+ uint16_t pri_latency = dev_priv->wm.pri_latency[level];
+@@ -3016,28 +3016,24 @@ static int ilk_compute_pipe_wm(struct in
+ struct intel_pipe_wm *pipe_wm;
+ struct drm_device *dev = state->dev;
+ const struct drm_i915_private *dev_priv = to_i915(dev);
+- struct intel_plane *intel_plane;
+- struct intel_plane_state *pristate = NULL;
+- struct intel_plane_state *sprstate = NULL;
+- struct intel_plane_state *curstate = NULL;
++ struct drm_plane *plane;
++ const struct drm_plane_state *plane_state;
++ const struct intel_plane_state *pristate = NULL;
++ const struct intel_plane_state *sprstate = NULL;
++ const struct intel_plane_state *curstate = NULL;
+ int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
+ struct ilk_wm_maximums max;
+
+ pipe_wm = &cstate->wm.ilk.optimal;
+
+- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+- struct intel_plane_state *ps;
++ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
++ const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
+
+- ps = intel_atomic_get_existing_plane_state(state,
+- intel_plane);
+- if (!ps)
+- continue;
+-
+- if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
++ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ pristate = ps;
+- else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
++ else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+ sprstate = ps;
+- else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
++ else if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ curstate = ps;
+ }
+
+@@ -3059,11 +3055,9 @@ static int ilk_compute_pipe_wm(struct in
+ if (pipe_wm->sprites_scaled)
+ usable_level = 0;
+
+- ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+- pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
+-
+ memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
+- pipe_wm->wm[0] = pipe_wm->raw_wm[0];
++ ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
++ pristate, sprstate, curstate, &pipe_wm->wm[0]);
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
+@@ -3073,8 +3067,8 @@ static int ilk_compute_pipe_wm(struct in
+
+ ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
+
+- for (level = 1; level <= max_level; level++) {
+- struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
++ for (level = 1; level <= usable_level; level++) {
++ struct intel_wm_level *wm = &pipe_wm->wm[level];
+
+ ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
+ pristate, sprstate, curstate, wm);
+@@ -3084,13 +3078,10 @@ static int ilk_compute_pipe_wm(struct in
+ * register maximums since such watermarks are
+ * always invalid.
+ */
+- if (level > usable_level)
+- continue;
+-
+- if (ilk_validate_wm_level(level, &max, wm))
+- pipe_wm->wm[level] = *wm;
+- else
+- usable_level = level;
++ if (!ilk_validate_wm_level(level, &max, wm)) {
++ memset(wm, 0, sizeof(*wm));
++ break;
++ }
+ }
+
+ return 0;
--- /dev/null
+From 7c838e2a9be5ab79b11c7f1520813bfdf0f45462 Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Thu, 26 Oct 2017 17:29:31 +0300
+Subject: drm/i915/edp: read edp display control registers unconditionally
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+commit 7c838e2a9be5ab79b11c7f1520813bfdf0f45462 upstream.
+
+Per my reading of the eDP spec, DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
+DP_EDP_CONFIGURATION_CAP should be set if the eDP display control
+registers starting at offset DP_EDP_DPCD_REV are "enabled". Currently we
+check the bit before reading the registers, and DP_EDP_DPCD_REV is the
+only way to detect eDP revision.
+
+Turns out there are (likely buggy) displays that require eDP 1.4+
+features, such as supported link rates and link rate select, but do not
+have the bit set. Read the display control registers
+unconditionally. They are supposed to read zero anyway if they are not
+supported, so there should be no harm in this.
+
+This fixes the referenced bug by enabling the eDP version check, and
+thus reading of the supported link rates. The panel in question has 0 in
+DP_MAX_LINK_RATE which is only supported in eDP 1.4+. Without the
+supported link rates method we default to RBR which is insufficient for
+the panel native mode. As a curiosity, the panel also has a bogus value
+of 0x12 in DP_EDP_DPCD_REV, but that passes our check for >= DP_EDP_14
+(which is 0x03).
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103400
+Reported-and-tested-by: Nicolas P. <issun.artiste@gmail.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Reviewed-by: Manasi Navare <manasi.d.navare@intel.com>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20171026142932.17737-1-jani.nikula@intel.com
+(cherry picked from commit 0501a3b0eb01ac2209ef6fce76153e5d6b07034e)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_dp.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -3687,9 +3687,16 @@ intel_edp_init_dpcd(struct intel_dp *int
+
+ }
+
+- /* Read the eDP Display control capabilities registers */
+- if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+- drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
++ /*
++ * Read the eDP display control registers.
++ *
++ * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
++ * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
++ * set, but require eDP 1.4+ detection (e.g. for supported link rates
++ * method). The display control registers should read zero if they're
++ * not supported anyway.
++ */
++ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+ intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+ sizeof(intel_dp->edp_dpcd))
+ DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
--- /dev/null
+From ab615a5b879292e83653be60aa82113f7c6f462d Mon Sep 17 00:00:00 2001
+From: Mike Kravetz <mike.kravetz@oracle.com>
+Date: Thu, 2 Nov 2017 15:59:41 -0700
+Subject: fs/hugetlbfs/inode.c: fix hwpoison reserve accounting
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+commit ab615a5b879292e83653be60aa82113f7c6f462d upstream.
+
+Calling madvise(MADV_HWPOISON) on a hugetlbfs page will result in bad
+(negative) reserved huge page counts. This may not happen immediately,
+but may happen later when the underlying file is removed or filesystem
+unmounted. For example:
+
+ AnonHugePages: 0 kB
+ ShmemHugePages: 0 kB
+ HugePages_Total: 1
+ HugePages_Free: 0
+ HugePages_Rsvd: 18446744073709551615
+ HugePages_Surp: 0
+ Hugepagesize: 2048 kB
+
+In routine hugetlbfs_error_remove_page(), hugetlb_fix_reserve_counts is
+called after remove_huge_page. hugetlb_fix_reserve_counts is designed
+to be called only if a failure is returned from
+hugetlb_unreserve_pages. Therefore, call hugetlb_unreserve_pages as
+required and only call hugetlb_fix_reserve_counts in the unlikely event
+that hugetlb_unreserve_pages returns an error.
+
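+A minimal userspace sketch of the trigger (hedged: the hugetlbfs mount
+point and the 2 MB huge page size are assumptions, not from the
+original report; MADV_HWPOISON needs CAP_SYS_ADMIN and a kernel built
+with CONFIG_MEMORY_FAILURE):
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <sys/mman.h>
+  #include <unistd.h>
+
+  #ifndef MADV_HWPOISON
+  #define MADV_HWPOISON 100       /* from asm-generic/mman-common.h */
+  #endif
+
+  #define LEN (2UL * 1024 * 1024) /* one 2 MB huge page */
+
+  int main(void)
+  {
+          /* hypothetical hugetlbfs mount point */
+          int fd = open("/dev/hugepages/poison", O_CREAT | O_RDWR, 0600);
+          char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE, MAP_SHARED,
+                         fd, 0);
+
+          if (fd < 0 || p == MAP_FAILED)
+                  return 1;
+          p[0] = 1;                       /* fault the huge page in */
+          /* poison the backing huge page */
+          if (madvise(p, LEN, MADV_HWPOISON))
+                  perror("madvise");
+          close(fd);
+          unlink("/dev/hugepages/poison");
+          /* without this fix, HugePages_Rsvd in /proc/meminfo can now
+           * underflow, as in the example above */
+          return 0;
+  }
+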
+Link: http://lkml.kernel.org/r/20171019230007.17043-2-mike.kravetz@oracle.com
+Fixes: 78bb920344b8 ("mm: hwpoison: dissolve in-use hugepage in unrecoverable memory error")
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
+Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/hugetlbfs/inode.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -855,9 +855,12 @@ static int hugetlbfs_error_remove_page(s
+ struct page *page)
+ {
+ struct inode *inode = mapping->host;
++ pgoff_t index = page->index;
+
+ remove_huge_page(page);
+- hugetlb_fix_reserve_counts(inode);
++ if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
++ hugetlb_fix_reserve_counts(inode);
++
+ return 0;
+ }
+
--- /dev/null
+From 6a2932a463d526e362a6b4e112be226f1d18d088 Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <weiyongjun1@huawei.com>
+Date: Fri, 13 Oct 2017 09:25:17 +0000
+Subject: MIPS: bpf: Fix a typo in build_one_insn()
+
+From: Wei Yongjun <weiyongjun1@huawei.com>
+
+commit 6a2932a463d526e362a6b4e112be226f1d18d088 upstream.
+
+Fix a typo in build_one_insn().
+
+Fixes: b6bd53f9c4e8 ("MIPS: Add missing file for eBPF JIT.")
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Patchwork: https://patchwork.linux-mips.org/patch/17491/
+Signed-off-by: James Hogan <jhogan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/net/ebpf_jit.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/net/ebpf_jit.c
++++ b/arch/mips/net/ebpf_jit.c
+@@ -1485,7 +1485,7 @@ ld_skb_common:
+ }
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ if (src < 0)
+- return dst;
++ return src;
+ if (BPF_MODE(insn->code) == BPF_XADD) {
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_W:
--- /dev/null
+From 77238e76b9156d28d86c1e31c00ed2960df0e4de Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <garsilva@embeddedor.com>
+Date: Tue, 31 Oct 2017 00:35:03 -0500
+Subject: MIPS: microMIPS: Fix incorrect mask in insn_table_MM
+
+From: Gustavo A. R. Silva <garsilva@embeddedor.com>
+
+commit 77238e76b9156d28d86c1e31c00ed2960df0e4de upstream.
+
+It seems that this is a typo and the proper bit masking is
+"RT | RS" instead of "RS | RS".
+
+This issue was detected with the help of Coccinelle.
+
+Fixes: d6b3314b49e1 ("MIPS: uasm: Add lh uam instruction")
+Reported-by: Julia Lawall <julia.lawall@lip6.fr>
+Signed-off-by: Gustavo A. R. Silva <garsilva@embeddedor.com>
+Reviewed-by: James Hogan <jhogan@kernel.org>
+Patchwork: https://patchwork.linux-mips.org/patch/17551/
+Signed-off-by: James Hogan <jhogan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/mm/uasm-micromips.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/mm/uasm-micromips.c
++++ b/arch/mips/mm/uasm-micromips.c
+@@ -80,7 +80,7 @@ static const struct insn const insn_tabl
+ [insn_jr] = {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS},
+ [insn_lb] = {M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ [insn_ld] = {0, 0},
+- [insn_lh] = {M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM},
++ [insn_lh] = {M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ [insn_ll] = {M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM},
+ [insn_lld] = {0, 0},
+ [insn_lui] = {M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM},
--- /dev/null
+From f677b77050c144bd4c515b91ea48bd0efe82355e Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Mon, 23 Oct 2017 19:20:56 +0200
+Subject: MIPS: smp-cmp: Use right include for task_struct
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit f677b77050c144bd4c515b91ea48bd0efe82355e upstream.
+
+When task_struct was moved, this MIPS code was neglected. Evidently
+nobody is using it anymore. This fixes the following build error:
+
+In file included from ./arch/mips/include/asm/thread_info.h:15:0,
+ from ./include/linux/thread_info.h:37,
+ from ./include/asm-generic/current.h:4,
+ from ./arch/mips/include/generated/asm/current.h:1,
+ from ./include/linux/sched.h:11,
+ from arch/mips/kernel/smp-cmp.c:22:
+arch/mips/kernel/smp-cmp.c: In function ‘cmp_boot_secondary’:
+./arch/mips/include/asm/processor.h:384:41: error: implicit declaration
+of function ‘task_stack_page’ [-Werror=implicit-function-declaration]
+ #define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
+ ^
+arch/mips/kernel/smp-cmp.c:84:21: note: in expansion of macro ‘__KSTK_TOS’
+ unsigned long sp = __KSTK_TOS(idle);
+ ^~~~~~~~~~
+
+Fixes: f3ac60671954 ("sched/headers: Move task-stack related APIs from <linux/sched.h> to <linux/sched/task_stack.h>")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Patchwork: https://patchwork.linux-mips.org/patch/17522/
+Signed-off-by: James Hogan <jhogan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/smp-cmp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/kernel/smp-cmp.c
++++ b/arch/mips/kernel/smp-cmp.c
+@@ -19,7 +19,7 @@
+ #undef DEBUG
+
+ #include <linux/kernel.h>
+-#include <linux/sched.h>
++#include <linux/sched/task_stack.h>
+ #include <linux/smp.h>
+ #include <linux/cpumask.h>
+ #include <linux/interrupt.h>
--- /dev/null
+From 9e8c399a88f0b87e41a894911475ed2a8f8dff9e Mon Sep 17 00:00:00 2001
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+Date: Wed, 27 Sep 2017 10:13:25 +0100
+Subject: MIPS: SMP: Fix deadlock & online race
+
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+
+commit 9e8c399a88f0b87e41a894911475ed2a8f8dff9e upstream.
+
+Commit 6f542ebeaee0 ("MIPS: Fix race on setting and getting
+cpu_online_mask") effectively reverted commit 8f46cca1e6c06 ("MIPS: SMP:
+Fix possibility of deadlock when bringing CPUs online") and thus has
+reinstated the possibility of deadlock.
+
+The commit was based on testing of kernel v4.4, where the CPU hotplug
+core code issued a BUG() if the starting CPU was not marked online when
+the boot CPU returned from __cpu_up. The commit fixes this race (in
+v4.4), but re-introduces the deadlock situation.
+
+As noted in the commit message, upstream differs in this area. Commit
+8df3e07e7f21f ("cpu/hotplug: Let upcoming cpu bring itself fully up")
+adds a completion event in the CPU hotplug core code, making this race
+impossible. However, people were unhappy with relying on the core code
+to do the right thing.
+
+To address the issues both commits were trying to fix, add a second
+completion event in the MIPS smp hotplug path. It removes the
+possibility of a race, since the MIPS smp hotplug code now synchronises
+both the boot and secondary CPUs before they return to the hotplug core
+code. It also addresses the deadlock by ensuring that the secondary CPU
+is not marked online before its counters are synchronised.
+
+This fix should also be backported to fix the race condition introduced
+by the backport of commit 8f46cca1e6c06 ("MIPS: SMP: Fix possibility of
+deadlock when bringing CPUs online"), though really that race only
+existed before commit 8df3e07e7f21f ("cpu/hotplug: Let upcoming cpu
+bring itself fully up").
+
+Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
+Fixes: 6f542ebeaee0 ("MIPS: Fix race on setting and getting cpu_online_mask")
+CC: Matija Glavinic Pecotic <matija.glavinic-pecotic.ext@nokia.com>
+Patchwork: https://patchwork.linux-mips.org/patch/17376/
+Signed-off-by: James Hogan <jhogan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/smp.c | 22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -66,6 +66,7 @@ EXPORT_SYMBOL(cpu_sibling_map);
+ cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+ EXPORT_SYMBOL(cpu_core_map);
+
++static DECLARE_COMPLETION(cpu_starting);
+ static DECLARE_COMPLETION(cpu_running);
+
+ /*
+@@ -376,6 +377,12 @@ asmlinkage void start_secondary(void)
+ cpumask_set_cpu(cpu, &cpu_coherent_mask);
+ notify_cpu_starting(cpu);
+
++ /* Notify boot CPU that we're starting & ready to sync counters */
++ complete(&cpu_starting);
++
++ synchronise_count_slave(cpu);
++
++ /* The CPU is running and counters synchronised, now mark it online */
+ set_cpu_online(cpu, true);
+
+ set_cpu_sibling_map(cpu);
+@@ -383,8 +390,11 @@ asmlinkage void start_secondary(void)
+
+ calculate_cpu_foreign_map();
+
++ /*
++ * Notify boot CPU that we're up & online and it can safely return
++ * from __cpu_up
++ */
+ complete(&cpu_running);
+- synchronise_count_slave(cpu);
+
+ /*
+ * irq will be enabled in ->smp_finish(), enabling it too early
+@@ -443,17 +453,17 @@ int __cpu_up(unsigned int cpu, struct ta
+ {
+ mp_ops->boot_secondary(cpu, tidle);
+
+- /*
+- * We must check for timeout here, as the CPU will not be marked
+- * online until the counters are synchronised.
+- */
+- if (!wait_for_completion_timeout(&cpu_running,
++ /* Wait for CPU to start and be ready to sync counters */
++ if (!wait_for_completion_timeout(&cpu_starting,
+ msecs_to_jiffies(1000))) {
+ pr_crit("CPU%u: failed to start\n", cpu);
+ return -EIO;
+ }
+
+ synchronise_count_master(cpu);
++
++ /* Wait for CPU to finish startup & mark itself online before return */
++ wait_for_completion(&cpu_running);
+ return 0;
+ }
+
--- /dev/null
+From 2628bd6fc052bd85e9864dae4de494d8a6313391 Mon Sep 17 00:00:00 2001
+From: Huang Ying <ying.huang@intel.com>
+Date: Thu, 2 Nov 2017 15:59:50 -0700
+Subject: mm, swap: fix race between swap count continuation operations
+
+From: Huang Ying <ying.huang@intel.com>
+
+commit 2628bd6fc052bd85e9864dae4de494d8a6313391 upstream.
+
+One page may store a set of entries of the sis->swap_map
+(swap_info_struct->swap_map) in multiple swap clusters.
+
+If some of the entries have sis->swap_map[offset] > SWAP_MAP_MAX,
+multiple pages will be used to store the set of entries of the
+sis->swap_map. And the pages are linked with page->lru. This is called
+swap count continuation. To access the pages which store the set of
+entries of the sis->swap_map simultaneously, sis->lock was previously
+used. But to improve the scalability of __swap_duplicate(), the swap
+cluster lock may now be used in swap_count_continued(). This may race
+with add_swap_count_continuation() which operates on a nearby swap
+cluster, in which the sis->swap_map entries are stored in the same page.
+
+The race can cause wrong swap count in practice, thus cause unfreeable
+swap entries or software lockup, etc.
+
+To fix the race, a new spin lock called cont_lock is added to struct
+swap_info_struct to protect the swap count continuation page list. This
+is a lock at the swap device level, so the scalability isn't great.
+But it is still much better than the original sis->lock, because it is
+only acquired/released when swap count continuation is used, which is
+considered rare in practice. If it turns out that the scalability
+becomes an issue for some workloads, we can split the lock into some
+more fine-grained locks.
+
+Link: http://lkml.kernel.org/r/20171017081320.28133-1-ying.huang@intel.com
+Fixes: 235b62176712 ("mm/swap: add cluster lock")
+Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Shaohua Li <shli@kernel.org>
+Cc: Tim Chen <tim.c.chen@intel.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Aaron Lu <aaron.lu@intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/swap.h | 4 ++++
+ mm/swapfile.c | 23 +++++++++++++++++------
+ 2 files changed, 21 insertions(+), 6 deletions(-)
+
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -246,6 +246,10 @@ struct swap_info_struct {
+ * both locks need hold, hold swap_lock
+ * first.
+ */
++ spinlock_t cont_lock; /*
++ * protect swap count continuation page
++ * list.
++ */
+ struct work_struct discard_work; /* discard worker */
+ struct swap_cluster_list discard_clusters; /* discard clusters list */
+ };
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2635,6 +2635,7 @@ static struct swap_info_struct *alloc_sw
+ p->flags = SWP_USED;
+ spin_unlock(&swap_lock);
+ spin_lock_init(&p->lock);
++ spin_lock_init(&p->cont_lock);
+
+ return p;
+ }
+@@ -3307,6 +3308,7 @@ int add_swap_count_continuation(swp_entr
+ head = vmalloc_to_page(si->swap_map + offset);
+ offset &= ~PAGE_MASK;
+
++ spin_lock(&si->cont_lock);
+ /*
+ * Page allocation does not initialize the page's lru field,
+ * but it does always reset its private field.
+@@ -3326,7 +3328,7 @@ int add_swap_count_continuation(swp_entr
+ * a continuation page, free our allocation and use this one.
+ */
+ if (!(count & COUNT_CONTINUED))
+- goto out;
++ goto out_unlock_cont;
+
+ map = kmap_atomic(list_page) + offset;
+ count = *map;
+@@ -3337,11 +3339,13 @@ int add_swap_count_continuation(swp_entr
+ * free our allocation and use this one.
+ */
+ if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
+- goto out;
++ goto out_unlock_cont;
+ }
+
+ list_add_tail(&page->lru, &head->lru);
+ page = NULL; /* now it's attached, don't free it */
++out_unlock_cont:
++ spin_unlock(&si->cont_lock);
+ out:
+ unlock_cluster(ci);
+ spin_unlock(&si->lock);
+@@ -3366,6 +3370,7 @@ static bool swap_count_continued(struct
+ struct page *head;
+ struct page *page;
+ unsigned char *map;
++ bool ret;
+
+ head = vmalloc_to_page(si->swap_map + offset);
+ if (page_private(head) != SWP_CONTINUED) {
+@@ -3373,6 +3378,7 @@ static bool swap_count_continued(struct
+ return false; /* need to add count continuation */
+ }
+
++ spin_lock(&si->cont_lock);
+ offset &= ~PAGE_MASK;
+ page = list_entry(head->lru.next, struct page, lru);
+ map = kmap_atomic(page) + offset;
+@@ -3393,8 +3399,10 @@ static bool swap_count_continued(struct
+ if (*map == SWAP_CONT_MAX) {
+ kunmap_atomic(map);
+ page = list_entry(page->lru.next, struct page, lru);
+- if (page == head)
+- return false; /* add count continuation */
++ if (page == head) {
++ ret = false; /* add count continuation */
++ goto out;
++ }
+ map = kmap_atomic(page) + offset;
+ init_map: *map = 0; /* we didn't zero the page */
+ }
+@@ -3407,7 +3415,7 @@ init_map: *map = 0; /* we didn't zero
+ kunmap_atomic(map);
+ page = list_entry(page->lru.prev, struct page, lru);
+ }
+- return true; /* incremented */
++ ret = true; /* incremented */
+
+ } else { /* decrementing */
+ /*
+@@ -3433,8 +3441,11 @@ init_map: *map = 0; /* we didn't zero
+ kunmap_atomic(map);
+ page = list_entry(page->lru.prev, struct page, lru);
+ }
+- return count == COUNT_CONTINUED;
++ ret = count == COUNT_CONTINUED;
+ }
++out:
++ spin_unlock(&si->cont_lock);
++ return ret;
+ }
+
+ /*
--- /dev/null
+From 105ddc93f06ebe3e553f58563d11ed63dbcd59f0 Mon Sep 17 00:00:00 2001
+From: Ashish Samant <ashish.samant@oracle.com>
+Date: Thu, 2 Nov 2017 15:59:37 -0700
+Subject: ocfs2: fstrim: Fix start offset of first cluster group during fstrim
+
+From: Ashish Samant <ashish.samant@oracle.com>
+
+commit 105ddc93f06ebe3e553f58563d11ed63dbcd59f0 upstream.
+
+The first cluster group descriptor is not stored at the start of the
+group but at an offset from the start. We need to take this into
+account while doing fstrim on the first cluster group. Otherwise we
+will wrongly start fstrim a few blocks after the desired start block and
+the range can cross over into the next cluster group and zero out the
+group descriptor there. This can cause filesystem corruption that cannot
+be fixed by fsck.
+
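+For reference, fstrim(8) reaches this code through the FITRIM ioctl;
+a minimal sketch (the ocfs2 mount point is an assumption):
+
+  #include <fcntl.h>
+  #include <linux/fs.h>           /* FITRIM, struct fstrim_range */
+  #include <stdio.h>
+  #include <sys/ioctl.h>
+
+  int main(void)
+  {
+          struct fstrim_range range = {
+                  .start = 0,             /* byte offset into the fs */
+                  .len = (__u64)-1,       /* trim up to the end */
+                  .minlen = 0,            /* fs picks the minimum extent */
+          };
+          int fd = open("/mnt/ocfs2", O_RDONLY);  /* hypothetical mount */
+
+          if (fd < 0 || ioctl(fd, FITRIM, &range))
+                  perror("FITRIM");
+          else                    /* range.len returns bytes trimmed */
+                  printf("trimmed %llu bytes\n",
+                         (unsigned long long)range.len);
+          return 0;
+  }
+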
+Link: http://lkml.kernel.org/r/1507835579-7308-1-git-send-email-ashish.samant@oracle.com
+Signed-off-by: Ashish Samant <ashish.samant@oracle.com>
+Reviewed-by: Junxiao Bi <junxiao.bi@oracle.com>
+Reviewed-by: Joseph Qi <jiangqi903@gmail.com>
+Cc: Mark Fasheh <mfasheh@versity.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/alloc.c | 24 ++++++++++++++++++------
+ 1 file changed, 18 insertions(+), 6 deletions(-)
+
+--- a/fs/ocfs2/alloc.c
++++ b/fs/ocfs2/alloc.c
+@@ -7310,13 +7310,24 @@ out:
+
+ static int ocfs2_trim_extent(struct super_block *sb,
+ struct ocfs2_group_desc *gd,
+- u32 start, u32 count)
++ u64 group, u32 start, u32 count)
+ {
+ u64 discard, bcount;
++ struct ocfs2_super *osb = OCFS2_SB(sb);
+
+ bcount = ocfs2_clusters_to_blocks(sb, count);
+- discard = le64_to_cpu(gd->bg_blkno) +
+- ocfs2_clusters_to_blocks(sb, start);
++ discard = ocfs2_clusters_to_blocks(sb, start);
++
++ /*
++ * For the first cluster group, the gd->bg_blkno is not at the start
++ * of the group, but at an offset from the start. If we add it while
++ * calculating discard for first group, we will wrongly start fstrim a
++ * few blocks after the desired start block and the range can cross
++ * over into the next cluster group. So, add it only if this is not
++ * the first cluster group.
++ */
++ if (group != osb->first_cluster_group_blkno)
++ discard += le64_to_cpu(gd->bg_blkno);
+
+ trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount);
+
+@@ -7324,7 +7335,7 @@ static int ocfs2_trim_extent(struct supe
+ }
+
+ static int ocfs2_trim_group(struct super_block *sb,
+- struct ocfs2_group_desc *gd,
++ struct ocfs2_group_desc *gd, u64 group,
+ u32 start, u32 max, u32 minbits)
+ {
+ int ret = 0, count = 0, next;
+@@ -7343,7 +7354,7 @@ static int ocfs2_trim_group(struct super
+ next = ocfs2_find_next_bit(bitmap, max, start);
+
+ if ((next - start) >= minbits) {
+- ret = ocfs2_trim_extent(sb, gd,
++ ret = ocfs2_trim_extent(sb, gd, group,
+ start, next - start);
+ if (ret < 0) {
+ mlog_errno(ret);
+@@ -7441,7 +7452,8 @@ int ocfs2_trim_fs(struct super_block *sb
+ }
+
+ gd = (struct ocfs2_group_desc *)gd_bh->b_data;
+- cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen);
++ cnt = ocfs2_trim_group(sb, gd, group,
++ first_bit, last_bit, minlen);
+ brelse(gd_bh);
+ gd_bh = NULL;
+ if (cnt < 0) {
--- /dev/null
+From e6c4dcb308160115287afd87afb63b5684d75a5b Mon Sep 17 00:00:00 2001
+From: "Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com>
+Date: Mon, 30 Oct 2017 20:42:09 +0530
+Subject: powerpc/kprobes: Dereference function pointers only if the address does not belong to kernel text
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+commit e6c4dcb308160115287afd87afb63b5684d75a5b upstream.
+
+This makes the changes introduced in commit 83e840c770f2c5
+("powerpc64/elfv1: Only dereference function descriptor for non-text
+symbols") specific to the kprobes subsystem.
+
+We previously changed ppc_function_entry() to always check the provided
+address to confirm if it needed to be dereferenced. This is actually
+only an issue for kprobe blacklisted asm labels (through use of
+_ASM_NOKPROBE_SYMBOL) and can cause other issues with ftrace. Also, the
+additional checks are not really necessary for our other uses.
+
+As such, move this check to the kprobes subsystem.
+
+Fixes: 83e840c770f2 ("powerpc64/elfv1: Only dereference function descriptor for non-text symbols")
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/kprobes.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -600,7 +600,12 @@ NOKPROBE_SYMBOL(kprobe_fault_handler);
+
+ unsigned long arch_deref_entry_point(void *entry)
+ {
+- return ppc_global_function_entry(entry);
++#ifdef PPC64_ELF_ABI_v1
++ if (!kernel_text_address((unsigned long)entry))
++ return ppc_global_function_entry(entry);
++ else
++#endif
++ return (unsigned long)entry;
+ }
+ NOKPROBE_SYMBOL(arch_deref_entry_point);
+
--- /dev/null
+From 63be1a81e40733ecd175713b6a7558dc43f00851 Mon Sep 17 00:00:00 2001
+From: "Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com>
+Date: Mon, 30 Oct 2017 20:42:08 +0530
+Subject: Revert "powerpc64/elfv1: Only dereference function descriptor for non-text symbols"
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+commit 63be1a81e40733ecd175713b6a7558dc43f00851 upstream.
+
+This reverts commit 83e840c770f2c5 ("powerpc64/elfv1: Only dereference
+function descriptor for non-text symbols").
+
+Chandan reported that on newer kernels, trying to enable function_graph
+tracer on ppc64 (BE) locks up the system with the following trace:
+
+ Unable to handle kernel paging request for data at address 0x600000002fa30010
+ Faulting instruction address: 0xc0000000001f1300
+ Thread overran stack, or stack corrupted
+ Oops: Kernel access of bad area, sig: 11 [#1]
+ BE SMP NR_CPUS=2048 DEBUG_PAGEALLOC NUMA pSeries
+ Modules linked in:
+ CPU: 1 PID: 6586 Comm: bash Not tainted 4.14.0-rc3-00162-g6e51f1f-dirty #20
+ task: c000000625c07200 task.stack: c000000625c07310
+ NIP: c0000000001f1300 LR: c000000000121cac CTR: c000000000061af8
+ REGS: c000000625c088c0 TRAP: 0380 Not tainted (4.14.0-rc3-00162-g6e51f1f-dirty)
+ MSR: 8000000000001032 <SF,ME,IR,DR,RI> CR: 28002848 XER: 00000000
+ CFAR: c0000000001f1320 SOFTE: 0
+ ...
+ NIP [c0000000001f1300] .__is_insn_slot_addr+0x30/0x90
+ LR [c000000000121cac] .kernel_text_address+0x18c/0x1c0
+ Call Trace:
+ [c000000625c08b40] [c0000000001bd040] .is_module_text_address+0x20/0x40 (unreliable)
+ [c000000625c08bc0] [c000000000121cac] .kernel_text_address+0x18c/0x1c0
+ [c000000625c08c50] [c000000000061960] .prepare_ftrace_return+0x50/0x130
+ [c000000625c08cf0] [c000000000061b10] .ftrace_graph_caller+0x14/0x34
+ [c000000625c08d60] [c000000000121b40] .kernel_text_address+0x20/0x1c0
+ [c000000625c08df0] [c000000000061960] .prepare_ftrace_return+0x50/0x130
+ ...
+ [c000000625c0ab30] [c000000000061960] .prepare_ftrace_return+0x50/0x130
+ [c000000625c0abd0] [c000000000061b10] .ftrace_graph_caller+0x14/0x34
+ [c000000625c0ac40] [c000000000121b40] .kernel_text_address+0x20/0x1c0
+ [c000000625c0acd0] [c000000000061960] .prepare_ftrace_return+0x50/0x130
+ [c000000625c0ad70] [c000000000061b10] .ftrace_graph_caller+0x14/0x34
+ [c000000625c0ade0] [c000000000121b40] .kernel_text_address+0x20/0x1c0
+
+This is because ftrace is using ppc_function_entry() for obtaining the
+address of return_to_handler() in prepare_ftrace_return(). The call to
+kernel_text_address() itself gets traced and we end up in a recursive
+loop.
+
+Fixes: 83e840c770f2 ("powerpc64/elfv1: Only dereference function descriptor for non-text symbols")
+Reported-by: Chandan Rajendra <chandan@linux.vnet.ibm.com>
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/code-patching.h | 10 +---------
+ 1 file changed, 1 insertion(+), 9 deletions(-)
+
+--- a/arch/powerpc/include/asm/code-patching.h
++++ b/arch/powerpc/include/asm/code-patching.h
+@@ -83,16 +83,8 @@ static inline unsigned long ppc_function
+ * On PPC64 ABIv1 the function pointer actually points to the
+ * function's descriptor. The first entry in the descriptor is the
+ * address of the function text.
+- *
+- * However, we may also receive pointer to an assembly symbol. To
+- * detect that, we first check if the function pointer we receive
+- * already points to kernel/module text and we only dereference it
+- * if it doesn't.
+ */
+- if (kernel_text_address((unsigned long)func))
+- return (unsigned long)func;
+- else
+- return ((func_descr_t *)func)->entry;
++ return ((func_descr_t *)func)->entry;
+ #else
+ return (unsigned long)func;
+ #endif
--- /dev/null
+From 890da9cf098364b11a7f7f5c22fa652531624d03 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Thu, 2 Nov 2017 14:06:32 -0700
+Subject: Revert "x86: do not use cpufreq_quick_get() for /proc/cpuinfo "cpu MHz""
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 890da9cf098364b11a7f7f5c22fa652531624d03 upstream.
+
+This reverts commit 51204e0639c49ada02fd823782ad673b6326d748.
+
+There wasn't really any good reason for it, and people are complaining
+(rightly) that it broke existing practice.
+
+Cc: Len Brown <len.brown@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/proc.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/proc.c
++++ b/arch/x86/kernel/cpu/proc.c
+@@ -2,6 +2,7 @@
+ #include <linux/timex.h>
+ #include <linux/string.h>
+ #include <linux/seq_file.h>
++#include <linux/cpufreq.h>
+
+ /*
+ * Get CPU information for use by the procfs.
+@@ -75,9 +76,14 @@ static int show_cpuinfo(struct seq_file
+ if (c->microcode)
+ seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
+
+- if (cpu_has(c, X86_FEATURE_TSC))
++ if (cpu_has(c, X86_FEATURE_TSC)) {
++ unsigned int freq = cpufreq_quick_get(cpu);
++
++ if (!freq)
++ freq = cpu_khz;
+ seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
+- cpu_khz / 1000, (cpu_khz % 1000));
++ freq / 1000, (freq % 1000));
++ }
+
+ /* Cache size */
+ if (c->x86_cache_size >= 0)
arm-arm64-kvm-disable-branch-profiling-in-hyp-code.patch
arm-dts-mvebu-pl310-cache-disable-double-linefill.patch
arm-8715-1-add-a-private-asm-unaligned.h.patch
+drm-amdgpu-return-enoent-from-uvd-6.0-early-init-for-harvesting.patch
+drm-amdgpu-allow-harvesting-check-for-polaris-vce.patch
+userfaultfd-hugetlbfs-prevent-uffdio_copy-to-fill-beyond-the-end-of-i_size.patch
+ocfs2-fstrim-fix-start-offset-of-first-cluster-group-during-fstrim.patch
+fs-hugetlbfs-inode.c-fix-hwpoison-reserve-accounting.patch
+mm-swap-fix-race-between-swap-count-continuation-operations.patch
+drm-i915-do-not-rely-on-wm-preservation-for-ilk-watermarks.patch
+drm-i915-edp-read-edp-display-control-registers-unconditionally.patch
+revert-powerpc64-elfv1-only-dereference-function-descriptor-for-non-text-symbols.patch
+mips-bpf-fix-a-typo-in-build_one_insn.patch
+mips-smp-cmp-use-right-include-for-task_struct.patch
+mips-micromips-fix-incorrect-mask-in-insn_table_mm.patch
+mips-smp-fix-deadlock-online-race.patch
+revert-x86-do-not-use-cpufreq_quick_get-for-proc-cpuinfo-cpu-mhz.patch
+x86-cpu-fix-up-cpu-mhz-in-proc-cpuinfo.patch
+powerpc-kprobes-dereference-function-pointers-only-if-the-address-does-not-belong-to-kernel-text.patch
--- /dev/null
+From 1e3921471354244f70fe268586ff94a97a6dd4df Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Thu, 2 Nov 2017 15:59:29 -0700
+Subject: userfaultfd: hugetlbfs: prevent UFFDIO_COPY to fill beyond the end of i_size
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit 1e3921471354244f70fe268586ff94a97a6dd4df upstream.
+
+This oops:
+
+ kernel BUG at fs/hugetlbfs/inode.c:484!
+ RIP: remove_inode_hugepages+0x3d0/0x410
+ Call Trace:
+ hugetlbfs_setattr+0xd9/0x130
+ notify_change+0x292/0x410
+ do_truncate+0x65/0xa0
+ do_sys_ftruncate.constprop.3+0x11a/0x180
+ SyS_ftruncate+0xe/0x10
+ tracesys+0xd9/0xde
+
+was caused by the lack of an i_size check in hugetlb_mcopy_atomic_pte.
+
+mmap() can still succeed beyond the end of the i_size after vmtruncate
+zapped vmas in those ranges, but the faults must not succeed, and that
+includes UFFDIO_COPY.
+
+We could differentiate the retval to userland to represent a SIGBUS like
+a page fault would do (vs SIGSEGV), but it doesn't seem very useful and
+we'd need to pick a random retval as there's no meaningful syscall
+retval that would differentiate from SIGSEGV and SIGBUS; there's just
+-EFAULT.
+
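+For context, the userspace side of the operation looks roughly like
+this (a sketch only; the userfaultfd setup via UFFDIO_API and
+UFFDIO_REGISTER on the hugetlbfs range is omitted):
+
+  #include <errno.h>
+  #include <linux/userfaultfd.h>  /* UFFDIO_COPY, struct uffdio_copy */
+  #include <stdio.h>
+  #include <sys/ioctl.h>
+
+  /* dst must already be registered with UFFDIO_REGISTER */
+  static int uffd_copy(int uffd, void *dst, void *src, unsigned long len)
+  {
+          struct uffdio_copy copy = {
+                  .dst = (unsigned long)dst,
+                  .src = (unsigned long)src,
+                  .len = len,
+                  .mode = 0,
+          };
+
+          if (ioctl(uffd, UFFDIO_COPY, &copy) == -1) {
+                  /* with this fix, a dst beyond the i_size of the
+                   * backing hugetlbfs file fails here with EFAULT */
+                  perror("UFFDIO_COPY");
+                  return -errno;
+          }
+          return 0;
+  }
+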
+Link: http://lkml.kernel.org/r/20171016223914.2421-2-aarcange@redhat.com
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/hugetlb.c | 32 ++++++++++++++++++++++++++++++--
+ 1 file changed, 30 insertions(+), 2 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3977,6 +3977,9 @@ int hugetlb_mcopy_atomic_pte(struct mm_s
+ unsigned long src_addr,
+ struct page **pagep)
+ {
++ struct address_space *mapping;
++ pgoff_t idx;
++ unsigned long size;
+ int vm_shared = dst_vma->vm_flags & VM_SHARED;
+ struct hstate *h = hstate_vma(dst_vma);
+ pte_t _dst_pte;
+@@ -4014,13 +4017,24 @@ int hugetlb_mcopy_atomic_pte(struct mm_s
+ __SetPageUptodate(page);
+ set_page_huge_active(page);
+
++ mapping = dst_vma->vm_file->f_mapping;
++ idx = vma_hugecache_offset(h, dst_vma, dst_addr);
++
+ /*
+ * If shared, add to page cache
+ */
+ if (vm_shared) {
+- struct address_space *mapping = dst_vma->vm_file->f_mapping;
+- pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
++ size = i_size_read(mapping->host) >> huge_page_shift(h);
++ ret = -EFAULT;
++ if (idx >= size)
++ goto out_release_nounlock;
+
++ /*
++ * Serialization between remove_inode_hugepages() and
++ * huge_add_to_page_cache() below happens through the
++ * hugetlb_fault_mutex_table that here must be held by
++ * the caller.
++ */
+ ret = huge_add_to_page_cache(page, mapping, idx);
+ if (ret)
+ goto out_release_nounlock;
+@@ -4029,6 +4043,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_s
+ ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
+ spin_lock(ptl);
+
++ /*
++ * Recheck the i_size after holding PT lock to make sure not
++ * to leave any page mapped (as page_mapped()) beyond the end
++ * of the i_size (remove_inode_hugepages() is strict about
++ * enforcing that). If we bail out here, we'll also leave a
++ * page in the radix tree in the vm_shared case beyond the end
++ * of the i_size, but remove_inode_hugepages() will take care
++ * of it as soon as we drop the hugetlb_fault_mutex_table.
++ */
++ size = i_size_read(mapping->host) >> huge_page_shift(h);
++ ret = -EFAULT;
++ if (idx >= size)
++ goto out_release_unlock;
++
+ ret = -EEXIST;
+ if (!huge_pte_none(huge_ptep_get(dst_pte)))
+ goto out_release_unlock;
--- /dev/null
+From 941f5f0f6ef5338814145cf2b813cf1f98873e2f Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Fri, 3 Nov 2017 16:35:49 +0100
+Subject: x86: CPU: Fix up "cpu MHz" in /proc/cpuinfo
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 941f5f0f6ef5338814145cf2b813cf1f98873e2f upstream.
+
+Commit 890da9cf0983 (Revert "x86: do not use cpufreq_quick_get() for
+/proc/cpuinfo "cpu MHz"") is not sufficient to restore the previous
+behavior of "cpu MHz" in /proc/cpuinfo on x86 due to some changes
+made after the commit it has reverted.
+
+To address this, make the code in question use arch_freq_get_on_cpu(),
+which is also used by cpufreq for reporting the current frequency of
+CPUs, and since that function doesn't really depend on cpufreq in any
+way, drop the CONFIG_CPU_FREQ dependency for the object file
+containing it.
+
+Also refactor arch_freq_get_on_cpu() somewhat to avoid IPIs and
+return cached values right away if it is called very often over a
+short time (to prevent user space from triggering IPI storms through
+it).
+
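+The affected field is trivial to observe from userspace, e.g. (nothing
+here is specific to this patch):
+
+  #include <stdio.h>
+  #include <string.h>
+
+  int main(void)
+  {
+          char line[256];
+          FILE *f = fopen("/proc/cpuinfo", "r");
+
+          if (!f)
+                  return 1;
+          while (fgets(line, sizeof(line), f))
+                  if (!strncmp(line, "cpu MHz", 7))
+                          fputs(line, stdout);    /* one line per CPU */
+          fclose(f);
+          return 0;
+  }
+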
+Fixes: 890da9cf0983 (Revert "x86: do not use cpufreq_quick_get() for /proc/cpuinfo "cpu MHz"")
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/Makefile | 2 +-
+ arch/x86/kernel/cpu/aperfmperf.c | 11 +++++++----
+ arch/x86/kernel/cpu/proc.c | 4 +++-
+ 3 files changed, 11 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/cpu/Makefile
++++ b/arch/x86/kernel/cpu/Makefile
+@@ -21,7 +21,7 @@ obj-y += common.o
+ obj-y += rdrand.o
+ obj-y += match.o
+ obj-y += bugs.o
+-obj-$(CONFIG_CPU_FREQ) += aperfmperf.o
++obj-y += aperfmperf.o
+
+ obj-$(CONFIG_PROC_FS) += proc.o
+ obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
+--- a/arch/x86/kernel/cpu/aperfmperf.c
++++ b/arch/x86/kernel/cpu/aperfmperf.c
+@@ -42,10 +42,6 @@ static void aperfmperf_snapshot_khz(void
+ s64 time_delta = ktime_ms_delta(now, s->time);
+ unsigned long flags;
+
+- /* Don't bother re-computing within the cache threshold time. */
+- if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
+- return;
+-
+ local_irq_save(flags);
+ rdmsrl(MSR_IA32_APERF, aperf);
+ rdmsrl(MSR_IA32_MPERF, mperf);
+@@ -74,6 +70,7 @@ static void aperfmperf_snapshot_khz(void
+
+ unsigned int arch_freq_get_on_cpu(int cpu)
+ {
++ s64 time_delta;
+ unsigned int khz;
+
+ if (!cpu_khz)
+@@ -82,6 +79,12 @@ unsigned int arch_freq_get_on_cpu(int cp
+ if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+ return 0;
+
++ /* Don't bother re-computing within the cache threshold time. */
++ time_delta = ktime_ms_delta(ktime_get(), per_cpu(samples.time, cpu));
++ khz = per_cpu(samples.khz, cpu);
++ if (khz && time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
++ return khz;
++
+ smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
+ khz = per_cpu(samples.khz, cpu);
+ if (khz)
+--- a/arch/x86/kernel/cpu/proc.c
++++ b/arch/x86/kernel/cpu/proc.c
+@@ -77,9 +77,11 @@ static int show_cpuinfo(struct seq_file
+ seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
+
+ if (cpu_has(c, X86_FEATURE_TSC)) {
+- unsigned int freq = cpufreq_quick_get(cpu);
++ unsigned int freq = arch_freq_get_on_cpu(cpu);
+
+ if (!freq)
++ freq = cpufreq_quick_get(cpu);
++ if (!freq)
+ freq = cpu_khz;
+ seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
+ freq / 1000, (freq % 1000));