--- /dev/null
+From e406f12dde1a8375d77ea02d91f313fb1a9c6aec Mon Sep 17 00:00:00 2001
+From: Aditya Pakki <pakki001@umn.edu>
+Date: Mon, 4 Mar 2019 16:48:54 -0600
+Subject: md: Fix failed allocation of md_register_thread
+
+From: Aditya Pakki <pakki001@umn.edu>
+
+commit e406f12dde1a8375d77ea02d91f313fb1a9c6aec upstream.
+
+mddev->sync_thread can be set to NULL on kzalloc failure downstream.
+The patch checks for such a scenario and frees allocated resources.
+
+Committer note:
+
+Added similar fix to raid5.c, as suggested by Guoqing.
+
+Cc: stable@vger.kernel.org # v3.16+
+Acked-by: Guoqing Jiang <gqjiang@suse.com>
+Signed-off-by: Aditya Pakki <pakki001@umn.edu>
+Signed-off-by: Song Liu <songliubraving@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid10.c | 2 ++
+ drivers/md/raid5.c | 2 ++
+ 2 files changed, 4 insertions(+)
+
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3939,6 +3939,8 @@ static int raid10_run(struct mddev *mdde
+ set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ mddev->sync_thread = md_register_thread(md_do_sync, mddev,
+ "reshape");
++ if (!mddev->sync_thread)
++ goto out_free_conf;
+ }
+
+ return 0;
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -7402,6 +7402,8 @@ static int raid5_run(struct mddev *mddev
+ set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ mddev->sync_thread = md_register_thread(md_do_sync, mddev,
+ "reshape");
++ if (!mddev->sync_thread)
++ goto abort;
+ }
+
+ /* Ok, everything is just fine now */
--- /dev/null
+From c3fcadf0bb765faf45d6d562246e1d08885466df Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Wed, 6 Feb 2019 12:39:43 +0200
+Subject: perf auxtrace: Define auxtrace record alignment
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit c3fcadf0bb765faf45d6d562246e1d08885466df upstream.
+
+Define auxtrace record alignment so that it can be referenced elsewhere.
+
+Note this is preparation for patch "perf intel-pt: Fix overlap calculation
+for padding"
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: stable@vger.kernel.org
+Link: http://lkml.kernel.org/r/20190206103947.15750-2-adrian.hunter@intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/auxtrace.c | 4 ++--
+ tools/perf/util/auxtrace.h | 3 +++
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -1278,9 +1278,9 @@ static int __auxtrace_mmap__read(struct
+ }
+
+ /* padding must be written by fn() e.g. record__process_auxtrace() */
+- padding = size & 7;
++ padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
+ if (padding)
+- padding = 8 - padding;
++ padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
+
+ memset(&ev, 0, sizeof(ev));
+ ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
+--- a/tools/perf/util/auxtrace.h
++++ b/tools/perf/util/auxtrace.h
+@@ -40,6 +40,9 @@ struct record_opts;
+ struct auxtrace_info_event;
+ struct events_stats;
+
++/* Auxtrace records must have the same alignment as perf event records */
++#define PERF_AUXTRACE_RECORD_ALIGNMENT 8
++
+ enum auxtrace_type {
+ PERF_AUXTRACE_UNKNOWN,
+ PERF_AUXTRACE_INTEL_PT,
--- /dev/null
+From 03997612904866abe7cdcc992784ef65cb3a4b81 Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Wed, 6 Feb 2019 12:39:45 +0200
+Subject: perf intel-pt: Fix CYC timestamp calculation after OVF
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit 03997612904866abe7cdcc992784ef65cb3a4b81 upstream.
+
+CYC packet timestamp calculation depends upon CBR which was being
+cleared upon overflow (OVF). That can cause errors due to failing to
+synchronize with sideband events. Even if a CBR change has been lost,
+the old CBR is still a better estimate than zero. So remove the clearing
+of CBR.
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: stable@vger.kernel.org
+Link: http://lkml.kernel.org/r/20190206103947.15750-4-adrian.hunter@intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/intel-pt-decoder/intel-pt-decoder.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -1394,7 +1394,6 @@ static int intel_pt_overflow(struct inte
+ {
+ intel_pt_log("ERROR: Buffer overflow\n");
+ intel_pt_clear_tx_flags(decoder);
+- decoder->cbr = 0;
+ decoder->timestamp_insn_cnt = 0;
+ decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
+ decoder->overflow = true;
--- /dev/null
+From 076333870c2f5bdd9b6d31e7ca1909cf0c84cbfa Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Fri, 1 Mar 2019 12:35:36 +0200
+Subject: perf intel-pt: Fix divide by zero when TSC is not available
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit 076333870c2f5bdd9b6d31e7ca1909cf0c84cbfa upstream.
+
+When TSC is not available, "timeless" decoding is used but a divide by
+zero occurs if perf_time_to_tsc() is called.
+
+Ensure the divisor is not zero.
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: stable@vger.kernel.org # v4.9+
+Link: https://lkml.kernel.org/n/tip-1i4j0wqoc8vlbkcizqqxpsf4@git.kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/intel-pt.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -2522,6 +2522,8 @@ int intel_pt_process_auxtrace_info(union
+ }
+
+ pt->timeless_decoding = intel_pt_timeless_decoding(pt);
++ if (pt->timeless_decoding && !pt->tc.time_mult)
++ pt->tc.time_mult = 1;
+ pt->have_tsc = intel_pt_have_tsc(pt);
+ pt->sampling_mode = false;
+ pt->est_tsc = !pt->timeless_decoding;
--- /dev/null
+From 5a99d99e3310a565b0cf63f785b347be9ee0da45 Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Wed, 6 Feb 2019 12:39:44 +0200
+Subject: perf intel-pt: Fix overlap calculation for padding
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit 5a99d99e3310a565b0cf63f785b347be9ee0da45 upstream.
+
+Auxtrace records might have up to 7 bytes of padding appended. Adjust
+the overlap accordingly.
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: stable@vger.kernel.org
+Link: http://lkml.kernel.org/r/20190206103947.15750-3-adrian.hunter@intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/intel-pt-decoder/intel-pt-decoder.c | 36 ++++++++++++++++++--
+ 1 file changed, 34 insertions(+), 2 deletions(-)
+
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -26,6 +26,7 @@
+
+ #include "../cache.h"
+ #include "../util.h"
++#include "../auxtrace.h"
+
+ #include "intel-pt-insn-decoder.h"
+ #include "intel-pt-pkt-decoder.h"
+@@ -2574,6 +2575,34 @@ static int intel_pt_tsc_cmp(uint64_t tsc
+ }
+ }
+
++#define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)
++
++/**
++ * adj_for_padding - adjust overlap to account for padding.
++ * @buf_b: second buffer
++ * @buf_a: first buffer
++ * @len_a: size of first buffer
++ *
++ * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap
++ * accordingly.
++ *
++ * Return: A pointer into @buf_b from where non-overlapped data starts
++ */
++static unsigned char *adj_for_padding(unsigned char *buf_b,
++ unsigned char *buf_a, size_t len_a)
++{
++ unsigned char *p = buf_b - MAX_PADDING;
++ unsigned char *q = buf_a + len_a - MAX_PADDING;
++ int i;
++
++ for (i = MAX_PADDING; i; i--, p++, q++) {
++ if (*p != *q)
++ break;
++ }
++
++ return p;
++}
++
+ /**
+ * intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
+ * using TSC.
+@@ -2624,8 +2653,11 @@ static unsigned char *intel_pt_find_over
+
+ /* Same TSC, so buffers are consecutive */
+ if (!cmp && rem_b >= rem_a) {
++ unsigned char *start;
++
+ *consecutive = true;
+- return buf_b + len_b - (rem_b - rem_a);
++ start = buf_b + len_b - (rem_b - rem_a);
++ return adj_for_padding(start, buf_a, len_a);
+ }
+ if (cmp < 0)
+ return buf_b; /* tsc_a < tsc_b => no overlap */
+@@ -2688,7 +2720,7 @@ unsigned char *intel_pt_find_overlap(uns
+ found = memmem(buf_a, len_a, buf_b, len_a);
+ if (found) {
+ *consecutive = true;
+- return buf_b + len_a;
++ return adj_for_padding(buf_b + len_a, buf_a, len_a);
+ }
+
+ /* Try again at next PSB in buffer 'a' */
--- /dev/null
+From d6d457451eb94fa747dc202765592eb8885a7352 Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Wed, 9 Jan 2019 11:18:30 +0200
+Subject: perf tools: Fix split_kallsyms_for_kcore() for trampoline symbols
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit d6d457451eb94fa747dc202765592eb8885a7352 upstream.
+
+Kallsyms symbols do not have a size, so the size becomes the distance to
+the next symbol.
+
+Consequently the recently added trampoline symbols end up with large
+sizes because the trampolines are some distance from one another and the
+main kernel map.
+
+However, symbols that end outside their map can disrupt the symbol tree
+because, after mapping, it can appear incorrectly that they overlap
+other symbols.
+
+Add logic to truncate symbol size to the end of the corresponding map.
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Cc: stable@vger.kernel.org
+Fixes: d83212d5dd67 ("kallsyms, x86: Export addresses of PTI entry trampolines")
+Link: http://lkml.kernel.org/r/20190109091835.5570-2-adrian.hunter@intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/symbol.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -710,6 +710,8 @@ static int map_groups__split_kallsyms_fo
+ }
+
+ pos->start -= curr_map->start - curr_map->pgoff;
++ if (pos->end > curr_map->end)
++ pos->end = curr_map->end;
+ if (pos->end)
+ pos->end -= curr_map->start - curr_map->pgoff;
+ symbols__insert(&curr_map->dso->symbols, pos);
--- /dev/null
+From 8041ffd36f42d8521d66dd1e236feb58cecd68bc Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@linux.intel.com>
+Date: Wed, 27 Feb 2019 08:57:29 -0800
+Subject: perf/x86/intel/uncore: Fix client IMC events return huge result
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+commit 8041ffd36f42d8521d66dd1e236feb58cecd68bc upstream.
+
+The client IMC bandwidth events currently return very large values:
+
+ $ perf stat -e uncore_imc/data_reads/ -e uncore_imc/data_writes/ -I 10000 -a
+
+ 10.000117222 34,788.76 MiB uncore_imc/data_reads/
+ 10.000117222 8.26 MiB uncore_imc/data_writes/
+ 20.000374584 34,842.89 MiB uncore_imc/data_reads/
+ 20.000374584 10.45 MiB uncore_imc/data_writes/
+ 30.000633299 37,965.29 MiB uncore_imc/data_reads/
+ 30.000633299 323.62 MiB uncore_imc/data_writes/
+ 40.000891548 41,012.88 MiB uncore_imc/data_reads/
+ 40.000891548 6.98 MiB uncore_imc/data_writes/
+ 50.001142480 1,125,899,906,621,494.75 MiB uncore_imc/data_reads/
+ 50.001142480 6.97 MiB uncore_imc/data_writes/
+
+The client IMC events are freerunning counters. They still use the
+old event encoding format (0x1 for data_read and 0x2 for data write).
+The counter bit width is calculated by common code, which assume that
+the standard encoding format is used for the freerunning counters.
+Erroneous bit width information is calculated.
+
+The patch intends to convert the old client IMC event encoding to the
+standard encoding format.
+
+Current common code uses event->attr.config which directly copy from
+user space. We should not implicitly modify it for a converted event.
+The event->hw.config is used to replace the event->attr.config in
+common code.
+
+For client IMC events, the event->attr.config is used to calculate a
+converted event with standard encoding format in the custom
+event_init(). The converted event is stored in event->hw.config.
+For other events of freerunning counters, they already use the standard
+encoding format. The same value as event->attr.config is assigned to
+event->hw.config in common event_init().
+
+Reported-by: Jin Yao <yao.jin@linux.intel.com>
+Tested-by: Jin Yao <yao.jin@linux.intel.com>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Cc: stable@kernel.org # v4.18+
+Fixes: 9aae1780e7e8 ("perf/x86/intel/uncore: Clean up client IMC uncore")
+Link: https://lkml.kernel.org/r/20190227165729.1861-1-kan.liang@linux.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/intel/uncore.c | 1 +
+ arch/x86/events/intel/uncore.h | 12 ++++++------
+ arch/x86/events/intel/uncore_snb.c | 4 +++-
+ 3 files changed, 10 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/events/intel/uncore.c
++++ b/arch/x86/events/intel/uncore.c
+@@ -740,6 +740,7 @@ static int uncore_pmu_event_init(struct
+ /* fixed counters have event field hardcoded to zero */
+ hwc->config = 0ULL;
+ } else if (is_freerunning_event(event)) {
++ hwc->config = event->attr.config;
+ if (!check_valid_freerunning_event(box, event))
+ return -EINVAL;
+ event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
+--- a/arch/x86/events/intel/uncore.h
++++ b/arch/x86/events/intel/uncore.h
+@@ -292,8 +292,8 @@ static inline
+ unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
+ struct perf_event *event)
+ {
+- unsigned int type = uncore_freerunning_type(event->attr.config);
+- unsigned int idx = uncore_freerunning_idx(event->attr.config);
++ unsigned int type = uncore_freerunning_type(event->hw.config);
++ unsigned int idx = uncore_freerunning_idx(event->hw.config);
+ struct intel_uncore_pmu *pmu = box->pmu;
+
+ return pmu->type->freerunning[type].counter_base +
+@@ -377,7 +377,7 @@ static inline
+ unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
+ struct perf_event *event)
+ {
+- unsigned int type = uncore_freerunning_type(event->attr.config);
++ unsigned int type = uncore_freerunning_type(event->hw.config);
+
+ return box->pmu->type->freerunning[type].bits;
+ }
+@@ -385,7 +385,7 @@ unsigned int uncore_freerunning_bits(str
+ static inline int uncore_num_freerunning(struct intel_uncore_box *box,
+ struct perf_event *event)
+ {
+- unsigned int type = uncore_freerunning_type(event->attr.config);
++ unsigned int type = uncore_freerunning_type(event->hw.config);
+
+ return box->pmu->type->freerunning[type].num_counters;
+ }
+@@ -399,8 +399,8 @@ static inline int uncore_num_freerunning
+ static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+ {
+- unsigned int type = uncore_freerunning_type(event->attr.config);
+- unsigned int idx = uncore_freerunning_idx(event->attr.config);
++ unsigned int type = uncore_freerunning_type(event->hw.config);
++ unsigned int idx = uncore_freerunning_idx(event->hw.config);
+
+ return (type < uncore_num_freerunning_types(box, event)) &&
+ (idx < uncore_num_freerunning(box, event));
+--- a/arch/x86/events/intel/uncore_snb.c
++++ b/arch/x86/events/intel/uncore_snb.c
+@@ -448,9 +448,11 @@ static int snb_uncore_imc_event_init(str
+
+ /* must be done before validate_group */
+ event->hw.event_base = base;
+- event->hw.config = cfg;
+ event->hw.idx = idx;
+
++ /* Convert to standard encoding format for freerunning counters */
++ event->hw.config = ((cfg - 1) << 8) | 0x10ff;
++
+ /* no group validation needed, we have free running counters */
+
+ return 0;
bcache-never-writeback-a-discard-operation.patch
bcache-treat-stale-dirty-keys-as-bad-keys.patch
bcache-use-req_meta-req_prio-to-indicate-bio-for-metadata.patch
+stable-kernel-rules.rst-add-link-to-networking-patch-queue.patch
+vt-perform-safe-console-erase-in-the-right-order.patch
+x86-unwind-orc-fix-orc-unwind-table-alignment.patch
+perf-intel-pt-fix-cyc-timestamp-calculation-after-ovf.patch
+perf-tools-fix-split_kallsyms_for_kcore-for-trampoline-symbols.patch
+perf-auxtrace-define-auxtrace-record-alignment.patch
+perf-intel-pt-fix-overlap-calculation-for-padding.patch
+perf-x86-intel-uncore-fix-client-imc-events-return-huge-result.patch
+perf-intel-pt-fix-divide-by-zero-when-tsc-is-not-available.patch
+md-fix-failed-allocation-of-md_register_thread.patch
+x86-kvmclock-set-offset-for-kvm-unstable-clock.patch
+x86-ftrace-fix-warning-and-considate-ftrace_jmp_replace-and-ftrace_call_replace.patch
+tpm-tpm_crb-avoid-unaligned-reads-in-crb_recv.patch
+tpm-unify-the-send-callback-behaviour.patch
--- /dev/null
+From a41e8f25fa8f8f67360d88eb0eebbabe95a64bdf Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Tue, 22 Jan 2019 19:46:32 +0100
+Subject: stable-kernel-rules.rst: add link to networking patch queue
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+commit a41e8f25fa8f8f67360d88eb0eebbabe95a64bdf upstream.
+
+The networking maintainer keeps a public list of the patches being
+queued up for the next round of stable releases. Be sure to check there
+before asking for a patch to be applied so that you do not waste
+people's time.
+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Acked-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jonathan Corbet <corbet@lwn.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/process/stable-kernel-rules.rst | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/Documentation/process/stable-kernel-rules.rst
++++ b/Documentation/process/stable-kernel-rules.rst
+@@ -38,6 +38,9 @@ Procedure for submitting patches to the
+ - If the patch covers files in net/ or drivers/net please follow netdev stable
+ submission guidelines as described in
+ :ref:`Documentation/networking/netdev-FAQ.rst <netdev-FAQ>`
++ after first checking the stable networking queue at
++ https://patchwork.ozlabs.org/bundle/davem/stable/?series=&submitter=&state=*&q=&archive=
++ to ensure the requested patch is not already queued up.
+ - Security patches should not be handled (solely) by the -stable review
+ process but should follow the procedures in
+ :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.
--- /dev/null
+From 3d7a850fdc1a2e4d2adbc95cc0fc962974725e88 Mon Sep 17 00:00:00 2001
+From: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Date: Mon, 4 Feb 2019 15:59:43 +0200
+Subject: tpm/tpm_crb: Avoid unaligned reads in crb_recv()
+
+From: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+
+commit 3d7a850fdc1a2e4d2adbc95cc0fc962974725e88 upstream.
+
+The current approach to read first 6 bytes from the response and then tail
+of the response, can cause the 2nd memcpy_fromio() to do an unaligned read
+(e.g. read 32-bit word from address aligned to a 16-bits), depending on how
+memcpy_fromio() is implemented. If this happens, the read will fail and the
+memory controller will fill the read with 1's.
+
+This was triggered by 170d13ca3a2f, which should be probably refined to
+check and react to the address alignment. Before that commit, on x86
+memcpy_fromio() turned out to be memcpy(). By a luck GCC has done the right
+thing (from tpm_crb's perspective) for us so far, but we should not rely on
+that. Thus, it makes sense to fix this also in tpm_crb, not least because
+the fix can be then backported to stable kernels and make them more robust
+when compiled in differing environments.
+
+Cc: stable@vger.kernel.org
+Cc: James Morris <jmorris@namei.org>
+Cc: Tomas Winkler <tomas.winkler@intel.com>
+Cc: Jerry Snitselaar <jsnitsel@redhat.com>
+Fixes: 30fc8d138e91 ("tpm: TPM 2.0 CRB Interface")
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
+Acked-by: Tomas Winkler <tomas.winkler@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm_crb.c | 22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -287,19 +287,29 @@ static int crb_recv(struct tpm_chip *chi
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+ unsigned int expected;
+
+- /* sanity check */
+- if (count < 6)
++ /* A sanity check that the upper layer wants to get at least the header
++ * as that is the minimum size for any TPM response.
++ */
++ if (count < TPM_HEADER_SIZE)
+ return -EIO;
+
++ /* If this bit is set, according to the spec, the TPM is in
++ * unrecoverable condition.
++ */
+ if (ioread32(&priv->regs_t->ctrl_sts) & CRB_CTRL_STS_ERROR)
+ return -EIO;
+
+- memcpy_fromio(buf, priv->rsp, 6);
+- expected = be32_to_cpup((__be32 *) &buf[2]);
+- if (expected > count || expected < 6)
++ /* Read the first 8 bytes in order to get the length of the response.
++ * We read exactly a quad word in order to make sure that the remaining
++ * reads will be aligned.
++ */
++ memcpy_fromio(buf, priv->rsp, 8);
++
++ expected = be32_to_cpup((__be32 *)&buf[2]);
++ if (expected > count || expected < TPM_HEADER_SIZE)
+ return -EIO;
+
+- memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
++ memcpy_fromio(&buf[8], &priv->rsp[8], expected - 8);
+
+ return expected;
+ }
--- /dev/null
+From f5595f5baa30e009bf54d0d7653a9a0cc465be60 Mon Sep 17 00:00:00 2001
+From: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Date: Fri, 8 Feb 2019 18:30:58 +0200
+Subject: tpm: Unify the send callback behaviour
+
+From: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+
+commit f5595f5baa30e009bf54d0d7653a9a0cc465be60 upstream.
+
+The send() callback should never return length as it does not in every
+driver except tpm_crb in the success case. The reason is that the main
+transmit functionality only cares about whether the transmit was
+successful or not and ignores the count completely.
+
+Suggested-by: Stefan Berger <stefanb@linux.ibm.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Reviewed-by: Stefan Berger <stefanb@linux.ibm.com>
+Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
+Tested-by: Alexander Steffen <Alexander.Steffen@infineon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/st33zp24/st33zp24.c | 2 +-
+ drivers/char/tpm/tpm-interface.c | 11 ++++++++++-
+ drivers/char/tpm/tpm_atmel.c | 2 +-
+ drivers/char/tpm/tpm_i2c_atmel.c | 6 +++++-
+ drivers/char/tpm/tpm_i2c_infineon.c | 2 +-
+ drivers/char/tpm/tpm_i2c_nuvoton.c | 2 +-
+ drivers/char/tpm/tpm_ibmvtpm.c | 8 ++++----
+ drivers/char/tpm/tpm_infineon.c | 2 +-
+ drivers/char/tpm/tpm_nsc.c | 2 +-
+ drivers/char/tpm/tpm_tis_core.c | 2 +-
+ drivers/char/tpm/tpm_vtpm_proxy.c | 3 +--
+ drivers/char/tpm/xen-tpmfront.c | 2 +-
+ 12 files changed, 28 insertions(+), 16 deletions(-)
+
+--- a/drivers/char/tpm/st33zp24/st33zp24.c
++++ b/drivers/char/tpm/st33zp24/st33zp24.c
+@@ -436,7 +436,7 @@ static int st33zp24_send(struct tpm_chip
+ goto out_err;
+ }
+
+- return len;
++ return 0;
+ out_err:
+ st33zp24_cancel(chip);
+ release_locality(chip);
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -230,10 +230,19 @@ static ssize_t tpm_try_transmit(struct t
+ if (rc < 0) {
+ if (rc != -EPIPE)
+ dev_err(&chip->dev,
+- "%s: tpm_send: error %d\n", __func__, rc);
++ "%s: send(): error %d\n", __func__, rc);
+ goto out;
+ }
+
++ /* A sanity check. send() should just return zero on success e.g.
++ * not the command length.
++ */
++ if (rc > 0) {
++ dev_warn(&chip->dev,
++ "%s: send(): invalid value %d\n", __func__, rc);
++ rc = 0;
++ }
++
+ if (chip->flags & TPM_CHIP_FLAG_IRQ)
+ goto out_recv;
+
+--- a/drivers/char/tpm/tpm_atmel.c
++++ b/drivers/char/tpm/tpm_atmel.c
+@@ -105,7 +105,7 @@ static int tpm_atml_send(struct tpm_chip
+ iowrite8(buf[i], priv->iobase);
+ }
+
+- return count;
++ return 0;
+ }
+
+ static void tpm_atml_cancel(struct tpm_chip *chip)
+--- a/drivers/char/tpm/tpm_i2c_atmel.c
++++ b/drivers/char/tpm/tpm_i2c_atmel.c
+@@ -65,7 +65,11 @@ static int i2c_atmel_send(struct tpm_chi
+ dev_dbg(&chip->dev,
+ "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
+ (int)min_t(size_t, 64, len), buf, len, status);
+- return status;
++
++ if (status < 0)
++ return status;
++
++ return 0;
+ }
+
+ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+--- a/drivers/char/tpm/tpm_i2c_infineon.c
++++ b/drivers/char/tpm/tpm_i2c_infineon.c
+@@ -587,7 +587,7 @@ static int tpm_tis_i2c_send(struct tpm_c
+ /* go and do it */
+ iic_tpm_write(TPM_STS(tpm_dev.locality), &sts, 1);
+
+- return len;
++ return 0;
+ out_err:
+ tpm_tis_i2c_ready(chip);
+ /* The TPM needs some time to clean up here,
+--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
++++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
+@@ -467,7 +467,7 @@ static int i2c_nuvoton_send(struct tpm_c
+ }
+
+ dev_dbg(dev, "%s() -> %zd\n", __func__, len);
+- return len;
++ return 0;
+ }
+
+ static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status)
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -139,14 +139,14 @@ static int tpm_ibmvtpm_recv(struct tpm_c
+ }
+
+ /**
+- * tpm_ibmvtpm_send - Send tpm request
+- *
++ * tpm_ibmvtpm_send() - Send a TPM command
+ * @chip: tpm chip struct
+ * @buf: buffer contains data to send
+ * @count: size of buffer
+ *
+ * Return:
+- * Number of bytes sent or < 0 on error.
++ * 0 on success,
++ * -errno on error
+ */
+ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+ {
+@@ -192,7 +192,7 @@ static int tpm_ibmvtpm_send(struct tpm_c
+ rc = 0;
+ ibmvtpm->tpm_processing_cmd = false;
+ } else
+- rc = count;
++ rc = 0;
+
+ spin_unlock(&ibmvtpm->rtce_lock);
+ return rc;
+--- a/drivers/char/tpm/tpm_infineon.c
++++ b/drivers/char/tpm/tpm_infineon.c
+@@ -354,7 +354,7 @@ static int tpm_inf_send(struct tpm_chip
+ for (i = 0; i < count; i++) {
+ wait_and_send(chip, buf[i]);
+ }
+- return count;
++ return 0;
+ }
+
+ static void tpm_inf_cancel(struct tpm_chip *chip)
+--- a/drivers/char/tpm/tpm_nsc.c
++++ b/drivers/char/tpm/tpm_nsc.c
+@@ -226,7 +226,7 @@ static int tpm_nsc_send(struct tpm_chip
+ }
+ outb(NSC_COMMAND_EOC, priv->base + NSC_COMMAND);
+
+- return count;
++ return 0;
+ }
+
+ static void tpm_nsc_cancel(struct tpm_chip *chip)
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -481,7 +481,7 @@ static int tpm_tis_send_main(struct tpm_
+ goto out_err;
+ }
+ }
+- return len;
++ return 0;
+ out_err:
+ tpm_tis_ready(chip);
+ return rc;
+--- a/drivers/char/tpm/tpm_vtpm_proxy.c
++++ b/drivers/char/tpm/tpm_vtpm_proxy.c
+@@ -335,7 +335,6 @@ static int vtpm_proxy_is_driver_command(
+ static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
+ {
+ struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+- int rc = 0;
+
+ if (count > sizeof(proxy_dev->buffer)) {
+ dev_err(&chip->dev,
+@@ -366,7 +365,7 @@ static int vtpm_proxy_tpm_op_send(struct
+
+ wake_up_interruptible(&proxy_dev->wq);
+
+- return rc;
++ return 0;
+ }
+
+ static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip)
+--- a/drivers/char/tpm/xen-tpmfront.c
++++ b/drivers/char/tpm/xen-tpmfront.c
+@@ -173,7 +173,7 @@ static int vtpm_send(struct tpm_chip *ch
+ return -ETIME;
+ }
+
+- return count;
++ return 0;
+ }
+
+ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
--- /dev/null
+From a6dbe442755999960ca54a9b8ecfd9606be0ea75 Mon Sep 17 00:00:00 2001
+From: Nicolas Pitre <nicolas.pitre@linaro.org>
+Date: Mon, 11 Feb 2019 19:36:41 -0500
+Subject: vt: perform safe console erase in the right order
+
+From: Nicolas Pitre <nicolas.pitre@linaro.org>
+
+commit a6dbe442755999960ca54a9b8ecfd9606be0ea75 upstream.
+
+Commit 4b4ecd9cb853 ("vt: Perform safe console erase only once") removed
+what appeared to be an extra call to scr_memsetw(). This missed the fact
+that set_origin() must be called before clearing the screen otherwise
+old screen content gets restored on the screen when using vgacon. Let's
+fix that by moving all the scrollback handling to flush_scrollback()
+where it logically belongs, and invoking it before the actual screen
+clearing in csi_J(), making the code simpler in the end.
+
+Reported-by: Matthew Whitehead <tedheadster@gmail.com>
+Signed-off-by: Nicolas Pitre <nico@linaro.org>
+Tested-by: Matthew Whitehead <tedheadster@gmail.com>
+Fixes: 4b4ecd9cb853 ("vt: Perform safe console erase only once")
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/vt/vt.c | 15 +++++++--------
+ 1 file changed, 7 insertions(+), 8 deletions(-)
+
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -935,8 +935,11 @@ static void flush_scrollback(struct vc_d
+ {
+ WARN_CONSOLE_UNLOCKED();
+
++ set_origin(vc);
+ if (vc->vc_sw->con_flush_scrollback)
+ vc->vc_sw->con_flush_scrollback(vc);
++ else
++ vc->vc_sw->con_switch(vc);
+ }
+
+ /*
+@@ -1503,8 +1506,10 @@ static void csi_J(struct vc_data *vc, in
+ count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1;
+ start = (unsigned short *)vc->vc_origin;
+ break;
++ case 3: /* include scrollback */
++ flush_scrollback(vc);
++ /* fallthrough */
+ case 2: /* erase whole display */
+- case 3: /* (and scrollback buffer later) */
+ vc_uniscr_clear_lines(vc, 0, vc->vc_rows);
+ count = vc->vc_cols * vc->vc_rows;
+ start = (unsigned short *)vc->vc_origin;
+@@ -1513,13 +1518,7 @@ static void csi_J(struct vc_data *vc, in
+ return;
+ }
+ scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
+- if (vpar == 3) {
+- set_origin(vc);
+- flush_scrollback(vc);
+- if (con_is_visible(vc))
+- update_screen(vc);
+- } else if (con_should_update(vc))
+- do_update_region(vc, (unsigned long) start, count);
++ update_region(vc, (unsigned long) start, count);
+ vc->vc_need_wrap = 0;
+ }
+
--- /dev/null
+From 745cfeaac09ce359130a5451d90cb0bd4094c290 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Mon, 4 Mar 2019 16:35:22 -0500
+Subject: x86/ftrace: Fix warning and consolidate ftrace_jmp_replace() and ftrace_call_replace()
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 745cfeaac09ce359130a5451d90cb0bd4094c290 upstream.
+
+Arnd reported the following compiler warning:
+
+arch/x86/kernel/ftrace.c:669:23: error: 'ftrace_jmp_replace' defined but not used [-Werror=unused-function]
+
+The ftrace_jmp_replace() function now only has a single user and should be
+simply moved by that user. But looking at the code, it shows that
+ftrace_jmp_replace() is similar to ftrace_call_replace() except that instead
+of using the opcode of 0xe8 it uses 0xe9. It makes more sense to consolidate
+that function into one implementation that both ftrace_jmp_replace() and
+ftrace_call_replace() use by passing in the op code separately.
+
+The structure in ftrace_code_union is also modified to replace the "e8"
+field with the more appropriate name "op".
+
+Cc: stable@vger.kernel.org
+Reported-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Link: http://lkml.kernel.org/r/20190304200748.1418790-1-arnd@arndb.de
+Fixes: d2a68c4effd8 ("x86/ftrace: Do not call function graph from dynamic trampolines")
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/ftrace.c | 42 +++++++++++++++++-------------------------
+ 1 file changed, 17 insertions(+), 25 deletions(-)
+
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -49,7 +49,7 @@ int ftrace_arch_code_modify_post_process
+ union ftrace_code_union {
+ char code[MCOUNT_INSN_SIZE];
+ struct {
+- unsigned char e8;
++ unsigned char op;
+ int offset;
+ } __attribute__((packed));
+ };
+@@ -59,20 +59,23 @@ static int ftrace_calc_offset(long ip, l
+ return (int)(addr - ip);
+ }
+
+-static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
++static unsigned char *
++ftrace_text_replace(unsigned char op, unsigned long ip, unsigned long addr)
+ {
+ static union ftrace_code_union calc;
+
+- calc.e8 = 0xe8;
++ calc.op = op;
+ calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
+
+- /*
+- * No locking needed, this must be called via kstop_machine
+- * which in essence is like running on a uniprocessor machine.
+- */
+ return calc.code;
+ }
+
++static unsigned char *
++ftrace_call_replace(unsigned long ip, unsigned long addr)
++{
++ return ftrace_text_replace(0xe8, ip, addr);
++}
++
+ static inline int
+ within(unsigned long addr, unsigned long start, unsigned long end)
+ {
+@@ -664,22 +667,6 @@ int __init ftrace_dyn_arch_init(void)
+ return 0;
+ }
+
+-#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
+-static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
+-{
+- static union ftrace_code_union calc;
+-
+- /* Jmp not a call (ignore the .e8) */
+- calc.e8 = 0xe9;
+- calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
+-
+- /*
+- * ftrace external locks synchronize the access to the static variable.
+- */
+- return calc.code;
+-}
+-#endif
+-
+ /* Currently only x86_64 supports dynamic trampolines */
+ #ifdef CONFIG_X86_64
+
+@@ -891,8 +878,8 @@ static void *addr_from_call(void *ptr)
+ return NULL;
+
+ /* Make sure this is a call */
+- if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
+- pr_warn("Expected e8, got %x\n", calc.e8);
++ if (WARN_ON_ONCE(calc.op != 0xe8)) {
++ pr_warn("Expected e8, got %x\n", calc.op);
+ return NULL;
+ }
+
+@@ -963,6 +950,11 @@ void arch_ftrace_trampoline_free(struct
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ extern void ftrace_graph_call(void);
+
++static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
++{
++ return ftrace_text_replace(0xe9, ip, addr);
++}
++
+ static int ftrace_mod_jmp(unsigned long ip, void *func)
+ {
+ unsigned char *new;
--- /dev/null
+From b5179ec4187251a751832193693d6e474d3445ac Mon Sep 17 00:00:00 2001
+From: Pavel Tatashin <pasha.tatashin@soleen.com>
+Date: Sat, 26 Jan 2019 12:49:56 -0500
+Subject: x86/kvmclock: set offset for kvm unstable clock
+
+From: Pavel Tatashin <pasha.tatashin@soleen.com>
+
+commit b5179ec4187251a751832193693d6e474d3445ac upstream.
+
+VMs may show incorrect uptime and dmesg printk offsets on hypervisors with
+unstable clock. The problem is produced when VM is rebooted without exiting
+from qemu.
+
+The fix is to calculate clock offset not only for stable clock but for
+unstable clock as well, and use kvm_sched_clock_read() which subtracts
+the offset for both clocks.
+
+This is safe, because pvclock_clocksource_read() does the right thing and
+makes sure that clock always goes forward, so once offset is calculated
+with unstable clock, we won't get new reads that are smaller than offset,
+and thus won't get negative results.
+
+Thank you Jon DeVree for helping to reproduce this issue.
+
+Fixes: 857baa87b642 ("sched/clock: Enable sched clock early")
+Cc: stable@vger.kernel.org
+Reported-by: Dominique Martinet <asmadeus@codewreck.org>
+Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/kvmclock.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -104,12 +104,8 @@ static u64 kvm_sched_clock_read(void)
+
+ static inline void kvm_sched_clock_init(bool stable)
+ {
+- if (!stable) {
+- pv_ops.time.sched_clock = kvm_clock_read;
++ if (!stable)
+ clear_sched_clock_stable();
+- return;
+- }
+-
+ kvm_sched_clock_offset = kvm_clock_read();
+ pv_ops.time.sched_clock = kvm_sched_clock_read;
+
--- /dev/null
+From f76a16adc485699f95bb71fce114f97c832fe664 Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Wed, 6 Mar 2019 11:07:24 -0600
+Subject: x86/unwind/orc: Fix ORC unwind table alignment
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+commit f76a16adc485699f95bb71fce114f97c832fe664 upstream.
+
+The .orc_unwind section is a packed array of 6-byte structs. It's
+currently aligned to 6 bytes, which is causing warnings in the LLD
+linker.
+
+Six isn't a power of two, so it's not a valid alignment value. The
+actual alignment doesn't matter much because it's an array of packed
+structs. An alignment of two is sufficient. In reality it always gets
+aligned to four bytes because it comes immediately after the
+4-byte-aligned .orc_unwind_ip section.
+
+Fixes: ee9f8fce9964 ("x86/unwind: Add the ORC unwinder")
+Reported-by: Nick Desaulniers <ndesaulniers@google.com>
+Reported-by: Dmitry Golovin <dima@golovin.in>
+Reported-by: Sedat Dilek <sedat.dilek@gmail.com>
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sedat Dilek <sedat.dilek@gmail.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://github.com/ClangBuiltLinux/linux/issues/218
+Link: https://lkml.kernel.org/r/d55027ee95fe73e952dcd8be90aebd31b0095c45.1551892041.git.jpoimboe@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/asm-generic/vmlinux.lds.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -733,7 +733,7 @@
+ KEEP(*(.orc_unwind_ip)) \
+ __stop_orc_unwind_ip = .; \
+ } \
+- . = ALIGN(6); \
++ . = ALIGN(2); \
+ .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
+ __start_orc_unwind = .; \
+ KEEP(*(.orc_unwind)) \