From: Greg Kroah-Hartman
Date: Sun, 13 Jun 2021 13:10:23 +0000 (+0200)
Subject: 4.19-stable patches
X-Git-Tag: v4.4.273~41
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=46f5191078d030c4bafdd74c6e22a99aa1ac101d;p=thirdparty%2Fkernel%2Fstable-queue.git

4.19-stable patches

added patches:
	ib-mlx5-fix-initializing-cq-fragments-buffer.patch
	perf-fix-data-race-between-pin_count-increment-decrement.patch
	rdma-mlx4-do-not-map-the-core_clock-page-to-user-space-unless-enabled.patch
	regulator-core-resolve-supply-for-boot-on-always-on-regulators.patch
	regulator-max77620-use-device_set_of_node_from_dev.patch
	sched-fair-make-sure-to-update-tg-contrib-for-blocked-load.patch
	vmlinux.lds.h-avoid-orphan-section-with-smp.patch
---
diff --git a/queue-4.19/ib-mlx5-fix-initializing-cq-fragments-buffer.patch b/queue-4.19/ib-mlx5-fix-initializing-cq-fragments-buffer.patch
new file mode 100644
index 00000000000..debf852d0a0
--- /dev/null
+++ b/queue-4.19/ib-mlx5-fix-initializing-cq-fragments-buffer.patch
@@ -0,0 +1,79 @@
+From 2ba0aa2feebda680ecfc3c552e867cf4d1b05a3a Mon Sep 17 00:00:00 2001
+From: Alaa Hleihel
+Date: Thu, 10 Jun 2021 10:34:27 +0300
+Subject: IB/mlx5: Fix initializing CQ fragments buffer
+
+From: Alaa Hleihel
+
+commit 2ba0aa2feebda680ecfc3c552e867cf4d1b05a3a upstream.
+
+The function init_cq_frag_buf() can be called to initialize the current CQ
+fragments buffer cq->buf, or the temporary cq->resize_buf that is filled
+during a CQ resize operation.
+
+However, the offending commit started to use the function get_cqe() for
+getting the CQEs; the issue with this change is that get_cqe() always
+returns CQEs from cq->buf, which leads us to initialize the wrong buffer,
+and in case of enlarging the CQ we try to access elements beyond the size
+of the current cq->buf and eventually hit a kernel panic.
+
+  [exception RIP: init_cq_frag_buf+103]
+  [ffff9f799ddcbcd8] mlx5_ib_resize_cq at ffffffffc0835d60 [mlx5_ib]
+  [ffff9f799ddcbdb0] ib_resize_cq at ffffffffc05270df [ib_core]
+  [ffff9f799ddcbdc0] llt_rdma_setup_qp at ffffffffc0a6a712 [llt]
+  [ffff9f799ddcbe10] llt_rdma_cc_event_action at ffffffffc0a6b411 [llt]
+  [ffff9f799ddcbe98] llt_rdma_client_conn_thread at ffffffffc0a6bb75 [llt]
+  [ffff9f799ddcbec8] kthread at ffffffffa66c5da1
+  [ffff9f799ddcbf50] ret_from_fork_nospec_begin at ffffffffa6d95ddd
+
+Fix it by getting the needed CQE by calling mlx5_frag_buf_get_wqe(), which
+takes the correct source buffer as a parameter.
+
+Fixes: 388ca8be0037 ("IB/mlx5: Implement fragmented completion queue (CQ)")
+Link: https://lore.kernel.org/r/90a0e8c924093cfa50a482880ad7e7edb73dc19a.1623309971.git.leonro@nvidia.com
+Signed-off-by: Alaa Hleihel
+Signed-off-by: Leon Romanovsky
+Signed-off-by: Jason Gunthorpe
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/infiniband/hw/mlx5/cq.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -896,15 +896,14 @@ static void destroy_cq_user(struct mlx5_
+ 	ib_umem_release(cq->buf.umem);
+ }
+ 
+-static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
+-			     struct mlx5_ib_cq_buf *buf)
++static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
+ {
+ 	int i;
+ 	void *cqe;
+ 	struct mlx5_cqe64 *cqe64;
+ 
+ 	for (i = 0; i < buf->nent; i++) {
+-		cqe = get_cqe(cq, i);
++		cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
+ 		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
+ 		cqe64->op_own = MLX5_CQE_INVALID << 4;
+ 	}
+@@ -930,7 +929,7 @@ static int create_cq_kernel(struct mlx5_
+ 	if (err)
+ 		goto err_db;
+ 
+-	init_cq_frag_buf(cq, &cq->buf);
++	init_cq_frag_buf(&cq->buf);
+ 
+ 	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ 		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
+@@ -1253,7 +1252,7 @@ static int resize_kernel(struct mlx5_ib_
+ 	if (err)
+ 		goto ex;
+ 
+-	init_cq_frag_buf(cq, cq->resize_buf);
++	init_cq_frag_buf(cq->resize_buf);
+ 
+ 	return 0;
+ 
diff --git a/queue-4.19/perf-fix-data-race-between-pin_count-increment-decrement.patch b/queue-4.19/perf-fix-data-race-between-pin_count-increment-decrement.patch
new file mode 100644
index 00000000000..aacf590fc1b
--- /dev/null
+++ b/queue-4.19/perf-fix-data-race-between-pin_count-increment-decrement.patch
@@ -0,0 +1,48 @@
+From 6c605f8371159432ec61cbb1488dcf7ad24ad19a Mon Sep 17 00:00:00 2001
+From: Marco Elver
+Date: Thu, 27 May 2021 12:47:11 +0200
+Subject: perf: Fix data race between pin_count increment/decrement
+
+From: Marco Elver
+
+commit 6c605f8371159432ec61cbb1488dcf7ad24ad19a upstream.
+
+KCSAN reports a data race between increment and decrement of pin_count:
+
+  write to 0xffff888237c2d4e0 of 4 bytes by task 15740 on cpu 1:
+   find_get_context		kernel/events/core.c:4617
+   __do_sys_perf_event_open	kernel/events/core.c:12097 [inline]
+   __se_sys_perf_event_open	kernel/events/core.c:11933
+   ...
+  read to 0xffff888237c2d4e0 of 4 bytes by task 15743 on cpu 0:
+   perf_unpin_context		kernel/events/core.c:1525 [inline]
+   __do_sys_perf_event_open	kernel/events/core.c:12328 [inline]
+   __se_sys_perf_event_open	kernel/events/core.c:11933
+   ...
+
+Because neither read-modify-write here is atomic, this can lead to one
+of the operations being lost, resulting in an inconsistent pin_count.
+Fix it by adding the missing locking in the CPU-event case.
+
+Fixes: fe4b04fa31a6 ("perf: Cure task_oncpu_function_call() races")
+Reported-by: syzbot+142c9018f5962db69c7e@syzkaller.appspotmail.com
+Signed-off-by: Marco Elver
+Signed-off-by: Peter Zijlstra (Intel)
+Link: https://lkml.kernel.org/r/20210527104711.2671610-1-elver@google.com
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/events/core.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4155,7 +4155,9 @@ find_get_context(struct pmu *pmu, struct
+ 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+ 		ctx = &cpuctx->ctx;
+ 		get_ctx(ctx);
++		raw_spin_lock_irqsave(&ctx->lock, flags);
+ 		++ctx->pin_count;
++		raw_spin_unlock_irqrestore(&ctx->lock, flags);
+ 
+ 		return ctx;
+ 	}
diff --git a/queue-4.19/rdma-mlx4-do-not-map-the-core_clock-page-to-user-space-unless-enabled.patch b/queue-4.19/rdma-mlx4-do-not-map-the-core_clock-page-to-user-space-unless-enabled.patch
new file mode 100644
index 00000000000..43a25caf434
--- /dev/null
+++ b/queue-4.19/rdma-mlx4-do-not-map-the-core_clock-page-to-user-space-unless-enabled.patch
@@ -0,0 +1,112 @@
+From 404e5a12691fe797486475fe28cc0b80cb8bef2c Mon Sep 17 00:00:00 2001
+From: Shay Drory
+Date: Thu, 3 Jun 2021 16:19:39 +0300
+Subject: RDMA/mlx4: Do not map the core_clock page to user space unless enabled
+
+From: Shay Drory
+
+commit 404e5a12691fe797486475fe28cc0b80cb8bef2c upstream.
+
+Currently, when mlx4 maps the hca_core_clock page to the user space, there
+are read-modifiable registers, one of which is a semaphore, on this page
+as well as the clock counter. If the user reads the wrong offset, it can
+modify the semaphore and hang the device.
+
+Do not map the hca_core_clock page to the user space unless the device has
+been put in a backwards compatibility mode to support this feature.
+
+After this patch, mlx4 core_clock won't be mapped to user space on the
+majority of existing devices, and the uverbs device time feature in
+ibv_query_rt_values_ex() will be disabled.
+
+Fixes: 52033cfb5aab ("IB/mlx4: Add mmap call to map the hardware clock")
+Link: https://lore.kernel.org/r/9632304e0d6790af84b3b706d8c18732bc0d5e27.1622726305.git.leonro@nvidia.com
+Signed-off-by: Shay Drory
+Signed-off-by: Leon Romanovsky
+Signed-off-by: Jason Gunthorpe
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/infiniband/hw/mlx4/main.c         | 5 +----
+ drivers/net/ethernet/mellanox/mlx4/fw.c   | 3 +++
+ drivers/net/ethernet/mellanox/mlx4/fw.h   | 1 +
+ drivers/net/ethernet/mellanox/mlx4/main.c | 6 ++++++
+ include/linux/mlx4/device.h               | 1 +
+ 5 files changed, 12 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -577,12 +577,9 @@ static int mlx4_ib_query_device(struct i
+ 	props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
+ 	props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
+ 
+-	if (!mlx4_is_slave(dev->dev))
+-		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
+-
+ 	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
+ 		resp.response_length += sizeof(resp.hca_core_clock_offset);
+-		if (!err && !mlx4_is_slave(dev->dev)) {
++		if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
+ 			resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
+ 			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
+ 		}
+--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
++++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
+@@ -822,6 +822,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *
+ #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET		0xb0
+ #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET	0xa8
+ #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET	0xac
++#define QUERY_DEV_CAP_MAP_CLOCK_TO_USER	0xc1
+ #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET	0xcc
+ #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET	0xd0
+ #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET	0xd2
+@@ -840,6 +841,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *
+ 
+ 	if (mlx4_is_mfunc(dev))
+ 		disable_unsupported_roce_caps(outbox);
++	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAP_CLOCK_TO_USER);
++	dev_cap->map_clock_to_user = field & 0x80;
+ 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
+ 	dev_cap->reserved_qps = 1 << (field & 0xf);
+ 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
+--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
++++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
+@@ -131,6 +131,7 @@ struct mlx4_dev_cap {
+ 	u32 health_buffer_addrs;
+ 	struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
+ 	bool wol_port[MLX4_MAX_PORTS + 1];
++	bool map_clock_to_user;
+ };
+ 
+ struct mlx4_func_cap {
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -498,6 +498,7 @@ static int mlx4_dev_cap(struct mlx4_dev
+ 		}
+ 	}
+ 
++	dev->caps.map_clock_to_user = dev_cap->map_clock_to_user;
+ 	dev->caps.uar_page_size = PAGE_SIZE;
+ 	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
+ 	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
+@@ -1949,6 +1950,11 @@ int mlx4_get_internal_clock_params(struc
+ 	if (mlx4_is_slave(dev))
+ 		return -EOPNOTSUPP;
+ 
++	if (!dev->caps.map_clock_to_user) {
++		mlx4_dbg(dev, "Map clock to user is not supported.\n");
++		return -EOPNOTSUPP;
++	}
++
+ 	if (!params)
+ 		return -EINVAL;
+ 
+--- a/include/linux/mlx4/device.h
++++ b/include/linux/mlx4/device.h
+@@ -631,6 +631,7 @@ struct mlx4_caps {
+ 	bool wol_port[MLX4_MAX_PORTS + 1];
+ 	struct mlx4_rate_limit_caps rl_caps;
+ 	u32 health_buffer_addrs;
++	bool map_clock_to_user;
+ };
+ 
+ struct mlx4_buf_list {
diff --git a/queue-4.19/regulator-core-resolve-supply-for-boot-on-always-on-regulators.patch b/queue-4.19/regulator-core-resolve-supply-for-boot-on-always-on-regulators.patch
new file mode 100644
index 00000000000..47a0c2626a3
--- /dev/null
+++ b/queue-4.19/regulator-core-resolve-supply-for-boot-on-always-on-regulators.patch
@@ -0,0 +1,38 @@
+From 98e48cd9283dbac0e1445ee780889f10b3d1db6a Mon Sep 17 00:00:00 2001
+From: Dmitry Baryshkov
+Date: Thu, 20 May 2021 01:12:23 +0300
+Subject: regulator: core: resolve supply for boot-on/always-on regulators
+
+From: Dmitry Baryshkov
+
+commit 98e48cd9283dbac0e1445ee780889f10b3d1db6a upstream.
+
+For the boot-on/always-on regulators, set_machine_constraints() is
+called before resolving rdev->supply. Thus the code would try to enable
+rdev before enabling the supplying regulator. Enforce resolving the
+supply regulator before enabling rdev.
+
+Fixes: aea6cb99703e ("regulator: resolve supply after creating regulator")
+Signed-off-by: Dmitry Baryshkov
+Link: https://lore.kernel.org/r/20210519221224.2868496-1-dmitry.baryshkov@linaro.org
+Signed-off-by: Mark Brown
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/regulator/core.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1191,6 +1191,12 @@ static int set_machine_constraints(struc
+ 	 * and we have control then make sure it is enabled.
+ 	 */
+ 	if (rdev->constraints->always_on || rdev->constraints->boot_on) {
++		/* If we want to enable this regulator, make sure that we know
++		 * the supplying regulator.
++		 */
++		if (rdev->supply_name && !rdev->supply)
++			return -EPROBE_DEFER;
++
+ 		if (rdev->supply) {
+ 			ret = regulator_enable(rdev->supply);
+ 			if (ret < 0) {
diff --git a/queue-4.19/regulator-max77620-use-device_set_of_node_from_dev.patch b/queue-4.19/regulator-max77620-use-device_set_of_node_from_dev.patch
new file mode 100644
index 00000000000..9ddb179fb85
--- /dev/null
+++ b/queue-4.19/regulator-max77620-use-device_set_of_node_from_dev.patch
@@ -0,0 +1,40 @@
+From 6f55c5dd1118b3076d11d9cb17f5c5f4bc3a1162 Mon Sep 17 00:00:00 2001
+From: Dmitry Osipenko
+Date: Mon, 24 May 2021 01:42:42 +0300
+Subject: regulator: max77620: Use device_set_of_node_from_dev()
+
+From: Dmitry Osipenko
+
+commit 6f55c5dd1118b3076d11d9cb17f5c5f4bc3a1162 upstream.
+
+The MAX77620 driver fails to re-probe on deferred probe because the
+driver core tries to claim resources that are already claimed by the
+PINCTRL device. Use the device_set_of_node_from_dev() helper, which
+marks the OF node as reused, skipping erroneous execution of
+pinctrl_bind_pins() for the PMIC device on the re-probe.
+
+Fixes: aea6cb99703e ("regulator: resolve supply after creating regulator")
+Signed-off-by: Dmitry Osipenko
+Link: https://lore.kernel.org/r/20210523224243.13219-2-digetx@gmail.com
+Signed-off-by: Mark Brown
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/regulator/max77620-regulator.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/regulator/max77620-regulator.c
++++ b/drivers/regulator/max77620-regulator.c
+@@ -792,6 +792,13 @@ static int max77620_regulator_probe(stru
+ 	config.dev = dev;
+ 	config.driver_data = pmic;
+ 
++	/*
++	 * Set of_node_reuse flag to prevent driver core from attempting to
++	 * claim any pinmux resources already claimed by the parent device.
++	 * Otherwise PMIC driver will fail to re-probe.
++	 */
++	device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
++
+ 	for (id = 0; id < MAX77620_NUM_REGS; id++) {
+ 		struct regulator_dev *rdev;
+ 		struct regulator_desc *rdesc;
diff --git a/queue-4.19/sched-fair-make-sure-to-update-tg-contrib-for-blocked-load.patch b/queue-4.19/sched-fair-make-sure-to-update-tg-contrib-for-blocked-load.patch
new file mode 100644
index 00000000000..50a660c924f
--- /dev/null
+++ b/queue-4.19/sched-fair-make-sure-to-update-tg-contrib-for-blocked-load.patch
@@ -0,0 +1,62 @@
+From 02da26ad5ed6ea8680e5d01f20661439611ed776 Mon Sep 17 00:00:00 2001
+From: Vincent Guittot
+Date: Thu, 27 May 2021 14:29:16 +0200
+Subject: sched/fair: Make sure to update tg contrib for blocked load
+
+From: Vincent Guittot
+
+commit 02da26ad5ed6ea8680e5d01f20661439611ed776 upstream.
+
+During the update of fair blocked load (__update_blocked_fair()), we
+update the contribution of the cfs in tg->load_avg if cfs_rq's pelt
+has decayed. Nevertheless, the pelt values of a cfs_rq could have
+been recently updated while propagating the change of a child. In this
+case, cfs_rq's pelt will not decay because it has already been
+updated, and we don't update tg->load_avg.
+
+__update_blocked_fair
+  ...
+  for_each_leaf_cfs_rq_safe: child cfs_rq
+    update cfs_rq_load_avg() for child cfs_rq
+    ...
+    update_load_avg(cfs_rq_of(se), se, 0)
+      ...
+      update cfs_rq_load_avg() for parent cfs_rq
+        - propagation of child's load makes parent cfs_rq->load_sum
+          become null
+        - UPDATE_TG is not set, so it doesn't update parent
+          cfs_rq->tg_load_avg_contrib
+  ..
+  for_each_leaf_cfs_rq_safe: parent cfs_rq
+    update cfs_rq_load_avg() for parent cfs_rq
+    - nothing to do because parent cfs_rq has already been updated
+      recently, so cfs_rq->tg_load_avg_contrib is not updated
+    ...
+    parent cfs_rq is decayed
+      list_del_leaf_cfs_rq parent cfs_rq
+      - but it still contributes to tg->load_avg
+
+We must set the UPDATE_TG flag when propagating pending load to the
+parent.
+
+Fixes: 039ae8bcf7a5 ("sched/fair: Fix O(nr_cgroups) in the load balancing path")
+Reported-by: Odin Ugedal
+Signed-off-by: Vincent Guittot
+Signed-off-by: Peter Zijlstra (Intel)
+Reviewed-by: Odin Ugedal
+Link: https://lkml.kernel.org/r/20210527122916.27683-3-vincent.guittot@linaro.org
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/sched/fair.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7512,7 +7512,7 @@ static void update_blocked_averages(int
+ 		/* Propagate pending load changes to the parent, if any: */
+ 		se = cfs_rq->tg->se[cpu];
+ 		if (se && !skip_blocked_update(se))
+-			update_load_avg(cfs_rq_of(se), se, 0);
++			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
+ 
+ 		/*
+ 		 * There can be a lot of idle CPU cgroups.  Don't let fully
Don't let fully diff --git a/queue-4.19/series b/queue-4.19/series index 722ceb049c8..8c6cd73eecb 100644 --- a/queue-4.19/series +++ b/queue-4.19/series @@ -46,3 +46,10 @@ usb-serial-cp210x-fix-alternate-function-for-cp2102n-qfn20.patch usb-gadget-eem-fix-wrong-eem-header-operation.patch usb-fix-various-gadgets-null-ptr-deref-on-10gbps-cabling.patch usb-fix-various-gadget-panics-on-10gbps-cabling.patch +regulator-core-resolve-supply-for-boot-on-always-on-regulators.patch +regulator-max77620-use-device_set_of_node_from_dev.patch +rdma-mlx4-do-not-map-the-core_clock-page-to-user-space-unless-enabled.patch +vmlinux.lds.h-avoid-orphan-section-with-smp.patch +perf-fix-data-race-between-pin_count-increment-decrement.patch +sched-fair-make-sure-to-update-tg-contrib-for-blocked-load.patch +ib-mlx5-fix-initializing-cq-fragments-buffer.patch diff --git a/queue-4.19/vmlinux.lds.h-avoid-orphan-section-with-smp.patch b/queue-4.19/vmlinux.lds.h-avoid-orphan-section-with-smp.patch new file mode 100644 index 00000000000..11d57156867 --- /dev/null +++ b/queue-4.19/vmlinux.lds.h-avoid-orphan-section-with-smp.patch @@ -0,0 +1,54 @@ +From d4c6399900364facd84c9e35ce1540b6046c345f Mon Sep 17 00:00:00 2001 +From: Nathan Chancellor +Date: Wed, 5 May 2021 17:14:11 -0700 +Subject: vmlinux.lds.h: Avoid orphan section with !SMP + +From: Nathan Chancellor + +commit d4c6399900364facd84c9e35ce1540b6046c345f upstream. + +With x86_64_defconfig and the following configs, there is an orphan +section warning: + +CONFIG_SMP=n +CONFIG_AMD_MEM_ENCRYPT=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_KVM=y +CONFIG_PARAVIRT=y + +ld: warning: orphan section `.data..decrypted' from `arch/x86/kernel/cpu/vmware.o' being placed in section `.data..decrypted' +ld: warning: orphan section `.data..decrypted' from `arch/x86/kernel/kvm.o' being placed in section `.data..decrypted' + +These sections are created with DEFINE_PER_CPU_DECRYPTED, which +ultimately turns into __PCPU_ATTRS, which in turn has a section +attribute with a value of PER_CPU_BASE_SECTION + the section name. When +CONFIG_SMP is not set, the base section is .data and that is not +currently handled in any linker script. + +Add .data..decrypted to PERCPU_DECRYPTED_SECTION, which is included in +PERCPU_INPUT -> PERCPU_SECTION, which is include in the x86 linker +script when either CONFIG_X86_64 or CONFIG_SMP is unset, taking care of +the warning. + +Fixes: ac26963a1175 ("percpu: Introduce DEFINE_PER_CPU_DECRYPTED") +Link: https://github.com/ClangBuiltLinux/linux/issues/1360 +Reported-by: kernel test robot +Signed-off-by: Nathan Chancellor +Tested-by: Nick Desaulniers # build +Signed-off-by: Kees Cook +Link: https://lore.kernel.org/r/20210506001410.1026691-1-nathan@kernel.org +Signed-off-by: Greg Kroah-Hartman +--- + include/asm-generic/vmlinux.lds.h | 1 + + 1 file changed, 1 insertion(+) + +--- a/include/asm-generic/vmlinux.lds.h ++++ b/include/asm-generic/vmlinux.lds.h +@@ -842,6 +842,7 @@ + #ifdef CONFIG_AMD_MEM_ENCRYPT + #define PERCPU_DECRYPTED_SECTION \ + . = ALIGN(PAGE_SIZE); \ ++ *(.data..decrypted) \ + *(.data..percpu..decrypted) \ + . = ALIGN(PAGE_SIZE); + #else