--- /dev/null
+From eac8ce86cb90ba96cb4bcbf2549d7a8b6938aa30 Mon Sep 17 00:00:00 2001
+From: Niklas Cassel <niklas.cassel@linaro.org>
+Date: Mon, 14 Oct 2019 14:09:20 +0200
+Subject: arm64: dts: qcom: qcs404-evb: Set vdd_apc regulator in high power mode
+
+From: Niklas Cassel <niklas.cassel@linaro.org>
+
+commit eac8ce86cb90ba96cb4bcbf2549d7a8b6938aa30 upstream.
+
+vdd_apc is the regulator that supplies the main CPU cluster.
+
+At sudden CPU load changes, we have noticed invalid page faults on
+addresses with all bits shifted, as well as on addresses with individual
+bits flipped.
+
+By putting the vdd_apc regulator in high power mode, the voltage drops
+during sudden load changes will be less severe, and we have not been able
+to reproduce the invalid page faults with the regulator in this mode.
+
+Fixes: 8faea8edbb35 ("arm64: dts: qcom: qcs404-evb: add spmi regulators")
+Cc: stable@vger.kernel.org
+Suggested-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Niklas Cassel <niklas.cassel@linaro.org>
+Reviewed-by: Vinod Koul <vkoul@kernel.org>
+Link: https://lore.kernel.org/r/20191014120920.12691-1-niklas.cassel@linaro.org
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/boot/dts/qcom/qcs404-evb.dtsi | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+@@ -73,6 +73,7 @@
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd_apc";
++ regulator-initial-mode = <1>;
+ regulator-min-microvolt = <1048000>;
+ regulator-max-microvolt = <1384000>;
+ };
--- /dev/null
+From 87fbfffcc89b92a4281b0aa53bd06af714087889 Mon Sep 17 00:00:00 2001
+From: Casey Schaufler <casey@schaufler-ca.com>
+Date: Mon, 3 Feb 2020 09:15:00 -0800
+Subject: broken ping to ipv6 linklocal addresses on debian buster
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Casey Schaufler <casey@schaufler-ca.com>
+
+commit 87fbfffcc89b92a4281b0aa53bd06af714087889 upstream.
+
+I am seeing ping failures to IPv6 linklocal addresses with Debian
+buster. The easiest example to reproduce is:
+
+$ ping -c1 -w1 ff02::1%eth1
+connect: Invalid argument
+
+The same command on a kernel without the offending commit succeeds:
+
+$ ping -c1 -w1 ff02::1%eth1
+PING ff02::01%eth1(ff02::1%eth1) 56 data bytes
+64 bytes from fe80::e0:f9ff:fe0c:37%eth1: icmp_seq=1 ttl=64 time=0.059 ms
+
+git bisect traced the failure to
+commit b9ef5513c99b ("smack: Check address length before reading address family")
+
+Arguably ping is being stupid since the buster version is not setting
+the address family properly (ping on stretch for example does):
+
+$ strace -e connect ping6 -c1 -w1 ff02::1%eth1
+connect(5, {sa_family=AF_UNSPEC,
+sa_data="\4\1\0\0\0\0\377\2\0\0\0\0\0\0\0\0\0\0\0\0\0\1\3\0\0\0"}, 28)
+= -1 EINVAL (Invalid argument)
+
+but the command works fine on kernels prior to this commit, so this is
+breakage which goes against the Linux paradigm of "don't break userspace".
+
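+A minimal stand-in for what buster's ping does (a hypothetical
+reproducer, not the actual ping source; the zeroed address family and
+28-byte length match the strace above):
+
+	#include <string.h>
+	#include <sys/socket.h>
+	#include <netinet/in.h>
+
+	int main(void)
+	{
+		struct sockaddr_in6 sin6;
+		int fd = socket(AF_INET6, SOCK_DGRAM, 0);
+
+		/* sa_family stays AF_UNSPEC (0); kernels carrying
+		 * b9ef5513c99b reject this connect() with -EINVAL */
+		memset(&sin6, 0, sizeof(sin6));
+		return connect(fd, (struct sockaddr *)&sin6, sizeof(sin6));
+	}
+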
+Cc: stable@vger.kernel.org
+Reported-by: David Ahern <dsahern@gmail.com>
+Suggested-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
+Signed-off-by: Casey Schaufler <casey@schaufler-ca.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/smack/smack_lsm.c | 41 +++++++++++++++++++----------------------
+ 1 file changed, 19 insertions(+), 22 deletions(-)
+
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -2831,42 +2831,39 @@ static int smack_socket_connect(struct s
+ int addrlen)
+ {
+ int rc = 0;
+-#if IS_ENABLED(CONFIG_IPV6)
+- struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
+-#endif
+-#ifdef SMACK_IPV6_SECMARK_LABELING
+- struct smack_known *rsp;
+- struct socket_smack *ssp;
+-#endif
+
+ if (sock->sk == NULL)
+ return 0;
+-
++ if (sock->sk->sk_family != PF_INET &&
++ (!IS_ENABLED(CONFIG_IPV6) || sock->sk->sk_family != PF_INET6))
++ return 0;
++ if (addrlen < offsetofend(struct sockaddr, sa_family))
++ return 0;
++ if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) {
++ struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
+ #ifdef SMACK_IPV6_SECMARK_LABELING
+- ssp = sock->sk->sk_security;
++ struct smack_known *rsp;
+ #endif
+
+- switch (sock->sk->sk_family) {
+- case PF_INET:
+- if (addrlen < sizeof(struct sockaddr_in) ||
+- sap->sa_family != AF_INET)
+- return -EINVAL;
+- rc = smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap);
+- break;
+- case PF_INET6:
+- if (addrlen < SIN6_LEN_RFC2133 || sap->sa_family != AF_INET6)
+- return -EINVAL;
++ if (addrlen < SIN6_LEN_RFC2133)
++ return 0;
+ #ifdef SMACK_IPV6_SECMARK_LABELING
+ rsp = smack_ipv6host_label(sip);
+- if (rsp != NULL)
++ if (rsp != NULL) {
++ struct socket_smack *ssp = sock->sk->sk_security;
++
+ rc = smk_ipv6_check(ssp->smk_out, rsp, sip,
+- SMK_CONNECTING);
++ SMK_CONNECTING);
++ }
+ #endif
+ #ifdef SMACK_IPV6_PORT_LABELING
+ rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING);
+ #endif
+- break;
++ return rc;
+ }
++ if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in))
++ return 0;
++ rc = smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap);
+ return rc;
+ }
+
--- /dev/null
+From bf83b96f87ae2abb1e535306ea53608e8de5dfbb Mon Sep 17 00:00:00 2001
+From: Stephen Warren <swarren@nvidia.com>
+Date: Thu, 3 Oct 2019 14:50:30 -0600
+Subject: clk: tegra: Mark fuse clock as critical
+
+From: Stephen Warren <swarren@nvidia.com>
+
+commit bf83b96f87ae2abb1e535306ea53608e8de5dfbb upstream.
+
+For a little over a year, U-Boot on Tegra124 has configured the flow
+controller to perform automatic RAM re-repair on off->on power
+transitions of the CPU rail[1]. This is mandatory for correct operation
+of Tegra124. However, RAM re-repair relies on certain clocks, which the
+kernel must enable and leave running. The fuse clock is one of those
+clocks. Mark this clock as critical so that LP1 power mode (system
+suspend) operates correctly.
+
+[1] 3cc7942a4ae5 ARM: tegra: implement RAM repair
+
+Reported-by: Jonathan Hunter <jonathanh@nvidia.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Stephen Warren <swarren@nvidia.com>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/clk/tegra/clk-tegra-periph.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/clk/tegra/clk-tegra-periph.c
++++ b/drivers/clk/tegra/clk-tegra-periph.c
+@@ -777,7 +777,11 @@ static struct tegra_periph_init_data gat
+ GATE("ahbdma", "hclk", 33, 0, tegra_clk_ahbdma, 0),
+ GATE("apbdma", "pclk", 34, 0, tegra_clk_apbdma, 0),
+ GATE("kbc", "clk_32k", 36, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_kbc, 0),
+- GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, 0),
++ /*
++ * Critical for RAM re-repair operation, which must occur on resume
++ * from LP1 system suspend and as part of CCPLEX cluster switching.
++ */
++ GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, CLK_IS_CRITICAL),
+ GATE("fuse_burn", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse_burn, 0),
+ GATE("kfuse", "clk_m", 40, TEGRA_PERIPH_ON_APB, tegra_clk_kfuse, 0),
+ GATE("apbif", "clk_m", 107, TEGRA_PERIPH_ON_APB, tegra_clk_apbif, 0),
--- /dev/null
+From 58fe03d6dec908a1bec07eea7e94907af5c07eec Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Fri, 24 Jan 2020 14:10:46 -0500
+Subject: drm/amd/dm/mst: Ignore payload update failures
+
+From: Lyude Paul <lyude@redhat.com>
+
+commit 58fe03d6dec908a1bec07eea7e94907af5c07eec upstream.
+
+Disabling a display on MST can potentially happen after the entire MST
+topology has been removed, which means that we can't communicate with
+the topology at all in this scenario. This also means that we can't
+properly update payloads on the topology, and as such it's a good idea
+to ignore payload update failures when disabling displays.
+Currently, amdgpu makes the mistake of halting the payload update
+process when any payload update failures occur, resulting in leaving
+DC's local copies of the payload tables out of date.
+
+This ends up causing problems with hotplugging MST topologies, and
+causes modesets on the second hotplug to fail like so:
+
+[drm] Failed to updateMST allocation table forpipe idx:1
+------------[ cut here ]------------
+WARNING: CPU: 5 PID: 1511 at
+drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link.c:2677
+update_mst_stream_alloc_table+0x11e/0x130 [amdgpu]
+Modules linked in: cdc_ether usbnet fuse xt_conntrack nf_conntrack
+nf_defrag_ipv6 libcrc32c nf_defrag_ipv4 ipt_REJECT nf_reject_ipv4
+nft_counter nft_compat nf_tables nfnetlink tun bridge stp llc sunrpc
+vfat fat wmi_bmof uvcvideo snd_hda_codec_realtek snd_hda_codec_generic
+snd_hda_codec_hdmi videobuf2_vmalloc snd_hda_intel videobuf2_memops
+videobuf2_v4l2 snd_intel_dspcfg videobuf2_common crct10dif_pclmul
+snd_hda_codec videodev crc32_pclmul snd_hwdep snd_hda_core
+ghash_clmulni_intel snd_seq mc joydev pcspkr snd_seq_device snd_pcm
+sp5100_tco k10temp i2c_piix4 snd_timer thinkpad_acpi ledtrig_audio snd
+wmi soundcore video i2c_scmi acpi_cpufreq ip_tables amdgpu(O)
+rtsx_pci_sdmmc amd_iommu_v2 gpu_sched mmc_core i2c_algo_bit ttm
+drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops cec drm
+crc32c_intel serio_raw hid_multitouch r8152 mii nvme r8169 nvme_core
+rtsx_pci pinctrl_amd
+CPU: 5 PID: 1511 Comm: gnome-shell Tainted: G O 5.5.0-rc7Lyude-Test+ #4
+Hardware name: LENOVO FA495SIT26/FA495SIT26, BIOS R12ET22W(0.22 ) 01/31/2019
+RIP: 0010:update_mst_stream_alloc_table+0x11e/0x130 [amdgpu]
+Code: 28 00 00 00 75 2b 48 8d 65 e0 5b 41 5c 41 5d 41 5e 5d c3 0f b6 06
+49 89 1c 24 41 88 44 24 08 0f b6 46 01 41 88 44 24 09 eb 93 <0f> 0b e9
+2f ff ff ff e8 a6 82 a3 c2 66 0f 1f 44 00 00 0f 1f 44 00
+RSP: 0018:ffffac428127f5b0 EFLAGS: 00010202
+RAX: 0000000000000002 RBX: ffff8d1e166eee80 RCX: 0000000000000000
+RDX: ffffac428127f668 RSI: ffff8d1e166eee80 RDI: ffffac428127f610
+RBP: ffffac428127f640 R08: ffffffffc03d94a8 R09: 0000000000000000
+R10: ffff8d1e24b02000 R11: ffffac428127f5b0 R12: ffff8d1e1b83d000
+R13: ffff8d1e1bea0b08 R14: 0000000000000002 R15: 0000000000000002
+FS: 00007fab23ffcd80(0000) GS:ffff8d1e28b40000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f151f1711e8 CR3: 00000005997c0000 CR4: 00000000003406e0
+Call Trace:
+ ? mutex_lock+0xe/0x30
+ dc_link_allocate_mst_payload+0x9a/0x210 [amdgpu]
+ ? dm_read_reg_func+0x39/0xb0 [amdgpu]
+ ? core_link_enable_stream+0x656/0x730 [amdgpu]
+ core_link_enable_stream+0x656/0x730 [amdgpu]
+ dce110_apply_ctx_to_hw+0x58e/0x5d0 [amdgpu]
+ ? dcn10_verify_allow_pstate_change_high+0x1d/0x280 [amdgpu]
+ ? dcn10_wait_for_mpcc_disconnect+0x3c/0x130 [amdgpu]
+ dc_commit_state+0x292/0x770 [amdgpu]
+ ? add_timer+0x101/0x1f0
+ ? ttm_bo_put+0x1a1/0x2f0 [ttm]
+ amdgpu_dm_atomic_commit_tail+0xb59/0x1ff0 [amdgpu]
+ ? amdgpu_move_blit.constprop.0+0xb8/0x1f0 [amdgpu]
+ ? amdgpu_bo_move+0x16d/0x2b0 [amdgpu]
+ ? ttm_bo_handle_move_mem+0x118/0x570 [ttm]
+ ? ttm_bo_validate+0x134/0x150 [ttm]
+ ? dm_plane_helper_prepare_fb+0x1b9/0x2a0 [amdgpu]
+ ? _cond_resched+0x15/0x30
+ ? wait_for_completion_timeout+0x38/0x160
+ ? _cond_resched+0x15/0x30
+ ? wait_for_completion_interruptible+0x33/0x190
+ commit_tail+0x94/0x130 [drm_kms_helper]
+ drm_atomic_helper_commit+0x113/0x140 [drm_kms_helper]
+ drm_atomic_helper_set_config+0x70/0xb0 [drm_kms_helper]
+ drm_mode_setcrtc+0x194/0x6a0 [drm]
+ ? _cond_resched+0x15/0x30
+ ? mutex_lock+0xe/0x30
+ ? drm_mode_getcrtc+0x180/0x180 [drm]
+ drm_ioctl_kernel+0xaa/0xf0 [drm]
+ drm_ioctl+0x208/0x390 [drm]
+ ? drm_mode_getcrtc+0x180/0x180 [drm]
+ amdgpu_drm_ioctl+0x49/0x80 [amdgpu]
+ do_vfs_ioctl+0x458/0x6d0
+ ksys_ioctl+0x5e/0x90
+ __x64_sys_ioctl+0x16/0x20
+ do_syscall_64+0x55/0x1b0
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+RIP: 0033:0x7fab2121f87b
+Code: 0f 1e fa 48 8b 05 0d 96 2c 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff
+ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01
+f0 ff ff 73 01 c3 48 8b 0d dd 95 2c 00 f7 d8 64 89 01 48
+RSP: 002b:00007ffd045f9068 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+RAX: ffffffffffffffda RBX: 00007ffd045f90a0 RCX: 00007fab2121f87b
+RDX: 00007ffd045f90a0 RSI: 00000000c06864a2 RDI: 000000000000000b
+RBP: 00007ffd045f90a0 R08: 0000000000000000 R09: 000055dbd2985d10
+R10: 000055dbd2196280 R11: 0000000000000246 R12: 00000000c06864a2
+R13: 000000000000000b R14: 0000000000000000 R15: 000055dbd2196280
+---[ end trace 6ea888c24d2059cd ]---
+
+Note as well, I have only been able to reproduce this on setups with 2
+MST displays.
+
+Changes since v1:
+* Don't return false when part 1 or part 2 of updating the payloads
+  fails; we don't want to abort at any step of the process even if
+ things fail
+
+Reviewed-by: Mikita Lipski <Mikita.Lipski@amd.com>
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -246,7 +246,8 @@ bool dm_helpers_dp_mst_write_payload_all
+ drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
+ }
+
+- ret = drm_dp_update_payload_part1(mst_mgr);
++ /* It's OK for this to fail */
++ drm_dp_update_payload_part1(mst_mgr);
+
+ /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
+ * AUX message. The sequence is slot 1-63 allocated sequence for each
+@@ -255,9 +256,6 @@ bool dm_helpers_dp_mst_write_payload_all
+
+ get_payload_table(aconnector, proposed_table);
+
+- if (ret)
+- return false;
+-
+ return true;
+ }
+
+@@ -315,7 +313,6 @@ bool dm_helpers_dp_mst_send_payload_allo
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+- int ret;
+
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+@@ -329,10 +326,8 @@ bool dm_helpers_dp_mst_send_payload_allo
+ if (!mst_mgr->mst_state)
+ return false;
+
+- ret = drm_dp_update_payload_part2(mst_mgr);
+-
+- if (ret)
+- return false;
++ /* It's OK for this to fail */
++ drm_dp_update_payload_part2(mst_mgr);
+
+ if (!enable)
+ drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
--- /dev/null
+From 1cf8c930b378016846c88ef0f1444248033326ec Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 30 Jan 2020 16:46:38 +0800
+Subject: drm/amd/powerplay: fix navi10 system intermittent reboot issue V2
+
+From: Evan Quan <evan.quan@amd.com>
+
+commit 1cf8c930b378016846c88ef0f1444248033326ec upstream.
+
+This workaround (disabling the UMC CDR feature by briefly forcing UCLK
+out of, and then back to, its highest DPM level) is needed only for
+Navi10 12 Gbps SKUs, which are identified by PCI device and revision ID.
+
+V2: added SMU firmware version guard
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 18 ++++++
+ drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1
+ drivers/gpu/drm/amd/powerplay/inc/smu_types.h | 2
+ drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h | 5 +
+ drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 58 ++++++++++++++++++++
+ drivers/gpu/drm/amd/powerplay/smu_internal.h | 3 +
+ 6 files changed, 86 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+@@ -21,6 +21,7 @@
+ */
+
+ #include <linux/firmware.h>
++#include <linux/pci.h>
+
+ #include "pp_debug.h"
+ #include "amdgpu.h"
+@@ -1125,6 +1126,23 @@ static int smu_smc_table_hw_init(struct
+ ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
+ if (ret)
+ return ret;
++
++ if (adev->asic_type == CHIP_NAVI10) {
++ if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 ||
++ adev->pdev->revision == 0xc3 ||
++ adev->pdev->revision == 0xca ||
++ adev->pdev->revision == 0xcb)) ||
++ (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 ||
++ adev->pdev->revision == 0xf4 ||
++ adev->pdev->revision == 0xf5 ||
++ adev->pdev->revision == 0xf6))) {
++ ret = smu_disable_umc_cdr_12gbps_workaround(smu);
++ if (ret) {
++ pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
++ return ret;
++ }
++ }
++ }
+ }
+
+ /*
+--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+@@ -550,6 +550,7 @@ struct pptable_funcs {
+ int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
+ int (*override_pcie_parameters)(struct smu_context *smu);
+ uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
++ int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
+ };
+
+ int smu_load_microcode(struct smu_context *smu);
+--- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
+@@ -170,6 +170,8 @@
+ __SMU_DUMMY_MAP(SetSoftMinJpeg), \
+ __SMU_DUMMY_MAP(SetHardMinFclkByFreq), \
+ __SMU_DUMMY_MAP(DFCstateControl), \
++ __SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \
++ __SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \
+
+ #undef __SMU_DUMMY_MAP
+ #define __SMU_DUMMY_MAP(type) SMU_MSG_##type
+--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
+@@ -120,7 +120,10 @@
+ #define PPSMC_MSG_GetVoltageByDpmOverdrive 0x45
+ #define PPSMC_MSG_BacoAudioD3PME 0x48
+
+-#define PPSMC_Message_Count 0x49
++#define PPSMC_MSG_DALDisableDummyPstateChange 0x49
++#define PPSMC_MSG_DALEnableDummyPstateChange 0x4A
++
++#define PPSMC_Message_Count 0x4B
+
+ typedef uint32_t PPSMC_Result;
+ typedef uint32_t PPSMC_Msg;
+--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+@@ -119,6 +119,8 @@ static struct smu_11_0_cmn2aisc_mapping
+ MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg),
+ MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME),
+ MSG_MAP(ArmD3, PPSMC_MSG_ArmD3),
++ MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange),
++ MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange),
+ };
+
+ static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
+@@ -2000,6 +2002,61 @@ static int navi10_run_btc(struct smu_con
+ return ret;
+ }
+
++static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
++{
++ int result = 0;
++
++ if (!enable)
++ result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE);
++ else
++ result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE);
++
++ return result;
++}
++
++static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
++{
++ uint32_t uclk_count, uclk_min, uclk_max;
++ uint32_t smu_version;
++ int ret = 0;
++
++ ret = smu_get_smc_version(smu, NULL, &smu_version);
++ if (ret)
++ return ret;
++
++ /* This workaround is available only for 42.50 or later SMC firmwares */
++ if (smu_version < 0x2A3200)
++ return 0;
++
++ ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
++ if (ret)
++ return ret;
++
++ ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
++ if (ret)
++ return ret;
++
++ ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
++ if (ret)
++ return ret;
++
++ /* Force UCLK out of the highest DPM */
++ ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_min);
++ if (ret)
++ return ret;
++
++ /* Revert the UCLK Hardmax */
++ ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_max);
++ if (ret)
++ return ret;
++
++ /*
++ * In this case, SMU already disabled dummy pstate during enablement
++	 * of UCLK DPM, so we have to re-enable it.
++	 */
++ return navi10_dummy_pstate_control(smu, true);
++}
++
+ static const struct pptable_funcs navi10_ppt_funcs = {
+ .tables_init = navi10_tables_init,
+ .alloc_dpm_context = navi10_allocate_dpm_context,
+@@ -2091,6 +2148,7 @@ static const struct pptable_funcs navi10
+ .od_edit_dpm_table = navi10_od_edit_dpm_table,
+ .get_pptable_power_limit = navi10_get_pptable_power_limit,
+ .run_btc = navi10_run_btc,
++ .disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
+ };
+
+ void navi10_set_ppt_funcs(struct smu_context *smu)
+--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
++++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
+@@ -201,4 +201,7 @@ int smu_send_smc_msg(struct smu_context
+ #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \
+ ((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0)
+
++#define smu_disable_umc_cdr_12gbps_workaround(smu) \
++ ((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0)
++
+ #endif
--- /dev/null
+From 0531aa6eb38bfa9514609e2727558a051da7365f Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Sat, 25 Jan 2020 13:30:45 -0500
+Subject: drm/amdgpu: fetch default VDDC curve voltages (v2)
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 0531aa6eb38bfa9514609e2727558a051da7365f upstream.
+
+Ask the SMU for the default VDDC curve voltage values. This
+properly reports the VDDC values in the OD interface.
+
+v2: only update if the original values are 0
+
+Bug: https://gitlab.freedesktop.org/drm/amd/issues/1020
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 5.5.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 50 ++++++++++++++++++++++++++++-
+ 1 file changed, 49 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+@@ -121,6 +121,8 @@ static struct smu_11_0_cmn2aisc_mapping
+ MSG_MAP(ArmD3, PPSMC_MSG_ArmD3),
+ MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange),
+ MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange),
++ MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm),
++ MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive),
+ };
+
+ static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
+@@ -1782,6 +1784,28 @@ static int navi10_od_setting_check_range
+ return 0;
+ }
+
++static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
++ uint16_t *voltage,
++ uint32_t freq)
++{
++ uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16);
++ uint32_t value = 0;
++ int ret;
++
++ ret = smu_send_smc_msg_with_param(smu,
++ SMU_MSG_GetVoltageByDpm,
++ param);
++ if (ret) {
++ pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
++ return ret;
++ }
++
++ smu_read_smc_arg(smu, &value);
++ *voltage = (uint16_t)value;
++
++ return 0;
++}
++
+ static int navi10_setup_od_limits(struct smu_context *smu) {
+ struct smu_11_0_overdrive_table *overdrive_table = NULL;
+ struct smu_11_0_powerplay_table *powerplay_table = NULL;
+@@ -1808,16 +1832,40 @@ static int navi10_set_default_od_setting
+ if (ret)
+ return ret;
+
++ od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
+ if (initialize) {
+ ret = navi10_setup_od_limits(smu);
+ if (ret) {
+ pr_err("Failed to retrieve board OD limits\n");
+ return ret;
+ }
++ if (od_table) {
++ if (!od_table->GfxclkVolt1) {
++ ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
++ &od_table->GfxclkVolt1,
++ od_table->GfxclkFreq1);
++ if (ret)
++ od_table->GfxclkVolt1 = 0;
++ }
++
++ if (!od_table->GfxclkVolt2) {
++ ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
++ &od_table->GfxclkVolt2,
++ od_table->GfxclkFreq2);
++ if (ret)
++ od_table->GfxclkVolt2 = 0;
++ }
+
++ if (!od_table->GfxclkVolt3) {
++ ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
++ &od_table->GfxclkVolt3,
++ od_table->GfxclkFreq3);
++ if (ret)
++ od_table->GfxclkVolt3 = 0;
++ }
++ }
+ }
+
+- od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
+ if (od_table) {
+ navi10_dump_od_table(od_table);
+ }
--- /dev/null
+From 45826e9c4e9e952db43053f4fbed58ec602a410f Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Sat, 25 Jan 2020 11:51:41 -0500
+Subject: drm/amdgpu/navi: fix index for OD MCLK
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 45826e9c4e9e952db43053f4fbed58ec602a410f upstream.
+
+You can only adjust the max mclk, not the min.
+
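+With the index corrected, the printed line matches the index users pass
+back when writing (illustrative transcript; the card path and clock
+values are examples only):
+
+  $ cat /sys/class/drm/card0/device/pp_od_clk_voltage
+  OD_MCLK:
+  1: 875MHz
+  $ echo "m 1 900" > /sys/class/drm/card0/device/pp_od_clk_voltage
+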
+Bug: https://gitlab.freedesktop.org/drm/amd/issues/1020
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 5.5.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+@@ -812,7 +812,7 @@ static int navi10_print_clk_levels(struc
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
+ break;
+ size += sprintf(buf + size, "OD_MCLK:\n");
+- size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax);
++ size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
+ break;
+ case SMU_OD_VDDC_CURVE:
+ if (!smu->od_enabled || !od_table || !od_settings)
--- /dev/null
+From ee23a518fdc2c1dd1aaaf3a2c7ffdd6c83b396ec Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Sat, 25 Jan 2020 11:27:06 -0500
+Subject: drm/amdgpu/navi10: add OD_RANGE for navi overclocking
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit ee23a518fdc2c1dd1aaaf3a2c7ffdd6c83b396ec upstream.
+
+So users can see the range of valid values.
+
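+The new block in pp_od_clk_voltage looks roughly like this (values are
+illustrative, not from real hardware; the format strings are in the hunk
+below, and the VDDC_CURVE lines repeat for points [1] and [2]):
+
+  OD_RANGE:
+  SCLK:     300Mhz       2200Mhz
+  MCLK:     625Mhz       1200Mhz
+  VDDC_CURVE_SCLK[0]:     300Mhz       2200Mhz
+  VDDC_CURVE_VOLT[0]:     700mV        1200mV
+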
+Bug: https://gitlab.freedesktop.org/drm/amd/issues/1020
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 5.5.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 59 +++++++++++++++++++++++++++++
+ 1 file changed, 59 insertions(+)
+
+--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+@@ -710,6 +710,15 @@ static inline bool navi10_od_feature_is_
+ return od_table->cap[feature];
+ }
+
++static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
++ enum SMU_11_0_ODSETTING_ID setting,
++ uint32_t *min, uint32_t *max)
++{
++ if (min)
++ *min = od_table->min[setting];
++ if (max)
++ *max = od_table->max[setting];
++}
+
+ static int navi10_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, char *buf)
+@@ -728,6 +737,7 @@ static int navi10_print_clk_levels(struc
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)table_context->overdrive_table;
+ struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
++ uint32_t min_value, max_value;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+@@ -841,6 +851,55 @@ static int navi10_print_clk_levels(struc
+ size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
+ }
+ break;
++ case SMU_OD_RANGE:
++ if (!smu->od_enabled || !od_table || !od_settings)
++ break;
++ size = sprintf(buf, "%s:\n", "OD_RANGE");
++
++ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
++ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
++ &min_value, NULL);
++ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
++ NULL, &max_value);
++ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
++ min_value, max_value);
++ }
++
++ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
++ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
++ &min_value, &max_value);
++ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
++ min_value, max_value);
++ }
++
++ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
++ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
++ &min_value, &max_value);
++ size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
++ min_value, max_value);
++ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
++ &min_value, &max_value);
++ size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
++ min_value, max_value);
++ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
++ &min_value, &max_value);
++ size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
++ min_value, max_value);
++ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
++ &min_value, &max_value);
++ size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
++ min_value, max_value);
++ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
++ &min_value, &max_value);
++ size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
++ min_value, max_value);
++ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
++ &min_value, &max_value);
++ size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
++ min_value, max_value);
++ }
++
++ break;
+ default:
+ break;
+ }
--- /dev/null
+From 93c5f1f66c6ad4a3b180c1644f74e1b3b4be7864 Mon Sep 17 00:00:00 2001
+From: Matt Coffin <mcoffin13@gmail.com>
+Date: Sat, 25 Jan 2020 13:04:05 -0500
+Subject: drm/amdgpu/smu_v11_0: Correct behavior of restoring default tables (v2)
+
+From: Matt Coffin <mcoffin13@gmail.com>
+
+commit 93c5f1f66c6ad4a3b180c1644f74e1b3b4be7864 upstream.
+
+Previously, the sysfs functionality for restoring the default powerplay
+table was sourcing its information from the currently-staged powerplay
+table.
+
+This patch adds a step to cache the first overdrive table that we see on
+boot, so that it can be used later to "restore" the powerplay table.
+
+v2: squash my original with Matt's fix
+
+Bug: https://gitlab.freedesktop.org/drm/amd/issues/1020
+Signed-off-by: Matt Coffin <mcoffin13@gmail.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 5.5.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1
+ drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 7 ++++++
+ drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 6 +++++
+ drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 28 +++++++------------------
+ 4 files changed, 22 insertions(+), 20 deletions(-)
+
+--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+@@ -263,6 +263,7 @@ struct smu_table_context
+ uint8_t thermal_controller_type;
+
+ void *overdrive_table;
++ void *boot_overdrive_table;
+ };
+
+ struct smu_dpm_context {
+--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+@@ -2020,6 +2020,13 @@ static int navi10_od_edit_dpm_table(stru
+ return ret;
+ od_table->UclkFmax = input[1];
+ break;
++ case PP_OD_RESTORE_DEFAULT_TABLE:
++ if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
++ pr_err("Overdrive table was not initialized!\n");
++ return -EINVAL;
++ }
++ memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
++ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ navi10_dump_od_table(od_table);
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
+--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
++++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+@@ -1807,6 +1807,12 @@ int smu_v11_0_set_default_od_settings(st
+ pr_err("Failed to export overdrive table!\n");
+ return ret;
+ }
++ if (!table_context->boot_overdrive_table) {
++ table_context->boot_overdrive_table = kmemdup(table_context->overdrive_table, overdrive_table_size, GFP_KERNEL);
++ if (!table_context->boot_overdrive_table) {
++ return -ENOMEM;
++ }
++ }
+ }
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
+ if (ret) {
+--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+@@ -1702,22 +1702,11 @@ static int vega20_set_default_od_setting
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret;
+
+- if (initialize) {
+- if (table_context->overdrive_table)
+- return -EINVAL;
+-
+- table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL);
+-
+- if (!table_context->overdrive_table)
+- return -ENOMEM;
+-
+- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
+- table_context->overdrive_table, false);
+- if (ret) {
+- pr_err("Failed to export over drive table!\n");
+- return ret;
+- }
++ ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
++ if (ret)
++ return ret;
+
++ if (initialize) {
+ ret = vega20_set_default_od8_setttings(smu);
+ if (ret)
+ return ret;
+@@ -2774,12 +2763,11 @@ static int vega20_odn_edit_dpm_table(str
+ break;
+
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
+- if (ret) {
+- pr_err("Failed to export over drive table!\n");
+- return ret;
++ if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
++ pr_err("Overdrive table was not initialized!\n");
++ return -EINVAL;
+ }
+-
++ memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
+ break;
+
+ case PP_OD_COMMIT_DPM_TABLE:
--- /dev/null
+From 2d9384ff91770a71bd1ff24c25952ef1187a0e9c Mon Sep 17 00:00:00 2001
+From: Thierry Reding <treding@nvidia.com>
+Date: Tue, 4 Feb 2020 14:59:24 +0100
+Subject: drm/tegra: Relax IOMMU usage criteria on old Tegra
+
+From: Thierry Reding <treding@nvidia.com>
+
+commit 2d9384ff91770a71bd1ff24c25952ef1187a0e9c upstream.
+
+Older Tegra devices only allow addressing 32 bits of memory, so whether
+or not the host1x is attached to an IOMMU doesn't matter. host1x IOMMU
+attachment is only needed on devices that can address memory beyond the
+32-bit boundary and where the host1x doesn't support the wide GATHER
+opcode that allows it to access buffers at higher addresses.
+
+Cc: <stable@vger.kernel.org> # v5.5
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Tested-by: Dmitry Osipenko <digetx@gmail.com>
+Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/tegra/drm.c | 49 ++++++++++++++++++++++++++++----------------
+ 1 file changed, 32 insertions(+), 17 deletions(-)
+
+--- a/drivers/gpu/drm/tegra/drm.c
++++ b/drivers/gpu/drm/tegra/drm.c
+@@ -1037,23 +1037,9 @@ void tegra_drm_free(struct tegra_drm *te
+ free_pages((unsigned long)virt, get_order(size));
+ }
+
+-static int host1x_drm_probe(struct host1x_device *dev)
++static bool host1x_drm_wants_iommu(struct host1x_device *dev)
+ {
+- struct drm_driver *driver = &tegra_drm_driver;
+ struct iommu_domain *domain;
+- struct tegra_drm *tegra;
+- struct drm_device *drm;
+- int err;
+-
+- drm = drm_dev_alloc(driver, &dev->dev);
+- if (IS_ERR(drm))
+- return PTR_ERR(drm);
+-
+- tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
+- if (!tegra) {
+- err = -ENOMEM;
+- goto put;
+- }
+
+ /*
+ * If the Tegra DRM clients are backed by an IOMMU, push buffers are
+@@ -1082,9 +1068,38 @@ static int host1x_drm_probe(struct host1
+ * up the device tree appropriately. This is considered a problem
+ * of integration, so care must be taken for the DT to be consistent.
+ */
+- domain = iommu_get_domain_for_dev(drm->dev->parent);
++ domain = iommu_get_domain_for_dev(dev->dev.parent);
++
++ /*
++ * Tegra20 and Tegra30 don't support addressing memory beyond the
++ * 32-bit boundary, so the regular GATHER opcodes will always be
++ * sufficient and whether or not the host1x is attached to an IOMMU
++ * doesn't matter.
++ */
++ if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32))
++ return true;
++
++ return domain != NULL;
++}
++
++static int host1x_drm_probe(struct host1x_device *dev)
++{
++ struct drm_driver *driver = &tegra_drm_driver;
++ struct tegra_drm *tegra;
++ struct drm_device *drm;
++ int err;
++
++ drm = drm_dev_alloc(driver, &dev->dev);
++ if (IS_ERR(drm))
++ return PTR_ERR(drm);
++
++ tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
++ if (!tegra) {
++ err = -ENOMEM;
++ goto put;
++ }
+
+- if (domain && iommu_present(&platform_bus_type)) {
++ if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) {
+ tegra->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!tegra->domain) {
+ err = -ENOMEM;
--- /dev/null
+From 273da5a046965ccf0ec79eb63f2d5173467e20fa Mon Sep 17 00:00:00 2001
+From: Thierry Reding <treding@nvidia.com>
+Date: Tue, 4 Feb 2020 14:59:25 +0100
+Subject: drm/tegra: Reuse IOVA mapping where possible
+
+From: Thierry Reding <treding@nvidia.com>
+
+commit 273da5a046965ccf0ec79eb63f2d5173467e20fa upstream.
+
+This partially reverts the DMA API support that was recently merged
+because it was causing performance regressions on older Tegra devices.
+Unfortunately, the cache maintenance performed by dma_map_sg() and
+dma_unmap_sg() causes performance to drop by a factor of 10.
+
+The right solution for this would be to cache mappings for buffers per
+consumer device, but that's a bit involved. Instead, we simply revert to
+the old behaviour of sharing IOVA mappings when we know that devices can
+do so (i.e. they share the same IOMMU domain).
+
+Cc: <stable@vger.kernel.org> # v5.5
+Reported-by: Dmitry Osipenko <digetx@gmail.com>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Tested-by: Dmitry Osipenko <digetx@gmail.com>
+Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/tegra/gem.c | 10 ++++++++-
+ drivers/gpu/drm/tegra/plane.c | 46 +++++++++++++++++++++++-------------------
+ drivers/gpu/host1x/job.c | 32 ++++++++++++++++++++++++++---
+ 3 files changed, 64 insertions(+), 24 deletions(-)
+
+--- a/drivers/gpu/drm/tegra/gem.c
++++ b/drivers/gpu/drm/tegra/gem.c
+@@ -60,8 +60,16 @@ static struct sg_table *tegra_bo_pin(str
+ /*
+ * If we've manually mapped the buffer object through the IOMMU, make
+ * sure to return the IOVA address of our mapping.
++ *
++ * Similarly, for buffers that have been allocated by the DMA API the
++ * physical address can be used for devices that are not attached to
++ * an IOMMU. For these devices, callers must pass a valid pointer via
++ * the @phys argument.
++ *
++ * Imported buffers were also already mapped at import time, so the
++ * existing mapping can be reused.
+ */
+- if (phys && obj->mm) {
++ if (phys) {
+ *phys = obj->iova;
+ return NULL;
+ }
+--- a/drivers/gpu/drm/tegra/plane.c
++++ b/drivers/gpu/drm/tegra/plane.c
+@@ -3,6 +3,8 @@
+ * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
+ */
+
++#include <linux/iommu.h>
++
+ #include <drm/drm_atomic.h>
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_fourcc.h>
+@@ -107,21 +109,27 @@ const struct drm_plane_funcs tegra_plane
+
+ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
+ {
++ struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev);
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < state->base.fb->format->num_planes; i++) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
++ dma_addr_t phys_addr, *phys;
++ struct sg_table *sgt;
+
+- if (!dc->client.group) {
+- struct sg_table *sgt;
+-
+- sgt = host1x_bo_pin(dc->dev, &bo->base, NULL);
+- if (IS_ERR(sgt)) {
+- err = PTR_ERR(sgt);
+- goto unpin;
+- }
++ if (!domain || dc->client.group)
++ phys = &phys_addr;
++ else
++ phys = NULL;
++
++ sgt = host1x_bo_pin(dc->dev, &bo->base, phys);
++ if (IS_ERR(sgt)) {
++ err = PTR_ERR(sgt);
++ goto unpin;
++ }
+
++ if (sgt) {
+ err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ if (err == 0) {
+@@ -143,7 +151,7 @@ static int tegra_dc_pin(struct tegra_dc
+ state->iova[i] = sg_dma_address(sgt->sgl);
+ state->sgt[i] = sgt;
+ } else {
+- state->iova[i] = bo->iova;
++ state->iova[i] = phys_addr;
+ }
+ }
+
+@@ -156,9 +164,11 @@ unpin:
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+ struct sg_table *sgt = state->sgt[i];
+
+- dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+- host1x_bo_unpin(dc->dev, &bo->base, sgt);
++ if (sgt)
++ dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
++ DMA_TO_DEVICE);
+
++ host1x_bo_unpin(dc->dev, &bo->base, sgt);
+ state->iova[i] = DMA_MAPPING_ERROR;
+ state->sgt[i] = NULL;
+ }
+@@ -172,17 +182,13 @@ static void tegra_dc_unpin(struct tegra_
+
+ for (i = 0; i < state->base.fb->format->num_planes; i++) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
++ struct sg_table *sgt = state->sgt[i];
+
+- if (!dc->client.group) {
+- struct sg_table *sgt = state->sgt[i];
+-
+- if (sgt) {
+- dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
+- DMA_TO_DEVICE);
+- host1x_bo_unpin(dc->dev, &bo->base, sgt);
+- }
+- }
++ if (sgt)
++ dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
++ DMA_TO_DEVICE);
+
++ host1x_bo_unpin(dc->dev, &bo->base, sgt);
+ state->iova[i] = DMA_MAPPING_ERROR;
+ state->sgt[i] = NULL;
+ }
+--- a/drivers/gpu/host1x/job.c
++++ b/drivers/gpu/host1x/job.c
+@@ -8,6 +8,7 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/err.h>
+ #include <linux/host1x.h>
++#include <linux/iommu.h>
+ #include <linux/kref.h>
+ #include <linux/module.h>
+ #include <linux/scatterlist.h>
+@@ -101,9 +102,11 @@ static unsigned int pin_job(struct host1
+ {
+ struct host1x_client *client = job->client;
+ struct device *dev = client->dev;
++ struct iommu_domain *domain;
+ unsigned int i;
+ int err;
+
++ domain = iommu_get_domain_for_dev(dev);
+ job->num_unpins = 0;
+
+ for (i = 0; i < job->num_relocs; i++) {
+@@ -117,7 +120,19 @@ static unsigned int pin_job(struct host1
+ goto unpin;
+ }
+
+- if (client->group)
++ /*
++ * If the client device is not attached to an IOMMU, the
++ * physical address of the buffer object can be used.
++ *
++ * Similarly, when an IOMMU domain is shared between all
++ * host1x clients, the IOVA is already available, so no
++ * need to map the buffer object again.
++ *
++ * XXX Note that this isn't always safe to do because it
++ * relies on an assumption that no cache maintenance is
++ * needed on the buffer objects.
++ */
++ if (!domain || client->group)
+ phys = &phys_addr;
+ else
+ phys = NULL;
+@@ -176,6 +191,7 @@ static unsigned int pin_job(struct host1
+ dma_addr_t phys_addr;
+ unsigned long shift;
+ struct iova *alloc;
++ dma_addr_t *phys;
+ unsigned int j;
+
+ g->bo = host1x_bo_get(g->bo);
+@@ -184,7 +200,17 @@ static unsigned int pin_job(struct host1
+ goto unpin;
+ }
+
+- sgt = host1x_bo_pin(host->dev, g->bo, NULL);
++ /**
++ * If the host1x is not attached to an IOMMU, there is no need
++ * to map the buffer object for the host1x, since the physical
++ * address can simply be used.
++ */
++ if (!iommu_get_domain_for_dev(host->dev))
++ phys = &phys_addr;
++ else
++ phys = NULL;
++
++ sgt = host1x_bo_pin(host->dev, g->bo, phys);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto unpin;
+@@ -214,7 +240,7 @@ static unsigned int pin_job(struct host1
+
+ job->unpins[job->num_unpins].size = gather_size;
+ phys_addr = iova_dma_addr(&host->iova, alloc);
+- } else {
++ } else if (sgt) {
+ err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ if (!err) {
--- /dev/null
+From f658adeea45e430a24c7a157c3d5448925ac2038 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Thu, 6 Feb 2020 16:39:28 +0100
+Subject: fix up iter on short count in fuse_direct_io()
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit f658adeea45e430a24c7a157c3d5448925ac2038 upstream.
+
+fuse_direct_io() can end up advancing the iterator by more than the amount
+of data read or written. This case is handled by the generic code if going
+through ->direct_IO(), but not in the FOPEN_DIRECT_IO case.
+
+Fix by reverting the extra bytes from the iterator in case of error or a
+short count.
+
+To test: install lxcfs, then the following testcase
+ int fd = open("/var/lib/lxcfs/proc/uptime", O_RDONLY);
+ sendfile(1, fd, NULL, 16777216);
+ sendfile(1, fd, NULL, 16777216);
+will spew WARN_ON() in iov_iter_pipe().
+
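+A self-contained version of that reproducer might look like this
+(assumes lxcfs is mounted at the path above):
+
+	#include <fcntl.h>
+	#include <sys/sendfile.h>
+
+	int main(void)
+	{
+		int fd = open("/var/lib/lxcfs/proc/uptime", O_RDONLY);
+
+		/* the first, short read leaves the iterator over-advanced;
+		 * the second then trips the WARN_ON() in iov_iter_pipe() */
+		sendfile(1, fd, NULL, 16777216);
+		sendfile(1, fd, NULL, 16777216);
+		return 0;
+	}
+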
+Reported-by: Peter Geis <pgwipeout@gmail.com>
+Reported-by: Al Viro <viro@zeniv.linux.org.uk>
+Fixes: 3c3db095b68c ("fuse: use iov_iter based generic splice helpers")
+Cc: <stable@vger.kernel.org> # v5.1
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/file.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1465,6 +1465,7 @@ ssize_t fuse_direct_io(struct fuse_io_pr
+ }
+ ia = NULL;
+ if (nres < 0) {
++ iov_iter_revert(iter, nbytes);
+ err = nres;
+ break;
+ }
+@@ -1473,8 +1474,10 @@ ssize_t fuse_direct_io(struct fuse_io_pr
+ count -= nres;
+ res += nres;
+ pos += nres;
+- if (nres != nbytes)
++ if (nres != nbytes) {
++ iov_iter_revert(iter, nbytes - nres);
+ break;
++ }
+ if (count) {
+ max_pages = iov_iter_npages(iter, fc->max_pages);
+ ia = fuse_io_alloc(io, max_pages);
--- /dev/null
+From 0ed1325967ab5f7a4549a2641c6ebe115f76e228 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 3 Feb 2020 17:36:49 -0800
+Subject: mm/mmu_gather: invalidate TLB correctly on batch allocation failure and flush
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 0ed1325967ab5f7a4549a2641c6ebe115f76e228 upstream.
+
+Architectures for which we have hardware walkers of the Linux page tables
+should flush the TLB on mmu gather batch allocation failures and batch
+flush. Some architectures, like POWER, support multiple translation modes
+(hash and radix), and in the case of POWER only the radix translation mode
+needs the above TLBI. This is because for the hash translation mode the
+kernel wants to avoid this extra flush, since there are no hardware
+walkers of the Linux page tables. With radix translation, the hardware
+also walks the Linux page tables, and with that the kernel needs to make
+sure to invalidate the TLB's page-walk cache before page-table pages are
+freed.
+
+More details in commit d86564a2f085 ("mm/tlb, x86/mm: Support invalidating
+TLB caches for RCU_TABLE_FREE")
+
+The changes to sparc are to make sure we keep the old behavior, since we
+are now removing HAVE_RCU_TABLE_NO_INVALIDATE. The default for
+tlb_needs_table_invalidate is to always force an invalidate, but sparc
+can avoid the table invalidate. Hence we define tlb_needs_table_invalidate
+as false for the sparc architecture.
+
+Link: http://lkml.kernel.org/r/20200116064531.483522-3-aneesh.kumar@linux.ibm.com
+Fixes: a46cc7a90fd8 ("powerpc/mm/radix: Improve TLB/PWC flushes")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Acked-by: Michael Ellerman <mpe@ellerman.id.au> [powerpc]
+Cc: <stable@vger.kernel.org> [4.14+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/Kconfig | 3 ---
+ arch/powerpc/Kconfig | 1 -
+ arch/powerpc/include/asm/tlb.h | 11 +++++++++++
+ arch/sparc/Kconfig | 1 -
+ arch/sparc/include/asm/tlb_64.h | 9 +++++++++
+ include/asm-generic/tlb.h | 22 +++++++++++++++-------
+ mm/mmu_gather.c | 16 ++++++++--------
+ 7 files changed, 43 insertions(+), 20 deletions(-)
+
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -396,9 +396,6 @@ config HAVE_ARCH_JUMP_LABEL_RELATIVE
+ config HAVE_RCU_TABLE_FREE
+ bool
+
+-config HAVE_RCU_TABLE_NO_INVALIDATE
+- bool
+-
+ config HAVE_MMU_GATHER_PAGE_SIZE
+ bool
+
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -223,7 +223,6 @@ config PPC
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_RCU_TABLE_FREE
+- select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
+ select HAVE_MMU_GATHER_PAGE_SIZE
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
+--- a/arch/powerpc/include/asm/tlb.h
++++ b/arch/powerpc/include/asm/tlb.h
+@@ -26,6 +26,17 @@
+
+ #define tlb_flush tlb_flush
+ extern void tlb_flush(struct mmu_gather *tlb);
++/*
++ * book3s:
++ * Hash does not use the linux page-tables, so we can avoid
++ * the TLB invalidate for page-table freeing, Radix otoh does use the
++ * page-tables and needs the TLBI.
++ *
++ * nohash:
++ * We still do TLB invalidate in the __pte_free_tlb routine before we
++ * add the page table pages to mmu gather table batch.
++ */
++#define tlb_needs_table_invalidate() radix_enabled()
+
+ /* Get the generic bits... */
+ #include <asm-generic/tlb.h>
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -65,7 +65,6 @@ config SPARC64
+ select HAVE_KRETPROBES
+ select HAVE_KPROBES
+ select HAVE_RCU_TABLE_FREE if SMP
+- select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
+ select HAVE_MEMBLOCK_NODE_MAP
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_DYNAMIC_FTRACE
+--- a/arch/sparc/include/asm/tlb_64.h
++++ b/arch/sparc/include/asm/tlb_64.h
+@@ -28,6 +28,15 @@ void flush_tlb_pending(void);
+ #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+ #define tlb_flush(tlb) flush_tlb_pending()
+
++/*
++ * SPARC64's hardware TLB fill does not use the Linux page-tables
++ * and therefore we don't need a TLBI when freeing page-table pages.
++ */
++
++#ifdef CONFIG_HAVE_RCU_TABLE_FREE
++#define tlb_needs_table_invalidate() (false)
++#endif
++
+ #include <asm-generic/tlb.h>
+
+ #endif /* _SPARC64_TLB_H */
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -137,13 +137,6 @@
+ * When used, an architecture is expected to provide __tlb_remove_table()
+ * which does the actual freeing of these pages.
+ *
+- * HAVE_RCU_TABLE_NO_INVALIDATE
+- *
+- * This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
+- * freeing the page-table pages. This can be avoided if you use
+- * HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
+- * page-tables natively.
+- *
+ * MMU_GATHER_NO_RANGE
+ *
+ * Use this if your architecture lacks an efficient flush_tlb_range().
+@@ -189,8 +182,23 @@ struct mmu_table_batch {
+
+ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+
++/*
++ * This allows an architecture that does not use the linux page-tables for
++ * hardware to skip the TLBI when freeing page tables.
++ */
++#ifndef tlb_needs_table_invalidate
++#define tlb_needs_table_invalidate() (true)
+ #endif
+
++#else
++
++#ifdef tlb_needs_table_invalidate
++#error tlb_needs_table_invalidate() requires HAVE_RCU_TABLE_FREE
++#endif
++
++#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
++
++
+ #ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+ /*
+ * If we can't allocate a page to make a big batch of page pointers
+--- a/mm/mmu_gather.c
++++ b/mm/mmu_gather.c
+@@ -102,14 +102,14 @@ bool __tlb_remove_page_size(struct mmu_g
+ */
+ static inline void tlb_table_invalidate(struct mmu_gather *tlb)
+ {
+-#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
+- /*
+- * Invalidate page-table caches used by hardware walkers. Then we still
+- * need to RCU-sched wait while freeing the pages because software
+- * walkers can still be in-flight.
+- */
+- tlb_flush_mmu_tlbonly(tlb);
+-#endif
++ if (tlb_needs_table_invalidate()) {
++ /*
++ * Invalidate page-table caches used by hardware walkers. Then
++ * we still need to RCU-sched wait while freeing the pages
++ * because software walkers can still be in-flight.
++ */
++ tlb_flush_mmu_tlbonly(tlb);
++ }
+ }
+
+ static void tlb_remove_table_smp_sync(void *arg)
--- /dev/null
+From 264b0d2bee148073c117e7bbbde5be7125a53be1 Mon Sep 17 00:00:00 2001
+From: Erdem Aktas <erdemaktas@google.com>
+Date: Fri, 13 Dec 2019 13:31:46 -0800
+Subject: percpu: Separate decrypted variables anytime encryption can be enabled
+
+From: Erdem Aktas <erdemaktas@google.com>
+
+commit 264b0d2bee148073c117e7bbbde5be7125a53be1 upstream.
+
+CONFIG_VIRTUALIZATION may not be enabled for memory encrypted guests. If
+disabled, decrypted per-CPU variables may end up sharing the same page
+with variables that should be left encrypted.
+
+Always separate per-CPU variables that should be decrypted into their own
+page anytime memory encryption can be enabled in the guest rather than
+rely on any other config option that may not be enabled.
+
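+For illustration, a typical user of the ..decrypted section (modeled on
+the KVM guest steal-time record in arch/x86/kernel/kvm.c; treat the
+exact declaration as a sketch) looks like:
+
+	#include <linux/percpu-defs.h>
+
+	/* shared with the hypervisor, so it must be decrypted; with this
+	 * fix it is page-separated whenever CONFIG_AMD_MEM_ENCRYPT is
+	 * set, even if CONFIG_VIRTUALIZATION is not */
+	DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time)
+		__aligned(64);
+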
+Fixes: ac26963a1175 ("percpu: Introduce DEFINE_PER_CPU_DECRYPTED")
+Cc: stable@vger.kernel.org # 4.15+
+Signed-off-by: Erdem Aktas <erdemaktas@google.com>
+Signed-off-by: David Rientjes <rientjes@google.com>
+Signed-off-by: Dennis Zhou <dennis@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/percpu-defs.h | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/include/linux/percpu-defs.h
++++ b/include/linux/percpu-defs.h
+@@ -175,8 +175,7 @@
+ * Declaration/definition used for per-CPU variables that should be accessed
+ * as decrypted when memory encryption is enabled in the guest.
+ */
+-#if defined(CONFIG_VIRTUALIZATION) && defined(CONFIG_AMD_MEM_ENCRYPT)
+-
++#ifdef CONFIG_AMD_MEM_ENCRYPT
+ #define DECLARE_PER_CPU_DECRYPTED(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, "..decrypted")
+
kvm-x86-use-raw-clock-values-consistently.patch
ocfs2-fix-oops-when-writing-cloned-file.patch
mm-page_alloc.c-fix-uninitialized-memmaps-on-a-partially-populated-last-section.patch
+arm64-dts-qcom-qcs404-evb-set-vdd_apc-regulator-in-high-power-mode.patch
+mm-mmu_gather-invalidate-tlb-correctly-on-batch-allocation-failure-and-flush.patch
+clk-tegra-mark-fuse-clock-as-critical.patch
+drm-amdgpu-navi-fix-index-for-od-mclk.patch
+drm-tegra-relax-iommu-usage-criteria-on-old-tegra.patch
+drm-tegra-reuse-iova-mapping-where-possible.patch
+drm-amd-powerplay-fix-navi10-system-intermittent-reboot-issue-v2.patch
+drm-amd-dm-mst-ignore-payload-update-failures.patch
+drm-amdgpu-fetch-default-vddc-curve-voltages-v2.patch
+drm-amdgpu-navi10-add-od_range-for-navi-overclocking.patch
+drm-amdgpu-smu_v11_0-correct-behavior-of-restoring-default-tables-v2.patch
+virtio-balloon-initialize-all-vq-callbacks.patch
+virtio-pci-check-name-when-counting-msi-x-vectors.patch
+fix-up-iter-on-short-count-in-fuse_direct_io.patch
+broken-ping-to-ipv6-linklocal-addresses-on-debian-buster.patch
+percpu-separate-decrypted-variables-anytime-encryption-can-be-enabled.patch
--- /dev/null
+From 5790b53390e18fdd21e70776e46d058c05eda2f2 Mon Sep 17 00:00:00 2001
+From: Daniel Verkamp <dverkamp@chromium.org>
+Date: Fri, 3 Jan 2020 10:40:43 -0800
+Subject: virtio-balloon: initialize all vq callbacks
+
+From: Daniel Verkamp <dverkamp@chromium.org>
+
+commit 5790b53390e18fdd21e70776e46d058c05eda2f2 upstream.
+
+Ensure that elements of the callbacks array that correspond to
+unavailable features are set to NULL; previously, they would be left
+uninitialized.
+
+Since the corresponding names array elements were explicitly set to
+NULL, the uninitialized callback pointers would not actually be
+dereferenced; however, the uninitialized callbacks array elements would
+still be read in vp_find_vqs_msix() and used to calculate the number of
+MSI-X vectors required.
+
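+The counting loop in vp_find_vqs_msix() that read those uninitialized
+slots (quoted, with an added comment, from virtio_pci_common.c; the
+following patch tightens this same loop):
+
+	nvectors = 1;
+	for (i = 0; i < nvqs; ++i)
+		if (callbacks[i])	/* may read an uninitialized pointer */
+			++nvectors;
+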
+Cc: stable@vger.kernel.org
+Fixes: 86a559787e6f ("virtio-balloon: VIRTIO_BALLOON_F_FREE_PAGE_HINT")
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Daniel Verkamp <dverkamp@chromium.org>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/virtio/virtio_balloon.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -475,7 +475,9 @@ static int init_vqs(struct virtio_balloo
+ names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate";
+ callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack;
+ names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate";
++ callbacks[VIRTIO_BALLOON_VQ_STATS] = NULL;
+ names[VIRTIO_BALLOON_VQ_STATS] = NULL;
++ callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
+ names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
+
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
--- /dev/null
+From 303090b513fd1ee45aa1536b71a3838dc054bc05 Mon Sep 17 00:00:00 2001
+From: Daniel Verkamp <dverkamp@chromium.org>
+Date: Fri, 3 Jan 2020 10:40:45 -0800
+Subject: virtio-pci: check name when counting MSI-X vectors
+
+From: Daniel Verkamp <dverkamp@chromium.org>
+
+commit 303090b513fd1ee45aa1536b71a3838dc054bc05 upstream.
+
+VQs without a name specified are not valid; they are skipped in the
+later loop that assigns MSI-X vectors to queues, but the per_vq_vectors
+loop above that counts the required number of vectors previously still
+counted any queue with a non-NULL callback as needing a vector.
+
+Add a check to the per_vq_vectors loop so that vectors with no name are
+not counted to make the two loops consistent. This prevents
+over-counting unnecessary vectors (e.g. for features which were not
+negotiated with the device).
+
+Cc: stable@vger.kernel.org
+Fixes: 86a559787e6f ("virtio-balloon: VIRTIO_BALLOON_F_FREE_PAGE_HINT")
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Daniel Verkamp <dverkamp@chromium.org>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Reviewed-by: Wang, Wei W <wei.w.wang@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/virtio/virtio_pci_common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -294,7 +294,7 @@ static int vp_find_vqs_msix(struct virti
+ /* Best option: one for change interrupt, one per vq. */
+ nvectors = 1;
+ for (i = 0; i < nvqs; ++i)
+- if (callbacks[i])
++ if (names[i] && callbacks[i])
+ ++nvectors;
+ } else {
+ /* Second best: one for change, shared for all vqs. */