--- /dev/null
+From 74fa4c81aadf418341f0d073c864ea7dca730a2e Mon Sep 17 00:00:00 2001
+From: Saaem Rizvi <SyedSaaem.Rizvi@amd.com>
+Date: Mon, 6 Mar 2023 15:10:13 -0500
+Subject: drm/amd/display: Implement workaround for writing to OTG_PIXEL_RATE_DIV register
+
+From: Saaem Rizvi <SyedSaaem.Rizvi@amd.com>
+
+commit 74fa4c81aadf418341f0d073c864ea7dca730a2e upstream.
+
+[Why and How]
+The current implementation requires FPGA builds to take a different
+code path from DCN32 to write to OTG_PIXEL_RATE_DIV. Now that we have
+a workaround to write to the OTG_PIXEL_RATE_DIV register without
+blanking the display on hotplug on DCN32, the code paths for FPGA can
+be made exactly the same, allowing for more consistent testing.
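+
+Condensed, the workaround added to dcn32_dccg.c below amounts to the
+following (a sketch only; REG_READ/REG_WRITE/REG_WAIT are the existing
+dc register helpers, and the last two REG_WAIT arguments are the poll
+interval in microseconds and the number of attempts):
+
+    /* Kick DENTIST with a same-value write, then poll until the
+     * (no-op) dispclk change is reported as done.
+     */
+    uint32_t val = REG_READ(DENTIST_DISPCLK_CNTL);
+
+    REG_WRITE(DENTIST_DISPCLK_CNTL, val);
+    REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE,
+             1, 50, 2000);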
+
+Reviewed-by: Alvin Lee <Alvin.Lee2@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Saaem Rizvi <SyedSaaem.Rizvi@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: "Limonciello, Mario" <mario.limonciello@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 3 +-
+ drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c | 22 ++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h | 3 +-
+ drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h | 3 +-
+ 5 files changed, 29 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+@@ -230,7 +230,8 @@
+ type DTBCLK_P2_SRC_SEL;\
+ type DTBCLK_P2_EN;\
+ type DTBCLK_P3_SRC_SEL;\
+- type DTBCLK_P3_EN;
++ type DTBCLK_P3_EN;\
++ type DENTIST_DISPCLK_CHG_DONE;
+
+ struct dccg_shift {
+ DCCG_REG_FIELD_LIST(uint8_t)
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+@@ -42,6 +42,20 @@
+ #define DC_LOGGER \
+ dccg->ctx->logger
+
++/* This function is a workaround for writing to OTG_PIXEL_RATE_DIV
++ * without the risk of causing a DIG FIFO error.
++ */
++static void dccg32_wait_for_dentist_change_done(
++ struct dccg *dccg)
++{
++ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
++
++ uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL);
++
++ REG_WRITE(DENTIST_DISPCLK_CNTL, dentist_dispclk_value);
++ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
++}
++
+ static void dccg32_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+@@ -110,21 +124,29 @@ static void dccg32_set_pixel_rate_div(
+ REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
+ OTG0_PIXEL_RATE_DIVK1, k1,
+ OTG0_PIXEL_RATE_DIVK2, k2);
++
++ dccg32_wait_for_dentist_change_done(dccg);
+ break;
+ case 1:
+ REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
+ OTG1_PIXEL_RATE_DIVK1, k1,
+ OTG1_PIXEL_RATE_DIVK2, k2);
++
++ dccg32_wait_for_dentist_change_done(dccg);
+ break;
+ case 2:
+ REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
+ OTG2_PIXEL_RATE_DIVK1, k1,
+ OTG2_PIXEL_RATE_DIVK2, k2);
++
++ dccg32_wait_for_dentist_change_done(dccg);
+ break;
+ case 3:
+ REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
+ OTG3_PIXEL_RATE_DIVK1, k1,
+ OTG3_PIXEL_RATE_DIVK2, k2);
++
++ dccg32_wait_for_dentist_change_done(dccg);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
+@@ -147,7 +147,8 @@
+ DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_SRC_SEL, mask_sh),\
+ DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_EN, mask_sh),\
+ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
+- DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh)
++ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
++ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
+
+
+ struct dccg *dccg32_create(
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -1177,7 +1177,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_
+ *k2_div = PIXEL_RATE_DIV_BY_2;
+ else
+ *k2_div = PIXEL_RATE_DIV_BY_4;
+- } else if (dc_is_dp_signal(stream->signal)) {
++ } else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
+ if (two_pix_per_container) {
+ *k1_div = PIXEL_RATE_DIV_BY_1;
+ *k2_div = PIXEL_RATE_DIV_BY_2;
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+@@ -1272,7 +1272,8 @@ unsigned int dcn32_calc_num_avail_chans_
+ DCCG_SRII(PHASE, DTBCLK_DTO, 0), DCCG_SRII(PHASE, DTBCLK_DTO, 1), \
+ DCCG_SRII(PHASE, DTBCLK_DTO, 2), DCCG_SRII(PHASE, DTBCLK_DTO, 3), \
+ SR(DCCG_AUDIO_DTBCLK_DTO_MODULO), SR(DCCG_AUDIO_DTBCLK_DTO_PHASE), \
+- SR(OTG_PIXEL_RATE_DIV), SR(DTBCLK_P_CNTL), SR(DCCG_AUDIO_DTO_SOURCE) \
++ SR(OTG_PIXEL_RATE_DIV), SR(DTBCLK_P_CNTL), \
++ SR(DCCG_AUDIO_DTO_SOURCE), SR(DENTIST_DISPCLK_CNTL) \
+ )
+
+ /* VMID */
--- /dev/null
+From 32c877191e022b55fe3a374f3d7e9fb5741c514d Mon Sep 17 00:00:00 2001
+From: Mike Kravetz <mike.kravetz@oracle.com>
+Date: Tue, 11 Jul 2023 15:09:41 -0700
+Subject: hugetlb: do not clear hugetlb dtor until allocating vmemmap
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+commit 32c877191e022b55fe3a374f3d7e9fb5741c514d upstream.
+
+Patch series "Fix hugetlb free path race with memory errors".
+
+The race window was discovered in the discussion of Jiaqi Yan's series
+"Improve hugetlbfs read on HWPOISON hugepages":
+https://lore.kernel.org/linux-mm/20230616233447.GB7371@monkey/
+
+Freeing a hugetlb page back to the low-level memory allocators is
+performed in two steps:
+1) Under the hugetlb lock, remove the page from hugetlb lists and
+   clear the destructor
+2) Outside the lock, allocate vmemmap if necessary and call the
+   low-level free routine
+Between these two steps, the hugetlb page will appear as a normal
+compound page. However, vmemmap for tail pages could be missing. If
+a memory error occurs at this time, we could try to update page flags
+in non-existent page structs.
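+
+A rough sketch of the interleaving (CPU labels are illustrative only;
+the function names are those used in this series):
+
+    CPU0: hugetlb free path           CPU1: memory error handling
+    -----------------------           ---------------------------
+    remove_hugetlb_folio()
+      /* dtor cleared under lock;
+         page now looks like a
+         normal compound page */
+                                      SetPageHWPoison() on a tail page
+                                      /* tail struct page may have no
+                                         vmemmap backing it -> write
+                                         to a non-existent struct
+                                         page */
+    update_and_free_hugetlb_folio()
+      /* vmemmap allocated, too late */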
+
+A much more detailed description is in the first patch.
+
+The first patch addresses the race window. However, it adds a
+hugetlb_lock lock/unlock cycle to every vmemmap optimized hugetlb page
+free operation. This could lead to slowdowns if one is freeing a large
+number of hugetlb pages.
+
+The second patch optimizes the update_and_free_pages_bulk routine to
+only take the lock once in bulk operations.
+
+The second patch is technically not a bug fix, but includes a Fixes
+tag and a stable Cc to avoid a performance regression. It can be
+combined with the first, but was done separately to make reviewing
+easier.
+
+
+This patch (of 2):
+
+Freeing a hugetlb page and releasing base pages back to the underlying
+allocator such as buddy or cma is performed in two steps:
+- remove_hugetlb_folio() is called to remove the folio from hugetlb
+  lists, get a ref on the page and remove the hugetlb destructor. This
+  all must be done under the hugetlb lock. After this call, the page
+  can be treated as a normal compound page or a collection of base
+  size pages.
+- update_and_free_hugetlb_folio() is called to allocate vmemmap if
+  needed, and the free routine of the underlying allocator is called
+  on the resulting page. We cannot hold the hugetlb lock here.
+
+One issue with this scheme is that a memory error could occur between
+these two steps. In this case, the memory error handling code treats
+the old hugetlb page as a normal compound page or collection of base
+pages. It will then try to SetPageHWPoison(page) on the page with an
+error. If the page with the error is a tail page without vmemmap, a
+write error will occur when trying to set the flag.
+
+Address this issue by modifying remove_hugetlb_folio() and
+update_and_free_hugetlb_folio() such that the hugetlb destructor is not
+cleared until after allocating vmemmap. Since clearing the destructor
+requires holding the hugetlb lock, the clearing is done in
+remove_hugetlb_folio() if the vmemmap is present. This saves a
+lock/unlock cycle. Otherwise, the destructor is cleared in
+update_and_free_hugetlb_folio() after allocating vmemmap.
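+
+Condensed, the free path after this patch looks like the following
+(sketch only; the real code is in the mm/hugetlb.c hunks below):
+
+    /* step 1: under hugetlb_lock */
+    remove_hugetlb_folio()
+        if (!HPageVmemmapOptimized(page))
+            __clear_hugetlb_destructor(h, page);
+
+    /* step 2: hugetlb_lock dropped */
+    update_and_free_hugetlb_folio()
+        /* reallocate vmemmap if it was optimized away, then: */
+        if (clear_dtor) {
+            spin_lock_irq(&hugetlb_lock);
+            __clear_hugetlb_destructor(h, page);
+            spin_unlock_irq(&hugetlb_lock);
+        }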
+
+Note that this will leave hugetlb pages in a state where they are
+marked free (by a hugetlb-specific page flag) and have a ref count.
+This is not a normal state. The only code that would notice is the
+memory error code, and it is set up to retry in such a case.
+
+A subsequent patch will create a routine to do bulk processing of
+vmemmap allocation. This will eliminate a lock/unlock cycle for each
+hugetlb page in the case where we are freeing a large number of pages.
+
+Link: https://lkml.kernel.org/r/20230711220942.43706-1-mike.kravetz@oracle.com
+Link: https://lkml.kernel.org/r/20230711220942.43706-2-mike.kravetz@oracle.com
+Fixes: ad2fa3717b74 ("mm: hugetlb: alloc the vmemmap pages associated with each HugeTLB page")
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Reviewed-by: Muchun Song <songmuchun@bytedance.com>
+Tested-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: James Houghton <jthoughton@google.com>
+Cc: Jiaqi Yan <jiaqiyan@google.com>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb.c | 75 ++++++++++++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 51 insertions(+), 24 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1581,9 +1581,37 @@ static inline void destroy_compound_giga
+ unsigned int order) { }
+ #endif
+
++static inline void __clear_hugetlb_destructor(struct hstate *h,
++ struct page *page)
++{
++ lockdep_assert_held(&hugetlb_lock);
++
++ /*
++ * Very subtle
++ *
++ * For non-gigantic pages set the destructor to the normal compound
++ * page dtor. This is needed in case someone takes an additional
++ * temporary ref to the page, and freeing is delayed until they drop
++ * their reference.
++ *
++ * For gigantic pages set the destructor to the null dtor. This
++ * destructor will never be called. Before freeing the gigantic
++ * page destroy_compound_gigantic_folio will turn the folio into a
++ * simple group of pages. After this the destructor does not
++ * apply.
++ *
++ */
++ if (hstate_is_gigantic(h))
++ set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
++ else
++ set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
++}
++
+ /*
+- * Remove hugetlb page from lists, and update dtor so that page appears
+- * as just a compound page.
++ * Remove hugetlb page from lists.
++ * If vmemmap exists for the page, update dtor so that the page appears
++ * as just a compound page. Otherwise, wait until after allocating vmemmap
++ * to update dtor.
+ *
+ * A reference is held on the page, except in the case of demote.
+ *
+@@ -1614,31 +1642,19 @@ static void __remove_hugetlb_page(struct
+ }
+
+ /*
+- * Very subtle
+- *
+- * For non-gigantic pages set the destructor to the normal compound
+- * page dtor. This is needed in case someone takes an additional
+- * temporary ref to the page, and freeing is delayed until they drop
+- * their reference.
+- *
+- * For gigantic pages set the destructor to the null dtor. This
+- * destructor will never be called. Before freeing the gigantic
+- * page destroy_compound_gigantic_page will turn the compound page
+- * into a simple group of pages. After this the destructor does not
+- * apply.
+- *
+- * This handles the case where more than one ref is held when and
+- * after update_and_free_page is called.
+- *
+- * In the case of demote we do not ref count the page as it will soon
+- * be turned into a page of smaller size.
++ * We can only clear the hugetlb destructor after allocating vmemmap
++ * pages. Otherwise, someone (memory error handling) may try to write
++ * to tail struct pages.
++ */
++ if (!HPageVmemmapOptimized(page))
++ __clear_hugetlb_destructor(h, page);
++
++ /*
++ * In the case of demote we do not ref count the page as it will soon
++ * be turned into a page of smaller size.
+ */
+ if (!demote)
+ set_page_refcounted(page);
+- if (hstate_is_gigantic(h))
+- set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
+- else
+- set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
+
+ h->nr_huge_pages--;
+ h->nr_huge_pages_node[nid]--;
+@@ -1706,6 +1722,7 @@ static void __update_and_free_page(struc
+ {
+ int i;
+ struct page *subpage;
++ bool clear_dtor = HPageVmemmapOptimized(page);
+
+ if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+ return;
+@@ -1736,6 +1753,16 @@ static void __update_and_free_page(struc
+ if (unlikely(PageHWPoison(page)))
+ hugetlb_clear_page_hwpoison(page);
+
++ /*
++ * If vmemmap pages were allocated above, then we need to clear the
++ * hugetlb destructor under the hugetlb lock.
++ */
++ if (clear_dtor) {
++ spin_lock_irq(&hugetlb_lock);
++ __clear_hugetlb_destructor(h, page);
++ spin_unlock_irq(&hugetlb_lock);
++ }
++
+ for (i = 0; i < pages_per_huge_page(h); i++) {
+ subpage = nth_page(page, i);
+ subpage->flags &= ~(1 << PG_locked | 1 << PG_error |