--- /dev/null
+From 9a199694c6a1519522ec73a4571f68abe9f13d5d Mon Sep 17 00:00:00 2001
+From: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Date: Fri, 15 Apr 2022 13:59:52 +0200
+Subject: media: i2c: imx412: Fix power_off ordering
+
+From: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+
+commit 9a199694c6a1519522ec73a4571f68abe9f13d5d upstream.
+
+The enable path does
+- gpio
+- clock
+
+The disable path currently does the same
+- gpio
+- clock
+
+instead of undoing power-on in reverse. Fix the power-off path so that it
+disables the clock before asserting the reset GPIO, i.e. so that power-off
+releases the clock and GPIO in the reverse order that power-on claims them.
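+
+To make the intent concrete, here is a rough sketch of the desired pairing
+(error handling omitted, using the logical GPIO values that apply after the
+companion reset-polarity fix in this series):
+
+        /* power on: release reset, then enable the clock */
+        gpiod_set_value_cansleep(imx412->reset_gpio, 0);
+        clk_prepare_enable(imx412->inclk);
+
+        /* power off: undo in reverse - stop the clock, then assert reset */
+        clk_disable_unprepare(imx412->inclk);
+        gpiod_set_value_cansleep(imx412->reset_gpio, 1);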
+
+Fixes: 9214e86c0cc1 ("media: i2c: Add imx412 camera sensor driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Reviewed-by: Jacopo Mondi <jacopo@jmondi.org>
+Reviewed-by: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/i2c/imx412.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/media/i2c/imx412.c
++++ b/drivers/media/i2c/imx412.c
+@@ -1040,10 +1040,10 @@ static int imx412_power_off(struct devic
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct imx412 *imx412 = to_imx412(sd);
+
+- gpiod_set_value_cansleep(imx412->reset_gpio, 1);
+-
+ clk_disable_unprepare(imx412->inclk);
+
++ gpiod_set_value_cansleep(imx412->reset_gpio, 1);
++
+ return 0;
+ }
+
--- /dev/null
+From bb25f071fc92d3d227178a45853347c7b3b45a6b Mon Sep 17 00:00:00 2001
+From: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Date: Fri, 15 Apr 2022 13:59:51 +0200
+Subject: media: i2c: imx412: Fix reset GPIO polarity
+
+From: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+
+commit bb25f071fc92d3d227178a45853347c7b3b45a6b upstream.
+
+The imx412/imx577 sensor has a reset line that is active low, not active
+high. Currently the logic for this is inverted.
+
+The right way to define the reset line is to declare it active low in the
+DTS and invert the logic currently contained in the driver.
+
+The DTS should represent what the hardware does, i.e. reset is active low.
+So:
++ reset-gpios = <&tlmm 78 GPIO_ACTIVE_LOW>;
+not:
+- reset-gpios = <&tlmm 78 GPIO_ACTIVE_HIGH>;
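+
+With the line declared GPIO_ACTIVE_LOW, the gpiod API operates on logical
+(asserted/deasserted) values, so the driver logic below becomes, roughly:
+
+        /* logical 1 = "assert reset" = line driven low by the GPIO core */
+        gpiod_set_value_cansleep(imx412->reset_gpio, 1);
+
+        /* logical 0 = "deassert reset" = line driven high, sensor running */
+        gpiod_set_value_cansleep(imx412->reset_gpio, 0);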
+
+I was a bit hesitant about changing this logic since I thought it might
+negatively impact @intel.com users. From a bit of searching, though, I
+believe this sensor is used on "Keem Bay", which is clearly a DTS-based
+system and is not upstream yet.
+
+Fixes: 9214e86c0cc1 ("media: i2c: Add imx412 camera sensor driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Reviewed-by: Jacopo Mondi <jacopo@jmondi.org>
+Reviewed-by: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/i2c/imx412.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/media/i2c/imx412.c
++++ b/drivers/media/i2c/imx412.c
+@@ -1011,7 +1011,7 @@ static int imx412_power_on(struct device
+ struct imx412 *imx412 = to_imx412(sd);
+ int ret;
+
+- gpiod_set_value_cansleep(imx412->reset_gpio, 1);
++ gpiod_set_value_cansleep(imx412->reset_gpio, 0);
+
+ ret = clk_prepare_enable(imx412->inclk);
+ if (ret) {
+@@ -1024,7 +1024,7 @@ static int imx412_power_on(struct device
+ return 0;
+
+ error_reset:
+- gpiod_set_value_cansleep(imx412->reset_gpio, 0);
++ gpiod_set_value_cansleep(imx412->reset_gpio, 1);
+
+ return ret;
+ }
+@@ -1040,7 +1040,7 @@ static int imx412_power_off(struct devic
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct imx412 *imx412 = to_imx412(sd);
+
+- gpiod_set_value_cansleep(imx412->reset_gpio, 0);
++ gpiod_set_value_cansleep(imx412->reset_gpio, 1);
+
+ clk_disable_unprepare(imx412->inclk);
+
fs-ntfs3-validate-boot-sectors_per_clusters.patch
hid-multitouch-add-support-for-google-whiskers-touchpad.patch
hid-multitouch-add-quirks-to-enable-lenovo-x12-trackpoint.patch
+x86-sgx-disconnect-backing-page-references-from-dirty-status.patch
+x86-sgx-mark-pcmd-page-as-dirty-when-modifying-contents.patch
+x86-sgx-obtain-backing-storage-page-with-enclave-mutex-held.patch
+x86-sgx-fix-race-between-reclaimer-and-page-fault-handler.patch
+x86-sgx-ensure-no-data-in-pcmd-page-after-truncate.patch
+media-i2c-imx412-fix-reset-gpio-polarity.patch
+media-i2c-imx412-fix-power_off-ordering.patch
+tpm-fix-buffer-access-in-tpm2_get_tpm_pt.patch
+tpm-ibmvtpm-correct-the-return-value-in-tpm_ibmvtpm_probe.patch
--- /dev/null
+From e57b2523bd37e6434f4e64c7a685e3715ad21e9a Mon Sep 17 00:00:00 2001
+From: Stefan Mahnke-Hartmann <stefan.mahnke-hartmann@infineon.com>
+Date: Fri, 13 May 2022 15:41:51 +0200
+Subject: tpm: Fix buffer access in tpm2_get_tpm_pt()
+
+From: Stefan Mahnke-Hartmann <stefan.mahnke-hartmann@infineon.com>
+
+commit e57b2523bd37e6434f4e64c7a685e3715ad21e9a upstream.
+
+Under certain conditions uninitialized memory will be accessed.
+As described by the TCG Trusted Platform Module Library Specification,
+rev. 1.59 (Part 3: Commands), if a TPM2_GetCapability command requesting
+a capability is received, a TPM in field upgrade mode may return a
+zero-length property list.
+Check the property count in tpm2_get_tpm_pt() before reading the value.
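+
+For reference, the fix below relies on the response layout used by
+tpm2-cmd.c, roughly:
+
+        struct tpm2_get_cap_out {
+                u8 more_data;
+                __be32 subcap_id;
+                __be32 property_cnt;    /* 0 for an empty list */
+                __be32 property_id;
+                __be32 value;           /* not sent when property_cnt == 0 */
+        } __packed;
+
+When property_cnt is zero the TPM returns only the list header, so the
+value field of the local buffer is never written by the TPM and reading
+it exposes whatever the buffer happened to contain.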
+
+Fixes: 2ab3241161b3 ("tpm: migrate tpm2_get_tpm_pt() to use struct tpm_buf")
+Cc: stable@vger.kernel.org
+Signed-off-by: Stefan Mahnke-Hartmann <stefan.mahnke-hartmann@infineon.com>
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/tpm/tpm2-cmd.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -400,7 +400,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip
+ if (!rc) {
+ out = (struct tpm2_get_cap_out *)
+ &buf.data[TPM_HEADER_SIZE];
+- *value = be32_to_cpu(out->value);
++ /*
++ * To prevent failing boot up of some systems, Infineon TPM2.0
++ * returns SUCCESS on TPM2_Startup in field upgrade mode. Also
++ * the TPM2_Getcapability command returns a zero length list
++ * in field upgrade mode.
++ */
++ if (be32_to_cpu(out->property_cnt) > 0)
++ *value = be32_to_cpu(out->value);
++ else
++ rc = -ENODATA;
+ }
+ tpm_buf_destroy(&buf);
+ return rc;
--- /dev/null
+From d0dc1a7100f19121f6e7450f9cdda11926aa3838 Mon Sep 17 00:00:00 2001
+From: Xiu Jianfeng <xiujianfeng@huawei.com>
+Date: Fri, 18 Mar 2022 14:02:01 +0800
+Subject: tpm: ibmvtpm: Correct the return value in tpm_ibmvtpm_probe()
+
+From: Xiu Jianfeng <xiujianfeng@huawei.com>
+
+commit d0dc1a7100f19121f6e7450f9cdda11926aa3838 upstream.
+
+Currently it returns zero when the CRQ response times out; it should
+return an error code instead.
+
+Fixes: d8d74ea3c002 ("tpm: ibmvtpm: Wait for buffer to be set before proceeding")
+Signed-off-by: Xiu Jianfeng <xiujianfeng@huawei.com>
+Reviewed-by: Stefan Berger <stefanb@linux.ibm.com>
+Acked-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/tpm/tpm_ibmvtpm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -681,6 +681,7 @@ static int tpm_ibmvtpm_probe(struct vio_
+ if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
+ ibmvtpm->rtce_buf != NULL,
+ HZ)) {
++ rc = -ENODEV;
+ dev_err(dev, "CRQ response timed out\n");
+ goto init_irq_cleanup;
+ }
--- /dev/null
+From 6bd429643cc265e94a9d19839c771bcc5d008fa8 Mon Sep 17 00:00:00 2001
+From: Reinette Chatre <reinette.chatre@intel.com>
+Date: Thu, 12 May 2022 14:50:57 -0700
+Subject: x86/sgx: Disconnect backing page references from dirty status
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+commit 6bd429643cc265e94a9d19839c771bcc5d008fa8 upstream.
+
+SGX uses shmem backing storage to store encrypted enclave pages
+and their crypto metadata when enclave pages are moved out of
+enclave memory. Two shmem backing storage pages are associated with
+each enclave page - one backing page to contain the encrypted
+enclave page data and one backing page (shared by a few
+enclave pages) to contain the crypto metadata used by the
+processor to verify the enclave page when it is loaded back into
+the enclave.
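+
+The two per-page references are carried in struct sgx_backing, roughly
+(as defined in arch/x86/kernel/cpu/sgx/encl.h at this point in the
+series):
+
+        struct sgx_backing {
+                pgoff_t page_index;
+                struct page *contents;  /* encrypted enclave page data */
+                struct page *pcmd;      /* shared crypto metadata (PCMD) page */
+                unsigned long pcmd_offset;
+        };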
+
+sgx_encl_put_backing() is used to release references to the
+backing storage and, optionally, mark both backing store pages
+as dirty.
+
+Managing references and dirty status together in this way results
+in both backing store pages being marked as dirty, even if only one
+of the backing store pages has changed.
+
+Additionally, waiting until the page reference is dropped to set
+the page dirty risks a race with the page fault handler that
+may load outdated data into the enclave when a page is faulted
+right after it is reclaimed.
+
+Consider what happens if the reclaimer writes a page to the backing
+store and the page is immediately faulted back, before the reclaimer
+is able to set the dirty bit of the page:
+
+sgx_reclaim_pages() { sgx_vma_fault() {
+ ...
+ sgx_encl_get_backing();
+ ... ...
+ sgx_reclaimer_write() {
+ mutex_lock(&encl->lock);
+ /* Write data to backing store */
+ mutex_unlock(&encl->lock);
+ }
+ mutex_lock(&encl->lock);
+ __sgx_encl_eldu() {
+ ...
+ /*
+ * Enclave backing store
+ * page not released
+ * nor marked dirty -
+ * contents may not be
+ * up to date.
+ */
+ sgx_encl_get_backing();
+ ...
+ /*
+ * Enclave data restored
+ * from backing store
+ * and PCMD pages that
+ * are not up to date.
+ * ENCLS[ELDU] faults
+ * because of MAC or PCMD
+ * checking failure.
+ */
+ sgx_encl_put_backing();
+ }
+ ...
+ /* set page dirty */
+ sgx_encl_put_backing();
+ ...
+ mutex_unlock(&encl->lock);
+} }
+
+Remove the option for sgx_encl_put_backing() to set the backing
+pages as dirty and instead set the needed pages as dirty right after
+their contents are written, while the enclave mutex is held. This ensures
+that the page fault handler can get up-to-date data from a page and
+prepares the code for a following change where only one of the backing
+pages needs to be marked as dirty.
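+
+A condensed sketch of the resulting pattern (the full hunks are below):
+
+        /* in __sgx_encl_ewb(), with the enclave mutex held by the caller */
+        ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot);
+        set_page_dirty(backing->pcmd);
+        set_page_dirty(backing->contents);
+
+        /* later, when the references are no longer needed */
+        sgx_encl_put_backing(backing);  /* no dirty side effect any more */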
+
+Cc: stable@vger.kernel.org
+Fixes: 1728ab54b4be ("x86/sgx: Add a page reclaimer")
+Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
+Tested-by: Haitao Huang <haitao.huang@intel.com>
+Link: https://lore.kernel.org/linux-sgx/8922e48f-6646-c7cc-6393-7c78dcf23d23@intel.com/
+Link: https://lkml.kernel.org/r/fa9f98986923f43e72ef4c6702a50b2a0b3c42e3.1652389823.git.reinette.chatre@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/sgx/encl.c | 10 ++--------
+ arch/x86/kernel/cpu/sgx/encl.h | 2 +-
+ arch/x86/kernel/cpu/sgx/main.c | 6 ++++--
+ 3 files changed, 7 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kernel/cpu/sgx/encl.c
++++ b/arch/x86/kernel/cpu/sgx/encl.c
+@@ -94,7 +94,7 @@ static int __sgx_encl_eldu(struct sgx_en
+ kunmap_atomic(pcmd_page);
+ kunmap_atomic((void *)(unsigned long)pginfo.contents);
+
+- sgx_encl_put_backing(&b, false);
++ sgx_encl_put_backing(&b);
+
+ sgx_encl_truncate_backing_page(encl, page_index);
+
+@@ -645,15 +645,9 @@ int sgx_encl_get_backing(struct sgx_encl
+ /**
+ * sgx_encl_put_backing() - Unpin the backing storage
+ * @backing: data for accessing backing storage for the page
+- * @do_write: mark pages dirty
+ */
+-void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write)
++void sgx_encl_put_backing(struct sgx_backing *backing)
+ {
+- if (do_write) {
+- set_page_dirty(backing->pcmd);
+- set_page_dirty(backing->contents);
+- }
+-
+ put_page(backing->pcmd);
+ put_page(backing->contents);
+ }
+--- a/arch/x86/kernel/cpu/sgx/encl.h
++++ b/arch/x86/kernel/cpu/sgx/encl.h
+@@ -107,7 +107,7 @@ void sgx_encl_release(struct kref *ref);
+ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
+ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
+ struct sgx_backing *backing);
+-void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write);
++void sgx_encl_put_backing(struct sgx_backing *backing);
+ int sgx_encl_test_and_clear_young(struct mm_struct *mm,
+ struct sgx_encl_page *page);
+
+--- a/arch/x86/kernel/cpu/sgx/main.c
++++ b/arch/x86/kernel/cpu/sgx/main.c
+@@ -170,6 +170,8 @@ static int __sgx_encl_ewb(struct sgx_epc
+ backing->pcmd_offset;
+
+ ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot);
++ set_page_dirty(backing->pcmd);
++ set_page_dirty(backing->contents);
+
+ kunmap_atomic((void *)(unsigned long)(pginfo.metadata -
+ backing->pcmd_offset));
+@@ -299,7 +301,7 @@ static void sgx_reclaimer_write(struct s
+ sgx_encl_free_epc_page(encl->secs.epc_page);
+ encl->secs.epc_page = NULL;
+
+- sgx_encl_put_backing(&secs_backing, true);
++ sgx_encl_put_backing(&secs_backing);
+ }
+
+ out:
+@@ -392,7 +394,7 @@ skip:
+
+ encl_page = epc_page->owner;
+ sgx_reclaimer_write(epc_page, &backing[i]);
+- sgx_encl_put_backing(&backing[i], true);
++ sgx_encl_put_backing(&backing[i]);
+
+ kref_put(&encl_page->encl->refcount, sgx_encl_release);
+ epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
--- /dev/null
+From e3a3bbe3e99de73043a1d32d36cf4d211dc58c7e Mon Sep 17 00:00:00 2001
+From: Reinette Chatre <reinette.chatre@intel.com>
+Date: Thu, 12 May 2022 14:51:01 -0700
+Subject: x86/sgx: Ensure no data in PCMD page after truncate
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+commit e3a3bbe3e99de73043a1d32d36cf4d211dc58c7e upstream.
+
+A PCMD (Paging Crypto MetaData) page contains the PCMD
+structures of enclave pages that have been encrypted and
+moved to the shmem backing store. When all enclave pages
+sharing a PCMD page are loaded in the enclave, there is no
+need for the PCMD page and it can be truncated from the
+backing store.
+
+A few issues appeared around the truncation of PCMD pages. The
+known issues have been addressed but the PCMD handling code could
+be made more robust by loudly complaining if any new issue appears
+in this area.
+
+Add a check that will complain with a warning if the PCMD page is not
+actually empty after it has been truncated. There should never be data
+in the PCMD page at this point, since it was just checked to be empty
+and truncated with the enclave mutex held, and it is only updated with
+the enclave mutex held.
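+
+Note that the check has to hold its own reference on the PCMD backing
+page, because sgx_encl_put_backing() drops the reference that was taken
+for the restore before the truncation happens; condensed from the hunk
+below:
+
+        get_page(b.pcmd);          /* keep the PCMD page across put_backing() */
+        sgx_encl_put_backing(&b);
+        ...
+        /* truncate, then memchr_inv() the mapped PCMD page and warn */
+        put_page(b.pcmd);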
+
+Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
+Tested-by: Haitao Huang <haitao.huang@intel.com>
+Link: https://lkml.kernel.org/r/6495120fed43fafc1496d09dd23df922b9a32709.1652389823.git.reinette.chatre@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/sgx/encl.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/sgx/encl.c
++++ b/arch/x86/kernel/cpu/sgx/encl.c
+@@ -187,12 +187,20 @@ static int __sgx_encl_eldu(struct sgx_en
+ kunmap_atomic(pcmd_page);
+ kunmap_atomic((void *)(unsigned long)pginfo.contents);
+
++ get_page(b.pcmd);
+ sgx_encl_put_backing(&b);
+
+ sgx_encl_truncate_backing_page(encl, page_index);
+
+- if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page))
++ if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page)) {
+ sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
++ pcmd_page = kmap_atomic(b.pcmd);
++ if (memchr_inv(pcmd_page, 0, PAGE_SIZE))
++ pr_warn("PCMD page not empty after truncate.\n");
++ kunmap_atomic(pcmd_page);
++ }
++
++ put_page(b.pcmd);
+
+ return ret;
+ }
--- /dev/null
+From af117837ceb9a78e995804ade4726ad2c2c8981f Mon Sep 17 00:00:00 2001
+From: Reinette Chatre <reinette.chatre@intel.com>
+Date: Thu, 12 May 2022 14:51:00 -0700
+Subject: x86/sgx: Fix race between reclaimer and page fault handler
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+commit af117837ceb9a78e995804ade4726ad2c2c8981f upstream.
+
+Haitao reported encountering a WARN triggered by the ENCLS[ELDU]
+instruction faulting with a #GP.
+
+The WARN is encountered when the reclaimer evicts a range of
+pages from the enclave while the same pages are faulted back in right away.
+
+Consider two enclave pages (ENCLAVE_A and ENCLAVE_B)
+sharing a PCMD page (PCMD_AB). ENCLAVE_A is in the
+enclave memory and ENCLAVE_B is in the backing store. PCMD_AB contains
+just one entry, that of ENCLAVE_B.
+
+Scenario proceeds where ENCLAVE_A is being evicted from the enclave
+while ENCLAVE_B is faulted in.
+
+sgx_reclaim_pages() {
+
+ ...
+
+ /*
+ * Reclaim ENCLAVE_A
+ */
+ mutex_lock(&encl->lock);
+ /*
+ * Get a reference to ENCLAVE_A's
+ * shmem page where enclave page
+ * encrypted data will be stored
+ * as well as a reference to the
+ * enclave page's PCMD data page,
+ * PCMD_AB.
+ * Release mutex before writing
+ * any data to the shmem pages.
+ */
+ sgx_encl_get_backing(...);
+ encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
+ mutex_unlock(&encl->lock);
+
+ /*
+ * Fault ENCLAVE_B
+ */
+
+ sgx_vma_fault() {
+
+ mutex_lock(&encl->lock);
+ /*
+ * Get reference to
+ * ENCLAVE_B's shmem page
+ * as well as PCMD_AB.
+ */
+ sgx_encl_get_backing(...)
+ /*
+ * Load page back into
+ * enclave via ELDU.
+ */
+ /*
+ * Release reference to
+ * ENCLAVE_B' shmem page and
+ * PCMD_AB.
+ */
+ sgx_encl_put_backing(...);
+ /*
+ * PCMD_AB is found empty so
+ * it and ENCLAVE_B's shmem page
+ * are truncated.
+ */
+ /* Truncate ENCLAVE_B backing page */
+ sgx_encl_truncate_backing_page();
+ /* Truncate PCMD_AB */
+ sgx_encl_truncate_backing_page();
+
+ mutex_unlock(&encl->lock);
+
+ ...
+ }
+ mutex_lock(&encl->lock);
+ encl_page->desc &=
+ ~SGX_ENCL_PAGE_BEING_RECLAIMED;
+ /*
+ * Write encrypted contents of
+ * ENCLAVE_A to ENCLAVE_A shmem
+ * page and its PCMD data to
+ * PCMD_AB.
+ */
+ sgx_encl_put_backing(...)
+
+ /*
+ * Reference to PCMD_AB is
+ * dropped and it is truncated.
+ * ENCLAVE_A's PCMD data is lost.
+ */
+ mutex_unlock(&encl->lock);
+}
+
+What happens next depends on whether it is ENCLAVE_A being faulted
+in or ENCLAVE_B being evicted - but both end up with ENCLS[ELDU] faulting
+with a #GP.
+
+If ENCLAVE_A is faulted first then, at the time sgx_encl_get_backing() is
+called, a new PCMD page is allocated, and providing the empty PCMD data
+for ENCLAVE_A would cause ENCLS[ELDU] to #GP.
+
+If ENCLAVE_B is evicted first then a new PCMD_AB would be allocated by the
+reclaimer, but later, when ENCLAVE_A is faulted, the ENCLS[ELDU] instruction
+would #GP during its checks of the PCMD value and the WARN would be
+encountered.
+
+Noting that the reclaimer sets SGX_ENCL_PAGE_BEING_RECLAIMED at the time
+it obtains a reference to the backing store pages of an enclave page it
+is in the process of reclaiming, fix the race by only truncating the PCMD
+page after ensuring that no page sharing the PCMD page is in the process
+of being reclaimed.
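+
+The PCMD_FIRST_MASK constant added below follows from the PCMD entry size
+defined by the SGX architecture (128 bytes, giving the 32 entries per page
+that the BUILD_BUG_ON in the new helper checks):
+
+        PCMDS_PER_PAGE  = PAGE_SIZE / sizeof(struct sgx_pcmd)
+                        = 4096 / 128 = 32 entries per PCMD page
+        PCMD_FIRST_MASK = GENMASK(4, 0)    /* 32 == 2^5 -> low 5 bits */
+
+Clearing those low bits of a page index therefore gives the index of the
+enclave page that occupies the first slot of the shared PCMD page.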
+
+Cc: stable@vger.kernel.org
+Fixes: 08999b2489b4 ("x86/sgx: Free backing memory after faulting the enclave page")
+Reported-by: Haitao Huang <haitao.huang@intel.com>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
+Tested-by: Haitao Huang <haitao.huang@intel.com>
+Link: https://lkml.kernel.org/r/ed20a5db516aa813873268e125680041ae11dfcf.1652389823.git.reinette.chatre@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/sgx/encl.c | 94 ++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 93 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/sgx/encl.c
++++ b/arch/x86/kernel/cpu/sgx/encl.c
+@@ -12,6 +12,92 @@
+ #include "encls.h"
+ #include "sgx.h"
+
++#define PCMDS_PER_PAGE (PAGE_SIZE / sizeof(struct sgx_pcmd))
++/*
++ * 32 PCMD entries share a PCMD page. PCMD_FIRST_MASK is used to
++ * determine the page index associated with the first PCMD entry
++ * within a PCMD page.
++ */
++#define PCMD_FIRST_MASK GENMASK(4, 0)
++
++/**
++ * reclaimer_writing_to_pcmd() - Query if any enclave page associated with
++ * a PCMD page is in process of being reclaimed.
++ * @encl: Enclave to which PCMD page belongs
++ * @start_addr: Address of enclave page using first entry within the PCMD page
++ *
++ * When an enclave page is reclaimed some Paging Crypto MetaData (PCMD) is
++ * stored. The PCMD data of a reclaimed enclave page contains enough
++ * information for the processor to verify the page at the time
++ * it is loaded back into the Enclave Page Cache (EPC).
++ *
++ * The backing storage to which enclave pages are reclaimed is laid out as
++ * follows:
++ * Encrypted enclave pages:SECS page:PCMD pages
++ *
++ * Each PCMD page contains the PCMD metadata of
++ * PAGE_SIZE/sizeof(struct sgx_pcmd) enclave pages.
++ *
++ * A PCMD page can only be truncated if it is (a) empty, and (b) not in the
++ * process of getting data (and thus soon being non-empty). (b) is tested with
++ * a check if an enclave page sharing the PCMD page is in the process of being
++ * reclaimed.
++ *
++ * The reclaimer sets the SGX_ENCL_PAGE_BEING_RECLAIMED flag when it
++ * intends to reclaim that enclave page - it means that the PCMD page
++ * associated with that enclave page is about to get some data and thus
++ * even if the PCMD page is empty, it should not be truncated.
++ *
++ * Context: Enclave mutex (&sgx_encl->lock) must be held.
++ * Return: 1 if the reclaimer is about to write to the PCMD page
++ * 0 if the reclaimer has no intention to write to the PCMD page
++ */
++static int reclaimer_writing_to_pcmd(struct sgx_encl *encl,
++ unsigned long start_addr)
++{
++ int reclaimed = 0;
++ int i;
++
++ /*
++ * PCMD_FIRST_MASK is based on number of PCMD entries within
++ * PCMD page being 32.
++ */
++ BUILD_BUG_ON(PCMDS_PER_PAGE != 32);
++
++ for (i = 0; i < PCMDS_PER_PAGE; i++) {
++ struct sgx_encl_page *entry;
++ unsigned long addr;
++
++ addr = start_addr + i * PAGE_SIZE;
++
++ /*
++ * Stop when reaching the SECS page - it does not
++ * have a page_array entry and its reclaim is
++ * started and completed with enclave mutex held so
++ * it does not use the SGX_ENCL_PAGE_BEING_RECLAIMED
++ * flag.
++ */
++ if (addr == encl->base + encl->size)
++ break;
++
++ entry = xa_load(&encl->page_array, PFN_DOWN(addr));
++ if (!entry)
++ continue;
++
++ /*
++ * VA page slot ID uses same bit as the flag so it is important
++ * to ensure that the page is not already in backing store.
++ */
++ if (entry->epc_page &&
++ (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)) {
++ reclaimed = 1;
++ break;
++ }
++ }
++
++ return reclaimed;
++}
++
+ /*
+ * Calculate byte offset of a PCMD struct associated with an enclave page. PCMD's
+ * follow right after the EPC data in the backing storage. In addition to the
+@@ -47,6 +133,7 @@ static int __sgx_encl_eldu(struct sgx_en
+ unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
+ struct sgx_encl *encl = encl_page->encl;
+ pgoff_t page_index, page_pcmd_off;
++ unsigned long pcmd_first_page;
+ struct sgx_pageinfo pginfo;
+ struct sgx_backing b;
+ bool pcmd_page_empty;
+@@ -58,6 +145,11 @@ static int __sgx_encl_eldu(struct sgx_en
+ else
+ page_index = PFN_DOWN(encl->size);
+
++ /*
++ * Address of enclave page using the first entry within the PCMD page.
++ */
++ pcmd_first_page = PFN_PHYS(page_index & ~PCMD_FIRST_MASK) + encl->base;
++
+ page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
+
+ ret = sgx_encl_get_backing(encl, page_index, &b);
+@@ -99,7 +191,7 @@ static int __sgx_encl_eldu(struct sgx_en
+
+ sgx_encl_truncate_backing_page(encl, page_index);
+
+- if (pcmd_page_empty)
++ if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page))
+ sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
+
+ return ret;
--- /dev/null
+From 2154e1c11b7080aa19f47160bd26b6f39bbd7824 Mon Sep 17 00:00:00 2001
+From: Reinette Chatre <reinette.chatre@intel.com>
+Date: Thu, 12 May 2022 14:50:58 -0700
+Subject: x86/sgx: Mark PCMD page as dirty when modifying contents
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+commit 2154e1c11b7080aa19f47160bd26b6f39bbd7824 upstream.
+
+Recent commit 08999b2489b4 ("x86/sgx: Free backing memory
+after faulting the enclave page") expanded __sgx_encl_eldu()
+to clear an enclave page's PCMD (Paging Crypto MetaData)
+from the PCMD page in the backing store after the enclave
+page is restored to the enclave.
+
+Since the PCMD page in the backing store is modified, the page
+should be marked as dirty to ensure the modified data is retained.
+
+Cc: stable@vger.kernel.org
+Fixes: 08999b2489b4 ("x86/sgx: Free backing memory after faulting the enclave page")
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
+Tested-by: Haitao Huang <haitao.huang@intel.com>
+Link: https://lkml.kernel.org/r/00cd2ac480db01058d112e347b32599c1a806bc4.1652389823.git.reinette.chatre@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/sgx/encl.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kernel/cpu/sgx/encl.c
++++ b/arch/x86/kernel/cpu/sgx/encl.c
+@@ -84,6 +84,7 @@ static int __sgx_encl_eldu(struct sgx_en
+ }
+
+ memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd));
++ set_page_dirty(b.pcmd);
+
+ /*
+ * The area for the PCMD in the page was zeroed above. Check if the
--- /dev/null
+From 0e4e729a830c1e7f31d3b3fbf8feb355a402b117 Mon Sep 17 00:00:00 2001
+From: Reinette Chatre <reinette.chatre@intel.com>
+Date: Thu, 12 May 2022 14:50:59 -0700
+Subject: x86/sgx: Obtain backing storage page with enclave mutex held
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+commit 0e4e729a830c1e7f31d3b3fbf8feb355a402b117 upstream.
+
+Haitao reported encountering a WARN triggered by the ENCLS[ELDU]
+instruction faulting with a #GP.
+
+The WARN is encountered when the reclaimer evicts a range of
+pages from the enclave while the same pages are faulted back in
+right away.
+
+The SGX backing storage is accessed on two paths: when there
+are insufficient free pages in the EPC the reclaimer works
+to move enclave pages to the backing storage and as enclaves
+access pages that have been moved to the backing storage
+they are retrieved from there as part of page fault handling.
+
+An oversubscribed SGX system will often run the reclaimer and
+page fault handler concurrently and needs to ensure that the
+backing store is accessed safely between the reclaimer and
+the page fault handler. This is not the case because the
+reclaimer accesses the backing store without the enclave mutex
+while the page fault handler accesses the backing store with
+the enclave mutex.
+
+Consider the scenario where a page is faulted while a page sharing
+a PCMD page with the faulted page is being reclaimed. The
+consequence is a race between the reclaimer and the page fault
+handler, with the reclaimer attempting to access a PCMD page at the
+same time it is truncated by the page fault handler. This
+could result in lost PCMD data. Data may still be
+lost if the reclaimer wins the race; this is addressed in
+the following patch.
+
+The reclaimer accesses pages from the backing storage without
+holding the enclave mutex and runs the risk of concurrently
+accessing the backing storage with the page fault handler that
+does access the backing storage with the enclave mutex held.
+
+In the scenario below a PCMD page is truncated from the backing
+store after all its pages have been loaded into the enclave,
+at the same time as the PCMD page is loaded from the backing store
+when one of its pages is reclaimed:
+
+sgx_reclaim_pages() { sgx_vma_fault() {
+ ...
+ mutex_lock(&encl->lock);
+ ...
+ __sgx_encl_eldu() {
+ ...
+ if (pcmd_page_empty) {
+/*
+ * EPC page being reclaimed /*
+ * shares a PCMD page with an * PCMD page truncated
+ * enclave page that is being * while requested from
+ * faulted in. * reclaimer.
+ */ */
+sgx_encl_get_backing() <----------> sgx_encl_truncate_backing_page()
+ }
+ mutex_unlock(&encl->lock);
+} }
+
+In this scenario there is a race between the reclaimer and the page fault
+handler when the reclaimer attempts to get access to the same PCMD page
+that is being truncated. This could result in the reclaimer writing to
+the PCMD page that is then truncated, causing the PCMD data to be lost,
+or in a new PCMD page being allocated. PCMD data may still be lost
+after protecting the backing store access with the mutex - this is fixed
+in the next patch. By ensuring the backing store is accessed with the mutex
+held, the enclave page state can be kept accurate, with the
+SGX_ENCL_PAGE_BEING_RECLAIMED flag accurately reflecting that a page
+is in the process of being reclaimed.
+
+Consistently protect the reclaimer's backing store access with the
+enclave's mutex to ensure that it can safely run concurrently with the
+page fault handler.
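+
+After this change both paths bracket their backing store access with the
+enclave mutex; a condensed side-by-side view (in the style of the diagram
+above):
+
+  reclaimer (sgx_reclaim_pages)         page fault handler (sgx_vma_fault)
+
+  mutex_lock(&encl->lock);              mutex_lock(&encl->lock);
+  sgx_encl_get_backing(...);            sgx_encl_get_backing(...);
+  /* set BEING_RECLAIMED flag */        /* ELDU, truncate PCMD if empty */
+  mutex_unlock(&encl->lock);            mutex_unlock(&encl->lock);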
+
+Cc: stable@vger.kernel.org
+Fixes: 1728ab54b4be ("x86/sgx: Add a page reclaimer")
+Reported-by: Haitao Huang <haitao.huang@intel.com>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
+Tested-by: Jarkko Sakkinen <jarkko@kernel.org>
+Tested-by: Haitao Huang <haitao.huang@intel.com>
+Link: https://lkml.kernel.org/r/fa2e04c561a8555bfe1f4e7adc37d60efc77387b.1652389823.git.reinette.chatre@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/sgx/main.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/sgx/main.c
++++ b/arch/x86/kernel/cpu/sgx/main.c
+@@ -289,6 +289,7 @@ static void sgx_reclaimer_write(struct s
+ sgx_encl_ewb(epc_page, backing);
+ encl_page->epc_page = NULL;
+ encl->secs_child_cnt--;
++ sgx_encl_put_backing(backing);
+
+ if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) {
+ ret = sgx_encl_get_backing(encl, PFN_DOWN(encl->size),
+@@ -362,11 +363,14 @@ static void sgx_reclaim_pages(void)
+ goto skip;
+
+ page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
++
++ mutex_lock(&encl_page->encl->lock);
+ ret = sgx_encl_get_backing(encl_page->encl, page_index, &backing[i]);
+- if (ret)
++ if (ret) {
++ mutex_unlock(&encl_page->encl->lock);
+ goto skip;
++ }
+
+- mutex_lock(&encl_page->encl->lock);
+ encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
+ mutex_unlock(&encl_page->encl->lock);
+ continue;
+@@ -394,7 +398,6 @@ skip:
+
+ encl_page = epc_page->owner;
+ sgx_reclaimer_write(epc_page, &backing[i]);
+- sgx_encl_put_backing(&backing[i]);
+
+ kref_put(&encl_page->encl->refcount, sgx_encl_release);
+ epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;