--- /dev/null
+From 445c1470b6ef96440e7cfc42dfc160f5004fd149 Mon Sep 17 00:00:00 2001
+From: Ross Philipson <ross.philipson@oracle.com>
+Date: Wed, 23 Feb 2022 21:07:36 -0500
+Subject: x86/boot: Add setup_indirect support in early_memremap_is_setup_data()
+
+From: Ross Philipson <ross.philipson@oracle.com>
+
+commit 445c1470b6ef96440e7cfc42dfc160f5004fd149 upstream.
+
+The x86 boot documentation describes the setup_indirect structures and
+how they are used. Of the two functions in ioremap.c that needed to be
+made aware of the setup_indirect functionality, only one was updated.
+Add comparable support to the other function, where it was missing.
+
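+For reference, the two structures involved (per the layout in
+arch/x86/include/uapi/asm/bootparam.h; quoted here from memory, so treat
+the exact field order as indicative rather than authoritative):
+
+  struct setup_data {
+          __u64 next;     /* physical address of the next node, 0 ends the list */
+          __u32 type;     /* SETUP_*, or SETUP_INDIRECT */
+          __u32 len;      /* length of data[] */
+          __u8  data[];
+  };
+
+  struct setup_indirect {
+          __u32 type;     /* real type of the out-of-line payload */
+          __u32 reserved;
+          __u64 len;      /* payload length */
+          __u64 addr;     /* payload physical address */
+  };
+
+When data->type is SETUP_INDIRECT, data->data[] holds a struct
+setup_indirect whose addr/len describe where the payload actually lives,
+so an address-range check has to consider that region as well as the
+setup_data node itself.
+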
+Fixes: b3c72fc9a78e ("x86/boot: Introduce setup_indirect")
+Signed-off-by: Ross Philipson <ross.philipson@oracle.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/1645668456-22036-3-git-send-email-ross.philipson@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/ioremap.c | 33 +++++++++++++++++++++++++++++++--
+ 1 file changed, 31 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -675,22 +675,51 @@ static bool memremap_is_setup_data(resou
+ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
+ unsigned long size)
+ {
++ struct setup_indirect *indirect;
+ struct setup_data *data;
+ u64 paddr, paddr_next;
+
+ paddr = boot_params.hdr.setup_data;
+ while (paddr) {
+- unsigned int len;
++ unsigned int len, size;
+
+ if (phys_addr == paddr)
+ return true;
+
+ data = early_memremap_decrypted(paddr, sizeof(*data));
++ if (!data) {
++ pr_warn("failed to early memremap setup_data entry\n");
++ return false;
++ }
++
++ size = sizeof(*data);
+
+ paddr_next = data->next;
+ len = data->len;
+
+- early_memunmap(data, sizeof(*data));
++ if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
++ early_memunmap(data, sizeof(*data));
++ return true;
++ }
++
++ if (data->type == SETUP_INDIRECT) {
++ size += len;
++ early_memunmap(data, sizeof(*data));
++ data = early_memremap_decrypted(paddr, size);
++ if (!data) {
++ pr_warn("failed to early memremap indirect setup_data\n");
++ return false;
++ }
++
++ indirect = (struct setup_indirect *)data->data;
++
++ if (indirect->type != SETUP_INDIRECT) {
++ paddr = indirect->addr;
++ len = indirect->len;
++ }
++ }
++
++ early_memunmap(data, size);
+
+ if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
+ return true;
--- /dev/null
+From 7228918b34615ef6317edcd9a058a057bc54aa32 Mon Sep 17 00:00:00 2001
+From: Ross Philipson <ross.philipson@oracle.com>
+Date: Wed, 23 Feb 2022 21:07:35 -0500
+Subject: x86/boot: Fix memremap of setup_indirect structures
+
+From: Ross Philipson <ross.philipson@oracle.com>
+
+commit 7228918b34615ef6317edcd9a058a057bc54aa32 upstream.
+
+As documented, the setup_indirect structure is nested inside the
+setup_data structures in the setup_data list. The code currently
+accesses the fields inside the setup_indirect structure, but only
+sizeof(struct setup_data) bytes are memremapped. No crash has occurred,
+but only because of how the area happens to be remapped under the
+covers.
+
+Properly memremap both the setup_data and setup_indirect structures
+in these cases before accessing them.
+
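+The pattern applied in each of the touched call sites is, roughly (a
+minimal sketch; early_memremap()/early_memunmap() are the real
+interfaces, but the helper and its name are illustrative, not something
+this patch adds):
+
+  /*
+   * Map the fixed setup_data header first; if the entry turns out to be
+   * SETUP_INDIRECT, remap header + payload so that data->data[] (which
+   * holds the struct setup_indirect) is actually covered by the mapping.
+   */
+  static struct setup_data * __init map_setup_data(u64 pa, u32 *mapped_len)
+  {
+          struct setup_data *data;
+          u32 len = sizeof(*data);
+
+          data = early_memremap(pa, len);
+          if (!data)
+                  return NULL;
+
+          if (data->type == SETUP_INDIRECT) {
+                  len += data->len;
+                  early_memunmap(data, sizeof(*data));
+                  data = early_memremap(pa, len);
+          }
+
+          *mapped_len = len;      /* caller hands this back to early_memunmap() */
+          return data;
+  }
+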
+Fixes: b3c72fc9a78e ("x86/boot: Introduce setup_indirect")
+Signed-off-by: Ross Philipson <ross.philipson@oracle.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/1645668456-22036-2-git-send-email-ross.philipson@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/e820.c | 41 +++++++++++++++++------
+ arch/x86/kernel/kdebugfs.c | 35 +++++++++++++++-----
+ arch/x86/kernel/ksysfs.c | 77 +++++++++++++++++++++++++++++++++++----------
+ arch/x86/kernel/setup.c | 34 +++++++++++++++----
+ arch/x86/mm/ioremap.c | 24 +++++++++++---
+ 5 files changed, 165 insertions(+), 46 deletions(-)
+
+--- a/arch/x86/kernel/e820.c
++++ b/arch/x86/kernel/e820.c
+@@ -995,8 +995,10 @@ early_param("memmap", parse_memmap_opt);
+ */
+ void __init e820__reserve_setup_data(void)
+ {
++ struct setup_indirect *indirect;
+ struct setup_data *data;
+- u64 pa_data;
++ u64 pa_data, pa_next;
++ u32 len;
+
+ pa_data = boot_params.hdr.setup_data;
+ if (!pa_data)
+@@ -1004,6 +1006,14 @@ void __init e820__reserve_setup_data(voi
+
+ while (pa_data) {
+ data = early_memremap(pa_data, sizeof(*data));
++ if (!data) {
++ pr_warn("e820: failed to memremap setup_data entry\n");
++ return;
++ }
++
++ len = sizeof(*data);
++ pa_next = data->next;
++
+ e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+
+ /*
+@@ -1015,18 +1025,27 @@ void __init e820__reserve_setup_data(voi
+ sizeof(*data) + data->len,
+ E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+
+- if (data->type == SETUP_INDIRECT &&
+- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
+- e820__range_update(((struct setup_indirect *)data->data)->addr,
+- ((struct setup_indirect *)data->data)->len,
+- E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+- e820__range_update_kexec(((struct setup_indirect *)data->data)->addr,
+- ((struct setup_indirect *)data->data)->len,
+- E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
++ if (data->type == SETUP_INDIRECT) {
++ len += data->len;
++ early_memunmap(data, sizeof(*data));
++ data = early_memremap(pa_data, len);
++ if (!data) {
++ pr_warn("e820: failed to memremap indirect setup_data\n");
++ return;
++ }
++
++ indirect = (struct setup_indirect *)data->data;
++
++ if (indirect->type != SETUP_INDIRECT) {
++ e820__range_update(indirect->addr, indirect->len,
++ E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
++ e820__range_update_kexec(indirect->addr, indirect->len,
++ E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
++ }
+ }
+
+- pa_data = data->next;
+- early_memunmap(data, sizeof(*data));
++ pa_data = pa_next;
++ early_memunmap(data, len);
+ }
+
+ e820__update_table(e820_table);
+--- a/arch/x86/kernel/kdebugfs.c
++++ b/arch/x86/kernel/kdebugfs.c
+@@ -88,11 +88,13 @@ create_setup_data_node(struct dentry *pa
+
+ static int __init create_setup_data_nodes(struct dentry *parent)
+ {
++ struct setup_indirect *indirect;
+ struct setup_data_node *node;
+ struct setup_data *data;
+- int error;
++ u64 pa_data, pa_next;
+ struct dentry *d;
+- u64 pa_data;
++ int error;
++ u32 len;
+ int no = 0;
+
+ d = debugfs_create_dir("setup_data", parent);
+@@ -112,12 +114,29 @@ static int __init create_setup_data_node
+ error = -ENOMEM;
+ goto err_dir;
+ }
++ pa_next = data->next;
+
+- if (data->type == SETUP_INDIRECT &&
+- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
+- node->paddr = ((struct setup_indirect *)data->data)->addr;
+- node->type = ((struct setup_indirect *)data->data)->type;
+- node->len = ((struct setup_indirect *)data->data)->len;
++ if (data->type == SETUP_INDIRECT) {
++ len = sizeof(*data) + data->len;
++ memunmap(data);
++ data = memremap(pa_data, len, MEMREMAP_WB);
++ if (!data) {
++ kfree(node);
++ error = -ENOMEM;
++ goto err_dir;
++ }
++
++ indirect = (struct setup_indirect *)data->data;
++
++ if (indirect->type != SETUP_INDIRECT) {
++ node->paddr = indirect->addr;
++ node->type = indirect->type;
++ node->len = indirect->len;
++ } else {
++ node->paddr = pa_data;
++ node->type = data->type;
++ node->len = data->len;
++ }
+ } else {
+ node->paddr = pa_data;
+ node->type = data->type;
+@@ -125,7 +144,7 @@ static int __init create_setup_data_node
+ }
+
+ create_setup_data_node(d, no, node);
+- pa_data = data->next;
++ pa_data = pa_next;
+
+ memunmap(data);
+ no++;
+--- a/arch/x86/kernel/ksysfs.c
++++ b/arch/x86/kernel/ksysfs.c
+@@ -91,26 +91,41 @@ static int get_setup_data_paddr(int nr,
+
+ static int __init get_setup_data_size(int nr, size_t *size)
+ {
+- int i = 0;
++ u64 pa_data = boot_params.hdr.setup_data, pa_next;
++ struct setup_indirect *indirect;
+ struct setup_data *data;
+- u64 pa_data = boot_params.hdr.setup_data;
++ int i = 0;
++ u32 len;
+
+ while (pa_data) {
+ data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
+ if (!data)
+ return -ENOMEM;
++ pa_next = data->next;
++
+ if (nr == i) {
+- if (data->type == SETUP_INDIRECT &&
+- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT)
+- *size = ((struct setup_indirect *)data->data)->len;
+- else
++ if (data->type == SETUP_INDIRECT) {
++ len = sizeof(*data) + data->len;
++ memunmap(data);
++ data = memremap(pa_data, len, MEMREMAP_WB);
++ if (!data)
++ return -ENOMEM;
++
++ indirect = (struct setup_indirect *)data->data;
++
++ if (indirect->type != SETUP_INDIRECT)
++ *size = indirect->len;
++ else
++ *size = data->len;
++ } else {
+ *size = data->len;
++ }
+
+ memunmap(data);
+ return 0;
+ }
+
+- pa_data = data->next;
++ pa_data = pa_next;
+ memunmap(data);
+ i++;
+ }
+@@ -120,9 +135,11 @@ static int __init get_setup_data_size(in
+ static ssize_t type_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+ {
++ struct setup_indirect *indirect;
++ struct setup_data *data;
+ int nr, ret;
+ u64 paddr;
+- struct setup_data *data;
++ u32 len;
+
+ ret = kobj_to_setup_data_nr(kobj, &nr);
+ if (ret)
+@@ -135,10 +152,20 @@ static ssize_t type_show(struct kobject
+ if (!data)
+ return -ENOMEM;
+
+- if (data->type == SETUP_INDIRECT)
+- ret = sprintf(buf, "0x%x\n", ((struct setup_indirect *)data->data)->type);
+- else
++ if (data->type == SETUP_INDIRECT) {
++ len = sizeof(*data) + data->len;
++ memunmap(data);
++ data = memremap(paddr, len, MEMREMAP_WB);
++ if (!data)
++ return -ENOMEM;
++
++ indirect = (struct setup_indirect *)data->data;
++
++ ret = sprintf(buf, "0x%x\n", indirect->type);
++ } else {
+ ret = sprintf(buf, "0x%x\n", data->type);
++ }
++
+ memunmap(data);
+ return ret;
+ }
+@@ -149,9 +176,10 @@ static ssize_t setup_data_data_read(stru
+ char *buf,
+ loff_t off, size_t count)
+ {
++ struct setup_indirect *indirect;
++ struct setup_data *data;
+ int nr, ret = 0;
+ u64 paddr, len;
+- struct setup_data *data;
+ void *p;
+
+ ret = kobj_to_setup_data_nr(kobj, &nr);
+@@ -165,10 +193,27 @@ static ssize_t setup_data_data_read(stru
+ if (!data)
+ return -ENOMEM;
+
+- if (data->type == SETUP_INDIRECT &&
+- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
+- paddr = ((struct setup_indirect *)data->data)->addr;
+- len = ((struct setup_indirect *)data->data)->len;
++ if (data->type == SETUP_INDIRECT) {
++ len = sizeof(*data) + data->len;
++ memunmap(data);
++ data = memremap(paddr, len, MEMREMAP_WB);
++ if (!data)
++ return -ENOMEM;
++
++ indirect = (struct setup_indirect *)data->data;
++
++ if (indirect->type != SETUP_INDIRECT) {
++ paddr = indirect->addr;
++ len = indirect->len;
++ } else {
++ /*
++ * Even though this is technically undefined, return
++ * the data as though it is a normal setup_data struct.
++ * This will at least allow it to be inspected.
++ */
++ paddr += sizeof(*data);
++ len = data->len;
++ }
+ } else {
+ paddr += sizeof(*data);
+ len = data->len;
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -368,21 +368,41 @@ static void __init parse_setup_data(void
+
+ static void __init memblock_x86_reserve_range_setup_data(void)
+ {
++ struct setup_indirect *indirect;
+ struct setup_data *data;
+- u64 pa_data;
++ u64 pa_data, pa_next;
++ u32 len;
+
+ pa_data = boot_params.hdr.setup_data;
+ while (pa_data) {
+ data = early_memremap(pa_data, sizeof(*data));
++ if (!data) {
++ pr_warn("setup: failed to memremap setup_data entry\n");
++ return;
++ }
++
++ len = sizeof(*data);
++ pa_next = data->next;
++
+ memblock_reserve(pa_data, sizeof(*data) + data->len);
+
+- if (data->type == SETUP_INDIRECT &&
+- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT)
+- memblock_reserve(((struct setup_indirect *)data->data)->addr,
+- ((struct setup_indirect *)data->data)->len);
++ if (data->type == SETUP_INDIRECT) {
++ len += data->len;
++ early_memunmap(data, sizeof(*data));
++ data = early_memremap(pa_data, len);
++ if (!data) {
++ pr_warn("setup: failed to memremap indirect setup_data\n");
++ return;
++ }
++
++ indirect = (struct setup_indirect *)data->data;
++
++ if (indirect->type != SETUP_INDIRECT)
++ memblock_reserve(indirect->addr, indirect->len);
++ }
+
+- pa_data = data->next;
+- early_memunmap(data, sizeof(*data));
++ pa_data = pa_next;
++ early_memunmap(data, len);
+ }
+ }
+
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -614,6 +614,7 @@ static bool memremap_is_efi_data(resourc
+ static bool memremap_is_setup_data(resource_size_t phys_addr,
+ unsigned long size)
+ {
++ struct setup_indirect *indirect;
+ struct setup_data *data;
+ u64 paddr, paddr_next;
+
+@@ -626,6 +627,10 @@ static bool memremap_is_setup_data(resou
+
+ data = memremap(paddr, sizeof(*data),
+ MEMREMAP_WB | MEMREMAP_DEC);
++ if (!data) {
++ pr_warn("failed to memremap setup_data entry\n");
++ return false;
++ }
+
+ paddr_next = data->next;
+ len = data->len;
+@@ -635,10 +640,21 @@ static bool memremap_is_setup_data(resou
+ return true;
+ }
+
+- if (data->type == SETUP_INDIRECT &&
+- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
+- paddr = ((struct setup_indirect *)data->data)->addr;
+- len = ((struct setup_indirect *)data->data)->len;
++ if (data->type == SETUP_INDIRECT) {
++ memunmap(data);
++ data = memremap(paddr, sizeof(*data) + len,
++ MEMREMAP_WB | MEMREMAP_DEC);
++ if (!data) {
++ pr_warn("failed to memremap indirect setup_data\n");
++ return false;
++ }
++
++ indirect = (struct setup_indirect *)data->data;
++
++ if (indirect->type != SETUP_INDIRECT) {
++ paddr = indirect->addr;
++ len = indirect->len;
++ }
+ }
+
+ memunmap(data);
--- /dev/null
+From 08999b2489b4c9b939d7483dbd03702ee4576d96 Mon Sep 17 00:00:00 2001
+From: Jarkko Sakkinen <jarkko@kernel.org>
+Date: Fri, 4 Mar 2022 00:38:58 +0200
+Subject: x86/sgx: Free backing memory after faulting the enclave page
+
+From: Jarkko Sakkinen <jarkko@kernel.org>
+
+commit 08999b2489b4c9b939d7483dbd03702ee4576d96 upstream.
+
+There is a limited amount of SGX memory (EPC) on each system. When that
+memory is used up, SGX has its own swapping mechanism which is similar
+in concept but totally separate from the core mm/* code. Instead of
+swapping to disk, SGX swaps from EPC to normal RAM. That normal RAM
+comes from a shared memory pseudo-file and can itself be swapped by the
+core mm code. There is a hierarchy like this:
+
+ EPC <-> shmem <-> disk
+
+After data is swapped back in from shmem to EPC, the shmem backing
+storage needs to be freed. Currently, the backing shmem is not freed.
+This effectively wastes the shmem while the enclave is running. The
+memory is recovered when the enclave is destroyed and the backing
+storage freed.
+
+Sort this out by freeing the backing memory with shmem_truncate_range()
+as soon as a page is faulted back into the EPC. In addition, free the
+memory for PCMD pages as soon as all PCMDs in a page have been marked as
+unused by zeroing their contents.
+
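+For reference, the backing-store layout that the new offset helper
+encodes (sizes here assume the architectural ones: a SECS occupies a
+full 4 KiB page and a PCMD entry is 128 bytes, so 32 PCMD entries share
+one backing page; the worked numbers are only illustrative):
+
+  [0, encl->size)                      one backing page per enclave page
+  [encl->size, encl->size + 4096)      one extra page slot for the SECS
+  [encl->size + 4096, ...)             one 128-byte PCMD per enclave page
+
+  page_pcmd_off = encl->size + sizeof(struct sgx_secs)
+                  + page_index * sizeof(struct sgx_pcmd)
+
+PFN_DOWN(page_pcmd_off) selects the backing page that holds the PCMD
+(which is why the old code could use "page_index >> 5"), and
+page_pcmd_off & (PAGE_SIZE - 1) is the PCMD's offset within that page,
+which becomes backing->pcmd_offset.
+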
+Cc: stable@vger.kernel.org
+Fixes: 1728ab54b4be ("x86/sgx: Add a page reclaimer")
+Reported-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lkml.kernel.org/r/20220303223859.273187-1-jarkko@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/sgx/encl.c | 57 ++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 48 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kernel/cpu/sgx/encl.c
++++ b/arch/x86/kernel/cpu/sgx/encl.c
+@@ -13,6 +13,30 @@
+ #include "sgx.h"
+
+ /*
++ * Calculate byte offset of a PCMD struct associated with an enclave page. PCMD's
++ * follow right after the EPC data in the backing storage. In addition to the
++ * visible enclave pages, there's one extra page slot for SECS, before PCMD
++ * structs.
++ */
++static inline pgoff_t sgx_encl_get_backing_page_pcmd_offset(struct sgx_encl *encl,
++ unsigned long page_index)
++{
++ pgoff_t epc_end_off = encl->size + sizeof(struct sgx_secs);
++
++ return epc_end_off + page_index * sizeof(struct sgx_pcmd);
++}
++
++/*
++ * Free a page from the backing storage in the given page index.
++ */
++static inline void sgx_encl_truncate_backing_page(struct sgx_encl *encl, unsigned long page_index)
++{
++ struct inode *inode = file_inode(encl->backing);
++
++ shmem_truncate_range(inode, PFN_PHYS(page_index), PFN_PHYS(page_index) + PAGE_SIZE - 1);
++}
++
++/*
+ * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC
+ * Pages" in the SDM.
+ */
+@@ -22,9 +46,11 @@ static int __sgx_encl_eldu(struct sgx_en
+ {
+ unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
+ struct sgx_encl *encl = encl_page->encl;
++ pgoff_t page_index, page_pcmd_off;
+ struct sgx_pageinfo pginfo;
+ struct sgx_backing b;
+- pgoff_t page_index;
++ bool pcmd_page_empty;
++ u8 *pcmd_page;
+ int ret;
+
+ if (secs_page)
+@@ -32,14 +58,16 @@ static int __sgx_encl_eldu(struct sgx_en
+ else
+ page_index = PFN_DOWN(encl->size);
+
++ page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
++
+ ret = sgx_encl_get_backing(encl, page_index, &b);
+ if (ret)
+ return ret;
+
+ pginfo.addr = encl_page->desc & PAGE_MASK;
+ pginfo.contents = (unsigned long)kmap_atomic(b.contents);
+- pginfo.metadata = (unsigned long)kmap_atomic(b.pcmd) +
+- b.pcmd_offset;
++ pcmd_page = kmap_atomic(b.pcmd);
++ pginfo.metadata = (unsigned long)pcmd_page + b.pcmd_offset;
+
+ if (secs_page)
+ pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page);
+@@ -55,11 +83,24 @@ static int __sgx_encl_eldu(struct sgx_en
+ ret = -EFAULT;
+ }
+
+- kunmap_atomic((void *)(unsigned long)(pginfo.metadata - b.pcmd_offset));
++ memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd));
++
++ /*
++ * The area for the PCMD in the page was zeroed above. Check if the
++ * whole page is now empty meaning that all PCMD's have been zeroed:
++ */
++ pcmd_page_empty = !memchr_inv(pcmd_page, 0, PAGE_SIZE);
++
++ kunmap_atomic(pcmd_page);
+ kunmap_atomic((void *)(unsigned long)pginfo.contents);
+
+ sgx_encl_put_backing(&b, false);
+
++ sgx_encl_truncate_backing_page(encl, page_index);
++
++ if (pcmd_page_empty)
++ sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
++
+ return ret;
+ }
+
+@@ -579,7 +620,7 @@ static struct page *sgx_encl_get_backing
+ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
+ struct sgx_backing *backing)
+ {
+- pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5);
++ pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
+ struct page *contents;
+ struct page *pcmd;
+
+@@ -587,7 +628,7 @@ int sgx_encl_get_backing(struct sgx_encl
+ if (IS_ERR(contents))
+ return PTR_ERR(contents);
+
+- pcmd = sgx_encl_get_backing_page(encl, pcmd_index);
++ pcmd = sgx_encl_get_backing_page(encl, PFN_DOWN(page_pcmd_off));
+ if (IS_ERR(pcmd)) {
+ put_page(contents);
+ return PTR_ERR(pcmd);
+@@ -596,9 +637,7 @@ int sgx_encl_get_backing(struct sgx_encl
+ backing->page_index = page_index;
+ backing->contents = contents;
+ backing->pcmd = pcmd;
+- backing->pcmd_offset =
+- (page_index & (PAGE_SIZE / sizeof(struct sgx_pcmd) - 1)) *
+- sizeof(struct sgx_pcmd);
++ backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1);
+
+ return 0;
+ }