#include <linux/cpumask.h>
#include <linux/iommu.h>
#include <linux/amd-iommu.h>
+#include <linux/nospec.h>
#include <asm/sev.h>
#include <asm/processor.h>
*/
#define RMPTABLE_CPU_BOOKKEEPING_SZ 0x4000
+/*
+ * For a non-segmented RMP table, use the maximum physical address width as
+ * the segment size in order to always arrive at index 0 in the table.
+ */
+#define RMPTABLE_NON_SEGMENTED_SHIFT 52
+
+struct rmp_segment_desc {
+ struct rmpentry_raw *rmp_entry;
+ u64 max_index;
+ u64 size;
+};
+
+/*
+ * Segmented RMP Table support.
+ * - The segment size is used for two purposes:
+ * - Identify the amount of memory covered by an RMP segment
+ * - Quickly locate an RMP segment table entry for a physical address
+ *
+ * - The RMP segment table contains pointers to an RMP table that covers
+ * a specific portion of memory. There can be up to 512 8-byte entries,
+ * one page's worth.
+ */
+static struct rmp_segment_desc **rmp_segment_table __ro_after_init;
+static unsigned int rst_max_index __ro_after_init = 512;
+
+static unsigned int rmp_segment_shift;
+static u64 rmp_segment_size;
+static u64 rmp_segment_mask;
+
+#define RST_ENTRY_INDEX(x) ((x) >> rmp_segment_shift)
+#define RMP_ENTRY_INDEX(x) ((u64)(PHYS_PFN((x) & rmp_segment_mask)))
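+
+/*
+ * Illustrative example, assuming a 1 GB segment size (rmp_segment_shift ==
+ * 30) and 4 KB pages (values assumed for illustration only): a physical
+ * address of 0x80042000 gives RST_ENTRY_INDEX() == 2 (the third segment)
+ * and RMP_ENTRY_INDEX() == 0x42 (the page index within that segment).
+ */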
+
/* Mask to apply to a PFN to get the first PFN of a 2MB page */
#define PFN_PMD_MASK GENMASK_ULL(63, PMD_SHIFT - PAGE_SHIFT)
static u64 probed_rmp_base, probed_rmp_size;
-static struct rmpentry_raw *rmptable __ro_after_init;
-static u64 rmptable_max_pfn __ro_after_init;
static LIST_HEAD(snp_leaked_pages_list);
static DEFINE_SPINLOCK(snp_leaked_pages_list_lock);
return true;
}
+static bool __init alloc_rmp_segment_desc(u64 segment_pa, u64 segment_size, u64 pa)
+{
+ u64 rst_index, rmp_segment_size_max;
+ struct rmp_segment_desc *desc;
+ void *rmp_segment;
+
+ /* Calculate the maximum size an RMP can be (16 bytes/page mapped) */
+ rmp_segment_size_max = PHYS_PFN(rmp_segment_size) << 4;
+
+ /* Validate the RMP segment size */
+ if (segment_size > rmp_segment_size_max) {
+ pr_err("Invalid RMP size 0x%llx for configured segment size 0x%llx\n",
+ segment_size, rmp_segment_size_max);
+ return false;
+ }
+
+ /* Validate the RMP segment table index */
+ rst_index = RST_ENTRY_INDEX(pa);
+ if (rst_index >= rst_max_index) {
+ pr_err("Invalid RMP segment base address 0x%llx for configured segment size 0x%llx\n",
+ pa, rmp_segment_size);
+ return false;
+ }
+
+ if (rmp_segment_table[rst_index]) {
+ pr_err("RMP segment descriptor already exists at index %llu\n", rst_index);
+ return false;
+ }
+
+ rmp_segment = memremap(segment_pa, segment_size, MEMREMAP_WB);
+ if (!rmp_segment) {
+ pr_err("Failed to map RMP segment addr 0x%llx size 0x%llx\n",
+ segment_pa, segment_size);
+ return false;
+ }
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc) {
+ memunmap(rmp_segment);
+ return false;
+ }
+
+ desc->rmp_entry = rmp_segment;
+ desc->max_index = segment_size / sizeof(*desc->rmp_entry);
+ desc->size = segment_size;
+
+ rmp_segment_table[rst_index] = desc;
+
+ return true;
+}
+
+static void __init free_rmp_segment_table(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < rst_max_index; i++) {
+ struct rmp_segment_desc *desc;
+
+ desc = rmp_segment_table[i];
+ if (!desc)
+ continue;
+
+ memunmap(desc->rmp_entry);
+
+ kfree(desc);
+ }
+
+ free_page((unsigned long)rmp_segment_table);
+
+ rmp_segment_table = NULL;
+}
+
+/* Allocate the table used to index into the RMP segments */
+static bool __init alloc_rmp_segment_table(void)
+{
+ struct page *page;
+
+ page = alloc_page(__GFP_ZERO);
+ if (!page)
+ return false;
+
+ rmp_segment_table = page_address(page);
+
+ return true;
+}
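+
+/*
+ * Note: assuming 4 KB pages and 8-byte pointers, the single page allocated
+ * above holds exactly PAGE_SIZE / sizeof(struct rmp_segment_desc *) == 512
+ * entries, matching the default value of rst_max_index.
+ */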
+
/*
* Do the necessary preparations which are verified by the firmware as
* described in the SNP_INIT_EX firmware command description in the SNP
*/
static int __init snp_rmptable_init(void)
{
- u64 max_rmp_pfn, calc_rmp_sz, rmptable_size, rmp_end, val;
- void *rmptable_start;
+ u64 max_rmp_pfn, calc_rmp_sz, rmptable_segment, rmptable_size, rmp_end, val;
+ unsigned int i;
if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return 0;
goto nosnp;
}
+ if (!alloc_rmp_segment_table())
+ goto nosnp;
+
/* Map only the RMP entries */
- rmptable_start = memremap(probed_rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ,
- probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ,
- MEMREMAP_WB);
- if (!rmptable_start) {
- pr_err("Failed to map RMP table\n");
+ rmptable_segment = probed_rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ;
+ rmptable_size = probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ;
+
+ if (!alloc_rmp_segment_desc(rmptable_segment, rmptable_size, 0)) {
+ free_rmp_segment_table();
goto nosnp;
}
- rmptable_size = probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ;
-
/*
* Check if SEV-SNP is already enabled, this can happen in case of
* kexec boot.
/* Zero out the RMP bookkeeping area */
if (!clear_rmptable_bookkeeping()) {
- memunmap(rmptable_start);
+ free_rmp_segment_table();
goto nosnp;
}
/* Zero out the RMP entries */
- memset(rmptable_start, 0, rmptable_size);
+ for (i = 0; i < rst_max_index; i++) {
+ struct rmp_segment_desc *desc;
+
+ desc = rmp_segment_table[i];
+ if (!desc)
+ continue;
+
+ memset(desc->rmp_entry, 0, desc->size);
+ }
/* Flush the caches to ensure that data is written before SNP is enabled. */
wbinvd_on_all_cpus();
on_each_cpu(snp_enable, NULL, 1);
skip_enable:
- rmptable = (struct rmpentry_raw *)rmptable_start;
- rmptable_max_pfn = rmptable_size / sizeof(struct rmpentry_raw) - 1;
-
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/rmptable_init:online", __snp_enable, NULL);
/*
*/
device_initcall(snp_rmptable_init);
+static void set_rmp_segment_info(unsigned int segment_shift)
+{
+ rmp_segment_shift = segment_shift;
+ rmp_segment_size = 1ULL << rmp_segment_shift;
+ rmp_segment_mask = rmp_segment_size - 1;
+}
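+
+/*
+ * Worked example for the non-segmented case: set_rmp_segment_info(52) gives
+ * rmp_segment_size == 1ULL << 52 and rmp_segment_mask == (1ULL << 52) - 1.
+ * Since x86-64 physical addresses never exceed 52 bits, RST_ENTRY_INDEX()
+ * is then 0 for every address and RMP_ENTRY_INDEX() reduces to PHYS_PFN().
+ */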
+
#define RMP_ADDR_MASK GENMASK_ULL(51, 13)
bool snp_probe_rmptable_info(void)
rmp_sz = rmp_end - rmp_base + 1;
+ /* Treat the contiguous RMP table as a single segment */
+ rst_max_index = 1;
+
+ set_rmp_segment_info(RMPTABLE_NON_SEGMENTED_SHIFT);
+
probed_rmp_base = rmp_base;
probed_rmp_size = rmp_sz;
return true;
}
+/*
+ * About the array_index_nospec() usage below:
+ *
+ * This function can get called by exported functions like
+ * snp_lookup_rmpentry(), which is used by the KVM #PF handler, among
+ * others, and since the @pfn passed in cannot always be trusted,
+ * speculation should be stopped as a protective measure.
+ */
static struct rmpentry_raw *get_raw_rmpentry(u64 pfn)
{
- if (!rmptable)
+ u64 paddr, rst_index, segment_index;
+ struct rmp_segment_desc *desc;
+
+ if (!rmp_segment_table)
return ERR_PTR(-ENODEV);
- if (unlikely(pfn > rmptable_max_pfn))
+ paddr = pfn << PAGE_SHIFT;
+
+ rst_index = RST_ENTRY_INDEX(paddr);
+ if (unlikely(rst_index >= rst_max_index))
return ERR_PTR(-EFAULT);
- return rmptable + pfn;
+ rst_index = array_index_nospec(rst_index, rst_max_index);
+
+ desc = rmp_segment_table[rst_index];
+ if (unlikely(!desc))
+ return ERR_PTR(-EFAULT);
+
+ segment_index = RMP_ENTRY_INDEX(paddr);
+ if (unlikely(segment_index >= desc->max_index))
+ return ERR_PTR(-EFAULT);
+
+ segment_index = array_index_nospec(segment_index, desc->max_index);
+
+ return desc->rmp_entry + segment_index;
}
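+
+/*
+ * Example walk of the lookup above, using the same illustrative 1 GB
+ * segment size and 4 KB pages: pfn 0x80042 becomes paddr 0x80042000,
+ * selects rmp_segment_table[2], and returns the raw RMP entry at
+ * desc->rmp_entry[0x42], with both indices clamped by array_index_nospec().
+ */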
static int get_rmpentry(u64 pfn, struct rmpentry *e)