void snp_set_page_private(unsigned long paddr)
{
+ struct psc_desc d = {
+ SNP_PAGE_STATE_PRIVATE,
+ (struct svsm_ca *)boot_svsm_caa_pa,
+ boot_svsm_caa_pa
+ };
+
if (!sev_snp_enabled())
return;
- __page_state_change(paddr, paddr, SNP_PAGE_STATE_PRIVATE);
+ __page_state_change(paddr, paddr, &d);
}
void snp_set_page_shared(unsigned long paddr)
{
+ struct psc_desc d = {
+ SNP_PAGE_STATE_SHARED,
+ (struct svsm_ca *)boot_svsm_caa_pa,
+ boot_svsm_caa_pa
+ };
+
if (!sev_snp_enabled())
return;
- __page_state_change(paddr, paddr, SNP_PAGE_STATE_SHARED);
+ __page_state_change(paddr, paddr, &d);
}
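Both wrappers build the same descriptor and differ only in the requested state. The bare cast of boot_svsm_caa_pa to a pointer is legal here only because the decompressor runs identity mapped, so the calling area's physical address is also its virtual address. A hypothetical helper (not part of this patch) that factors out the pattern:

/*
 * Hypothetical helper, not in the patch: build the boot-stage
 * descriptor. Relies on the decompressor's 1:1 mapping, where the
 * SVSM calling area's PA doubles as its VA.
 */
static struct psc_desc boot_psc_desc(enum psc_op op)
{
	return (struct psc_desc){
		.op	= op,
		.ca	= (struct svsm_ca *)boot_svsm_caa_pa,
		.caa_pa	= boot_svsm_caa_pa,
	};
}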
@@ ... @@ bool early_setup_ghcb(void)
void snp_accept_memory(phys_addr_t start, phys_addr_t end)
{
+ struct psc_desc d = {
+ SNP_PAGE_STATE_PRIVATE,
+ (struct svsm_ca *)boot_svsm_caa_pa,
+ boot_svsm_caa_pa
+ };
+
for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
- __page_state_change(pa, pa, SNP_PAGE_STATE_PRIVATE);
+ __page_state_change(pa, pa, &d);
}
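Each iteration issues one MSR-protocol state change, so acceptance costs one VMGEXIT per 4K page. An illustrative call, with region_start a hypothetical page-aligned address:

/* Accept one 2M region page-by-page (illustrative; SZ_2M from <linux/sizes.h>). */
snp_accept_memory(region_start, region_start + SZ_2M);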
@@ ... @@ void sev_es_shutdown_ghcb(void)
return ret;
}
-static void __head svsm_pval_4k_page(unsigned long paddr, bool validate)
+static void __head svsm_pval_4k_page(unsigned long paddr, bool validate,
+ struct svsm_ca *caa, u64 caa_pa)
{
struct svsm_pvalidate_call *pc;
struct svsm_call call = {};
*/
flags = native_local_irq_save();
- call.caa = svsm_get_caa();
+ call.caa = caa;
pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
- pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
+ pc_pa = caa_pa + offsetof(struct svsm_ca, svsm_buffer);
pc->num_entries = 1;
pc->cur_index = 0;
}
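The elided remainder of svsm_pval_4k_page presumably fills the single entry and issues the protocol call; a sketch under that assumption, using the kernel's SVSM call helpers:

/*
 * Sketch (assumed, not patch text): describe one 4K frame in the
 * shared buffer, then ask the SVSM to PVALIDATE it on our behalf.
 */
pc->entry[0].page_size = RMP_PG_SIZE_4K;
pc->entry[0].action    = validate;
pc->entry[0].pfn       = paddr >> PAGE_SHIFT;

call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
call.rcx = pc_pa;

ret = svsm_perform_call_protocol(&call);
if (ret)
	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);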
static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
- bool validate)
+ bool validate, struct svsm_ca *caa, u64 caa_pa)
{
int ret;
if (snp_vmpl) {
- svsm_pval_4k_page(paddr, validate);
+ svsm_pval_4k_page(paddr, validate, caa, caa_pa);
} else {
ret = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
if (ret)
}
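The body of the if (ret) branch is elided above; a failed PVALIDATE at this point is unrecoverable, so the guest presumably terminates, e.g.:

/* Assumed error path: report the failure and stop the guest. */
if (ret)
	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);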
static void __head __page_state_change(unsigned long vaddr, unsigned long paddr,
- enum psc_op op)
+ const struct psc_desc *desc)
{
u64 val, msr;
* If private -> shared then invalidate the page before requesting the
* state change in the RMP table.
*/
- if (op == SNP_PAGE_STATE_SHARED)
- pvalidate_4k_page(vaddr, paddr, false);
+ if (desc->op == SNP_PAGE_STATE_SHARED)
+ pvalidate_4k_page(vaddr, paddr, false, desc->ca, desc->caa_pa);
/* Save the current GHCB MSR value */
msr = sev_es_rd_ghcb_msr();
/* Issue VMGEXIT to change the page state in RMP table. */
- sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
+ sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, desc->op));
VMGEXIT();
/* Read the response of the VMGEXIT. */
* Now that the page state has been changed in the RMP table, validate it
* so that it is consistent with the RMP entry.
*/
- if (op == SNP_PAGE_STATE_PRIVATE)
- pvalidate_4k_page(vaddr, paddr, true);
+ if (desc->op == SNP_PAGE_STATE_PRIVATE)
+ pvalidate_4k_page(vaddr, paddr, true, desc->ca, desc->caa_pa);
}
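The response handling between the VMGEXIT and the final PVALIDATE is elided; a sketch of its assumed shape, mirroring the GHCB MSR protocol:

/*
 * Sketch (assumed): both the response code and the error field must
 * be clean, and the GHCB MSR saved above must be restored afterwards.
 */
val = sev_es_rd_ghcb_msr();
if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP || GHCB_MSR_PSC_RESP_VAL(val))
	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);

sev_es_wr_ghcb_msr(msr);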
/*
void __head
early_set_pages_state(unsigned long vaddr, unsigned long paddr,
- unsigned long npages, enum psc_op op)
+ unsigned long npages, const struct psc_desc *desc)
{
unsigned long paddr_end;
paddr_end = paddr + (npages << PAGE_SHIFT);
while (paddr < paddr_end) {
- __page_state_change(vaddr, paddr, op);
+ __page_state_change(vaddr, paddr, desc);
vaddr += PAGE_SIZE;
paddr += PAGE_SIZE;
void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
unsigned long npages)
{
+ struct psc_desc d = {
+ SNP_PAGE_STATE_PRIVATE, svsm_get_caa(), svsm_get_caa_pa()
+ };
+
/*
* This can be invoked in early boot while running identity mapped, so
* use an open coded check for SNP instead of using cc_platform_has().
* Ask the hypervisor to mark the memory pages as private in the RMP
* table.
*/
- early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
+ early_set_pages_state(vaddr, paddr, npages, &d);
}
void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
unsigned long npages)
{
+ struct psc_desc d = {
+ SNP_PAGE_STATE_SHARED, svsm_get_caa(), svsm_get_caa_pa()
+ };
+
/*
* This can be invoked in early boot while running identity mapped, so
* use an open coded check for SNP instead of using cc_platform_has().
return;
/* Ask hypervisor to mark the memory pages shared in the RMP table. */
- early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
+ early_set_pages_state(vaddr, paddr, npages, &d);
}
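Illustrative use of the two wrappers, flipping a single identity-mapped page; ghcb_page is a hypothetical page-aligned buffer:

early_snp_set_memory_shared((unsigned long)ghcb_page, __pa(ghcb_page), 1);
/* ... communicate with the hypervisor through the shared page ... */
early_snp_set_memory_private((unsigned long)ghcb_page, __pa(ghcb_page), 1);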
/*
unsigned long vaddr_end;
/* Use the MSR protocol when a GHCB is not available. */
- if (!boot_ghcb)
- return early_set_pages_state(vaddr, __pa(vaddr), npages, op);
+ if (!boot_ghcb) {
+ struct psc_desc d = { op, svsm_get_caa(), svsm_get_caa_pa() };
+
+ return early_set_pages_state(vaddr, __pa(vaddr), npages, &d);
+ }
vaddr = vaddr & PAGE_MASK;
vaddr_end = vaddr + (npages << PAGE_SHIFT);
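The runtime path fetches the calling area through svsm_get_caa()/svsm_get_caa_pa(). Hypothetical accessors consistent with the per-CPU declarations in the header hunk below; the kernel's real helpers may differ (for instance by falling back to the boot calling area early in boot):

/* Hypothetical sketch, not the kernel's actual implementation. */
static inline struct svsm_ca *svsm_get_caa(void)
{
	return this_cpu_read(svsm_caa);
}

static inline u64 svsm_get_caa_pa(void)
{
	return this_cpu_read(svsm_caa_pa);
}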
DECLARE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
- unsigned long npages, enum psc_op op);
+ unsigned long npages, const struct psc_desc *desc);
DECLARE_PER_CPU(struct svsm_ca *, svsm_caa);
DECLARE_PER_CPU(u64, svsm_caa_pa);
extern struct ghcb *boot_ghcb;
extern bool sev_snp_needs_sfw;
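+/*
+ * Descriptor for an MSR-protocol page state change: the requested RMP
+ * state plus the SVSM calling area (virtual and physical address) used
+ * when the PVALIDATE step must be proxied to the SVSM.
+ */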
+struct psc_desc {
+ enum psc_op op;
+ struct svsm_ca *ca;
+ u64 caa_pa;
+};
+
#else /* !CONFIG_AMD_MEM_ENCRYPT */
#define snp_vmpl 0
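Putting it together, an end-to-end illustrative use of the new descriptor in a runtime context:

/*
 * Illustrative: request that one page become shared. The page is
 * invalidated first, then the hypervisor flips it in the RMP table.
 */
struct psc_desc d = {
	.op	= SNP_PAGE_STATE_SHARED,
	.ca	= svsm_get_caa(),
	.caa_pa	= svsm_get_caa_pa(),
};

early_set_pages_state(vaddr, paddr, 1, &d);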