* Andrew F. Davis <afd@ti.com>
*/
+#include <linux/cc_platform.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
+#include <linux/mem_encrypt.h>
#include <linux/mm.h>
+#include <linux/set_memory.h>
#include <linux/module.h>
+#include <linux/pgtable.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+/*
+ * Per-heap driver data (installed via dma_heap_export_info.priv).
+ * cc_shared selects whether buffers from this heap are converted to
+ * shared/decrypted memory for a confidential-computing guest.
+ */
+struct system_heap_priv {
+ bool cc_shared;
+};
+
struct system_heap_buffer {
struct dma_heap *heap;
struct list_head attachments;
struct sg_table sg_table;
int vmap_cnt;
void *vaddr;
+ /* true when the backing pages have been set decrypted (CC-shared) */
+ bool cc_shared;
};
struct dma_heap_attachment {
struct sg_table table;
struct list_head list;
bool mapped;
+ /* copied from the owning buffer; selects DMA_ATTR_CC_SHARED on map */
+ bool cc_shared;
};
#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
+/*
+ * Convert one (possibly compound) page to shared/decrypted so the host
+ * side of a confidential-computing guest can access it.  Returns 0 on
+ * success or the error from set_memory_decrypted(); failures are rate-
+ * limit logged.
+ */
+static int system_heap_set_page_decrypted(struct page *page)
+{
+	void *vaddr = page_address(page);
+	int ret;
+
+	/* Cover every subpage of the compound page in one call. */
+	ret = set_memory_decrypted((unsigned long)vaddr,
+				   1 << compound_order(page));
+	if (ret)
+		pr_warn_ratelimited("dma-buf system heap: failed to decrypt page at %p\n",
+				    vaddr);
+
+	return ret;
+}
+
+/*
+ * Convert one (possibly compound) page back to private/encrypted before
+ * it is returned to the page allocator.  Returns 0 on success or the
+ * error from set_memory_encrypted(); on failure callers leak the page,
+ * hence the log message.
+ */
+static int system_heap_set_page_encrypted(struct page *page)
+{
+	void *vaddr = page_address(page);
+	int ret;
+
+	/* Cover every subpage of the compound page in one call. */
+	ret = set_memory_encrypted((unsigned long)vaddr,
+				   1 << compound_order(page));
+	if (ret)
+		pr_warn_ratelimited("dma-buf system heap: failed to re-encrypt page at %p, leaking memory\n",
+				    vaddr);
+
+	return ret;
+}
+
static int dup_sg_table(struct sg_table *from, struct sg_table *to)
{
struct scatterlist *sg, *new_sg;
a->dev = attachment->dev;
INIT_LIST_HEAD(&a->list);
a->mapped = false;
+ a->cc_shared = buffer->cc_shared;
attachment->priv = a;
{
struct dma_heap_attachment *a = attachment->priv;
struct sg_table *table = &a->table;
+ unsigned long attrs;
int ret;
- ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+ attrs = a->cc_shared ? DMA_ATTR_CC_SHARED : 0;
+ ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
if (ret)
return ERR_PTR(ret);
unsigned long addr = vma->vm_start;
unsigned long pgoff = vma->vm_pgoff;
struct scatterlist *sg;
+ pgprot_t prot;
int i, ret;
+ prot = vma->vm_page_prot;
+ if (buffer->cc_shared)
+ prot = pgprot_decrypted(prot);
+
for_each_sgtable_sg(table, sg, i) {
unsigned long n = sg->length >> PAGE_SHIFT;
if (addr + size > vma->vm_end)
size = vma->vm_end - addr;
- ret = remap_pfn_range(vma, addr, page_to_pfn(page),
- size, vma->vm_page_prot);
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page), size, prot);
if (ret)
return ret;
struct page **pages = vmalloc(sizeof(struct page *) * npages);
struct page **tmp = pages;
struct sg_page_iter piter;
+ pgprot_t prot;
void *vaddr;
if (!pages)
*tmp++ = sg_page_iter_page(&piter);
}
- vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
+ prot = PAGE_KERNEL;
+ if (buffer->cc_shared)
+ prot = pgprot_decrypted(prot);
+ vaddr = vmap(pages, npages, VM_MAP, prot);
vfree(pages);
if (!vaddr)
for_each_sgtable_sg(table, sg, i) {
struct page *page = sg_page(sg);
+ /*
+ * Intentionally leak pages that cannot be re-encrypted
+ * to prevent shared memory from being reused.
+ */
+ if (buffer->cc_shared &&
+ system_heap_set_page_encrypted(page))
+ continue;
+
__free_pages(page, compound_order(page));
}
sg_free_table(table);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
unsigned long size_remaining = len;
unsigned int max_order = orders[0];
+ struct system_heap_priv *priv = dma_heap_get_drvdata(heap);
+ bool cc_shared = priv->cc_shared;
struct dma_buf *dmabuf;
struct sg_table *table;
struct scatterlist *sg;
mutex_init(&buffer->lock);
buffer->heap = heap;
buffer->len = len;
+ buffer->cc_shared = cc_shared;
INIT_LIST_HEAD(&pages);
i = 0;
list_del(&page->lru);
}
+ if (cc_shared) {
+ for_each_sgtable_sg(table, sg, i) {
+ ret = system_heap_set_page_decrypted(sg_page(sg));
+ if (ret)
+ goto free_pages;
+ }
+ }
+
/* create the dmabuf */
exp_info.exp_name = dma_heap_get_name(heap);
exp_info.ops = &system_heap_buf_ops;
for_each_sgtable_sg(table, sg, i) {
struct page *p = sg_page(sg);
+ /*
+ * Intentionally leak pages that cannot be re-encrypted
+ * to prevent shared memory from being reused.
+ */
+ if (buffer->cc_shared &&
+ system_heap_set_page_encrypted(p))
+ continue;
__free_pages(p, compound_order(p));
}
sg_free_table(table);
.allocate = system_heap_allocate,
};
+/* priv for the regular "system" heap: buffers stay private/encrypted. */
+static struct system_heap_priv system_heap_priv = {
+ .cc_shared = false,
+};
+
+/* priv for the "system_cc_shared" heap: buffers are decrypted for host sharing. */
+static struct system_heap_priv system_heap_cc_shared_priv = {
+ .cc_shared = true,
+};
+
static int __init system_heap_create(void)
{
struct dma_heap_export_info exp_info;
exp_info.name = "system";
exp_info.ops = &system_heap_ops;
- exp_info.priv = NULL;
+ exp_info.priv = &system_heap_priv;
+
+ sys_heap = dma_heap_add(&exp_info);
+ if (IS_ERR(sys_heap))
+ return PTR_ERR(sys_heap);
+
+ if (IS_ENABLED(CONFIG_HIGHMEM) ||
+ !cc_platform_has(CC_ATTR_MEM_ENCRYPT))
+ return 0;
+ exp_info.name = "system_cc_shared";
+ exp_info.priv = &system_heap_cc_shared_priv;
sys_heap = dma_heap_add(&exp_info);
if (IS_ERR(sys_heap))
return PTR_ERR(sys_heap);