From da4edbcb9820cd7855593cd89f8d4f6e96b0858f Mon Sep 17 00:00:00 2001
From: Sasha Levin <sashal@kernel.org>
Date: Wed, 21 Feb 2024 16:17:21 -0800
Subject: dma-direct: Leak pages on dma_set_decrypted() failure

From: Rick Edgecombe <rick.p.edgecombe@intel.com>

[ Upstream commit b9fa16949d18e06bdf728a560f5c8af56d2bdcaf ]

On TDX it is possible for the untrusted host to cause
set_memory_encrypted() or set_memory_decrypted() to fail such that an
error is returned and the resulting memory is shared. Callers need to
take care to handle these errors to avoid returning decrypted (shared)
memory to the page allocator, which could lead to functional or security
issues.

DMA could free decrypted/shared pages if dma_set_decrypted() fails. This
should be a rare case. Just leak the pages in this case instead of
freeing them.

Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 kernel/dma/direct.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
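
For reference, a minimal sketch of the error-handling pattern the diff below
adopts: when dma_set_decrypted() fails on TDX, the buffer may have been left
shared with the untrusted host, so the pages are deliberately leaked instead
of being handed back to the page allocator. The helper name
alloc_decrypted_or_leak() is hypothetical and only illustrative; the
authoritative change is the diff itself.

/* Hypothetical helper, for illustration only -- not part of this patch. */
static void *alloc_decrypted_or_leak(struct device *dev, struct page *page,
				     size_t size)
{
	void *ret = page_address(page);

	/*
	 * dma_set_decrypted() can fail on TDX while the memory is still
	 * shared with the (untrusted) host.  Freeing such pages would hand
	 * shared memory back to the page allocator, so they are
	 * intentionally leaked here, mirroring the out_leak_pages labels
	 * introduced below.
	 */
	if (dma_set_decrypted(dev, ret, size))
		return NULL;	/* leak 'page' on purpose */

	memset(ret, 0, size);
	return ret;
}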

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 98b2e192fd696..4d543b1e9d577 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -286,7 +286,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	} else {
 		ret = page_address(page);
 		if (dma_set_decrypted(dev, ret, size))
-			goto out_free_pages;
+			goto out_leak_pages;
 	}
 
 	memset(ret, 0, size);
@@ -307,6 +307,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 out_free_pages:
 	__dma_direct_free_pages(dev, page, size);
 	return NULL;
+out_leak_pages:
+	return NULL;
 }
 
 void dma_direct_free(struct device *dev, size_t size,
@@ -367,12 +369,11 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	ret = page_address(page);
 	if (dma_set_decrypted(dev, ret, size))
-		goto out_free_pages;
+		goto out_leak_pages;
 	memset(ret, 0, size);
 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return page;
-out_free_pages:
-	__dma_direct_free_pages(dev, page, size);
+out_leak_pages:
 	return NULL;
 }
 
-- 
2.43.0
