From c4b22bf51b815fb61a35a27fc847a88bc28ebb63 Mon Sep 17 00:00:00 2001
From: Gilad Ben-Yossef <gilad@benyossef.com>
Date: Thu, 18 Apr 2019 16:38:48 +0300
Subject: crypto: ccree - remove special handling of chained sg

From: Gilad Ben-Yossef <gilad@benyossef.com>

commit c4b22bf51b815fb61a35a27fc847a88bc28ebb63 upstream.

We were handling chained scattergather lists with specialized code
needlessly as the regular sg APIs handle them just fine. The code
handling this also had an (unused) code path with a use-before-init
error, flagged by Coverity.

Remove all special handling of chained sg and leave their handling
to the regular sg APIs.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: stable@vger.kernel.org # v4.19+
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
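For context (an illustrative sketch, not part of the diff below): the point of the
change is that sg_next() already steps across chained scatterlist segments, so the
driver's own chain-walking code was redundant. A minimal standalone version of the
traversal the simplified cc_get_sgl_nents() relies on could look as follows; the
function name count_sg_nents_sketch and the min()-based byte accounting are
assumptions for illustration, not the driver's exact code.

#include <linux/kernel.h>       /* min() */
#include <linux/scatterlist.h>  /* struct scatterlist, sg_next() */

/*
 * Count how many sg entries are needed to cover 'nbytes', following
 * chain links via sg_next(). 'lbytes' returns the number of bytes
 * still outstanding at the last visited entry, mirroring what
 * cc_get_sgl_nents() reports.
 */
static unsigned int count_sg_nents_sketch(struct scatterlist *sg_list,
                                          unsigned int nbytes,
                                          unsigned int *lbytes)
{
        unsigned int nents = 0;

        while (nbytes && sg_list) {
                nents++;
                /* bytes still outstanding when this entry was reached */
                *lbytes = nbytes;
                /* consume at most this entry's length */
                nbytes -= min(nbytes, sg_list->length);
                /* sg_next() transparently follows chain entries */
                sg_list = sg_next(sg_list);
        }
        return nents;
}

The pre-patch code differed only in the else branch that followed chain entries by
hand via sg_page(); dropping it also lets the driver pass the whole (possibly
chained) list straight to dma_map_sg(), which is why cc_dma_map_sg() goes away.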
 drivers/crypto/ccree/cc_buffer_mgr.c | 98 +++++++----------------------------
 1 file changed, 22 insertions(+), 76 deletions(-)

--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *d
  */
 static unsigned int cc_get_sgl_nents(struct device *dev,
                                      struct scatterlist *sg_list,
-                                     unsigned int nbytes, u32 *lbytes,
-                                     bool *is_chained)
+                                     unsigned int nbytes, u32 *lbytes)
 {
        unsigned int nents = 0;
 
        while (nbytes && sg_list) {
-               if (sg_list->length) {
-                       nents++;
-                       /* get the number of bytes in the last entry */
-                       *lbytes = nbytes;
-                       nbytes -= (sg_list->length > nbytes) ?
-                                       nbytes : sg_list->length;
-                       sg_list = sg_next(sg_list);
-               } else {
-                       sg_list = (struct scatterlist *)sg_page(sg_list);
-                       if (is_chained)
-                               *is_chained = true;
-               }
+               nents++;
+               /* get the number of bytes in the last entry */
+               *lbytes = nbytes;
+               nbytes -= (sg_list->length > nbytes) ?
+                               nbytes : sg_list->length;
+               sg_list = sg_next(sg_list);
        }
        dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
        return nents;
@@ -142,7 +135,7 @@ void cc_copy_sg_portion(struct device *d
 {
        u32 nents, lbytes;
 
-       nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+       nents = cc_get_sgl_nents(dev, sg, end, &lbytes);
        sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
                       (direct == CC_SG_TO_BUF));
 }
@@ -311,40 +304,10 @@ static void cc_add_sg_entry(struct devic
        sgl_data->num_of_buffers++;
 }
 
-static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
-                         enum dma_data_direction direction)
-{
-       u32 i, j;
-       struct scatterlist *l_sg = sg;
-
-       for (i = 0; i < nents; i++) {
-               if (!l_sg)
-                       break;
-               if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
-                       dev_err(dev, "dma_map_page() sg buffer failed\n");
-                       goto err;
-               }
-               l_sg = sg_next(l_sg);
-       }
-       return nents;
-
-err:
-       /* Restore mapped parts */
-       for (j = 0; j < i; j++) {
-               if (!sg)
-                       break;
-               dma_unmap_sg(dev, sg, 1, direction);
-               sg = sg_next(sg);
-       }
-       return 0;
-}
-
 static int cc_map_sg(struct device *dev, struct scatterlist *sg,
                      unsigned int nbytes, int direction, u32 *nents,
                      u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 {
-       bool is_chained = false;
-
        if (sg_is_last(sg)) {
                /* One entry only case -set to DLLI */
                if (dma_map_sg(dev, sg, 1, direction) != 1) {
@@ -358,35 +321,21 @@ static int cc_map_sg(struct device *dev,
                *nents = 1;
                *mapped_nents = 1;
        } else { /*sg_is_last*/
-               *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
-                                         &is_chained);
+               *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
                if (*nents > max_sg_nents) {
                        *nents = 0;
                        dev_err(dev, "Too many fragments. current %d max %d\n",
                                *nents, max_sg_nents);
                        return -ENOMEM;
                }
-               if (!is_chained) {
-                       /* In case of mmu the number of mapped nents might
-                        * be changed from the original sgl nents
-                        */
-                       *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-                       if (*mapped_nents == 0) {
-                               *nents = 0;
-                               dev_err(dev, "dma_map_sg() sg buffer failed\n");
-                               return -ENOMEM;
-                       }
-               } else {
-                       /*In this case the driver maps entry by entry so it
-                        * must have the same nents before and after map
-                        */
-                       *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
-                                                     direction);
-                       if (*mapped_nents != *nents) {
-                               *nents = *mapped_nents;
-                               dev_err(dev, "dma_map_sg() sg buffer failed\n");
-                               return -ENOMEM;
-                       }
+               /* In case of mmu the number of mapped nents might
+                * be changed from the original sgl nents
+                */
+               *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+               if (*mapped_nents == 0) {
+                       *nents = 0;
+                       dev_err(dev, "dma_map_sg() sg buffer failed\n");
+                       return -ENOMEM;
                }
        }
 
@@ -571,7 +520,6 @@ void cc_unmap_aead_request(struct device
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_drvdata *drvdata = dev_get_drvdata(dev);
        u32 dummy;
-       bool chained;
        u32 size_to_unmap = 0;
 
        if (areq_ctx->mac_buf_dma_addr) {
@@ -636,15 +584,14 @@ void cc_unmap_aead_request(struct device
                size_to_unmap += crypto_aead_ivsize(tfm);
 
        dma_unmap_sg(dev, req->src,
-                    cc_get_sgl_nents(dev, req->src, size_to_unmap,
-                                     &dummy, &chained),
+                    cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy),
                     DMA_BIDIRECTIONAL);
        if (req->src != req->dst) {
                dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
                        sg_virt(req->dst));
                dma_unmap_sg(dev, req->dst,
                             cc_get_sgl_nents(dev, req->dst, size_to_unmap,
-                                             &dummy, &chained),
+                                             &dummy),
                             DMA_BIDIRECTIONAL);
        }
        if (drvdata->coherent &&
@@ -1022,7 +969,6 @@ static int cc_aead_chain_data(struct cc_
        unsigned int size_for_map = req->assoclen + req->cryptlen;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        u32 sg_index = 0;
-       bool chained = false;
        bool is_gcm4543 = areq_ctx->is_gcm4543;
        u32 size_to_skip = req->assoclen;
 
@@ -1043,7 +989,7 @@ static int cc_aead_chain_data(struct cc_
        size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                        authsize : 0;
        src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
-                                           &src_last_bytes, &chained);
+                                           &src_last_bytes);
        sg_index = areq_ctx->src_sgl->length;
        //check where the data starts
        while (sg_index <= size_to_skip) {
@@ -1085,7 +1031,7 @@ static int cc_aead_chain_data(struct cc_
        }
 
        dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
-                                           &dst_last_bytes, &chained);
+                                           &dst_last_bytes);
        sg_index = areq_ctx->dst_sgl->length;
        offset = size_to_skip;
 
@@ -1486,7 +1432,7 @@ int cc_map_hash_request_update(struct cc
                dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
                        curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
                areq_ctx->in_nents =
-                       cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+                       cc_get_sgl_nents(dev, src, nbytes, &dummy);
                sg_copy_to_buffer(src, areq_ctx->in_nents,
                                  &curr_buff[*curr_buff_cnt], nbytes);
                *curr_buff_cnt += nbytes;