From foo@baz Tue Oct 2 04:59:29 PDT 2018
From: Srikanth Jampala <Jampala.Srikanth@cavium.com>
Date: Wed, 22 Aug 2018 12:40:52 +0530
Subject: crypto: cavium/nitrox - fix for command corruption in queue full case with backlog submissions.

From: Srikanth Jampala <Jampala.Srikanth@cavium.com>

[ Upstream commit 3d7c82060d1fe65bde4023aac41a0b1bd7718e07 ]

Earlier, the current command was posted without rechecking for queue
full after the backlog submissions. Now, post the current command only
after confirming that space is available in the queue once the backlog
commands have been submitted.

Maintain a host-side write index instead of reading device registers
to get the next free slot for posting the command.

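As an illustration only (not part of the applied change), the wraparound
behaviour of such a host-maintained index can be modelled in plain C; the
helper mirrors the incr_index() added to nitrox_reqmgr.c below, while
main() is just a hypothetical caller:

	#include <assert.h>

	/* the next free slot is kept in host memory and wrapped at the
	 * ring size instead of being read back from a device register */
	static int incr_index(int index, int count, int max)
	{
		if ((index + count) >= max)
			return index + count - max;	/* wrap past ring end */
		return index + count;
	}

	int main(void)
	{
		int qlen = 4, idx = 0;

		idx = incr_index(idx, 1, qlen);	/* idx == 1 */
		idx = incr_index(idx, 3, qlen);	/* wraps back to 0 */
		assert(idx == 0);
		return 0;
	}
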
Return -ENOSPC in the queue full case.

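For context only, the reworked submission flow (drain the backlog first,
then post the new command only if the queue is confirmed to have room,
otherwise backlog it or fail) can be sketched as a small userspace model;
struct queue, MAY_BACKLOG and enqueue() are made-up stand-ins here, and
the real logic is nitrox_enqueue_request() in the diff that follows:

	#include <errno.h>
	#include <stdio.h>

	#define MAY_BACKLOG 0x1	/* stand-in for CRYPTO_TFM_REQ_MAY_BACKLOG */

	struct queue {
		int used;	/* commands currently in the ring */
		int qlen;	/* ring size */
		int backlog;	/* commands parked on the backlog list */
	};

	static int enqueue(struct queue *q, unsigned int flags)
	{
		/* give backlogged commands a chance to go out first */
		while (q->backlog && q->used < q->qlen) {
			q->backlog--;
			q->used++;
		}

		/* post the new command only after space is confirmed */
		if (q->used >= q->qlen) {
			if (!(flags & MAY_BACKLOG))
				return -ENOSPC;	/* hard queue-full error */
			q->backlog++;
			return -EBUSY;	/* accepted, deferred to backlog */
		}

		q->used++;
		return -EINPROGRESS;	/* command is now in flight */
	}

	int main(void)
	{
		struct queue q = { .used = 2, .qlen = 2, .backlog = 0 };

		printf("%d\n", enqueue(&q, 0));			/* -ENOSPC */
		printf("%d\n", enqueue(&q, MAY_BACKLOG));	/* -EBUSY */
		return 0;
	}
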
Signed-off-by: Srikanth Jampala <Jampala.Srikanth@cavium.com>
Reviewed-by: Gadam Sreerama <sgadam@cavium.com>
Tested-by: Jha, Chandan <Chandan.Jha@cavium.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/crypto/cavium/nitrox/nitrox_dev.h    |    3 -
 drivers/crypto/cavium/nitrox/nitrox_lib.c    |    1 
 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c |   57 +++++++++++++++------------
 3 files changed, 35 insertions(+), 26 deletions(-)

--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -35,6 +35,7 @@ struct nitrox_cmdq {
 	/* requests in backlog queues */
 	atomic_t backlog_count;

+	int write_idx;
 	/* command size 32B/64B */
 	u8 instr_size;
 	u8 qno;
@@ -87,7 +88,7 @@ struct nitrox_bh {
 	struct bh_data *slc;
 };

-/* NITROX-5 driver state */
+/* NITROX-V driver state */
 #define NITROX_UCODE_LOADED 0
 #define NITROX_READY 1

--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitro
 	cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
 	cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
 	cmdq->qsize = (qsize + PKT_IN_ALIGN);
+	cmdq->write_idx = 0;

 	spin_lock_init(&cmdq->response_lock);
 	spin_lock_init(&cmdq->cmdq_lock);
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -42,6 +42,16 @@
  * Invalid flag options in AES-CCM IV.
  */

+static inline int incr_index(int index, int count, int max)
+{
+	if ((index + count) >= max)
+		index = index + count - max;
+	else
+		index += count;
+
+	return index;
+}
+
 /**
  * dma_free_sglist - unmap and free the sg lists.
  * @ndev: N5 device
@@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_
 			  struct nitrox_cmdq *cmdq)
 {
 	struct nitrox_device *ndev = sr->ndev;
-	union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
-	u64 offset;
+	int idx;
 	u8 *ent;

 	spin_lock_bh(&cmdq->cmdq_lock);

-	/* get the next write offset */
-	offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
-	pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
+	idx = cmdq->write_idx;
 	/* copy the instruction */
-	ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
+	ent = cmdq->head + (idx * cmdq->instr_size);
 	memcpy(ent, &sr->instr, cmdq->instr_size);
-	/* flush the command queue updates */
-	dma_wmb();

-	sr->tstamp = jiffies;
 	atomic_set(&sr->status, REQ_POSTED);
 	response_list_add(sr, cmdq);
+	sr->tstamp = jiffies;
+	/* flush the command queue updates */
+	dma_wmb();

 	/* Ring doorbell with count 1 */
 	writeq(1, cmdq->dbell_csr_addr);
 	/* orders the doorbell rings */
 	mmiowb();

+	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
+
 	spin_unlock_bh(&cmdq->cmdq_lock);
 }

@@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitr
 	struct nitrox_softreq *sr, *tmp;
 	int ret = 0;

+	if (!atomic_read(&cmdq->backlog_count))
+		return 0;
+
 	spin_lock_bh(&cmdq->backlog_lock);

 	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
@@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitr

 		/* submit until space available */
 		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
-			ret = -EBUSY;
+			ret = -ENOSPC;
 			break;
 		}
 		/* delete from backlog list */
@@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct
 {
 	struct nitrox_cmdq *cmdq = sr->cmdq;
 	struct nitrox_device *ndev = sr->ndev;
-	int ret = -EBUSY;
+
+	/* try to post backlog requests */
+	post_backlog_cmds(cmdq);

 	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
 		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-			return -EAGAIN;
-
+			return -ENOSPC;
+		/* add to backlog list */
 		backlog_list_add(sr, cmdq);
-	} else {
-		ret = post_backlog_cmds(cmdq);
-		if (ret) {
-			backlog_list_add(sr, cmdq);
-			return ret;
-		}
-		post_se_instr(sr, cmdq);
-		ret = -EINPROGRESS;
+		return -EBUSY;
 	}
-	return ret;
+	post_se_instr(sr, cmdq);
+
+	return -EINPROGRESS;
 }

 /**
@@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nit
 	 */
 	sr->instr.fdata[0] = *((u64 *)&req->gph);
 	sr->instr.fdata[1] = 0;
-	/* flush the soft_req changes before posting the cmd */
-	wmb();

 	ret = nitrox_enqueue_request(sr);
-	if (ret == -EAGAIN)
+	if (ret == -ENOSPC)
 		goto send_fail;

 	return ret;