From 7b537b24e76a1e8e6d7ea91483a45d5b1426809b Mon Sep 17 00:00:00 2001
From: Gary R Hook <gary.hook@amd.com>
Date: Fri, 21 Apr 2017 10:50:05 -0500
Subject: crypto: ccp - Change ISR handler method for a v3 CCP

From: Gary R Hook <gary.hook@amd.com>

commit 7b537b24e76a1e8e6d7ea91483a45d5b1426809b upstream.

The CCP can perform several operations simultaneously, but it has only
one interrupt. When implemented as a PCI device using MSI-X/MSI
interrupts, use a tasklet model to service interrupts. By disabling
and re-enabling interrupts from the CCP, coupled with the queuing that
tasklets provide, we can ensure that all events (occurring on the
device) are recognized and serviced.
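
As an illustration only -- this is not the driver's code; the demo_*
names and register offsets below are invented -- the general shape of
this mask/schedule/service pattern is:

    #include <linux/interrupt.h>
    #include <linux/io.h>

    /* Hypothetical device context, for illustration. */
    struct demo_dev {
        void __iomem *regs;             /* mapped MMIO registers */
        u32 irq_mask;                   /* interrupt bits of all queues */
        struct tasklet_struct bh;       /* bottom half */
    };

    #define DEMO_IRQ_MASK_REG   0x00    /* invented offsets */
    #define DEMO_IRQ_STATUS_REG 0x04

    /* Bottom half: service all pending events, then unmask. */
    static void demo_irq_bh(unsigned long data)
    {
        struct demo_dev *dd = (struct demo_dev *)data;
        u32 status = ioread32(dd->regs + DEMO_IRQ_STATUS_REG);

        /* ... dispatch per-queue work based on the status bits ... */

        /* Acknowledge everything seen in this pass */
        iowrite32(status, dd->regs + DEMO_IRQ_STATUS_REG);

        /* Unmask last, so events that arrived meanwhile are not lost */
        iowrite32(dd->irq_mask, dd->regs + DEMO_IRQ_MASK_REG);
    }

    /* Top half: mask the device and defer to the tasklet. */
    static irqreturn_t demo_irq_handler(int irq, void *data)
    {
        struct demo_dev *dd = data;

        iowrite32(0, dd->regs + DEMO_IRQ_MASK_REG);
        tasklet_schedule(&dd->bh);
        return IRQ_HANDLED;
    }

    /* At probe time (not shown):
     *   tasklet_init(&dd->bh, demo_irq_bh, (unsigned long)dd);
     *   request_irq(irq, demo_irq_handler, 0, "demo", dd);
     */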

This change fixes a problem wherein two or more busy queues can cause
notification bits to change state while a (CCP) interrupt is being
serviced, but after the queue state has been evaluated. This results
in the event being 'lost' and the queue hanging, waiting to be
serviced. Since the status bits are never fully de-asserted, the CCP
never generates another interrupt (all bits zero -> one or more bits
one), and no further CCP operations will be executed.
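
Concretely, a possible lost-event interleaving in the old single-pass
handler (the queue numbering is illustrative):

    /*
     * ISR:  status = ioread32(IRQ_STATUS_REG);  // only queue 0's bit set
     *          <-- queue 1 completes here; its status bit asserts -->
     * ISR:  loop over queues using the stale 'status';
     *       only queue 0 is serviced and acknowledged
     * ISR:  return IRQ_HANDLED;
     *
     * Queue 1's bit is set but was never evaluated. The status register
     * never returns to all-zero, so no new interrupt (all bits zero ->
     * one or more bits one) is generated and queue 1 hangs. Masking in
     * the top half and unmasking only after the tasklet's final pass
     * closes this window.
     */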

Signed-off-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/crypto/ccp/ccp-dev-v3.c |  120 +++++++++++++++++++++++-----------------
 drivers/crypto/ccp/ccp-dev.h    |    3 +
 drivers/crypto/ccp/ccp-pci.c    |    2 +
 3 files changed, 75 insertions(+), 50 deletions(-)

--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -315,17 +315,73 @@ static int ccp_perform_ecc(struct ccp_op
 	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
 }
 
+static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
+{
+	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
+{
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_irq_bh(unsigned long data)
+{
+	struct ccp_device *ccp = (struct ccp_device *)data;
+	struct ccp_cmd_queue *cmd_q;
+	u32 q_int, status;
+	unsigned int i;
+
+	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+
+		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+		if (q_int) {
+			cmd_q->int_status = status;
+			cmd_q->q_status = ioread32(cmd_q->reg_status);
+			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+			/* On error, only save the first error value */
+			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+			cmd_q->int_rcvd = 1;
+
+			/* Acknowledge the interrupt and wake the kthread */
+			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+			wake_up_interruptible(&cmd_q->int_queue);
+		}
+	}
+	ccp_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp_irq_handler(int irq, void *data)
+{
+	struct device *dev = data;
+	struct ccp_device *ccp = dev_get_drvdata(dev);
+
+	ccp_disable_queue_interrupts(ccp);
+	if (ccp->use_tasklet)
+		tasklet_schedule(&ccp->irq_tasklet);
+	else
+		ccp_irq_bh((unsigned long)ccp);
+
+	return IRQ_HANDLED;
+}
+
 static int ccp_init(struct ccp_device *ccp)
 {
 	struct device *dev = ccp->dev;
 	struct ccp_cmd_queue *cmd_q;
 	struct dma_pool *dma_pool;
 	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
-	unsigned int qmr, qim, i;
+	unsigned int qmr, i;
 	int ret;
 
 	/* Find available queues */
-	qim = 0;
+	ccp->qim = 0;
 	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
 	for (i = 0; i < MAX_HW_QUEUES; i++) {
 		if (!(qmr & (1 << i)))
@@ -370,7 +426,7 @@ static int ccp_init(struct ccp_device *c
 		init_waitqueue_head(&cmd_q->int_queue);
 
 		/* Build queue interrupt mask (two interrupts per queue) */
-		qim |= cmd_q->int_ok | cmd_q->int_err;
+		ccp->qim |= cmd_q->int_ok | cmd_q->int_err;
 
 #ifdef CONFIG_ARM64
 		/* For arm64 set the recommended queue cache settings */
@@ -388,14 +444,14 @@ static int ccp_init(struct ccp_device *c
 	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
 
 	/* Disable and clear interrupts until ready */
-	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+	ccp_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		ioread32(cmd_q->reg_int_status);
 		ioread32(cmd_q->reg_status);
 	}
-	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
 
 	/* Request an irq */
 	ret = ccp->get_irq(ccp);
@@ -404,6 +460,11 @@ static int ccp_init(struct ccp_device *c
 		goto e_pool;
 	}
 
+	/* Initialize the ISR tasklet? */
+	if (ccp->use_tasklet)
+		tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
+			     (unsigned long)ccp);
+
 	dev_dbg(dev, "Starting threads...\n");
 	/* Create a kthread for each queue */
 	for (i = 0; i < ccp->cmd_q_count; i++) {
@@ -426,7 +487,7 @@ static int ccp_init(struct ccp_device *c
 
 	dev_dbg(dev, "Enabling interrupts...\n");
 	/* Enable interrupts */
-	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+	ccp_enable_queue_interrupts(ccp);
 
 	dev_dbg(dev, "Registering device...\n");
 	ccp_add_device(ccp);
@@ -463,7 +524,7 @@ static void ccp_destroy(struct ccp_devic
 {
 	struct ccp_cmd_queue *cmd_q;
 	struct ccp_cmd *cmd;
-	unsigned int qim, i;
+	unsigned int i;
 
 	/* Unregister the DMA engine */
 	ccp_dmaengine_unregister(ccp);
@@ -474,22 +535,15 @@ static void ccp_destroy(struct ccp_devic
 	/* Remove this device from the list of available units */
 	ccp_del_device(ccp);
 
-	/* Build queue interrupt mask (two interrupt masks per queue) */
-	qim = 0;
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-		qim |= cmd_q->int_ok | cmd_q->int_err;
-	}
-
 	/* Disable and clear interrupts */
-	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+	ccp_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		ioread32(cmd_q->reg_int_status);
 		ioread32(cmd_q->reg_status);
 	}
-	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
 
 	/* Stop the queue kthreads */
 	for (i = 0; i < ccp->cmd_q_count; i++)
@@ -516,40 +570,6 @@ static void ccp_destroy(struct ccp_devic
 	}
 }
 
-static irqreturn_t ccp_irq_handler(int irq, void *data)
-{
-	struct device *dev = data;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
-	struct ccp_cmd_queue *cmd_q;
-	u32 q_int, status;
-	unsigned int i;
-
-	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
-
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-
-		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
-		if (q_int) {
-			cmd_q->int_status = status;
-			cmd_q->q_status = ioread32(cmd_q->reg_status);
-			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
-			/* On error, only save the first error value */
-			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
-				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
-			cmd_q->int_rcvd = 1;
-
-			/* Acknowledge the interrupt and wake the kthread */
-			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
-			wake_up_interruptible(&cmd_q->int_queue);
-		}
-	}
-
-	return IRQ_HANDLED;
-}
-
 static const struct ccp_actions ccp3_actions = {
 	.aes = ccp_perform_aes,
 	.xts_aes = ccp_perform_xts_aes,
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -336,7 +336,10 @@ struct ccp_device {
 	void *dev_specific;
 	int (*get_irq)(struct ccp_device *ccp);
 	void (*free_irq)(struct ccp_device *ccp);
+	unsigned int qim;
 	unsigned int irq;
+	bool use_tasklet;
+	struct tasklet_struct irq_tasklet;
 
 	/* I/O area used for device communication. The register mapping
 	 * starts at an offset into the mapped bar.
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -69,6 +69,7 @@ static int ccp_get_msix_irqs(struct ccp_
 			goto e_irq;
 		}
 	}
+	ccp->use_tasklet = true;
 
 	return 0;
 
@@ -100,6 +101,7 @@ static int ccp_get_msi_irq(struct ccp_de
 		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
 		goto e_msi;
 	}
+	ccp->use_tasklet = true;
 
 	return 0;
 