// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Advanced Micro Devices, Inc */

#include <linux/pci.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <linux/virtio_pci_modern.h>

#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>
#include <linux/pds/pds_auxbus.h>

#include "vdpa_dev.h"
#include "aux_drv.h"
#include "cmds.h"
#include "debugfs.h"

static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev);

static struct pds_vdpa_device *vdpa_to_pdsv(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct pds_vdpa_device, vdpa_dev);
}

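/* pds_vdpa_notify_handler - relay PDS events to the vdpa config callback
 * Reset and link-change events from the DSC are surfaced to the virtio
 * driver as config-change notifications.
 */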
static int pds_vdpa_notify_handler(struct notifier_block *nb,
				   unsigned long ecode,
				   void *data)
{
	struct pds_vdpa_device *pdsv = container_of(nb, struct pds_vdpa_device, nb);
	struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;

	dev_dbg(dev, "%s: event code %lu\n", __func__, ecode);

	if (ecode == PDS_EVENT_RESET || ecode == PDS_EVENT_LINK_CHANGE) {
		if (pdsv->config_cb.callback)
			pdsv->config_cb.callback(pdsv->config_cb.private);
	}

	return 0;
}

static int pds_vdpa_register_event_handler(struct pds_vdpa_device *pdsv)
{
	struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
	struct notifier_block *nb = &pdsv->nb;
	int err;

	if (!nb->notifier_call) {
		nb->notifier_call = pds_vdpa_notify_handler;
		err = pdsc_register_notify(nb);
		if (err) {
			nb->notifier_call = NULL;
			dev_err(dev, "failed to register pds event handler: %pe\n",
				ERR_PTR(err));
			return -EINVAL;
		}
		dev_dbg(dev, "pds event handler registered\n");
	}

	return 0;
}

static void pds_vdpa_unregister_event_handler(struct pds_vdpa_device *pdsv)
{
	if (pdsv->nb.notifier_call) {
		pdsc_unregister_notify(&pdsv->nb);
		pdsv->nb.notifier_call = NULL;
	}
}

static int pds_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				   u64 desc_addr, u64 driver_addr, u64 device_addr)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->vqs[qid].desc_addr = desc_addr;
	pdsv->vqs[qid].avail_addr = driver_addr;
	pdsv->vqs[qid].used_addr = device_addr;

	return 0;
}

static void pds_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->vqs[qid].q_len = num;
}

static void pds_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	iowrite16(qid, pdsv->vqs[qid].notify);
}

static void pds_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
			       struct vdpa_callback *cb)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->vqs[qid].event_cb = *cb;
}

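/* Interrupt handler for a single vq: invoke the driver's event callback */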
static irqreturn_t pds_vdpa_isr(int irq, void *data)
{
	struct pds_vdpa_vq_info *vq;

	vq = data;
	if (vq->event_cb.callback)
		vq->event_cb.callback(vq->event_cb.private);

	return IRQ_HANDLED;
}

static void pds_vdpa_release_irq(struct pds_vdpa_device *pdsv, int qid)
{
	if (pdsv->vqs[qid].irq == VIRTIO_MSI_NO_VECTOR)
		return;

	free_irq(pdsv->vqs[qid].irq, &pdsv->vqs[qid]);
	pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
}

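/* pds_vdpa_set_vq_ready - pass a vq enable/disable to the device
 * The DSC does its vq setup and teardown through adminq commands,
 * so init_vq/reset_vq are sent here rather than a register write.
 */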
static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u64 driver_features;
	u16 invert_idx = 0;
	int err;

	dev_dbg(dev, "%s: qid %d ready %d => %d\n",
		__func__, qid, pdsv->vqs[qid].ready, ready);
	if (ready == pdsv->vqs[qid].ready)
		return;

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED))
		invert_idx = PDS_VDPA_PACKED_INVERT_IDX;

	if (ready) {
		/* Pass the vq setup info to the DSC over the adminq, gathered
		 * up and sent all at once so the FW can do its full setup in
		 * one operation
		 */
		err = pds_vdpa_cmd_init_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
		if (err) {
			dev_err(dev, "Failed to init vq %d: %pe\n",
				qid, ERR_PTR(err));
			ready = false;
		}
	} else {
		err = pds_vdpa_cmd_reset_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
		if (err)
			dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
	}

	pdsv->vqs[qid].ready = ready;
}

static bool pds_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->vqs[qid].ready;
}

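/* The vq indices are only cached here; they are handed to the FW when
 * the vq is initialized, and may only be changed while the vq is disabled.
 */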
static int pds_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				 const struct vdpa_vq_state *state)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	u64 driver_features;
	u16 avail;
	u16 used;

	if (pdsv->vqs[qid].ready) {
		dev_err(dev, "Setting device position is denied while vq is enabled\n");
		return -EINVAL;
	}

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		avail = state->packed.last_avail_idx |
			(state->packed.last_avail_counter << 15);
		used = state->packed.last_used_idx |
		       (state->packed.last_used_counter << 15);

		/* The avail and used index are stored with the packed wrap
		 * counter bit inverted. This way, in case set_vq_state is
		 * not called, the initial value can be set to zero prior to
		 * feature negotiation, and it is good for both packed and
		 * split vq.
		 */
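		/* Worked example: avail = 0x8005 (last_avail_idx 5, wrap
		 * counter 1) has its bit-15 wrap counter inverted and is
		 * stored as 0x0005, so a never-set index of zero reads back
		 * as wrap counter 1, the packed ring's initial wrap state.
		 */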
		avail ^= PDS_VDPA_PACKED_INVERT_IDX;
		used ^= PDS_VDPA_PACKED_INVERT_IDX;
	} else {
		avail = state->split.avail_index;
		/* state->split does not provide a used_index:
		 * the vq will be set to "empty" here, and the vq will read
		 * the current used index the next time the vq is kicked.
		 */
		used = avail;
	}

	if (used != avail) {
		dev_dbg(dev, "Setting used equal to avail, for interoperability\n");
		used = avail;
	}

	pdsv->vqs[qid].avail_idx = avail;
	pdsv->vqs[qid].used_idx = used;

	return 0;
}

static int pds_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_vq_state *state)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	u64 driver_features;
	u16 avail;
	u16 used;

	if (pdsv->vqs[qid].ready) {
		dev_err(dev, "Getting device position is denied while vq is enabled\n");
		return -EINVAL;
	}

	avail = pdsv->vqs[qid].avail_idx;
	used = pdsv->vqs[qid].used_idx;

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		avail ^= PDS_VDPA_PACKED_INVERT_IDX;
		used ^= PDS_VDPA_PACKED_INVERT_IDX;

		state->packed.last_avail_idx = avail & 0x7fff;
		state->packed.last_avail_counter = avail >> 15;
		state->packed.last_used_idx = used & 0x7fff;
		state->packed.last_used_counter = used >> 15;
	} else {
		state->split.avail_index = avail;
		/* state->split does not provide a used_index. */
	}

	return 0;
}

static struct vdpa_notification_area
pds_vdpa_get_vq_notification(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct virtio_pci_modern_device *vd_mdev;
	struct vdpa_notification_area area;

	area.addr = pdsv->vqs[qid].notify_pa;

	vd_mdev = &pdsv->vdpa_aux->vd_mdev;
	if (!vd_mdev->notify_offset_multiplier)
		area.size = PDS_PAGE_SIZE;
	else
		area.size = vd_mdev->notify_offset_multiplier;

	return area;
}

static int pds_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->vqs[qid].irq;
}

static u32 pds_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return PDS_PAGE_SIZE;
}

static u32 pds_vdpa_get_vq_group(struct vdpa_device *vdpa_dev, u16 idx)
{
	return 0;
}

static u64 pds_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->supported_features;
}

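/* pds_vdpa_set_driver_features - check and record the negotiated features
 * The requested features must be a subset of what we advertised; a faked
 * VIRTIO_NET_F_MAC is stripped before the features are written to the device.
 */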
static int pds_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u64 driver_features;
	u64 nego_features;
	u64 hw_features;
	u64 missing;

	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
		dev_err(dev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
		return -EOPNOTSUPP;
	}

	/* Check for valid feature bits */
	nego_features = features & pdsv->supported_features;
	missing = features & ~nego_features;
	if (missing) {
		dev_err(dev, "Can't support all requested features in %#llx, missing %#llx features\n",
			features, missing);
		return -EOPNOTSUPP;
	}

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	pdsv->negotiated_features = nego_features;
	dev_dbg(dev, "%s: %#llx => %#llx\n",
		__func__, driver_features, nego_features);

	/* if we're faking the F_MAC, strip it before writing to device */
	hw_features = le64_to_cpu(pdsv->vdpa_aux->ident.hw_features);
	if (!(hw_features & BIT_ULL(VIRTIO_NET_F_MAC)))
		nego_features &= ~BIT_ULL(VIRTIO_NET_F_MAC);

	if (driver_features == nego_features)
		return 0;

	vp_modern_set_features(&pdsv->vdpa_aux->vd_mdev, nego_features);

	return 0;
}

static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->negotiated_features;
}

static void pds_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				   struct vdpa_callback *cb)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->config_cb.callback = cb->callback;
	pdsv->config_cb.private = cb->private;
}

static u16 pds_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	/* qemu has assert() that vq_num_max <= VIRTQUEUE_MAX_SIZE (1024) */
	return min_t(u16, 1024, BIT(le16_to_cpu(pdsv->vdpa_aux->ident.max_qlen)));
}

static u32 pds_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	return VIRTIO_ID_NET;
}

static u32 pds_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	return PCI_VENDOR_ID_PENSANDO;
}

static u8 pds_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return vp_modern_get_status(&pdsv->vdpa_aux->vd_mdev);
}

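/* pds_vdpa_request_irqs - set up one msix vector per vq
 * The vectors are requested when the driver sets DRIVER_OK and released
 * when it is cleared, so an unconfigured device doesn't hold vectors.
 */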
static int pds_vdpa_request_irqs(struct pds_vdpa_device *pdsv)
{
	struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
	struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux;
	struct device *dev = &pdsv->vdpa_dev.dev;
	int max_vq, nintrs, qid, err;

	max_vq = vdpa_aux->vdpa_mdev.max_supported_vqs;

	nintrs = pci_alloc_irq_vectors(pdev, max_vq, max_vq, PCI_IRQ_MSIX);
	if (nintrs < 0) {
		dev_err(dev, "Couldn't get %d msix vectors: %pe\n",
			max_vq, ERR_PTR(nintrs));
		return nintrs;
	}

	for (qid = 0; qid < pdsv->num_vqs; ++qid) {
		int irq = pci_irq_vector(pdev, qid);

		snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name),
			 "vdpa-%s-%d", dev_name(dev), qid);

		err = request_irq(irq, pds_vdpa_isr, 0,
				  pdsv->vqs[qid].irq_name,
				  &pdsv->vqs[qid]);
		if (err) {
			dev_err(dev, "%s: no irq for qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
			goto err_release;
		}

		pdsv->vqs[qid].irq = irq;
	}

	vdpa_aux->nintrs = nintrs;

	return 0;

err_release:
	while (qid--)
		pds_vdpa_release_irq(pdsv, qid);

	pci_free_irq_vectors(pdev);

	vdpa_aux->nintrs = 0;

	return err;
}

static void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv)
{
	struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
	struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux;
	int qid;

	if (!vdpa_aux->nintrs)
		return;

	for (qid = 0; qid < pdsv->num_vqs; qid++)
		pds_vdpa_release_irq(pdsv, qid);

	pci_free_irq_vectors(pdev);

	vdpa_aux->nintrs = 0;
}

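/* pds_vdpa_set_status - propagate a status change to the device
 * DRIVER_OK transitions bracket the irq handling: vectors are requested
 * on 0 -> DRIVER_OK and released on DRIVER_OK -> 0.  A status of 0 resets
 * the device state, including the saved config callback and vq indices.
 */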
static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u8 old_status;
	int i;

	old_status = pds_vdpa_get_status(vdpa_dev);
	dev_dbg(dev, "%s: old %#x new %#x\n", __func__, old_status, status);

	if (status & ~old_status & VIRTIO_CONFIG_S_DRIVER_OK) {
		if (pds_vdpa_request_irqs(pdsv))
			status = old_status | VIRTIO_CONFIG_S_FAILED;
	}

	pds_vdpa_cmd_set_status(pdsv, status);

	if (status == 0) {
		struct vdpa_callback null_cb = { };

		pds_vdpa_set_config_cb(vdpa_dev, &null_cb);
		pds_vdpa_cmd_reset(pdsv);

		for (i = 0; i < pdsv->num_vqs; i++) {
			pdsv->vqs[i].avail_idx = 0;
			pdsv->vqs[i].used_idx = 0;
		}

		pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);
	}

	if (status & ~old_status & VIRTIO_CONFIG_S_FEATURES_OK) {
		for (i = 0; i < pdsv->num_vqs; i++) {
			pdsv->vqs[i].notify =
				vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
							i, &pdsv->vqs[i].notify_pa);
		}
	}

	if (old_status & ~status & VIRTIO_CONFIG_S_DRIVER_OK)
		pds_vdpa_release_irqs(pdsv);
}

static void pds_vdpa_init_vqs_entry(struct pds_vdpa_device *pdsv, int qid,
				    void __iomem *notify)
{
	memset(&pdsv->vqs[qid], 0, sizeof(pdsv->vqs[0]));
	pdsv->vqs[qid].qid = qid;
	pdsv->vqs[qid].pdsv = pdsv;
	pdsv->vqs[qid].ready = false;
	pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
	pdsv->vqs[qid].notify = notify;
}

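/* pds_vdpa_reset - return the device to an unconfigured state
 * If the driver was running, reset each vq before dropping the status,
 * then reinitialize the local vq bookkeeping.
 */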
static int pds_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev;
	int err = 0;
	u8 status;
	int i;

	dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
	status = pds_vdpa_get_status(vdpa_dev);

	if (status == 0)
		return 0;

	if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
		/* Reset the vqs */
		for (i = 0; i < pdsv->num_vqs && !err; i++) {
			err = pds_vdpa_cmd_reset_vq(pdsv, i, 0, &pdsv->vqs[i]);
			if (err)
				dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
					__func__, i, ERR_PTR(err));
		}
	}

	pds_vdpa_set_status(vdpa_dev, 0);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
		/* Reset the vq info */
		for (i = 0; i < pdsv->num_vqs && !err; i++)
			pds_vdpa_init_vqs_entry(pdsv, i, pdsv->vqs[i].notify);
	}

	return 0;
}

static size_t pds_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	return sizeof(struct virtio_net_config);
}

static void pds_vdpa_get_config(struct vdpa_device *vdpa_dev,
				unsigned int offset,
				void *buf, unsigned int len)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	void __iomem *device;

	if (offset + len > sizeof(struct virtio_net_config)) {
		WARN(true, "%s: bad read, offset %d len %d\n", __func__, offset, len);
		return;
	}

	device = pdsv->vdpa_aux->vd_mdev.device;
	memcpy_fromio(buf, device + offset, len);
}

static void pds_vdpa_set_config(struct vdpa_device *vdpa_dev,
				unsigned int offset, const void *buf,
				unsigned int len)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	void __iomem *device;

	if (offset + len > sizeof(struct virtio_net_config)) {
		WARN(true, "%s: bad write, offset %d len %d\n", __func__, offset, len);
		return;
	}

	device = pdsv->vdpa_aux->vd_mdev.device;
	memcpy_toio(device + offset, buf, len);
}

static const struct vdpa_config_ops pds_vdpa_ops = {
	.set_vq_address = pds_vdpa_set_vq_address,
	.set_vq_num = pds_vdpa_set_vq_num,
	.kick_vq = pds_vdpa_kick_vq,
	.set_vq_cb = pds_vdpa_set_vq_cb,
	.set_vq_ready = pds_vdpa_set_vq_ready,
	.get_vq_ready = pds_vdpa_get_vq_ready,
	.set_vq_state = pds_vdpa_set_vq_state,
	.get_vq_state = pds_vdpa_get_vq_state,
	.get_vq_notification = pds_vdpa_get_vq_notification,
	.get_vq_irq = pds_vdpa_get_vq_irq,
	.get_vq_align = pds_vdpa_get_vq_align,
	.get_vq_group = pds_vdpa_get_vq_group,

	.get_device_features = pds_vdpa_get_device_features,
	.set_driver_features = pds_vdpa_set_driver_features,
	.get_driver_features = pds_vdpa_get_driver_features,
	.set_config_cb = pds_vdpa_set_config_cb,
	.get_vq_num_max = pds_vdpa_get_vq_num_max,
	.get_device_id = pds_vdpa_get_device_id,
	.get_vendor_id = pds_vdpa_get_vendor_id,
	.get_status = pds_vdpa_get_status,
	.set_status = pds_vdpa_set_status,
	.reset = pds_vdpa_reset,
	.get_config_size = pds_vdpa_get_config_size,
	.get_config = pds_vdpa_get_config,
	.set_config = pds_vdpa_set_config,
};

static struct virtio_device_id pds_vdpa_id_table[] = {
	{VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
	{0},
};

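/* pds_vdpa_dev_add - create the vdpa device instance for a VF
 * Only one vdpa device per VF: allocate it, reset and identify the HW,
 * size the vq set from the request and the FW limits, pick a MAC, and
 * register on the vdpa bus.
 */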
static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			    const struct vdpa_dev_set_config *add_config)
{
	struct pds_vdpa_aux *vdpa_aux;
	struct pds_vdpa_device *pdsv;
	struct vdpa_mgmt_dev *mgmt;
	u16 fw_max_vqs, vq_pairs;
	struct device *dma_dev;
	struct pci_dev *pdev;
	struct device *dev;
	int err;
	int i;

	vdpa_aux = container_of(mdev, struct pds_vdpa_aux, vdpa_mdev);
	dev = &vdpa_aux->padev->aux_dev.dev;
	mgmt = &vdpa_aux->vdpa_mdev;

	if (vdpa_aux->pdsv) {
		dev_warn(dev, "Multiple vDPA devices on a VF are not supported.\n");
		return -EOPNOTSUPP;
	}

	pdsv = vdpa_alloc_device(struct pds_vdpa_device, vdpa_dev,
				 dev, &pds_vdpa_ops, 1, 1, name, false);
	if (IS_ERR(pdsv)) {
		dev_err(dev, "Failed to allocate vDPA structure: %pe\n", pdsv);
		return PTR_ERR(pdsv);
	}

	vdpa_aux->pdsv = pdsv;
	pdsv->vdpa_aux = vdpa_aux;

	pdev = vdpa_aux->padev->vf_pdev;
	dma_dev = &pdev->dev;
	pdsv->vdpa_dev.dma_dev = dma_dev;

	pdsv->supported_features = mgmt->supported_features;

	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		u64 unsupp_features =
			add_config->device_features & ~pdsv->supported_features;

		if (unsupp_features) {
			dev_err(dev, "Unsupported features: %#llx\n", unsupp_features);
			err = -EOPNOTSUPP;
			goto err_unmap;
		}

		pdsv->supported_features = add_config->device_features;
	}

	err = pds_vdpa_cmd_reset(pdsv);
	if (err) {
		dev_err(dev, "Failed to reset hw: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	err = pds_vdpa_init_hw(pdsv);
	if (err) {
		dev_err(dev, "Failed to init hw: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	fw_max_vqs = le16_to_cpu(pdsv->vdpa_aux->ident.max_vqs);
	vq_pairs = fw_max_vqs / 2;

	/* Make sure we have the queues being requested */
	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
		vq_pairs = add_config->net.max_vq_pairs;

	pdsv->num_vqs = 2 * vq_pairs;
	if (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
		pdsv->num_vqs++;

	if (pdsv->num_vqs > fw_max_vqs) {
		dev_err(dev, "%s: queue count requested %u greater than max %u\n",
			__func__, pdsv->num_vqs, fw_max_vqs);
		err = -ENOSPC;
		goto err_unmap;
	}

	if (pdsv->num_vqs != fw_max_vqs) {
		err = pds_vdpa_cmd_set_max_vq_pairs(pdsv, vq_pairs);
		if (err) {
			dev_err(dev, "Failed to set max_vq_pairs: %pe\n",
				ERR_PTR(err));
			goto err_unmap;
		}
	}

	/* Set a MAC: use the user config if provided,
	 * else the device's MAC if it is not all zeros,
	 * else a random MAC
	 */
	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
		ether_addr_copy(pdsv->mac, add_config->net.mac);
	} else {
		struct virtio_net_config __iomem *vc;

		vc = pdsv->vdpa_aux->vd_mdev.device;
		memcpy_fromio(pdsv->mac, vc->mac, sizeof(pdsv->mac));
		if (is_zero_ether_addr(pdsv->mac) &&
		    (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_MAC))) {
			eth_random_addr(pdsv->mac);
			dev_info(dev, "setting random mac %pM\n", pdsv->mac);
		}
	}
	pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);

	for (i = 0; i < pdsv->num_vqs; i++) {
		void __iomem *notify;

		notify = vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
						 i, &pdsv->vqs[i].notify_pa);
		pds_vdpa_init_vqs_entry(pdsv, i, notify);
	}

	pdsv->vdpa_dev.mdev = &vdpa_aux->vdpa_mdev;

	err = pds_vdpa_register_event_handler(pdsv);
	if (err) {
		dev_err(dev, "Failed to register for PDS events: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	/* We use the _vdpa_register_device() call rather than the
	 * vdpa_register_device() to avoid a deadlock because our
	 * dev_add() is called with the vdpa_dev_lock already set
	 * by vdpa_nl_cmd_dev_add_set_doit()
	 */
	err = _vdpa_register_device(&pdsv->vdpa_dev, pdsv->num_vqs);
	if (err) {
		dev_err(dev, "Failed to register to vDPA bus: %pe\n", ERR_PTR(err));
		goto err_unevent;
	}

	pds_vdpa_debugfs_add_vdpadev(vdpa_aux);

	return 0;

err_unevent:
	pds_vdpa_unregister_event_handler(pdsv);
err_unmap:
	put_device(&pdsv->vdpa_dev.dev);
	vdpa_aux->pdsv = NULL;
	return err;
}

static void pds_vdpa_dev_del(struct vdpa_mgmt_dev *mdev,
			     struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_vdpa_aux *vdpa_aux;

	pds_vdpa_unregister_event_handler(pdsv);

	vdpa_aux = container_of(mdev, struct pds_vdpa_aux, vdpa_mdev);
	_vdpa_unregister_device(vdpa_dev);

	pds_vdpa_cmd_reset(vdpa_aux->pdsv);
	pds_vdpa_debugfs_reset_vdpadev(vdpa_aux);

	vdpa_aux->pdsv = NULL;

	dev_info(&vdpa_aux->padev->aux_dev.dev, "Removed vdpa device\n");
}

static const struct vdpa_mgmtdev_ops pds_vdpa_mgmt_dev_ops = {
	.dev_add = pds_vdpa_dev_add,
	.dev_del = pds_vdpa_dev_del
};

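/* pds_vdpa_get_mgmt_info - query the FW for the mgmt device parameters
 * The ident data is fetched through the PF's adminq into DMA-mapped
 * memory, then used to size the vq and feature limits we advertise.
 */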
int pds_vdpa_get_mgmt_info(struct pds_vdpa_aux *vdpa_aux)
{
	union pds_core_adminq_cmd cmd = {
		.vdpa_ident.opcode = PDS_VDPA_CMD_IDENT,
		.vdpa_ident.vf_id = cpu_to_le16(vdpa_aux->vf_id),
	};
	union pds_core_adminq_comp comp = {};
	struct vdpa_mgmt_dev *mgmt;
	struct pci_dev *pf_pdev;
	struct device *pf_dev;
	struct pci_dev *pdev;
	dma_addr_t ident_pa;
	struct device *dev;
	u16 dev_intrs;
	u16 max_vqs;
	int err;

	dev = &vdpa_aux->padev->aux_dev.dev;
	pdev = vdpa_aux->padev->vf_pdev;
	mgmt = &vdpa_aux->vdpa_mdev;

	/* Get resource info through the PF's adminq. The data comes back as
	 * a block, so we need to map some memory that the PF can hand to the
	 * firmware to write the data into.
	 */
	pf_pdev = pci_physfn(vdpa_aux->padev->vf_pdev);
	pf_dev = &pf_pdev->dev;
	ident_pa = dma_map_single(pf_dev, &vdpa_aux->ident,
				  sizeof(vdpa_aux->ident), DMA_FROM_DEVICE);
	if (dma_mapping_error(pf_dev, ident_pa)) {
		dev_err(dev, "Failed to map ident space\n");
		return -ENOMEM;
	}

	cmd.vdpa_ident.ident_pa = cpu_to_le64(ident_pa);
	cmd.vdpa_ident.len = cpu_to_le32(sizeof(vdpa_aux->ident));
	err = pds_client_adminq_cmd(vdpa_aux->padev, &cmd,
				    sizeof(cmd.vdpa_ident), &comp, 0);
	dma_unmap_single(pf_dev, ident_pa,
			 sizeof(vdpa_aux->ident), DMA_FROM_DEVICE);
	if (err) {
		dev_err(dev, "Failed to ident hw, status %d: %pe\n",
			comp.status, ERR_PTR(err));
		return err;
	}

	max_vqs = le16_to_cpu(vdpa_aux->ident.max_vqs);
	dev_intrs = pci_msix_vec_count(pdev);
	dev_dbg(dev, "ident.max_vqs %d dev_intrs %d\n", max_vqs, dev_intrs);

	max_vqs = min_t(u16, dev_intrs, max_vqs);
	mgmt->max_supported_vqs = min_t(u16, PDS_VDPA_MAX_QUEUES, max_vqs);
	vdpa_aux->nintrs = 0;

	mgmt->ops = &pds_vdpa_mgmt_dev_ops;
	mgmt->id_table = pds_vdpa_id_table;
	mgmt->device = dev;
	mgmt->supported_features = le64_to_cpu(vdpa_aux->ident.hw_features);

	/* advertise F_MAC even if the device doesn't */
	mgmt->supported_features |= BIT_ULL(VIRTIO_NET_F_MAC);

	mgmt->config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);

	return 0;
}