/* SPDX header follows; extraction artifacts removed. */
1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* | |
3 | * Intel IFC VF NIC driver for virtio dataplane offloading | |
4 | * | |
5 | * Copyright (C) 2020 Intel Corporation. | |
6 | * | |
7 | * Author: Zhu Lingshan <lingshan.zhu@intel.com> | |
8 | * | |
9 | */ | |
10 | ||
11 | #include <linux/interrupt.h> | |
12 | #include <linux/module.h> | |
13 | #include <linux/pci.h> | |
14 | #include <linux/sysfs.h> | |
15 | #include "ifcvf_base.h" | |
16 | ||
17 | #define VERSION_STRING "0.1" | |
18 | #define DRIVER_AUTHOR "Intel Corporation" | |
19 | #define IFCVF_DRIVER_NAME "ifcvf" | |
20 | ||
21 | static irqreturn_t ifcvf_intr_handler(int irq, void *arg) | |
22 | { | |
23 | struct vring_info *vring = arg; | |
24 | ||
25 | if (vring->cb.callback) | |
26 | return vring->cb.callback(vring->cb.private); | |
27 | ||
28 | return IRQ_HANDLED; | |
29 | } | |
30 | ||
31 | static int ifcvf_start_datapath(void *private) | |
32 | { | |
33 | struct ifcvf_hw *vf = ifcvf_private_to_vf(private); | |
5a2414bc ZL |
34 | u8 status; |
35 | int ret; | |
36 | ||
5a2414bc ZL |
37 | vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2; |
38 | ret = ifcvf_start_hw(vf); | |
39 | if (ret < 0) { | |
40 | status = ifcvf_get_status(vf); | |
41 | status |= VIRTIO_CONFIG_S_FAILED; | |
42 | ifcvf_set_status(vf, status); | |
43 | } | |
44 | ||
45 | return ret; | |
46 | } | |
47 | ||
48 | static int ifcvf_stop_datapath(void *private) | |
49 | { | |
50 | struct ifcvf_hw *vf = ifcvf_private_to_vf(private); | |
51 | int i; | |
52 | ||
53 | for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) | |
54 | vf->vring[i].cb.callback = NULL; | |
55 | ||
56 | ifcvf_stop_hw(vf); | |
57 | ||
58 | return 0; | |
59 | } | |
60 | ||
61 | static void ifcvf_reset_vring(struct ifcvf_adapter *adapter) | |
62 | { | |
63 | struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter); | |
64 | int i; | |
65 | ||
66 | for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) { | |
67 | vf->vring[i].last_avail_idx = 0; | |
68 | vf->vring[i].desc = 0; | |
69 | vf->vring[i].avail = 0; | |
70 | vf->vring[i].used = 0; | |
71 | vf->vring[i].ready = 0; | |
72 | vf->vring[i].cb.callback = NULL; | |
73 | vf->vring[i].cb.private = NULL; | |
74 | } | |
75 | ||
76 | ifcvf_reset(vf); | |
77 | } | |
78 | ||
/* Map a vdpa_device back to its embedding ifcvf_adapter. */
static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}
83 | ||
84 | static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev) | |
85 | { | |
86 | struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev); | |
87 | ||
88 | return &adapter->vf; | |
89 | } | |
90 | ||
91 | static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev) | |
92 | { | |
93 | struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); | |
94 | u64 features; | |
95 | ||
96 | features = ifcvf_get_features(vf) & IFCVF_SUPPORTED_FEATURES; | |
97 | ||
98 | return features; | |
99 | } | |
100 | ||
101 | static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features) | |
102 | { | |
103 | struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); | |
104 | ||
105 | vf->req_features = features; | |
106 | ||
107 | return 0; | |
108 | } | |
109 | ||
110 | static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev) | |
111 | { | |
112 | struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); | |
113 | ||
114 | return ifcvf_get_status(vf); | |
115 | } | |
116 | ||
/*
 * Update the virtio device status.
 *
 * Writing 0 is a device reset: the datapath is stopped and all vring
 * software state is cleared (ifcvf_reset_vring() also resets the
 * hardware), and the function returns without writing the status byte.
 * When DRIVER_OK is set, the datapath is started before the status is
 * written to the device.
 */
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;

	vf = vdpa_to_vf(vdpa_dev);
	adapter = dev_get_drvdata(vdpa_dev->dev.parent);

	if (status == 0) {
		ifcvf_stop_datapath(adapter);
		ifcvf_reset_vring(adapter);
		return;
	}

	/*
	 * NOTE(review): the datapath is (re)started every time a status
	 * containing DRIVER_OK is written, with no check of the previous
	 * status — confirm repeated DRIVER_OK writes are harmless.
	 */
	if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
		if (ifcvf_start_datapath(adapter) < 0)
			IFCVF_ERR(adapter->pdev,
				  "Failed to set ifcvf vdpa status %u\n",
				  status);
	}

	ifcvf_set_status(vf, status);
}
140 | ||
141 | static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev) | |
142 | { | |
143 | return IFCVF_QUEUE_MAX; | |
144 | } | |
145 | ||
146 | static u64 ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid) | |
147 | { | |
148 | struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); | |
149 | ||
150 | return ifcvf_get_vq_state(vf, qid); | |
151 | } | |
152 | ||
153 | static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid, | |
154 | u64 num) | |
155 | { | |
156 | struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); | |
157 | ||
158 | return ifcvf_set_vq_state(vf, qid, num); | |
159 | } | |
160 | ||
161 | static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid, | |
162 | struct vdpa_callback *cb) | |
163 | { | |
164 | struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); | |
165 | ||
166 | vf->vring[qid].cb = *cb; | |
167 | } | |
168 | ||
169 | static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, | |
170 | u16 qid, bool ready) | |
171 | { | |
172 | struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); | |
173 | ||
174 | vf->vring[qid].ready = ready; | |
175 | } | |
176 | ||
177 | static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid) | |
178 | { | |
179 | struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); | |
180 | ||
181 | return vf->vring[qid].ready; | |
182 | } | |
183 | ||
184 | static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, | |
185 | u32 num) | |
186 | { | |
187 | struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); | |
188 | ||
189 | vf->vring[qid].size = num; | |
190 | } | |
191 | ||
192 | static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid, | |
193 | u64 desc_area, u64 driver_area, | |
194 | u64 device_area) | |
195 | { | |
196 | struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); | |
197 | ||
198 | vf->vring[qid].desc = desc_area; | |
199 | vf->vring[qid].avail = driver_area; | |
200 | vf->vring[qid].used = device_area; | |
201 | ||
202 | return 0; | |
203 | } | |
204 | ||
205 | static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid) | |
206 | { | |
207 | struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); | |
208 | ||
209 | ifcvf_notify_queue(vf, qid); | |
210 | } | |
211 | ||
/*
 * Read the 8-bit config generation counter from the device's common
 * configuration space; it changes whenever the device config does.
 */
static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ioread8(&vf->common_cfg->config_generation);
}
218 | ||
219 | static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev) | |
220 | { | |
221 | return VIRTIO_ID_NET; | |
222 | } | |
223 | ||
224 | static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev) | |
225 | { | |
226 | return IFCVF_SUBSYS_VENDOR_ID; | |
227 | } | |
228 | ||
425a5070 | 229 | static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev) |
5a2414bc ZL |
230 | { |
231 | return IFCVF_QUEUE_ALIGNMENT; | |
232 | } | |
233 | ||
/*
 * Copy @len bytes of the virtio-net config space, starting at @offset,
 * into @buf. The WARN_ON flags (but does not reject) reads beyond
 * struct virtio_net_config.
 */
static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset,
				  void *buf, unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	WARN_ON(offset + len > sizeof(struct virtio_net_config));
	ifcvf_read_net_config(vf, offset, buf, len);
}
243 | ||
/*
 * Write @len bytes from @buf into the virtio-net config space at
 * @offset. The WARN_ON flags (but does not reject) writes beyond
 * struct virtio_net_config.
 */
static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset, const void *buf,
				  unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	WARN_ON(offset + len > sizeof(struct virtio_net_config));
	ifcvf_write_net_config(vf, offset, buf, len);
}
253 | ||
/* Intentionally a no-op: config-change interrupts are not supported. */
static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				     struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}
259 | ||
/*
 * vDPA bus operations for the IFC VF.
 *
 * IFCVF currently doesn't have an on-chip IOMMU, so
 * set_map()/dma_map()/dma_unmap() are not implemented; the vDPA core
 * then uses the platform IOMMU via vdpa.dma_dev.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
	.get_features = ifcvf_vdpa_get_features,
	.set_features = ifcvf_vdpa_set_features,
	.get_status = ifcvf_vdpa_get_status,
	.set_status = ifcvf_vdpa_set_status,
	.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
	.get_vq_state = ifcvf_vdpa_get_vq_state,
	.set_vq_state = ifcvf_vdpa_set_vq_state,
	.set_vq_cb = ifcvf_vdpa_set_vq_cb,
	.set_vq_ready = ifcvf_vdpa_set_vq_ready,
	.get_vq_ready = ifcvf_vdpa_get_vq_ready,
	.set_vq_num = ifcvf_vdpa_set_vq_num,
	.set_vq_address = ifcvf_vdpa_set_vq_address,
	.kick_vq = ifcvf_vdpa_kick_vq,
	.get_generation = ifcvf_vdpa_get_generation,
	.get_device_id = ifcvf_vdpa_get_device_id,
	.get_vendor_id = ifcvf_vdpa_get_vendor_id,
	.get_vq_align = ifcvf_vdpa_get_vq_align,
	.get_config = ifcvf_vdpa_get_config,
	.set_config = ifcvf_vdpa_set_config,
	.set_config_cb = ifcvf_vdpa_set_config_cb,
};
286 | ||
287 | static int ifcvf_request_irq(struct ifcvf_adapter *adapter) | |
288 | { | |
289 | struct pci_dev *pdev = adapter->pdev; | |
290 | struct ifcvf_hw *vf = &adapter->vf; | |
291 | int vector, i, ret, irq; | |
292 | ||
293 | ||
294 | for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) { | |
295 | snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", | |
296 | pci_name(pdev), i); | |
297 | vector = i + IFCVF_MSI_QUEUE_OFF; | |
298 | irq = pci_irq_vector(pdev, vector); | |
299 | ret = devm_request_irq(&pdev->dev, irq, | |
300 | ifcvf_intr_handler, 0, | |
301 | vf->vring[i].msix_name, | |
302 | &vf->vring[i]); | |
303 | if (ret) { | |
304 | IFCVF_ERR(pdev, | |
305 | "Failed to request irq for vq %d\n", i); | |
306 | return ret; | |
307 | } | |
308 | vf->vring[i].irq = irq; | |
309 | } | |
310 | ||
311 | return 0; | |
312 | } | |
313 | ||
/* devres action: release the MSI-X vectors; @data is the pci_dev. */
static void ifcvf_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}
318 | ||
319 | static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |
320 | { | |
321 | struct device *dev = &pdev->dev; | |
322 | struct ifcvf_adapter *adapter; | |
323 | struct ifcvf_hw *vf; | |
324 | int ret; | |
325 | ||
326 | ret = pcim_enable_device(pdev); | |
327 | if (ret) { | |
328 | IFCVF_ERR(pdev, "Failed to enable device\n"); | |
329 | return ret; | |
330 | } | |
331 | ||
332 | ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4), | |
333 | IFCVF_DRIVER_NAME); | |
334 | if (ret) { | |
335 | IFCVF_ERR(pdev, "Failed to request MMIO region\n"); | |
336 | return ret; | |
337 | } | |
338 | ||
339 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | |
340 | if (ret) { | |
341 | IFCVF_ERR(pdev, "No usable DMA confiugration\n"); | |
342 | return ret; | |
343 | } | |
344 | ||
345 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | |
346 | if (ret) { | |
347 | IFCVF_ERR(pdev, | |
348 | "No usable coherent DMA confiugration\n"); | |
349 | return ret; | |
350 | } | |
351 | ||
352 | ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR, | |
353 | IFCVF_MAX_INTR, PCI_IRQ_MSIX); | |
354 | if (ret < 0) { | |
355 | IFCVF_ERR(pdev, "Failed to alloc irq vectors\n"); | |
356 | return ret; | |
357 | } | |
358 | ||
359 | ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev); | |
360 | if (ret) { | |
361 | IFCVF_ERR(pdev, | |
362 | "Failed for adding devres for freeing irq vectors\n"); | |
363 | return ret; | |
364 | } | |
365 | ||
366 | adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, | |
367 | dev, &ifc_vdpa_ops); | |
368 | if (adapter == NULL) { | |
369 | IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); | |
370 | return -ENOMEM; | |
371 | } | |
372 | ||
373 | pci_set_master(pdev); | |
374 | pci_set_drvdata(pdev, adapter); | |
375 | ||
376 | vf = &adapter->vf; | |
377 | vf->base = pcim_iomap_table(pdev); | |
378 | ||
379 | adapter->pdev = pdev; | |
380 | adapter->vdpa.dma_dev = &pdev->dev; | |
381 | ||
382 | ret = ifcvf_request_irq(adapter); | |
383 | if (ret) { | |
384 | IFCVF_ERR(pdev, "Failed to request MSI-X irq\n"); | |
385 | goto err; | |
386 | } | |
387 | ||
388 | ret = ifcvf_init_hw(vf, pdev); | |
389 | if (ret) { | |
390 | IFCVF_ERR(pdev, "Failed to init IFCVF hw\n"); | |
391 | goto err; | |
392 | } | |
393 | ||
394 | ret = vdpa_register_device(&adapter->vdpa); | |
395 | if (ret) { | |
396 | IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus"); | |
397 | goto err; | |
398 | } | |
399 | ||
400 | return 0; | |
401 | ||
402 | err: | |
403 | put_device(&adapter->vdpa.dev); | |
404 | return ret; | |
405 | } | |
406 | ||
/*
 * PCI remove: unregister from the vDPA bus. All other resources are
 * device-managed (pcim_*/devm_*) and are released automatically.
 */
static void ifcvf_remove(struct pci_dev *pdev)
{
	struct ifcvf_adapter *adapter = pci_get_drvdata(pdev);

	vdpa_unregister_device(&adapter->vdpa);
}
413 | ||
414 | static struct pci_device_id ifcvf_pci_ids[] = { | |
415 | { PCI_DEVICE_SUB(IFCVF_VENDOR_ID, | |
416 | IFCVF_DEVICE_ID, | |
417 | IFCVF_SUBSYS_VENDOR_ID, | |
418 | IFCVF_SUBSYS_DEVICE_ID) }, | |
419 | { 0 }, | |
420 | }; | |
421 | MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids); | |
422 | ||
423 | static struct pci_driver ifcvf_driver = { | |
424 | .name = IFCVF_DRIVER_NAME, | |
425 | .id_table = ifcvf_pci_ids, | |
426 | .probe = ifcvf_probe, | |
427 | .remove = ifcvf_remove, | |
428 | }; | |
429 | ||
430 | module_pci_driver(ifcvf_driver); | |
431 | ||
432 | MODULE_LICENSE("GPL v2"); | |
433 | MODULE_VERSION(VERSION_STRING); |