1 // SPDX-License-Identifier: GPL-2.0+
3 * Mellanox BlueField SoC TmFifo driver
5 * Copyright (C) 2019 Mellanox Technologies
8 #include <linux/acpi.h>
9 #include <linux/bitfield.h>
10 #include <linux/circ_buf.h>
11 #include <linux/efi.h>
12 #include <linux/irq.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/platform_device.h>
16 #include <linux/types.h>
18 #include <linux/virtio_config.h>
19 #include <linux/virtio_console.h>
20 #include <linux/virtio_ids.h>
21 #include <linux/virtio_net.h>
22 #include <linux/virtio_ring.h>
24 #include "mlxbf-tmfifo-regs.h"
/* Vring size. */
#define MLXBF_TMFIFO_VRING_SIZE			SZ_1K

/* Console Tx buffer size. */
#define MLXBF_TMFIFO_CON_TX_BUF_SIZE		SZ_32K

/* Console Tx buffer reserved space. */
#define MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE	8

/* House-keeping timer interval. */
#define MLXBF_TMFIFO_TIMER_INTERVAL		(HZ / 10)

/* Virtual devices sharing the TM FIFO. */
#define MLXBF_TMFIFO_VDEV_MAX			(VIRTIO_ID_CONSOLE + 1)

/*
 * Reserve 1/16 of TmFifo space, so console messages are not starved by
 * the networking traffic.
 */
#define MLXBF_TMFIFO_RESERVE_RATIO		16

/* Message with data needs at least two words (for header & data). */
#define MLXBF_TMFIFO_DATA_MIN_WORDS		2

/* ACPI UID for BlueField-3. */
#define TMFIFO_BF3_UID				1
56 * struct mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring
57 * @va: virtual address of the ring
58 * @dma: dma address of the ring
59 * @vq: pointer to the virtio virtqueue
60 * @desc: current descriptor of the pending packet
61 * @desc_head: head descriptor of the pending packet
62 * @drop_desc: dummy desc for packet dropping
63 * @cur_len: processed length of the current descriptor
64 * @rem_len: remaining length of the pending packet
65 * @pkt_len: total length of the pending packet
66 * @next_avail: next avail descriptor id
67 * @num: vring size (number of descriptors)
68 * @align: vring alignment size
70 * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
71 * @fifo: pointer to the tmfifo structure
73 struct mlxbf_tmfifo_vring
{
77 struct vring_desc
*desc
;
78 struct vring_desc
*desc_head
;
79 struct vring_desc drop_desc
;
88 struct mlxbf_tmfifo
*fifo
;
/* Check whether vring is in drop mode. */
#define IS_VRING_DROP(_r) ({ \
	typeof(_r) (r) = (_r); \
	(r->desc_head == &r->drop_desc ? true : false); })

/* A stub length to drop maximum length packet. */
#define VRING_DROP_DESC_MAX_LEN		GENMASK(15, 0)
/* Interrupt types. */
enum {
	MLXBF_TM_RX_LWM_IRQ,
	MLXBF_TM_RX_HWM_IRQ,
	MLXBF_TM_TX_LWM_IRQ,
	MLXBF_TM_TX_HWM_IRQ,
	MLXBF_TM_MAX_IRQ
};

/* Ring types (Rx & Tx). */
enum {
	MLXBF_TMFIFO_VRING_RX,
	MLXBF_TMFIFO_VRING_TX,
	MLXBF_TMFIFO_VRING_MAX
};
116 * struct mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device
117 * @vdev: virtio device, in which the vdev.id.device field has the
118 * VIRTIO_ID_xxx id to distinguish the virtual device.
119 * @status: status of the device
120 * @features: supported features of the device
121 * @vrings: array of tmfifo vrings of this device
122 * @config: non-anonymous union for cons and net
123 * @config.cons: virtual console config -
124 * select if vdev.id.device is VIRTIO_ID_CONSOLE
125 * @config.net: virtual network config -
126 * select if vdev.id.device is VIRTIO_ID_NET
127 * @tx_buf: tx buffer used to buffer data before writing into the FIFO
129 struct mlxbf_tmfifo_vdev
{
130 struct virtio_device vdev
;
133 struct mlxbf_tmfifo_vring vrings
[MLXBF_TMFIFO_VRING_MAX
];
135 struct virtio_console_config cons
;
136 struct virtio_net_config net
;
138 struct circ_buf tx_buf
;
/**
 * struct mlxbf_tmfifo_irq_info - Structure of the interrupt information
 * @fifo: pointer to the tmfifo structure
 * @irq: interrupt number
 * @index: index into the interrupt array
 */
struct mlxbf_tmfifo_irq_info {
	struct mlxbf_tmfifo *fifo;
	int irq;
	int index;
};
154 * struct mlxbf_tmfifo_io - Structure of the TmFifo IO resource (for both rx & tx)
155 * @ctl: control register offset (TMFIFO_RX_CTL / TMFIFO_TX_CTL)
156 * @sts: status register offset (TMFIFO_RX_STS / TMFIFO_TX_STS)
157 * @data: data register offset (TMFIFO_RX_DATA / TMFIFO_TX_DATA)
159 struct mlxbf_tmfifo_io
{
166 * struct mlxbf_tmfifo - Structure of the TmFifo
167 * @vdev: array of the virtual devices running over the TmFifo
168 * @lock: lock to protect the TmFifo access
169 * @res0: mapped resource block 0
170 * @res1: mapped resource block 1
171 * @rx: rx io resource
172 * @tx: tx io resource
173 * @rx_fifo_size: number of entries of the Rx FIFO
174 * @tx_fifo_size: number of entries of the Tx FIFO
175 * @pend_events: pending bits for deferred events
176 * @irq_info: interrupt information
177 * @work: work struct for deferred process
178 * @timer: background timer
180 * @spin_lock: Tx/Rx spin lock
181 * @is_ready: ready flag
183 struct mlxbf_tmfifo
{
184 struct mlxbf_tmfifo_vdev
*vdev
[MLXBF_TMFIFO_VDEV_MAX
];
185 struct mutex lock
; /* TmFifo lock */
188 struct mlxbf_tmfifo_io rx
;
189 struct mlxbf_tmfifo_io tx
;
192 unsigned long pend_events
;
193 struct mlxbf_tmfifo_irq_info irq_info
[MLXBF_TM_MAX_IRQ
];
194 struct work_struct work
;
195 struct timer_list timer
;
196 struct mlxbf_tmfifo_vring
*vring
[2];
197 spinlock_t spin_lock
[2]; /* spin lock */
202 * struct mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header
203 * @type: message type
204 * @len: payload length in network byte order. Messages sent into the FIFO
205 * will be read by the other side as data stream in the same byte order.
206 * The length needs to be encoded into network order so both sides
207 * could understand it.
209 struct mlxbf_tmfifo_msg_hdr
{
214 } __packed
__aligned(sizeof(u64
));
218 * This MAC address will be read from EFI persistent variable if configured.
219 * It can also be reconfigured with standard Linux tools.
221 static u8 mlxbf_tmfifo_net_default_mac
[ETH_ALEN
] = {
222 0x00, 0x1A, 0xCA, 0xFF, 0xFF, 0x01
225 /* EFI variable name of the MAC address. */
226 static efi_char16_t mlxbf_tmfifo_efi_name
[] = L
"RshimMacAddr";
/* Maximum L2 header length. */
#define MLXBF_TMFIFO_NET_L2_OVERHEAD	(ETH_HLEN + VLAN_HLEN)

/* Supported virtio-net features. */
#define MLXBF_TMFIFO_NET_FEATURES \
	(BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_STATUS) | \
	 BIT_ULL(VIRTIO_NET_F_MAC))

#define mlxbf_vdev_to_tmfifo(d) container_of(d, struct mlxbf_tmfifo_vdev, vdev)
238 /* Free vrings of the FIFO device. */
239 static void mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo
*fifo
,
240 struct mlxbf_tmfifo_vdev
*tm_vdev
)
242 struct mlxbf_tmfifo_vring
*vring
;
245 for (i
= 0; i
< ARRAY_SIZE(tm_vdev
->vrings
); i
++) {
246 vring
= &tm_vdev
->vrings
[i
];
248 size
= vring_size(vring
->num
, vring
->align
);
249 dma_free_coherent(tm_vdev
->vdev
.dev
.parent
, size
,
250 vring
->va
, vring
->dma
);
253 vring_del_virtqueue(vring
->vq
);
260 /* Allocate vrings for the FIFO. */
261 static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo
*fifo
,
262 struct mlxbf_tmfifo_vdev
*tm_vdev
)
264 struct mlxbf_tmfifo_vring
*vring
;
270 for (i
= 0; i
< ARRAY_SIZE(tm_vdev
->vrings
); i
++) {
271 vring
= &tm_vdev
->vrings
[i
];
273 vring
->num
= MLXBF_TMFIFO_VRING_SIZE
;
274 vring
->align
= SMP_CACHE_BYTES
;
276 vring
->vdev_id
= tm_vdev
->vdev
.id
.device
;
277 vring
->drop_desc
.len
= VRING_DROP_DESC_MAX_LEN
;
278 dev
= &tm_vdev
->vdev
.dev
;
280 size
= vring_size(vring
->num
, vring
->align
);
281 va
= dma_alloc_coherent(dev
->parent
, size
, &dma
, GFP_KERNEL
);
283 mlxbf_tmfifo_free_vrings(fifo
, tm_vdev
);
284 dev_err(dev
->parent
, "dma_alloc_coherent failed\n");
295 /* Disable interrupts of the FIFO device. */
296 static void mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo
*fifo
)
300 for (i
= 0; i
< MLXBF_TM_MAX_IRQ
; i
++) {
301 irq
= fifo
->irq_info
[i
].irq
;
302 fifo
->irq_info
[i
].irq
= 0;
307 /* Interrupt handler. */
308 static irqreturn_t
mlxbf_tmfifo_irq_handler(int irq
, void *arg
)
310 struct mlxbf_tmfifo_irq_info
*irq_info
= arg
;
312 if (!test_and_set_bit(irq_info
->index
, &irq_info
->fifo
->pend_events
))
313 schedule_work(&irq_info
->fifo
->work
);
318 /* Get the next packet descriptor from the vring. */
319 static struct vring_desc
*
320 mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring
*vring
)
322 const struct vring
*vr
= virtqueue_get_vring(vring
->vq
);
323 struct virtio_device
*vdev
= vring
->vq
->vdev
;
324 unsigned int idx
, head
;
326 if (vring
->next_avail
== virtio16_to_cpu(vdev
, vr
->avail
->idx
))
329 /* Make sure 'avail->idx' is visible already. */
332 idx
= vring
->next_avail
% vr
->num
;
333 head
= virtio16_to_cpu(vdev
, vr
->avail
->ring
[idx
]);
334 if (WARN_ON(head
>= vr
->num
))
339 return &vr
->desc
[head
];
342 /* Release virtio descriptor. */
343 static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring
*vring
,
344 struct vring_desc
*desc
, u32 len
)
346 const struct vring
*vr
= virtqueue_get_vring(vring
->vq
);
347 struct virtio_device
*vdev
= vring
->vq
->vdev
;
350 vr_idx
= virtio16_to_cpu(vdev
, vr
->used
->idx
);
351 idx
= vr_idx
% vr
->num
;
352 vr
->used
->ring
[idx
].id
= cpu_to_virtio32(vdev
, desc
- vr
->desc
);
353 vr
->used
->ring
[idx
].len
= cpu_to_virtio32(vdev
, len
);
356 * Virtio could poll and check the 'idx' to decide whether the desc is
357 * done or not. Add a memory barrier here to make sure the update above
358 * completes before updating the idx.
361 vr
->used
->idx
= cpu_to_virtio16(vdev
, vr_idx
+ 1);
364 /* Get the total length of the descriptor chain. */
365 static u32
mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring
*vring
,
366 struct vring_desc
*desc
)
368 const struct vring
*vr
= virtqueue_get_vring(vring
->vq
);
369 struct virtio_device
*vdev
= vring
->vq
->vdev
;
373 len
+= virtio32_to_cpu(vdev
, desc
->len
);
374 if (!(virtio16_to_cpu(vdev
, desc
->flags
) & VRING_DESC_F_NEXT
))
376 idx
= virtio16_to_cpu(vdev
, desc
->next
);
377 desc
= &vr
->desc
[idx
];
383 static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring
*vring
)
385 struct vring_desc
*desc_head
;
388 if (vring
->desc_head
) {
389 desc_head
= vring
->desc_head
;
390 len
= vring
->pkt_len
;
392 desc_head
= mlxbf_tmfifo_get_next_desc(vring
);
393 len
= mlxbf_tmfifo_get_pkt_len(vring
, desc_head
);
397 mlxbf_tmfifo_release_desc(vring
, desc_head
, len
);
401 vring
->desc_head
= NULL
;
404 static void mlxbf_tmfifo_init_net_desc(struct mlxbf_tmfifo_vring
*vring
,
405 struct vring_desc
*desc
, bool is_rx
)
407 struct virtio_device
*vdev
= vring
->vq
->vdev
;
408 struct virtio_net_hdr
*net_hdr
;
410 net_hdr
= phys_to_virt(virtio64_to_cpu(vdev
, desc
->addr
));
411 memset(net_hdr
, 0, sizeof(*net_hdr
));
414 /* Get and initialize the next packet. */
415 static struct vring_desc
*
416 mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring
*vring
, bool is_rx
)
418 struct vring_desc
*desc
;
420 desc
= mlxbf_tmfifo_get_next_desc(vring
);
421 if (desc
&& is_rx
&& vring
->vdev_id
== VIRTIO_ID_NET
)
422 mlxbf_tmfifo_init_net_desc(vring
, desc
, is_rx
);
424 vring
->desc_head
= desc
;
430 /* House-keeping timer. */
431 static void mlxbf_tmfifo_timer(struct timer_list
*t
)
433 struct mlxbf_tmfifo
*fifo
= container_of(t
, struct mlxbf_tmfifo
, timer
);
436 rx
= !test_and_set_bit(MLXBF_TM_RX_HWM_IRQ
, &fifo
->pend_events
);
437 tx
= !test_and_set_bit(MLXBF_TM_TX_LWM_IRQ
, &fifo
->pend_events
);
440 schedule_work(&fifo
->work
);
442 mod_timer(&fifo
->timer
, jiffies
+ MLXBF_TMFIFO_TIMER_INTERVAL
);
445 /* Copy one console packet into the output buffer. */
446 static void mlxbf_tmfifo_console_output_one(struct mlxbf_tmfifo_vdev
*cons
,
447 struct mlxbf_tmfifo_vring
*vring
,
448 struct vring_desc
*desc
)
450 const struct vring
*vr
= virtqueue_get_vring(vring
->vq
);
451 struct virtio_device
*vdev
= &cons
->vdev
;
456 addr
= phys_to_virt(virtio64_to_cpu(vdev
, desc
->addr
));
457 len
= virtio32_to_cpu(vdev
, desc
->len
);
459 seg
= CIRC_SPACE_TO_END(cons
->tx_buf
.head
, cons
->tx_buf
.tail
,
460 MLXBF_TMFIFO_CON_TX_BUF_SIZE
);
462 memcpy(cons
->tx_buf
.buf
+ cons
->tx_buf
.head
, addr
, len
);
464 memcpy(cons
->tx_buf
.buf
+ cons
->tx_buf
.head
, addr
, seg
);
466 memcpy(cons
->tx_buf
.buf
, addr
, len
- seg
);
468 cons
->tx_buf
.head
= (cons
->tx_buf
.head
+ len
) %
469 MLXBF_TMFIFO_CON_TX_BUF_SIZE
;
471 if (!(virtio16_to_cpu(vdev
, desc
->flags
) & VRING_DESC_F_NEXT
))
473 idx
= virtio16_to_cpu(vdev
, desc
->next
);
474 desc
= &vr
->desc
[idx
];
478 /* Copy console data into the output buffer. */
479 static void mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev
*cons
,
480 struct mlxbf_tmfifo_vring
*vring
)
482 struct vring_desc
*desc
;
485 desc
= mlxbf_tmfifo_get_next_desc(vring
);
487 /* Release the packet if not enough space. */
488 len
= mlxbf_tmfifo_get_pkt_len(vring
, desc
);
489 avail
= CIRC_SPACE(cons
->tx_buf
.head
, cons
->tx_buf
.tail
,
490 MLXBF_TMFIFO_CON_TX_BUF_SIZE
);
491 if (len
+ MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE
> avail
) {
492 mlxbf_tmfifo_release_desc(vring
, desc
, len
);
496 mlxbf_tmfifo_console_output_one(cons
, vring
, desc
);
497 mlxbf_tmfifo_release_desc(vring
, desc
, len
);
498 desc
= mlxbf_tmfifo_get_next_desc(vring
);
502 /* Get the number of available words in Rx FIFO for receiving. */
503 static int mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo
*fifo
)
507 sts
= readq(fifo
->rx
.sts
);
508 return FIELD_GET(MLXBF_TMFIFO_RX_STS__COUNT_MASK
, sts
);
511 /* Get the number of available words in the TmFifo for sending. */
512 static int mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo
*fifo
, int vdev_id
)
518 /* Reserve some room in FIFO for console messages. */
519 if (vdev_id
== VIRTIO_ID_NET
)
520 tx_reserve
= fifo
->tx_fifo_size
/ MLXBF_TMFIFO_RESERVE_RATIO
;
524 sts
= readq(fifo
->tx
.sts
);
525 count
= FIELD_GET(MLXBF_TMFIFO_TX_STS__COUNT_MASK
, sts
);
526 return fifo
->tx_fifo_size
- tx_reserve
- count
;
529 /* Console Tx (move data from the output buffer into the TmFifo). */
530 static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo
*fifo
, int avail
)
532 struct mlxbf_tmfifo_msg_hdr hdr
;
533 struct mlxbf_tmfifo_vdev
*cons
;
539 /* Return if not enough space available. */
540 if (avail
< MLXBF_TMFIFO_DATA_MIN_WORDS
)
543 cons
= fifo
->vdev
[VIRTIO_ID_CONSOLE
];
544 if (!cons
|| !cons
->tx_buf
.buf
)
547 /* Return if no data to send. */
548 size
= CIRC_CNT(cons
->tx_buf
.head
, cons
->tx_buf
.tail
,
549 MLXBF_TMFIFO_CON_TX_BUF_SIZE
);
553 /* Adjust the size to available space. */
554 if (size
+ sizeof(hdr
) > avail
* sizeof(u64
))
555 size
= avail
* sizeof(u64
) - sizeof(hdr
);
558 hdr
.type
= VIRTIO_ID_CONSOLE
;
559 hdr
.len
= htons(size
);
560 writeq(*(u64
*)&hdr
, fifo
->tx
.data
);
562 /* Use spin-lock to protect the 'cons->tx_buf'. */
563 spin_lock_irqsave(&fifo
->spin_lock
[0], flags
);
566 addr
= cons
->tx_buf
.buf
+ cons
->tx_buf
.tail
;
568 seg
= CIRC_CNT_TO_END(cons
->tx_buf
.head
, cons
->tx_buf
.tail
,
569 MLXBF_TMFIFO_CON_TX_BUF_SIZE
);
570 if (seg
>= sizeof(u64
)) {
571 memcpy(&data
, addr
, sizeof(u64
));
573 memcpy(&data
, addr
, seg
);
574 memcpy((u8
*)&data
+ seg
, cons
->tx_buf
.buf
,
577 writeq(data
, fifo
->tx
.data
);
579 if (size
>= sizeof(u64
)) {
580 cons
->tx_buf
.tail
= (cons
->tx_buf
.tail
+ sizeof(u64
)) %
581 MLXBF_TMFIFO_CON_TX_BUF_SIZE
;
584 cons
->tx_buf
.tail
= (cons
->tx_buf
.tail
+ size
) %
585 MLXBF_TMFIFO_CON_TX_BUF_SIZE
;
590 spin_unlock_irqrestore(&fifo
->spin_lock
[0], flags
);
593 /* Rx/Tx one word in the descriptor buffer. */
594 static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring
*vring
,
595 struct vring_desc
*desc
,
598 struct virtio_device
*vdev
= vring
->vq
->vdev
;
599 struct mlxbf_tmfifo
*fifo
= vring
->fifo
;
603 /* Get the buffer address of this desc. */
604 addr
= phys_to_virt(virtio64_to_cpu(vdev
, desc
->addr
));
606 /* Read a word from FIFO for Rx. */
608 data
= readq(fifo
->rx
.data
);
610 if (vring
->cur_len
+ sizeof(u64
) <= len
) {
611 /* The whole word. */
613 if (!IS_VRING_DROP(vring
))
614 memcpy(addr
+ vring
->cur_len
, &data
,
617 memcpy(&data
, addr
+ vring
->cur_len
,
620 vring
->cur_len
+= sizeof(u64
);
622 /* Leftover bytes. */
624 if (!IS_VRING_DROP(vring
))
625 memcpy(addr
+ vring
->cur_len
, &data
,
626 len
- vring
->cur_len
);
629 memcpy(&data
, addr
+ vring
->cur_len
,
630 len
- vring
->cur_len
);
632 vring
->cur_len
= len
;
635 /* Write the word into FIFO for Tx. */
637 writeq(data
, fifo
->tx
.data
);
641 * Rx/Tx packet header.
643 * In Rx case, the packet might be found to belong to a different vring since
644 * the TmFifo is shared by different services. In such case, the 'vring_change'
647 static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring
*vring
,
648 struct vring_desc
**desc
,
649 bool is_rx
, bool *vring_change
)
651 struct mlxbf_tmfifo
*fifo
= vring
->fifo
;
652 struct virtio_net_config
*config
;
653 struct mlxbf_tmfifo_msg_hdr hdr
;
654 int vdev_id
, hdr_len
;
655 bool drop_rx
= false;
657 /* Read/Write packet header. */
659 /* Drain one word from the FIFO. */
660 *(u64
*)&hdr
= readq(fifo
->rx
.data
);
662 /* Skip the length 0 packets (keepalive). */
666 /* Check packet type. */
667 if (hdr
.type
== VIRTIO_ID_NET
) {
668 vdev_id
= VIRTIO_ID_NET
;
669 hdr_len
= sizeof(struct virtio_net_hdr
);
670 config
= &fifo
->vdev
[vdev_id
]->config
.net
;
671 /* A legacy-only interface for now. */
673 __virtio16_to_cpu(virtio_legacy_is_little_endian(),
675 MLXBF_TMFIFO_NET_L2_OVERHEAD
)
678 vdev_id
= VIRTIO_ID_CONSOLE
;
683 * Check whether the new packet still belongs to this vring.
684 * If not, update the pkt_len of the new vring.
686 if (vdev_id
!= vring
->vdev_id
) {
687 struct mlxbf_tmfifo_vdev
*tm_dev2
= fifo
->vdev
[vdev_id
];
692 vring
= &tm_dev2
->vrings
[MLXBF_TMFIFO_VRING_RX
];
693 *vring_change
= true;
696 if (drop_rx
&& !IS_VRING_DROP(vring
)) {
697 if (vring
->desc_head
)
698 mlxbf_tmfifo_release_pkt(vring
);
699 *desc
= &vring
->drop_desc
;
700 vring
->desc_head
= *desc
;
704 vring
->pkt_len
= ntohs(hdr
.len
) + hdr_len
;
706 /* Network virtio has an extra header. */
707 hdr_len
= (vring
->vdev_id
== VIRTIO_ID_NET
) ?
708 sizeof(struct virtio_net_hdr
) : 0;
709 vring
->pkt_len
= mlxbf_tmfifo_get_pkt_len(vring
, *desc
);
710 hdr
.type
= (vring
->vdev_id
== VIRTIO_ID_NET
) ?
711 VIRTIO_ID_NET
: VIRTIO_ID_CONSOLE
;
712 hdr
.len
= htons(vring
->pkt_len
- hdr_len
);
713 writeq(*(u64
*)&hdr
, fifo
->tx
.data
);
716 vring
->cur_len
= hdr_len
;
717 vring
->rem_len
= vring
->pkt_len
;
718 fifo
->vring
[is_rx
] = vring
;
722 * Rx/Tx one descriptor.
724 * Return true to indicate more data available.
726 static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring
*vring
,
727 bool is_rx
, int *avail
)
729 const struct vring
*vr
= virtqueue_get_vring(vring
->vq
);
730 struct mlxbf_tmfifo
*fifo
= vring
->fifo
;
731 struct virtio_device
*vdev
;
732 bool vring_change
= false;
733 struct vring_desc
*desc
;
737 vdev
= &fifo
->vdev
[vring
->vdev_id
]->vdev
;
739 /* Get the descriptor of the next packet. */
741 desc
= mlxbf_tmfifo_get_next_pkt(vring
, is_rx
);
743 /* Drop next Rx packet to avoid stuck. */
745 desc
= &vring
->drop_desc
;
746 vring
->desc_head
= desc
;
756 /* Beginning of a packet. Start to Rx/Tx packet header. */
757 if (vring
->pkt_len
== 0) {
758 mlxbf_tmfifo_rxtx_header(vring
, &desc
, is_rx
, &vring_change
);
761 /* Return if new packet is for another ring. */
764 goto mlxbf_tmfifo_desc_done
;
767 /* Get the length of this desc. */
768 len
= virtio32_to_cpu(vdev
, desc
->len
);
769 if (len
> vring
->rem_len
)
770 len
= vring
->rem_len
;
772 /* Rx/Tx one word (8 bytes) if not done. */
773 if (vring
->cur_len
< len
) {
774 mlxbf_tmfifo_rxtx_word(vring
, desc
, is_rx
, len
);
778 /* Check again whether it's done. */
779 if (vring
->cur_len
== len
) {
781 vring
->rem_len
-= len
;
783 /* Get the next desc on the chain. */
784 if (!IS_VRING_DROP(vring
) && vring
->rem_len
> 0 &&
785 (virtio16_to_cpu(vdev
, desc
->flags
) & VRING_DESC_F_NEXT
)) {
786 idx
= virtio16_to_cpu(vdev
, desc
->next
);
787 desc
= &vr
->desc
[idx
];
788 goto mlxbf_tmfifo_desc_done
;
791 /* Done and release the packet. */
793 fifo
->vring
[is_rx
] = NULL
;
794 if (!IS_VRING_DROP(vring
)) {
795 mlxbf_tmfifo_release_pkt(vring
);
798 vring
->desc_head
= NULL
;
804 * Make sure the load/store are in order before
805 * returning back to virtio.
809 /* Notify upper layer that packet is done. */
810 spin_lock_irqsave(&fifo
->spin_lock
[is_rx
], flags
);
811 vring_interrupt(0, vring
->vq
);
812 spin_unlock_irqrestore(&fifo
->spin_lock
[is_rx
], flags
);
815 mlxbf_tmfifo_desc_done
:
816 /* Save the current desc. */
822 /* Rx & Tx processing of a queue. */
823 static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring
*vring
, bool is_rx
)
825 int avail
= 0, devid
= vring
->vdev_id
;
826 struct mlxbf_tmfifo
*fifo
;
831 /* Return if vdev is not ready. */
832 if (!fifo
|| !fifo
->vdev
[devid
])
835 /* Return if another vring is running. */
836 if (fifo
->vring
[is_rx
] && fifo
->vring
[is_rx
] != vring
)
839 /* Only handle console and network for now. */
840 if (WARN_ON(devid
!= VIRTIO_ID_NET
&& devid
!= VIRTIO_ID_CONSOLE
))
844 /* Get available FIFO space. */
847 avail
= mlxbf_tmfifo_get_rx_avail(fifo
);
849 avail
= mlxbf_tmfifo_get_tx_avail(fifo
, devid
);
854 /* Console output always comes from the Tx buffer. */
855 if (!is_rx
&& devid
== VIRTIO_ID_CONSOLE
) {
856 mlxbf_tmfifo_console_tx(fifo
, avail
);
860 /* Handle one descriptor. */
861 more
= mlxbf_tmfifo_rxtx_one_desc(vring
, is_rx
, &avail
);
865 /* Handle Rx or Tx queues. */
866 static void mlxbf_tmfifo_work_rxtx(struct mlxbf_tmfifo
*fifo
, int queue_id
,
867 int irq_id
, bool is_rx
)
869 struct mlxbf_tmfifo_vdev
*tm_vdev
;
870 struct mlxbf_tmfifo_vring
*vring
;
873 if (!test_and_clear_bit(irq_id
, &fifo
->pend_events
) ||
874 !fifo
->irq_info
[irq_id
].irq
)
877 for (i
= 0; i
< MLXBF_TMFIFO_VDEV_MAX
; i
++) {
878 tm_vdev
= fifo
->vdev
[i
];
880 vring
= &tm_vdev
->vrings
[queue_id
];
882 mlxbf_tmfifo_rxtx(vring
, is_rx
);
887 /* Work handler for Rx and Tx case. */
888 static void mlxbf_tmfifo_work_handler(struct work_struct
*work
)
890 struct mlxbf_tmfifo
*fifo
;
892 fifo
= container_of(work
, struct mlxbf_tmfifo
, work
);
896 mutex_lock(&fifo
->lock
);
898 /* Tx (Send data to the TmFifo). */
899 mlxbf_tmfifo_work_rxtx(fifo
, MLXBF_TMFIFO_VRING_TX
,
900 MLXBF_TM_TX_LWM_IRQ
, false);
902 /* Rx (Receive data from the TmFifo). */
903 mlxbf_tmfifo_work_rxtx(fifo
, MLXBF_TMFIFO_VRING_RX
,
904 MLXBF_TM_RX_HWM_IRQ
, true);
906 mutex_unlock(&fifo
->lock
);
909 /* The notify function is called when new buffers are posted. */
910 static bool mlxbf_tmfifo_virtio_notify(struct virtqueue
*vq
)
912 struct mlxbf_tmfifo_vring
*vring
= vq
->priv
;
913 struct mlxbf_tmfifo_vdev
*tm_vdev
;
914 struct mlxbf_tmfifo
*fifo
;
920 * Virtio maintains vrings in pairs, even number ring for Rx
921 * and odd number ring for Tx.
923 if (vring
->index
& BIT(0)) {
925 * Console could make blocking call with interrupts disabled.
926 * In such case, the vring needs to be served right away. For
927 * other cases, just set the TX LWM bit to start Tx in the
930 if (vring
->vdev_id
== VIRTIO_ID_CONSOLE
) {
931 spin_lock_irqsave(&fifo
->spin_lock
[0], flags
);
932 tm_vdev
= fifo
->vdev
[VIRTIO_ID_CONSOLE
];
933 mlxbf_tmfifo_console_output(tm_vdev
, vring
);
934 spin_unlock_irqrestore(&fifo
->spin_lock
[0], flags
);
935 set_bit(MLXBF_TM_TX_LWM_IRQ
, &fifo
->pend_events
);
936 } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ
,
937 &fifo
->pend_events
)) {
941 if (test_and_set_bit(MLXBF_TM_RX_HWM_IRQ
, &fifo
->pend_events
))
945 schedule_work(&fifo
->work
);
950 /* Get the array of feature bits for this device. */
951 static u64
mlxbf_tmfifo_virtio_get_features(struct virtio_device
*vdev
)
953 struct mlxbf_tmfifo_vdev
*tm_vdev
= mlxbf_vdev_to_tmfifo(vdev
);
955 return tm_vdev
->features
;
958 /* Confirm device features to use. */
959 static int mlxbf_tmfifo_virtio_finalize_features(struct virtio_device
*vdev
)
961 struct mlxbf_tmfifo_vdev
*tm_vdev
= mlxbf_vdev_to_tmfifo(vdev
);
963 tm_vdev
->features
= vdev
->features
;
968 /* Free virtqueues found by find_vqs(). */
969 static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device
*vdev
)
971 struct mlxbf_tmfifo_vdev
*tm_vdev
= mlxbf_vdev_to_tmfifo(vdev
);
972 struct mlxbf_tmfifo_vring
*vring
;
973 struct virtqueue
*vq
;
976 for (i
= 0; i
< ARRAY_SIZE(tm_vdev
->vrings
); i
++) {
977 vring
= &tm_vdev
->vrings
[i
];
979 /* Release the pending packet. */
981 mlxbf_tmfifo_release_pkt(vring
);
985 vring_del_virtqueue(vq
);
990 /* Create and initialize the virtual queues. */
991 static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device
*vdev
,
993 struct virtqueue
*vqs
[],
994 vq_callback_t
*callbacks
[],
995 const char * const names
[],
997 struct irq_affinity
*desc
)
999 struct mlxbf_tmfifo_vdev
*tm_vdev
= mlxbf_vdev_to_tmfifo(vdev
);
1000 struct mlxbf_tmfifo_vring
*vring
;
1001 struct virtqueue
*vq
;
1004 if (nvqs
> ARRAY_SIZE(tm_vdev
->vrings
))
1007 for (i
= 0; i
< nvqs
; ++i
) {
1012 vring
= &tm_vdev
->vrings
[i
];
1015 size
= vring_size(vring
->num
, vring
->align
);
1016 memset(vring
->va
, 0, size
);
1017 vq
= vring_new_virtqueue(i
, vring
->num
, vring
->align
, vdev
,
1018 false, false, vring
->va
,
1019 mlxbf_tmfifo_virtio_notify
,
1020 callbacks
[i
], names
[i
]);
1022 dev_err(&vdev
->dev
, "vring_new_virtqueue failed\n");
1027 vq
->num_max
= vring
->num
;
1031 /* Make vq update visible before using it. */
1041 mlxbf_tmfifo_virtio_del_vqs(vdev
);
1045 /* Read the status byte. */
1046 static u8
mlxbf_tmfifo_virtio_get_status(struct virtio_device
*vdev
)
1048 struct mlxbf_tmfifo_vdev
*tm_vdev
= mlxbf_vdev_to_tmfifo(vdev
);
1050 return tm_vdev
->status
;
1053 /* Write the status byte. */
1054 static void mlxbf_tmfifo_virtio_set_status(struct virtio_device
*vdev
,
1057 struct mlxbf_tmfifo_vdev
*tm_vdev
= mlxbf_vdev_to_tmfifo(vdev
);
1059 tm_vdev
->status
= status
;
1062 /* Reset the device. Not much here for now. */
1063 static void mlxbf_tmfifo_virtio_reset(struct virtio_device
*vdev
)
1065 struct mlxbf_tmfifo_vdev
*tm_vdev
= mlxbf_vdev_to_tmfifo(vdev
);
1067 tm_vdev
->status
= 0;
1070 /* Read the value of a configuration field. */
1071 static void mlxbf_tmfifo_virtio_get(struct virtio_device
*vdev
,
1072 unsigned int offset
,
1076 struct mlxbf_tmfifo_vdev
*tm_vdev
= mlxbf_vdev_to_tmfifo(vdev
);
1078 if ((u64
)offset
+ len
> sizeof(tm_vdev
->config
))
1081 memcpy(buf
, (u8
*)&tm_vdev
->config
+ offset
, len
);
1084 /* Write the value of a configuration field. */
1085 static void mlxbf_tmfifo_virtio_set(struct virtio_device
*vdev
,
1086 unsigned int offset
,
1090 struct mlxbf_tmfifo_vdev
*tm_vdev
= mlxbf_vdev_to_tmfifo(vdev
);
1092 if ((u64
)offset
+ len
> sizeof(tm_vdev
->config
))
1095 memcpy((u8
*)&tm_vdev
->config
+ offset
, buf
, len
);
1098 static void tmfifo_virtio_dev_release(struct device
*device
)
1100 struct virtio_device
*vdev
=
1101 container_of(device
, struct virtio_device
, dev
);
1102 struct mlxbf_tmfifo_vdev
*tm_vdev
= mlxbf_vdev_to_tmfifo(vdev
);
1107 /* Virtio config operations. */
1108 static const struct virtio_config_ops mlxbf_tmfifo_virtio_config_ops
= {
1109 .get_features
= mlxbf_tmfifo_virtio_get_features
,
1110 .finalize_features
= mlxbf_tmfifo_virtio_finalize_features
,
1111 .find_vqs
= mlxbf_tmfifo_virtio_find_vqs
,
1112 .del_vqs
= mlxbf_tmfifo_virtio_del_vqs
,
1113 .reset
= mlxbf_tmfifo_virtio_reset
,
1114 .set_status
= mlxbf_tmfifo_virtio_set_status
,
1115 .get_status
= mlxbf_tmfifo_virtio_get_status
,
1116 .get
= mlxbf_tmfifo_virtio_get
,
1117 .set
= mlxbf_tmfifo_virtio_set
,
1120 /* Create vdev for the FIFO. */
1121 static int mlxbf_tmfifo_create_vdev(struct device
*dev
,
1122 struct mlxbf_tmfifo
*fifo
,
1123 int vdev_id
, u64 features
,
1124 void *config
, u32 size
)
1126 struct mlxbf_tmfifo_vdev
*tm_vdev
, *reg_dev
= NULL
;
1129 mutex_lock(&fifo
->lock
);
1131 tm_vdev
= fifo
->vdev
[vdev_id
];
1133 dev_err(dev
, "vdev %d already exists\n", vdev_id
);
1138 tm_vdev
= kzalloc(sizeof(*tm_vdev
), GFP_KERNEL
);
1144 tm_vdev
->vdev
.id
.device
= vdev_id
;
1145 tm_vdev
->vdev
.config
= &mlxbf_tmfifo_virtio_config_ops
;
1146 tm_vdev
->vdev
.dev
.parent
= dev
;
1147 tm_vdev
->vdev
.dev
.release
= tmfifo_virtio_dev_release
;
1148 tm_vdev
->features
= features
;
1150 memcpy(&tm_vdev
->config
, config
, size
);
1152 if (mlxbf_tmfifo_alloc_vrings(fifo
, tm_vdev
)) {
1153 dev_err(dev
, "unable to allocate vring\n");
1158 /* Allocate an output buffer for the console device. */
1159 if (vdev_id
== VIRTIO_ID_CONSOLE
)
1160 tm_vdev
->tx_buf
.buf
= devm_kmalloc(dev
,
1161 MLXBF_TMFIFO_CON_TX_BUF_SIZE
,
1163 fifo
->vdev
[vdev_id
] = tm_vdev
;
1165 /* Register the virtio device. */
1166 ret
= register_virtio_device(&tm_vdev
->vdev
);
1169 dev_err(dev
, "register_virtio_device failed\n");
1173 mutex_unlock(&fifo
->lock
);
1177 mlxbf_tmfifo_free_vrings(fifo
, tm_vdev
);
1178 fifo
->vdev
[vdev_id
] = NULL
;
1180 put_device(&tm_vdev
->vdev
.dev
);
1184 mutex_unlock(&fifo
->lock
);
1188 /* Delete vdev for the FIFO. */
1189 static int mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo
*fifo
, int vdev_id
)
1191 struct mlxbf_tmfifo_vdev
*tm_vdev
;
1193 mutex_lock(&fifo
->lock
);
1195 /* Unregister vdev. */
1196 tm_vdev
= fifo
->vdev
[vdev_id
];
1198 unregister_virtio_device(&tm_vdev
->vdev
);
1199 mlxbf_tmfifo_free_vrings(fifo
, tm_vdev
);
1200 fifo
->vdev
[vdev_id
] = NULL
;
1203 mutex_unlock(&fifo
->lock
);
1208 /* Read the configured network MAC address from efi variable. */
1209 static void mlxbf_tmfifo_get_cfg_mac(u8
*mac
)
1211 efi_guid_t guid
= EFI_GLOBAL_VARIABLE_GUID
;
1212 unsigned long size
= ETH_ALEN
;
1216 rc
= efi
.get_variable(mlxbf_tmfifo_efi_name
, &guid
, NULL
, &size
, buf
);
1217 if (rc
== EFI_SUCCESS
&& size
== ETH_ALEN
)
1218 ether_addr_copy(mac
, buf
);
1220 ether_addr_copy(mac
, mlxbf_tmfifo_net_default_mac
);
1223 /* Set TmFifo thresolds which is used to trigger interrupts. */
1224 static void mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo
*fifo
)
1228 /* Get Tx FIFO size and set the low/high watermark. */
1229 ctl
= readq(fifo
->tx
.ctl
);
1230 fifo
->tx_fifo_size
=
1231 FIELD_GET(MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK
, ctl
);
1232 ctl
= (ctl
& ~MLXBF_TMFIFO_TX_CTL__LWM_MASK
) |
1233 FIELD_PREP(MLXBF_TMFIFO_TX_CTL__LWM_MASK
,
1234 fifo
->tx_fifo_size
/ 2);
1235 ctl
= (ctl
& ~MLXBF_TMFIFO_TX_CTL__HWM_MASK
) |
1236 FIELD_PREP(MLXBF_TMFIFO_TX_CTL__HWM_MASK
,
1237 fifo
->tx_fifo_size
- 1);
1238 writeq(ctl
, fifo
->tx
.ctl
);
1240 /* Get Rx FIFO size and set the low/high watermark. */
1241 ctl
= readq(fifo
->rx
.ctl
);
1242 fifo
->rx_fifo_size
=
1243 FIELD_GET(MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK
, ctl
);
1244 ctl
= (ctl
& ~MLXBF_TMFIFO_RX_CTL__LWM_MASK
) |
1245 FIELD_PREP(MLXBF_TMFIFO_RX_CTL__LWM_MASK
, 0);
1246 ctl
= (ctl
& ~MLXBF_TMFIFO_RX_CTL__HWM_MASK
) |
1247 FIELD_PREP(MLXBF_TMFIFO_RX_CTL__HWM_MASK
, 1);
1248 writeq(ctl
, fifo
->rx
.ctl
);
1251 static void mlxbf_tmfifo_cleanup(struct mlxbf_tmfifo
*fifo
)
1255 fifo
->is_ready
= false;
1256 del_timer_sync(&fifo
->timer
);
1257 mlxbf_tmfifo_disable_irqs(fifo
);
1258 cancel_work_sync(&fifo
->work
);
1259 for (i
= 0; i
< MLXBF_TMFIFO_VDEV_MAX
; i
++)
1260 mlxbf_tmfifo_delete_vdev(fifo
, i
);
1263 /* Probe the TMFIFO. */
1264 static int mlxbf_tmfifo_probe(struct platform_device
*pdev
)
1266 struct virtio_net_config net_config
;
1267 struct device
*dev
= &pdev
->dev
;
1268 struct mlxbf_tmfifo
*fifo
;
1272 rc
= acpi_dev_uid_to_integer(ACPI_COMPANION(dev
), &dev_id
);
1274 dev_err(dev
, "Cannot retrieve UID\n");
1278 fifo
= devm_kzalloc(dev
, sizeof(*fifo
), GFP_KERNEL
);
1282 spin_lock_init(&fifo
->spin_lock
[0]);
1283 spin_lock_init(&fifo
->spin_lock
[1]);
1284 INIT_WORK(&fifo
->work
, mlxbf_tmfifo_work_handler
);
1285 mutex_init(&fifo
->lock
);
1287 /* Get the resource of the Rx FIFO. */
1288 fifo
->res0
= devm_platform_ioremap_resource(pdev
, 0);
1289 if (IS_ERR(fifo
->res0
))
1290 return PTR_ERR(fifo
->res0
);
1292 /* Get the resource of the Tx FIFO. */
1293 fifo
->res1
= devm_platform_ioremap_resource(pdev
, 1);
1294 if (IS_ERR(fifo
->res1
))
1295 return PTR_ERR(fifo
->res1
);
1297 if (dev_id
== TMFIFO_BF3_UID
) {
1298 fifo
->rx
.ctl
= fifo
->res1
+ MLXBF_TMFIFO_RX_CTL_BF3
;
1299 fifo
->rx
.sts
= fifo
->res1
+ MLXBF_TMFIFO_RX_STS_BF3
;
1300 fifo
->rx
.data
= fifo
->res0
+ MLXBF_TMFIFO_RX_DATA_BF3
;
1301 fifo
->tx
.ctl
= fifo
->res1
+ MLXBF_TMFIFO_TX_CTL_BF3
;
1302 fifo
->tx
.sts
= fifo
->res1
+ MLXBF_TMFIFO_TX_STS_BF3
;
1303 fifo
->tx
.data
= fifo
->res0
+ MLXBF_TMFIFO_TX_DATA_BF3
;
1305 fifo
->rx
.ctl
= fifo
->res0
+ MLXBF_TMFIFO_RX_CTL
;
1306 fifo
->rx
.sts
= fifo
->res0
+ MLXBF_TMFIFO_RX_STS
;
1307 fifo
->rx
.data
= fifo
->res0
+ MLXBF_TMFIFO_RX_DATA
;
1308 fifo
->tx
.ctl
= fifo
->res1
+ MLXBF_TMFIFO_TX_CTL
;
1309 fifo
->tx
.sts
= fifo
->res1
+ MLXBF_TMFIFO_TX_STS
;
1310 fifo
->tx
.data
= fifo
->res1
+ MLXBF_TMFIFO_TX_DATA
;
1313 platform_set_drvdata(pdev
, fifo
);
1315 timer_setup(&fifo
->timer
, mlxbf_tmfifo_timer
, 0);
1317 for (i
= 0; i
< MLXBF_TM_MAX_IRQ
; i
++) {
1318 fifo
->irq_info
[i
].index
= i
;
1319 fifo
->irq_info
[i
].fifo
= fifo
;
1320 fifo
->irq_info
[i
].irq
= platform_get_irq(pdev
, i
);
1321 rc
= devm_request_irq(dev
, fifo
->irq_info
[i
].irq
,
1322 mlxbf_tmfifo_irq_handler
, 0,
1323 "tmfifo", &fifo
->irq_info
[i
]);
1325 dev_err(dev
, "devm_request_irq failed\n");
1326 fifo
->irq_info
[i
].irq
= 0;
1331 mlxbf_tmfifo_set_threshold(fifo
);
1333 /* Create the console vdev. */
1334 rc
= mlxbf_tmfifo_create_vdev(dev
, fifo
, VIRTIO_ID_CONSOLE
, 0, NULL
, 0);
1338 /* Create the network vdev. */
1339 memset(&net_config
, 0, sizeof(net_config
));
1341 /* A legacy-only interface for now. */
1342 net_config
.mtu
= __cpu_to_virtio16(virtio_legacy_is_little_endian(),
1344 net_config
.status
= __cpu_to_virtio16(virtio_legacy_is_little_endian(),
1345 VIRTIO_NET_S_LINK_UP
);
1346 mlxbf_tmfifo_get_cfg_mac(net_config
.mac
);
1347 rc
= mlxbf_tmfifo_create_vdev(dev
, fifo
, VIRTIO_ID_NET
,
1348 MLXBF_TMFIFO_NET_FEATURES
, &net_config
,
1349 sizeof(net_config
));
1353 mod_timer(&fifo
->timer
, jiffies
+ MLXBF_TMFIFO_TIMER_INTERVAL
);
1355 /* Make all updates visible before setting the 'is_ready' flag. */
1358 fifo
->is_ready
= true;
1362 mlxbf_tmfifo_cleanup(fifo
);
/* Device remove function. */
static int mlxbf_tmfifo_remove(struct platform_device *pdev)
{
	struct mlxbf_tmfifo *fifo = platform_get_drvdata(pdev);

	mlxbf_tmfifo_cleanup(fifo);

	return 0;
}
1376 static const struct acpi_device_id mlxbf_tmfifo_acpi_match
[] = {
1380 MODULE_DEVICE_TABLE(acpi
, mlxbf_tmfifo_acpi_match
);
1382 static struct platform_driver mlxbf_tmfifo_driver
= {
1383 .probe
= mlxbf_tmfifo_probe
,
1384 .remove
= mlxbf_tmfifo_remove
,
1386 .name
= "bf-tmfifo",
1387 .acpi_match_table
= mlxbf_tmfifo_acpi_match
,
1391 module_platform_driver(mlxbf_tmfifo_driver
);
1393 MODULE_DESCRIPTION("Mellanox BlueField SoC TmFifo Driver");
1394 MODULE_LICENSE("GPL v2");
1395 MODULE_AUTHOR("Mellanox Technologies");