/*
 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;

#define QIB_MAX_RDMA_ATOMIC 16
#define QIB_GUIDS_PER_PORT 5

#define QPN_MAX (1 << 24)
#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define QIB_UVERBS_ABI_VERSION 2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)

#define IB_SEQ_NAK (3 << 29)

/* AETH NAK opcode values */
#define IB_RNR_NAK 0x20
#define IB_NAK_PSN_ERROR 0x60
#define IB_NAK_INVALID_REQUEST 0x61
#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST 0x64

/* Flags for checking QP state (see ib_qib_state_ops[]) */
#define QIB_POST_SEND_OK 0x01
#define QIB_POST_RECV_OK 0x02
#define QIB_PROCESS_RECV_OK 0x04
#define QIB_PROCESS_SEND_OK 0x08
#define QIB_PROCESS_NEXT_SEND_OK 0x10
#define QIB_FLUSH_SEND 0x20
#define QIB_FLUSH_RECV 0x40
#define QIB_PROCESS_OR_FLUSH_SEND \
	(QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE 0x00
#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)

#define QIB_VENDOR_IPG cpu_to_be16(0xFFA0)

#define IB_BTH_REQ_ACK (1 << 31)
#define IB_BTH_SOLICITED (1 << 23)
#define IB_BTH_MIG_REQ (1 << 22)

/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)

#define IB_GRH_VERSION 6
#define IB_GRH_VERSION_MASK 0xF
#define IB_GRH_VERSION_SHIFT 28
#define IB_GRH_TCLASS_MASK 0xFF
#define IB_GRH_TCLASS_SHIFT 20
#define IB_GRH_FLOW_MASK 0xFFFFF
#define IB_GRH_FLOW_SHIFT 0
#define IB_GRH_NEXT_HDR 0x1B

#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)

/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0 1
#define IB_VL_VL0_1 2
#define IB_VL_VL0_3 3
#define IB_VL_VL0_7 4
#define IB_VL_VL0_14 5

static inline int qib_num_vls(int vls)
{
	switch (vls) {
	default:
	case IB_VL_VL0:
		return 1;
	case IB_VL_VL0_1:
		return 2;
	case IB_VL_VL0_3:
		return 4;
	case IB_VL_VL0_7:
		return 8;
	case IB_VL_VL0_14:
		return 15;
	}
}
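
/*
 * Note: the encodings above are the PortInfo VLCap/OperationalVLs values,
 * so IB_VL_VL0_14 means data VLs 0-14 (15 VLs). VL15, which is reserved
 * for subnet management traffic, is always available and is not counted
 * by qib_num_vls().
 */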

struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __packed;

struct ib_atomic_eth {
	__be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __packed;

struct qib_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be32 atomic_ack_eth[2];
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __packed;

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data). Only the first 56 bytes of the IB header
 * will be in the eager header buffer. The remaining 12 or 16 bytes
 * are in the data buffer.
 */
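/*
 * The arithmetic above breaks down as LRH (8 bytes) + GRH (40) + BTH (12)
 * + DETH (8) = 68 bytes; the optional 4-byte immediate data brings the
 * total to 72.
 */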
struct qib_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct qib_other_headers oth;
		} l;
		struct qib_other_headers oth;
	} u;
} __packed;

struct qib_pio_header {
	__le32 pbc[2];
	struct qib_ib_header hdr;
} __packed;

/*
 * There is one struct qib_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct qib_mcast_qp.
 */
struct qib_mcast_qp {
	struct list_head list;
	struct qib_qp *qp;
};

struct qib_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/* Protection domain */
struct qib_pd {
	struct ib_pd ibpd;
	int user; /* non-zero if created from user space */
};

/* Address Handle */
struct qib_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
	atomic_t refcount;
};

/*
 * This structure is used by qib_mmap() to validate an offset
 * when an mmap() request is made. The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct qib_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct qib_cq_wc {
	u32 head; /* index of next entry to fill */
	u32 tail; /* index of next ib_poll_cq() entry */
	union {
		/* these are actually size ibcq.cqe + 1 */
		struct ib_uverbs_wc uqueue[0];
		struct ib_wc kqueue[0];
	};
};

/*
 * The completion queue structure.
 */
struct qib_cq {
	struct ib_cq ibcq;
	struct kthread_work comptask;
	struct qib_devdata *dd;
	spinlock_t lock; /* protect changes in this struct */
	u8 notify;
	u8 triggered;
	struct qib_cq_wc *queue;
	struct qib_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct qib_seg {
	void *vaddr;
	size_t length;
};

/* The number of qib_segs that fit in a page. */
#define QIB_SEGSZ (PAGE_SIZE / sizeof(struct qib_seg))

struct qib_segarray {
	struct qib_seg segs[QIB_SEGSZ];
};

struct qib_mregion {
	struct ib_pd *pd; /* shares refcnt of ibmr.pd */
	u64 user_base; /* User's address for this region */
	u64 iova; /* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset; /* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs; /* number of qib_segs in all the arrays */
	u32 mapsz; /* size of the map array */
	u8 page_shift; /* 0 - non-uniform or non power-of-2 sizes */
	u8 lkey_published; /* in global table */
	struct completion comp; /* complete when refcount goes to zero */
	struct rcu_head list;
	atomic_t refcount;
	struct qib_segarray *map[0]; /* the segments */
};
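
/*
 * map[] above is a two-level lookup: mapsz pointers to struct qib_segarray,
 * each holding QIB_SEGSZ struct qib_seg entries, covering max_segs segments
 * in total. A pair such as (mr->map[m], ->segs[n]) addresses one segment;
 * struct qib_sge below carries those m/n cursors while copying.
 */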

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct qib_sge {
	struct qib_mregion *mr;
	void *vaddr; /* kernel virtual address of segment */
	u32 sge_length; /* length of the SGE */
	u32 length; /* remaining length of the segment */
	u16 m; /* current index: mr->map[m] */
	u16 n; /* current index: mr->map[m]->segs[n] */
};

/* Memory region */
struct qib_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct qib_mregion mr; /* must be last */
	u64 *pages;
	u32 npages;
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct qib_swqe {
	union {
		struct ib_send_wr wr; /* don't use wr.sg_list */
		struct ib_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_fast_reg_wr fast_reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn; /* first packet sequence number */
	u32 lpsn; /* last packet sequence number */
	u32 ssn; /* send sequence number */
	u32 length; /* total length of data in sg_list */
	struct qib_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct qib_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct qib_rwq {
	u32 head; /* new work requests posted to the head */
	u32 tail; /* receives pull requests from here. */
	struct qib_rwqe wq[0];
};

struct qib_rq {
	struct qib_rwq *wq;
	u32 size; /* size of RWQE array */
	u8 max_sge;
	spinlock_t lock /* protect changes in this struct */
		____cacheline_aligned_in_smp;
};

struct qib_srq {
	struct ib_srq ibsrq;
	struct qib_rq rq;
	struct qib_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

struct qib_sge_state {
	struct qib_sge *sg_list; /* next SGE to be used if any */
	struct qib_sge sge; /* progress state for the current SGE */
	u32 total_len;
	u8 num_sge;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send a RDMA read response or atomic operation.
 */
struct qib_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	u32 lpsn;
	union {
		struct qib_sge rdma_sge;
		u64 atomic_data;
	};
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct qib_qp {
	struct ib_qp ibqp;
	/* read mostly fields above and below */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct qib_qp __rcu *next; /* link list for QPN hash table */
	struct qib_swqe *s_wq; /* send work queue */
	struct qib_mmap_info *ip;
	struct qib_ib_header *s_hdr; /* next packet header to send */
	unsigned long timeout_jiffies; /* computed from timeout */

	enum ib_mtu path_mtu;
	u32 remote_qpn;
	u32 pmtu; /* decoded from path_mtu */
	u32 qkey; /* QKEY for this QP (for UD or RD) */
	u32 s_size; /* send work queue size */
	u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */

	u8 state; /* QP state */
	u8 qp_access_flags;
	u8 alt_timeout; /* Alternate path timeout for this QP */
	u8 timeout; /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index; /* PKEY index to use */
	u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt; /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
	u8 s_max_sge; /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */

	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;


	struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
		____cacheline_aligned_in_smp;
	struct qib_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
	unsigned long r_aflags;
	u64 r_wr_id; /* ID for current receive WQE */
	u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
	u32 r_len; /* total length of r_sge */
	u32 r_rcv_len; /* receive data len processed */
	u32 r_psn; /* expected rcv packet sequence number */
	u32 r_msn; /* message sequence number */

	u8 r_state; /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue; /* index into s_ack_queue[] */

	struct list_head rspwait; /* link for waiting to respond */

	struct qib_sge_state r_sge; /* current receive data */
	struct qib_rq r_rq; /* receive work queue */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	struct qib_sge_state *s_cur_sge;
	u32 s_flags;
	struct qib_verbs_txreq *s_tx;
	struct qib_swqe *s_wqe;
	struct qib_sge_state s_sge; /* current send request data */
	struct qib_mregion *s_rdma_mr;
	atomic_t s_dma_busy;
	u32 s_cur_size; /* size of send packet in bytes */
	u32 s_len; /* total length of s_sge */
	u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
	u32 s_next_psn; /* PSN for next request */
	u32 s_last_psn; /* last response PSN processed */
	u32 s_sending_psn; /* lowest PSN that is being sent */
	u32 s_sending_hpsn; /* highest PSN that is being sent */
	u32 s_psn; /* current packet sequence number */
	u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
	u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
	u32 s_head; /* new entries added here */
	u32 s_tail; /* next entry to process */
	u32 s_cur; /* current work queue entry */
	u32 s_acked; /* last un-ACK'ed entry */
	u32 s_last; /* last completed entry */
	u32 s_ssn; /* SSN of tail entry */
	u32 s_lsn; /* limit sequence number (credit) */
	u16 s_hdrwords; /* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	u8 s_state; /* opcode of last packet sent */
	u8 s_ack_state; /* opcode of packet to ACK */
	u8 s_nak_state; /* non-zero if NAK is pending */
	u8 r_nak_state; /* non-zero if NAK is pending */
	u8 s_retry; /* requester retry counter */
	u8 s_rnr_retry; /* requester RNR retry counter */
	u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue; /* index into s_ack_queue[] */

	struct qib_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct list_head iowait; /* link for wait PIO buf */

	struct work_struct s_work;

	wait_queue_head_t wait_dma;

	struct qib_sge r_sg_list[0] /* verified SGEs */
		____cacheline_aligned_in_smp;
};

/*
 * Atomic bit definitions for r_aflags.
 */
#define QIB_R_WRID_VALID 0
#define QIB_R_REWIND_SGE 1

/*
 * Bit definitions for r_flags.
 */
#define QIB_R_REUSE_SGE 0x01
#define QIB_R_RDMAR_SEQ 0x02
#define QIB_R_RSP_NAK 0x04
#define QIB_R_RSP_SEND 0x08
#define QIB_R_COMM_EST 0x10

/*
 * Bit definitions for s_flags.
 *
 * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * QIB_S_BUSY - send tasklet is processing the QP
 * QIB_S_TIMER - the RC retry timer is active
 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * QIB_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * QIB_S_WAIT_RNR - waiting for RNR timeout
 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 */
#define QIB_S_SIGNAL_REQ_WR 0x0001
#define QIB_S_BUSY 0x0002
#define QIB_S_TIMER 0x0004
#define QIB_S_RESP_PENDING 0x0008
#define QIB_S_ACK_PENDING 0x0010
#define QIB_S_WAIT_FENCE 0x0020
#define QIB_S_WAIT_RDMAR 0x0040
#define QIB_S_WAIT_RNR 0x0080
#define QIB_S_WAIT_SSN_CREDIT 0x0100
#define QIB_S_WAIT_DMA 0x0200
#define QIB_S_WAIT_PIO 0x0400
#define QIB_S_WAIT_TX 0x0800
#define QIB_S_WAIT_DMA_DESC 0x1000
#define QIB_S_WAIT_KMEM 0x2000
#define QIB_S_WAIT_PSN 0x4000
#define QIB_S_WAIT_ACK 0x8000
#define QIB_S_SEND_ONE 0x10000
#define QIB_S_UNLIMITED_CREDIT 0x20000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
	QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
	QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
	QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)

#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)

#define QIB_PSN_CREDIT 16

/*
 * Since struct qib_swqe is not a fixed size, we can't simply index into
 * struct qib_qp.s_wq. This function does the array index computation.
 */
static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
					    unsigned n)
{
	return (struct qib_swqe *)((char *)qp->s_wq +
				   (sizeof(struct qib_swqe) +
				    qp->s_max_sge *
				    sizeof(struct qib_sge)) * n);
}

/*
 * Since struct qib_rwqe is not a fixed size, we can't simply index into
 * struct qib_rwq.wq. This function does the array index computation.
 */
static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
{
	return (struct qib_rwqe *)
		((char *) rq->wq->wq +
		 (sizeof(struct qib_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
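
/*
 * Illustrative sketch (not part of this header): a ring consumer such as
 * qib_get_rwqe() typically reads rq->wq->tail, maps it to an entry with
 * get_rwqe_ptr(), and advances the tail modulo rq->size:
 *
 *	u32 tail = rq->wq->tail;
 *	struct qib_rwqe *wqe = get_rwqe_ptr(rq, tail);
 *
 *	if (++tail >= rq->size)
 *		tail = 0;
 *	rq->wq->tail = tail;
 */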

/*
 * QPN-map pages start out as NULL; they are allocated on first use and
 * never deallocated. This way, large bitmaps are not allocated unless
 * large numbers of QPs are used.
 */
struct qpn_map {
	void *page;
};

struct qib_qpn_table {
	spinlock_t lock; /* protect changes in this struct */
	unsigned flags; /* flags for QP0/1 allocated for each port */
	u32 last; /* last QP number allocated */
	u32 nmaps; /* size of the map table */
	u16 limit;
	u16 mask;
	/* bit map of free QP numbers other than 0/1 */
	struct qpn_map map[QPNMAP_ENTRIES];
};

#define MAX_LKEY_TABLE_BITS 23

struct qib_lkey_table {
	spinlock_t lock; /* protect changes in this struct */
	u32 next; /* next unused index (speeds search) */
	u32 gen; /* generation count */
	u32 max; /* size of the table */
	struct qib_mregion __rcu **table;
};

struct qib_opcode_stats {
	u64 n_packets; /* number of packets */
	u64 n_bytes; /* total number of bytes */
};

struct qib_opcode_stats_perctx {
	struct qib_opcode_stats stats[128];
};

struct qib_pma_counters {
	u64 n_unicast_xmit; /* total unicast packets sent */
	u64 n_unicast_rcv; /* total unicast packets received */
	u64 n_multicast_xmit; /* total multicast packets sent */
	u64 n_multicast_rcv; /* total multicast packets received */
};

struct qib_ibport {
	struct qib_qp __rcu *qp0;
	struct qib_qp __rcu *qp1;
	struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
	struct qib_ah *sm_ah;
	struct qib_ah *smi_ah;
	struct rb_root mcast_tree;
	spinlock_t lock; /* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix; /* in network order */
	__be64 mkey;
	__be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */
	u64 tid; /* TID for traps */
	struct qib_pma_counters __percpu *pmastats;
	u64 z_unicast_xmit; /* starting count for PMA */
	u64 z_unicast_rcv; /* starting count for PMA */
	u64 z_multicast_xmit; /* starting count for PMA */
	u64 z_multicast_rcv; /* starting count for PMA */
	u64 z_symbol_error_counter; /* starting count for PMA */
	u64 z_link_error_recovery_counter; /* starting count for PMA */
	u64 z_link_downed_counter; /* starting count for PMA */
	u64 z_port_rcv_errors; /* starting count for PMA */
	u64 z_port_rcv_remphys_errors; /* starting count for PMA */
	u64 z_port_xmit_discards; /* starting count for PMA */
	u64 z_port_xmit_data; /* starting count for PMA */
	u64 z_port_rcv_data; /* starting count for PMA */
	u64 z_port_xmit_packets; /* starting count for PMA */
	u64 z_port_rcv_packets; /* starting count for PMA */
	u32 z_local_link_integrity_errors; /* starting count for PMA */
	u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
	u32 z_vl15_dropped; /* starting count for PMA */
	u32 n_rc_resends;
	u32 n_rc_acks;
	u32 n_rc_qacks;
	u32 n_rc_delayed_comp;
	u32 n_seq_naks;
	u32 n_rdma_seq;
	u32 n_rnr_naks;
	u32 n_other_naks;
	u32 n_loop_pkts;
	u32 n_pkt_drops;
	u32 n_vl15_dropped;
	u32 n_rc_timeouts;
	u32 n_dmawait;
	u32 n_unaligned;
	u32 n_rc_dupreq;
	u32 n_rc_seqnak;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;
	u16 mkey_lease_period;
	u16 sm_lid;
	u16 repress_traps;
	u8 sm_sl;
	u8 mkeyprot;
	u8 subnet_timeout;
	u8 vl_high_limit;
	u8 sl_to_vl[16];

};


struct qib_ibdev {
	struct ib_device ibdev;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock; /* protect mmap_offset */
	u32 mmap_offset;
	struct qib_mregion __rcu *dma_mr;

	/* QP numbers are shared by all IB ports */
	struct qib_qpn_table qpn_table;
	struct qib_lkey_table lk_table;
	struct list_head piowait; /* list for wait PIO buf */
	struct list_head dmawait; /* list for wait DMA */
	struct list_head txwait; /* list for wait qib_verbs_txreq */
	struct list_head memwait; /* list for wait kernel memory */
	struct list_head txreq_free;
	struct timer_list mem_timer;
	struct qib_qp __rcu **qp_table;
	struct qib_pio_header *pio_hdrs;
	dma_addr_t pio_hdrs_phys;
	/* list of QPs waiting for RNR timer */
	spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
	u32 qp_table_size; /* size of the hash table */
	u32 qp_rnd; /* random bytes for hash */
	spinlock_t qpt_lock;

	u32 n_piowait;
	u32 n_txwait;

	u32 n_pds_allocated; /* number of PDs allocated for device */
	spinlock_t n_pds_lock;
	u32 n_ahs_allocated; /* number of AHs allocated for device */
	spinlock_t n_ahs_lock;
	u32 n_cqs_allocated; /* number of CQs allocated for device */
	spinlock_t n_cqs_lock;
	u32 n_qps_allocated; /* number of QPs allocated for device */
	spinlock_t n_qps_lock;
	u32 n_srqs_allocated; /* number of SRQs allocated for device */
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;
#ifdef CONFIG_DEBUG_FS
	/* per HCA debugfs */
	struct dentry *qib_ibdev_dbg;
#endif
};

struct qib_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};

static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct qib_mr, ibmr);
}

static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct qib_pd, ibpd);
}

static inline struct qib_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct qib_ah, ibah);
}

static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct qib_cq, ibcq);
}

static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct qib_srq, ibsrq);
}

static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct qib_qp, ibqp);
}

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct qib_ibdev, ibdev);
}

/*
 * Send if not busy or waiting for I/O and either
 * a RC response is pending or we can process send work requests.
 */
static inline int qib_send_ok(struct qib_qp *qp)
{
	return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
		(qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}

/*
 * This must be called with s_lock held.
 */
void qib_schedule_send(struct qib_qp *qp);

static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
	u16 p1 = pkey1 & 0x7FFF;
	u16 p2 = pkey2 & 0x7FFF;

	/*
	 * Low 15 bits must be non-zero and match, and
	 * one of the two must be a full member (the membership bit is the
	 * top bit of the pkey, hence the signed compares below).
	 */
	return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}

void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct qib_ibport *ibp);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		    const struct ib_mad_hdr *in, size_t in_mad_size,
		    struct ib_mad_hdr *out, size_t *out_mad_size,
		    u16 *out_mad_pkey_index);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 * Shifting the 32-bit difference left by 8 drops the bits above the
 * 24-bit PSN, so the sign of the result reflects the circular
 * (mod 2^24) distance between a and b.
 */
static inline int qib_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}

struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait);

int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs);

int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_mcast_tree_empty(struct qib_ibport *ibp);

__be32 qib_compute_aeth(struct qib_qp *qp);

struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);

struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata);

int qib_destroy_qp(struct ib_qp *ibqp);

int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);

int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata);

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned qib_free_all_qps(struct qib_devdata *dd);

void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);

void qib_free_qpn_table(struct qib_qpn_table *qpt);

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter;

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev);

int qib_qp_iter_next(struct qib_qp_iter *iter);

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);

#endif

void qib_get_credit(struct qib_qp *qp, u32 aeth);

unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);

void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);

void qib_put_txreq(struct qib_verbs_txreq *tx);

int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct qib_sge_state *ss, u32 len);

void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
		  int release);

void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);

void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);

void qib_rc_rnr_retry(unsigned long arg);

void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);

void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);

int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);

void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);

void qib_free_lkey(struct qib_mregion *mr);

int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc);

int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);

int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr);

struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata);

int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata);

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int qib_destroy_srq(struct ib_srq *ibsrq);

int qib_cq_init(struct qib_devdata *dd);

void qib_cq_exit(struct qib_devdata *dd);

void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);

int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *qib_create_cq(struct ib_device *ibdev,
			    const struct ib_cq_init_attr *attr,
			    struct ib_ucontext *context,
			    struct ib_udata *udata);

int qib_destroy_cq(struct ib_cq *ibcq);

int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata);

int qib_dereg_mr(struct ib_mr *ibmr);

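/*
 * qib_alloc_mr(), qib_map_mr_sg() and qib_reg_mr() below back the newer
 * kernel memory registration API (ib_map_mr_sg() plus IB_WR_REG_MR); the
 * ib_fast_reg_* declarations that follow are the older fast-register
 * path, presumably retained only until all callers are converted.
 */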
struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
			   enum ib_mr_type mr_type,
			   u32 max_entries);

int qib_map_mr_sg(struct ib_mr *ibmr,
		  struct scatterlist *sg,
		  int sg_nents);

struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
				struct ib_device *ibdev, int page_list_len);

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);

int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);
int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr);

struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr);

int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova);

int qib_unmap_fmr(struct list_head *fmr_list);

int qib_dealloc_fmr(struct ib_fmr *ibfmr);

static inline void qib_get_mr(struct qib_mregion *mr)
{
	atomic_inc(&mr->refcount);
}

void mr_rcu_callback(struct rcu_head *list);

static inline void qib_put_mr(struct qib_mregion *mr)
{
	if (unlikely(atomic_dec_and_test(&mr->refcount)))
		call_rcu(&mr->list, mr_rcu_callback);
}

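/*
 * Release the memory-region references held by an SGE state: drop the
 * current SGE's MR reference, then each remaining entry on sg_list.
 */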
static inline void qib_put_ss(struct qib_sge_state *ss)
{
	while (ss->num_sge) {
		qib_put_mr(ss->sge.mr);
		if (--ss->num_sge)
			ss->sge = *ss->sg_list++;
	}
}


void qib_release_mmap_info(struct kref *ref);

struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
					   struct ib_ucontext *context,
					   void *obj);

void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
			  u32 size, void *obj);

int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);

void qib_migrate_qp(struct qib_qp *qp);

int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2);

void qib_do_send(struct work_struct *work);

void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status);

void qib_send_rc_ack(struct qib_qp *qp);

int qib_make_rc_req(struct qib_qp *qp);

int qib_make_uc_req(struct qib_qp *qp);

int qib_make_ud_req(struct qib_qp *qp);

int qib_register_ib_device(struct qib_devdata *);

void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);

unsigned qib_get_pkey(struct qib_ibport *, unsigned);

extern const enum ib_wc_opcode ib_qib_wc_opcode[];

/*
 * Below are the HCA-independent IB PhysPortState values returned
 * by the f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13

extern const int ib_qib_state_ops[];

extern __be64 ib_qib_sys_image_guid; /* in network order */

extern unsigned int ib_qib_lkey_table_size;

extern unsigned int ib_qib_max_cqes;

extern unsigned int ib_qib_max_cqs;

extern unsigned int ib_qib_max_qp_wrs;

extern unsigned int ib_qib_max_qps;

extern unsigned int ib_qib_max_sges;

extern unsigned int ib_qib_max_mcast_grps;

extern unsigned int ib_qib_max_mcast_qp_attached;

extern unsigned int ib_qib_max_srqs;

extern unsigned int ib_qib_max_srq_sges;

extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

extern struct ib_dma_mapping_ops qib_dma_mapping_ops;

#endif /* QIB_VERBS_H */