/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;

#define QIB_MAX_RDMA_ATOMIC     16
#define QIB_GUIDS_PER_PORT      5

#define QPN_MAX                 (1 << 24)
#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
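
/*
 * Each qpn_map page covers PAGE_SIZE * BITS_PER_BYTE QP numbers, so
 * QPNMAP_ENTRIES pages are enough to track all QPN_MAX (2^24) QPNs.
 */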
58 | ||
59 | /* | |
60 | * Increment this value if any changes that break userspace ABI | |
61 | * compatibility are made. | |
62 | */ | |
63 | #define QIB_UVERBS_ABI_VERSION 2 | |
64 | ||
65 | /* | |
66 | * Define an ib_cq_notify value that is not valid so we know when CQ | |
67 | * notifications are armed. | |
68 | */ | |
69 | #define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1) | |
70 | ||
71 | #define IB_SEQ_NAK (3 << 29) | |
72 | ||
73 | /* AETH NAK opcode values */ | |
74 | #define IB_RNR_NAK 0x20 | |
75 | #define IB_NAK_PSN_ERROR 0x60 | |
76 | #define IB_NAK_INVALID_REQUEST 0x61 | |
77 | #define IB_NAK_REMOTE_ACCESS_ERROR 0x62 | |
78 | #define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63 | |
79 | #define IB_NAK_INVALID_RD_REQUEST 0x64 | |
80 | ||
81 | /* Flags for checking QP state (see ib_qib_state_ops[]) */ | |
82 | #define QIB_POST_SEND_OK 0x01 | |
83 | #define QIB_POST_RECV_OK 0x02 | |
84 | #define QIB_PROCESS_RECV_OK 0x04 | |
85 | #define QIB_PROCESS_SEND_OK 0x08 | |
86 | #define QIB_PROCESS_NEXT_SEND_OK 0x10 | |
87 | #define QIB_FLUSH_SEND 0x20 | |
88 | #define QIB_FLUSH_RECV 0x40 | |
89 | #define QIB_PROCESS_OR_FLUSH_SEND \ | |
90 | (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND) | |
91 | ||
92 | /* IB Performance Manager status values */ | |
93 | #define IB_PMA_SAMPLE_STATUS_DONE 0x00 | |
94 | #define IB_PMA_SAMPLE_STATUS_STARTED 0x01 | |
95 | #define IB_PMA_SAMPLE_STATUS_RUNNING 0x02 | |
96 | ||
97 | /* Mandatory IB performance counter select values. */ | |
98 | #define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001) | |
99 | #define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002) | |
100 | #define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003) | |
101 | #define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004) | |
102 | #define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005) | |
103 | ||
104 | #define QIB_VENDOR_IPG cpu_to_be16(0xFFA0) | |
105 | ||
106 | #define IB_BTH_REQ_ACK (1 << 31) | |
107 | #define IB_BTH_SOLICITED (1 << 23) | |
108 | #define IB_BTH_MIG_REQ (1 << 22) | |
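
/*
 * IB_BTH_REQ_ACK is the AckReq bit in the third BTH word (bth[2],
 * alongside the 24-bit PSN); IB_BTH_SOLICITED and IB_BTH_MIG_REQ are
 * the SE and MigReq bits in the first BTH word (bth[0]).
 */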
109 | ||
110 | /* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */ | |
111 | #define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26) | |
112 | ||
113 | #define IB_GRH_VERSION 6 | |
114 | #define IB_GRH_VERSION_MASK 0xF | |
115 | #define IB_GRH_VERSION_SHIFT 28 | |
116 | #define IB_GRH_TCLASS_MASK 0xFF | |
117 | #define IB_GRH_TCLASS_SHIFT 20 | |
118 | #define IB_GRH_FLOW_MASK 0xFFFFF | |
119 | #define IB_GRH_FLOW_SHIFT 0 | |
120 | #define IB_GRH_NEXT_HDR 0x1B | |
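
/*
 * The first word of the GRH packs (version << 28) | (tclass << 20) |
 * flow_label using the masks and shifts above; a next-header value of
 * 0x1B identifies the IBA transport that follows the GRH.
 */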
121 | ||
122 | #define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL) | |
123 | ||
124 | /* Values for set/get portinfo VLCap OperationalVLs */ | |
125 | #define IB_VL_VL0 1 | |
126 | #define IB_VL_VL0_1 2 | |
127 | #define IB_VL_VL0_3 3 | |
128 | #define IB_VL_VL0_7 4 | |
129 | #define IB_VL_VL0_14 5 | |
130 | ||
static inline int qib_num_vls(int vls)
{
        switch (vls) {
        default:
        case IB_VL_VL0:
                return 1;
        case IB_VL_VL0_1:
                return 2;
        case IB_VL_VL0_3:
                return 4;
        case IB_VL_VL0_7:
                return 8;
        case IB_VL_VL0_14:
                return 15;
        }
}
147 | ||
148 | struct ib_reth { | |
149 | __be64 vaddr; | |
150 | __be32 rkey; | |
151 | __be32 length; | |
152 | } __attribute__ ((packed)); | |
153 | ||
154 | struct ib_atomic_eth { | |
155 | __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */ | |
156 | __be32 rkey; | |
157 | __be64 swap_data; | |
158 | __be64 compare_data; | |
159 | } __attribute__ ((packed)); | |
160 | ||
161 | struct qib_other_headers { | |
162 | __be32 bth[3]; | |
163 | union { | |
164 | struct { | |
165 | __be32 deth[2]; | |
166 | __be32 imm_data; | |
167 | } ud; | |
168 | struct { | |
169 | struct ib_reth reth; | |
170 | __be32 imm_data; | |
171 | } rc; | |
172 | struct { | |
173 | __be32 aeth; | |
174 | __be32 atomic_ack_eth[2]; | |
175 | } at; | |
176 | __be32 imm_data; | |
177 | __be32 aeth; | |
178 | struct ib_atomic_eth atomic_eth; | |
179 | } u; | |
180 | } __attribute__ ((packed)); | |
181 | ||
/*
 * Note that UD packets with a GRH header are 8 (LRH) + 40 (GRH) +
 * 12 (BTH) + 8 (DETH) = 68 bytes long (72 with 4 bytes of imm_data).
 * Only the first 56 bytes of the IB header will be in the eager header
 * buffer.  The remaining 12 or 16 bytes are in the data buffer.
 */
struct qib_ib_header {
        __be16 lrh[4];
        union {
                struct {
                        struct ib_grh grh;
                        struct qib_other_headers oth;
                } l;
                struct qib_other_headers oth;
        } u;
} __attribute__ ((packed));

struct qib_pio_header {
        __le32 pbc[2];
        struct qib_ib_header hdr;
} __attribute__ ((packed));

/*
 * There is one struct qib_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct qib_mcast_qp.
 */
struct qib_mcast_qp {
        struct list_head list;
        struct qib_qp *qp;
};

struct qib_mcast {
        struct rb_node rb_node;
        union ib_gid mgid;
        struct list_head qp_list;
        wait_queue_head_t wait;
        atomic_t refcount;
        int n_attached;
};

/* Protection domain */
struct qib_pd {
        struct ib_pd ibpd;
        int user;       /* non-zero if created from user space */
};

/* Address Handle */
struct qib_ah {
        struct ib_ah ibah;
        struct ib_ah_attr attr;
        atomic_t refcount;
};

/*
 * This structure is used by qib_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct qib_mmap_info {
        struct list_head pending_mmaps;
        struct ib_ucontext *context;
        void *obj;
        __u64 offset;
        struct kref ref;
        unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct qib_cq_wc {
        u32 head;       /* index of next entry to fill */
        u32 tail;       /* index of next ib_poll_cq() entry */
        union {
                /* these are actually size ibcq.cqe + 1 */
                struct ib_uverbs_wc uqueue[0];
                struct ib_wc kqueue[0];
        };
};

/*
 * The completion queue structure.
 */
struct qib_cq {
        struct ib_cq ibcq;
        struct work_struct comptask;
        spinlock_t lock;        /* protect changes in this struct */
        u8 notify;
        u8 triggered;
        struct qib_cq_wc *queue;
        struct qib_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct qib_seg {
        void *vaddr;
        size_t length;
};

/* The number of qib_segs that fit in a page. */
#define QIB_SEGSZ       (PAGE_SIZE / sizeof(struct qib_seg))

struct qib_segarray {
        struct qib_seg segs[QIB_SEGSZ];
};

struct qib_mregion {
        struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
        u64 user_base;          /* User's address for this region */
        u64 iova;               /* IB start address of this region */
        size_t length;
        u32 lkey;
        u32 offset;             /* offset (bytes) to start of region */
        int access_flags;
        u32 max_segs;           /* number of qib_segs in all the arrays */
        u32 mapsz;              /* size of the map array */
        u8 page_shift;          /* 0 means non-uniform or non power-of-2 page sizes */
        u8 lkey_published;      /* in global table */
        struct completion comp; /* complete when refcount goes to zero */
        struct rcu_head list;
        atomic_t refcount;
        struct qib_segarray *map[0];    /* the segments */
};
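
/*
 * A region's segments live in a two-level table: map[m] points at a
 * page-sized struct qib_segarray and map[m]->segs[n] is one qib_seg.
 * The m/n fields of struct qib_sge below index into this table.
 */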
312 | ||
313 | /* | |
314 | * These keep track of the copy progress within a memory region. | |
315 | * Used by the verbs layer. | |
316 | */ | |
317 | struct qib_sge { | |
318 | struct qib_mregion *mr; | |
319 | void *vaddr; /* kernel virtual address of segment */ | |
320 | u32 sge_length; /* length of the SGE */ | |
321 | u32 length; /* remaining length of the segment */ | |
322 | u16 m; /* current index: mr->map[m] */ | |
323 | u16 n; /* current index: mr->map[m]->segs[n] */ | |
324 | }; | |
325 | ||
326 | /* Memory region */ | |
327 | struct qib_mr { | |
328 | struct ib_mr ibmr; | |
329 | struct ib_umem *umem; | |
330 | struct qib_mregion mr; /* must be last */ | |
331 | }; | |
332 | ||
333 | /* | |
334 | * Send work request queue entry. | |
335 | * The size of the sg_list is determined when the QP is created and stored | |
336 | * in qp->s_max_sge. | |
337 | */ | |
338 | struct qib_swqe { | |
339 | struct ib_send_wr wr; /* don't use wr.sg_list */ | |
340 | u32 psn; /* first packet sequence number */ | |
341 | u32 lpsn; /* last packet sequence number */ | |
342 | u32 ssn; /* send sequence number */ | |
343 | u32 length; /* total length of data in sg_list */ | |
344 | struct qib_sge sg_list[0]; | |
345 | }; | |
346 | ||
347 | /* | |
348 | * Receive work request queue entry. | |
349 | * The size of the sg_list is determined when the QP (or SRQ) is created | |
350 | * and stored in qp->r_rq.max_sge (or srq->rq.max_sge). | |
351 | */ | |
352 | struct qib_rwqe { | |
353 | u64 wr_id; | |
354 | u8 num_sge; | |
355 | struct ib_sge sg_list[0]; | |
356 | }; | |
357 | ||
358 | /* | |
359 | * This structure is used to contain the head pointer, tail pointer, | |
360 | * and receive work queue entries as a single memory allocation so | |
361 | * it can be mmap'ed into user space. | |
362 | * Note that the wq array elements are variable size so you can't | |
363 | * just index into the array to get the N'th element; | |
364 | * use get_rwqe_ptr() instead. | |
365 | */ | |
366 | struct qib_rwq { | |
367 | u32 head; /* new work requests posted to the head */ | |
368 | u32 tail; /* receives pull requests from here. */ | |
369 | struct qib_rwqe wq[0]; | |
370 | }; | |
371 | ||
372 | struct qib_rq { | |
373 | struct qib_rwq *wq; | |
f931551b RC |
374 | u32 size; /* size of RWQE array */ |
375 | u8 max_sge; | |
1c94283d MM |
376 | spinlock_t lock /* protect changes in this struct */ |
377 | ____cacheline_aligned_in_smp; | |
f931551b RC |
378 | }; |
379 | ||
380 | struct qib_srq { | |
381 | struct ib_srq ibsrq; | |
382 | struct qib_rq rq; | |
383 | struct qib_mmap_info *ip; | |
384 | /* send signal when number of RWQEs < limit */ | |
385 | u32 limit; | |
386 | }; | |
387 | ||
388 | struct qib_sge_state { | |
389 | struct qib_sge *sg_list; /* next SGE to be used if any */ | |
390 | struct qib_sge sge; /* progress state for the current SGE */ | |
391 | u32 total_len; | |
392 | u8 num_sge; | |
393 | }; | |
394 | ||
/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct qib_ack_entry {
        u8 opcode;
        u8 sent;
        u32 psn;
        u32 lpsn;
        union {
                struct qib_sge rdma_sge;
                u64 atomic_data;
        };
};
409 | ||
410 | /* | |
411 | * Variables prefixed with s_ are for the requester (sender). | |
412 | * Variables prefixed with r_ are for the responder (receiver). | |
413 | * Variables prefixed with ack_ are for responder replies. | |
414 | * | |
415 | * Common variables are protected by both r_rq.lock and s_lock in that order | |
416 | * which only happens in modify_qp() or changing the QP 'state'. | |
417 | */ | |
struct qib_qp {
        struct ib_qp ibqp;
        /* read mostly fields above and below */
        struct ib_ah_attr remote_ah_attr;
        struct ib_ah_attr alt_ah_attr;
        struct qib_qp *next;            /* linked list for QPN hash table */
        struct qib_swqe *s_wq;          /* send work queue */
        struct qib_mmap_info *ip;
        struct qib_ib_header *s_hdr;    /* next packet header to send */
        unsigned long timeout_jiffies;  /* computed from timeout */

        enum ib_mtu path_mtu;
        u32 remote_qpn;
        u32 pmtu;               /* decoded from path_mtu */
        u32 qkey;               /* QKEY for this QP (for UD or RD) */
        u32 s_size;             /* send work queue size */
        u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */

        u8 state;               /* QP state */
        u8 qp_access_flags;
        u8 alt_timeout;         /* Alternate path timeout for this QP */
        u8 timeout;             /* Timeout for this QP */
        u8 s_srate;
        u8 s_mig_state;
        u8 port_num;
        u8 s_pkey_index;        /* PKEY index to use */
        u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
        u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
        u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
        u8 s_retry_cnt;         /* number of times to retry */
        u8 s_rnr_retry_cnt;
        u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
        u8 s_max_sge;           /* size of s_wq->sg_list */
        u8 s_draining;

        /* start of read/write fields */

        atomic_t refcount ____cacheline_aligned_in_smp;
        wait_queue_head_t wait;

        struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
                ____cacheline_aligned_in_smp;
        struct qib_sge_state s_rdma_read_sge;

        spinlock_t r_lock ____cacheline_aligned_in_smp;  /* used for APM */
        unsigned long r_aflags;
        u64 r_wr_id;            /* ID for current receive WQE */
        u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
        u32 r_len;              /* total length of r_sge */
        u32 r_rcv_len;          /* receive data len processed */
        u32 r_psn;              /* expected rcv packet sequence number */
        u32 r_msn;              /* message sequence number */

        u8 r_state;             /* opcode of last packet received */
        u8 r_flags;
        u8 r_head_ack_queue;    /* index into s_ack_queue[] */

        struct list_head rspwait;       /* link for waiting to respond */
477 | ||
478 | struct qib_sge_state r_sge; /* current receive data */ | |
479 | struct qib_rq r_rq; /* receive work queue */ | |
480 | ||
481 | spinlock_t s_lock ____cacheline_aligned_in_smp; | |
f931551b | 482 | struct qib_sge_state *s_cur_sge; |
1c94283d | 483 | u32 s_flags; |
f931551b | 484 | struct qib_verbs_txreq *s_tx; |
1c94283d | 485 | struct qib_swqe *s_wqe; |
f931551b | 486 | struct qib_sge_state s_sge; /* current send request data */ |
1c94283d | 487 | struct qib_mregion *s_rdma_mr; |
f931551b | 488 | atomic_t s_dma_busy; |
f931551b RC |
489 | u32 s_cur_size; /* size of send packet in bytes */ |
490 | u32 s_len; /* total length of s_sge */ | |
491 | u32 s_rdma_read_len; /* total length of s_rdma_read_sge */ | |
492 | u32 s_next_psn; /* PSN for next request */ | |
493 | u32 s_last_psn; /* last response PSN processed */ | |
494 | u32 s_sending_psn; /* lowest PSN that is being sent */ | |
495 | u32 s_sending_hpsn; /* highest PSN that is being sent */ | |
496 | u32 s_psn; /* current packet sequence number */ | |
497 | u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */ | |
498 | u32 s_ack_psn; /* PSN for acking sends and RDMA writes */ | |
1c94283d MM |
499 | u32 s_head; /* new entries added here */ |
500 | u32 s_tail; /* next entry to process */ | |
501 | u32 s_cur; /* current work queue entry */ | |
502 | u32 s_acked; /* last un-ACK'ed entry */ | |
503 | u32 s_last; /* last completed entry */ | |
504 | u32 s_ssn; /* SSN of tail entry */ | |
505 | u32 s_lsn; /* limit sequence number (credit) */ | |
f931551b RC |
506 | u16 s_hdrwords; /* size of s_hdr in 32 bit words */ |
507 | u16 s_rdma_ack_cnt; | |
f931551b RC |
508 | u8 s_state; /* opcode of last packet sent */ |
509 | u8 s_ack_state; /* opcode of packet to ACK */ | |
510 | u8 s_nak_state; /* non-zero if NAK is pending */ | |
f931551b | 511 | u8 r_nak_state; /* non-zero if NAK is pending */ |
f931551b RC |
512 | u8 s_retry; /* requester retry counter */ |
513 | u8 s_rnr_retry; /* requester RNR retry counter */ | |
f931551b RC |
514 | u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */ |
515 | u8 s_tail_ack_queue; /* index into s_ack_queue[] */ | |
1c94283d MM |
516 | |
517 | struct qib_sge_state s_ack_rdma_sge; | |
518 | struct timer_list s_timer; | |
519 | struct list_head iowait; /* link for wait PIO buf */ | |
520 | ||
521 | struct work_struct s_work; | |
522 | ||
523 | wait_queue_head_t wait_dma; | |
524 | ||
525 | struct qib_sge r_sg_list[0] /* verified SGEs */ | |
526 | ____cacheline_aligned_in_smp; | |
f931551b RC |
527 | }; |
528 | ||
529 | /* | |
530 | * Atomic bit definitions for r_aflags. | |
531 | */ | |
532 | #define QIB_R_WRID_VALID 0 | |
533 | #define QIB_R_REWIND_SGE 1 | |
534 | ||
535 | /* | |
536 | * Bit definitions for r_flags. | |
537 | */ | |
538 | #define QIB_R_REUSE_SGE 0x01 | |
539 | #define QIB_R_RDMAR_SEQ 0x02 | |
540 | #define QIB_R_RSP_NAK 0x04 | |
541 | #define QIB_R_RSP_SEND 0x08 | |
542 | #define QIB_R_COMM_EST 0x10 | |
543 | ||

/*
 * Bit definitions for s_flags.
 *
 * QIB_S_SIGNAL_REQ_WR - set if completions are generated only for
 *                       send WRs that request them
 * QIB_S_BUSY - send tasklet is processing the QP
 * QIB_S_TIMER - the RC retry timer is active
 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * QIB_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * QIB_S_WAIT_RNR - waiting for RNR timeout
 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 */
#define QIB_S_SIGNAL_REQ_WR     0x0001
#define QIB_S_BUSY              0x0002
#define QIB_S_TIMER             0x0004
#define QIB_S_RESP_PENDING      0x0008
#define QIB_S_ACK_PENDING       0x0010
#define QIB_S_WAIT_FENCE        0x0020
#define QIB_S_WAIT_RDMAR        0x0040
#define QIB_S_WAIT_RNR          0x0080
#define QIB_S_WAIT_SSN_CREDIT   0x0100
#define QIB_S_WAIT_DMA          0x0200
#define QIB_S_WAIT_PIO          0x0400
#define QIB_S_WAIT_TX           0x0800
#define QIB_S_WAIT_DMA_DESC     0x1000
#define QIB_S_WAIT_KMEM         0x2000
#define QIB_S_WAIT_PSN          0x4000
#define QIB_S_WAIT_ACK          0x8000
#define QIB_S_SEND_ONE          0x10000
#define QIB_S_UNLIMITED_CREDIT  0x20000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
        QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
        QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
        QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)

#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)

#define QIB_PSN_CREDIT  16

/*
 * Since struct qib_swqe is not a fixed size, we can't simply index into
 * struct qib_qp.s_wq.  This function does the array index computation.
 */
static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
                                            unsigned n)
{
        return (struct qib_swqe *)((char *)qp->s_wq +
                                   (sizeof(struct qib_swqe) +
                                    qp->s_max_sge *
                                    sizeof(struct qib_sge)) * n);
}
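
/*
 * For example, with s_max_sge == 2 each entry occupies
 * sizeof(struct qib_swqe) + 2 * sizeof(struct qib_sge) bytes,
 * and entry n starts at that stride times n past s_wq.
 */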
615 | ||
616 | /* | |
617 | * Since struct qib_rwqe is not a fixed size, we can't simply index into | |
618 | * struct qib_rwq.wq. This function does the array index computation. | |
619 | */ | |
620 | static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n) | |
621 | { | |
622 | return (struct qib_rwqe *) | |
623 | ((char *) rq->wq->wq + | |
624 | (sizeof(struct qib_rwqe) + | |
625 | rq->max_sge * sizeof(struct ib_sge)) * n); | |
626 | } | |
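
/*
 * Note the double indirection: rq->wq is the mmap-able struct qib_rwq
 * holding the head/tail indices, and rq->wq->wq is its flexible array
 * of variable-size RWQEs.
 */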
627 | ||
628 | /* | |
629 | * QPN-map pages start out as NULL, they get allocated upon | |
630 | * first use and are never deallocated. This way, | |
631 | * large bitmaps are not allocated unless large numbers of QPs are used. | |
632 | */ | |
struct qpn_map {
        void *page;
};

struct qib_qpn_table {
        spinlock_t lock;        /* protect changes in this struct */
        unsigned flags;         /* flags for QP0/1 allocated for each port */
        u32 last;               /* last QP number allocated */
        u32 nmaps;              /* size of the map table */
        u16 limit;
        u16 mask;
        /* bit map of free QP numbers other than 0/1 */
        struct qpn_map map[QPNMAP_ENTRIES];
};

struct qib_lkey_table {
        spinlock_t lock;        /* protect changes in this struct */
        u32 next;               /* next unused index (speeds search) */
        u32 gen;                /* generation count */
        u32 max;                /* size of the table */
        struct qib_mregion **table;
};

struct qib_opcode_stats {
        u64 n_packets;          /* number of packets */
        u64 n_bytes;            /* total number of bytes */
};

struct qib_ibport {
        struct qib_qp *qp0;
        struct qib_qp *qp1;
        struct ib_mad_agent *send_agent;        /* agent for SMI (traps) */
        struct qib_ah *sm_ah;
        struct qib_ah *smi_ah;
        struct rb_root mcast_tree;
        spinlock_t lock;        /* protect changes in this struct */

        /* non-zero when timer is set */
        unsigned long mkey_lease_timeout;
        unsigned long trap_timeout;
        __be64 gid_prefix;      /* in network order */
        __be64 mkey;
        __be64 guids[QIB_GUIDS_PER_PORT - 1];   /* writable GUIDs */
        u64 tid;                /* TID for traps */
        u64 n_unicast_xmit;     /* total unicast packets sent */
        u64 n_unicast_rcv;      /* total unicast packets received */
        u64 n_multicast_xmit;   /* total multicast packets sent */
        u64 n_multicast_rcv;    /* total multicast packets received */
        u64 z_symbol_error_counter;             /* starting count for PMA */
        u64 z_link_error_recovery_counter;      /* starting count for PMA */
        u64 z_link_downed_counter;              /* starting count for PMA */
        u64 z_port_rcv_errors;                  /* starting count for PMA */
        u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
        u64 z_port_xmit_discards;               /* starting count for PMA */
        u64 z_port_xmit_data;                   /* starting count for PMA */
        u64 z_port_rcv_data;                    /* starting count for PMA */
        u64 z_port_xmit_packets;                /* starting count for PMA */
        u64 z_port_rcv_packets;                 /* starting count for PMA */
        u32 z_local_link_integrity_errors;      /* starting count for PMA */
        u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
        u32 z_vl15_dropped;                     /* starting count for PMA */
        u32 n_rc_resends;
        u32 n_rc_acks;
        u32 n_rc_qacks;
        u32 n_rc_delayed_comp;
        u32 n_seq_naks;
        u32 n_rdma_seq;
        u32 n_rnr_naks;
        u32 n_other_naks;
        u32 n_loop_pkts;
        u32 n_pkt_drops;
        u32 n_vl15_dropped;
        u32 n_rc_timeouts;
        u32 n_dmawait;
        u32 n_unaligned;
        u32 n_rc_dupreq;
        u32 n_rc_seqnak;
        u32 port_cap_flags;
        u32 pma_sample_start;
        u32 pma_sample_interval;
        __be16 pma_counter_select[5];
        u16 pma_tag;
        u16 pkey_violations;
        u16 qkey_violations;
        u16 mkey_violations;
        u16 mkey_lease_period;
        u16 sm_lid;
        u16 repress_traps;
        u8 sm_sl;
        u8 mkeyprot;
        u8 subnet_timeout;
        u8 vl_high_limit;
        u8 sl_to_vl[16];

        struct qib_opcode_stats opstats[128];
};

struct qib_ibdev {
        struct ib_device ibdev;
        struct list_head pending_mmaps;
        spinlock_t mmap_offset_lock;    /* protect mmap_offset */
        u32 mmap_offset;
        struct qib_mregion *dma_mr;

        /* QP numbers are shared by all IB ports */
        struct qib_qpn_table qpn_table;
        struct qib_lkey_table lk_table;
        struct list_head piowait;       /* list for wait PIO buf */
        struct list_head dmawait;       /* list for wait DMA */
        struct list_head txwait;        /* list for wait qib_verbs_txreq */
        struct list_head memwait;       /* list for wait kernel memory */
        struct list_head txreq_free;
        struct timer_list mem_timer;
        struct qib_qp **qp_table;
        struct qib_pio_header *pio_hdrs;
        dma_addr_t pio_hdrs_phys;
        /* list of QPs waiting for RNR timer */
        spinlock_t pending_lock;        /* protect wait lists, PMA counters, etc. */
        u32 qp_table_size;      /* size of the hash table */
        u32 qp_rnd;             /* random bytes for hash */
        spinlock_t qpt_lock;

        u32 n_piowait;
        u32 n_txwait;

        u32 n_pds_allocated;    /* number of PDs allocated for device */
        spinlock_t n_pds_lock;
        u32 n_ahs_allocated;    /* number of AHs allocated for device */
        spinlock_t n_ahs_lock;
        u32 n_cqs_allocated;    /* number of CQs allocated for device */
        spinlock_t n_cqs_lock;
        u32 n_qps_allocated;    /* number of QPs allocated for device */
        spinlock_t n_qps_lock;
        u32 n_srqs_allocated;   /* number of SRQs allocated for device */
        spinlock_t n_srqs_lock;
        u32 n_mcast_grps_allocated;     /* number of mcast groups allocated */
        spinlock_t n_mcast_grps_lock;
};

struct qib_verbs_counters {
        u64 symbol_error_counter;
        u64 link_error_recovery_counter;
        u64 link_downed_counter;
        u64 port_rcv_errors;
        u64 port_rcv_remphys_errors;
        u64 port_xmit_discards;
        u64 port_xmit_data;
        u64 port_rcv_data;
        u64 port_xmit_packets;
        u64 port_rcv_packets;
        u32 local_link_integrity_errors;
        u32 excessive_buffer_overrun_errors;
        u32 vl15_dropped;
};

static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct qib_mr, ibmr);
}

static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct qib_pd, ibpd);
}

static inline struct qib_ah *to_iah(struct ib_ah *ibah)
{
        return container_of(ibah, struct qib_ah, ibah);
}

static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct qib_cq, ibcq);
}

static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct qib_srq, ibsrq);
}

static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct qib_qp, ibqp);
}

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct qib_ibdev, ibdev);
}

/*
 * Send if not busy or waiting for I/O and either
 * an RC response is pending or we can process send work requests.
 */
static inline int qib_send_ok(struct qib_qp *qp)
{
        return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
                (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
                 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}
833 | ||
f931551b RC |
834 | extern struct workqueue_struct *qib_cq_wq; |
835 | ||
836 | /* | |
837 | * This must be called with s_lock held. | |
838 | */ | |
839 | static inline void qib_schedule_send(struct qib_qp *qp) | |
840 | { | |
2528ea60 | 841 | if (qib_send_ok(qp)) |
f0626710 | 842 | queue_work(ib_wq, &qp->s_work); |
f931551b RC |
843 | } |
844 | ||
845 | static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) | |
846 | { | |
847 | u16 p1 = pkey1 & 0x7FFF; | |
848 | u16 p2 = pkey2 & 0x7FFF; | |
849 | ||
850 | /* | |
851 | * Low 15 bits must be non-zero and match, and | |
852 | * one of the two must be a full member. | |
853 | */ | |
854 | return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0); | |
855 | } | |
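
/*
 * Bit 15 of a P_Key is the membership bit: set for a full member, clear
 * for a limited member, hence the signed (__s16) < 0 tests above.
 */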
856 | ||
857 | void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, | |
858 | u32 qp1, u32 qp2, __be16 lid1, __be16 lid2); | |
859 | void qib_cap_mask_chg(struct qib_ibport *ibp); | |
860 | void qib_sys_guid_chg(struct qib_ibport *ibp); | |
861 | void qib_node_desc_chg(struct qib_ibport *ibp); | |
862 | int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, | |
863 | struct ib_wc *in_wc, struct ib_grh *in_grh, | |
864 | struct ib_mad *in_mad, struct ib_mad *out_mad); | |
865 | int qib_create_agents(struct qib_ibdev *dev); | |
866 | void qib_free_agents(struct qib_ibdev *dev); | |
867 | ||
/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int qib_cmp24(u32 a, u32 b)
{
        return (((int) a) - ((int) b)) << 8;
}
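
/*
 * The left shift by 8 moves bit 23 of the difference into the sign bit,
 * so 24-bit PSNs compare correctly even across wraparound as long as
 * they differ by less than 2^23.
 */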
876 | ||
877 | struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid); | |
878 | ||
879 | int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords, | |
880 | u64 *rwords, u64 *spkts, u64 *rpkts, | |
881 | u64 *xmit_wait); | |
882 | ||
883 | int qib_get_counters(struct qib_pportdata *ppd, | |
884 | struct qib_verbs_counters *cntrs); | |
885 | ||
886 | int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); | |
887 | ||
888 | int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); | |
889 | ||
890 | int qib_mcast_tree_empty(struct qib_ibport *ibp); | |
891 | ||
892 | __be32 qib_compute_aeth(struct qib_qp *qp); | |
893 | ||
894 | struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn); | |
895 | ||
896 | struct ib_qp *qib_create_qp(struct ib_pd *ibpd, | |
897 | struct ib_qp_init_attr *init_attr, | |
898 | struct ib_udata *udata); | |
899 | ||
900 | int qib_destroy_qp(struct ib_qp *ibqp); | |
901 | ||
902 | int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err); | |
903 | ||
904 | int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |
905 | int attr_mask, struct ib_udata *udata); | |
906 | ||
907 | int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |
908 | int attr_mask, struct ib_qp_init_attr *init_attr); | |
909 | ||
910 | unsigned qib_free_all_qps(struct qib_devdata *dd); | |
911 | ||
912 | void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt); | |
913 | ||
914 | void qib_free_qpn_table(struct qib_qpn_table *qpt); | |
915 | ||
916 | void qib_get_credit(struct qib_qp *qp, u32 aeth); | |
917 | ||
918 | unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult); | |
919 | ||
920 | void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail); | |
921 | ||
922 | void qib_put_txreq(struct qib_verbs_txreq *tx); | |
923 | ||
924 | int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr, | |
925 | u32 hdrwords, struct qib_sge_state *ss, u32 len); | |
926 | ||
927 | void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, | |
928 | int release); | |
929 | ||
930 | void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release); | |
931 | ||
932 | void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |
933 | int has_grh, void *data, u32 tlen, struct qib_qp *qp); | |
934 | ||
935 | void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, | |
936 | int has_grh, void *data, u32 tlen, struct qib_qp *qp); | |
937 | ||
938 | int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); | |
939 | ||
940 | void qib_rc_rnr_retry(unsigned long arg); | |
941 | ||
942 | void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr); | |
943 | ||
944 | void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err); | |
945 | ||
946 | int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr); | |
947 | ||
948 | void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |
949 | int has_grh, void *data, u32 tlen, struct qib_qp *qp); | |
950 | ||
6a82649f | 951 | int qib_alloc_lkey(struct qib_mregion *mr, int dma_region); |
f931551b | 952 | |
6a82649f | 953 | void qib_free_lkey(struct qib_mregion *mr); |
f931551b RC |
954 | |
955 | int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | |
956 | struct qib_sge *isge, struct ib_sge *sge, int acc); | |
957 | ||
958 | int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | |
959 | u32 len, u64 vaddr, u32 rkey, int acc); | |
960 | ||
961 | int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |
962 | struct ib_recv_wr **bad_wr); | |
963 | ||
964 | struct ib_srq *qib_create_srq(struct ib_pd *ibpd, | |
965 | struct ib_srq_init_attr *srq_init_attr, | |
966 | struct ib_udata *udata); | |
967 | ||
968 | int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | |
969 | enum ib_srq_attr_mask attr_mask, | |
970 | struct ib_udata *udata); | |
971 | ||
972 | int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); | |
973 | ||
974 | int qib_destroy_srq(struct ib_srq *ibsrq); | |
975 | ||
976 | void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig); | |
977 | ||
978 | int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); | |
979 | ||
980 | struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries, | |
981 | int comp_vector, struct ib_ucontext *context, | |
982 | struct ib_udata *udata); | |
983 | ||
984 | int qib_destroy_cq(struct ib_cq *ibcq); | |
985 | ||
986 | int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags); | |
987 | ||
988 | int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata); | |
989 | ||
990 | struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc); | |
991 | ||
992 | struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd, | |
993 | struct ib_phys_buf *buffer_list, | |
994 | int num_phys_buf, int acc, u64 *iova_start); | |
995 | ||
996 | struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |
997 | u64 virt_addr, int mr_access_flags, | |
998 | struct ib_udata *udata); | |
999 | ||
1000 | int qib_dereg_mr(struct ib_mr *ibmr); | |
1001 | ||
1002 | struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len); | |
1003 | ||
1004 | struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list( | |
1005 | struct ib_device *ibdev, int page_list_len); | |
1006 | ||
1007 | void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl); | |
1008 | ||
1009 | int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr); | |
1010 | ||
1011 | struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | |
1012 | struct ib_fmr_attr *fmr_attr); | |
1013 | ||
1014 | int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, | |
1015 | int list_len, u64 iova); | |
1016 | ||
1017 | int qib_unmap_fmr(struct list_head *fmr_list); | |
1018 | ||
1019 | int qib_dealloc_fmr(struct ib_fmr *ibfmr); | |
1020 | ||
6a82649f MM |
1021 | static inline void qib_get_mr(struct qib_mregion *mr) |
1022 | { | |
1023 | atomic_inc(&mr->refcount); | |
1024 | } | |
1025 | ||
8aac4cc3 MM |
1026 | void mr_rcu_callback(struct rcu_head *list); |
1027 | ||
static inline void qib_put_mr(struct qib_mregion *mr)
{
        if (unlikely(atomic_dec_and_test(&mr->refcount)))
                call_rcu(&mr->list, mr_rcu_callback);
}

static inline void qib_put_ss(struct qib_sge_state *ss)
{
        while (ss->num_sge) {
                qib_put_mr(ss->sge.mr);
                if (--ss->num_sge)
                        ss->sge = *ss->sg_list++;
        }
}

void qib_release_mmap_info(struct kref *ref);

struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
                                           struct ib_ucontext *context,
                                           void *obj);

void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
                          u32 size, void *obj);

int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);

void qib_migrate_qp(struct qib_qp *qp);

int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                      int has_grh, struct qib_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
                 struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
                         u32 bth0, u32 bth2);

void qib_do_send(struct work_struct *work);

void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
                       enum ib_wc_status status);

void qib_send_rc_ack(struct qib_qp *qp);

int qib_make_rc_req(struct qib_qp *qp);

int qib_make_uc_req(struct qib_qp *qp);

int qib_make_ud_req(struct qib_qp *qp);

int qib_register_ib_device(struct qib_devdata *);

void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);

unsigned qib_get_pkey(struct qib_ibport *, unsigned);

extern const enum ib_wc_opcode ib_qib_wc_opcode[];

/*
 * Below are the HCA-independent IB PhysPortState values, returned
 * by the f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13

extern const int ib_qib_state_ops[];

extern __be64 ib_qib_sys_image_guid;    /* in network order */

extern unsigned int ib_qib_lkey_table_size;

extern unsigned int ib_qib_max_cqes;

extern unsigned int ib_qib_max_cqs;

extern unsigned int ib_qib_max_qp_wrs;

extern unsigned int ib_qib_max_qps;

extern unsigned int ib_qib_max_sges;

extern unsigned int ib_qib_max_mcast_grps;

extern unsigned int ib_qib_max_mcast_qp_attached;

extern unsigned int ib_qib_max_srqs;

extern unsigned int ib_qib_max_srq_sges;

extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

extern struct ib_dma_mapping_ops qib_dma_mapping_ops;

#endif /* QIB_VERBS_H */