/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;
#define QIB_MAX_RDMA_ATOMIC     16
#define QIB_GUIDS_PER_PORT      5

#define QPN_MAX                 (1 << 24)
#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
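/*
 * Worked example (illustrative): with a 4 KiB PAGE_SIZE and
 * BITS_PER_BYTE == 8, each qpn_map page below holds 4096 * 8 = 32768
 * QPN bits, so QPNMAP_ENTRIES = (1 << 24) / 4096 / 8 = 512 map pages
 * cover the whole 24-bit QPN space.
 */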
/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define QIB_UVERBS_ABI_VERSION 2
/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE      (IB_CQ_NEXT_COMP + 1)
#define IB_SEQ_NAK      (3 << 29)
/* AETH NAK opcode values */
#define IB_RNR_NAK                      0x20
#define IB_NAK_PSN_ERROR                0x60
#define IB_NAK_INVALID_REQUEST          0x61
#define IB_NAK_REMOTE_ACCESS_ERROR      0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST       0x64
/* Flags for checking QP state (see ib_qib_state_ops[]) */
#define QIB_POST_SEND_OK                0x01
#define QIB_POST_RECV_OK                0x02
#define QIB_PROCESS_RECV_OK             0x04
#define QIB_PROCESS_SEND_OK             0x08
#define QIB_PROCESS_NEXT_SEND_OK        0x10
#define QIB_FLUSH_SEND                  0x20
#define QIB_FLUSH_RECV                  0x40
#define QIB_PROCESS_OR_FLUSH_SEND \
	(QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)
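/*
 * Illustrative use (a sketch, not a declaration in this header): callers
 * are expected to test these bits against ib_qib_state_ops[] for the
 * current QP state, e.g. in a post-send path:
 *
 *	if (!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK))
 *		return -EINVAL;
 */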
/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE       0x00
#define IB_PMA_SAMPLE_STATUS_STARTED    0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING    0x02
/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA   cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA    cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS   cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS    cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT   cpu_to_be16(0x0005)
#define QIB_VENDOR_IPG          cpu_to_be16(0xFFA0)
#define IB_BTH_REQ_ACK          (1 << 31)
#define IB_BTH_SOLICITED        (1 << 23)
#define IB_BTH_MIG_REQ          (1 << 22)
/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)
#define IB_GRH_VERSION          6
#define IB_GRH_VERSION_MASK     0xF
#define IB_GRH_VERSION_SHIFT    28
#define IB_GRH_TCLASS_MASK      0xFF
#define IB_GRH_TCLASS_SHIFT     20
#define IB_GRH_FLOW_MASK        0xFFFFF
#define IB_GRH_FLOW_SHIFT       0
#define IB_GRH_NEXT_HDR         0x1B
#define IB_DEFAULT_GID_PREFIX   cpu_to_be64(0xfe80000000000000ULL)
/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0       1
#define IB_VL_VL0_1     2
#define IB_VL_VL0_3     3
#define IB_VL_VL0_7     4
#define IB_VL_VL0_14    5
static inline int qib_num_vls(int vls)
{
	switch (vls) {
	default:
	case IB_VL_VL0:
		return 1;
	case IB_VL_VL0_1:
		return 2;
	case IB_VL_VL0_3:
		return 4;
	case IB_VL_VL0_7:
		return 8;
	case IB_VL_VL0_14:
		return 15;
	}
}
struct ib_atomic_eth {
	__be32 vaddr[2];        /* unaligned so access as 2 32-bit words */
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __packed;
struct qib_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be32 atomic_ack_eth[2];
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __packed;
/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct qib_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct qib_other_headers oth;
		} l;
		struct qib_other_headers oth;
	} u;
} __packed;
struct qib_pio_header {
	__le32 pbc[2];
	struct qib_ib_header hdr;
} __packed;
/*
 * There is one struct qib_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct qib_mcast_qp.
 */
struct qib_mcast_qp {
	struct list_head list;
	struct qib_qp *qp;
};

struct qib_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};
/* Protection domain */
struct qib_pd {
	struct ib_pd ibpd;
	int user;               /* non-zero if created from user space */
};
/* Address Handle */
struct qib_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
	atomic_t refcount;
};
/*
 * This structure is used by qib_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct qib_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};
/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct qib_cq_wc {
	u32 head;               /* index of next entry to fill */
	u32 tail;               /* index of next ib_poll_cq() entry */
	union {
		/* these are actually size ibcq.cqe + 1 */
		struct ib_uverbs_wc uqueue[0];
		struct ib_wc kqueue[0];
	};
};
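/*
 * Sketch of the assumed producer-side index update: the ring really has
 * ibcq.cqe + 1 slots, so head wraps at ibcq.cqe rather than at a power
 * of two:
 *
 *	next = (head == cq->ibcq.cqe) ? 0 : head + 1;
 */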
/*
 * The completion queue structure.
 */
struct qib_cq {
	struct ib_cq ibcq;
	struct kthread_work comptask;
	struct qib_devdata *dd;
	spinlock_t lock;        /* protect changes in this struct */
	u8 notify;
	u8 triggered;
	struct qib_cq_wc *queue;
	struct qib_mmap_info *ip;
};
/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct qib_seg {
	void *vaddr;
	size_t length;
};

/* The number of qib_segs that fit in a page. */
#define QIB_SEGSZ     (PAGE_SIZE / sizeof(struct qib_seg))
struct qib_segarray {
	struct qib_seg segs[QIB_SEGSZ];
};
struct qib_mregion {
	struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
	u64 user_base;          /* User's address for this region */
	u64 iova;               /* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;             /* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;           /* number of qib_segs in all the arrays */
	u32 mapsz;              /* size of the map array */
	u8  page_shift;         /* 0 - non-uniform/non-power-of-2 sizes */
	u8  lkey_published;     /* in global table */
	struct completion comp; /* complete when refcount goes to zero */
	struct rcu_head list;
	atomic_t refcount;
	struct qib_segarray *map[0];    /* the segments */
};
/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct qib_sge {
	struct qib_mregion *mr;
	void *vaddr;            /* kernel virtual address of segment */
	u32 sge_length;         /* length of the SGE */
	u32 length;             /* remaining length of the segment */
	u16 m;                  /* current index: mr->map[m] */
	u16 n;                  /* current index: mr->map[m]->segs[n] */
};
/* Memory region */
struct qib_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct qib_mregion mr;  /* must be last */
};
/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct qib_swqe {
	struct ib_send_wr wr;   /* don't use wr.sg_list */
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	struct qib_sge sg_list[0];
};
/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct qib_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};
/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct qib_rwq {
	u32 head;               /* new work requests posted to the head */
	u32 tail;               /* receives pull requests from here. */
	struct qib_rwqe wq[0];
};
struct qib_rq {
	struct qib_rwq *wq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	spinlock_t lock /* protect changes in this struct */
		____cacheline_aligned_in_smp;
};

struct qib_srq {
	struct ib_srq ibsrq;
	struct qib_rq rq;
	struct qib_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};
struct qib_sge_state {
	struct qib_sge *sg_list;        /* next SGE to be used if any */
	struct qib_sge sge;     /* progress state for the current SGE */
	u32 total_len;
	u8 num_sge;
};
/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct qib_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	u32 lpsn;
	union {
		struct qib_sge rdma_sge;
		u64 atomic_data;
	};
};
/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct qib_qp {
	struct ib_qp ibqp;
	/* read mostly fields above and below */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct qib_qp __rcu *next;      /* link list for QPN hash table */
	struct qib_swqe *s_wq;          /* send work queue */
	struct qib_mmap_info *ip;
	struct qib_ib_header *s_hdr;    /* next packet header to send */
	unsigned long timeout_jiffies;  /* computed from timeout */

	enum ib_mtu path_mtu;
	u32 remote_qpn;
	u32 pmtu;               /* decoded from path_mtu */
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */
	u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */

	u8 state;               /* QP state */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port;                /* port for this QP */
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */

	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
		____cacheline_aligned_in_smp;
	struct qib_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;	/* used for APM */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_psn;              /* expected rcv packet sequence number */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */

	struct list_head rspwait;       /* link for waiting to respond */

	struct qib_sge_state r_sge;     /* current receive data */
	struct qib_rq r_rq;             /* receive work queue */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	struct qib_sge_state *s_cur_sge;
	u32 s_flags;
	struct qib_verbs_txreq *s_tx;
	struct qib_swqe *s_wqe;
	struct qib_sge_state s_sge;     /* current send request data */
	struct qib_mregion *s_rdma_mr;
	atomic_t s_dma_busy;
	u32 s_cur_size;         /* size of send packet in bytes */
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_head;             /* new entries added here */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_ssn;              /* SSN of tail entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

	struct qib_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct list_head iowait;        /* link for wait PIO buf */

	struct work_struct s_work;

	wait_queue_head_t wait_dma;

	struct qib_sge r_sg_list[0]     /* verified SGEs */
		____cacheline_aligned_in_smp;
};
/*
 * Atomic bit definitions for r_aflags.
 */
#define QIB_R_WRID_VALID        0
#define QIB_R_REWIND_SGE        1
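/*
 * Illustrative use (a sketch): r_aflags is manipulated with the atomic
 * bitops, e.g.
 *
 *	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
 *		... generate a completion for the receive WQE ...
 */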
/*
 * Bit definitions for r_flags.
 */
#define QIB_R_REUSE_SGE 0x01
#define QIB_R_RDMAR_SEQ 0x02
#define QIB_R_RSP_NAK   0x04
#define QIB_R_RSP_SEND  0x08
#define QIB_R_COMM_EST  0x10
/*
 * Bit definitions for s_flags.
 *
 * QIB_S_SIGNAL_REQ_WR - set if QP send WRs carry their own completion
 *                       signaled flag
 * QIB_S_BUSY - send tasklet is processing the QP
 * QIB_S_TIMER - the RC retry timer is active
 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * QIB_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * QIB_S_WAIT_RNR - waiting for RNR timeout
 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 */
#define QIB_S_SIGNAL_REQ_WR     0x0001
#define QIB_S_BUSY              0x0002
#define QIB_S_TIMER             0x0004
#define QIB_S_RESP_PENDING      0x0008
#define QIB_S_ACK_PENDING       0x0010
#define QIB_S_WAIT_FENCE        0x0020
#define QIB_S_WAIT_RDMAR        0x0040
#define QIB_S_WAIT_RNR          0x0080
#define QIB_S_WAIT_SSN_CREDIT   0x0100
#define QIB_S_WAIT_DMA          0x0200
#define QIB_S_WAIT_PIO          0x0400
#define QIB_S_WAIT_TX           0x0800
#define QIB_S_WAIT_DMA_DESC     0x1000
#define QIB_S_WAIT_KMEM         0x2000
#define QIB_S_WAIT_PSN          0x4000
#define QIB_S_WAIT_ACK          0x8000
#define QIB_S_SEND_ONE          0x10000
#define QIB_S_UNLIMITED_CREDIT  0x20000
/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
	QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
	QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
	QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)

#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)
#define QIB_PSN_CREDIT  16
/*
 * Since struct qib_swqe is not a fixed size, we can't simply index into
 * struct qib_qp.s_wq.  This function does the array index computation.
 */
static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
					    unsigned n)
{
	return (struct qib_swqe *)((char *)qp->s_wq +
				   (sizeof(struct qib_swqe) +
				    qp->s_max_sge *
				    sizeof(struct qib_sge)) * n);
}
/*
 * Since struct qib_rwqe is not a fixed size, we can't simply index into
 * struct qib_rwq.wq.  This function does the array index computation.
 */
static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
{
	return (struct qib_rwqe *)
		((char *) rq->wq->wq +
		 (sizeof(struct qib_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
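/*
 * Example (illustrative): fetching the entry at the consumer index of a
 * QP's receive queue, assuming the caller already holds r_rq.lock:
 *
 *	struct qib_rwqe *wqe = get_rwqe_ptr(&qp->r_rq, qp->r_rq.wq->tail);
 */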
/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
	void *page;
};

struct qib_qpn_table {
	spinlock_t lock;        /* protect changes in this struct */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u16 mask;
	/* bit map of free QP numbers other than 0/1 */
	struct qpn_map map[QPNMAP_ENTRIES];
};
struct qib_lkey_table {
	spinlock_t lock;        /* protect changes in this struct */
	u32 next;               /* next unused index (speeds search) */
	u32 gen;                /* generation count */
	u32 max;                /* size of the table */
	struct qib_mregion __rcu **table;
};
struct qib_opcode_stats {
	u64 n_packets;          /* number of packets */
	u64 n_bytes;            /* total number of bytes */
};

struct qib_opcode_stats_perctx {
	struct qib_opcode_stats stats[128];
};
struct qib_ibport {
	struct qib_qp __rcu *qp0;
	struct qib_qp __rcu *qp1;
	struct ib_mad_agent *send_agent;        /* agent for SMI (traps) */
	struct qib_ah *sm_ah;
	struct qib_ah *smi_ah;
	struct rb_root mcast_tree;
	spinlock_t lock;        /* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;      /* in network order */
	__be64 mkey;
	__be64 guids[QIB_GUIDS_PER_PORT - 1];   /* writable GUIDs */
	u64 tid;                /* TID for traps */
	u64 n_unicast_xmit;     /* total unicast packets sent */
	u64 n_unicast_rcv;      /* total unicast packets received */
	u64 n_multicast_xmit;   /* total multicast packets sent */
	u64 n_multicast_rcv;    /* total multicast packets received */
	u64 z_symbol_error_counter;             /* starting count for PMA */
	u64 z_link_error_recovery_counter;      /* starting count for PMA */
	u64 z_link_downed_counter;              /* starting count for PMA */
	u64 z_port_rcv_errors;                  /* starting count for PMA */
	u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
	u64 z_port_xmit_discards;               /* starting count for PMA */
	u64 z_port_xmit_data;                   /* starting count for PMA */
	u64 z_port_rcv_data;                    /* starting count for PMA */
	u64 z_port_xmit_packets;                /* starting count for PMA */
	u64 z_port_rcv_packets;                 /* starting count for PMA */
	u32 z_local_link_integrity_errors;      /* starting count for PMA */
	u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
	u32 z_vl15_dropped;                     /* starting count for PMA */
	u32 n_rc_delayed_comp;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 mkey_lease_period;
};
struct qib_ibdev {
	struct ib_device ibdev;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock;    /* protect mmap_offset */
	u32 mmap_offset;
	struct qib_mregion __rcu *dma_mr;

	/* QP numbers are shared by all IB ports */
	struct qib_qpn_table qpn_table;
	struct qib_lkey_table lk_table;
	struct list_head piowait;       /* list for wait PIO buf */
	struct list_head dmawait;       /* list for wait DMA */
	struct list_head txwait;        /* list for wait qib_verbs_txreq */
	struct list_head memwait;       /* list for wait kernel memory */
	struct list_head txreq_free;
	struct timer_list mem_timer;
	struct qib_qp __rcu **qp_table;
	struct qib_pio_header *pio_hdrs;
	dma_addr_t pio_hdrs_phys;
	/* list of QPs waiting for RNR timer */
	spinlock_t pending_lock;        /* protect wait lists, PMA counters, etc. */
	u32 qp_table_size;      /* size of the hash table */
	u32 qp_rnd;             /* random bytes for hash */

	u32 n_pds_allocated;    /* number of PDs allocated for device */
	spinlock_t n_pds_lock;
	u32 n_ahs_allocated;    /* number of AHs allocated for device */
	spinlock_t n_ahs_lock;
	u32 n_cqs_allocated;    /* number of CQs allocated for device */
	spinlock_t n_cqs_lock;
	u32 n_qps_allocated;    /* number of QPs allocated for device */
	spinlock_t n_qps_lock;
	u32 n_srqs_allocated;   /* number of SRQs allocated for device */
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;
#ifdef CONFIG_DEBUG_FS
	/* per HCA debugfs */
	struct dentry *qib_ibdev_dbg;
#endif
};
struct qib_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};
static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct qib_mr, ibmr);
}

static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct qib_pd, ibpd);
}

static inline struct qib_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct qib_ah, ibah);
}

static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct qib_cq, ibcq);
}

static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct qib_srq, ibsrq);
}

static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct qib_qp, ibqp);
}

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct qib_ibdev, ibdev);
}
/*
 * Send if not busy or waiting for I/O and either
 * an RC response is pending or we can process send work requests.
 */
static inline int qib_send_ok(struct qib_qp *qp)
{
	return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
		(qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}
/*
 * This must be called with s_lock held.
 */
void qib_schedule_send(struct qib_qp *qp);
static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
	u16 p1 = pkey1 & 0x7FFF;
	u16 p2 = pkey2 & 0x7FFF;

	/*
	 * Low 15 bits must be non-zero and match, and
	 * one of the two must be a full member.
	 */
	return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}
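/*
 * Example (illustrative): qib_pkey_ok(0x8001, 0x0001) succeeds because
 * the low 15 bits match and the first key has the full-membership bit
 * (bit 15) set; qib_pkey_ok(0x0001, 0x0001) fails because neither key
 * is a full member.
 */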
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct qib_ibport *ibp);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		    struct ib_wc *in_wc, struct ib_grh *in_grh,
		    struct ib_mad *in_mad, struct ib_mad *out_mad);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);
/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer <, ==, or > than zero.
 */
static inline int qib_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}
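/*
 * The shift moves bit 23 of the difference into the sign bit, so on the
 * kernel's two's-complement arithmetic the comparison wraps modulo 2^24.
 * Example (illustrative): qib_cmp24(0, 0xFFFFFF) > 0, i.e. PSN 0 is
 * "after" PSN 0xFFFFFF across the wrap.
 */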
struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);
int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait);
int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs);

int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_mcast_tree_empty(struct qib_ibport *ibp);
__be32 qib_compute_aeth(struct qib_qp *qp);

struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);

struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata);

int qib_destroy_qp(struct ib_qp *ibqp);

int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);

int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata);

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned qib_free_all_qps(struct qib_devdata *dd);

void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);

void qib_free_qpn_table(struct qib_qpn_table *qpt);
#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter;

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev);

int qib_qp_iter_next(struct qib_qp_iter *iter);

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);

#endif
void qib_get_credit(struct qib_qp *qp, u32 aeth);

unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);

void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);

void qib_put_txreq(struct qib_verbs_txreq *tx);

int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct qib_sge_state *ss, u32 len);
void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
		  int release);
void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);
void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);
int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);

void qib_rc_rnr_retry(unsigned long arg);
void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);

void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);

int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);

void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);
int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);

void qib_free_lkey(struct qib_mregion *mr);

int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc);

int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);
int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr);

struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata);

int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata);

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int qib_destroy_srq(struct ib_srq *ibsrq);
int qib_cq_init(struct qib_devdata *dd);

void qib_cq_exit(struct qib_devdata *dd);

void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);

int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
			    int comp_vector, struct ib_ucontext *context,
			    struct ib_udata *udata);

int qib_destroy_cq(struct ib_cq *ibcq);

int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata);

int qib_dereg_mr(struct ib_mr *ibmr);

struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
				struct ib_device *ibdev, int page_list_len);

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);

int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);

struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr);

int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova);

int qib_unmap_fmr(struct list_head *fmr_list);

int qib_dealloc_fmr(struct ib_fmr *ibfmr);
static inline void qib_get_mr(struct qib_mregion *mr)
{
	atomic_inc(&mr->refcount);
}
void mr_rcu_callback(struct rcu_head *list);
static inline void qib_put_mr(struct qib_mregion *mr)
{
	if (unlikely(atomic_dec_and_test(&mr->refcount)))
		call_rcu(&mr->list, mr_rcu_callback);
}
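/*
 * Note: the final qib_put_mr() frees the region through call_rcu(), so
 * readers that found the qib_mregion under rcu_read_lock() can keep
 * using it until a grace period elapses.
 */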
static inline void qib_put_ss(struct qib_sge_state *ss)
{
	while (ss->num_sge) {
		qib_put_mr(ss->sge.mr);
		if (--ss->num_sge)
			ss->sge = *ss->sg_list++;
	}
}
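/*
 * Illustrative call site (a sketch): releasing the MR references still
 * held by a receive scatter/gather state after the payload has been
 * copied out:
 *
 *	qib_copy_sge(&qp->r_sge, data, tlen, 1);
 *	qib_put_ss(&qp->r_sge);
 */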
void qib_release_mmap_info(struct kref *ref);

struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
					   struct ib_ucontext *context,
					   void *obj);

void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
			  u32 size, void *obj);

int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);

void qib_migrate_qp(struct qib_qp *qp);

int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2);

void qib_do_send(struct work_struct *work);

void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status);

void qib_send_rc_ack(struct qib_qp *qp);

int qib_make_rc_req(struct qib_qp *qp);

int qib_make_uc_req(struct qib_qp *qp);

int qib_make_ud_req(struct qib_qp *qp);
int qib_register_ib_device(struct qib_devdata *);

void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);

unsigned qib_get_pkey(struct qib_ibport *, unsigned);

extern const enum ib_wc_opcode ib_qib_wc_opcode[];
/*
 * HCA-independent IB PhysPortState values, returned
 * by the f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13
extern const int ib_qib_state_ops[];

extern __be64 ib_qib_sys_image_guid;    /* in network order */

extern unsigned int ib_qib_lkey_table_size;

extern unsigned int ib_qib_max_cqes;

extern unsigned int ib_qib_max_cqs;

extern unsigned int ib_qib_max_qp_wrs;

extern unsigned int ib_qib_max_qps;

extern unsigned int ib_qib_max_sges;

extern unsigned int ib_qib_max_mcast_grps;

extern unsigned int ib_qib_max_mcast_qp_attached;

extern unsigned int ib_qib_max_srqs;

extern unsigned int ib_qib_max_srq_sges;

extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

extern struct ib_dma_mapping_ops qib_dma_mapping_ops;
#endif                          /* QIB_VERBS_H */