/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IPATH_VERBS_H
#define IPATH_VERBS_H
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

#include "ipath_kernel.h"
#define IPATH_MAX_RDMA_ATOMIC	4

#define QPN_MAX			(1 << 24)
#define QPNMAP_ENTRIES		(QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
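/*
 * Each qpn_map page holds PAGE_SIZE * BITS_PER_BYTE QPN bits, so
 * QPNMAP_ENTRIES pages are enough to cover the full 24-bit QPN space.
 */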
/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define IPATH_UVERBS_ABI_VERSION	2
/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE	(IB_CQ_NEXT_COMP + 1)
/* AETH NAK opcode values */
#define IB_RNR_NAK			0x20
#define IB_NAK_PSN_ERROR		0x60
#define IB_NAK_INVALID_REQUEST		0x61
#define IB_NAK_REMOTE_ACCESS_ERROR	0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR	0x63
#define IB_NAK_INVALID_RD_REQUEST	0x64
/* Flags for checking QP state (see ib_ipath_state_ops[]) */
#define IPATH_POST_SEND_OK		0x01
#define IPATH_POST_RECV_OK		0x02
#define IPATH_PROCESS_RECV_OK		0x04
#define IPATH_PROCESS_SEND_OK		0x08
#define IPATH_PROCESS_NEXT_SEND_OK	0x10
#define IPATH_FLUSH_SEND		0x20
#define IPATH_FLUSH_RECV		0x40
#define IPATH_PROCESS_OR_FLUSH_SEND \
	(IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND)
/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE	0x00
#define IB_PMA_SAMPLE_STATUS_STARTED	0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING	0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA	cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA	cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS	cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS	cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT	cpu_to_be16(0x0005)
struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __attribute__ ((packed));
struct ib_atomic_eth {
	__be32 vaddr[2];	/* unaligned so access as 2 32-bit words */
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __attribute__ ((packed));
struct ipath_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be32 atomic_ack_eth[2];
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __attribute__ ((packed));
/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data): LRH (8) + GRH (40) + BTH (12) + DETH (8).
 * Only the first 56 bytes of the IB header will be in the eager
 * header buffer.  The remaining 12 or 16 bytes are in the data buffer.
 */
struct ipath_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct ipath_other_headers oth;
		} l;
		struct ipath_other_headers oth;
	} u;
} __attribute__ ((packed));
struct ipath_pio_header {
	__le32 pbc[2];
	struct ipath_ib_header hdr;
} __attribute__ ((packed));
/*
 * There is one struct ipath_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct ipath_mcast_qp.
 */
struct ipath_mcast_qp {
	struct list_head list;
	struct ipath_qp *qp;
};

struct ipath_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};
/* Protection domain */
struct ipath_pd {
	struct ib_pd ibpd;
	int user;		/* non-zero if created from user space */
};

/* Address Handle */
struct ipath_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
};
/*
 * This structure is used by ipath_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct ipath_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};
/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct ipath_cq_wc {
	u32 head;		/* index of next entry to fill */
	u32 tail;		/* index of next ib_poll_cq() entry */
	union {
		/* these are actually size ibcq.cqe + 1 */
		struct ib_uverbs_wc uqueue[0];
		struct ib_wc kqueue[0];
	};
};
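/*
 * Minimal sketch (example helper, not part of the driver): advancing a
 * consumer's tail index in the mmap'ed ring above.  The queue really
 * holds ibcq.cqe + 1 entries, so the index wraps after slot "cqe".
 */
static inline u32 example_cq_next_tail(u32 tail, u32 cqe)
{
	/* slots 0..cqe are valid; wrap after the last one */
	return (tail == cqe) ? 0 : tail + 1;
}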
/*
 * The completion queue structure.
 */
struct ipath_cq {
	struct ib_cq ibcq;
	struct tasklet_struct comptask;
	spinlock_t lock;
	u8 notify;
	u8 triggered;
	struct ipath_cq_wc *queue;
	struct ipath_mmap_info *ip;
};
/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct ipath_seg {
	void *vaddr;
	size_t length;
};

/* The number of ipath_segs that fit in a page. */
#define IPATH_SEGSZ	(PAGE_SIZE / sizeof(struct ipath_seg))

struct ipath_segarray {
	struct ipath_seg segs[IPATH_SEGSZ];
};
struct ipath_mregion {
	struct ib_pd *pd;	/* shares refcnt of ibmr.pd */
	u64 user_base;		/* User's address for this region */
	u64 iova;		/* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;		/* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;		/* number of ipath_segs in all the arrays */
	u32 mapsz;		/* size of the map array */
	struct ipath_segarray *map[0];	/* the segments */
};
/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct ipath_sge {
	struct ipath_mregion *mr;
	void *vaddr;		/* kernel virtual address of segment */
	u32 sge_length;		/* length of the SGE */
	u32 length;		/* remaining length of the segment */
	u16 m;			/* current index: mr->map[m] */
	u16 n;			/* current index: mr->map[m]->segs[n] */
};

struct ipath_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct ipath_mregion mr;	/* must be last */
};
/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct ipath_swqe {
	struct ib_send_wr wr;	/* don't use wr.sg_list */
	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	struct ipath_sge sg_list[0];
};
/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct ipath_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};
/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct ipath_rwq {
	u32 head;		/* new work requests posted to the head */
	u32 tail;		/* receives pull requests from here. */
	struct ipath_rwqe wq[0];
};
struct ipath_rq {
	struct ipath_rwq *wq;
	spinlock_t lock;
	u32 size;		/* size of RWQE array */
	u8 max_sge;
};

struct ipath_srq {
	struct ib_srq ibsrq;
	struct ipath_rq rq;
	struct ipath_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};
struct ipath_sge_state {
	struct ipath_sge *sg_list;	/* next SGE to be used if any */
	struct ipath_sge sge;	/* progress state for the current SGE */
	u8 num_sge;
	u8 static_rate;
};
/*
 * This structure holds the information that the send tasklet needs
 * to send a RDMA read response or atomic operation.
 */
struct ipath_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	union {
		struct ipath_sge_state rdma_sge;
		u64 atomic_data;
	};
};
/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct ipath_qp {
	struct ib_qp ibqp;
	struct ipath_qp *next;		/* link list for QPN hash table */
	struct ipath_qp *timer_next;	/* link list for ipath_ib_timer() */
	struct ipath_qp *pio_next;	/* link for ipath_ib_piobufavail() */
	struct list_head piowait;	/* link for wait PIO buf */
	struct list_head timerwait;	/* link for waiting for timeouts */
	struct ib_ah_attr remote_ah_attr;
	struct ipath_ib_header s_hdr;	/* next packet header to send */
	atomic_t refcount;
	wait_queue_head_t wait;
	wait_queue_head_t wait_dma;
	struct tasklet_struct s_task;
	struct ipath_mmap_info *ip;
	struct ipath_sge_state *s_cur_sge;
	struct ipath_verbs_txreq *s_tx;
	struct ipath_sge_state s_sge;	/* current send request data */
	struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
	struct ipath_sge_state s_ack_rdma_sge;
	struct ipath_sge_state s_rdma_read_sge;
	struct ipath_sge_state r_sge;	/* current receive data */
	spinlock_t s_lock;
	atomic_t s_dma_busy;
	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
	u32 s_cur_size;		/* size of send packet in bytes */
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_psn;		/* current packet sequence number */
	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
	u64 r_wr_id;		/* ID for current receive WQE */
	unsigned long r_aflags;
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_psn;		/* expected rcv packet sequence number */
	u32 r_msn;		/* message sequence number */
	u8 state;		/* QP state */
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_state;		/* opcode of last packet received */
	u8 r_nak_state;		/* non-zero if NAK is pending */
	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
	u8 r_flags;
	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
	u8 r_head_ack_queue;	/* index into s_ack_queue[] */
	u8 qp_access_flags;
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_pkey_index;	/* PKEY index to use */
	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */
	u8 s_flags;
	u8 s_dmult;
	u8 s_draining;
	u8 timeout;		/* Timeout for this QP */
	enum ib_mtu path_mtu;
	u32 remote_qpn;
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */
	u32 s_head;		/* new entries added here */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_last;		/* last un-ACK'ed entry */
	u32 s_ssn;		/* SSN of tail entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	struct ipath_swqe *s_wq;	/* send work queue */
	struct ipath_swqe *s_wqe;
	struct ipath_sge *r_ud_sg_list;
	struct ipath_rq r_rq;		/* receive work queue */
	struct ipath_sge r_sg_list[0];	/* verified SGEs */
};
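/*
 * Minimal sketch (example helper, not part of the driver): s_ack_queue[]
 * above is a small ring of IPATH_MAX_RDMA_ATOMIC + 1 entries, so an
 * index such as s_head_ack_queue or s_tail_ack_queue advances like this.
 */
static inline u8 example_next_ack_queue_index(u8 n)
{
	return (n == IPATH_MAX_RDMA_ATOMIC) ? 0 : n + 1;
}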
/*
 * Atomic bit definitions for r_aflags.
 */
#define IPATH_R_WRID_VALID	0

/*
 * Bit definitions for r_flags.
 */
#define IPATH_R_REUSE_SGE	0x01
#define IPATH_R_RDMAR_SEQ	0x02
/*
 * Bit definitions for s_flags.
 *
 * IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs
 *			   before processing the next SWQE
 * IPATH_S_RDMAR_PENDING - waiting for any RDMA read or atomic SWQEs
 *			   before processing the next SWQE
 * IPATH_S_WAITING - waiting for RNR timeout or send buffer available.
 * IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * IPATH_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *		      next send completion entry not via send DMA.
 */
#define IPATH_S_SIGNAL_REQ_WR	0x01
#define IPATH_S_FENCE_PENDING	0x02
#define IPATH_S_RDMAR_PENDING	0x04
#define IPATH_S_ACK_PENDING	0x08
#define IPATH_S_BUSY		0x10
#define IPATH_S_WAITING		0x20
#define IPATH_S_WAIT_SSN_CREDIT	0x40
#define IPATH_S_WAIT_DMA	0x80

#define IPATH_S_ANY_WAIT (IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \
	IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA)
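/*
 * Illustrative note (not from the driver source): the wait bits above
 * are set and tested under s_lock; ipath_schedule_send() further down
 * clears all of IPATH_S_ANY_WAIT once the QP can make progress again.
 */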
#define IPATH_PSN_CREDIT	512
/*
 * Since struct ipath_swqe is not a fixed size, we can't simply index into
 * struct ipath_qp.s_wq.  This function does the array index computation.
 */
static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
					      unsigned n)
{
	return (struct ipath_swqe *)((char *)qp->s_wq +
				     (sizeof(struct ipath_swqe) +
				      qp->s_max_sge *
				      sizeof(struct ipath_sge)) * n);
}
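/*
 * Illustrative usage (hypothetical helper, not part of the driver):
 * walking the in-flight span of the send queue with get_swqe_ptr(),
 * since s_wq cannot be indexed like an ordinary array.
 */
static inline u32 example_count_pending_swqes(struct ipath_qp *qp)
{
	u32 i = qp->s_last;
	u32 count = 0;

	while (i != qp->s_head) {
		struct ipath_swqe *wqe = get_swqe_ptr(qp, i);

		(void) wqe;		/* e.g. inspect wqe->psn here */
		count++;
		if (++i == qp->s_size)	/* s_wq is a ring of s_size entries */
			i = 0;
	}
	return count;
}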
/*
 * Since struct ipath_rwqe is not a fixed size, we can't simply index into
 * struct ipath_rwq.wq.  This function does the array index computation.
 */
static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
					      unsigned n)
{
	return (struct ipath_rwqe *)
		((char *) rq->wq->wq +
		 (sizeof(struct ipath_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
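/*
 * Worked example (illustrative numbers only): with rq->max_sge == 4,
 * each RWQE occupies sizeof(struct ipath_rwqe) + 4 * sizeof(struct
 * ib_sge) bytes, and entry n starts n such strides past rq->wq->wq.
 */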
/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
	atomic_t n_free;
	void *page;
};

struct ipath_qp_table {
	spinlock_t lock;
	u32 last;		/* last QP number allocated */
	u32 max;		/* size of the hash table */
	u32 nmaps;		/* size of the map table */
	struct ipath_qp **table;
	/* bit map of free numbers */
	struct qpn_map map[QPNMAP_ENTRIES];
};
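/*
 * Minimal sketch (example helper, not the driver's allocator, which
 * lives in ipath_qp.c): lazily allocating a QPN bitmap page on first
 * use, as described above.  Assumes a sleepable caller and that the
 * usual page allocator declarations are visible via ipath_kernel.h.
 */
static inline int example_get_map_page(struct qpn_map *map)
{
	void *page = (void *) get_zeroed_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;
	/* install the page only if no one else raced us to it */
	if (cmpxchg(&map->page, NULL, page) != NULL)
		free_page((unsigned long) page);
	return 0;
}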
struct ipath_lkey_table {
	spinlock_t lock;
	u32 next;		/* next unused index (speeds search) */
	u32 gen;		/* generation count */
	u32 max;		/* size of the table */
	struct ipath_mregion **table;
};
struct ipath_opcode_stats {
	u64 n_packets;		/* number of packets */
	u64 n_bytes;		/* total number of bytes */
};
struct ipath_ibdev {
	struct ib_device ibdev;
	struct ipath_devdata *dd;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock;
	u32 mmap_offset;
	int ib_unit;		/* This is the device number */
	u16 sm_lid;		/* in host order */
	u8 sm_sl;
	u8 mkeyprot;
	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;

	/* The following fields are really per port. */
	struct ipath_qp_table qp_table;
	struct ipath_lkey_table lk_table;
	struct list_head pending[3];	/* FIFO of QPs waiting for ACKs */
	struct list_head piowait;	/* list for wait PIO buf */
	struct list_head txreq_free;
	void *txreq_bufs;
	/* list of QPs waiting for RNR timer */
	struct list_head rnrwait;
	spinlock_t pending_lock;
	__be64 sys_image_guid;	/* in network order */
	__be64 gid_prefix;	/* in network order */
	__be64 mkey;

	u32 n_pds_allocated;	/* number of PDs allocated for device */
	spinlock_t n_pds_lock;
	u32 n_ahs_allocated;	/* number of AHs allocated for device */
	spinlock_t n_ahs_lock;
	u32 n_cqs_allocated;	/* number of CQs allocated for device */
	spinlock_t n_cqs_lock;
	u32 n_qps_allocated;	/* number of QPs allocated for device */
	spinlock_t n_qps_lock;
	u32 n_srqs_allocated;	/* number of SRQs allocated for device */
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;

	u64 ipath_sword;	/* total dwords sent (sample result) */
	u64 ipath_rword;	/* total dwords received (sample result) */
	u64 ipath_spkts;	/* total packets sent (sample result) */
	u64 ipath_rpkts;	/* total packets received (sample result) */
	/* # of ticks no data sent (sample result) */
	u64 ipath_xmit_wait;
	u64 rcv_errors;		/* # of packets with SW detected rcv errs */
	u64 n_unicast_xmit;	/* total unicast packets sent */
	u64 n_unicast_rcv;	/* total unicast packets received */
	u64 n_multicast_xmit;	/* total multicast packets sent */
	u64 n_multicast_rcv;	/* total multicast packets received */
	u64 z_symbol_error_counter;		/* starting count for PMA */
	u64 z_link_error_recovery_counter;	/* starting count for PMA */
	u64 z_link_downed_counter;		/* starting count for PMA */
	u64 z_port_rcv_errors;			/* starting count for PMA */
	u64 z_port_rcv_remphys_errors;		/* starting count for PMA */
	u64 z_port_xmit_discards;		/* starting count for PMA */
	u64 z_port_xmit_data;			/* starting count for PMA */
	u64 z_port_rcv_data;			/* starting count for PMA */
	u64 z_port_xmit_packets;		/* starting count for PMA */
	u64 z_port_rcv_packets;			/* starting count for PMA */
	u32 z_pkey_violations;			/* starting count for PMA */
	u32 z_local_link_integrity_errors;	/* starting count for PMA */
	u32 z_excessive_buffer_overrun_errors;	/* starting count for PMA */
	u32 z_vl15_dropped;			/* starting count for PMA */
	u32 n_rc_resends;
	u32 n_rc_acks;
	u32 n_rc_qacks;
	u32 n_seq_naks;
	u32 n_rdma_seq;
	u32 n_rnr_naks;
	u32 n_other_naks;
	u32 n_timeouts;
	u32 n_pkt_drops;
	u32 n_dmawait;
	u32 n_unaligned;
	u32 n_rc_dupreq;
	u32 n_rc_seqnak;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 qkey_violations;	/* number of QKEY violations */
	u16 mkey_violations;	/* number of mkey violations */
	u16 mkey_lease_period;
	u16 pending_index;	/* which pending queue is active */
	u8 pma_sample_status;
	u8 subnet_timeout;
	u8 vl_high_limit;
	struct ipath_opcode_stats opstats[128];
};
struct ipath_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};
struct ipath_verbs_txreq {
	struct ipath_qp		*qp;
	struct ipath_swqe	*wqe;
	u32			 map_len;
	u32			 len;
	struct ipath_sge_state	*ss;
	struct ipath_pio_header	 hdr;
	struct ipath_sdma_txreq	 txreq;
};
static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ipath_mr, ibmr);
}

static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ipath_pd, ibpd);
}

static inline struct ipath_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ipath_ah, ibah);
}

static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ipath_cq, ibcq);
}

static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ipath_srq, ibsrq);
}

static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ipath_qp, ibqp);
}

static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ipath_ibdev, ibdev);
}
/*
 * This must be called with s_lock held.
 */
static inline void ipath_schedule_send(struct ipath_qp *qp)
{
	if (qp->s_flags & IPATH_S_ANY_WAIT)
		qp->s_flags &= ~IPATH_S_ANY_WAIT;
	if (!(qp->s_flags & IPATH_S_BUSY))
		tasklet_hi_schedule(&qp->s_task);
}
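/*
 * Illustrative call site (hypothetical, for documentation only):
 *
 *	spin_lock_irqsave(&qp->s_lock, flags);
 *	ipath_schedule_send(qp);
 *	spin_unlock_irqrestore(&qp->s_lock, flags);
 */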
int ipath_process_mad(struct ib_device *ibdev,
		      int mad_flags,
		      u8 port_num,
		      struct ib_wc *in_wc,
		      struct ib_grh *in_grh,
		      struct ib_mad *in_mad, struct ib_mad *out_mad);
/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer <, ==, or > than zero.
 */
static inline int ipath_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}
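/*
 * The << 8 shifts bit 23 of the difference into the sign bit, so the
 * comparison is modulo 2^24 and PSN wraparound is handled.  Example
 * (values chosen for illustration): ipath_cmp24(0xfffffe, 0x1) < 0,
 * i.e. PSN 0xfffffe still precedes PSN 1 across the wrap.
 */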
struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);
int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
			    u64 *rwords, u64 *spkts, u64 *rpkts,
			    u64 *xmit_wait);

int ipath_get_counters(struct ipath_devdata *dd,
		       struct ipath_verbs_counters *cntrs);
int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_mcast_tree_empty(void);

__be32 ipath_compute_aeth(struct ipath_qp *qp);

struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata);

int ipath_destroy_qp(struct ib_qp *ibqp);

int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);

int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata);

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned ipath_free_all_qps(struct ipath_qp_table *qpt);

int ipath_init_qp_table(struct ipath_ibdev *idev, int size);

void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
unsigned ipath_ib_rate_to_mult(enum ib_rate rate);

int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
		     u32 hdrwords, struct ipath_sge_state *ss, u32 len);

void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);

void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);

void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_restart_rc(struct ipath_qp *qp, u32 psn);

void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);

int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);

void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
		     struct ipath_mregion *mr);

void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);

int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
		  struct ib_sge *sge, int acc);

int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
		  u32 len, u64 vaddr, u32 rkey, int acc);
int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr);

struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
				struct ib_srq_init_attr *srq_init_attr,
				struct ib_udata *udata);

int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask,
		     struct ib_udata *udata);

int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int ipath_destroy_srq(struct ib_srq *ibsrq);
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);

int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
			      int comp_vector,
			      struct ib_ucontext *context,
			      struct ib_udata *udata);

int ipath_destroy_cq(struct ib_cq *ibcq);

int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
				struct ib_phys_buf *buffer_list,
				int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				u64 virt_addr, int mr_access_flags,
				struct ib_udata *udata);

int ipath_dereg_mr(struct ib_mr *ibmr);

struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			       struct ib_fmr_attr *fmr_attr);

int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		       int list_len, u64 iova);

int ipath_unmap_fmr(struct list_head *fmr_list);

int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
void ipath_release_mmap_info(struct kref *ref);

struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
					       u32 size,
					       struct ib_ucontext *context,
					       void *obj);

void ipath_update_mmap_info(struct ipath_ibdev *dev,
			    struct ipath_mmap_info *ip,
			    u32 size, void *obj);

int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void ipath_insert_rnr_queue(struct ipath_qp *qp);

int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
		   u32 *lengthp, struct ipath_sge_state *ss);

int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);

u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
		   struct ib_global_route *grh, u32 hwords, u32 nwords);

void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
			   struct ipath_other_headers *ohdr,
			   u32 bth0, u32 bth2);

void ipath_do_send(unsigned long data);

void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
			 enum ib_wc_status status);

int ipath_make_rc_req(struct ipath_qp *qp);

int ipath_make_uc_req(struct ipath_qp *qp);

int ipath_make_ud_req(struct ipath_qp *qp);
int ipath_register_ib_device(struct ipath_devdata *);

void ipath_unregister_ib_device(struct ipath_ibdev *);

void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);

int ipath_ib_piobufavail(struct ipath_ibdev *);

unsigned ipath_get_npkeys(struct ipath_devdata *);

u32 ipath_get_cr_errpkey(struct ipath_devdata *);

unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);

extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
/*
 * Below converts HCA-specific LinkTrainingState to IB PhysPortState
 * values.
 */
extern const u8 ipath_cvt_physportstate[];
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
extern const int ib_ipath_state_ops[];

extern unsigned int ib_ipath_lkey_table_size;

extern unsigned int ib_ipath_max_cqes;

extern unsigned int ib_ipath_max_cqs;

extern unsigned int ib_ipath_max_qp_wrs;

extern unsigned int ib_ipath_max_qps;

extern unsigned int ib_ipath_max_sges;

extern unsigned int ib_ipath_max_mcast_grps;

extern unsigned int ib_ipath_max_mcast_qp_attached;

extern unsigned int ib_ipath_max_srqs;

extern unsigned int ib_ipath_max_srq_sges;

extern unsigned int ib_ipath_max_srq_wrs;

extern const u32 ib_ipath_rnr_table[];

extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;
#endif /* IPATH_VERBS_H */