/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <rdma/rdma_vt.h>
#include "chip_registers.h"
/* bumped 1 from s/w major version of TrueScale */
#define HFI1_CHIP_VERS_MAJ 3U

/* don't care about this except printing */
#define HFI1_CHIP_VERS_MIN 0U

/* The Organization Unique Identifier (Mfg code), and its position in GUID */
#define HFI1_OUI 0x001175
#define HFI1_OUI_LSB 40

#define DROP_PACKET_OFF 0
#define DROP_PACKET_ON 1

#define NEIGHBOR_TYPE_HFI 0
#define NEIGHBOR_TYPE_SWITCH 1
extern unsigned long hfi1_cap_mask;
#define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
#define HFI1_CAP_UGET_MASK(mask, cap) \
	(((mask) >> HFI1_CAP_USER_SHIFT) & HFI1_CAP_##cap)
#define HFI1_CAP_KGET(cap) (HFI1_CAP_KGET_MASK(hfi1_cap_mask, cap))
#define HFI1_CAP_UGET(cap) (HFI1_CAP_UGET_MASK(hfi1_cap_mask, cap))
#define HFI1_CAP_IS_KSET(cap) (!!HFI1_CAP_KGET(cap))
#define HFI1_CAP_IS_USET(cap) (!!HFI1_CAP_UGET(cap))
#define HFI1_MISC_GET() ((hfi1_cap_mask >> HFI1_CAP_MISC_SHIFT) & \
			 HFI1_CAP_MISC_MASK)
/* Offline Disabled Reason is 4-bits */
#define HFI1_ODR_MASK(rsn) ((rsn) & OPA_PI_MASK_OFFLINE_REASON)
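/*
 * Illustrative use of the capability helpers above (a sketch, not code taken
 * from this driver): the kernel and user halves of hfi1_cap_mask are queried
 * independently, so a feature such as SDMA can be gated on both, e.g.
 *
 *	if (HFI1_CAP_IS_KSET(SDMA) && HFI1_CAP_IS_USET(SDMA))
 *		setup_user_sdma(fd);	// hypothetical helper
 */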
/*
 * Control context is always 0 and handles the error packets.
 * It also handles the VL15 and multicast packets.
 */
#define HFI1_CTRL_CTXT 0
/*
 * Driver context will store software counters for each of the events
 * associated with these status registers
 */
#define NUM_CCE_ERR_STATUS_COUNTERS 41
#define NUM_RCV_ERR_STATUS_COUNTERS 64
#define NUM_MISC_ERR_STATUS_COUNTERS 13
#define NUM_SEND_PIO_ERR_STATUS_COUNTERS 36
#define NUM_SEND_DMA_ERR_STATUS_COUNTERS 4
#define NUM_SEND_EGRESS_ERR_STATUS_COUNTERS 64
#define NUM_SEND_ERR_STATUS_COUNTERS 3
#define NUM_SEND_CTXT_ERR_STATUS_COUNTERS 5
#define NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS 24
/*
 * per driver stats, either not device nor port-specific, or
 * summed over all of the devices and ports.
 * They are described by name via ipathfs filesystem, so layout
 * and number of elements can change without breaking compatibility.
 * If members are added or deleted hfi1_statnames[] in debugfs.c must
 * change to match.
 */
struct hfi1_ib_stats {
	__u64 sps_ints;		/* number of interrupts handled */
	__u64 sps_errints;	/* number of error interrupts */
	__u64 sps_txerrs;	/* tx-related packet errors */
	__u64 sps_rcverrs;	/* non-crc rcv packet errors */
	__u64 sps_hwerrs;	/* hardware errors reported (parity, etc.) */
	__u64 sps_nopiobufs;	/* no pio bufs avail from kernel */
	__u64 sps_ctxts;	/* number of contexts currently open */
	__u64 sps_lenerrs;	/* number of kernel packets where RHF != LRH len */
};

extern struct hfi1_ib_stats hfi1_stats;
extern const struct pci_error_handlers hfi1_pci_err_handler;
/*
 * First-cut criterion for "device is active" is
 * two thousand dwords combined Tx, Rx traffic per
 * 5-second interval. SMA packets are 64 dwords,
 * and occur "a few per second", presumably each way.
 */
#define HFI1_TRAFFIC_ACTIVE_THRESHOLD (2000)
/*
 * Below contains all data related to a single context (formerly called port).
 */
struct hfi1_opcode_stats_perctx;

struct ctxt_eager_bufs {
	ssize_t size;		/* total size of eager buffers */
	u32 count;		/* size of buffers array */
	u32 numbufs;		/* number of buffers allocated */
	u32 alloced;		/* number of rcvarray entries used */
	u32 rcvtid_size;	/* size of each eager rcv tid */
	u32 threshold;		/* head update threshold */
	struct eager_buffer {
		void *addr;
		dma_addr_t dma;
		ssize_t len;
	} *buffers;
};

struct exp_tid_set {
	struct list_head list;
	u32 count;
};
struct hfi1_ctxtdata {
	/* shadow the ctxt's RcvCtrl register */
	/* rcvhdrq base, needs mmap before useful */
	/* kernel virtual address where hdrqtail is updated */
	volatile __le64 *rcvhdrtail_kvaddr;
	/* when waiting for rcv or pioavail */
	wait_queue_head_t wait;
	/* rcvhdrq size (for freeing) */
	/* number of rcvhdrq entries */
	/* size of each of the rcvhdrq entries */
	/* mmap of hdrq, must fit in 44 bits */
	dma_addr_t rcvhdrq_dma;
	dma_addr_t rcvhdrqtailaddr_dma;
	struct ctxt_eager_bufs egrbufs;
	/* this receive context's assigned PIO ACK send context */
	struct send_context *sc;
	/* dynamic receive available interrupt timeout */
	u32 rcvavail_timeout;
	/* Reference count the base context usage */
	/* Device context index */
	/*
	 * non-zero if ctxt can be shared, and defines the maximum number of
	 * sub-contexts for this device context.
	 */
	/* non-zero if ctxt is being shared. */
	/* number of RcvArray groups for this context. */
	u32 rcv_array_groups;
	/* index of first eager TID entry. */
	/* number of expected TID entries */
	/* index of first expected TID entry. */
	struct exp_tid_set tid_group_list;
	struct exp_tid_set tid_used_list;
	struct exp_tid_set tid_full_list;

	/* lock protecting all Expected TID data */
	struct mutex exp_lock;
	/* per-context configuration flags */
	/* per-context event flags for fileops/intr communication */
	unsigned long event_flags;
	/* total number of polled urgent packets */
	/* saved total number of polled urgent packets for poll edge trigger */
	/* same size as task_struct .comm[], command that opened context */
	char comm[TASK_COMM_LEN];
	/* so file ops can get at unit */
	struct hfi1_devdata *dd;
	/* so functions that need physical port can get it easily */
	struct hfi1_pportdata *ppd;
	/* associated msix interrupt */
	/* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
	void *subctxt_uregbase;
	/* An array of pages for the eager receive buffers * N */
	void *subctxt_rcvegrbuf;
	/* An array of pages for the eager header queue entries * N */
	void *subctxt_rcvhdr_base;
	/* Bitmask of in use context(s) */
	DECLARE_BITMAP(in_use_ctxts, HFI1_MAX_SHARED_CTXTS);
	/* The version of the library which opened this ctxt */
	/* Type of packets or conditions we want to poll for */
	/* receive packet sequence counter */
	/* ctxt rcvhdrq head offset */
	/* QPs waiting for context processing */
	struct list_head qp_wait_list;
	/* interrupt handling */
	u64 imask;	/* clear interrupt mask */
	int ireg;	/* clear interrupt register */
	unsigned numa_id;	/* numa node of this context */
	/* verbs rx_stats per rcd */
	struct hfi1_opcode_stats_perctx *opstats;

	/* Is ASPM interrupt supported for this context */
	bool aspm_intr_supported;
	/* ASPM state (enabled/disabled) for this context */
	/* Timer for re-enabling ASPM if interrupt activity quietens down */
	struct timer_list aspm_timer;
	/* Lock to serialize between intr, timer intr and user threads */
	spinlock_t aspm_lock;
	/* Is ASPM processing enabled for this context (in intr context) */
	bool aspm_intr_enable;
	/* Last interrupt timestamp */
	ktime_t aspm_ts_last_intr;
	/* Last timestamp at which we scheduled a timer for this context */
	ktime_t aspm_ts_timer_sched;
	/*
	 * The interrupt handler for a particular receive context can vary
	 * throughout its lifetime. This is not a lock protected data member so
	 * it must be updated atomically and the prev and new value must always
	 * be valid. Worst case is we process an extra interrupt and up to 64
	 * packets with the wrong interrupt handler.
	 */
	int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);

	/* Indicates that this is vnic context */
	/* vnic queue index this context is mapped to */
};
/*
 * Represents a single packet at a high level. Put commonly computed things in
 * here so we do not have to keep doing them over and over. The rule of thumb is
 * if something is used one time to derive some value, store that something in
 * here. If it is used multiple times, then store the result of that derivation
 * in here.
 */
struct hfi1_packet {
	void *hdr;
	struct hfi1_ctxtdata *rcd;
	struct ib_other_headers *ohdr;
	u8 etype;
};
#define HFI1_PKT_TYPE_9B  0
#define HFI1_PKT_TYPE_16B 1
#define OPA_16B_L4_MASK		0xFFull
#define OPA_16B_SC_MASK		0x1F00000ull
#define OPA_16B_SC_SHIFT	20
#define OPA_16B_LID_MASK	0xFFFFFull
#define OPA_16B_DLID_MASK	0xF000ull
#define OPA_16B_DLID_SHIFT	20
#define OPA_16B_DLID_HIGH_SHIFT	12
#define OPA_16B_SLID_MASK	0xF00ull
#define OPA_16B_SLID_SHIFT	20
#define OPA_16B_SLID_HIGH_SHIFT	8
#define OPA_16B_BECN_MASK	0x80000000ull
#define OPA_16B_BECN_SHIFT	31
#define OPA_16B_FECN_MASK	0x10000000ull
#define OPA_16B_FECN_SHIFT	28
#define OPA_16B_L2_MASK		0x60000000ull
#define OPA_16B_L2_SHIFT	29
#define OPA_16B_PKEY_MASK	0xFFFF0000ull
#define OPA_16B_PKEY_SHIFT	16
#define OPA_16B_LEN_MASK	0x7FF00000ull
#define OPA_16B_LEN_SHIFT	20
#define OPA_16B_RC_MASK		0xE000000ull
#define OPA_16B_RC_SHIFT	25
#define OPA_16B_AGE_MASK	0xFF0000ull
#define OPA_16B_AGE_SHIFT	16
#define OPA_16B_ENTROPY_MASK	0xFFFFull
/*
 * OPA 16B L2/L4 Encodings
 */
#define OPA_16B_L4_9B		0x00
#define OPA_16B_L2_TYPE		0x02
#define OPA_16B_L4_IB_LOCAL	0x09
#define OPA_16B_L4_IB_GLOBAL	0x0A
#define OPA_16B_L4_ETHR		OPA_VNIC_L4_ETHR
static inline u8 hfi1_16B_get_l4(struct hfi1_16b_header *hdr)
{
	return (u8)(hdr->lrh[2] & OPA_16B_L4_MASK);
}

static inline u8 hfi1_16B_get_sc(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_SC_MASK) >> OPA_16B_SC_SHIFT);
}

static inline u32 hfi1_16B_get_dlid(struct hfi1_16b_header *hdr)
{
	return (u32)((hdr->lrh[1] & OPA_16B_LID_MASK) |
		     (((hdr->lrh[2] & OPA_16B_DLID_MASK) >>
		     OPA_16B_DLID_HIGH_SHIFT) << OPA_16B_DLID_SHIFT));
}

static inline u32 hfi1_16B_get_slid(struct hfi1_16b_header *hdr)
{
	return (u32)((hdr->lrh[0] & OPA_16B_LID_MASK) |
		     (((hdr->lrh[2] & OPA_16B_SLID_MASK) >>
		     OPA_16B_SLID_HIGH_SHIFT) << OPA_16B_SLID_SHIFT));
}

static inline u8 hfi1_16B_get_becn(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[0] & OPA_16B_BECN_MASK) >> OPA_16B_BECN_SHIFT);
}

static inline u8 hfi1_16B_get_fecn(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_FECN_MASK) >> OPA_16B_FECN_SHIFT);
}

static inline u8 hfi1_16B_get_l2(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_L2_MASK) >> OPA_16B_L2_SHIFT);
}

static inline u16 hfi1_16B_get_pkey(struct hfi1_16b_header *hdr)
{
	return (u16)((hdr->lrh[2] & OPA_16B_PKEY_MASK) >> OPA_16B_PKEY_SHIFT);
}

static inline u8 hfi1_16B_get_rc(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_RC_MASK) >> OPA_16B_RC_SHIFT);
}

static inline u8 hfi1_16B_get_age(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[3] & OPA_16B_AGE_MASK) >> OPA_16B_AGE_SHIFT);
}

static inline u16 hfi1_16B_get_len(struct hfi1_16b_header *hdr)
{
	return (u16)((hdr->lrh[0] & OPA_16B_LEN_MASK) >> OPA_16B_LEN_SHIFT);
}

static inline u16 hfi1_16B_get_entropy(struct hfi1_16b_header *hdr)
{
	return (u16)(hdr->lrh[3] & OPA_16B_ENTROPY_MASK);
}

#define OPA_16B_MAKE_QW(low_dw, high_dw) (((u64)(high_dw) << 32) | (low_dw))

#define OPA_16B_BTH_PAD_MASK	7
static inline u8 hfi1_16B_bth_get_pad(struct ib_other_headers *ohdr)
{
	return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_PAD_SHIFT) &
		    OPA_16B_BTH_PAD_MASK);
}
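/*
 * Illustrative example (not taken from this file): decoding routing fields
 * of a received 16B header with the accessors above; 'hdr' is assumed to
 * point at a valid struct hfi1_16b_header.
 *
 *	u32 dlid = hfi1_16B_get_dlid(hdr);
 *	u8 sc = hfi1_16B_get_sc(hdr);
 *
 *	if (hfi1_16B_get_l4(hdr) == OPA_16B_L4_IB_LOCAL)
 *		handle_ib_local(dlid, sc);	// hypothetical handler
 */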
struct rvt_sge_state;
/*
 * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
 * Mostly for MADs that set or query link parameters, also ipath
 * config interfaces
 */
#define HFI1_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
#define HFI1_IB_CFG_LWID_DG_ENB 1 /* allowed Link-width downgrade */
#define HFI1_IB_CFG_LWID_ENB 2 /* allowed Link-width */
#define HFI1_IB_CFG_LWID 3 /* currently active Link-width */
#define HFI1_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
#define HFI1_IB_CFG_SPD 5 /* current Link spd */
#define HFI1_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
#define HFI1_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
#define HFI1_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
#define HFI1_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
#define HFI1_IB_CFG_OP_VLS 10 /* operational VLs */
#define HFI1_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
#define HFI1_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
#define HFI1_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
#define HFI1_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
#define HFI1_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
#define HFI1_IB_CFG_PKEYS 16 /* update partition keys */
#define HFI1_IB_CFG_MTU 17 /* update MTU in IBC */
#define HFI1_IB_CFG_VL_HIGH_LIMIT 19
#define HFI1_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
#define HFI1_IB_CFG_PORT 21 /* switch port we are connected to */
/*
 * HFI or Host Link States
 *
 * These describe the states the driver thinks the logical and physical
 * states are in. Used as an argument to set_link_state(). Implemented
 * as bits for easy multi-state checking. The actual state can only be
 * one.
 */
#define __HLS_UP_INIT_BP	0
#define __HLS_UP_ARMED_BP	1
#define __HLS_UP_ACTIVE_BP	2
#define __HLS_DN_DOWNDEF_BP	3	/* link down default */
#define __HLS_DN_POLL_BP	4
#define __HLS_DN_DISABLE_BP	5
#define __HLS_DN_OFFLINE_BP	6
#define __HLS_VERIFY_CAP_BP	7
#define __HLS_GOING_UP_BP	8
#define __HLS_GOING_OFFLINE_BP	9
#define __HLS_LINK_COOLDOWN_BP	10

#define HLS_UP_INIT	  BIT(__HLS_UP_INIT_BP)
#define HLS_UP_ARMED	  BIT(__HLS_UP_ARMED_BP)
#define HLS_UP_ACTIVE	  BIT(__HLS_UP_ACTIVE_BP)
#define HLS_DN_DOWNDEF	  BIT(__HLS_DN_DOWNDEF_BP) /* link down default */
#define HLS_DN_POLL	  BIT(__HLS_DN_POLL_BP)
#define HLS_DN_DISABLE	  BIT(__HLS_DN_DISABLE_BP)
#define HLS_DN_OFFLINE	  BIT(__HLS_DN_OFFLINE_BP)
#define HLS_VERIFY_CAP	  BIT(__HLS_VERIFY_CAP_BP)
#define HLS_GOING_UP	  BIT(__HLS_GOING_UP_BP)
#define HLS_GOING_OFFLINE BIT(__HLS_GOING_OFFLINE_BP)
#define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP)

#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
#define HLS_DOWN ~(HLS_UP)

#define HLS_DEFAULT HLS_DN_POLL
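/*
 * Because each HLS_* value is a single bit, several states can be tested in
 * one mask operation. A sketch only (it assumes a host_link_state field
 * holding the current HLS_* value; that field is not shown in this excerpt):
 *
 *	if (ppd->host_link_state & HLS_UP)
 *		;	// link is in Init, Armed or Active
 */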
/* use this MTU size if none other is given */
#define HFI1_DEFAULT_ACTIVE_MTU 10240
/* use this MTU size as the default maximum */
#define HFI1_DEFAULT_MAX_MTU 10240
/* default partition key */
#define DEFAULT_PKEY 0xffff
/*
 * Possible fabric manager config parameters for fm_{get,set}_table()
 */
#define FM_TBL_VL_HIGH_ARB		1 /* Get/set VL high prio weights */
#define FM_TBL_VL_LOW_ARB		2 /* Get/set VL low prio weights */
#define FM_TBL_BUFFER_CONTROL		3 /* Get/set Buffer Control */
#define FM_TBL_SC2VLNT			4 /* Get/set SC->VLnt */
#define FM_TBL_VL_PREEMPT_ELEMS		5 /* Get (no set) VL preempt elems */
#define FM_TBL_VL_PREEMPT_MATRIX	6 /* Get (no set) VL preempt matrix */
/*
 * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
 * these are bits so they can be combined, e.g.
 * HFI1_RCVCTRL_INTRAVAIL_ENB | HFI1_RCVCTRL_CTXT_ENB
 */
#define HFI1_RCVCTRL_TAILUPD_ENB 0x01
#define HFI1_RCVCTRL_TAILUPD_DIS 0x02
#define HFI1_RCVCTRL_CTXT_ENB 0x04
#define HFI1_RCVCTRL_CTXT_DIS 0x08
#define HFI1_RCVCTRL_INTRAVAIL_ENB 0x10
#define HFI1_RCVCTRL_INTRAVAIL_DIS 0x20
#define HFI1_RCVCTRL_PKEY_ENB 0x40  /* Note, default is enabled */
#define HFI1_RCVCTRL_PKEY_DIS 0x80
#define HFI1_RCVCTRL_TIDFLOW_ENB 0x0400
#define HFI1_RCVCTRL_TIDFLOW_DIS 0x0800
#define HFI1_RCVCTRL_ONE_PKT_EGR_ENB 0x1000
#define HFI1_RCVCTRL_ONE_PKT_EGR_DIS 0x2000
#define HFI1_RCVCTRL_NO_RHQ_DROP_ENB 0x4000
#define HFI1_RCVCTRL_NO_RHQ_DROP_DIS 0x8000
#define HFI1_RCVCTRL_NO_EGR_DROP_ENB 0x10000
#define HFI1_RCVCTRL_NO_EGR_DROP_DIS 0x20000
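/*
 * Since the HFI1_RCVCTRL_* values are bit flags, a single f_rcvctrl() call
 * can carry several operations, following the convention noted above
 * (illustrative only):
 *
 *	f_rcvctrl(ppd, HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
 *		  ctxt);
 */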
/* partition enforcement flags */
#define HFI1_PART_ENFORCE_IN	0x1
#define HFI1_PART_ENFORCE_OUT	0x2

/* how often we check for synthetic counter wrap around */
#define SYNTH_CNT_TIME 3

#define CNTR_NORMAL		0x0 /* Normal counters, just read register */
#define CNTR_SYNTH		0x1 /* Synthetic counters, saturate at all 1s */
#define CNTR_DISABLED		0x2 /* Disable this counter */
#define CNTR_32BIT		0x4 /* Simulate 64 bits for this counter */
#define CNTR_VL			0x8 /* Per VL counter */
#define CNTR_SDMA		0x10
#define CNTR_INVALID_VL		-1  /* Specifies invalid VL */
#define CNTR_MODE_W		0x0
#define CNTR_MODE_R		0x1

/* VLs Supported/Operational */
#define HFI1_MIN_VLS_SUPPORTED 1
#define HFI1_MAX_VLS_SUPPORTED 8

#define HFI1_GUIDS_PER_PORT  5
#define HFI1_PORT_GUID_INDEX 0
static inline void incr_cntr64(u64 *cntr)
{
	if (*cntr < (u64)-1LL)
		(*cntr)++;
}

static inline void incr_cntr32(u32 *cntr)
{
	if (*cntr < (u32)-1LL)
		(*cntr)++;
}
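/*
 * These helpers saturate rather than wrap. A short illustration (the names
 * are made up for the example):
 *
 *	u64 sw_errs = 0;
 *	incr_cntr64(&sw_errs);		// sw_errs == 1
 *	// once sw_errs reaches (u64)-1, further calls leave it unchanged
 */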
#define MAX_NAME_SIZE 64
struct hfi1_msix_entry {
	struct irq_affinity_notify notify;
};
/* per-SL CCA information */
struct cca_timer {
	struct hrtimer hrtimer;
	struct hfi1_pportdata *ppd;	/* read-only */
	int sl;				/* read-only */
	u16 ccti;			/* read/write - current value of CCTI */
};
struct link_down_reason {
	/*
	 * SMA-facing value. Should be set from .latest when
	 * HLS_UP_* -> HLS_DN_* transition actually occurs.
	 */
	u8 sma;
	u8 latest;
};
struct vl_arb_cache {
	/* protect vl arb cache */
	spinlock_t lock;
	struct ib_vl_weight_elem table[VL_ARB_TABLE_SIZE];
};
/*
 * The structure below encapsulates data relevant to a physical IB Port.
 * Current chips support only one such port, but the separation
 * clarifies things a bit. Note that to conform to IB conventions,
 * port-numbers are one-based. The first or only port is port1.
 */
struct hfi1_pportdata {
	struct hfi1_ibport ibport_data;

	struct hfi1_devdata *dd;
	struct kobject pport_cc_kobj;
	struct kobject sc2vl_kobj;
	struct kobject sl2sc_kobj;
	struct kobject vl2mtu_kobj;

	struct qsfp_data qsfp_info;
	/* Values for SI tuning of SerDes */
	/* did we read platform config from scratch registers? */
	bool config_from_scratch;

	/* GUIDs for this interface, in host order, guids[0] is a port guid */
	u64 guids[HFI1_GUIDS_PER_PORT];
	/* GUID for peer interface, in host order */
	/* up or down physical link state */
	/*
	 * this address is mapped read-only into user processes so they can
	 * get status cheaply, whenever they want. One qword of status per port
	 */
	/* SendDMA related entries */

	struct workqueue_struct *hfi1_wq;
	struct workqueue_struct *link_wq;

	/* move out of interrupt context */
	struct work_struct link_vc_work;
	struct work_struct link_up_work;
	struct work_struct link_down_work;
	struct work_struct sma_message_work;
	struct work_struct freeze_work;
	struct work_struct link_downgrade_work;
	struct work_struct link_bounce_work;
	struct delayed_work start_link_work;
	/* host link state variables */
	struct mutex hls_lock;
	/* these are the "32 bit" regs */

	u32 ibmtu;	/* The MTU programmed for this unit */
	/*
	 * Current max size IB packet (in bytes) including IB headers, that
	 * we can send. Changes when ibmtu changes.
	 */
	u32 current_egress_rate;	/* units [10^6 bits/sec] */
	/* LID programmed for this instance */
	/* list of pkeys programmed; 0 if not set */
	u16 pkeys[MAX_PKEY_VALUES];
	u16 link_width_supported;
	u16 link_width_downgrade_supported;
	u16 link_speed_supported;
	u16 link_width_enabled;
	u16 link_width_downgrade_enabled;
	u16 link_speed_enabled;
	u16 link_width_active;
	u16 link_width_downgrade_tx_active;
	u16 link_width_downgrade_rx_active;
	u16 link_speed_active;
	u8 actual_vls_operational;
	/* LID mask control */
	/* Rx Polarity inversion (compensate for ~tx on partner) */
	u8 hw_pidx;	/* physical port index */
	u8 port;	/* IB port number and index into dd->pports - 1 */
	/* type of neighbor node */
	u8 neighbor_fm_security;	/* 1 if firmware checking is disabled */
	u8 neighbor_port_number;
	u8 is_sm_config_started;
	u8 offline_disabled_reason;
	u8 is_active_optimize_enabled;
	u8 driver_link_ready;	/* driver ready for active link */
	u8 link_enabled;	/* link enabled? */
	u8 local_tx_rate;	/* rate given to 8051 firmware */

	/* placeholders for IB MAD packet settings */
	u8 overrun_threshold;
	u8 phy_error_threshold;
	unsigned int is_link_down_queued;

	/* Used to override LED behavior for things like maintenance beaconing */
	/*
	 * Alternates per phase of blink
	 * [0] holds LED off duration, [1] holds LED on duration
	 */
	unsigned long led_override_vals[2];
	u8 led_override_phase;	/* LSB picks from vals[] */
	atomic_t led_override_timer_active;
	/* Used to flash LEDs in override mode */
	struct timer_list led_override_timer;
	/*
	 * cca_timer_lock protects access to the per-SL cca_timer
	 * structures (specifically the ccti member).
	 */
	spinlock_t cca_timer_lock ____cacheline_aligned_in_smp;
	struct cca_timer cca_timer[OPA_MAX_SLS];

	/* List of congestion control table entries */
	struct ib_cc_table_entry_shadow ccti_entries[CC_TABLE_SHADOW_MAX];

	/* congestion entries, each entry corresponding to a SL */
	struct opa_congestion_setting_entry_shadow
		congestion_entries[OPA_MAX_SLS];

	/*
	 * cc_state_lock protects (write) access to the per-port
	 * struct cc_state.
	 */
	spinlock_t cc_state_lock ____cacheline_aligned_in_smp;

	struct cc_state __rcu *cc_state;

	/* Total number of congestion control table entries */
	/* Bit map identifying service level */
	u32 cc_sl_control_map;

	/* CA's max number of 64 entry units in the congestion control table */
	u8 cc_max_table_entries;

	/*
	 * begin congestion log related entries
	 * cc_log_lock protects all congestion log related data
	 */
	spinlock_t cc_log_lock ____cacheline_aligned_in_smp;
	u8 threshold_cong_event_map[OPA_MAX_SLS / 8];
	u16 threshold_event_counter;
	struct opa_hfi1_cong_log_event_internal cc_events[OPA_CONG_LOG_ELEMS];
	int cc_log_idx;	/* index for logging events */
	int cc_mad_idx;	/* index for reporting events */
	/* end congestion log related entries */

	struct vl_arb_cache vl_arb_cache[MAX_PRIO_TABLE];
	/* port relative counter buffer */
	/* port relative synthetic counter buffer */
	/* port_xmit_discards are synthesized from different egress errors */
	u64 port_xmit_discards;
	u64 port_xmit_discards_vl[C_VL_COUNT];
	u64 port_xmit_constraint_errors;
	u64 port_rcv_constraint_errors;
	/* count of 'link_err' interrupts from DC */
	/* number of times link retrained successfully */
	/* number of times a link unknown frame was reported */
	u64 unknown_frame_count;
	/* port_ltp_crc_mode is returned in 'portinfo' MADs */
	u16 port_ltp_crc_mode;
	/* port_crc_mode_enabled is the crc we support */
	u8 port_crc_mode_enabled;
	/* mgmt_allowed is also returned in 'portinfo' MADs */
	u8 part_enforce;	/* partition enforcement flags */
	struct link_down_reason local_link_down_reason;
	struct link_down_reason neigh_link_down_reason;
	/* Value to be sent to link peer on LinkDown. */
	u8 remote_link_down_reason;
	/* Error events that will cause a port bounce. */
	u32 port_error_action;
	struct work_struct linkstate_active_work;
	/* Does this port need to prescan for FECNs */
	/*
	 * Sample sendWaitCnt & sendWaitVlCnt during link transition
	 * and counter request.
	 */
	u64 port_vl_xmit_wait_last[C_VL_COUNT + 1];
	u64 vl_xmit_flit_cnt[C_VL_COUNT + 1];
};
typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);

typedef void (*opcode_handler)(struct hfi1_packet *packet);
typedef void (*hfi1_make_req)(struct rvt_qp *qp,
			      struct hfi1_pkt_state *ps,
			      struct rvt_swqe *wqe);

/* return values for the RHF receive functions */
#define RHF_RCV_CONTINUE  0	/* keep going */
#define RHF_RCV_DONE	  1	/* stop, this packet processed */
#define RHF_RCV_REPROCESS 2	/* stop. retain this packet */
struct rcv_array_data {
	u8 group_size;
	u16 ngroups;
	u16 nctxt_extra;
};

struct per_vl_data {
	struct send_context *sc;
};

/* 16 to directly index */
#define PER_VL_SEND_CONTEXTS 16
struct err_info_rcvport {
	u8 status_and_code;
	u64 packet_flit1;
	u64 packet_flit2;
};

struct err_info_constraint {
	u8 status;
	u16 pkey;
	u32 slid;
};

struct hfi1_temp {
	unsigned int curr;	/* current temperature */
	unsigned int lo_lim;	/* low temperature limit */
	unsigned int hi_lim;	/* high temperature limit */
	unsigned int crit_lim;	/* critical temperature limit */
	u8 triggers;		/* temperature triggers */
};
struct hfi1_i2c_bus {
	struct hfi1_devdata *controlling_dd;	/* current controlling device */
	struct i2c_adapter adapter;	/* bus details */
	struct i2c_algo_bit_data algo;	/* bus algorithm details */
	int num;			/* bus number, 0 or 1 */
};
/* common data between shared ASIC HFIs */
struct hfi1_asic_data {
	struct hfi1_devdata *dds[2];	/* back pointers */
	struct mutex asic_resource_mutex;
	struct hfi1_i2c_bus *i2c_bus0;
	struct hfi1_i2c_bus *i2c_bus1;
};

/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES	256
#define NUM_MAP_REGS	32
/*
 * Number of VNIC contexts used. Ensure it is less than or equal to
 * max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
 */
#define HFI1_NUM_VNIC_CTXT	8

/* Number of VNIC RSM entries */
#define NUM_VNIC_MAP_ENTRIES	8
/* Virtual NIC information */
struct hfi1_vnic_data {
	struct hfi1_ctxtdata *ctxt[HFI1_NUM_VNIC_CTXT];
	struct kmem_cache *txreq_cache;
	u8 rmt_start;	/* first RSM map entry used for VNIC */
};

struct hfi1_vnic_vport_info;
/* device data struct now contains only "general per-device" info.
 * fields related to a physical IB port are in a hfi1_pportdata struct.
 */

#define BOARD_VERS_MAX 96 /* how long the version string can be */
#define SERIAL_MAX 16 /* length of the serial number */

typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
struct hfi1_devdata {
	struct hfi1_ibdev verbs_dev;	/* must be first */
	struct list_head list;
	/* pointers to related structs for this device */
	/* pci access data structure */
	struct pci_dev *pcidev;
	struct cdev user_cdev;
	struct cdev diag_cdev;
	struct device *user_device;
	struct device *diag_device;
	struct device *ui_device;

	/* first mapping up to RcvArray */
	u8 __iomem *kregbase1;
	resource_size_t physaddr;

	/* second uncached mapping from RcvArray to pio send buffers */
	u8 __iomem *kregbase2;
	/* for detecting offset above kregbase2 address */

	/* Per VL data. Enough for all VLs but not all elements are set/used. */
	struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
	/* send context data */
	struct send_context_info *send_contexts;
	/* map hardware send contexts to software index */
	/* spinlock for allocating and releasing send context resources */
	/* lock for pio_map */
	spinlock_t pio_map_lock;
	/* Send Context initialization lock. */
	spinlock_t sc_init_lock;
	/* lock for sdma_map */
	spinlock_t sde_map_lock;
	/* array of kernel send contexts */
	struct send_context **kernel_send_context;
	/* array of vl maps */
	struct pio_vl_map __rcu *pio_map;
	/* default flags to last descriptor */
	/* fields common to all SDMA engines */

	volatile __le64 *sdma_heads_dma;	/* DMA'ed by chip */
	dma_addr_t sdma_heads_phys;
	void *sdma_pad_dma;			/* DMA'ed by chip */
	dma_addr_t sdma_pad_phys;
	/* for deallocation */
	size_t sdma_heads_size;
	/* number from the chip */
	u32 chip_sdma_engines;

	/* array of engines sized by num_sdma */
	struct sdma_engine *per_sdma;
	/* array of vl maps */
	struct sdma_vl_map __rcu *sdma_map;
	/* SPC freeze waitqueue and variable */
	wait_queue_head_t sdma_unfreeze_wq;
	atomic_t sdma_unfreeze_count;

	u32 lcb_access_count;	/* count of LCB users */

	/* common data between shared ASIC HFIs in this OS */
	struct hfi1_asic_data *asic_data;

	/* mem-mapped pointer to base of PIO buffers */
	void __iomem *piobase;
	/*
	 * write-combining mem-mapped pointer to base of RcvArray
	 * memory.
	 */
	void __iomem *rcvarray_wc;
	/*
	 * credit return base - a per-NUMA range of DMA address that
	 * the chip will use to update the per-context free counter
	 */
	struct credit_return_base *cr_base;

	/* send context numbers and sizes for each type */
	struct sc_config_sizes sc_sizes[SC_MAX];

	char *boardname;	/* human readable board info */
	u64 z_send_schedule;

	u64 __percpu *send_schedule;
	/* number of reserved contexts for VNIC usage */
	u16 num_vnic_contexts;
	/* number of receive contexts in use by the driver */
	u32 num_rcv_contexts;
	/* number of pio send contexts in use by the driver */
	u32 num_send_contexts;
	/*
	 * number of ctxts available for PSM open
	 */
	/* total number of available user/PSM contexts */
	u32 num_user_contexts;
	/* base receive interrupt timeout, in CSR units */
	u32 rcv_intr_timeout_csr;

	u32 freezelen;	/* max length of freezemsg */
	u64 __iomem *egrtidbase;
	spinlock_t sendctrl_lock;	/* protect changes to SendCtrl */
	spinlock_t rcvctrl_lock;	/* protect changes to RcvCtrl */
	spinlock_t uctxt_lock;		/* protect rcd changes */
	struct mutex dc8051_lock;	/* exclusive access to 8051 */
	struct workqueue_struct *update_cntr_wq;
	struct work_struct update_cntr_work;
	/* exclusive access to 8051 memory */
	spinlock_t dc8051_memlock;
	int dc8051_timed_out;	/* remember if the 8051 timed out */
	/*
	 * A page that will hold event notification bitmaps for all
	 * contexts. This page will be mapped into all processes.
	 */
	unsigned long *events;
	/*
	 * per unit status, see also portdata statusp
	 * mapped read-only into user processes so they can get unit and
	 * IB link status cheaply
	 */
	struct hfi1_status *status;
	/* revision register shadow */
	/* Base GUID for device (network order) */

	/* these are the "32 bit" regs */

	/* value we put in kr_rcvhdrsize */
	/* number of receive contexts the chip supports */
	u32 chip_rcv_contexts;
	/* number of receive array entries */
	u32 chip_rcv_array_count;
	/* number of PIO send contexts the chip supports */
	u32 chip_send_contexts;
	/* number of bytes in the PIO memory buffer */
	u32 chip_pio_mem_size;
	/* number of bytes in the SDMA memory buffer */
	u32 chip_sdma_mem_size;

	/* size of each rcvegrbuffer */
	u16 rcvegrbufsize_shift;
	/* both sides of the PCIe link are gen3 capable */
	u8 link_gen3_capable;
	/* localbus width (1,2,4,8,16,32) from config space */
	/* localbus speed in MHz */
	int unit;	/* unit # of this chip */
	int node;	/* home node of this chip */

	/* save these PCI fields to restore after a reset */

	/*
	 * ASCII serial number, from flash, large enough for original
	 * all digit strings, and longer serial number format
	 */
	u8 serial[SERIAL_MAX];
	/* human readable board version */
	u8 boardversion[BOARD_VERS_MAX];
	u8 lbus_info[32];	/* human readable localbus info */
	/* chip major rev, from CceRevision */
	/* chip minor rev, from CceRevision */
	/* implementation code */
	/* vAU of this device */
	/* vCU of this device */
	/* link credits of this device */
	/* initial vl15 credits to use */
	/*
	 * Cached value for vl15buf, read during verify cap interrupt. VL15
	 * credits are to be kept at 0 and set when handling the link-up
	 * interrupt. This removes the possibility of receiving VL15 MAD
	 * packets before this HFI is ready.
	 */

	/* Misc small ints */
	u16 irev;	/* implementation revision */
	u32 dc8051_ver;	/* 8051 firmware version */
	spinlock_t hfi1_diag_trans_lock;	/* protect diag observer ops */
	struct platform_config platform_config;
	struct platform_config_cache pcfg_cache;

	struct diag_client *diag_client;

	/* MSI-X information */
	struct hfi1_msix_entry *msix_entries;
	u32 num_msix_entries;
	u32 first_dyn_msix_idx;

	/* INTx information */
	u32 requested_intx_irq;	/* did we request one? */

	/* general interrupt: mask of handled interrupts */
	u64 gi_mask[CCE_NUM_INT_CSRS];

	struct rcv_array_data rcv_entries;

	/* cycle length of PS* counters in HW (in picoseconds) */
	u16 psxmitwait_check_rate;

	/*
	 * 64 bit synthetic counters
	 */
	struct timer_list synth_stats_timer;

	size_t cntrnameslen;
	/*
	 * remembered values for synthetic counters
	 */
	char *portcntrnames;
	size_t portcntrnameslen;

	struct err_info_rcvport err_info_rcvport;
	struct err_info_constraint err_info_rcv_constraint;
	struct err_info_constraint err_info_xmit_constraint;

	atomic_t drop_packet;
	u8 err_info_uncorrectable;
	u8 err_info_fmconfig;

	/*
	 * Software counters for the status bits defined by the
	 * associated error status registers
	 */
	u64 cce_err_status_cnt[NUM_CCE_ERR_STATUS_COUNTERS];
	u64 rcv_err_status_cnt[NUM_RCV_ERR_STATUS_COUNTERS];
	u64 misc_err_status_cnt[NUM_MISC_ERR_STATUS_COUNTERS];
	u64 send_pio_err_status_cnt[NUM_SEND_PIO_ERR_STATUS_COUNTERS];
	u64 send_dma_err_status_cnt[NUM_SEND_DMA_ERR_STATUS_COUNTERS];
	u64 send_egress_err_status_cnt[NUM_SEND_EGRESS_ERR_STATUS_COUNTERS];
	u64 send_err_status_cnt[NUM_SEND_ERR_STATUS_COUNTERS];

	/* Software counter that spans all contexts */
	u64 sw_ctxt_err_status_cnt[NUM_SEND_CTXT_ERR_STATUS_COUNTERS];
	/* Software counter that spans all DMA engines */
	u64 sw_send_dma_eng_err_status_cnt[
		NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS];
	/* Software counter that aggregates all cce_err_status errors */
	u64 sw_cce_err_status_aggregate;
	/* Software counter that aggregates all bypass packet rcv errors */
	u64 sw_rcv_bypass_packet_errors;
	/* receive interrupt function */
	rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
	/* Save the enabled LCB error bits */

	/*
	 * Capability to have different send engines simply by changing a
	 * pointer value.
	 */
	send_routine process_pio_send ____cacheline_aligned_in_smp;
	send_routine process_dma_send;
	void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
				u64 pbc, const void *from, size_t count);
	int (*process_vnic_dma_send)(struct hfi1_devdata *dd, u8 q_idx,
				     struct hfi1_vnic_vport_info *vinfo,
				     struct sk_buff *skb, u64 pbc, u8 plen);
	/* hfi1_pportdata, points to array of (physical) port-specific
	 * data structs, indexed by pidx (0..n-1)
	 */
	struct hfi1_pportdata *pport;
	/* receive context data */
	struct hfi1_ctxtdata **rcd;
	u64 __percpu *int_counter;
	/* verbs tx opcode stats */
	struct hfi1_opcode_stats_perctx __percpu *tx_opstats;
	/* device (not port) flags, basically device capabilities */
	/* Number of physical ports available */
	/* Lowest context number which can be used by user processes or VNIC */
	u8 first_dyn_alloc_ctxt;
	/* adding a new field here would make it part of this cacheline */

	/* seqlock for sc2vl */
	seqlock_t sc2vl_lock ____cacheline_aligned_in_smp;
	/* receive interrupt functions */
	rhf_rcv_function_ptr *rhf_rcv_function_map;
	u64 __percpu *rcv_limit;
	u16 rhf_offset;	/* offset of RHF within receive header entry */
	/* adding a new field here would make it part of this cacheline */

	/* OUI comes from the HW. Used everywhere as 3 separate bytes. */

	/* Timer and counter used to detect RcvBufOvflCnt changes */
	struct timer_list rcverr_timer;

	wait_queue_head_t event_queue;

	/* receive context tail dummy address */
	__le64 *rcvhdrtail_dummy_kvaddr;
	dma_addr_t rcvhdrtail_dummy_dma;

	/* Serialize ASPM enable/disable between multiple verbs contexts */
	spinlock_t aspm_lock;
	/* Number of verbs contexts which have disabled ASPM */
	atomic_t aspm_disabled_cnt;
	/* Keeps track of user space clients */
	atomic_t user_refcount;
	/* Used to wait for outstanding user space clients before dev removal */
	struct completion user_comp;

	bool eprom_available;	/* true if EPROM is available for this device */
	bool aspm_supported;	/* Does HW support ASPM */
	bool aspm_enabled;	/* ASPM state: enabled/disabled */
	struct rhashtable *sdma_rht;

	struct kobject kobj;

	struct hfi1_vnic_data vnic;
};
static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
{
	return (dd->vnic.rmt_start + spare) > NUM_MAP_ENTRIES;
}
/* 8051 firmware version helper */
#define dc8051_ver(a, b, c) ((a) << 16 | (b) << 8 | (c))
#define dc8051_ver_maj(a) (((a) & 0xff0000) >> 16)
#define dc8051_ver_min(a) (((a) & 0x00ff00) >> 8)
#define dc8051_ver_patch(a) ((a) & 0x0000ff)
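/*
 * Worked example (illustrative): firmware version 1.27.0 packs as
 * dc8051_ver(1, 27, 0) == 0x011b00, and dc8051_ver_maj(), _min() and
 * _patch() recover 1, 27 and 0 from that value.
 */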
/* f_put_tid types */
#define PT_EXPECTED       0
#define PT_INVALID_FLUSH  2
#define PT_INVALID        3

struct mmu_rb_handler;
/* Private data for file operations */
struct hfi1_filedata {
	struct hfi1_devdata *dd;
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_user_sdma_comp_q *cq;
	struct hfi1_user_sdma_pkt_q *pq;
	/* for cpu affinity; -1 if none */
	struct mmu_rb_handler *handler;
	struct tid_rb_node **entry_to_rb;
	spinlock_t tid_lock;	/* protect tid_[limit,used] counters */
	u32 invalid_tid_idx;
	/* protect invalid_tids array and invalid_tid_idx */
	spinlock_t invalid_lock;
	struct mm_struct *mm;
};
extern struct list_head hfi1_dev_list;
extern spinlock_t hfi1_devs_lock;
struct hfi1_devdata *hfi1_lookup(int unit);

static inline unsigned long uctxt_offset(struct hfi1_ctxtdata *uctxt)
{
	return (uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
		HFI1_MAX_SHARED_CTXTS;
}
int hfi1_init(struct hfi1_devdata *dd, int reinit);
int hfi1_count_active_units(void);

int hfi1_diag_add(struct hfi1_devdata *dd);
void hfi1_diag_remove(struct hfi1_devdata *dd);
void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup);

void handle_user_interrupt(struct hfi1_ctxtdata *rcd);

int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd);
int hfi1_create_kctxts(struct hfi1_devdata *dd);
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **rcd);
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd);
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
						 u16 ctxt);
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
void set_all_slowpath(struct hfi1_devdata *dd);
void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd);
void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd);
void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd);

extern const struct pci_device_id hfi1_pci_tbl[];
void hfi1_make_ud_req_9B(struct rvt_qp *qp,
			 struct hfi1_pkt_state *ps,
			 struct rvt_swqe *wqe);

void hfi1_make_ud_req_16B(struct rvt_qp *qp,
			  struct hfi1_pkt_state *ps,
			  struct rvt_swqe *wqe);

/* receive packet handler dispositions */
#define RCV_PKT_OK      0x0 /* keep going */
#define RCV_PKT_LIMIT   0x1 /* stop, hit limit, start thread */
#define RCV_PKT_DONE    0x2 /* stop, no more packets detected */
/* calculate the current RHF address */
static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
{
	return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->dd->rhf_offset;
}

int hfi1_reset_device(int);

void receive_interrupt_work(struct work_struct *work);
/* extract service channel from header and rhf */
static inline int hfi1_9B_get_sc5(struct ib_header *hdr, u64 rhf)
{
	return ib_get_sc(hdr) | ((!!(rhf_dc_info(rhf))) << 4);
}
#define HFI1_JKEY_WIDTH       16
#define HFI1_JKEY_MASK        (BIT(16) - 1)
#define HFI1_ADMIN_JKEY_RANGE 32

/*
 * J_KEYs are split and allocated in the following groups:
 *   0 - 31    - users with administrator privileges
 *  32 - 63    - kernel protocols using KDETH packets
 *  64 - 65535 - all other users using KDETH packets
 */
static inline u16 generate_jkey(kuid_t uid)
{
	u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;

	if (capable(CAP_SYS_ADMIN))
		jkey &= HFI1_ADMIN_JKEY_RANGE - 1;
	else
		jkey |= BIT(HFI1_JKEY_WIDTH - 1);

	return jkey;
}
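/*
 * Worked example (illustrative): a non-privileged uid of 1000 yields
 * 1000 | BIT(15) == 33768, landing in the 64-65535 "all other users" range;
 * a CAP_SYS_ADMIN caller with the same uid gets 1000 & 31 == 8, inside the
 * 0-31 administrator range.
 */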
/*
 * active_egress_rate
 *
 * returns the active egress rate in units of [10^6 bits/sec]
 */
static inline u32 active_egress_rate(struct hfi1_pportdata *ppd)
{
	u16 link_speed = ppd->link_speed_active;
	u16 link_width = ppd->link_width_active;
	u32 egress_rate;

	if (link_speed == OPA_LINK_SPEED_25G)
		egress_rate = 25000;
	else /* assume OPA_LINK_SPEED_12_5G */
		egress_rate = 12500;

	switch (link_width) {
	case OPA_LINK_WIDTH_4X:
		egress_rate *= 4;
		break;
	case OPA_LINK_WIDTH_3X:
		egress_rate *= 3;
		break;
	case OPA_LINK_WIDTH_2X:
		egress_rate *= 2;
		break;
	default:
		/* assume IB_WIDTH_1X */
		break;
	}

	return egress_rate;
}
/*
 * Returns the number of 'fabric clock cycles' to egress a packet
 * of length 'len' bytes, at 'rate' Mbit/s. Since the fabric clock
 * rate is (approximately) 805 MHz, the units of the returned value
 * are (1/805 MHz).
 */
static inline u32 egress_cycles(u32 len, u32 rate)
{
	u32 cycles;

	/*
	 * cycles is:
	 *
	 *          (length) [bits] / (rate) [bits/sec]
	 *  ---------------------------------------------------
	 *   fabric_clock_period == 1 /(805 * 10^6) [cycles/sec]
	 */

	cycles = len * 8;	/* bits */
	cycles *= 805;
	cycles /= rate;

	return cycles;
}
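/*
 * Worked example (illustrative): a 4096-byte packet on a 100 Gb/s link
 * (rate == 100000) costs 4096 * 8 * 805 / 100000 == 263 fabric clock
 * cycles, using the integer arithmetic above.
 */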
void set_link_ipg(struct hfi1_pportdata *ppd);
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type);
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
		u32 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh);
void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
		    u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
		    u8 sc5, const struct ib_grh *old_grh);
typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
				u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
				u8 sc5, const struct ib_grh *old_grh);

#define PKEY_CHECK_INVALID -1
int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
		      u8 sc5, int8_t s_pkey_index);
#define PACKET_EGRESS_TIMEOUT 350
static inline void pause_for_credit_return(struct hfi1_devdata *dd)
{
	/* Pause at least 1us, to ensure chip returns all credits */
	u32 usec = cclock_to_ns(dd, PACKET_EGRESS_TIMEOUT) / 1000;

	udelay(usec ? usec : 1);
}
/*
 * sc_to_vlt() reverse lookup sc to vl
 */
static inline u8 sc_to_vlt(struct hfi1_devdata *dd, u8 sc5)
{
	unsigned seq;
	u8 rval;

	if (sc5 >= OPA_MAX_SCS)
		return (u8)(0xff);

	do {
		seq = read_seqbegin(&dd->sc2vl_lock);
		rval = *(((u8 *)dd->sc2vl) + sc5);
	} while (read_seqretry(&dd->sc2vl_lock, seq));

	return rval;
}
#define PKEY_MEMBER_MASK 0x8000
#define PKEY_LOW_15_MASK 0x7fff

/*
 * ingress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the ingress partition key table), return 0
 * otherwise. Use the matching criteria for ingress partition keys
 * specified in the OPAv1 spec., section 9.10.14.
 */
static inline int ingress_pkey_matches_entry(u16 pkey, u16 ent)
{
	u16 mkey = pkey & PKEY_LOW_15_MASK;
	u16 ment = ent & PKEY_LOW_15_MASK;

	if (mkey == ment) {
		/*
		 * If pkey[15] is clear (limited partition member),
		 * is bit 15 in the corresponding table element
		 * clear (limited member)?
		 */
		if (!(pkey & PKEY_MEMBER_MASK))
			return !!(ent & PKEY_MEMBER_MASK);
		return 1;
	}
	return 0;
}
/*
 * ingress_pkey_table_search - search the entire pkey table for
 * an entry which matches 'pkey'. return 0 if a match is found,
 * and 1 otherwise.
 */
static int ingress_pkey_table_search(struct hfi1_pportdata *ppd, u16 pkey)
{
	int i;

	for (i = 0; i < MAX_PKEY_VALUES; i++) {
		if (ingress_pkey_matches_entry(pkey, ppd->pkeys[i]))
			return 0;
	}
	return 1;
}
/*
 * ingress_pkey_table_fail - record a failure of ingress pkey validation,
 * i.e., increment port_rcv_constraint_errors for the port, and record
 * the 'error info' for this failure.
 */
static void ingress_pkey_table_fail(struct hfi1_pportdata *ppd, u16 pkey,
				    u32 slid)
{
	struct hfi1_devdata *dd = ppd->dd;

	incr_cntr64(&ppd->port_rcv_constraint_errors);
	if (!(dd->err_info_rcv_constraint.status & OPA_EI_STATUS_SMASK)) {
		dd->err_info_rcv_constraint.status |= OPA_EI_STATUS_SMASK;
		dd->err_info_rcv_constraint.slid = slid;
		dd->err_info_rcv_constraint.pkey = pkey;
	}
}
/*
 * ingress_pkey_check - Return 0 if the ingress pkey is valid, return 1
 * otherwise. Use the criteria in the OPAv1 spec, section 9.10.14. idx
 * is a hint as to the best place in the partition key table to begin
 * searching. This function should not be called on the data path because
 * of performance reasons. On datapath pkey check is expected to be done
 * by HW and rcv_pkey_check function should be called instead.
 */
static inline int ingress_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
				     u8 sc5, u8 idx, u32 slid, bool force)
{
	if (!(force) && !(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
		return 0;

	/* If SC15, pkey[0:14] must be 0x7fff */
	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
		goto bad;

	/* Is the pkey = 0x0, or 0x8000? */
	if ((pkey & PKEY_LOW_15_MASK) == 0)
		goto bad;

	/* The most likely matching pkey has index 'idx' */
	if (ingress_pkey_matches_entry(pkey, ppd->pkeys[idx]))
		return 0;

	/* no match - try the whole table */
	if (!ingress_pkey_table_search(ppd, pkey))
		return 0;

bad:
	ingress_pkey_table_fail(ppd, pkey, slid);
	return 1;
}
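/*
 * A sketch of how the slow-path check above is typically used (illustrative
 * only; the call site and error path are assumptions): management code
 * validates the pkey from a received MAD before acting on it, passing 0 as
 * the table-index hint.
 *
 *	if (ingress_pkey_check(ppd, pkey, sc5, 0, slid, false))
 *		return reject_mad();	// hypothetical error path
 */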
/*
 * rcv_pkey_check - Return 0 if the ingress pkey is valid, return 1
 * otherwise. It only ensures pkey is valid for QP0. This function
 * should be called on the data path instead of ingress_pkey_check
 * as on data path, pkey check is done by HW (except for QP0).
 */
static inline int rcv_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
				 u8 sc5, u16 slid)
{
	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
		return 0;

	/* If SC15, pkey[0:14] must be 0x7fff */
	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
		goto bad;

	return 0;
bad:
	ingress_pkey_table_fail(ppd, pkey, slid);
	return 1;
}
/* MTU enumeration, 256-4k match IB */
#define OPA_MTU_256   1
#define OPA_MTU_512   2
#define OPA_MTU_1024  3
#define OPA_MTU_2048  4
#define OPA_MTU_4096  5
u32 lrh_max_header_bytes(struct hfi1_devdata *dd);
int mtu_to_enum(u32 mtu, int default_if_bad);
u16 enum_to_mtu(int mtu);
static inline int valid_ib_mtu(unsigned int mtu)
{
	return mtu == 256 || mtu == 512 ||
	       mtu == 1024 || mtu == 2048 ||
	       mtu == 4096;
}

static inline int valid_opa_max_mtu(unsigned int mtu)
{
	return mtu >= 2048 &&
	       (valid_ib_mtu(mtu) || mtu == 8192 || mtu == 10240);
}
int set_mtu(struct hfi1_pportdata *ppd);

int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc);
void hfi1_disable_after_error(struct hfi1_devdata *dd);
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit);
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);

int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);

void set_up_vau(struct hfi1_devdata *dd, u8 vau);
void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
void reset_link_credits(struct hfi1_devdata *dd);
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);

int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);
static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
{
	return ppd->dd;
}
static inline struct hfi1_devdata *dd_from_dev(struct hfi1_ibdev *dev)
{
	return container_of(dev, struct hfi1_devdata, verbs_dev);
}

static inline struct hfi1_devdata *dd_from_ibdev(struct ib_device *ibdev)
{
	return dd_from_dev(to_idev(ibdev));
}

static inline struct hfi1_pportdata *ppd_from_ibp(struct hfi1_ibport *ibp)
{
	return container_of(ibp, struct hfi1_pportdata, ibport_data);
}

static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi)
{
	return container_of(rdi, struct hfi1_ibdev, rdi);
}

static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	WARN_ON(pidx >= dd->num_pports);
	return &dd->pport[pidx].ibport_data;
}

static inline struct hfi1_ibport *rcd_to_iport(struct hfi1_ctxtdata *rcd)
{
	return &rcd->ppd->ibport_data;
}
void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
			       bool do_cnp);
static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
			       bool do_cnp)
{
	bool becn;
	bool fecn;

	if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
		fecn = hfi1_16B_get_fecn(pkt->hdr);
		becn = hfi1_16B_get_becn(pkt->hdr);
	} else {
		fecn = ib_bth_get_fecn(pkt->ohdr);
		becn = ib_bth_get_becn(pkt->ohdr);
	}

	if (unlikely(fecn || becn)) {
		hfi1_process_ecn_slowpath(qp, pkt, do_cnp);
		return fecn;
	}
	return false;
}
/*
 * Return the indexed PKEY from the port PKEY table.
 */
static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u16 ret;

	if (index >= ARRAY_SIZE(ppd->pkeys))
		ret = 0;
	else
		ret = ppd->pkeys[index];

	return ret;
}
/*
 * Return the indexed GUID from the port GUIDs table.
 */
static inline __be64 get_sguid(struct hfi1_ibport *ibp, unsigned int index)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	WARN_ON(index >= HFI1_GUIDS_PER_PORT);
	return cpu_to_be64(ppd->guids[index]);
}
/*
 * Called by readers of cc_state only, must call under rcu_read_lock().
 */
static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
{
	return rcu_dereference(ppd->cc_state);
}

/*
 * Called by writers of cc_state only, must call under cc_state_lock.
 */
static inline
struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
{
	return rcu_dereference_protected(ppd->cc_state,
					 lockdep_is_held(&ppd->cc_state_lock));
}
/*
 * values for dd->flags (_device_ related flags)
 */
#define HFI1_INITTED           0x1    /* chip and driver up and initted */
#define HFI1_PRESENT           0x2    /* chip accesses can be done */
#define HFI1_FROZEN            0x4    /* chip in SPC freeze */
#define HFI1_HAS_SDMA_TIMEOUT  0x8
#define HFI1_HAS_SEND_DMA      0x10   /* Supports Send DMA */
#define HFI1_FORCED_FREEZE     0x80   /* driver forced freeze mode */

/* IB dword length mask in PBC (lower 11 bits); same for all chips */
#define HFI1_PBC_LENGTH_MASK   ((1 << 11) - 1)
/* ctxt_flag bit offsets */
/* base context has not finished initializing */
#define HFI1_CTXT_BASE_UNINIT 1
/* base context initialization failed */
#define HFI1_CTXT_BASE_FAILED 2
/* waiting for a packet to arrive */
#define HFI1_CTXT_WAITING_RCV 3
/* waiting for an urgent packet to arrive */
#define HFI1_CTXT_WAITING_URG 4
/* free up any allocated data at closes */
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
				  const struct pci_device_id *ent);
void hfi1_free_devdata(struct hfi1_devdata *dd);
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);

/* LED beaconing functions */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
			     unsigned int timeoff);
void shutdown_led_override(struct hfi1_pportdata *ppd);
#define HFI1_CREDIT_RETURN_RATE (100)

/*
 * The number of words for the KDETH protocol field. If this is
 * larger than the actual field used, then part of the payload
 * will be in the header.
 *
 * Optimally, we want this sized so that a typical case will
 * use full cache lines. The typical local KDETH header would
 *
 * For a 64-byte cache line, KDETH would need to be 36 bytes or 9 DWORDS
 */
#define DEFAULT_RCVHDRSIZE 9

/*
 * Maximal header byte count:
 *
 * We also want to maintain a cache line alignment to assist DMA'ing
 * of the header bytes. Round up to a good size.
 */
#define DEFAULT_RCVHDR_ENTSIZE 32
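/*
 * Sanity check on the numbers above (illustrative): DEFAULT_RCVHDRSIZE is
 * expressed in DWORDs, so 9 DWORDs * 4 bytes == 36 bytes of KDETH header,
 * matching the 64-byte cache line comment above.
 */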
bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
			u32 nlocked, u32 npages);
int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
			    size_t npages, bool writable, struct page **pages);
void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
			     size_t npages, bool dirty);
static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{
	*((u64 *)rcd->rcvhdrtail_kvaddr) = 0ULL;
}

static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{
	/*
	 * volatile because it's a DMA target from the chip, routine is
	 * inlined, and don't want register caching or reordering.
	 */
	return (u32)le64_to_cpu(*rcd->rcvhdrtail_kvaddr);
}
extern const char ib_hfi1_version[];

int hfi1_device_create(struct hfi1_devdata *dd);
void hfi1_device_remove(struct hfi1_devdata *dd);

int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
			   struct kobject *kobj);
int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
/* Hook for sysfs read of QSFP */
int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent);
void hfi1_clean_up_interrupts(struct hfi1_devdata *dd);
void hfi1_pcie_cleanup(struct pci_dev *pdev);
int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
int pcie_speeds(struct hfi1_devdata *dd);
int request_msix(struct hfi1_devdata *dd, u32 msireq);
int restore_pci_variables(struct hfi1_devdata *dd);
int save_pci_variables(struct hfi1_devdata *dd);
int do_pcie_gen3_transition(struct hfi1_devdata *dd);
int parse_platform_config(struct hfi1_devdata *dd);
int get_platform_config_field(struct hfi1_devdata *dd,
			      enum platform_config_table_type_encoding
			      table_type, int table_index, int field_index,
			      u32 *data, u32 len);

struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi);
/*
 * Flush write combining store buffers (if present) and perform a write
 * barrier.
 */
static inline void flush_wc(void)
{
	asm volatile("sfence" : : : "memory");
}
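/*
 * Illustrative usage sketch (an assumption, not taken from this header):
 * after copying packet data into a write-combining mapped PIO buffer,
 * callers drain the WC buffers before the final trigger write so the
 * device observes the copy in order.  "piobuf", "data" and "len" below
 * are hypothetical names.
 *
 *	memcpy_toio(piobuf, data, len);
 *	flush_wc();
 */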
void handle_eflags(struct hfi1_packet *packet);
int process_receive_ib(struct hfi1_packet *packet);
int process_receive_bypass(struct hfi1_packet *packet);
int process_receive_error(struct hfi1_packet *packet);
int kdeth_process_expected(struct hfi1_packet *packet);
int kdeth_process_eager(struct hfi1_packet *packet);
int process_receive_invalid(struct hfi1_packet *packet);
void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd);
/* global module parameter variables */
extern unsigned int hfi1_max_mtu;
extern unsigned int hfi1_cu;
extern unsigned int user_credit_return_threshold;
extern int num_user_contexts;
extern unsigned long n_krcvqs;
extern uint krcvqs[];
extern int krcvqsset;
extern uint kdeth_qp;
extern uint loopback;
extern uint quick_linkup;
extern uint rcv_intr_timeout;
extern uint rcv_intr_count;
extern uint rcv_intr_dynamic;
extern ushort link_crc_mask;

extern struct mutex hfi1_mutex;
/* Number of seconds before our card status check...  */
#define STATUS_TIMEOUT 60

#define DRIVER_NAME		"hfi1"
#define HFI1_USER_MINOR_BASE     0
#define HFI1_TRACE_MINOR         127
#define HFI1_NMINORS             255

#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL0 0x24f0
#define PCI_DEVICE_ID_INTEL1 0x24f1

#define HFI1_PKT_USER_SC_INTEGRITY					\
	(SEND_CTXT_CHECK_ENABLE_DISALLOW_NON_KDETH_PACKETS_SMASK	\
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK		\
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_SMASK			\
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_GRH_SMASK)

#define HFI1_PKT_KERNEL_SC_INTEGRITY					\
	(SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK)
static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
						  u16 ctxt_type)
{
	u64 base_sc_integrity;

	/* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
	if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
		return 0;

	base_sc_integrity =
	SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;

	if (ctxt_type == SC_USER)
		base_sc_integrity |= HFI1_PKT_USER_SC_INTEGRITY;
	else
		base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;

	/* turn on send-side job key checks if !A0 */
	if (!is_ax(dd))
		base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;

	return base_sc_integrity;
}
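/*
 * Illustrative sketch (an assumption about the caller, not part of this
 * header): the mask computed above is what a send-context setup path
 * would program into that context's check-enable CSR, e.g. roughly:
 *
 *	u64 reg = hfi1_pkt_default_send_ctxt_mask(dd, SC_USER);
 *
 *	write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg);
 *
 * The CSR accessor and register names here are only examples.
 */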
static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
{
	u64 base_sdma_integrity;

	/* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
	if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
		return 0;

	base_sdma_integrity =
	SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;

	if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
		base_sdma_integrity |=
		SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK;

	/* turn on send-side job key checks if !A0 */
	if (!is_ax(dd))
		base_sdma_integrity |=
			SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;

	return base_sdma_integrity;
}
/*
 * hfi1_early_err is used (only!) to print early errors before devdata is
 * allocated, or when dd->pcidev may not be valid, and at the tail end of
 * cleanup when devdata may have been freed, etc.  hfi1_dev_porterr is
 * the same as dd_dev_err, but is used when the message really needs
 * the IB port# to be definitive as to what's happening.
 */
#define hfi1_early_err(dev, fmt, ...) \
	dev_err(dev, fmt, ##__VA_ARGS__)

#define hfi1_early_info(dev, fmt, ...) \
	dev_info(dev, fmt, ##__VA_ARGS__)

#define dd_dev_emerg(dd, fmt, ...) \
	dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
		  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)

#define dd_dev_err(dd, fmt, ...) \
	dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)

#define dd_dev_err_ratelimited(dd, fmt, ...) \
	dev_err_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
			    rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
			    ##__VA_ARGS__)

#define dd_dev_warn(dd, fmt, ...) \
	dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
		 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)

#define dd_dev_warn_ratelimited(dd, fmt, ...) \
	dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
			     rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
			     ##__VA_ARGS__)

#define dd_dev_info(dd, fmt, ...) \
	dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
		 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)

#define dd_dev_info_ratelimited(dd, fmt, ...) \
	dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
			     rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
			     ##__VA_ARGS__)

#define dd_dev_dbg(dd, fmt, ...) \
	dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)

#define hfi1_dev_porterr(dd, port, fmt, ...) \
	dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), ##__VA_ARGS__)
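/*
 * Usage sketch (an assumption; the format strings and variables are
 * hypothetical): all of these log with the IB device name prefixed,
 * and hfi1_dev_porterr() additionally tags the port number.
 *
 *	dd_dev_err(dd, "pcie link training failed: %d\n", ret);
 *	hfi1_dev_porterr(dd, ppd->port, "link down, reason 0x%x\n", reason);
 */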
/*
 * this is used for formatting hw error messages...
 */
struct hfi1_hwerror_msgs {
	u64 mask;
	const char *msg;
	size_t sz;
};

void hfi1_format_hwerrors(u64 hwerrs,
			  const struct hfi1_hwerror_msgs *hwerrmsgs,
			  size_t nhwerrmsgs, char *msg, size_t lmsg);
#define USER_OPCODE_CHECK_VAL 0xC0
#define USER_OPCODE_CHECK_MASK 0xC0
#define OPCODE_CHECK_VAL_DISABLED 0x0
#define OPCODE_CHECK_MASK_DISABLED 0x0

static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int i;

	dd->z_int_counter = get_all_cpu_total(dd->int_counter);
	dd->z_rcv_limit = get_all_cpu_total(dd->rcv_limit);
	dd->z_send_schedule = get_all_cpu_total(dd->send_schedule);

	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ppd->ibport_data.rvp.z_rc_acks =
			get_all_cpu_total(ppd->ibport_data.rvp.rc_acks);
		ppd->ibport_data.rvp.z_rc_qacks =
			get_all_cpu_total(ppd->ibport_data.rvp.rc_qacks);
	}
}
/* Control LED state */
static inline void setextled(struct hfi1_devdata *dd, u32 on)
{
	if (on)
		write_csr(dd, DCC_CFG_LED_CNTRL, 0x1F);
	else
		write_csr(dd, DCC_CFG_LED_CNTRL, 0x10);
}
/* return the i2c resource given the target */
static inline u32 i2c_target(u32 target)
{
	return target ? CR_I2C2 : CR_I2C1;
}

/* return the i2c chain chip resource that this HFI uses for QSFP */
static inline u32 qsfp_resource(struct hfi1_devdata *dd)
{
	return i2c_target(dd->hfi1_id);
}

/* Is this device integrated or discrete? */
static inline bool is_integrated(struct hfi1_devdata *dd)
{
	return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
}

int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
#define DD_DEV_ENTRY(dd)       __string(dev, dev_name(&(dd)->pcidev->dev))
#define DD_DEV_ASSIGN(dd)      __assign_str(dev, dev_name(&(dd)->pcidev->dev))

static inline void hfi1_update_ah_attr(struct ib_device *ibdev,
				       struct rdma_ah_attr *attr)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	u32 dlid = rdma_ah_get_dlid(attr);

	/*
	 * Kernel clients may not have setup GRH information; set it here.
	 */
	ibp = to_iport(ibdev, rdma_ah_get_port_num(attr));
	ppd = ppd_from_ibp(ibp);
	if ((((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ||
	      (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))) &&
	    (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)) &&
	    (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
	    (!(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))) ||
	    (rdma_ah_get_make_grd(attr))) {
		rdma_ah_set_ah_flags(attr, IB_AH_GRH);
		rdma_ah_set_interface_id(attr, OPA_MAKE_ID(dlid));
		rdma_ah_set_subnet_prefix(attr, ibp->rvp.gid_prefix);
	}
}
/*
 * hfi1_check_mcast- Check if the given lid is
 * in the OPA multicast range.
 *
 * The LID might either reside in ah.dlid or might be
 * in the GRH of the address handle as DGID if extended
 * addresses are in use.
 */
static inline bool hfi1_check_mcast(u32 lid)
{
	return ((lid >= opa_get_mcast_base(OPA_MCAST_NR)) &&
		(lid != be32_to_cpu(OPA_LID_PERMISSIVE)));
}
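/*
 * Worked example (assuming MCAST_NR is 4, as the hfi1_make_opa_lid()
 * comment below does, so the multicast base is 0xF0000000):
 *
 *	hfi1_check_mcast(0xF0000001) -> true   (in the multicast range)
 *	hfi1_check_mcast(0xFFFFFFFF) -> false  (32-bit permissive LID)
 *	hfi1_check_mcast(0x00001234) -> false  (unicast)
 */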
#define opa_get_lid(lid, format)	\
	__opa_get_lid(lid, OPA_PORT_PACKET_FORMAT_##format)

/* Convert a lid to a specific lid space */
static inline u32 __opa_get_lid(u32 lid, u8 format)
{
	bool is_mcast = hfi1_check_mcast(lid);

	switch (format) {
	case OPA_PORT_PACKET_FORMAT_8B:
	case OPA_PORT_PACKET_FORMAT_10B:
		if (is_mcast)
			return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
				0xF0000);
		return lid & 0xFFFFF;
	case OPA_PORT_PACKET_FORMAT_16B:
		if (is_mcast)
			return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
				0xF00000);
		return lid & 0xFFFFFF;
	case OPA_PORT_PACKET_FORMAT_9B:
		if (is_mcast)
			return (lid -
				opa_get_mcast_base(OPA_MCAST_NR) +
				be16_to_cpu(IB_MULTICAST_LID_BASE));
		else
			return lid & 0xFFFF;
	default:
		return lid;
	}
}
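/*
 * Worked example (again assuming MCAST_NR is 4 and noting that
 * be16_to_cpu(IB_MULTICAST_LID_BASE) is 0xC000):
 *
 *	opa_get_lid(0x00001234, 9B) -> 0x1234  (unicast, masked to 16 bits)
 *	opa_get_lid(0xF0000001, 9B) -> 0xC001  (multicast, remapped into
 *						the IB 9B multicast range)
 */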
/* Return true if the given lid is in the OPA 16B multicast range */
static inline bool hfi1_is_16B_mcast(u32 lid)
{
	return ((lid >=
		opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 16B)) &&
		(lid != opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B)));
}
static inline void hfi1_make_opa_lid(struct rdma_ah_attr *attr)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(attr);
	u32 dlid = rdma_ah_get_dlid(attr);

	/* Modify ah_attr.dlid to be in the 32 bit LID space.
	 * This is how the address will be laid out:
	 * Assuming MCAST_NR to be 4,
	 * 32 bit permissive LID = 0xFFFFFFFF
	 * Multicast LID range = 0xFFFFFFFE to 0xF0000000
	 * Unicast LID range = 0xEFFFFFFF to 1
	 * Invalid LID = 0
	 */
	if (ib_is_opa_gid(&grh->dgid))
		dlid = opa_get_lid_from_gid(&grh->dgid);
	else if ((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
		 (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
		 (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)))
		dlid = dlid - be16_to_cpu(IB_MULTICAST_LID_BASE) +
			opa_get_mcast_base(OPA_MCAST_NR);
	else if (dlid == be16_to_cpu(IB_LID_PERMISSIVE))
		dlid = be32_to_cpu(OPA_LID_PERMISSIVE);

	rdma_ah_set_dlid(attr, dlid);
}
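/*
 * Worked example of the remapping above (MCAST_NR assumed to be 4):
 *
 *	9B dlid 0x1234 (unicast)       -> 0x1234      (unchanged)
 *	9B dlid 0xC005 (IB multicast)  -> 0xF0000005  (OPA multicast)
 *	9B dlid 0xFFFF (IB permissive) -> 0xFFFFFFFF  (OPA permissive)
 */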
static inline u8 hfi1_get_packet_type(u32 lid)
{
	/* 9B if lid > 0xF0000000 */
	if (lid >= opa_get_mcast_base(OPA_MCAST_NR))
		return HFI1_PKT_TYPE_9B;

	/* 16B if lid > 0xC000 */
	if (lid >= opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 9B))
		return HFI1_PKT_TYPE_16B;

	return HFI1_PKT_TYPE_9B;
}
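/*
 * Examples (MCAST_NR assumed to be 4, so the 9B multicast base is
 * 0xC000 and the 32-bit multicast base is 0xF0000000):
 *
 *	hfi1_get_packet_type(0x00001000) -> HFI1_PKT_TYPE_9B
 *	hfi1_get_packet_type(0x00010000) -> HFI1_PKT_TYPE_16B
 *	hfi1_get_packet_type(0xF0000001) -> HFI1_PKT_TYPE_9B
 */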
static inline u8 hfi1_get_hdr_type(u32 lid, struct rdma_ah_attr *attr)
{
	/*
	 * If there was an incoming 16B packet with permissive
	 * LIDs, OPA GIDs would have been programmed when those
	 * packets were received. A 16B packet will have to
	 * be sent in response to that packet. Return a 16B
	 * header type if that's the case.
	 */
	if (rdma_ah_get_dlid(attr) == be32_to_cpu(OPA_LID_PERMISSIVE))
		return (ib_is_opa_gid(&rdma_ah_read_grh(attr)->dgid)) ?
			HFI1_PKT_TYPE_16B : HFI1_PKT_TYPE_9B;

	/*
	 * Return a 16B header type if either the destination
	 * or source lid is extended.
	 */
	if (hfi1_get_packet_type(rdma_ah_get_dlid(attr)) == HFI1_PKT_TYPE_16B)
		return HFI1_PKT_TYPE_16B;
	return hfi1_get_packet_type(lid);
}
static inline void hfi1_make_ext_grh(struct hfi1_packet *packet,
				     struct ib_grh *grh, u32 slid,
				     u32 dlid)
{
	struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	if (!ibp)
		return;

	grh->hop_limit = 1;
	grh->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	if (slid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))
		grh->sgid.global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(OPA_LID_PERMISSIVE));
	else
		grh->sgid.global.interface_id = OPA_MAKE_ID(slid);

	/*
	 * Upper layers (like mad) may compare the dgid in the
	 * wc that is obtained here with the sgid_index in
	 * the wr. Since sgid_index in wr is always 0 for
	 * extended lids, set the dgid here to the default
	 * IB gid.
	 */
	grh->dgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	grh->dgid.global.interface_id =
		cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
}
static inline int hfi1_get_16b_padding(u32 hdr_size, u32 payload)
{
	return -(hdr_size + payload + (SIZE_OF_CRC << 2) +
		 SIZE_OF_LT) & 0x7;
}
static inline void hfi1_make_ib_hdr(struct ib_header *hdr,
				    u16 lrh0, u16 len,
				    u16 dlid, u16 slid)
{
	hdr->lrh[0] = cpu_to_be16(lrh0);
	hdr->lrh[1] = cpu_to_be16(dlid);
	hdr->lrh[2] = cpu_to_be16(len);
	hdr->lrh[3] = cpu_to_be16(slid);
}
static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr,
				     u32 slid, u32 dlid,
				     u16 len, u16 pkey,
				     bool becn, bool fecn, u8 l4,
				     u8 sc)
{
	u32 lrh0 = 0;
	u32 lrh1 = 0x40000000;
	u32 lrh2 = 0;
	u32 lrh3 = 0;

	lrh0 = (lrh0 & ~OPA_16B_BECN_MASK) | (becn << OPA_16B_BECN_SHIFT);
	lrh0 = (lrh0 & ~OPA_16B_LEN_MASK) | (len << OPA_16B_LEN_SHIFT);
	lrh0 = (lrh0 & ~OPA_16B_LID_MASK)  | (slid & OPA_16B_LID_MASK);
	lrh1 = (lrh1 & ~OPA_16B_FECN_MASK) | (fecn << OPA_16B_FECN_SHIFT);
	lrh1 = (lrh1 & ~OPA_16B_SC_MASK) | (sc << OPA_16B_SC_SHIFT);
	lrh1 = (lrh1 & ~OPA_16B_LID_MASK)  | (dlid & OPA_16B_LID_MASK);
	lrh2 = (lrh2 & ~OPA_16B_SLID_MASK) |
		((slid >> OPA_16B_SLID_SHIFT) << OPA_16B_SLID_HIGH_SHIFT);
	lrh2 = (lrh2 & ~OPA_16B_DLID_MASK) |
		((dlid >> OPA_16B_DLID_SHIFT) << OPA_16B_DLID_HIGH_SHIFT);
	lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | (pkey << OPA_16B_PKEY_SHIFT);
	lrh2 = (lrh2 & ~OPA_16B_L4_MASK) | l4;

	hdr->lrh[0] = lrh0;
	hdr->lrh[1] = lrh1;
	hdr->lrh[2] = lrh2;
	hdr->lrh[3] = lrh3;
}
#endif /* _HFI1_KERNEL_H */