drivers/infiniband/hw/hfi1/driver.c
1 /*
2 * Copyright(c) 2015-2018 Intel Corporation.
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
7 * GPL LICENSE SUMMARY
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * BSD LICENSE
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 #include <linux/spinlock.h>
49 #include <linux/pci.h>
50 #include <linux/io.h>
51 #include <linux/delay.h>
52 #include <linux/netdevice.h>
53 #include <linux/vmalloc.h>
54 #include <linux/module.h>
55 #include <linux/prefetch.h>
56 #include <rdma/ib_verbs.h>
58 #include "hfi.h"
59 #include "trace.h"
60 #include "qp.h"
61 #include "sdma.h"
62 #include "debugfs.h"
63 #include "vnic.h"
64 #include "fault.h"
66 #undef pr_fmt
67 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
70 * The size has to be longer than this string, so we can append
71 * board/chip information to it in the initialization code.
73 const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";
75 DEFINE_MUTEX(hfi1_mutex); /* general driver use */
77 unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
78 module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
79 MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify(
80 HFI1_DEFAULT_MAX_MTU));
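/*
 * Illustrative usage: the limit is applied at module load time, e.g.
 * "modprobe hfi1 max_mtu=8192" (example value only); the parameter is
 * read-only (S_IRUGO) once the driver is loaded.
 */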
82 unsigned int hfi1_cu = 1;
83 module_param_named(cu, hfi1_cu, uint, S_IRUGO);
84 MODULE_PARM_DESC(cu, "Credit return units");
86 unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
87 static int hfi1_caps_set(const char *val, const struct kernel_param *kp);
88 static int hfi1_caps_get(char *buffer, const struct kernel_param *kp);
89 static const struct kernel_param_ops cap_ops = {
90 .set = hfi1_caps_set,
91 .get = hfi1_caps_get
93 module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
94 MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
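/*
 * Because cap_mask is registered with S_IWUSR via module_param_cb(), it can
 * also be changed at runtime through /sys/module/hfi1/parameters/cap_mask,
 * which goes through hfi1_caps_set() and hfi1_caps_get() above.
 */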
96 MODULE_LICENSE("Dual BSD/GPL");
97 MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
100 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
102 #define MAX_PKT_RECV 64
104 * MAX_PKT_RECV_THREAD is the max # of packets processed before
105 * the qp_wait_list queue is flushed.
107 #define MAX_PKT_RECV_THREAD (MAX_PKT_RECV * 4)
108 #define EGR_HEAD_UPDATE_THRESHOLD 16
110 struct hfi1_ib_stats hfi1_stats;
112 static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
114 int ret = 0;
115 unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
116 cap_mask = *cap_mask_ptr, value, diff,
117 write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
118 HFI1_CAP_WRITABLE_MASK);
120 ret = kstrtoul(val, 0, &value);
121 if (ret) {
122 pr_warn("Invalid module parameter value for 'cap_mask'\n");
123 goto done;
125 /* Get the changed bits (except the locked bit) */
126 diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);
128 /* Remove any bits that are not allowed to change after driver load */
129 if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
130 pr_warn("Ignoring non-writable capability bits %#lx\n",
131 diff & ~write_mask);
132 diff &= write_mask;
135 /* Mask off any reserved bits */
136 diff &= ~HFI1_CAP_RESERVED_MASK;
137 /* Clear any previously set and changing bits */
138 cap_mask &= ~diff;
139 /* Update the bits with the new capability */
140 cap_mask |= (value & diff);
141 /* Check for any kernel/user restrictions */
142 diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
143 ((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
144 cap_mask &= ~diff;
145 /* Set the bitmask to the final set */
146 *cap_mask_ptr = cap_mask;
147 done:
148 return ret;
151 static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
153 unsigned long cap_mask = *(unsigned long *)kp->arg;
155 cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
156 cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);
158 return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
161 struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
163 struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
164 struct hfi1_devdata *dd = container_of(ibdev,
165 struct hfi1_devdata, verbs_dev);
166 return dd->pcidev;
170 * Return count of units with at least one port ACTIVE.
172 int hfi1_count_active_units(void)
174 struct hfi1_devdata *dd;
175 struct hfi1_pportdata *ppd;
176 unsigned long index, flags;
177 int pidx, nunits_active = 0;
179 xa_lock_irqsave(&hfi1_dev_table, flags);
180 xa_for_each(&hfi1_dev_table, index, dd) {
181 if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase1)
182 continue;
183 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
184 ppd = dd->pport + pidx;
185 if (ppd->lid && ppd->linkup) {
186 nunits_active++;
187 break;
191 xa_unlock_irqrestore(&hfi1_dev_table, flags);
192 return nunits_active;
196 * Get address of eager buffer from its index (allocated in chunks, not
197 * contiguous).
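 * The *update flag is raised whenever the index lands on an
 * egrbufs.threshold boundary (threshold is assumed here to be a power of
 * two) and the buffer offset is zero, i.e. when an eager head update is due.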
199 static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
200 u8 *update)
202 u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);
204 *update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
205 return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
206 (offset * RCV_BUF_BLOCK_SIZE));
209 static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd,
210 __le32 *rhf_addr)
212 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
214 return (void *)(rhf_addr - rcd->rhf_offset + offset);
217 static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd,
218 __le32 *rhf_addr)
220 return (struct ib_header *)hfi1_get_header(rcd, rhf_addr);
223 static inline struct hfi1_16b_header
224 *hfi1_get_16B_header(struct hfi1_ctxtdata *rcd,
225 __le32 *rhf_addr)
227 return (struct hfi1_16b_header *)hfi1_get_header(rcd, rhf_addr);
231 * Validate and encode a given RcvArray Buffer size.
232 * The function will check whether the given size falls within
233 * allowed size ranges for the respective type and, optionally,
234 * return the proper encoding.
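 * For example, with a 4 KiB PAGE_SIZE an 8 KiB eager buffer encodes as
 * ilog2(8192 / 4096) + 1 = 2.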
236 int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
238 if (unlikely(!PAGE_ALIGNED(size)))
239 return 0;
240 if (unlikely(size < MIN_EAGER_BUFFER))
241 return 0;
242 if (size >
243 (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
244 return 0;
245 if (encoded)
246 *encoded = ilog2(size / PAGE_SIZE) + 1;
247 return 1;
250 static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
251 struct hfi1_packet *packet)
253 struct ib_header *rhdr = packet->hdr;
254 u32 rte = rhf_rcv_type_err(packet->rhf);
255 u32 mlid_base;
256 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
257 struct hfi1_devdata *dd = ppd->dd;
258 struct hfi1_ibdev *verbs_dev = &dd->verbs_dev;
259 struct rvt_dev_info *rdi = &verbs_dev->rdi;
261 if ((packet->rhf & RHF_DC_ERR) &&
262 hfi1_dbg_fault_suppress_err(verbs_dev))
263 return;
265 if (packet->rhf & RHF_ICRC_ERR)
266 return;
268 if (packet->etype == RHF_RCV_TYPE_BYPASS) {
269 goto drop;
270 } else {
271 u8 lnh = ib_get_lnh(rhdr);
273 mlid_base = be16_to_cpu(IB_MULTICAST_LID_BASE);
274 if (lnh == HFI1_LRH_BTH) {
275 packet->ohdr = &rhdr->u.oth;
276 } else if (lnh == HFI1_LRH_GRH) {
277 packet->ohdr = &rhdr->u.l.oth;
278 packet->grh = &rhdr->u.l.grh;
279 } else {
280 goto drop;
284 if (packet->rhf & RHF_TID_ERR) {
285 /* For TIDERR and RC QPs preemptively schedule a NAK */
286 u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
287 u32 dlid = ib_get_dlid(rhdr);
288 u32 qp_num;
290 /* Sanity check packet */
291 if (tlen < 24)
292 goto drop;
294 /* Check for GRH */
295 if (packet->grh) {
296 u32 vtf;
297 struct ib_grh *grh = packet->grh;
299 if (grh->next_hdr != IB_GRH_NEXT_HDR)
300 goto drop;
301 vtf = be32_to_cpu(grh->version_tclass_flow);
302 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
303 goto drop;
306 /* Get the destination QP number. */
307 qp_num = ib_bth_get_qpn(packet->ohdr);
308 if (dlid < mlid_base) {
309 struct rvt_qp *qp;
310 unsigned long flags;
312 rcu_read_lock();
313 qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
314 if (!qp) {
315 rcu_read_unlock();
316 goto drop;
320 * Handle only RC QPs - for other QP types drop error
321 * packet.
323 spin_lock_irqsave(&qp->r_lock, flags);
325 /* Check for valid receive state. */
326 if (!(ib_rvt_state_ops[qp->state] &
327 RVT_PROCESS_RECV_OK)) {
328 ibp->rvp.n_pkt_drops++;
331 switch (qp->ibqp.qp_type) {
332 case IB_QPT_RC:
333 hfi1_rc_hdrerr(rcd, packet, qp);
334 break;
335 default:
336 /* For now don't handle any other QP types */
337 break;
340 spin_unlock_irqrestore(&qp->r_lock, flags);
341 rcu_read_unlock();
342 } /* Unicast QP */
343 } /* Valid packet with TIDErr */
345 /* handle "RcvTypeErr" flags */
346 switch (rte) {
347 case RHF_RTE_ERROR_OP_CODE_ERR:
349 void *ebuf = NULL;
350 u8 opcode;
352 if (rhf_use_egr_bfr(packet->rhf))
353 ebuf = packet->ebuf;
355 if (!ebuf)
356 goto drop; /* this should never happen */
358 opcode = ib_bth_get_opcode(packet->ohdr);
359 if (opcode == IB_OPCODE_CNP) {
361 * Only in pre-B0 h/w is the CNP_OPCODE handled
362 * via this code path.
364 struct rvt_qp *qp = NULL;
365 u32 lqpn, rqpn;
366 u16 rlid;
367 u8 svc_type, sl, sc5;
369 sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf);
370 sl = ibp->sc_to_sl[sc5];
372 lqpn = ib_bth_get_qpn(packet->ohdr);
373 rcu_read_lock();
374 qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
375 if (!qp) {
376 rcu_read_unlock();
377 goto drop;
380 switch (qp->ibqp.qp_type) {
381 case IB_QPT_UD:
382 rlid = 0;
383 rqpn = 0;
384 svc_type = IB_CC_SVCTYPE_UD;
385 break;
386 case IB_QPT_UC:
387 rlid = ib_get_slid(rhdr);
388 rqpn = qp->remote_qpn;
389 svc_type = IB_CC_SVCTYPE_UC;
390 break;
391 default:
392 rcu_read_unlock();
393 goto drop;
396 process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
397 rcu_read_unlock();
400 packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
401 break;
403 default:
404 break;
407 drop:
408 return;
411 static inline void init_packet(struct hfi1_ctxtdata *rcd,
412 struct hfi1_packet *packet)
414 packet->rsize = rcd->rcvhdrqentsize; /* words */
415 packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
416 packet->rcd = rcd;
417 packet->updegr = 0;
418 packet->etail = -1;
419 packet->rhf_addr = get_rhf_addr(rcd);
420 packet->rhf = rhf_to_cpu(packet->rhf_addr);
421 packet->rhqoff = rcd->head;
422 packet->numpkt = 0;
425 /* We support only two types - 9B and 16B for now */
426 static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
427 [HFI1_PKT_TYPE_9B] = &return_cnp,
428 [HFI1_PKT_TYPE_16B] = &return_cnp_16B
432 * hfi1_process_ecn_slowpath - Process FECN or BECN bits
433 * @qp: The packet's destination QP
434 * @pkt: The packet itself.
435 * @prescan: Is the caller the RXQ prescan
437 * Process the packet's FECN or BECN bits. By now, the packet
438 * has already been evaluated as to whether processing of those bits should
439 * be done.
440 * The significance of the @prescan argument is that if the caller
441 * is the RXQ prescan, a CNP will be sent out instead of waiting for the
442 * normal packet processing to send an ACK with BECN set (or a CNP).
444 bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
445 bool prescan)
447 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
448 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
449 struct ib_other_headers *ohdr = pkt->ohdr;
450 struct ib_grh *grh = pkt->grh;
451 u32 rqpn = 0;
452 u16 pkey;
453 u32 rlid, slid, dlid = 0;
454 u8 hdr_type, sc, svc_type, opcode;
455 bool is_mcast = false, ignore_fecn = false, do_cnp = false,
456 fecn, becn;
458 /* can be called from prescan */
459 if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
460 pkey = hfi1_16B_get_pkey(pkt->hdr);
461 sc = hfi1_16B_get_sc(pkt->hdr);
462 dlid = hfi1_16B_get_dlid(pkt->hdr);
463 slid = hfi1_16B_get_slid(pkt->hdr);
464 is_mcast = hfi1_is_16B_mcast(dlid);
465 opcode = ib_bth_get_opcode(ohdr);
466 hdr_type = HFI1_PKT_TYPE_16B;
467 fecn = hfi1_16B_get_fecn(pkt->hdr);
468 becn = hfi1_16B_get_becn(pkt->hdr);
469 } else {
470 pkey = ib_bth_get_pkey(ohdr);
471 sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
472 dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) :
473 ppd->lid;
474 slid = ib_get_slid(pkt->hdr);
475 is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
476 (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
477 opcode = ib_bth_get_opcode(ohdr);
478 hdr_type = HFI1_PKT_TYPE_9B;
479 fecn = ib_bth_get_fecn(ohdr);
480 becn = ib_bth_get_becn(ohdr);
483 switch (qp->ibqp.qp_type) {
484 case IB_QPT_UD:
485 rlid = slid;
486 rqpn = ib_get_sqpn(pkt->ohdr);
487 svc_type = IB_CC_SVCTYPE_UD;
488 break;
489 case IB_QPT_SMI:
490 case IB_QPT_GSI:
491 rlid = slid;
492 rqpn = ib_get_sqpn(pkt->ohdr);
493 svc_type = IB_CC_SVCTYPE_UD;
494 break;
495 case IB_QPT_UC:
496 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
497 rqpn = qp->remote_qpn;
498 svc_type = IB_CC_SVCTYPE_UC;
499 break;
500 case IB_QPT_RC:
501 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
502 rqpn = qp->remote_qpn;
503 svc_type = IB_CC_SVCTYPE_RC;
504 break;
505 default:
506 return false;
509 ignore_fecn = is_mcast || (opcode == IB_OPCODE_CNP) ||
510 (opcode == IB_OPCODE_RC_ACKNOWLEDGE);
512 * ACKNOWLEDGE packets do not get a CNP but this will be
513 * guarded by ignore_fecn above.
515 do_cnp = prescan ||
516 (opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
517 opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE) ||
518 opcode == TID_OP(READ_RESP) ||
519 opcode == TID_OP(ACK);
521 /* Call appropriate CNP handler */
522 if (!ignore_fecn && do_cnp && fecn)
523 hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
524 dlid, rlid, sc, grh);
526 if (becn) {
527 u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
528 u8 sl = ibp->sc_to_sl[sc];
530 process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
532 return !ignore_fecn && fecn;
535 struct ps_mdata {
536 struct hfi1_ctxtdata *rcd;
537 u32 rsize;
538 u32 maxcnt;
539 u32 ps_head;
540 u32 ps_tail;
541 u32 ps_seq;
544 static inline void init_ps_mdata(struct ps_mdata *mdata,
545 struct hfi1_packet *packet)
547 struct hfi1_ctxtdata *rcd = packet->rcd;
549 mdata->rcd = rcd;
550 mdata->rsize = packet->rsize;
551 mdata->maxcnt = packet->maxcnt;
552 mdata->ps_head = packet->rhqoff;
554 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
555 mdata->ps_tail = get_rcvhdrtail(rcd);
556 if (rcd->ctxt == HFI1_CTRL_CTXT)
557 mdata->ps_seq = rcd->seq_cnt;
558 else
559 mdata->ps_seq = 0; /* not used with DMA_RTAIL */
560 } else {
561 mdata->ps_tail = 0; /* used only with DMA_RTAIL */
562 mdata->ps_seq = rcd->seq_cnt;
566 static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
567 struct hfi1_ctxtdata *rcd)
569 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
570 return mdata->ps_head == mdata->ps_tail;
571 return mdata->ps_seq != rhf_rcv_seq(rhf);
574 static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
575 struct hfi1_ctxtdata *rcd)
578 * Control context can potentially receive an invalid rhf.
579 * Drop such packets.
581 if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
582 return mdata->ps_seq != rhf_rcv_seq(rhf);
584 return 0;
587 static inline void update_ps_mdata(struct ps_mdata *mdata,
588 struct hfi1_ctxtdata *rcd)
590 mdata->ps_head += mdata->rsize;
591 if (mdata->ps_head >= mdata->maxcnt)
592 mdata->ps_head = 0;
594 /* Control context must do seq counting */
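/* Hardware RHF sequence numbers cycle through 1..13, hence the wrap below */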
595 if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
596 (rcd->ctxt == HFI1_CTRL_CTXT)) {
597 if (++mdata->ps_seq > 13)
598 mdata->ps_seq = 1;
603 * prescan_rxq - search through the receive queue looking for packets
604 * containing Explicit Congestion Notifications (FECNs or BECNs).
605 * When an ECN is found, process the Congestion Notification, and toggle
606 * it off.
607 * This is declared as a macro to allow quick checking of the port to avoid
608 * the overhead of a function call if not enabled.
610 #define prescan_rxq(rcd, packet) \
611 do { \
612 if (rcd->ppd->cc_prescan) \
613 __prescan_rxq(packet); \
614 } while (0)
615 static void __prescan_rxq(struct hfi1_packet *packet)
617 struct hfi1_ctxtdata *rcd = packet->rcd;
618 struct ps_mdata mdata;
620 init_ps_mdata(&mdata, packet);
622 while (1) {
623 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
624 __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
625 packet->rcd->rhf_offset;
626 struct rvt_qp *qp;
627 struct ib_header *hdr;
628 struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi;
629 u64 rhf = rhf_to_cpu(rhf_addr);
630 u32 etype = rhf_rcv_type(rhf), qpn, bth1;
631 u8 lnh;
633 if (ps_done(&mdata, rhf, rcd))
634 break;
636 if (ps_skip(&mdata, rhf, rcd))
637 goto next;
639 if (etype != RHF_RCV_TYPE_IB)
640 goto next;
642 packet->hdr = hfi1_get_msgheader(packet->rcd, rhf_addr);
643 hdr = packet->hdr;
644 lnh = ib_get_lnh(hdr);
646 if (lnh == HFI1_LRH_BTH) {
647 packet->ohdr = &hdr->u.oth;
648 packet->grh = NULL;
649 } else if (lnh == HFI1_LRH_GRH) {
650 packet->ohdr = &hdr->u.l.oth;
651 packet->grh = &hdr->u.l.grh;
652 } else {
653 goto next; /* just in case */
656 if (!hfi1_may_ecn(packet))
657 goto next;
659 bth1 = be32_to_cpu(packet->ohdr->bth[1]);
660 qpn = bth1 & RVT_QPN_MASK;
661 rcu_read_lock();
662 qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);
664 if (!qp) {
665 rcu_read_unlock();
666 goto next;
669 hfi1_process_ecn_slowpath(qp, packet, true);
670 rcu_read_unlock();
672 /* turn off BECN, FECN */
673 bth1 &= ~(IB_FECN_SMASK | IB_BECN_SMASK);
674 packet->ohdr->bth[1] = cpu_to_be32(bth1);
675 next:
676 update_ps_mdata(&mdata, rcd);
680 static void process_rcv_qp_work(struct hfi1_packet *packet)
682 struct rvt_qp *qp, *nqp;
683 struct hfi1_ctxtdata *rcd = packet->rcd;
686 * Iterate over all QPs waiting to respond.
687 * The list won't change since the IRQ is only run on one CPU.
689 list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
690 list_del_init(&qp->rspwait);
691 if (qp->r_flags & RVT_R_RSP_NAK) {
692 qp->r_flags &= ~RVT_R_RSP_NAK;
693 packet->qp = qp;
694 hfi1_send_rc_ack(packet, 0);
696 if (qp->r_flags & RVT_R_RSP_SEND) {
697 unsigned long flags;
699 qp->r_flags &= ~RVT_R_RSP_SEND;
700 spin_lock_irqsave(&qp->s_lock, flags);
701 if (ib_rvt_state_ops[qp->state] &
702 RVT_PROCESS_OR_FLUSH_SEND)
703 hfi1_schedule_send(qp);
704 spin_unlock_irqrestore(&qp->s_lock, flags);
706 rvt_put_qp(qp);
710 static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
712 if (thread) {
713 if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0)
714 /* allow deferred processing */
715 process_rcv_qp_work(packet);
716 cond_resched();
717 return RCV_PKT_OK;
718 } else {
719 this_cpu_inc(*packet->rcd->dd->rcv_limit);
720 return RCV_PKT_LIMIT;
724 static inline int check_max_packet(struct hfi1_packet *packet, int thread)
726 int ret = RCV_PKT_OK;
728 if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0))
729 ret = max_packet_exceeded(packet, thread);
730 return ret;
733 static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
735 int ret;
737 packet->rcd->dd->ctx0_seq_drop++;
738 /* Set up for the next packet */
739 packet->rhqoff += packet->rsize;
740 if (packet->rhqoff >= packet->maxcnt)
741 packet->rhqoff = 0;
743 packet->numpkt++;
744 ret = check_max_packet(packet, thread);
746 packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
747 packet->rcd->rhf_offset;
748 packet->rhf = rhf_to_cpu(packet->rhf_addr);
750 return ret;
753 static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
755 int ret;
757 packet->etype = rhf_rcv_type(packet->rhf);
759 /* total length */
760 packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
761 /* retrieve eager buffer details */
762 packet->ebuf = NULL;
763 if (rhf_use_egr_bfr(packet->rhf)) {
764 packet->etail = rhf_egr_index(packet->rhf);
765 packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
766 &packet->updegr);
768 * Prefetch the contents of the eager buffer. It is
769 * OK to send a negative length to prefetch_range().
770 * The +2 is the size of the RHF.
772 prefetch_range(packet->ebuf,
773 packet->tlen - ((packet->rcd->rcvhdrqentsize -
774 (rhf_hdrq_offset(packet->rhf)
775 + 2)) * 4));
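/*
 * The length arithmetic above is in 32-bit words: rcvhdrqentsize minus the
 * header offset and the 2-word RHF gives the header data already held in
 * the hdrq entry, and the trailing "* 4" converts words to bytes before
 * subtracting from the total packet length.
 */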
779 * Call a type specific handler for the packet. We
780 * should be able to trust that etype won't be beyond
781 * the range of valid indexes. If so something is really
782 * wrong and we can probably just let things come
783 * crashing down. There is no need to eat another
784 * comparison in this performance critical code.
786 packet->rcd->rhf_rcv_function_map[packet->etype](packet);
787 packet->numpkt++;
789 /* Set up for the next packet */
790 packet->rhqoff += packet->rsize;
791 if (packet->rhqoff >= packet->maxcnt)
792 packet->rhqoff = 0;
794 ret = check_max_packet(packet, thread);
796 packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
797 packet->rcd->rhf_offset;
798 packet->rhf = rhf_to_cpu(packet->rhf_addr);
800 return ret;
803 static inline void process_rcv_update(int last, struct hfi1_packet *packet)
806 * Update head regs etc., every 16 packets, if not last pkt,
807 * to help prevent rcvhdrq overflows, when many packets
808 * are processed and queue is nearly full.
809 * Don't request an interrupt for intermediate updates.
811 if (!last && !(packet->numpkt & 0xf)) {
812 update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
813 packet->etail, 0, 0);
814 packet->updegr = 0;
816 packet->grh = NULL;
819 static inline void finish_packet(struct hfi1_packet *packet)
822 * Nothing we need to free for the packet.
824 * The only thing we need to do is a final update and call for an
825 * interrupt
827 update_usrhead(packet->rcd, packet->rcd->head, packet->updegr,
828 packet->etail, rcv_intr_dynamic, packet->numpkt);
832 * Handle receive interrupts when using the no dma rtail option.
834 int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
836 u32 seq;
837 int last = RCV_PKT_OK;
838 struct hfi1_packet packet;
840 init_packet(rcd, &packet);
841 seq = rhf_rcv_seq(packet.rhf);
842 if (seq != rcd->seq_cnt) {
843 last = RCV_PKT_DONE;
844 goto bail;
847 prescan_rxq(rcd, &packet);
849 while (last == RCV_PKT_OK) {
850 last = process_rcv_packet(&packet, thread);
851 seq = rhf_rcv_seq(packet.rhf);
852 if (++rcd->seq_cnt > 13)
853 rcd->seq_cnt = 1;
854 if (seq != rcd->seq_cnt)
855 last = RCV_PKT_DONE;
856 process_rcv_update(last, &packet);
858 process_rcv_qp_work(&packet);
859 rcd->head = packet.rhqoff;
860 bail:
861 finish_packet(&packet);
862 return last;
865 int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
867 u32 hdrqtail;
868 int last = RCV_PKT_OK;
869 struct hfi1_packet packet;
871 init_packet(rcd, &packet);
872 hdrqtail = get_rcvhdrtail(rcd);
873 if (packet.rhqoff == hdrqtail) {
874 last = RCV_PKT_DONE;
875 goto bail;
877 smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
879 prescan_rxq(rcd, &packet);
881 while (last == RCV_PKT_OK) {
882 last = process_rcv_packet(&packet, thread);
883 if (packet.rhqoff == hdrqtail)
884 last = RCV_PKT_DONE;
885 process_rcv_update(last, &packet);
887 process_rcv_qp_work(&packet);
888 rcd->head = packet.rhqoff;
889 bail:
890 finish_packet(&packet);
891 return last;
894 static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
896 struct hfi1_ctxtdata *rcd;
897 u16 i;
900 * For dynamically allocated kernel contexts (like vnic) switch
901 * interrupt handler only for that context. Otherwise, switch
902 * interrupt handler for all statically allocated kernel contexts.
904 if (ctxt >= dd->first_dyn_alloc_ctxt) {
905 rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
906 if (rcd) {
907 rcd->do_interrupt =
908 &handle_receive_interrupt_nodma_rtail;
909 hfi1_rcd_put(rcd);
911 return;
914 for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
915 rcd = hfi1_rcd_get_by_index(dd, i);
916 if (rcd)
917 rcd->do_interrupt =
918 &handle_receive_interrupt_nodma_rtail;
919 hfi1_rcd_put(rcd);
923 static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
925 struct hfi1_ctxtdata *rcd;
926 u16 i;
929 * For dynamically allocated kernel contexts (like vnic) switch
930 * interrupt handler only for that context. Otherwise, switch
931 * interrupt handler for all statically allocated kernel contexts.
933 if (ctxt >= dd->first_dyn_alloc_ctxt) {
934 rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
935 if (rcd) {
936 rcd->do_interrupt =
937 &handle_receive_interrupt_dma_rtail;
938 hfi1_rcd_put(rcd);
940 return;
943 for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
944 rcd = hfi1_rcd_get_by_index(dd, i);
945 if (rcd)
946 rcd->do_interrupt =
947 &handle_receive_interrupt_dma_rtail;
948 hfi1_rcd_put(rcd);
952 void set_all_slowpath(struct hfi1_devdata *dd)
954 struct hfi1_ctxtdata *rcd;
955 u16 i;
957 /* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
958 for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
959 rcd = hfi1_rcd_get_by_index(dd, i);
960 if (!rcd)
961 continue;
962 if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
963 rcd->do_interrupt = &handle_receive_interrupt;
965 hfi1_rcd_put(rcd);
969 static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
970 struct hfi1_packet *packet,
971 struct hfi1_devdata *dd)
973 struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
974 u8 etype = rhf_rcv_type(packet->rhf);
975 u8 sc = SC15_PACKET;
977 if (etype == RHF_RCV_TYPE_IB) {
978 struct ib_header *hdr = hfi1_get_msgheader(packet->rcd,
979 packet->rhf_addr);
980 sc = hfi1_9B_get_sc5(hdr, packet->rhf);
981 } else if (etype == RHF_RCV_TYPE_BYPASS) {
982 struct hfi1_16b_header *hdr = hfi1_get_16B_header(
983 packet->rcd,
984 packet->rhf_addr);
985 sc = hfi1_16B_get_sc(hdr);
987 if (sc != SC15_PACKET) {
988 int hwstate = driver_lstate(rcd->ppd);
990 if (hwstate != IB_PORT_ACTIVE) {
991 dd_dev_info(dd,
992 "Unexpected link state %s\n",
993 opa_lstate_name(hwstate));
994 return 0;
997 queue_work(rcd->ppd->link_wq, lsaw);
998 return 1;
1000 return 0;
1004 * handle_receive_interrupt - receive a packet
1005 * @rcd: the context
1007 * Called from interrupt handler for errors or receive interrupt.
1008 * This is the slow path interrupt handler.
1010 int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
1012 struct hfi1_devdata *dd = rcd->dd;
1013 u32 hdrqtail;
1014 int needset, last = RCV_PKT_OK;
1015 struct hfi1_packet packet;
1016 int skip_pkt = 0;
1018 /* Control context will always use the slow path interrupt handler */
1019 needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;
1021 init_packet(rcd, &packet);
1023 if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
1024 u32 seq = rhf_rcv_seq(packet.rhf);
1026 if (seq != rcd->seq_cnt) {
1027 last = RCV_PKT_DONE;
1028 goto bail;
1030 hdrqtail = 0;
1031 } else {
1032 hdrqtail = get_rcvhdrtail(rcd);
1033 if (packet.rhqoff == hdrqtail) {
1034 last = RCV_PKT_DONE;
1035 goto bail;
1037 smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
1040 * Control context can potentially receive an invalid
1041 * rhf. Drop such packets.
1043 if (rcd->ctxt == HFI1_CTRL_CTXT) {
1044 u32 seq = rhf_rcv_seq(packet.rhf);
1046 if (seq != rcd->seq_cnt)
1047 skip_pkt = 1;
1051 prescan_rxq(rcd, &packet);
1053 while (last == RCV_PKT_OK) {
1054 if (unlikely(dd->do_drop &&
1055 atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
1056 DROP_PACKET_ON)) {
1057 dd->do_drop = 0;
1059 /* On to the next packet */
1060 packet.rhqoff += packet.rsize;
1061 packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
1062 packet.rhqoff +
1063 rcd->rhf_offset;
1064 packet.rhf = rhf_to_cpu(packet.rhf_addr);
1066 } else if (skip_pkt) {
1067 last = skip_rcv_packet(&packet, thread);
1068 skip_pkt = 0;
1069 } else {
1070 /* Auto activate link on non-SC15 packet receive */
1071 if (unlikely(rcd->ppd->host_link_state ==
1072 HLS_UP_ARMED) &&
1073 set_armed_to_active(rcd, &packet, dd))
1074 goto bail;
1075 last = process_rcv_packet(&packet, thread);
1078 if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
1079 u32 seq = rhf_rcv_seq(packet.rhf);
1081 if (++rcd->seq_cnt > 13)
1082 rcd->seq_cnt = 1;
1083 if (seq != rcd->seq_cnt)
1084 last = RCV_PKT_DONE;
1085 if (needset) {
1086 dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n");
1087 set_nodma_rtail(dd, rcd->ctxt);
1088 needset = 0;
1090 } else {
1091 if (packet.rhqoff == hdrqtail)
1092 last = RCV_PKT_DONE;
1094 * Control context can potentially receive an invalid
1095 * rhf. Drop such packets.
1097 if (rcd->ctxt == HFI1_CTRL_CTXT) {
1098 u32 seq = rhf_rcv_seq(packet.rhf);
1100 if (++rcd->seq_cnt > 13)
1101 rcd->seq_cnt = 1;
1102 if (!last && (seq != rcd->seq_cnt))
1103 skip_pkt = 1;
1106 if (needset) {
1107 dd_dev_info(dd,
1108 "Switching to DMA_RTAIL\n");
1109 set_dma_rtail(dd, rcd->ctxt);
1110 needset = 0;
1114 process_rcv_update(last, &packet);
1117 process_rcv_qp_work(&packet);
1118 rcd->head = packet.rhqoff;
1120 bail:
1122 * Always write head at end, and setup rcv interrupt, even
1123 * if no packets were processed.
1125 finish_packet(&packet);
1126 return last;
1130 * We may discover in the interrupt that the hardware link state has
1131 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
1132 * and we need to update the driver's notion of the link state. We cannot
1133 * run set_link_state from interrupt context, so we queue this function on
1134 * a workqueue.
1136 * We delay the regular interrupt processing until after the state changes
1137 * so that the link will be in the correct state by the time any application
1138 * we wake up attempts to send a reply to any message it received.
1139 * (Subsequent receive interrupts may possibly force the wakeup before we
1140 * update the link state.)
1142 * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
1143 * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
1144 * so we're safe from use-after-free of the rcd.
1146 void receive_interrupt_work(struct work_struct *work)
1148 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
1149 linkstate_active_work);
1150 struct hfi1_devdata *dd = ppd->dd;
1151 struct hfi1_ctxtdata *rcd;
1152 u16 i;
1154 /* Received non-SC15 packet implies neighbor_normal */
1155 ppd->neighbor_normal = 1;
1156 set_link_state(ppd, HLS_UP_ACTIVE);
1159 * Interrupt all statically allocated kernel contexts that could
1160 * have had an interrupt during auto activation.
1162 for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++) {
1163 rcd = hfi1_rcd_get_by_index(dd, i);
1164 if (rcd)
1165 force_recv_intr(rcd);
1166 hfi1_rcd_put(rcd);
1171 * Convert a given MTU size to the on-wire MAD packet enumeration.
1172 * Return default_if_bad if the size is invalid.
1174 int mtu_to_enum(u32 mtu, int default_if_bad)
1176 switch (mtu) {
1177 case 0: return OPA_MTU_0;
1178 case 256: return OPA_MTU_256;
1179 case 512: return OPA_MTU_512;
1180 case 1024: return OPA_MTU_1024;
1181 case 2048: return OPA_MTU_2048;
1182 case 4096: return OPA_MTU_4096;
1183 case 8192: return OPA_MTU_8192;
1184 case 10240: return OPA_MTU_10240;
1186 return default_if_bad;
1189 u16 enum_to_mtu(int mtu)
1191 switch (mtu) {
1192 case OPA_MTU_0: return 0;
1193 case OPA_MTU_256: return 256;
1194 case OPA_MTU_512: return 512;
1195 case OPA_MTU_1024: return 1024;
1196 case OPA_MTU_2048: return 2048;
1197 case OPA_MTU_4096: return 4096;
1198 case OPA_MTU_8192: return 8192;
1199 case OPA_MTU_10240: return 10240;
1200 default: return 0xffff;
1205 * set_mtu - set the MTU
1206 * @ppd: the per port data
1208 * We can handle "any" incoming size, the issue here is whether we
1209 * need to restrict our outgoing size. We do not deal with what happens
1210 * to programs that are already running when the size changes.
1212 int set_mtu(struct hfi1_pportdata *ppd)
1214 struct hfi1_devdata *dd = ppd->dd;
1215 int i, drain, ret = 0, is_up = 0;
1217 ppd->ibmtu = 0;
1218 for (i = 0; i < ppd->vls_supported; i++)
1219 if (ppd->ibmtu < dd->vld[i].mtu)
1220 ppd->ibmtu = dd->vld[i].mtu;
1221 ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);
1223 mutex_lock(&ppd->hls_lock);
1224 if (ppd->host_link_state == HLS_UP_INIT ||
1225 ppd->host_link_state == HLS_UP_ARMED ||
1226 ppd->host_link_state == HLS_UP_ACTIVE)
1227 is_up = 1;
1229 drain = !is_ax(dd) && is_up;
1231 if (drain)
1233 * MTU is specified per-VL. To ensure that no packet gets
1234 * stuck (due, e.g., to the MTU for the packet's VL being
1235 * reduced), empty the per-VL FIFOs before adjusting MTU.
1237 ret = stop_drain_data_vls(dd);
1239 if (ret) {
1240 dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
1241 __func__);
1242 goto err;
1245 hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);
1247 if (drain)
1248 open_fill_data_vls(dd); /* reopen all VLs */
1250 err:
1251 mutex_unlock(&ppd->hls_lock);
1253 return ret;
1256 int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
1258 struct hfi1_devdata *dd = ppd->dd;
1260 ppd->lid = lid;
1261 ppd->lmc = lmc;
1262 hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);
1264 dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);
1266 return 0;
1269 void shutdown_led_override(struct hfi1_pportdata *ppd)
1271 struct hfi1_devdata *dd = ppd->dd;
1274 * This pairs with the memory barrier in hfi1_start_led_override to
1275 * ensure that we read the correct state of LED beaconing represented
1276 * by led_override_timer_active
1278 smp_rmb();
1279 if (atomic_read(&ppd->led_override_timer_active)) {
1280 del_timer_sync(&ppd->led_override_timer);
1281 atomic_set(&ppd->led_override_timer_active, 0);
1282 /* Ensure the atomic_set is visible to all CPUs */
1283 smp_wmb();
1286 /* Hand control of the LED to the DC for normal operation */
1287 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
1290 static void run_led_override(struct timer_list *t)
1292 struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer);
1293 struct hfi1_devdata *dd = ppd->dd;
1294 unsigned long timeout;
1295 int phase_idx;
1297 if (!(dd->flags & HFI1_INITTED))
1298 return;
1300 phase_idx = ppd->led_override_phase & 1;
1302 setextled(dd, phase_idx);
1304 timeout = ppd->led_override_vals[phase_idx];
1306 /* Set up for next phase */
1307 ppd->led_override_phase = !ppd->led_override_phase;
1309 mod_timer(&ppd->led_override_timer, jiffies + timeout);
1313 * To have the LED blink in a particular pattern, provide timeon and timeoff
1314 * in milliseconds.
1315 * To turn off custom blinking and return to normal operation, use
1316 * shutdown_led_override()
1318 void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
1319 unsigned int timeoff)
1321 if (!(ppd->dd->flags & HFI1_INITTED))
1322 return;
1324 /* Convert to jiffies for direct use in timer */
1325 ppd->led_override_vals[0] = msecs_to_jiffies(timeoff);
1326 ppd->led_override_vals[1] = msecs_to_jiffies(timeon);
1328 /* Arbitrarily start from LED on phase */
1329 ppd->led_override_phase = 1;
1332 * If the timer has not already been started, do so. Use a "quick"
1333 * timeout so the handler will be called soon to look at our request.
1335 if (!timer_pending(&ppd->led_override_timer)) {
1336 timer_setup(&ppd->led_override_timer, run_led_override, 0);
1337 ppd->led_override_timer.expires = jiffies + 1;
1338 add_timer(&ppd->led_override_timer);
1339 atomic_set(&ppd->led_override_timer_active, 1);
1340 /* Ensure the atomic_set is visible to all CPUs */
1341 smp_wmb();
1346 * hfi1_reset_device - reset the chip if possible
1347 * @unit: the device to reset
1349 * Whether or not reset is successful, we attempt to re-initialize the chip
1350 * (that is, much like a driver unload/reload). We clear the INITTED flag
1351 * so that the various entry points will fail until we reinitialize. For
1352 * now, we only allow this if no user contexts are open that use chip resources
1354 int hfi1_reset_device(int unit)
1356 int ret;
1357 struct hfi1_devdata *dd = hfi1_lookup(unit);
1358 struct hfi1_pportdata *ppd;
1359 int pidx;
1361 if (!dd) {
1362 ret = -ENODEV;
1363 goto bail;
1366 dd_dev_info(dd, "Reset on unit %u requested\n", unit);
1368 if (!dd->kregbase1 || !(dd->flags & HFI1_PRESENT)) {
1369 dd_dev_info(dd,
1370 "Invalid unit number %u or not initialized or not present\n",
1371 unit);
1372 ret = -ENXIO;
1373 goto bail;
1376 /* If there are any user/vnic contexts, we cannot reset */
1377 mutex_lock(&hfi1_mutex);
1378 if (dd->rcd)
1379 if (hfi1_stats.sps_ctxts) {
1380 mutex_unlock(&hfi1_mutex);
1381 ret = -EBUSY;
1382 goto bail;
1384 mutex_unlock(&hfi1_mutex);
1386 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1387 ppd = dd->pport + pidx;
1389 shutdown_led_override(ppd);
1391 if (dd->flags & HFI1_HAS_SEND_DMA)
1392 sdma_exit(dd);
1394 hfi1_reset_cpu_counters(dd);
1396 ret = hfi1_init(dd, 1);
1398 if (ret)
1399 dd_dev_err(dd,
1400 "Reinitialize unit %u after reset failed with %d\n",
1401 unit, ret);
1402 else
1403 dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
1404 unit);
1406 bail:
1407 return ret;
1410 static inline void hfi1_setup_ib_header(struct hfi1_packet *packet)
1412 packet->hdr = (struct hfi1_ib_message_header *)
1413 hfi1_get_msgheader(packet->rcd,
1414 packet->rhf_addr);
1415 packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
1418 static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet)
1420 struct hfi1_pportdata *ppd = packet->rcd->ppd;
1422 /* slid and dlid cannot be 0 */
1423 if ((!packet->slid) || (!packet->dlid))
1424 return -EINVAL;
1426 /* Compare port lid with incoming packet dlid */
1427 if ((!(hfi1_is_16B_mcast(packet->dlid))) &&
1428 (packet->dlid !=
1429 opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) {
1430 if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid)
1431 return -EINVAL;
1434 /* No multicast packets with SC15 */
1435 if ((hfi1_is_16B_mcast(packet->dlid)) && (packet->sc == 0xF))
1436 return -EINVAL;
1438 /* Packets with permissive DLID always on SC15 */
1439 if ((packet->dlid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE),
1440 16B)) &&
1441 (packet->sc != 0xF))
1442 return -EINVAL;
1444 return 0;
1447 static int hfi1_setup_9B_packet(struct hfi1_packet *packet)
1449 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
1450 struct ib_header *hdr;
1451 u8 lnh;
1453 hfi1_setup_ib_header(packet);
1454 hdr = packet->hdr;
1456 lnh = ib_get_lnh(hdr);
1457 if (lnh == HFI1_LRH_BTH) {
1458 packet->ohdr = &hdr->u.oth;
1459 packet->grh = NULL;
1460 } else if (lnh == HFI1_LRH_GRH) {
1461 u32 vtf;
1463 packet->ohdr = &hdr->u.l.oth;
1464 packet->grh = &hdr->u.l.grh;
1465 if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
1466 goto drop;
1467 vtf = be32_to_cpu(packet->grh->version_tclass_flow);
1468 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
1469 goto drop;
1470 } else {
1471 goto drop;
1474 /* Query commonly used fields from packet header */
1475 packet->payload = packet->ebuf;
1476 packet->opcode = ib_bth_get_opcode(packet->ohdr);
1477 packet->slid = ib_get_slid(hdr);
1478 packet->dlid = ib_get_dlid(hdr);
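/*
 * Rebase 9B (IB) multicast LIDs into the extended OPA multicast LID range
 * so later code can treat all multicast LIDs uniformly.
 */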
1479 if (unlikely((packet->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
1480 (packet->dlid != be16_to_cpu(IB_LID_PERMISSIVE))))
1481 packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
1482 be16_to_cpu(IB_MULTICAST_LID_BASE);
1483 packet->sl = ib_get_sl(hdr);
1484 packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf);
1485 packet->pad = ib_bth_get_pad(packet->ohdr);
1486 packet->extra_byte = 0;
1487 packet->pkey = ib_bth_get_pkey(packet->ohdr);
1488 packet->migrated = ib_bth_is_migration(packet->ohdr);
1490 return 0;
1491 drop:
1492 ibp->rvp.n_pkt_drops++;
1493 return -EINVAL;
1496 static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
1499 * Bypass packets have a different header/payload split
1500 * compared to an IB packet.
1501 * Current split is set such that 16 bytes of the actual
1502 * header is in the header buffer and the remainder is in
1503 * the eager buffer. We chose 16 since the hfi1 driver only
1504 * supports 16B bypass packets and we will be able to
1505 * receive the entire LRH with such a split.
1508 struct hfi1_ctxtdata *rcd = packet->rcd;
1509 struct hfi1_pportdata *ppd = rcd->ppd;
1510 struct hfi1_ibport *ibp = &ppd->ibport_data;
1511 u8 l4;
1513 packet->hdr = (struct hfi1_16b_header *)
1514 hfi1_get_16B_header(packet->rcd,
1515 packet->rhf_addr);
1516 l4 = hfi1_16B_get_l4(packet->hdr);
1517 if (l4 == OPA_16B_L4_IB_LOCAL) {
1518 packet->ohdr = packet->ebuf;
1519 packet->grh = NULL;
1520 packet->opcode = ib_bth_get_opcode(packet->ohdr);
1521 packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
1522 /* hdr_len_by_opcode already has an IB LRH factored in */
1523 packet->hlen = hdr_len_by_opcode[packet->opcode] +
1524 (LRH_16B_BYTES - LRH_9B_BYTES);
1525 packet->migrated = opa_bth_is_migration(packet->ohdr);
1526 } else if (l4 == OPA_16B_L4_IB_GLOBAL) {
1527 u32 vtf;
1528 u8 grh_len = sizeof(struct ib_grh);
1530 packet->ohdr = packet->ebuf + grh_len;
1531 packet->grh = packet->ebuf;
1532 packet->opcode = ib_bth_get_opcode(packet->ohdr);
1533 packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
1534 /* hdr_len_by_opcode already has an IB LRH factored in */
1535 packet->hlen = hdr_len_by_opcode[packet->opcode] +
1536 (LRH_16B_BYTES - LRH_9B_BYTES) + grh_len;
1537 packet->migrated = opa_bth_is_migration(packet->ohdr);
1539 if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
1540 goto drop;
1541 vtf = be32_to_cpu(packet->grh->version_tclass_flow);
1542 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
1543 goto drop;
1544 } else if (l4 == OPA_16B_L4_FM) {
1545 packet->mgmt = packet->ebuf;
1546 packet->ohdr = NULL;
1547 packet->grh = NULL;
1548 packet->opcode = IB_OPCODE_UD_SEND_ONLY;
1549 packet->pad = OPA_16B_L4_FM_PAD;
1550 packet->hlen = OPA_16B_L4_FM_HLEN;
1551 packet->migrated = false;
1552 } else {
1553 goto drop;
1556 /* Query commonly used fields from packet header */
1557 packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES;
1558 packet->slid = hfi1_16B_get_slid(packet->hdr);
1559 packet->dlid = hfi1_16B_get_dlid(packet->hdr);
1560 if (unlikely(hfi1_is_16B_mcast(packet->dlid)))
1561 packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
1562 opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR),
1563 16B);
1564 packet->sc = hfi1_16B_get_sc(packet->hdr);
1565 packet->sl = ibp->sc_to_sl[packet->sc];
1566 packet->extra_byte = SIZE_OF_LT;
1567 packet->pkey = hfi1_16B_get_pkey(packet->hdr);
1569 if (hfi1_bypass_ingress_pkt_check(packet))
1570 goto drop;
1572 return 0;
1573 drop:
1574 hfi1_cdbg(PKT, "%s: packet dropped\n", __func__);
1575 ibp->rvp.n_pkt_drops++;
1576 return -EINVAL;
1579 static void show_eflags_errs(struct hfi1_packet *packet)
1581 struct hfi1_ctxtdata *rcd = packet->rcd;
1582 u32 rte = rhf_rcv_type_err(packet->rhf);
1584 dd_dev_err(rcd->dd,
1585 "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s] rte 0x%x\n",
1586 rcd->ctxt, packet->rhf,
1587 packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
1588 packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
1589 packet->rhf & RHF_DC_ERR ? "dc " : "",
1590 packet->rhf & RHF_TID_ERR ? "tid " : "",
1591 packet->rhf & RHF_LEN_ERR ? "len " : "",
1592 packet->rhf & RHF_ECC_ERR ? "ecc " : "",
1593 packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
1594 rte);
1597 void handle_eflags(struct hfi1_packet *packet)
1599 struct hfi1_ctxtdata *rcd = packet->rcd;
1601 rcv_hdrerr(rcd, rcd->ppd, packet);
1602 if (rhf_err_flags(packet->rhf))
1603 show_eflags_errs(packet);
1607 * The following functions are called by the interrupt handler. They are type
1608 * specific handlers for each packet type.
1610 static int process_receive_ib(struct hfi1_packet *packet)
1612 if (hfi1_setup_9B_packet(packet))
1613 return RHF_RCV_CONTINUE;
1615 if (unlikely(hfi1_dbg_should_fault_rx(packet)))
1616 return RHF_RCV_CONTINUE;
1618 trace_hfi1_rcvhdr(packet);
1620 if (unlikely(rhf_err_flags(packet->rhf))) {
1621 handle_eflags(packet);
1622 return RHF_RCV_CONTINUE;
1625 hfi1_ib_rcv(packet);
1626 return RHF_RCV_CONTINUE;
1629 static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
1631 /* Packet received in VNIC context via RSM */
1632 if (packet->rcd->is_vnic)
1633 return true;
1635 if ((hfi1_16B_get_l2(packet->ebuf) == OPA_16B_L2_TYPE) &&
1636 (hfi1_16B_get_l4(packet->ebuf) == OPA_16B_L4_ETHR))
1637 return true;
1639 return false;
1642 static int process_receive_bypass(struct hfi1_packet *packet)
1644 struct hfi1_devdata *dd = packet->rcd->dd;
1646 if (hfi1_is_vnic_packet(packet)) {
1647 hfi1_vnic_bypass_rcv(packet);
1648 return RHF_RCV_CONTINUE;
1651 if (hfi1_setup_bypass_packet(packet))
1652 return RHF_RCV_CONTINUE;
1654 trace_hfi1_rcvhdr(packet);
1656 if (unlikely(rhf_err_flags(packet->rhf))) {
1657 handle_eflags(packet);
1658 return RHF_RCV_CONTINUE;
1661 if (hfi1_16B_get_l2(packet->hdr) == 0x2) {
1662 hfi1_16B_rcv(packet);
1663 } else {
1664 dd_dev_err(dd,
1665 "Bypass packets other than 16B are not supported in normal operation. Dropping\n");
1666 incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
1667 if (!(dd->err_info_rcvport.status_and_code &
1668 OPA_EI_STATUS_SMASK)) {
1669 u64 *flits = packet->ebuf;
1671 if (flits && !(packet->rhf & RHF_LEN_ERR)) {
1672 dd->err_info_rcvport.packet_flit1 = flits[0];
1673 dd->err_info_rcvport.packet_flit2 =
1674 packet->tlen > sizeof(flits[0]) ?
1675 flits[1] : 0;
1677 dd->err_info_rcvport.status_and_code |=
1678 (OPA_EI_STATUS_SMASK | BAD_L2_ERR);
1681 return RHF_RCV_CONTINUE;
1684 static int process_receive_error(struct hfi1_packet *packet)
1686 /* KHdrHCRCErr -- KDETH packet with a bad HCRC */
1687 if (unlikely(
1688 hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
1689 (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR ||
1690 packet->rhf & RHF_DC_ERR)))
1691 return RHF_RCV_CONTINUE;
1693 hfi1_setup_ib_header(packet);
1694 handle_eflags(packet);
1696 if (unlikely(rhf_err_flags(packet->rhf)))
1697 dd_dev_err(packet->rcd->dd,
1698 "Unhandled error packet received. Dropping.\n");
1700 return RHF_RCV_CONTINUE;
1703 static int kdeth_process_expected(struct hfi1_packet *packet)
1705 hfi1_setup_9B_packet(packet);
1706 if (unlikely(hfi1_dbg_should_fault_rx(packet)))
1707 return RHF_RCV_CONTINUE;
1709 if (unlikely(rhf_err_flags(packet->rhf))) {
1710 struct hfi1_ctxtdata *rcd = packet->rcd;
1712 if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
1713 return RHF_RCV_CONTINUE;
1716 hfi1_kdeth_expected_rcv(packet);
1717 return RHF_RCV_CONTINUE;
1720 static int kdeth_process_eager(struct hfi1_packet *packet)
1722 hfi1_setup_9B_packet(packet);
1723 if (unlikely(hfi1_dbg_should_fault_rx(packet)))
1724 return RHF_RCV_CONTINUE;
1726 trace_hfi1_rcvhdr(packet);
1727 if (unlikely(rhf_err_flags(packet->rhf))) {
1728 struct hfi1_ctxtdata *rcd = packet->rcd;
1730 show_eflags_errs(packet);
1731 if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
1732 return RHF_RCV_CONTINUE;
1735 hfi1_kdeth_eager_rcv(packet);
1736 return RHF_RCV_CONTINUE;
1739 static int process_receive_invalid(struct hfi1_packet *packet)
1741 dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
1742 rhf_rcv_type(packet->rhf));
1743 return RHF_RCV_CONTINUE;
1746 void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
1748 struct hfi1_packet packet;
1749 struct ps_mdata mdata;
1751 seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s head %llu tail %llu\n",
1752 rcd->ctxt, rcd->rcvhdrq_cnt, rcd->rcvhdrqentsize,
1753 HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
1754 "dma_rtail" : "nodma_rtail",
1755 read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) &
1756 RCV_HDR_HEAD_HEAD_MASK,
1757 read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL));
1759 init_packet(rcd, &packet);
1760 init_ps_mdata(&mdata, &packet);
1762 while (1) {
1763 __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
1764 rcd->rhf_offset;
1765 struct ib_header *hdr;
1766 u64 rhf = rhf_to_cpu(rhf_addr);
1767 u32 etype = rhf_rcv_type(rhf), qpn;
1768 u8 opcode;
1769 u32 psn;
1770 u8 lnh;
1772 if (ps_done(&mdata, rhf, rcd))
1773 break;
1775 if (ps_skip(&mdata, rhf, rcd))
1776 goto next;
1778 if (etype > RHF_RCV_TYPE_IB)
1779 goto next;
1781 packet.hdr = hfi1_get_msgheader(rcd, rhf_addr);
1782 hdr = packet.hdr;
1784 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
1786 if (lnh == HFI1_LRH_BTH)
1787 packet.ohdr = &hdr->u.oth;
1788 else if (lnh == HFI1_LRH_GRH)
1789 packet.ohdr = &hdr->u.l.oth;
1790 else
1791 goto next; /* just in case */
1793 opcode = (be32_to_cpu(packet.ohdr->bth[0]) >> 24);
1794 qpn = be32_to_cpu(packet.ohdr->bth[1]) & RVT_QPN_MASK;
1795 psn = mask_psn(be32_to_cpu(packet.ohdr->bth[2]));
1797 seq_printf(s, "\tEnt %u: opcode 0x%x, qpn 0x%x, psn 0x%x\n",
1798 mdata.ps_head, opcode, qpn, psn);
1799 next:
1800 update_ps_mdata(&mdata, rcd);
1804 const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = {
1805 [RHF_RCV_TYPE_EXPECTED] = kdeth_process_expected,
1806 [RHF_RCV_TYPE_EAGER] = kdeth_process_eager,
1807 [RHF_RCV_TYPE_IB] = process_receive_ib,
1808 [RHF_RCV_TYPE_ERROR] = process_receive_error,
1809 [RHF_RCV_TYPE_BYPASS] = process_receive_bypass,
1810 [RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
1811 [RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
1812 [RHF_RCV_TYPE_INVALID7] = process_receive_invalid,