/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

static int reply(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

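/*
 * qib_send_trap - queue a SubnTrap() SMP toward the subnet manager.
 *
 * Traps are suppressed while the link is not ACTIVE and are rate-limited
 * via ibp->trap_timeout, which scales with PortInfo:SubnetTimeout.  The
 * address handle toward the SM is cached in ibp->sm_ah and reused for
 * subsequent traps.
 */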
static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	int ret;
	unsigned long flags;
	unsigned long timeout;

	agent = ibp->send_agent;
	if (!agent)
		return;

	/* o14-3.2.1 */
	if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
		return;

	/* o14-2 */
	if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
		return;

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
	if (IS_ERR(send_buf))
		return;

	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_TRAP;
	ibp->tid++;
	smp->tid = cpu_to_be64(ibp->tid);
	smp->attr_id = IB_SMP_ATTR_NOTICE;
	/* o14-1: smp->mkey = 0; */
	memcpy(smp->data, data, len);

	spin_lock_irqsave(&ibp->lock, flags);
	if (!ibp->sm_ah) {
		if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
			struct ib_ah *ah;
			struct ib_ah_attr attr;

			memset(&attr, 0, sizeof attr);
			attr.dlid = ibp->sm_lid;
			attr.port_num = ppd_from_ibp(ibp)->port;
			ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
			if (IS_ERR(ah))
				ret = -EINVAL;
			else {
				send_buf->ah = ah;
				ibp->sm_ah = to_iah(ah);
				ret = 0;
			}
		} else
			ret = -EINVAL;
	} else {
		send_buf->ah = &ibp->sm_ah->ibah;
		ret = 0;
	}
	spin_unlock_irqrestore(&ibp->lock, flags);

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (!ret) {
		/* 4.096 usec. */
		timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
		ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
	} else {
		ib_free_send_mad(send_buf);
		ibp->trap_timeout = 0;
	}
}

/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
{
	struct ib_mad_notice_attr data;

	if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
		ibp->pkey_violations++;
	else
		ibp->qkey_violations++;
	ibp->n_pkt_drops++;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = trap_num;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_257_258.lid1 = lid1;
	data.details.ntc_257_258.lid2 = lid2;
	data.details.ntc_257_258.key = cpu_to_be32(key);
	data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
	data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);

	qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
{
	struct ib_mad_notice_attr data;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_256.lid = data.issuer_lid;
	data.details.ntc_256.method = smp->method;
	data.details.ntc_256.attr_id = smp->attr_id;
	data.details.ntc_256.attr_mod = smp->attr_mod;
	data.details.ntc_256.mkey = smp->mkey;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		u8 hop_cnt;

		data.details.ntc_256.dr_slid = smp->dr_slid;
		data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
		hop_cnt = smp->hop_cnt;
		if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
			data.details.ntc_256.dr_trunc_hop |=
				IB_NOTICE_TRAP_DR_TRUNC;
			hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
		}
		data.details.ntc_256.dr_trunc_hop |= hop_cnt;
		memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
		       hop_cnt);
	}

	qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void qib_cap_mask_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);

	qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void qib_sys_guid_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_145.lid = data.issuer_lid;
	data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;

	qib_send_trap(ibp, &data, sizeof data);
}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void qib_node_desc_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.local_changes = 1;
	data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;

	qib_send_trap(ibp, &data, sizeof data);
}

static int subn_get_nodedescription(struct ib_smp *smp,
				    struct ib_device *ibdev)
{
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}

static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 vendor, majrev, minrev;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* GUID 0 is illegal */
	if (smp->attr_mod || pidx >= dd->num_pports ||
	    dd->pport[pidx].guid == 0)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		nip->port_guid = dd->pport[pidx].guid;

	nip->base_version = 1;
	nip->class_version = 1;
	nip->node_type = 1;     /* channel adapter */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = ib_qib_sys_image_guid;
	nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
	nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->deviceid);
	majrev = dd->majrev;
	minrev = dd->minrev;
	nip->revision = cpu_to_be32((majrev << 16) | minrev);
	nip->local_port_num = port;
	vendor = dd->vendorid;
	nip->vendor_id[0] = QIB_SRC_OUI_1;
	nip->vendor_id[1] = QIB_SRC_OUI_2;
	nip->vendor_id[2] = QIB_SRC_OUI_3;

	return reply(smp);
}

static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	memset(smp->data, 0, sizeof(smp->data));

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		__be64 g = ppd->guid;
		unsigned i;

		/* GUID 0 is illegal */
		if (g == 0)
			smp->status |= IB_SMP_INVALID_FIELD;
		else {
			/* The first is a copy of the read-only HW GUID. */
			p[0] = g;
			for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
				p[i] = ibp->guids[i - 1];
		}
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
				     (u32)n);
	return 0;
}

static int get_phyerrthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
				     (u32)n);
	return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @ppd: the physical port data
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
		IB_LINKINITCMD_SLEEP;
}

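/*
 * check_mkey - validate the M_Key of an incoming SMP against the port's
 * PortInfo:M_Key, honoring the M_KeyProtectBits.  On a mismatch the
 * violation counter is bumped, the lease timer may be armed, a bad
 * M_Key trap is sent, and IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED
 * is returned so the MAD is dropped without a response; otherwise
 * returns 0.
 */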
static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
{
	int ret = 0;

	/* Is the mkey in the process of expiring? */
	if (ibp->mkey_lease_timeout &&
	    time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		ibp->mkey_lease_timeout = 0;
		ibp->mkeyprot = 0;
	}

	/* M_Key checking depends on Portinfo:M_Key_protect_bits */
	if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 &&
	    ibp->mkey != smp->mkey &&
	    (smp->method == IB_MGMT_METHOD_SET ||
	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS ||
	     (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) {
		if (ibp->mkey_violations != 0xFFFF)
			++ibp->mkey_violations;
		if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
			ibp->mkey_lease_timeout = jiffies +
				ibp->mkey_lease_period * HZ;
		/* Generate a trap notice. */
		qib_bad_mkey(ibp, smp);
		ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	} else if (ibp->mkey_lease_timeout)
		ibp->mkey_lease_timeout = 0;

	return ret;
}

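/**
 * subn_get_portinfo - get port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 */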
static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u16 lid;
	u8 mtu;
	int ret;
	u32 state;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt) {
			smp->status |= IB_SMP_INVALID_FIELD;
			ret = reply(smp);
			goto bail;
		}
		if (port_num != port) {
			ibp = to_iport(ibdev, port_num);
			ret = check_mkey(ibp, smp, 0);
			if (ret)
				goto bail;
		}
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (smp->method == IB_MGMT_METHOD_SET || ibp->mkey == smp->mkey ||
	    ibp->mkeyprot == 0)
		pip->mkey = ibp->mkey;
	pip->gid_prefix = ibp->gid_prefix;
	lid = ppd->lid;
	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
	pip->sm_lid = cpu_to_be16(ibp->sm_lid);
	pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = ppd->link_width_enabled;
	pip->link_width_supported = ppd->link_width_supported;
	pip->link_width_active = ppd->link_width_active;
	state = dd->f_iblink_state(ppd->lastibcstat);
	pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;

	pip->portphysstate_linkdown =
		(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
		(get_linkdowndefaultstate(ppd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
	pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
		ppd->link_speed_enabled;
	switch (ppd->ibmtu) {
	default: /* something is wrong; fall through */
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
	pip->vlcap_inittype = ppd->vls_supported << 4;  /* InitType = 0 */
	pip->vl_high_limit = ibp->vl_high_limit;
	pip->vl_arb_high_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
	pip->vl_arb_low_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
	/* InitTypeReply = 0 */
	pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
	pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
	pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = QIB_GUIDS_PER_PORT;
	pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
	/* 32.768 usec. response time (guessing) */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(ppd) << 4) |
		get_overrunthreshold(ppd);
	/* pip->max_credit_hint; */
	if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}

/**
 * get_pkeys - return the PKEY table
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd = dd->pport + port - 1;
	/*
	 * always a kernel context, no locking needed.
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];

	memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));

	return 0;
}

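/*
 * Only block 0 (attr_mod == 0) of the P_Key table is supported below;
 * any other block index is rejected with IB_SMP_INVALID_FIELD.
 */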
static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *p = (u16 *) smp->data;
	__be16 *q = (__be16 *) smp->data;

	/* 64 blocks of 32 16-bit P_Key entries */

	memset(smp->data, 0, sizeof(smp->data));
	if (startpx == 0) {
		struct qib_devdata *dd = dd_from_ibdev(ibdev);
		unsigned i, n = qib_get_npkeys(dd);

		get_pkeys(dd, port, p);

		for (i = 0; i < n; i++)
			q[i] = cpu_to_be16(p[i]);
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

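/*
 * subn_set_guidinfo - set the GUID table.  Entry 0 mirrors the
 * read-only hardware GUID, so only entries 1..QIB_GUIDS_PER_PORT-1
 * are writable, and only block 0 (attr_mod == 0) is accepted.
 */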
static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		unsigned i;

		/* The first entry is read-only. */
		for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
			ibp->guids[i - 1] = p[i];
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	/* The only GUID we support is the first read-only entry. */
	return subn_get_guidinfo(smp, ibdev, port);
}

/**
 * subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	char clientrereg = 0;
	unsigned long flags;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u8 vls;
	u8 msl;
	u16 lstate;
	int ret, ore, mtu;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt)
			goto err;
		/* Port attributes can only be set on the receiving port */
		if (port_num != port)
			goto get_only;
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;
	event.device = ibdev;
	event.element.port_num = port;

	ibp->mkey = pip->mkey;
	ibp->gid_prefix = pip->gid_prefix;
	ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	/* Must be a valid unicast LID address. */
	if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
		goto err;
	if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
		if (ppd->lid != lid)
			qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
		if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
			qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
		qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	msl = pip->neighbormtu_mastersmsl & 0xF;
	/* Must be a valid unicast LID address. */
	if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
		goto err;
	if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
		spin_lock_irqsave(&ibp->lock, flags);
		if (ibp->sm_ah) {
			if (smlid != ibp->sm_lid)
				ibp->sm_ah->attr.dlid = smlid;
			if (msl != ibp->sm_sl)
				ibp->sm_ah->attr.sl = msl;
		}
		spin_unlock_irqrestore(&ibp->lock, flags);
		if (smlid != ibp->sm_lid)
			ibp->sm_lid = smlid;
		if (msl != ibp->sm_sl)
			ibp->sm_sl = msl;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* Allow 1x or 4x to be set (see 14.2.6.6). */
	lwe = pip->link_width_enabled;
	if (lwe) {
		if (lwe == 0xFF)
			lwe = ppd->link_width_supported;
		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
			goto err;
		set_link_width_enabled(ppd, lwe);
	}

	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse) {
		/*
		 * The IB 1.2 spec. only allows link speed values
		 * 1, 3, 5, 7, 15.  1.2.1 extended to allow specific
		 * speeds.
		 */
		if (lse == 15)
			lse = ppd->link_speed_supported;
		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
			goto err;
		set_link_speed_enabled(ppd, lse);
	}

	/* Set link down default state. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0: /* NOP */
		break;
	case 1: /* SLEEP */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_SLEEP);
		break;
	case 2: /* POLL */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_POLL);
		break;
	default:
		goto err;
	}

	ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
	ibp->vl_high_limit = pip->vl_high_limit;
	(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
				ibp->vl_high_limit);

	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
	if (mtu == -1)
		goto err;
	qib_set_mtu(ppd, mtu);

	/* Set operational VLs */
	vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
	if (vls) {
		if (vls > ppd->vls_supported)
			goto err;
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
	}

	if (pip->mkey_violations == 0)
		ibp->mkey_violations = 0;

	if (pip->pkey_violations == 0)
		ibp->pkey_violations = 0;

	if (pip->qkey_violations == 0)
		ibp->qkey_violations = 0;

	ore = pip->localphyerrors_overrunerrors;
	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
		goto err;

	if (set_overrunthreshold(ppd, (ore & 0xF)))
		goto err;

	ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	if (pip->clientrereg_resv_subnetto & 0x80) {
		clientrereg = 1;
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		goto err;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid
	 * and must be in the correct state to take effect (see 7.2.6).
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (lstate == 0)
			lstate = QIB_IB_LINKDOWN_ONLY;
		else if (lstate == 1)
			lstate = QIB_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = QIB_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = QIB_IB_LINKDOWN_DISABLE;
		else
			goto err;
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		qib_set_linkstate(ppd, lstate);
		/*
		 * Don't send a reply if the response would be sent
		 * through the disabled port.
		 */
		if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
			goto done;
		}
		qib_wait_linkstate(ppd, QIBL_LINKV, 10);
		break;
	case IB_PORT_ARMED:
		qib_set_linkstate(ppd, QIB_IB_LINKARM);
		break;
	case IB_PORT_ACTIVE:
		qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
		break;
	default:
		/* XXX We have already partially updated our state! */
		goto err;
	}

	ret = subn_get_portinfo(smp, ibdev, port);

	if (clientrereg)
		pip->clientrereg_resv_subnetto |= 0x80;

	goto done;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
get_only:
	ret = subn_get_portinfo(smp, ibdev, port);
done:
	return ret;
}

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @dd: the qlogic_ib device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (ppd->pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
			ppd->pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		ret = 0;
		goto bail;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the qlogic_ib device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (ppd->pkeys[i] == key) {
			if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&ppd->pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i] &&
		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
			/* for qibstats, etc. */
			ppd->pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd;
	struct qib_ctxtdata *rcd;
	int i;
	int changed = 0;

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	rcd = dd->rcd[ppd->hw_pidx];

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = rcd->pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(ppd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(ppd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		rcd->pkeys[i] = key;
	}
	if (changed) {
		struct ib_event event;

		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

		event.event = IB_EVENT_PKEY_CHANGE;
		event.device = &dd->verbs_dev.ibdev;
		event.element.port_num = 1;
		ib_dispatch_event(&event);
	}
	return 0;
}

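/*
 * subn_set_pkeytable - byte-swap the incoming table in place, push it
 * to the hardware via set_pkeys(), then return the current table by
 * falling through to subn_get_pkeytable().
 */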
static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *p = (__be16 *) smp->data;
	u16 *q = (u16 *) smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	unsigned i, n = qib_get_npkeys(dd);

	for (i = 0; i < n; i++)
		q[i] = be16_to_cpu(p[i]);

	if (startpx != 0 || set_pkeys(dd, port, q) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_pkeytable(smp, ibdev, port);
}

static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	memset(smp->data, 0, sizeof(smp->data));

	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
		smp->status |= IB_SMP_UNSUP_METHOD;
	else
		for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
			*p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];

	return reply(smp);
}

static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
		smp->status |= IB_SMP_UNSUP_METHOD;
		return reply(smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
		ibp->sl_to_vl[i] = *p >> 4;
		ibp->sl_to_vl[i + 1] = *p & 0xF;
	}
	qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
			    _QIB_EVENT_SL2VL_CHANGE_BIT);

	return subn_get_sl_to_vl(smp, ibdev, port);
}

static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	memset(smp->data, 0, sizeof(smp->data));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_vl_arb(smp, ibdev, port);
}

static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	/*
	 * For now, we only send the trap once so no need to process this.
	 * o13-6, o13-7,
	 * o14-3.a4 The SMA shall not send any message in response to a valid
	 * SubnTrapRepress() message.
	 */
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}

static int pma_get_classportinfo(struct ib_perf *pmp,
				 struct ib_device *ibdev)
{
	struct ib_pma_classportinfo *p =
		(struct ib_pma_classportinfo *)pmp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);

	memset(pmp->data, 0, sizeof(pmp->data));

	if (pmp->attr_mod != 0)
		pmp->status |= IB_SMP_INVALID_FIELD;

	/* Note that AllPortSelect is not valid */
	p->base_version = 1;
	p->class_version = 1;
	p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
	/*
	 * Set the most significant bit of CM2 to indicate support for
	 * congestion statistics
	 */
	p->reserved[0] = dd->psxmitwait_supported << 7;
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
	 */
	p->resp_time_value = 18;

	return reply((struct ib_smp *) pmp);
}

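/*
 * The pma_*samples* handlers below share the per-port sampling state
 * (pma_sample_start, pma_sample_interval, pma_tag, pma_counter_select)
 * under ibp->lock, since the congestion timer manipulates the same
 * PS* hardware counters.
 */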
static int pma_get_portsamplescontrol(struct ib_perf *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->attr_mod != 0 || port_select != port) {
		pmp->status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}
	spin_lock_irqsave(&ibp->lock, flags);
	p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
	p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	p->counter_width = 4;   /* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	p->sample_start = cpu_to_be32(ibp->pma_sample_start);
	p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
	p->tag = cpu_to_be16(ibp->pma_tag);
	p->counter_select[0] = ibp->pma_counter_select[0];
	p->counter_select[1] = ibp->pma_counter_select[1];
	p->counter_select[2] = ibp->pma_counter_select[2];
	p->counter_select[3] = ibp->pma_counter_select[3];
	p->counter_select[4] = ibp->pma_counter_select[4];
	spin_unlock_irqrestore(&ibp->lock, flags);

bail:
	return reply((struct ib_smp *) pmp);
}

static int pma_set_portsamplescontrol(struct ib_perf *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status, xmit_flags;
	int ret;

	if (pmp->attr_mod != 0 || p->port_select != port) {
		pmp->status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	spin_lock_irqsave(&ibp->lock, flags);

	/* Port Sampling code owns the PS* HW counters */
	xmit_flags = ppd->cong_stats.flags;
	ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
	status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	if (status == IB_PMA_SAMPLE_STATUS_DONE ||
	    (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
	     xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
		ibp->pma_sample_start = be32_to_cpu(p->sample_start);
		ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
		ibp->pma_tag = be16_to_cpu(p->tag);
		ibp->pma_counter_select[0] = p->counter_select[0];
		ibp->pma_counter_select[1] = p->counter_select[1];
		ibp->pma_counter_select[2] = p->counter_select[2];
		ibp->pma_counter_select[3] = p->counter_select[3];
		ibp->pma_counter_select[4] = p->counter_select[4];
		dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
				      ibp->pma_sample_start);
	}
	spin_unlock_irqrestore(&ibp->lock, flags);

	ret = pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}

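/*
 * get_counter - read one PS* sampling counter from the hardware.
 * The counter is named by the PortSamplesControl CounterSelect
 * encoding (IB_PMA_PORT_XMIT_DATA etc.); unknown selectors read as 0.
 */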
static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
		       __be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
		break;
	default:
		ret = 0;
	}

	return ret;
}

/* This function assumes that the xmit_wait lock is already held */
static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
{
	u32 delta;

	delta = get_counter(&ppd->ibport_data, ppd,
			    IB_PMA_PORT_XMIT_WAIT);
	return ppd->cong_stats.counter + delta;
}

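/*
 * cache_hw_sample_counters - snapshot all five PS* hardware counters
 * into cong_stats.counter_cache so they can still be reported after
 * the sampling hardware has been re-armed for the congestion timer.
 */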
static void cache_hw_sample_counters(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;

	ppd->cong_stats.counter_cache.psxmitdata =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
	ppd->cong_stats.counter_cache.psrcvdata =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
	ppd->cong_stats.counter_cache.psxmitpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
	ppd->cong_stats.counter_cache.psrcvpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
	ppd->cong_stats.counter_cache.psxmitwait =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
}

static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
					__be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = ppd->cong_stats.counter_cache.psxmitdata;
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = ppd->cong_stats.counter_cache.psrcvdata;
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = ppd->cong_stats.counter_cache.psxmitpkts;
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = ppd->cong_stats.counter_cache.psrcvpkts;
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = ppd->cong_stats.counter_cache.psxmitwait;
		break;
	default:
		ret = 0;
	}

	return ret;
}

static int pma_get_portsamplesresult(struct ib_perf *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult *p =
		(struct ib_pma_portsamplesresult *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->lock, flags);
	p->tag = cpu_to_be16(ibp->pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
		p->counter[i] = cpu_to_be32(
			get_cache_hw_sample_counters(
				ppd, ibp->pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->lock, flags);

	return reply((struct ib_smp *) pmp);
}

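/*
 * pma_get_portsamplesresult_ext - same flow as the 32-bit variant
 * above, but returns 64-bit counters and sets the extended_width bit.
 */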
static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult_ext *p =
		(struct ib_pma_portsamplesresult_ext *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	/* Port Sampling code owns the PS* HW counters */
	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->lock, flags);
	p->tag = cpu_to_be16(ibp->pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		/* 64 bits */
		p->extended_width = cpu_to_be32(0x80000000);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
		p->counter[i] = cpu_to_be64(
			get_cache_hw_sample_counters(
				ppd, ibp->pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->lock, flags);

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portcounters(struct ib_perf *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;
	u8 port_select = p->port_select;

	qib_get_counters(ppd, &cntrs);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->n_vl15_dropped;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->attr_mod != 0 || port_select != port)
		pmp->status |= IB_SMP_INVALID_FIELD;

	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_packets =
			cpu_to_be32((u32) cntrs.port_rcv_packets);

	return reply((struct ib_smp *) pmp);
}

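/*
 * The PortCounters fields above saturate at their IBA field widths
 * (8, 16 or 32 bits), hence the clamping.  The congestion variant
 * below reports the data counters at full 64-bit width instead.
 */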
static int pma_get_portcounters_cong(struct ib_perf *pmp,
				     struct ib_device *ibdev, u8 port)
{
	/* Congestion PMA packets start at offset 24 not 64 */
	struct ib_pma_portcounters_cong *p =
		(struct ib_pma_portcounters_cong *)pmp->reserved;
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;
	u64 xmit_wait_counter;
	unsigned long flags;

	/*
	 * This check is performed only in the GET method because the
	 * SET method ends up calling this anyway.
	 */
	if (!dd->psxmitwait_supported)
		pmp->status |= IB_SMP_UNSUP_METH_ATTR;
	if (port_select != port)
		pmp->status |= IB_SMP_INVALID_FIELD;

	qib_get_counters(ppd, &cntrs);
	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
	xmit_wait_counter = xmit_wait_get_value_delta(ppd);
	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -=
		ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->n_vl15_dropped;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;

	memset(pmp->reserved, 0, sizeof(pmp->reserved) +
	       sizeof(pmp->data));

	/*
	 * Set top 3 bits to indicate interval in picoseconds in
	 * remaining bits.
	 */
	p->port_check_rate =
		cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
			    (dd->psxmitwait_check_rate &
			     ~(QIB_XMIT_RATE_PICO << 13)));
	p->port_adr_events = cpu_to_be64(0);
	p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
	p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
	p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
	p->port_xmit_packets =
		cpu_to_be64(cntrs.port_xmit_packets);
	p->port_rcv_packets =
		cpu_to_be64(cntrs.port_rcv_packets);
	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16(
				(u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter =
			(u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16(
				(u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);

	return reply((struct ib_smp *)pmp);
}

static int pma_get_portcounters_ext(struct ib_perf *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p =
		(struct ib_pma_portcounters_ext *)pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->attr_mod != 0 || port_select != port) {
		pmp->status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	/* Adjust counters for any resets done. */
	swords -= ibp->z_port_xmit_data;
	rwords -= ibp->z_port_rcv_data;
	spkts -= ibp->z_port_xmit_packets;
	rpkts -= ibp->z_port_rcv_packets;

	p->port_xmit_data = cpu_to_be64(swords);
	p->port_rcv_data = cpu_to_be64(rwords);
	p->port_xmit_packets = cpu_to_be64(spkts);
	p->port_rcv_packets = cpu_to_be64(rpkts);
	p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);
	p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv);
	p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);
	p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv);

bail:
	return reply((struct ib_smp *) pmp);
}

static int pma_set_portcounters(struct ib_perf *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	qib_get_counters(ppd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		ibp->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		ibp->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		ibp->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;

	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
		ibp->n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		ibp->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;

	return pma_get_portcounters(pmp, ibdev, port);
}

static int pma_set_portcounters_cong(struct ib_perf *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	struct qib_verbs_counters cntrs;
	u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;
	int ret = 0;
	unsigned long flags;

	qib_get_counters(ppd, &cntrs);
	/* Get counter values before we save them */
	ret = pma_get_portcounters_cong(pmp, ibdev, port);

	if (counter_select & IB_PMA_SEL_CONG_XMIT) {
		spin_lock_irqsave(&ppd->ibport_data.lock, flags);
		ppd->cong_stats.counter = 0;
		dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
				      0x0);
		spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
	}
	if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
		ibp->z_port_xmit_data = cntrs.port_xmit_data;
		ibp->z_port_rcv_data = cntrs.port_rcv_data;
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	}
	if (counter_select & IB_PMA_SEL_CONG_ALL) {
		ibp->z_symbol_error_counter =
			cntrs.symbol_error_counter;
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;
		ibp->z_link_downed_counter =
			cntrs.link_downed_counter;
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;
		ibp->z_port_xmit_discards =
			cntrs.port_xmit_discards;
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;
		ibp->n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	return ret;
}

static int pma_set_portcounters_ext(struct ib_perf *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = swords;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		ibp->z_port_rcv_data = rwords;

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = spkts;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = rpkts;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		ibp->n_unicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		ibp->n_unicast_rcv = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		ibp->n_multicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		ibp->n_multicast_rcv = 0;

	return pma_get_portcounters_ext(pmp, ibdev, port);
}

static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port, struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int ret;

	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply(smp);
		goto bail;
	}

	ret = check_mkey(ibp, smp, mad_flags);
	if (ret) {
		u32 port_num = be32_to_cpu(smp->attr_mod);

		/*
		 * If this is a get/set portinfo, we already check the
		 * M_Key if the MAD is for another port and the M_Key
		 * is OK on the receiving port. This check is needed
		 * to increment the error counters when the M_Key
		 * fails to match on *both* ports.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
		    (smp->method == IB_MGMT_METHOD_GET ||
		     smp->method == IB_MGMT_METHOD_SET) &&
		    port_num && port_num <= ibdev->phys_port_cnt &&
		    port != port_num)
			(void) check_mkey(to_iport(ibdev, port_num), smp, 0);
		goto bail;
	}

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_DESC:
			ret = subn_get_nodedescription(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_NODE_INFO:
			ret = subn_get_nodeinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_get_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_get_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_get_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_get_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_get_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (ibp->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_set_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_set_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_set_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_set_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_set_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (ibp->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (smp->attr_id == IB_SMP_ATTR_NOTICE)
			ret = subn_trap_repress(smp, ibdev, port);
		else {
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
		}
		goto bail;

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_REPORT_RESP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	case IB_MGMT_METHOD_SEND:
		if (ib_get_smp_direction(smp) &&
		    smp->attr_id == QIB_VENDOR_IPG) {
			ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
					      smp->data[0]);
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		} else
			ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply(smp);
	}

bail:
	return ret;
}

static int process_perf(struct ib_device *ibdev, u8 port,
			struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_perf *pmp = (struct ib_perf *)out_mad;
	int ret;

	*out_mad = *in_mad;
	if (pmp->class_version != 1) {
		pmp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	switch (pmp->method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = pma_get_classportinfo(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_get_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = pma_get_portsamplesresult(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_get_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_get_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_get_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_set_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_set_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_set_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_set_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		pmp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}

bail:
	return ret;
}

/**
 * qib_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
		    struct ib_wc *in_wc, struct ib_grh *in_grh,
		    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	int ret;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
		goto bail;

	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf(ibdev, port, in_mad, out_mad);
		goto bail;

	default:
		ret = IB_MAD_RESULT_SUCCESS;
	}

bail:
	return ret;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}

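/*
 * xmit_wait_timer_func - periodic (one second) timer that folds the
 * PortXmitWait hardware sample into cong_stats.counter and re-arms the
 * sampling hardware, unless the Port Sampling code currently owns the
 * PS* counters and a sample is still running.
 */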
static void xmit_wait_timer_func(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
	struct qib_devdata *dd = dd_from_ppd(ppd);
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			/* save counter cache */
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		} else
			goto done;
	}
	ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
	dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
done:
	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
	mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
}

int qib_create_agents(struct qib_ibdev *dev)
{
	struct qib_devdata *dd = dd_from_dev(dev);
	struct ib_mad_agent *agent;
	struct qib_ibport *ibp;
	int p;
	int ret;

	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
					      NULL, 0, send_handler,
					      NULL, NULL);
		if (IS_ERR(agent)) {
			ret = PTR_ERR(agent);
			goto err;
		}

		/* Initialize xmit_wait structure */
		dd->pport[p].cong_stats.counter = 0;
		init_timer(&dd->pport[p].cong_stats.timer);
		dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
		dd->pport[p].cong_stats.timer.data =
			(unsigned long)(&dd->pport[p]);
		dd->pport[p].cong_stats.timer.expires = 0;
		add_timer(&dd->pport[p].cong_stats.timer);

		ibp->send_agent = agent;
	}

	return 0;

err:
	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		if (ibp->send_agent) {
			agent = ibp->send_agent;
			ibp->send_agent = NULL;
			ib_unregister_mad_agent(agent);
		}
	}

	return ret;
}

void qib_free_agents(struct qib_ibdev *dev)
{
	struct qib_devdata *dd = dd_from_dev(dev);
	struct ib_mad_agent *agent;
	struct qib_ibport *ibp;
	int p;

	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		if (ibp->send_agent) {
			agent = ibp->send_agent;
			ibp->send_agent = NULL;
			ib_unregister_mad_agent(agent);
		}
		if (ibp->sm_ah) {
			ib_destroy_ah(&ibp->sm_ah->ibah);
			ibp->sm_ah = NULL;
		}
		if (dd->pport[p].cong_stats.timer.data)
			del_timer_sync(&dd->pport[p].cong_stats.timer);
	}
}